import xml.etree.ElementTree
import random
import math
+import operator
from .utils import *
self._ready = False
self.set_downloader(downloader)
- def suitable(self, url):
+ @classmethod
+ def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
- return re.match(self._VALID_URL, url) is not None
+ return re.match(cls._VALID_URL, url) is not None
- def working(self):
+ @classmethod
+ def working(cls):
"""Getter method for _WORKING."""
- return self._WORKING
+ return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
tube\.majestyc\.net/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
- (?!view_play_list|my_playlists|artist|playlist) # ignore playlist URLs
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?(1).+)? # if we found the ID, everything can follow
$"""
_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
- _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
+ _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_NETRC_MACHINE = 'youtube'
}
IE_NAME = u'youtube'
- def suitable(self, url):
+ @classmethod
+ def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
- return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+ if YoutubePlaylistIE.suitable(url): return False
+ return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
def report_lang(self):
"""Report attempt to set language."""
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
- self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+ self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
return
# Set language
self.report_lang()
compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
+ self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
return
# No authentication to be performed
if username is None:
return
+ request = compat_urllib_request.Request(self._LOGIN_URL)
+ try:
+ login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
+ return
+
+ galx = None
+ dsh = None
+ match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
+ if match:
+ galx = match.group(1)
+
+ match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
+ if match:
+ dsh = match.group(1)
+
# Log in
- login_form = {
- 'current_form': 'loginForm',
- 'next': '/',
- 'action_login': 'Log In',
- 'username': username,
- 'password': password,
- }
- request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+ login_form_strs = {
+ u'continue': u'http://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+ u'Email': username,
+ u'GALX': galx,
+ u'Passwd': password,
+ u'PersistentCookie': u'yes',
+ u'_utf8': u'霱',
+ u'bgresponse': u'js_disabled',
+ u'checkConnection': u'',
+ u'checkedDomains': u'youtube',
+ u'dnConn': u'',
+ u'dsh': dsh,
+ u'pstMsg': u'0',
+ u'rmShown': u'1',
+ u'secTok': u'',
+ u'signIn': u'Sign in',
+ u'timeStmp': u'',
+ u'service': u'youtube',
+ u'uilel': u'3',
+ u'hl': u'en_US',
+ }
+ # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
+ # chokes on unicode
+ login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
+ login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
+ request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
try:
self.report_login()
login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
- if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
- self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
+ if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
+ self._downloader.report_warning(u'unable to log in: bad username or password')
return
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+ self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
return
# Confirm age
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
IE_NAME = u'dailymotion'
+ _WORKING = False
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
- _VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'
+ _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
IE_NAME = u'vimeo'
def __init__(self, downloader=None):
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
- video_id = mobj.group(1)
+ video_id = mobj.group('id')
+ if not mobj.group('proto'):
+ url = 'https://' + url
+ if mobj.group('direct_link'):
+ url = 'https://vimeo.com/' + video_id
# Retrieve video webpage to extract further information
request = compat_urllib_request.Request(url, None, std_headers)
opener = compat_urllib_request.OpenerDirector()
for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
HTTPMethodFallback, HEADRedirectHandler,
- compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
+ compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
opener.add_handler(handler())
response = opener.open(HeadRequest(url))
if mobj is None:
# Broaden the search a little bit
mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+ if mobj is None:
+ # Broaden the search a little bit: JWPlayer JS loader
+ mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_youtube_results:
- self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
+ self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
n = self._max_youtube_results
self._download_n_results(query, n)
return
result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
request = compat_urllib_request.Request(result_url)
try:
- data = compat_urllib_request.urlopen(request).read()
+ data = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
return
api_response = json.loads(data)['data']
+ if not 'items' in api_response:
+ self._downloader.trouble(u'[youtube] No video results')
+ return
+
new_ids = list(video['id'] for video in api_response['items'])
video_ids += new_ids
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_google_results:
- self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
+ self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
n = self._max_google_results
self._download_n_results(query, n)
return
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_yahoo_results:
- self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
+ self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
n = self._max_yahoo_results
self._download_n_results(query, n)
return
class YoutubePlaylistIE(InfoExtractor):
"""Information Extractor for YouTube playlists."""
- _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
- _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
- _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&([^&"]+&)*list=.*?%s'
- _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+ _VALID_URL = r"""(?:
+ (?:https?://)?
+ (?:\w+\.)?
+ youtube\.com/
+ (?:
+ (?:course|view_play_list|my_playlists|artist|playlist|watch)
+ \? (?:.*?&)*? (?:p|a|list)=
+ | user/.*?/user/
+ | p/
+ | user/.*?#[pg]/c/
+ )
+ ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
+ .*
+ |
+ ((?:PL|EC|UU)[0-9A-Za-z-_]{10,})
+ )"""
+ _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json'
+ _MAX_RESULTS = 50
IE_NAME = u'youtube:playlist'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
+ @classmethod
+ def suitable(cls, url):
+ """Receives a URL and returns True if suitable for this IE."""
+ return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
+
def report_download_page(self, playlist_id, pagenum):
"""Report attempt to download playlist page with given number."""
self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
def _real_extract(self, url):
# Extract playlist id
- mobj = re.match(self._VALID_URL, url)
+ mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url)
return
- # Single video case
- if mobj.group(3) is not None:
- self._downloader.download([mobj.group(3)])
- return
-
- # Download playlist pages
- # prefix is 'p' as default for playlists but there are other types that need extra care
- playlist_prefix = mobj.group(1)
- if playlist_prefix == 'a':
- playlist_access = 'artist'
- else:
- playlist_prefix = 'p'
- playlist_access = 'view_play_list'
- playlist_id = mobj.group(2)
- video_ids = []
- pagenum = 1
+ # Download playlist videos from API
+ playlist_id = mobj.group(1) or mobj.group(2)
+ page_num = 1
+ videos = []
while True:
- self.report_download_page(playlist_id, pagenum)
- url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
- request = compat_urllib_request.Request(url)
+ self.report_download_page(playlist_id, page_num)
+
+ url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
try:
- page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ page = compat_urllib_request.urlopen(url).read().decode('utf8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return
- # Extract video identifiers
- ids_in_page = []
- for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
- if mobj.group(1) not in ids_in_page:
- ids_in_page.append(mobj.group(1))
- video_ids.extend(ids_in_page)
+ try:
+ response = json.loads(page)
+ except ValueError as err:
+ self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err))
+ return
- if self._MORE_PAGES_INDICATOR not in page:
+ if not 'feed' in response or not 'entry' in response['feed']:
+ self._downloader.trouble(u'ERROR: Got a malformed response from YouTube API')
+ return
+ videos += [ (entry['yt$position']['$t'], entry['content']['src'])
+ for entry in response['feed']['entry']
+ if 'content' in entry ]
+
+ if len(response['feed']['entry']) < self._MAX_RESULTS:
break
- pagenum = pagenum + 1
+ page_num += 1
- total = len(video_ids)
+ videos = [v[1] for v in sorted(videos)]
+ total = len(videos)
playliststart = self._downloader.params.get('playliststart', 1) - 1
playlistend = self._downloader.params.get('playlistend', -1)
if playlistend == -1:
- video_ids = video_ids[playliststart:]
+ videos = videos[playliststart:]
else:
- video_ids = video_ids[playliststart:playlistend]
+ videos = videos[playliststart:playlistend]
- if len(video_ids) == total:
+ if len(videos) == total:
self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
else:
- self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+ self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(videos)))
- for id in video_ids:
- self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+ for video in videos:
+ self._downloader.download([video])
return
while True:
self.report_download_page(username, pagenum)
-
- request = compat_urllib_request.Request( page_base + "&page=" + str(pagenum) )
-
+ url = page_base + "&page=" + str(pagenum)
+ request = compat_urllib_request.Request( url )
try:
page = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
- self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+ self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
return
if useremail is None:
self.report_login()
login_results = compat_urllib_request.urlopen(request).read()
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
- self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
+ self._downloader.report_warning(u'unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
return
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+ self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
return
def _real_extract(self, url):
params_raw = compat_urllib_parse.unquote(data['params'])
params = json.loads(params_raw)
video_url = params['hd_src']
+ if not video_url:
+ video_url = params['sd_src']
+ if not video_url:
+ raise ExtractorError(u'Cannot find video URL')
video_duration = int(params['video_duration'])
m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
+ urlp = compat_urllib_parse_urlparse(url)
+ if urlp.path.startswith('/play/'):
+ request = compat_urllib_request.Request(url)
+ response = compat_urllib_request.urlopen(request)
+ redirecturl = response.geturl()
+ rurlp = compat_urllib_parse_urlparse(redirecturl)
+ file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
+ url = 'http://blip.tv/a/a-' + file_id
+ return self._real_extract(url)
+
+
if '?' in url:
cchar = '&'
else:
webpage = self._download_webpage(webpage_url, video_id)
self.report_extraction(video_id)
- mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+ mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
'400': '384x216',
}
- def suitable(self, url):
+ @classmethod
+ def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
- return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+ return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
def report_extraction(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
'uploader': showName,
'upload_date': None,
'title': showName,
- 'ext': 'flv',
+ 'ext': 'mp4',
'thumbnail': imgUrl,
'description': description,
'player_url': playerUrl,
}
return [info]
-class TweetReelIE(InfoExtractor):
- _VALID_URL = r'^(?:https?://)?(?:www\.)?tweetreel\.com/[?](?P<id>[0-9a-z]+)$'
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
- return
-
- video_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
-
- m = re.search(r'<div id="left" status_id="([0-9]+)">', webpage)
- if not m:
- self._downloader.trouble(u'ERROR: Cannot find status ID')
- status_id = m.group(1)
-
- m = re.search(r'<div class="tweet_text">(.*?)</div>', webpage, flags=re.DOTALL)
- if not m:
- self._downloader.trouble(u'WARNING: Cannot find description')
- desc = unescapeHTML(re.sub('<a.*?</a>', '', m.group(1))).strip()
-
- m = re.search(r'<div class="tweet_info">.*?from <a target="_blank" href="https?://twitter.com/(?P<uploader_id>.+?)">(?P<uploader>.+?)</a>', webpage, flags=re.DOTALL)
- if not m:
- self._downloader.trouble(u'ERROR: Cannot find uploader')
- uploader = unescapeHTML(m.group('uploader'))
- uploader_id = unescapeHTML(m.group('uploader_id'))
-
- m = re.search(r'<span unixtime="([0-9]+)"', webpage)
- if not m:
- self._downloader.trouble(u'ERROR: Cannot find upload date')
- upload_date = datetime.datetime.fromtimestamp(int(m.group(1))).strftime('%Y%m%d')
-
- title = desc
- video_url = 'http://files.tweetreel.com/video/' + status_id + '.mov'
-
- info = {
- 'id': video_id,
- 'url': video_url,
- 'ext': 'mov',
- 'title': title,
- 'description': desc,
- 'uploader': uploader,
- 'uploader_id': uploader_id,
- 'internal_id': status_id,
- 'upload_date': upload_date
- }
- return [info]
-
class SteamIE(InfoExtractor):
- _VALID_URL = r"""http://store.steampowered.com/
+ _VALID_URL = r"""http://store.steampowered.com/
(?P<urltype>video|app)/ #If the page is only for videos or for a game
(?P<gameID>\d+)/?
(?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
"""
- def suitable(self, url):
+ @classmethod
+ def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
- return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+ return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
def _real_extract(self, url):
m = re.match(self._VALID_URL, url, re.VERBOSE)
mweb = re.finditer(urlRE, webpage)
namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
titles = re.finditer(namesRE, webpage)
+ thumbsRE = r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">'
+ thumbs = re.finditer(thumbsRE, webpage)
videos = []
- for vid,vtitle in zip(mweb,titles):
+ for vid,vtitle,thumb in zip(mweb,titles,thumbs):
video_id = vid.group('videoID')
title = vtitle.group('videoName')
video_url = vid.group('videoURL')
+ video_thumb = thumb.group('thumbnail')
if not video_url:
self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
info = {
'id':video_id,
'url':video_url,
'ext': 'flv',
- 'title': unescapeHTML(title)
+ 'title': unescapeHTML(title),
+ 'thumbnail': video_thumb
}
videos.append(info)
return videos
}
return [info]
+class WorldStarHipHopIE(InfoExtractor):
+ """Information extractor for WorldStarHipHop / WorldStarCandy video pages."""
+ _VALID_URL = r"""(http://(?:www|m).worldstar(?:candy|hiphop)\.com.*)"""
+ IE_NAME = u'WorldStarHipHop'
+
+ def _real_extract(self, url):
+ # Returns a one-element result list with url/title/thumbnail/ext;
+ # each field is None when it cannot be found on the page.
+ results = []
+
+ # Direct link to the media file on the hw-videos CDN (mp4 or flv).
+ _src_url = r"""(http://hw-videos.*(?:mp4|flv))"""
+
+ # NOTE(review): the page is fetched with urlopen directly instead of the
+ # usual _download_webpage helper, so errors are not reported consistently
+ # with the other extractors — confirm this is intentional.
+ webpage_src = compat_urllib_request.urlopen(str(url)).read()
+
+ mobj = re.search(_src_url, webpage_src)
+
+ if mobj is not None:
+ video_url = mobj.group()
+ # Derive the container extension from the matched URL itself.
+ if 'mp4' in video_url:
+ ext = '.mp4'
+ else:
+ ext = '.flv'
+ else:
+ video_url = None
+ ext = None
+
+ # Title comes from the HTML <title> element, then gets cleaned up below.
+ _title = r"""<title>(.*)</title>"""
+
+ mobj = re.search(_title, webpage_src)
+
+ if mobj is not None:
+ title = mobj.group(1)
+ # NOTE(review): these replacement targets look garbled (e.g. replacing
+ # '&' with the letter 'n', and two identical quote replacements) —
+ # presumably they were HTML entities such as '&amp;' / '&#39;' /
+ # '&quot;' before an encoding mishap; confirm against the live site.
+ title = title.replace("'", "")
+ title = title.replace("'", "")
+ title = title.replace('Video: ', '')
+ title = title.replace('"', '"')
+ title = title.replace('&', 'n')
+ else:
+ title = None
+
+ _thumbnail = r"""rel="image_src" href="(.*)" />"""
+
+ mobj = re.search(_thumbnail, webpage_src)
+
+ # Getting thumbnail and if not thumbnail sets correct title for WSHH candy video.
+ if mobj is not None:
+ thumbnail = mobj.group(1)
+ else:
+ # Candy pages carry the title in a dedicated span instead of <title>.
+ _title = r"""candytitles.*>(.*)</span>"""
+ mobj = re.search(_title, webpage_src)
+ if mobj is not None:
+ title = mobj.group(1)
+ thumbnail = None
+
+ results.append({
+ 'url' : video_url,
+ 'title' : title,
+ 'thumbnail' : thumbnail,
+ 'ext' : ext
+ })
+
+ return results
+
class RBMARadioIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
class YouPornIE(InfoExtractor):
"""Information extractor for youporn.com."""
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
-
+
def _print_formats(self, formats):
"""Print all available formats"""
print(u'Available formats:')
webpage = self._download_webpage(req, video_id)
# Get the video title
- result = re.search(r'videoTitleArea">(?P<title>.*)</h1>', webpage)
+ result = re.search(r'<h1.*?>(?P<title>.*)</h1>', webpage)
if result is None:
- raise ExtractorError(u'ERROR: unable to extract video title')
+ raise ExtractorError(u'Unable to extract video title')
video_title = result.group('title').strip()
# Get the video date
- result = re.search(r'Date:</b>(?P<date>.*)</li>', webpage)
+ result = re.search(r'Date:</label>(?P<date>.*) </li>', webpage)
if result is None:
- self._downloader.to_stderr(u'WARNING: unable to extract video date')
+ self._downloader.report_warning(u'unable to extract video date')
upload_date = None
else:
upload_date = result.group('date').strip()
# Get the video uploader
- result = re.search(r'Submitted:</b>(?P<uploader>.*)</li>', webpage)
+ result = re.search(r'Submitted:</label>(?P<uploader>.*)</li>', webpage)
if result is None:
- self._downloader.to_stderr(u'ERROR: unable to extract uploader')
+ self._downloader.report_warning(u'unable to extract uploader')
video_uploader = None
else:
video_uploader = result.group('uploader').strip()
links = re.findall(LINK_RE, download_list_html)
if(len(links) == 0):
raise ExtractorError(u'ERROR: no known formats available for video')
-
- self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))
+
+ self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))
formats = []
for link in links:
return
return [format]
-
+
class PornotubeIE(InfoExtractor):
"""Information extractor for pornotube.com."""
embed_page_url = result.group(0).strip()
video_id = result.group('videoid')
-
+
webpage = self._download_webpage(embed_page_url, video_id)
# Get the video URL
webpage = self._download_webpage(url, playlist_id)
- m = re.search(r"new TRAX.Mix\((.*?)\);\n*\s*TRAX.initSearchAutocomplete\('#search'\);", webpage, flags=re.DOTALL)
+ m = re.search(r"PAGE.mix = (.*?);\n", webpage, flags=re.DOTALL)
if not m:
raise ExtractorError(u'Cannot find trax information')
json_like = m.group(1)
next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
return res
+class KeekIE(InfoExtractor):
+ """Information extractor for keek.com short videos."""
+ _VALID_URL = r'http://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
+ IE_NAME = u'keek'
+
+ def _real_extract(self, url):
+ # The media and thumbnail URLs are built from the video id alone; only
+ # title and uploader are scraped from the page markup.
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
+ # Media files live on a predictable CDN path keyed by the video id.
+ video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
+ thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
+ webpage = self._download_webpage(url, video_id)
+ # Title is taken from the OpenGraph metadata.
+ m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
+ title = unescapeHTML(m.group('title'))
+ # NOTE(review): both re.search calls assume a match; a page layout change
+ # would raise AttributeError on .group() rather than a clean error.
+ m = re.search(r'<div class="bio-names-and-report">[\s\n]+<h4>(?P<uploader>\w+)</h4>', webpage)
+ uploader = unescapeHTML(m.group('uploader'))
+ info = {
+ 'id':video_id,
+ 'url':video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'uploader': uploader
+ }
+ return [info]
+
+class TEDIE(InfoExtractor):
+ """Information extractor for ted.com talks and playlists."""
+ _VALID_URL=r'''http://www.ted.com/
+ (
+ ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
+ |
+ ((?P<type_talk>talks)) # We have a simple talk
+ )
+ /(?P<name>\w+) # Here goes the name and then ".html"
+ '''
+
+ @classmethod
+ def suitable(cls, url):
+ """Receives a URL and returns True if suitable for this IE."""
+ return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
+
+ def _real_extract(self, url):
+ # Dispatch on URL type: a single talk or a whole playlist.
+ m=re.match(self._VALID_URL, url, re.VERBOSE)
+ if m.group('type_talk'):
+ return [self._talk_info(url)]
+ else :
+ playlist_id=m.group('playlist_id')
+ name=m.group('name')
+ self._downloader.to_screen(u'[%s] Getting info of playlist %s: "%s"' % (self.IE_NAME,playlist_id,name))
+ return self._playlist_videos_info(url,name,playlist_id)
+
+ def _talk_video_link(self,mediaSlug):
+ '''Returns the video link for that mediaSlug'''
+ return 'http://download.ted.com/talks/%s.mp4' % mediaSlug
+
+ def _playlist_videos_info(self,url,name,playlist_id=0):
+ '''Returns the videos of the playlist'''
+ # NOTE(review): the character class [.\s] matches a literal dot or
+ # whitespace, not "any character" — presumably intended; confirm.
+ video_RE=r'''
+ <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)"
+ ([.\s]*?)data-playlist_item_id="(\d+)"
+ ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)"
+ '''
+ video_name_RE=r'<p\ class="talk-title"><a href="(?P<talk_url>/talks/(.+).html)">(?P<fullname>.+?)</a></p>'
+ webpage=self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
+ m_videos=re.finditer(video_RE,webpage,re.VERBOSE)
+ m_names=re.finditer(video_name_RE,webpage)
+ info=[]
+ # Pair each talk entry with its title link and collect per-talk info.
+ for m_video, m_name in zip(m_videos,m_names):
+ video_id=m_video.group('video_id')
+ talk_url='http://www.ted.com%s' % m_name.group('talk_url')
+ info.append(self._talk_info(talk_url,video_id))
+ return info
+
+ def _talk_info(self, url, video_id=0):
+ """Return the video for the talk in the url"""
+ m=re.match(self._VALID_URL, url,re.VERBOSE)
+ videoName=m.group('name')
+ webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
+ # If the url includes the language we get the title translated
+ title_RE=r'<h1><span id="altHeadline" >(?P<title>.*)</span></h1>'
+ title=re.search(title_RE, webpage).group('title')
+ info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
+ "id":(?P<videoID>[\d]+).*?
+ "mediaSlug":"(?P<mediaSlug>[\w\d]+?)"'''
+ thumb_RE=r'</span>[\s.]*</div>[\s.]*<img src="(?P<thumbnail>.*?)"'
+ thumb_match=re.search(thumb_RE,webpage)
+ info_match=re.search(info_RE,webpage,re.VERBOSE)
+ video_id=info_match.group('videoID')
+ mediaSlug=info_match.group('mediaSlug')
+ # Build the canonical download URL from the media slug.
+ video_url=self._talk_video_link(mediaSlug)
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'thumbnail': thumb_match.group('thumbnail')
+ }
+ return info
+
+class MySpassIE(InfoExtractor):
+ _VALID_URL = r'http://www.myspass.de/.*'
+
+ def _real_extract(self, url):
+ META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'
+
+ # video id is the last path element of the URL
+ # usually there is a trailing slash, so also try the second but last
+ url_path = compat_urllib_parse_urlparse(url).path
+ url_parent_path, video_id = os.path.split(url_path)
+ if not video_id:
+ _, video_id = os.path.split(url_parent_path)
+
+ # get metadata
+ metadata_url = META_DATA_URL_TEMPLATE % video_id
+ metadata_text = self._download_webpage(metadata_url, video_id)
+ metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))
+
+ # extract values from metadata
+ url_flv_el = metadata.find('url_flv')
+ if url_flv_el is None:
+ self._downloader.trouble(u'ERROR: unable to extract download url')
+ return
+ video_url = url_flv_el.text
+ extension = os.path.splitext(video_url)[1][1:]
+ title_el = metadata.find('title')
+ if title_el is None:
+ self._downloader.trouble(u'ERROR: unable to extract title')
+ return
+ title = title_el.text
+ format_id_el = metadata.find('format_id')
+ if format_id_el is None:
+ format = ext
+ else:
+ format = format_id_el.text
+ description_el = metadata.find('description')
+ if description_el is not None:
+ description = description_el.text
+ else:
+ description = None
+ imagePreview_el = metadata.find('imagePreview')
+ if imagePreview_el is not None:
+ thumbnail = imagePreview_el.text
+ else:
+ thumbnail = None
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'ext': extension,
+ 'format': format,
+ 'thumbnail': thumbnail,
+ 'description': description
+ }
+ return [info]
+
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
GooglePlusIE(),
ArteTvIE(),
NBAIE(),
+ WorldStarHipHopIE(),
JustinTVIE(),
FunnyOrDieIE(),
- TweetReelIE(),
SteamIE(),
UstreamIE(),
RBMARadioIE(),
EightTracksIE(),
+ KeekIE(),
+ TEDIE(),
+ MySpassIE(),
GenericIE()
]