import base64
import datetime
+import itertools
import netrc
import os
import re
url: Final video URL.
title: Video title, unescaped.
ext: Video filename extension.
- uploader: Full name of the video uploader.
- upload_date: Video upload date (YYYYMMDD).
The following fields are optional:
format: The video format, defaults to ext (used for --get-format)
thumbnail: Full URL to a video thumbnail image.
description: One-line video description.
+ uploader: Full name of the video uploader.
+ upload_date: Video upload date (YYYYMMDD).
uploader_id: Nickname or id of the video uploader.
+ location: Physical location of the video.
player_url: SWF Player URL (used for rtmpdump).
subtitles: The .srt file contents.
urlhandle: [internal] The urlHandle to be used to download the file,
(?(1).+)? # if we found the ID, everything can follow
$"""
_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
- _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
+ _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_NETRC_MACHINE = 'youtube'
"""Indicate the download will use the RTMP protocol."""
self._downloader.to_screen(u'[youtube] RTMP download detected')
- def _closed_captions_xml_to_srt(self, xml_string):
- srt = ''
- texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
- # TODO parse xml instead of regex
- for n, (start, dur_tag, dur, caption) in enumerate(texts):
- if not dur: dur = '4'
- start = float(start)
- end = start + float(dur)
- start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
- end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
- caption = unescapeHTML(caption)
- caption = unescapeHTML(caption) # double cycle, intentional
- srt += str(n+1) + '\n'
- srt += start + ' --> ' + end + '\n'
- srt += caption + '\n\n'
- return srt
-
def _extract_subtitles(self, video_id):
self.report_video_subtitles_download(video_id)
request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
srt_lang = list(srt_lang_list.keys())[0]
if not srt_lang in srt_lang_list:
return (u'WARNING: no closed captions found in the specified language', None)
- request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
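+ # Request SRT output directly from the timedtext API (fmt=srt), so the old XML-to-SRT conversion is no longer needed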
+ params = compat_urllib_parse.urlencode({
+ 'lang': srt_lang,
+ 'name': srt_lang_list[srt_lang].encode('utf-8'),
+ 'v': video_id,
+ 'fmt': 'srt',
+ })
+ url = 'http://www.youtube.com/api/timedtext?' + params
try:
- srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ srt = compat_urllib_request.urlopen(url).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
- if not srt_xml:
- return (u'WARNING: unable to download video subtitles', None)
- return (None, self._closed_captions_xml_to_srt(srt_xml))
+ if not srt:
+ return (u'WARNING: downloaded video subtitles are empty', None)
+ return (None, srt)
def _print_formats(self, formats):
print('Available formats:')
if username is None:
return
+ request = compat_urllib_request.Request(self._LOGIN_URL)
+ try:
+ login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.to_stderr(u'WARNING: unable to fetch login page: %s' % compat_str(err))
+ return
+
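+ # Extract the hidden GALX and dsh tokens from the login page so they can be sent back with the login form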
+ galx = None
+ dsh = None
+ match = re.search(r'<input.+?name="GALX".+?value="(.+?)"', login_page, re.DOTALL)
+ if match:
+ galx = match.group(1)
+
+ match = re.search(r'<input.+?name="dsh".+?value="(.+?)"', login_page, re.DOTALL)
+ if match:
+ dsh = match.group(1)
+
# Log in
- login_form = {
- 'current_form': 'loginForm',
- 'next': '/',
- 'action_login': 'Log In',
- 'username': username,
- 'password': password,
- }
- request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
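+ # These are the fields of Google's ServiceLogin form; GALX and dsh come from the login page fetched above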
+ login_form_strs = {
+ u'continue': u'http://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+ u'Email': username,
+ u'GALX': galx,
+ u'Passwd': password,
+ u'PersistentCookie': u'yes',
+ u'_utf8': u'霱',
+ u'bgresponse': u'js_disabled',
+ u'checkConnection': u'',
+ u'checkedDomains': u'youtube',
+ u'dnConn': u'',
+ u'dsh': dsh,
+ u'pstMsg': u'0',
+ u'rmShown': u'1',
+ u'secTok': u'',
+ u'signIn': u'Sign in',
+ u'timeStmp': u'',
+ u'service': u'youtube',
+ u'uilel': u'3',
+ u'hl': u'en_US',
+ }
+ # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
+ # chokes on unicode
+ login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
+ login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
+ request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
try:
self.report_login()
login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
- if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
+ if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
return
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
IE_NAME = u'dailymotion'
+ _WORKING = False
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
- _VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'
+ _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
IE_NAME = u'vimeo'
def __init__(self, downloader=None):
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
- video_id = mobj.group(1)
+ video_id = mobj.group('id')
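+ # Normalize protocol-less and direct (play_redirect_hls) links to the canonical video page URL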
+ if not mobj.group('proto'):
+ url = 'https://' + url
+ if mobj.group('direct_link'):
+ url = 'https://vimeo.com/' + video_id
# Retrieve video webpage to extract further information
request = compat_urllib_request.Request(url, None, std_headers)
opener = compat_urllib_request.OpenerDirector()
for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
HTTPMethodFallback, HEADRedirectHandler,
- compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
+ compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
opener.add_handler(handler())
response = opener.open(HeadRequest(url))
if mobj is None:
# Broaden the search a little bit
mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+ if mobj is None:
+ # Broaden the search a little bit: JWPlayer JS loader
+ mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
class FacebookIE(InfoExtractor):
"""Information Extractor for Facebook"""
- _WORKING = False
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
_LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
_NETRC_MACHINE = 'facebook'
- _available_formats = ['video', 'highqual', 'lowqual']
- _video_extensions = {
- 'video': 'mp4',
- 'highqual': 'mp4',
- 'lowqual': 'mp4',
- }
IE_NAME = u'facebook'
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
-
- def _reporter(self, message):
- """Add header and report message."""
- self._downloader.to_screen(u'[facebook] %s' % message)
-
def report_login(self):
"""Report attempt to log in."""
- self._reporter(u'Logging in')
-
- def report_video_webpage_download(self, video_id):
- """Report attempt to download video webpage."""
- self._reporter(u'%s: Downloading video webpage' % video_id)
-
- def report_information_extraction(self, video_id):
- """Report attempt to extract video information."""
- self._reporter(u'%s: Extracting video information' % video_id)
-
- def _parse_page(self, video_webpage):
- """Extract video information from page"""
- # General data
- data = {'title': r'\("video_title", "(.*?)"\)',
- 'description': r'<div class="datawrap">(.*?)</div>',
- 'owner': r'\("video_owner_name", "(.*?)"\)',
- 'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)',
- }
- video_info = {}
- for piece in data.keys():
- mobj = re.search(data[piece], video_webpage)
- if mobj is not None:
- video_info[piece] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
-
- # Video urls
- video_urls = {}
- for fmt in self._available_formats:
- mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
- if mobj is not None:
- # URL is in a Javascript segment inside an escaped Unicode format within
- # the generally utf-8 page
- video_urls[fmt] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
- video_info['video_urls'] = video_urls
-
- return video_info
+ self._downloader.to_screen(u'[%s] Logging in' % self.IE_NAME)
def _real_initialize(self):
if self._downloader is None:
return
video_id = mobj.group('ID')
- # Get video webpage
- self.report_video_webpage_download(video_id)
- request = compat_urllib_request.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
- try:
- page = compat_urllib_request.urlopen(request)
- video_webpage = page.read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
- return
-
- # Start extracting information
- self.report_information_extraction(video_id)
-
- # Extract information
- video_info = self._parse_page(video_webpage)
-
- # uploader
- if 'owner' not in video_info:
- self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
- return
- video_uploader = video_info['owner']
-
- # title
- if 'title' not in video_info:
- self._downloader.trouble(u'ERROR: unable to extract video title')
- return
- video_title = video_info['title']
- video_title = video_title.decode('utf-8')
-
- # thumbnail image
- if 'thumbnail' not in video_info:
- self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
- video_thumbnail = ''
- else:
- video_thumbnail = video_info['thumbnail']
-
- # upload date
- upload_date = None
- if 'upload_date' in video_info:
- upload_time = video_info['upload_date']
- timetuple = email.utils.parsedate_tz(upload_time)
- if timetuple is not None:
- try:
- upload_date = time.strftime('%Y%m%d', timetuple[0:9])
- except:
- pass
-
- # description
- video_description = video_info.get('description', 'No description available.')
-
- url_map = video_info['video_urls']
- if url_map:
- # Decide which formats to download
- req_format = self._downloader.params.get('format', None)
- format_limit = self._downloader.params.get('format_limit', None)
+ url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
+ webpage = self._download_webpage(url, video_id)
- if format_limit is not None and format_limit in self._available_formats:
- format_list = self._available_formats[self._available_formats.index(format_limit):]
- else:
- format_list = self._available_formats
- existing_formats = [x for x in format_list if x in url_map]
- if len(existing_formats) == 0:
- self._downloader.trouble(u'ERROR: no known formats available for video')
- return
- if req_format is None:
- video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
- elif req_format == 'worst':
- video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
- elif req_format == '-1':
- video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
- else:
- # Specific format
- if req_format not in url_map:
- self._downloader.trouble(u'ERROR: requested format not available')
- return
- video_url_list = [(req_format, url_map[req_format])] # Specific format
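+ # The player setup is embedded as a JSON array of [name, value] pairs between these two script fragments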
+ BEFORE = '[["allowFullScreen","true"],["allowScriptAccess","always"],["salign","tl"],["scale","noscale"],["wmode","opaque"]].forEach(function(param) {swf.addParam(param[0], param[1]);});\n'
+ AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
+ m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
+ if not m:
+ raise ExtractorError(u'Cannot parse data')
+ data = dict(json.loads(m.group(1)))
+ params_raw = compat_urllib_parse.unquote(data['params'])
+ params = json.loads(params_raw)
+ video_url = params['hd_src']
+ if not video_url:
+ video_url = params['sd_src']
+ if not video_url:
+ raise ExtractorError(u'Cannot find video URL')
+ video_duration = int(params['video_duration'])
+
+ m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
+ if not m:
+ raise ExtractorError(u'Cannot find title in webpage')
+ video_title = unescapeHTML(m.group(1))
- results = []
- for format_param, video_real_url in video_url_list:
- # Extension
- video_extension = self._video_extensions.get(format_param, 'mp4')
+ info = {
+ 'id': video_id,
+ 'title': video_title,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'duration': video_duration,
+ 'thumbnail': params['thumbnail_src'],
+ }
+ return [info]
- results.append({
- 'id': video_id.decode('utf-8'),
- 'url': video_real_url.decode('utf-8'),
- 'uploader': video_uploader.decode('utf-8'),
- 'upload_date': upload_date,
- 'title': video_title,
- 'ext': video_extension.decode('utf-8'),
- 'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
- 'thumbnail': video_thumbnail.decode('utf-8'),
- 'description': video_description.decode('utf-8'),
- })
- return results
class BlipTVIE(InfoExtractor):
"""Information extractor for blip.tv"""
cchar = '?'
json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
request = compat_urllib_request.Request(json_url)
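+ # Use an iTunes User-Agent for the metadata request (the same value is returned as 'user_agent' in the info dict below)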
+ request.add_header('User-Agent', 'iTunes/10.6.1')
self.report_extraction(mobj.group(1))
info = None
try:
'urlhandle': urlh
}
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to download video info webpage: %s' % compat_str(err))
if info is None: # Regular URL
try:
json_code_bytes = urlh.read()
'format': data['media']['mimeType'],
'thumbnail': data['thumbnailUrl'],
'description': data['description'],
- 'player_url': data['embedUrl']
+ 'player_url': data['embedUrl'],
+ 'user_agent': 'iTunes/10.6.1',
}
except (ValueError,KeyError) as err:
self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
return
- std_headers['User-Agent'] = 'iTunes/10.6.1'
return [info]
webpage = self._download_webpage(webpage_url, video_id)
self.report_extraction(video_id)
- mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+ mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
if mobj.group('course') and mobj.group('video'): # A specific video
course = mobj.group('course')
'upload_date': None,
}
- self.report_download_webpage(info['id'])
- try:
- coursepage = compat_urllib_request.urlopen(url).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
- return
+ coursepage = self._download_webpage(url, info['id'],
+ note='Downloading course info page',
+ errnote='Unable to download course info page')
m = re.search('<h1>([^<]+)</h1>', coursepage)
if m:
assert entry['type'] == 'reference'
results += self.extract(entry['url'])
return results
-
else: # Root page
info = {
'id': 'Stanford OpenClassroom',
video_extension = os.path.splitext(video_url)[1][1:]
video_date = re.sub('-', '', clip['start_time'][:10])
video_uploader_id = clip.get('user_id', clip.get('channel_id'))
+ video_id = clip['id']
+ video_title = clip.get('title', video_id)
info.append({
- 'id': clip['id'],
+ 'id': video_id,
'url': video_url,
- 'title': clip['title'],
+ 'title': video_title,
'uploader': clip.get('channel_name', video_uploader_id),
'uploader_id': video_uploader_id,
'upload_date': video_date,
}
return [info]
+class RBMARadioIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
+
+ webpage = self._download_webpage(url, video_id)
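+ # The show metadata is embedded in the page as the JavaScript object gon.show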
+ m = re.search(r'<script>window.gon = {.*?};gon\.show=(.+?);</script>', webpage)
+ if not m:
+ raise ExtractorError(u'Cannot find metadata')
+ json_data = m.group(1)
+
+ try:
+ data = json.loads(json_data)
+ except ValueError as e:
+ raise ExtractorError(u'Invalid JSON: ' + str(e))
+
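+ # Build the stream URL from the 'akamai_url' field, appending an explicit bitrate parameter (cbr=256)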
+ video_url = data['akamai_url'] + '&cbr=256'
+ url_parts = compat_urllib_parse_urlparse(video_url)
+ video_ext = url_parts.path.rpartition('.')[2]
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': video_ext,
+ 'title': data['title'],
+ 'description': data.get('teaser_text'),
+ 'location': data.get('country_of_origin'),
+ 'uploader': data.get('host', {}).get('name'),
+ 'uploader_id': data.get('host', {}).get('slug'),
+ 'thumbnail': data.get('image', {}).get('large_url_2x'),
+ 'duration': data.get('duration'),
+ }
+ return [info]
class YouPornIE(InfoExtractor):
webpage = self._download_webpage(req, video_id)
# Get the video title
- result = re.search(r'videoTitleArea">(?P<title>.*)</h1>', webpage)
+ result = re.search(r'<h1.*?>(?P<title>.*)</h1>', webpage)
if result is None:
- raise ExtractorError(u'ERROR: unable to extract video title')
+ raise ExtractorError(u'Unable to extract video title')
video_title = result.group('title').strip()
# Get the video date
- result = re.search(r'Date:</b>(?P<date>.*)</li>', webpage)
+ result = re.search(r'Date:</label>(?P<date>.*) </li>', webpage)
if result is None:
self._downloader.to_stderr(u'WARNING: unable to extract video date')
upload_date = None
upload_date = result.group('date').strip()
# Get the video uploader
- result = re.search(r'Submitted:</b>(?P<uploader>.*)</li>', webpage)
+ result = re.search(r'Submitted:</label>(?P<uploader>.*)</li>', webpage)
if result is None:
- self._downloader.to_stderr(u'ERROR: unable to extract uploader')
+ self._downloader.to_stderr(u'WARNING: unable to extract uploader')
video_uploader = None
else:
video_uploader = result.group('uploader').strip()
return [info]
-
-
class YouJizzIE(InfoExtractor):
"""Information extractor for youjizz.com."""
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+).html$'
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
webpage = self._download_webpage(url, video_id)
# Get the video title
- VIDEO_TITLE_RE = r'<title>(?P<title>.*)</title>'
- result = re.search(VIDEO_TITLE_RE, webpage)
+ result = re.search(r'<title>(?P<title>.*)</title>', webpage)
if result is None:
- self._downloader.trouble(u'ERROR: unable to extract video title')
- return
+ raise ExtractorError(u'Unable to extract video title')
video_title = result.group('title').strip()
# Get the embed page
- EMBED_PAGE_RE = r'http://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)'
- result = re.search(EMBED_PAGE_RE, webpage)
+ result = re.search(r'https?://www\.youjizz\.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
if result is None:
- self._downloader.trouble(u'ERROR: unable to extract embed page')
- return
+ raise ExtractorError(u'Unable to extract embed page')
embed_page_url = result.group(0).strip()
video_id = result.group('videoid')
webpage = self._download_webpage(embed_page_url, video_id)
# Get the video URL
- SOURCE_RE = r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);'
- result = re.search(SOURCE_RE, webpage)
+ result = re.search(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);', webpage)
if result is None:
- self._downloader.trouble(u'ERROR: unable to extract video url')
- return
+ raise ExtractorError(u'Unable to extract video URL')
video_url = result.group('source')
info = {'id': video_id,
'url': video_url,
- 'uploader': None,
- 'upload_date': None,
'title': video_title,
'ext': 'flv',
'format': 'flv',
- 'thumbnail': None,
- 'description': None,
'player_url': embed_page_url}
return [info]
+class EightTracksIE(InfoExtractor):
+ IE_NAME = '8tracks'
+ _VALID_URL = r'https?://8tracks.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ raise ExtractorError(u'Invalid URL: %s' % url)
+ playlist_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, playlist_id)
+
+ m = re.search(r"PAGE.mix = (.*?);\n", webpage, flags=re.DOTALL)
+ if not m:
+ raise ExtractorError(u'Cannot find trax information')
+ json_like = m.group(1)
+ data = json.loads(json_like)
+
+ session = str(random.randint(0, 1000000000))
+ mix_id = data['id']
+ track_count = data['tracks_count']
+ first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
+ next_url = first_url
+ res = []
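+ # The play API returns one track per request; follow the 'next' endpoint until at_last_track is set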
+ for i in itertools.count():
+ api_json = self._download_webpage(next_url, playlist_id,
+ note=u'Downloading song information %s/%s' % (str(i+1), track_count),
+ errnote=u'Failed to download song information')
+ api_data = json.loads(api_json)
+ track_data = api_data[u'set']['track']
+ info = {
+ 'id': track_data['id'],
+ 'url': track_data['track_file_stream_url'],
+ 'title': track_data['performer'] + u' - ' + track_data['name'],
+ 'raw_title': track_data['name'],
+ 'uploader_id': data['user']['login'],
+ 'ext': 'm4a',
+ }
+ res.append(info)
+ if api_data['set']['at_last_track']:
+ break
+ next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
+ return res
+
+class KeekIE(InfoExtractor):
+ _VALID_URL = r'http://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
+ IE_NAME = u'keek'
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
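+ # Video file and thumbnail are served directly from Keek's CDN, addressed by the video id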
+ video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
+ thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
+ webpage = self._download_webpage(url, video_id)
+ m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
+ title = unescapeHTML(m.group('title'))
+ m = re.search(r'<div class="bio-names-and-report">[\s\n]+<h4>(?P<uploader>\w+)</h4>', webpage)
+ uploader = unescapeHTML(m.group('uploader'))
+ info = {
+ 'id':video_id,
+ 'url':video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'uploader': uploader
+ }
+ return [info]
+
+class TEDIE(InfoExtractor):
+ _VALID_URL = r'''http://www.ted.com/
+ (
+ ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
+ |
+ ((?P<type_talk>talks)) # We have a simple talk
+ )
+ /(?P<name>\w+) # Here goes the name and then ".html"
+ '''
+
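+ # _VALID_URL is written in verbose mode, so suitable() must pass re.VERBOSE when matching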
+ def suitable(self, url):
+ """Receives a URL and returns True if suitable for this IE."""
+ return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url, re.VERBOSE)
+ if m.group('type_talk'):
+ return [self._talk_info(url)]
+ else:
+ playlist_id = m.group('playlist_id')
+ name = m.group('name')
+ self._downloader.to_screen(u'[%s] Getting info of playlist %s: "%s"' % (self.IE_NAME, playlist_id, name))
+ return self._playlist_videos_info(url, name, playlist_id)
+
+ def _talk_video_link(self, mediaSlug):
+ '''Returns the video link for that mediaSlug'''
+ return 'http://download.ted.com/talks/%s.mp4' % mediaSlug
+
+ def _playlist_videos_info(self, url, name, playlist_id=0):
+ '''Returns the videos of the playlist'''
+ video_RE = r'''
+ <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)"
+ ([.\s]*?)data-playlist_item_id="(\d+)"
+ ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)"
+ '''
+ video_name_RE = r'<p\ class="talk-title"><a href="/talks/(.+).html">(?P<fullname>.+?)</a></p>'
+ webpage = self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
+ m_videos = re.finditer(video_RE, webpage, re.VERBOSE)
+ m_names = re.finditer(video_name_RE, webpage)
+ info = []
+ for m_video, m_name in zip(m_videos, m_names):
+ video_dic = {
+ 'id': m_video.group('video_id'),
+ 'url': self._talk_video_link(m_video.group('mediaSlug')),
+ 'ext': 'mp4',
+ 'title': m_name.group('fullname')
+ }
+ info.append(video_dic)
+ return info
+ def _talk_info(self, url, video_id=0):
+ """Return the video for the talk in the url"""
+ m = re.match(self._VALID_URL, url, re.VERBOSE)
+ videoName = m.group('name')
+ webpage = self._download_webpage(url, video_id, 'Downloading "%s" page' % videoName)
+ # If the URL includes the language, we get the translated title
+ title_RE = r'<h1><span id="altHeadline" >(?P<title>[\s\w:/\.\?=\+-\\\']*)</span></h1>'
+ title = re.search(title_RE, webpage).group('title')
+ info_RE = r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
+ "id":(?P<videoID>[\d]+).*?
+ "mediaSlug":"(?P<mediaSlug>[\w\d]+?)"'''
+ info_match = re.search(info_RE, webpage, re.VERBOSE)
+ video_id = info_match.group('videoID')
+ mediaSlug = info_match.group('mediaSlug')
+ video_url = self._talk_video_link(mediaSlug)
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': title
+ }
+ return info
+
+class MySpassIE(InfoExtractor):
+ _VALID_URL = r'http://www.myspass.de/.*'
+
+ def _real_extract(self, url):
+ META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'
+
+ # video id is the last path element of the URL
+ # usually there is a trailing slash, so also try the second but last
+ url_path = compat_urllib_parse_urlparse(url).path
+ url_parent_path, video_id = os.path.split(url_path)
+ if not video_id:
+ _, video_id = os.path.split(url_parent_path)
+
+ # get metadata
+ metadata_url = META_DATA_URL_TEMPLATE % video_id
+ metadata_text = self._download_webpage(metadata_url, video_id)
+ metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))
+
+ # extract values from metadata
+ url_flv_el = metadata.find('url_flv')
+ if url_flv_el is None:
+ self._downloader.trouble(u'ERROR: unable to extract download url')
+ return
+ video_url = url_flv_el.text
+ extension = os.path.splitext(video_url)[1][1:]
+ title_el = metadata.find('title')
+ if title_el is None:
+ self._downloader.trouble(u'ERROR: unable to extract title')
+ return
+ title = title_el.text
+ format_id_el = metadata.find('format_id')
+ if format_id_el is None:
+ format = extension
+ else:
+ format = format_id_el.text
+ description_el = metadata.find('description')
+ if description_el is not None:
+ description = description_el.text
+ else:
+ description = None
+ imagePreview_el = metadata.find('imagePreview')
+ if imagePreview_el is not None:
+ thumbnail = imagePreview_el.text
+ else:
+ thumbnail = None
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'ext': extension,
+ 'format': format,
+ 'thumbnail': thumbnail,
+ 'description': description
+ }
+ return [info]
def gen_extractors():
""" Return a list of an instance of every supported extractor.
TweetReelIE(),
SteamIE(),
UstreamIE(),
+ RBMARadioIE(),
+ EightTracksIE(),
+ KeekIE(),
+ TEDIE(),
+ MySpassIE(),
GenericIE()
]