from __future__ import absolute_import
+import base64
import datetime
+import itertools
import netrc
import os
import re
import xml.etree.ElementTree
import random
import math
+import operator
from .utils import *
id: Video identifier.
url: Final video URL.
- uploader: Nickname of the video uploader, unescaped.
- upload_date: Video upload date (YYYYMMDD).
title: Video title, unescaped.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
thumbnail: Full URL to a video thumbnail image.
description: One-line video description.
+ uploader: Full name of the video uploader.
+ upload_date: Video upload date (YYYYMMDD).
+ uploader_id: Nickname or id of the video uploader.
+ location: Physical location of the video.
player_url: SWF Player URL (used for rtmpdump).
subtitles: The .srt file contents.
urlhandle: [internal] The urlHandle to be used to download the file,
self._ready = False
self.set_downloader(downloader)
- def suitable(self, url):
+ @classmethod
+ def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
- return re.match(self._VALID_URL, url) is not None
+ return re.match(cls._VALID_URL, url) is not None
- def working(self):
+ @classmethod
+ def working(cls):
"""Getter method for _WORKING."""
- return self._WORKING
+ return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
"""Real extraction process. Redefine in subclasses."""
pass
+ @property
+ def IE_NAME(self):
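+ # Default name derived from the class name by dropping the trailing "IE",
+ # e.g. YoutubeIE -> 'Youtube'; subclasses may still override with a class attribute.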
+ return type(self).__name__[:-2]
+
+ def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
+ """ Returns the response handle """
+ if note is None:
+ note = u'Downloading video webpage'
+ self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
+ try:
+ return compat_urllib_request.urlopen(url_or_request)
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ if errnote is None:
+ errnote = u'Unable to download webpage'
+ raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
+
+ def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
+ """ Returns the data of the page as a string """
+ urlh = self._request_webpage(url_or_request, video_id, note, errnote)
+ webpage_bytes = urlh.read()
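+ # Decode permissively: stray invalid UTF-8 bytes become U+FFFD instead of aborting the extraction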
+ return webpage_bytes.decode('utf-8', 'replace')
+
class YoutubeIE(InfoExtractor):
"""Information extractor for youtube.com."""
(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
tube\.majestyc\.net/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
- (?!view_play_list|my_playlists|artist|playlist) # ignore playlist URLs
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:watch(?:_popup)?(?:\.php)?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
- (?:.+&)? # any other preceding param (like /?s=tuff&v=xxxx)
+ (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx)
v=
)
)? # optional -> youtube.com/xxxx is OK
(?(1).+)? # if we found the ID, everything can follow
$"""
_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
- _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
+ _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_NETRC_MACHINE = 'youtube'
}
IE_NAME = u'youtube'
- def suitable(self, url):
+ @classmethod
+ def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
- return re.match(self._VALID_URL, url, re.VERBOSE) is not None
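+ # Playlist URLs would otherwise match this regex too (the negative lookahead
+ # above was dropped), so let YoutubePlaylistIE claim them first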
+ if YoutubePlaylistIE.suitable(url): return False
+ return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
def report_lang(self):
"""Report attempt to set language."""
srt += caption + '\n\n'
return srt
+ def _extract_subtitles(self, video_id):
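+ # Two requests: first list the available caption tracks, then fetch the
+ # chosen track as XML and convert it to SRT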
+ self.report_video_subtitles_download(video_id)
+ request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
+ try:
+ srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+ srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
+ srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
+ if not srt_lang_list:
+ return (u'WARNING: video has no closed captions', None)
+ if self._downloader.params.get('subtitleslang', False):
+ srt_lang = self._downloader.params.get('subtitleslang')
+ elif 'en' in srt_lang_list:
+ srt_lang = 'en'
+ else:
+ srt_lang = list(srt_lang_list.keys())[0]
+ if srt_lang not in srt_lang_list:
+ return (u'WARNING: no closed captions found in the specified language', None)
+ params = compat_urllib_parse.urlencode({
+ 'lang': srt_lang,
+ 'name': srt_lang_list[srt_lang].encode('utf-8'),
+ 'v': video_id,
+ })
+ url = 'http://www.youtube.com/api/timedtext?' + params
+ try:
+ srt_xml = compat_urllib_request.urlopen(url).read().decode('utf-8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+ if not srt_xml:
+ return (u'WARNING: Did not fetch video subtitles', None)
+ return (None, self._closed_captions_xml_to_srt(srt_xml))
+
def _print_formats(self, formats):
print('Available formats:')
for x in formats:
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
- self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+ self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
return
# Set language
self.report_lang()
compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
+ self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
return
# No authentication to be performed
if username is None:
return
+ request = compat_urllib_request.Request(self._LOGIN_URL)
+ try:
+ login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
+ return
+
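+ # Scrape the hidden GALX and dsh tokens from the login form; they are
+ # posted back together with the credentials below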
+ galx = None
+ dsh = None
+ match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
+ if match:
+ galx = match.group(1)
+
+ match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
+ if match:
+ dsh = match.group(1)
+
# Log in
- login_form = {
- 'current_form': 'loginForm',
- 'next': '/',
- 'action_login': 'Log In',
- 'username': username,
- 'password': password,
- }
- request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+ login_form_strs = {
+ u'continue': u'http://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+ u'Email': username,
+ u'GALX': galx,
+ u'Passwd': password,
+ u'PersistentCookie': u'yes',
+ u'_utf8': u'霱',
+ u'bgresponse': u'js_disabled',
+ u'checkConnection': u'',
+ u'checkedDomains': u'youtube',
+ u'dnConn': u'',
+ u'dsh': dsh,
+ u'pstMsg': u'0',
+ u'rmShown': u'1',
+ u'secTok': u'',
+ u'signIn': u'Sign in',
+ u'timeStmp': u'',
+ u'service': u'youtube',
+ u'uilel': u'3',
+ u'hl': u'en_US',
+ }
+ # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
+ # chokes on unicode
+ login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
+ login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
+ request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
try:
self.report_login()
login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
- if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
- self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
+ if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
+ self._downloader.report_warning(u'unable to log in: bad username or password')
return
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+ self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
return
# Confirm age
self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
return
- def _real_extract(self, url):
- # Extract original video URL from URL with redirection, like age verification, using next_url parameter
- mobj = re.search(self._NEXT_URL_RE, url)
- if mobj:
- url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
-
- # Extract video id from URL
+ def _extract_id(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
video_id = mobj.group(2)
+ return video_id
+
+ def _real_extract(self, url):
+ # Extract original video URL from URL with redirection, like age verification, using next_url parameter
+ mobj = re.search(self._NEXT_URL_RE, url)
+ if mobj:
+ url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
+ video_id = self._extract_id(url)
# Get video webpage
self.report_video_webpage_download(video_id)
- request = compat_urllib_request.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
+ url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
+ request = compat_urllib_request.Request(url)
try:
video_webpage_bytes = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
# uploader
if 'author' not in video_info:
- self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+ self._downloader.trouble(u'ERROR: unable to extract uploader name')
return
video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
+ # uploader_id
+ video_uploader_id = None
+ mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
+ if mobj is not None:
+ video_uploader_id = mobj.group(1)
+ else:
+ self._downloader.report_warning(u'unable to extract uploader nickname')
+
# title
if 'title' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract video title')
# closed captions
video_subtitles = None
if self._downloader.params.get('writesubtitles', False):
- try:
- self.report_video_subtitles_download(video_id)
- request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
- try:
- srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
- srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
- srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
- if not srt_lang_list:
- raise Trouble(u'WARNING: video has no closed captions')
- if self._downloader.params.get('subtitleslang', False):
- srt_lang = self._downloader.params.get('subtitleslang')
- elif 'en' in srt_lang_list:
- srt_lang = 'en'
- else:
- srt_lang = srt_lang_list.keys()[0]
- if not srt_lang in srt_lang_list:
- raise Trouble(u'WARNING: no closed captions found in the specified language')
- request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
- try:
- srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
- if not srt_xml:
- raise Trouble(u'WARNING: unable to download video subtitles')
- video_subtitles = self._closed_captions_xml_to_srt(srt_xml)
- except Trouble as trouble:
- self._downloader.trouble(str(trouble))
+ (srt_error, video_subtitles) = self._extract_subtitles(video_id)
+ if srt_error:
+ self._downloader.trouble(srt_error)
if 'length_seconds' not in video_info:
self._downloader.trouble(u'WARNING: unable to extract video duration')
elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
url_data = [compat_parse_qs(uds) for uds in url_data_strs]
- url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
+ url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
format_limit = self._downloader.params.get('format_limit', None)
'id': video_id,
'url': video_real_url,
'uploader': video_uploader,
+ 'uploader_id': video_uploader_id,
'upload_date': upload_date,
'title': video_title,
'ext': video_extension,
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
IE_NAME = u'dailymotion'
+ _WORKING = False
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
- def report_download_webpage(self, video_id):
- """Report webpage download."""
- self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)
-
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
# Retrieve video webpage to extract further information
request = compat_urllib_request.Request(url)
request.add_header('Cookie', 'family_filter=off')
- try:
- self.report_download_webpage(video_id)
- webpage_bytes = compat_urllib_request.urlopen(request).read()
- webpage = webpage_bytes.decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(request, video_id)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
- _VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'
+ _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
IE_NAME = u'vimeo'
def __init__(self, downloader=None):
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
- video_id = mobj.group(1)
+ video_id = mobj.group('id')
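+ # Normalize the URL: default to https, and map play_redirect_hls deep
+ # links back to the canonical video page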
+ if not mobj.group('proto'):
+ url = 'https://' + url
+ if mobj.group('direct_link'):
+ url = 'https://vimeo.com/' + video_id
# Retrieve video webpage to extract further information
request = compat_urllib_request.Request(url, None, std_headers)
# Extract title
video_title = config["video"]["title"]
- # Extract uploader
+ # Extract uploader and uploader_id
video_uploader = config["video"]["owner"]["name"]
+ video_uploader_id = config["video"]["owner"]["url"].split('/')[-1]
# Extract video thumbnail
video_thumbnail = config["video"]["thumbnail"]
# Extract video description
- video_description = get_element_by_id("description", webpage)
+ video_description = get_element_by_attribute("itemprop", "description", webpage)
if video_description: video_description = clean_html(video_description)
else: video_description = ''
# Extract upload date
video_upload_date = None
- mobj = re.search(r'<span id="clip-date" style="display:none">[^:]*: (.*?)( \([^\(]*\))?</span>', webpage)
+ mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
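+ # dateCreated is an ISO 8601 timestamp; the three groups concatenate to the YYYYMMDD format used for upload_date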
if mobj is not None:
- video_upload_date = mobj.group(1)
+ video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
# Vimeo specific: extract request signature and timestamp
sig = config['request']['signature']
'id': video_id,
'url': video_url,
'uploader': video_uploader,
+ 'uploader_id': video_uploader_id,
'upload_date': video_upload_date,
'title': video_title,
'ext': video_extension,
self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id)
def fetch_webpage(self, url):
- self._downloader.increment_downloads()
request = compat_urllib_request.Request(url)
try:
self.report_download_webpage(url)
opener = compat_urllib_request.OpenerDirector()
for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
HTTPMethodFallback, HEADRedirectHandler,
- compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
+ compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
opener.add_handler(handler())
response = opener.open(HeadRequest(url))
if mobj is None:
# Broaden the search a little bit
mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+ if mobj is None:
+ # Broaden the search a little bit: JWPlayer JS loader
+ mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_youtube_results:
- self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
+ self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
n = self._max_youtube_results
self._download_n_results(query, n)
return
result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
request = compat_urllib_request.Request(result_url)
try:
- data = compat_urllib_request.urlopen(request).read()
+ data = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
return
api_response = json.loads(data)['data']
+ if 'items' not in api_response:
+ self._downloader.trouble(u'[youtube] No video results')
+ return
+
new_ids = list(video['id'] for video in api_response['items'])
video_ids += new_ids
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_google_results:
- self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
+ self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
n = self._max_google_results
self._download_n_results(query, n)
return
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_yahoo_results:
- self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
+ self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
n = self._max_yahoo_results
self._download_n_results(query, n)
return
class YoutubePlaylistIE(InfoExtractor):
"""Information Extractor for YouTube playlists."""
- _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
- _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
- _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&([^&"]+&)*list=.*?%s'
- _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+ _VALID_URL = r"""(?:
+ (?:https?://)?
+ (?:\w+\.)?
+ youtube\.com/
+ (?:
+ (?:course|view_play_list|my_playlists|artist|playlist|watch)
+ \? (?:.*?&)*? (?:p|a|list)=
+ | user/.*?/user/
+ | p/
+ | user/.*?#[pg]/c/
+ )
+ ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
+ .*
+ |
+ ((?:PL|EC|UU)[0-9A-Za-z-_]{10,})
+ )"""
+ _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json'
+ _MAX_RESULTS = 50
IE_NAME = u'youtube:playlist'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
+ @classmethod
+ def suitable(cls, url):
+ """Receives a URL and returns True if suitable for this IE."""
+ return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
+
def report_download_page(self, playlist_id, pagenum):
"""Report attempt to download playlist page with given number."""
self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
def _real_extract(self, url):
# Extract playlist id
- mobj = re.match(self._VALID_URL, url)
+ mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url)
return
- # Single video case
- if mobj.group(3) is not None:
- self._downloader.download([mobj.group(3)])
- return
-
- # Download playlist pages
- # prefix is 'p' as default for playlists but there are other types that need extra care
- playlist_prefix = mobj.group(1)
- if playlist_prefix == 'a':
- playlist_access = 'artist'
- else:
- playlist_prefix = 'p'
- playlist_access = 'view_play_list'
- playlist_id = mobj.group(2)
- video_ids = []
- pagenum = 1
+ # Download playlist videos from API
+ playlist_id = mobj.group(1) or mobj.group(2)
+ page_num = 1
+ videos = []
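+ # Collect (position, url) pairs so playlist order survives the
+ # 50-entries-per-page API paging; sorted below before downloading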
while True:
- self.report_download_page(playlist_id, pagenum)
- url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
- request = compat_urllib_request.Request(url)
+ self.report_download_page(playlist_id, page_num)
+
+ url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
try:
- page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ page = compat_urllib_request.urlopen(url).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return
- # Extract video identifiers
- ids_in_page = []
- for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
- if mobj.group(1) not in ids_in_page:
- ids_in_page.append(mobj.group(1))
- video_ids.extend(ids_in_page)
+ try:
+ response = json.loads(page)
+ except ValueError as err:
+ self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err))
+ return
- if self._MORE_PAGES_INDICATOR not in page:
+ if 'feed' not in response or 'entry' not in response['feed']:
+ self._downloader.trouble(u'ERROR: Got a malformed response from YouTube API')
+ return
+ videos += [ (entry['yt$position']['$t'], entry['content']['src'])
+ for entry in response['feed']['entry']
+ if 'content' in entry ]
+
+ if len(response['feed']['entry']) < self._MAX_RESULTS:
break
- pagenum = pagenum + 1
+ page_num += 1
- total = len(video_ids)
+ videos = [v[1] for v in sorted(videos)]
+ total = len(videos)
playliststart = self._downloader.params.get('playliststart', 1) - 1
playlistend = self._downloader.params.get('playlistend', -1)
if playlistend == -1:
- video_ids = video_ids[playliststart:]
+ videos = videos[playliststart:]
else:
- video_ids = video_ids[playliststart:playlistend]
+ videos = videos[playliststart:playlistend]
- if len(video_ids) == total:
+ if len(videos) == total:
self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
else:
- self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+ self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(videos)))
- for id in video_ids:
- self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+ for video in videos:
+ self._downloader.download([video])
return
while True:
self.report_download_page(username, pagenum)
-
- request = compat_urllib_request.Request( page_base + "&page=" + str(pagenum) )
-
+ url = page_base + "&page=" + str(pagenum)
+ request = compat_urllib_request.Request( url )
try:
page = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
"""Information extractor for depositfiles.com"""
_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
- IE_NAME = u'DepositFiles'
-
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, file_id):
"""Report webpage download."""
class FacebookIE(InfoExtractor):
"""Information Extractor for Facebook"""
- _WORKING = False
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
_LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
_NETRC_MACHINE = 'facebook'
- _available_formats = ['video', 'highqual', 'lowqual']
- _video_extensions = {
- 'video': 'mp4',
- 'highqual': 'mp4',
- 'lowqual': 'mp4',
- }
IE_NAME = u'facebook'
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
-
- def _reporter(self, message):
- """Add header and report message."""
- self._downloader.to_screen(u'[facebook] %s' % message)
-
def report_login(self):
"""Report attempt to log in."""
- self._reporter(u'Logging in')
-
- def report_video_webpage_download(self, video_id):
- """Report attempt to download video webpage."""
- self._reporter(u'%s: Downloading video webpage' % video_id)
-
- def report_information_extraction(self, video_id):
- """Report attempt to extract video information."""
- self._reporter(u'%s: Extracting video information' % video_id)
-
- def _parse_page(self, video_webpage):
- """Extract video information from page"""
- # General data
- data = {'title': r'\("video_title", "(.*?)"\)',
- 'description': r'<div class="datawrap">(.*?)</div>',
- 'owner': r'\("video_owner_name", "(.*?)"\)',
- 'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)',
- }
- video_info = {}
- for piece in data.keys():
- mobj = re.search(data[piece], video_webpage)
- if mobj is not None:
- video_info[piece] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
-
- # Video urls
- video_urls = {}
- for fmt in self._available_formats:
- mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
- if mobj is not None:
- # URL is in a Javascript segment inside an escaped Unicode format within
- # the generally utf-8 page
- video_urls[fmt] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
- video_info['video_urls'] = video_urls
-
- return video_info
+ self._downloader.to_screen(u'[%s] Logging in' % self.IE_NAME)
def _real_initialize(self):
if self._downloader is None:
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
- self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+ self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
return
if useremail is None:
self.report_login()
login_results = compat_urllib_request.urlopen(request).read()
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
- self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
+ self._downloader.report_warning(u'unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+ self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
return
def _real_extract(self, url):
return
video_id = mobj.group('ID')
- # Get video webpage
- self.report_video_webpage_download(video_id)
- request = compat_urllib_request.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
- try:
- page = compat_urllib_request.urlopen(request)
- video_webpage = page.read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
- return
-
- # Start extracting information
- self.report_information_extraction(video_id)
-
- # Extract information
- video_info = self._parse_page(video_webpage)
-
- # uploader
- if 'owner' not in video_info:
- self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
- return
- video_uploader = video_info['owner']
-
- # title
- if 'title' not in video_info:
- self._downloader.trouble(u'ERROR: unable to extract video title')
- return
- video_title = video_info['title']
- video_title = video_title.decode('utf-8')
-
- # thumbnail image
- if 'thumbnail' not in video_info:
- self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
- video_thumbnail = ''
- else:
- video_thumbnail = video_info['thumbnail']
-
- # upload date
- upload_date = None
- if 'upload_date' in video_info:
- upload_time = video_info['upload_date']
- timetuple = email.utils.parsedate_tz(upload_time)
- if timetuple is not None:
- try:
- upload_date = time.strftime('%Y%m%d', timetuple[0:9])
- except:
- pass
-
- # description
- video_description = video_info.get('description', 'No description available.')
-
- url_map = video_info['video_urls']
- if len(url_map.keys()) > 0:
- # Decide which formats to download
- req_format = self._downloader.params.get('format', None)
- format_limit = self._downloader.params.get('format_limit', None)
-
- if format_limit is not None and format_limit in self._available_formats:
- format_list = self._available_formats[self._available_formats.index(format_limit):]
- else:
- format_list = self._available_formats
- existing_formats = [x for x in format_list if x in url_map]
- if len(existing_formats) == 0:
- self._downloader.trouble(u'ERROR: no known formats available for video')
- return
- if req_format is None:
- video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
- elif req_format == 'worst':
- video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
- elif req_format == '-1':
- video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
- else:
- # Specific format
- if req_format not in url_map:
- self._downloader.trouble(u'ERROR: requested format not available')
- return
- video_url_list = [(req_format, url_map[req_format])] # Specific format
+ url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
+ webpage = self._download_webpage(url, video_id)
+
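+ # The page embeds the player setup as a JSON array of [name, value] pairs between
+ # two fixed script fragments; its 'params' entry is a URL-encoded JSON object
+ # carrying hd_src/sd_src and the rest of the metadata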
+ BEFORE = '[["allowFullScreen","true"],["allowScriptAccess","always"],["salign","tl"],["scale","noscale"],["wmode","opaque"]].forEach(function(param) {swf.addParam(param[0], param[1]);});\n'
+ AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
+ m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
+ if not m:
+ raise ExtractorError(u'Cannot parse data')
+ data = dict(json.loads(m.group(1)))
+ params_raw = compat_urllib_parse.unquote(data['params'])
+ params = json.loads(params_raw)
+ video_url = params['hd_src']
+ if not video_url:
+ video_url = params['sd_src']
+ if not video_url:
+ raise ExtractorError(u'Cannot find video URL')
+ video_duration = int(params['video_duration'])
+
+ m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
+ if not m:
+ raise ExtractorError(u'Cannot find title in webpage')
+ video_title = unescapeHTML(m.group(1))
- results = []
- for format_param, video_real_url in video_url_list:
- # Extension
- video_extension = self._video_extensions.get(format_param, 'mp4')
+ info = {
+ 'id': video_id,
+ 'title': video_title,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'duration': video_duration,
+ 'thumbnail': params['thumbnail_src'],
+ }
+ return [info]
- results.append({
- 'id': video_id.decode('utf-8'),
- 'url': video_real_url.decode('utf-8'),
- 'uploader': video_uploader.decode('utf-8'),
- 'upload_date': upload_date,
- 'title': video_title,
- 'ext': video_extension.decode('utf-8'),
- 'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
- 'thumbnail': video_thumbnail.decode('utf-8'),
- 'description': video_description.decode('utf-8'),
- })
- return results
class BlipTVIE(InfoExtractor):
"""Information extractor for blip.tv"""
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
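+ # /play/ URLs redirect to an embed page whose URL fragment carries a 'file'
+ # parameter; recover the file id from it and restart extraction on the canonical URL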
+ urlp = compat_urllib_parse_urlparse(url)
+ if urlp.path.startswith('/play/'):
+ request = compat_urllib_request.Request(url)
+ response = compat_urllib_request.urlopen(request)
+ redirecturl = response.geturl()
+ rurlp = compat_urllib_parse_urlparse(redirecturl)
+ file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
+ url = 'http://blip.tv/a/a-' + file_id
+ return self._real_extract(url)
+
+
if '?' in url:
cchar = '&'
else:
cchar = '?'
json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
request = compat_urllib_request.Request(json_url)
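+ # Identify as iTunes both for this metadata request and, via the
+ # 'user_agent' field in the info dict below, for the file download itself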
+ request.add_header('User-Agent', 'iTunes/10.6.1')
self.report_extraction(mobj.group(1))
info = None
try:
'urlhandle': urlh
}
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
- return
+ raise ExtractorError(u'unable to download video info webpage: %s' % compat_str(err))
if info is None: # Regular URL
try:
json_code_bytes = urlh.read()
'format': data['media']['mimeType'],
'thumbnail': data['thumbnailUrl'],
'description': data['description'],
- 'player_url': data['embedUrl']
+ 'player_url': data['embedUrl'],
+ 'user_agent': 'iTunes/10.6.1',
}
except (ValueError,KeyError) as err:
self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
return
- std_headers['User-Agent'] = 'iTunes/10.6.1'
return [info]
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
- def report_download_webpage(self, video_id):
- """Report webpage download."""
- self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
-
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
video_id = mobj.group(1)
# Get video webpage
- request = compat_urllib_request.Request('http://www.myvideo.de/watch/%s' % video_id)
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
+ webpage = self._download_webpage(webpage_url, video_id)
self.report_extraction(video_id)
- mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+ mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
$"""
- IE_NAME = u'comedycentral'
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
'400': '384x216',
}
- def suitable(self, url):
+ @classmethod
+ def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
- return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+ return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
def report_extraction(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
- def report_config_download(self, episode_id):
- self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
+ def report_config_download(self, episode_id, media_id):
+ self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration for %s' % (episode_id, media_id))
def report_index_download(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
- def report_player_url(self, episode_id):
- self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
-
-
def _print_formats(self, formats):
print('Available formats:')
for x in formats:
try:
htmlHandle = compat_urllib_request.urlopen(req)
html = htmlHandle.read()
+ webpage = html.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return
return
epTitle = mobj.group('episode')
- mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', html)
+ mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
# The Colbert Report embeds the information in a data-mgid attribute without
# a URL prefix; so extract the alternate reference
# and then add the URL prefix manually.
- altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', html)
+ altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
return
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
- playerUrl_raw = mMovieParams[0][0]
- self.report_player_url(epTitle)
- try:
- urlHandle = compat_urllib_request.urlopen(playerUrl_raw)
- playerUrl = urlHandle.geturl()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err))
- return
-
uri = mMovieParams[0][1]
indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
self.report_index_download(epTitle)
idoc = xml.etree.ElementTree.fromstring(indexXml)
itemEls = idoc.findall('.//item')
- for itemEl in itemEls:
+ for partNum,itemEl in enumerate(itemEls):
mediaId = itemEl.findall('./guid')[0].text
shortMediaId = mediaId.split(':')[-1]
showId = mediaId.split(':')[-2].replace('.com', '')
configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
compat_urllib_parse.urlencode({'uri': mediaId}))
configReq = compat_urllib_request.Request(configUrl)
- self.report_config_download(epTitle)
+ self.report_config_download(epTitle, shortMediaId)
try:
configXml = compat_urllib_request.urlopen(configReq).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
return
# For now, just pick the highest bitrate
- format,video_url = turls[-1]
+ format,rtmp_video_url = turls[-1]
# Get the format arg from the arg stream
req_format = self._downloader.params.get('format', None)
# Select format if we can find one
for f,v in turls:
if f == req_format:
- format, video_url = f, v
+ format, rtmp_video_url = f, v
break
- # Patch to download from alternative CDN, which does not
- # break on current RTMPDump builds
- broken_cdn = "rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/"
- better_cdn = "rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/"
-
- if video_url.startswith(broken_cdn):
- video_url = video_url.replace(broken_cdn, better_cdn)
+ m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
+ if not m:
+ raise ExtractorError(u'Cannot transform RTMP url')
+ base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
+ video_url = base + m.group('finalid')
- effTitle = showId + u'-' + epTitle
+ effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1)
info = {
'id': shortMediaId,
'url': video_url,
'format': format,
'thumbnail': None,
'description': officialTitle,
- 'player_url': None #playerUrl
}
-
results.append(info)
return results
'uploader': showName,
'upload_date': None,
'title': showName,
- 'ext': 'flv',
+ 'ext': 'mp4',
'thumbnail': imgUrl,
'description': description,
'player_url': playerUrl,
return [info]
-
class CollegeHumorIE(InfoExtractor):
"""Information extractor for collegehumor.com"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
IE_NAME = u'xvideos'
- def report_webpage(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
-
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
return
video_id = mobj.group(1)
- self.report_webpage(video_id)
-
- request = compat_urllib_request.Request(r'http://www.xvideos.com/video' + video_id)
- try:
- webpage_bytes = compat_urllib_request.urlopen(request).read()
- webpage = webpage_bytes.decode('utf-8', 'replace')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(url, video_id)
self.report_extraction(video_id)
stream_json_bytes = compat_urllib_request.urlopen(request).read()
stream_json = stream_json_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+ self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err))
return
streams = json.loads(stream_json)
class InfoQIE(InfoExtractor):
"""Information extractor for infoq.com"""
-
_VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
- IE_NAME = u'infoq'
-
- def report_webpage(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
- self.report_webpage(url)
-
- request = compat_urllib_request.Request(url)
- try:
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
- return
-
+ webpage = self._download_webpage(url, video_id=url)
self.report_extraction(url)
-
# Extract video URL
mobj = re.search(r"jsclassref='([^']*)'", webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video url')
return
- video_url = 'rtmpe://video.infoq.com/cfx/st/' + compat_urllib_parse.unquote(mobj.group(1).decode('base64'))
-
+ real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
+ video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
# Extract title
mobj = re.search(r'contentTitle = "(.*?)";', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video title')
return
- video_title = mobj.group(1).decode('utf-8')
+ video_title = mobj.group(1)
# Extract description
video_description = u'No description available.'
mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage)
if mobj is not None:
- video_description = mobj.group(1).decode('utf-8')
+ video_description = mobj.group(1)
video_filename = video_url.split('/')[-1]
video_id, extension = video_filename.split('.')
if file_url is not None:
break # got it!
else:
- if req_format not in formats.keys():
+ if req_format not in formats:
self._downloader.trouble(u'ERROR: format is not available')
return
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
if mobj.group('course') and mobj.group('video'): # A specific video
course = mobj.group('course')
'upload_date': None,
}
- self.report_download_webpage(info['id'])
- try:
- coursepage = compat_urllib_request.urlopen(url).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
- return
+ coursepage = self._download_webpage(url, info['id'],
+ note='Downloading course info page',
+ errnote='Unable to download course info page')
m = re.search('<h1>([^<]+)</h1>', coursepage)
if m:
assert entry['type'] == 'reference'
results += self.extract(entry['url'])
return results
-
else: # Root page
info = {
'id': 'Stanford OpenClassroom',
_VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$'
IE_NAME = u'mtv'
- def report_webpage(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
-
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
if not mobj.group('proto'):
url = 'http://' + url
video_id = mobj.group('videoid')
- self.report_webpage(video_id)
- request = compat_urllib_request.Request(url)
- try:
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(url, video_id)
mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
if mobj is None:
class YoukuIE(InfoExtractor):
-
_VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
- IE_NAME = u'Youku'
-
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, file_id):
"""Report webpage download."""
- self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id)
+ self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, file_id))
def report_extraction(self, file_id):
"""Report information extraction."""
- self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id)
+ self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
def _gen_sid(self):
nowTime = int(time.time() * 1000)
seed = config['data'][0]['seed']
format = self._downloader.params.get('format', None)
- supported_format = config['data'][0]['streamfileids'].keys()
+ supported_format = list(config['data'][0]['streamfileids'].keys())
if format is None or format == 'best':
if 'hd2' in supported_format:
class XNXXIE(InfoExtractor):
"""Information extractor for xnxx.com"""
- _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
+ _VALID_URL = r'^(?:https?://)?video\.xnxx\.com/video([0-9]+)/(.*)'
IE_NAME = u'xnxx'
VIDEO_URL_RE = r'flv_url=(.*?)&'
VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
_VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*)(\?.*)?$'
IE_NAME = u'nba'
- def report_extraction(self, video_id):
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
if video_id.endswith('/index.html'):
video_id = video_id[:-len('/index.html')]
- self.report_extraction(video_id)
- try:
- urlh = compat_urllib_request.urlopen(url)
- webpage_bytes = urlh.read()
- webpage = webpage_bytes.decode('utf-8', 'ignore')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(url, video_id)
video_url = u'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
def _findProp(rexp, default=None):
return
response = json.loads(webpage)
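+ # On failure the API returns a JSON object with an 'error' key instead of the expected list of clips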
+ if not isinstance(response, list):
+ error_text = response.get('error', 'unknown error')
+ self._downloader.trouble(u'ERROR: Justin.tv API: %s' % error_text)
+ return
info = []
for clip in response:
video_url = clip['video_file_url']
if video_url:
video_extension = os.path.splitext(video_url)[1][1:]
- video_date = re.sub('-', '', clip['created_on'][:10])
+ video_date = re.sub('-', '', clip['start_time'][:10])
+ video_uploader_id = clip.get('user_id', clip.get('channel_id'))
+ video_id = clip['id']
+ video_title = clip.get('title', video_id)
info.append({
- 'id': clip['id'],
+ 'id': video_id,
'url': video_url,
- 'title': clip['title'],
- 'uploader': clip.get('user_id', clip.get('channel_id')),
+ 'title': video_title,
+ 'uploader': clip.get('channel_name', video_uploader_id),
+ 'uploader_id': video_uploader_id,
'upload_date': video_date,
'ext': video_extension,
})
paged = True
api += '/channel/archives/%s.json'
else:
- api += '/clip/show/%s.json'
+ api += '/broadcast/by_archive/%s.json'
api = api % (video_id,)
self.report_extraction(video_id)
break
offset += limit
return info
+
+class FunnyOrDieIE(InfoExtractor):
+ _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ video_id = mobj.group('id')
+ webpage = self._download_webpage(url, video_id)
+
+ m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
+ if not m:
+ self._downloader.trouble(u'ERROR: unable to find video information')
+ return
+ video_url = unescapeHTML(m.group('url'))
+
+ m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage)
+ if not m:
+ self._downloader.trouble(u'ERROR: Cannot find video title')
+ return
+ title = unescapeHTML(m.group('title'))
+
+ m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
+ if m:
+ desc = unescapeHTML(m.group('desc'))
+ else:
+ desc = None
+
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'description': desc,
+ }
+ return [info]
+
+class SteamIE(InfoExtractor):
+ _VALID_URL = r"""http://store.steampowered.com/
+ (?P<urltype>video|app)/ #If the page is only for videos or for a game
+ (?P<gameID>\d+)/?
+ (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
+ """
+
+ @classmethod
+ def suitable(cls, url):
+ """Receives a URL and returns True if suitable for this IE."""
+ return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url, re.VERBOSE)
+ urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
+ gameID = m.group('gameID')
+ videourl = 'http://store.steampowered.com/video/%s/' % gameID
+ webpage = self._download_webpage(videourl, gameID)
+ mweb = re.finditer(urlRE, webpage)
+ namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
+ titles = re.finditer(namesRE, webpage)
+ thumbsRE = r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">'
+ thumbs = re.finditer(thumbsRE, webpage)
+ videos = []
+ for vid,vtitle,thumb in zip(mweb,titles,thumbs):
+ video_id = vid.group('videoID')
+ title = vtitle.group('videoName')
+ video_url = vid.group('videoURL')
+ video_thumb = thumb.group('thumbnail')
+ if not video_url:
+ self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
+ continue
+ info = {
+ 'id':video_id,
+ 'url':video_url,
+ 'ext': 'flv',
+ 'title': unescapeHTML(title),
+ 'thumbnail': video_thumb
+ }
+ videos.append(info)
+ return videos
+
+class UstreamIE(InfoExtractor):
+ _VALID_URL = r'https?://www\.ustream\.tv/recorded/(?P<videoID>\d+)'
+ IE_NAME = u'ustream'
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
+ video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
+ webpage = self._download_webpage(url, video_id)
+ m = re.search(r'data-title="(?P<title>.+)"',webpage)
+ title = m.group('title')
+ m = re.search(r'<a class="state" data-content-type="channel" data-content-id="(?P<uploader>\d+)"',webpage)
+ uploader = m.group('uploader')
+ info = {
+ 'id':video_id,
+ 'url':video_url,
+ 'ext': 'flv',
+ 'title': title,
+ 'uploader': uploader
+ }
+ return [info]
+
+class WorldStarHipHopIE(InfoExtractor):
+ _VALID_URL = r'http://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
+ IE_NAME = u'WorldStarHipHop'
+
+ def _real_extract(self, url):
+ _src_url = r"""(http://hw-videos.*(?:mp4|flv))"""
+
+ webpage_src = compat_urllib_request.urlopen(url).read()
+ webpage_src = webpage_src.decode('utf-8')
+
+ mobj = re.search(_src_url, webpage_src)
+
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('id')
+
+ if mobj is not None:
+ video_url = mobj.group()
+ if 'mp4' in video_url:
+ ext = 'mp4'
+ else:
+ ext = 'flv'
+ else:
+ self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
+ return
+
+ _title = r"""<title>(.*)</title>"""
+
+ mobj = re.search(_title, webpage_src)
+
+ if mobj is not None:
+ title = mobj.group(1)
+ else:
+ title = 'World Star Hip Hop - %s' % time.ctime()
+
+ _thumbnail = r"""rel="image_src" href="(.*)" />"""
+ mobj = re.search(_thumbnail, webpage_src)
+
+ # Extract the thumbnail; if there is none, this is a WSHH candy video, so take the title from the candytitles span instead.
+ if mobj is not None:
+ thumbnail = mobj.group(1)
+ else:
+ _title = r"""candytitles.*>(.*)</span>"""
+ mobj = re.search(_title, webpage_src)
+ if mobj is not None:
+ title = mobj.group(1)
+ thumbnail = None
+
+ results = [{
+ 'id': video_id,
+ 'url' : video_url,
+ 'title' : title,
+ 'thumbnail' : thumbnail,
+ 'ext' : ext,
+ }]
+ return results
+
+class RBMARadioIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
+
+ webpage = self._download_webpage(url, video_id)
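+ # The page inlines its metadata as a JSON literal assigned to gon.show inside a <script> tag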
+ m = re.search(r'<script>window.gon = {.*?};gon\.show=(.+?);</script>', webpage)
+ if not m:
+ raise ExtractorError(u'Cannot find metadata')
+ json_data = m.group(1)
+
+ try:
+ data = json.loads(json_data)
+ except ValueError as e:
+ raise ExtractorError(u'Invalid JSON: ' + str(e))
+
+ video_url = data['akamai_url'] + '&cbr=256'
+ url_parts = compat_urllib_parse_urlparse(video_url)
+ video_ext = url_parts.path.rpartition('.')[2]
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': video_ext,
+ 'title': data['title'],
+ 'description': data.get('teaser_text'),
+ 'location': data.get('country_of_origin'),
+ 'uploader': data.get('host', {}).get('name'),
+ 'uploader_id': data.get('host', {}).get('slug'),
+ 'thumbnail': data.get('image', {}).get('large_url_2x'),
+ 'duration': data.get('duration'),
+ }
+ return [info]
+
+
+class YouPornIE(InfoExtractor):
+ """Information extractor for youporn.com."""
+ _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
+
+ def _print_formats(self, formats):
+ """Print all available formats"""
+ print(u'Available formats:')
+ print(u'ext\t\tformat')
+ print(u'---------------------------------')
+ for format in formats:
+ print(u'%s\t\t%s' % (format['ext'], format['format']))
+
+ def _specific(self, req_format, formats):
+ for x in formats:
+ if(x["format"]==req_format):
+ return x
+ return None
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ video_id = mobj.group('videoid')
+
+ req = compat_urllib_request.Request(url)
+ req.add_header('Cookie', 'age_verified=1')
+ webpage = self._download_webpage(req, video_id)
+
+ # Get the video title
+ result = re.search(r'<h1.*?>(?P<title>.*)</h1>', webpage)
+ if result is None:
+ raise ExtractorError(u'Unable to extract video title')
+ video_title = result.group('title').strip()
+
+ # Get the video date
+ result = re.search(r'Date:</label>(?P<date>.*) </li>', webpage)
+ if result is None:
+ self._downloader.report_warning(u'unable to extract video date')
+ upload_date = None
+ else:
+ upload_date = result.group('date').strip()
+
+ # Get the video uploader
+ result = re.search(r'Submitted:</label>(?P<uploader>.*)</li>', webpage)
+ if result is None:
+ self._downloader.report_warning(u'unable to extract uploader')
+ video_uploader = None
+ else:
+ video_uploader = result.group('uploader').strip()
+ video_uploader = clean_html( video_uploader )
+
+ # Get all of the formats available
+ DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
+ result = re.search(DOWNLOAD_LIST_RE, webpage)
+ if result is None:
+ raise ExtractorError(u'Unable to extract download list')
+ download_list_html = result.group('download_list').strip()
+
+ # Get all of the links from the page
+ LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
+ links = re.findall(LINK_RE, download_list_html)
+ if not links:
+ raise ExtractorError(u'no known formats available for video')
+
+ self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))
+
+ formats = []
+ for link in links:
+
+ # A link looks like this:
+ # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
+ # A path looks like this:
+ # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
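+ # The fifth path component encodes resolution and bitrate, e.g.
+ # "480p_370k_8004515"; its first two "_"-separated fields name the format.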
+ video_url = unescapeHTML(link)
+ path = compat_urllib_parse_urlparse(video_url).path
+ extension = os.path.splitext(path)[1][1:]
+ format = path.split('/')[4].split('_')[:2]
+ size = format[0]
+ bitrate = format[1]
+ format = '-'.join(format)
+ title = u'%s-%s-%s' % (video_title, size, bitrate)
+
+ formats.append({
+ 'id': video_id,
+ 'url': video_url,
+ 'uploader': video_uploader,
+ 'upload_date': upload_date,
+ 'title': title,
+ 'ext': extension,
+ 'format': format,
+ 'thumbnail': None,
+ 'description': None,
+ 'player_url': None
+ })
+
+ if self._downloader.params.get('listformats', None):
+ self._print_formats(formats)
+ return
+
+ req_format = self._downloader.params.get('format', None)
+ self._downloader.to_screen(u'[youporn] Format: %s' % req_format)
+
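+ # The page's download list is assumed to be ordered best-first, so
+ # 'best' maps to the first entry and 'worst' to the last.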
+ if req_format is None or req_format == 'best':
+ return [formats[0]]
+ elif req_format == 'worst':
+ return [formats[-1]]
+ elif req_format in ('-1', 'all'):
+ return formats
+ else:
+ format = self._specific(req_format, formats)
+ if format is None:
+ self._downloader.trouble(u'ERROR: requested format not available')
+ return
+ return [format]
+
+
+class PornotubeIE(InfoExtractor):
+ """Information extractor for pornotube.com."""
+ _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ video_id = mobj.group('videoid')
+ video_title = mobj.group('title')
+
+ # Get webpage content
+ webpage = self._download_webpage(url, video_id)
+
+ # Get the video URL
+ VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9]\.pornotube\.com/.+\.flv)",'
+ result = re.search(VIDEO_URL_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video url')
+ return
+ video_url = compat_urllib_parse.unquote(result.group('url'))
+
+ # Get the upload date
+ VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
+ result = re.search(VIDEO_UPLOADED_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video upload date')
+ return
+ upload_date = result.group('date')
+
+ info = {'id': video_id,
+ 'url': video_url,
+ 'uploader': None,
+ 'upload_date': upload_date,
+ 'title': video_title,
+ 'ext': 'flv',
+ 'format': 'flv'}
+
+ return [info]
+
+class YouJizzIE(InfoExtractor):
+ """Information extractor for youjizz.com."""
+ _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+)\.html$'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ video_id = mobj.group('videoid')
+
+ # Get webpage content
+ webpage = self._download_webpage(url, video_id)
+
+ # Get the video title
+ result = re.search(r'<title>(?P<title>.*)</title>', webpage)
+ if result is None:
+ raise ExtractorError(u'unable to extract video title')
+ video_title = result.group('title').strip()
+
+ # Get the embed page
+ result = re.search(r'https?://www\.youjizz\.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
+ if result is None:
+ raise ExtractorError(u'unable to extract embed page')
+
+ embed_page_url = result.group(0).strip()
+ video_id = result.group('videoid')
+
+ webpage = self._download_webpage(embed_page_url, video_id)
+
+ # Get the video URL
+ result = re.search(r'so\.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);', webpage)
+ if result is None:
+ raise ExtractorError(u'unable to extract video url')
+ video_url = result.group('source')
+
+ info = {'id': video_id,
+ 'url': video_url,
+ 'title': video_title,
+ 'ext': 'flv',
+ 'format': 'flv',
+ 'player_url': embed_page_url}
+
+ return [info]
+
+class EightTracksIE(InfoExtractor):
+ IE_NAME = '8tracks'
+ _VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ raise ExtractorError(u'Invalid URL: %s' % url)
+ playlist_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, playlist_id)
+
+ m = re.search(r'PAGE\.mix = (.*?);\n', webpage, flags=re.DOTALL)
+ if not m:
+ raise ExtractorError(u'Cannot find trax information')
+ json_like = m.group(1)
+ data = json.loads(json_like)
+
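+ # Mimic the site player: open a play session under a random id, then
+ # keep requesting the next track until the API flags the last one.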
+ session = str(random.randint(0, 1000000000))
+ mix_id = data['id']
+ track_count = data['tracks_count']
+ first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
+ next_url = first_url
+ res = []
+ for i in itertools.count():
+ api_json = self._download_webpage(next_url, playlist_id,
+ note=u'Downloading song information %s/%s' % (str(i+1), track_count),
+ errnote=u'Failed to download song information')
+ api_data = json.loads(api_json)
+ track_data = api_data[u'set']['track']
+ info = {
+ 'id': track_data['id'],
+ 'url': track_data['track_file_stream_url'],
+ 'title': track_data['performer'] + u' - ' + track_data['name'],
+ 'raw_title': track_data['name'],
+ 'uploader_id': data['user']['login'],
+ 'ext': 'm4a',
+ }
+ res.append(info)
+ if api_data['set']['at_last_track']:
+ break
+ next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
+ return res
+
+class KeekIE(InfoExtractor):
+ _VALID_URL = r'http://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
+ IE_NAME = u'keek'
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
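+ # Keek serves media and thumbnails at predictable CDN paths derived from
+ # the video id, so only the title and uploader need page parsing.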
+ video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
+ thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
+ webpage = self._download_webpage(url, video_id)
+ m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
+ title = unescapeHTML(m.group('title'))
+ m = re.search(r'<div class="bio-names-and-report">[\s\n]+<h4>(?P<uploader>\w+)</h4>', webpage)
+ uploader = unescapeHTML(m.group('uploader'))
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'uploader': uploader
+ }
+ return [info]
+
+class TEDIE(InfoExtractor):
+ _VALID_URL = r'''http://www\.ted\.com/
+ (
+ ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
+ |
+ ((?P<type_talk>talks)) # We have a simple talk
+ )
+ /(?P<name>\w+) # Here goes the name and then ".html"
+ '''
+
+ @classmethod
+ def suitable(cls, url):
+ """Receives a URL and returns True if suitable for this IE."""
+ return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url, re.VERBOSE)
+ if m.group('type_talk'):
+ return [self._talk_info(url)]
+ else:
+ playlist_id = m.group('playlist_id')
+ name = m.group('name')
+ self._downloader.to_screen(u'[%s] Getting info of playlist %s: "%s"' % (self.IE_NAME, playlist_id, name))
+ return self._playlist_videos_info(url, name, playlist_id)
+
+ def _talk_video_link(self, mediaSlug):
+ '''Returns the video link for that mediaSlug'''
+ return 'http://download.ted.com/talks/%s.mp4' % mediaSlug
+
+ def _playlist_videos_info(self, url, name, playlist_id=0):
+ '''Returns the videos of the playlist'''
+ video_RE = r'''
+ <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)"
+ ([.\s]*?)data-playlist_item_id="(\d+)"
+ ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)"
+ '''
+ video_name_RE = r'<p\ class="talk-title"><a href="(?P<talk_url>/talks/(.+)\.html)">(?P<fullname>.+?)</a></p>'
+ webpage = self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
+ m_videos = re.finditer(video_RE, webpage, re.VERBOSE)
+ m_names = re.finditer(video_name_RE, webpage)
+ info = []
+ for m_video, m_name in zip(m_videos, m_names):
+ video_id = m_video.group('video_id')
+ talk_url = 'http://www.ted.com%s' % m_name.group('talk_url')
+ info.append(self._talk_info(talk_url, video_id))
+ return info
+
+ def _talk_info(self, url, video_id=0):
+ """Return the video for the talk in the url"""
+ m = re.match(self._VALID_URL, url, re.VERBOSE)
+ videoName = m.group('name')
+ webpage = self._download_webpage(url, video_id, 'Downloading "%s" page' % videoName)
+ # If the url includes the language we get the title translated
+ title_RE = r'<h1><span id="altHeadline" >(?P<title>.*)</span></h1>'
+ title = re.search(title_RE, webpage).group('title')
+ info_RE = r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
+ "id":(?P<videoID>[\d]+).*?
+ "mediaSlug":"(?P<mediaSlug>[\w\d]+?)"'''
+ thumb_RE = r'</span>[\s.]*</div>[\s.]*<img src="(?P<thumbnail>.*?)"'
+ thumb_match = re.search(thumb_RE, webpage)
+ info_match = re.search(info_RE, webpage, re.VERBOSE)
+ video_id = info_match.group('videoID')
+ mediaSlug = info_match.group('mediaSlug')
+ video_url = self._talk_video_link(mediaSlug)
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'thumbnail': thumb_match.group('thumbnail')
+ }
+ return info
+
+class MySpassIE(InfoExtractor):
+ _VALID_URL = r'http://www\.myspass\.de/.*'
+
+ def _real_extract(self, url):
+ META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'
+
+ # video id is the last path element of the URL
+ # usually there is a trailing slash, so also try the second but last
+ url_path = compat_urllib_parse_urlparse(url).path
+ url_parent_path, video_id = os.path.split(url_path)
+ if not video_id:
+ _, video_id = os.path.split(url_parent_path)
+
+ # get metadata
+ metadata_url = META_DATA_URL_TEMPLATE % video_id
+ metadata_text = self._download_webpage(metadata_url, video_id)
+ metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))
+
+ # extract values from metadata
+ url_flv_el = metadata.find('url_flv')
+ if url_flv_el is None:
+ self._downloader.trouble(u'ERROR: unable to extract download url')
+ return
+ video_url = url_flv_el.text
+ extension = os.path.splitext(video_url)[1][1:]
+ title_el = metadata.find('title')
+ if title_el is None:
+ self._downloader.trouble(u'ERROR: unable to extract title')
+ return
+ title = title_el.text
+ format_id_el = metadata.find('format_id')
+ if format_id_el is None:
+ format = extension
+ else:
+ format = format_id_el.text
+ description_el = metadata.find('description')
+ if description_el is not None:
+ description = description_el.text
+ else:
+ description = None
+ imagePreview_el = metadata.find('imagePreview')
+ if imagePreview_el is not None:
+ thumbnail = imagePreview_el.text
+ else:
+ thumbnail = None
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'ext': extension,
+ 'format': format,
+ 'thumbnail': thumbnail,
+ 'description': description
+ }
+ return [info]
+
+def gen_extractors():
+ """ Return a list of an instance of every supported extractor.
+ The order does matter; the first extractor matched is the one handling the URL.
+ """
+ return [
+ YoutubePlaylistIE(),
+ YoutubeChannelIE(),
+ YoutubeUserIE(),
+ YoutubeSearchIE(),
+ YoutubeIE(),
+ MetacafeIE(),
+ DailymotionIE(),
+ GoogleSearchIE(),
+ PhotobucketIE(),
+ YahooIE(),
+ YahooSearchIE(),
+ DepositFilesIE(),
+ FacebookIE(),
+ BlipTVUserIE(),
+ BlipTVIE(),
+ VimeoIE(),
+ MyVideoIE(),
+ ComedyCentralIE(),
+ EscapistIE(),
+ CollegeHumorIE(),
+ XVideosIE(),
+ SoundcloudIE(),
+ InfoQIE(),
+ MixcloudIE(),
+ StanfordOpenClassroomIE(),
+ MTVIE(),
+ YoukuIE(),
+ XNXXIE(),
+ YouJizzIE(),
+ PornotubeIE(),
+ YouPornIE(),
+ GooglePlusIE(),
+ ArteTvIE(),
+ NBAIE(),
+ WorldStarHipHopIE(),
+ JustinTVIE(),
+ FunnyOrDieIE(),
+ SteamIE(),
+ UstreamIE(),
+ RBMARadioIE(),
+ EightTracksIE(),
+ KeekIE(),
+ TEDIE(),
+ MySpassIE(),
+ GenericIE()
+ ]
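+
+# A minimal sketch of how this list is consumed, assuming `fd` is a
+# configured FileDownloader instance (a name outside this patch):
+#
+# for ie in gen_extractors():
+# if ie.suitable(url):
+# ie.set_downloader(fd)
+# results = ie.extract(url)
+# break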
+
+