X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;ds=sidebyside;f=youtube_dl%2FInfoExtractors.py;h=139173016a9f7139e9e3caae11dc563cf066f499;hb=2f58b12dad1b5e19f2daf338cdba958be0b0a87c;hp=8e164760b5d4c5ec6661a1dcd638263faea96c50;hpb=95506f1235e835c4a86ec903154ce697dae6975a;p=youtube-dl
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 8e164760b..139173016 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -114,8 +114,9 @@ class InfoExtractor(object):
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
""" Returns the response handle """
if note is None:
- note = u'Downloading video webpage'
- self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
+ self.report_download_webpage(video_id)
+ elif note is not False:
+ self.to_screen(u'%s: %s' % (video_id, note))
try:
return compat_urllib_request.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -123,8 +124,8 @@ class InfoExtractor(object):
errnote = u'Unable to download webpage'
raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
- def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
- """ Returns the data of the page as a string """
+ def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
+ """ Returns a tuple (page content as string, URL handle) """
urlh = self._request_webpage(url_or_request, video_id, note, errnote)
content_type = urlh.headers.get('Content-Type', '')
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
@@ -133,7 +134,59 @@ class InfoExtractor(object):
else:
encoding = 'utf-8'
webpage_bytes = urlh.read()
- return webpage_bytes.decode(encoding, 'replace')
+ if self._downloader.params.get('dump_intermediate_pages', False):
+ try:
+ url = url_or_request.get_full_url()
+ except AttributeError:
+ url = url_or_request
+ self.to_screen(u'Dumping request to ' + url)
+ dump = base64.b64encode(webpage_bytes).decode('ascii')
+ self._downloader.to_screen(dump)
+ content = webpage_bytes.decode(encoding, 'replace')
+ return (content, urlh)
+
+ def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
+ """ Returns the data of the page as a string """
+ return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]
+
+ def to_screen(self, msg):
+ """Print msg to screen, prefixing it with '[ie_name]'"""
+ self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
+
+ def report_extraction(self, id_or_name):
+ """Report information extraction."""
+ self.to_screen(u'%s: Extracting information' % id_or_name)
+
+ def report_download_webpage(self, video_id):
+ """Report webpage download."""
+ self.to_screen(u'%s: Downloading webpage' % video_id)
+
+ def report_age_confirmation(self):
+ """Report attempt to confirm age."""
+ self.to_screen(u'Confirming age')
+
+ #Methods for following #608
+ #They set the correct value of the '_type' key
+ def video_result(self, video_info):
+ """Returns a video"""
+ video_info['_type'] = 'video'
+ return video_info
+ def url_result(self, url, ie=None):
+ """Returns a url that points to a page that should be processed"""
+ #TODO: ie should be the class used for getting the info
+ video_info = {'_type': 'url',
+ 'url': url,
+ 'ie_key': ie}
+ return video_info
+ def playlist_result(self, entries, playlist_id=None, playlist_title=None):
+ """Returns a playlist"""
+ video_info = {'_type': 'playlist',
+ 'entries': entries}
+ if playlist_id:
+ video_info['id'] = playlist_id
+ if playlist_title:
+ video_info['title'] = playlist_title
+ return video_info
class YoutubeIE(InfoExtractor):
@@ -158,7 +211,7 @@ class YoutubeIE(InfoExtractor):
([0-9A-Za-z_-]+) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
$"""
- _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
+ _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
@@ -204,48 +257,44 @@ class YoutubeIE(InfoExtractor):
def report_lang(self):
"""Report attempt to set language."""
- self._downloader.to_screen(u'[youtube] Setting language')
+ self.to_screen(u'Setting language')
def report_login(self):
"""Report attempt to log in."""
- self._downloader.to_screen(u'[youtube] Logging in')
-
- def report_age_confirmation(self):
- """Report attempt to confirm age."""
- self._downloader.to_screen(u'[youtube] Confirming age')
+ self.to_screen(u'Logging in')
def report_video_webpage_download(self, video_id):
"""Report attempt to download video webpage."""
- self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)
+ self.to_screen(u'%s: Downloading video webpage' % video_id)
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
- self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)
+ self.to_screen(u'%s: Downloading video info webpage' % video_id)
def report_video_subtitles_download(self, video_id):
"""Report attempt to download video info webpage."""
- self._downloader.to_screen(u'[youtube] %s: Checking available subtitles' % video_id)
+ self.to_screen(u'%s: Checking available subtitles' % video_id)
def report_video_subtitles_request(self, video_id, sub_lang, format):
"""Report attempt to download video info webpage."""
- self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
+ self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
def report_video_subtitles_available(self, video_id, sub_lang_list):
"""Report available subtitles."""
sub_lang = ",".join(list(sub_lang_list.keys()))
- self._downloader.to_screen(u'[youtube] %s: Available subtitles for video: %s' % (video_id, sub_lang))
+ self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
- self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)
+ self.to_screen(u'%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
- self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))
+ self.to_screen(u'%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
- self._downloader.to_screen(u'[youtube] RTMP download detected')
+ self.to_screen(u'RTMP download detected')
def _get_available_subtitles(self, video_id):
self.report_video_subtitles_download(video_id)
@@ -253,11 +302,11 @@ class YoutubeIE(InfoExtractor):
try:
sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+ return (u'unable to download video subtitles: %s' % compat_str(err), None)
sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
if not sub_lang_list:
- return (u'WARNING: video doesn\'t have subtitles', None)
+ return (u'video doesn\'t have subtitles', None)
return sub_lang_list
def _list_available_subtitles(self, video_id):
@@ -265,6 +314,10 @@ class YoutubeIE(InfoExtractor):
self.report_video_subtitles_available(video_id, sub_lang_list)
def _request_subtitle(self, sub_lang, sub_name, video_id, format):
+ """
+ Return tuple:
+ (error_message, sub_lang, sub)
+ """
self.report_video_subtitles_request(video_id, sub_lang, format)
params = compat_urllib_parse.urlencode({
'lang': sub_lang,
@@ -276,14 +329,20 @@ class YoutubeIE(InfoExtractor):
try:
sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+ return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
if not sub:
- return (u'WARNING: Did not fetch video subtitles', None)
+ return (u'Did not fetch video subtitles', None, None)
return (None, sub_lang, sub)
def _extract_subtitle(self, video_id):
+ """
+ Return a list with a tuple:
+ [(error_message, sub_lang, sub)]
+ """
sub_lang_list = self._get_available_subtitles(video_id)
sub_format = self._downloader.params.get('subtitlesformat')
+ if isinstance(sub_lang_list,tuple): #There was an error; the available subtitles could not be retrieved
+ return [(sub_lang_list[0], None, None)]
if self._downloader.params.get('subtitleslang', False):
sub_lang = self._downloader.params.get('subtitleslang')
elif 'en' in sub_lang_list:
@@ -291,7 +350,7 @@ class YoutubeIE(InfoExtractor):
else:
sub_lang = list(sub_lang_list.keys())[0]
if not sub_lang in sub_lang_list:
- return (u'WARNING: no closed captions found in the specified language "%s"' % sub_lang, None)
+ return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]
subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
return [subtitle]
@@ -299,6 +358,8 @@ class YoutubeIE(InfoExtractor):
def _extract_all_subtitles(self, video_id):
sub_lang_list = self._get_available_subtitles(video_id)
sub_format = self._downloader.params.get('subtitlesformat')
+ if isinstance(sub_lang_list,tuple): #There was an error; the available subtitles could not be retrieved
+ return [(sub_lang_list[0], None, None)]
subtitles = []
for sub_lang in sub_lang_list:
subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
@@ -366,7 +427,7 @@ class YoutubeIE(InfoExtractor):
# Log in
login_form_strs = {
- u'continue': u'http://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+ u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
u'Email': username,
u'GALX': galx,
u'Passwd': password,
@@ -411,14 +472,12 @@ class YoutubeIE(InfoExtractor):
self.report_age_confirmation()
age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
def _extract_id(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
@@ -426,18 +485,17 @@ class YoutubeIE(InfoExtractor):
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
- url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
+ url = 'https://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
video_id = self._extract_id(url)
# Get video webpage
self.report_video_webpage_download(video_id)
- url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
+ url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
request = compat_urllib_request.Request(url)
try:
video_webpage_bytes = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))
video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
@@ -451,37 +509,30 @@ class YoutubeIE(InfoExtractor):
# Get video info
self.report_video_info_webpage_download(video_id)
for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
- video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+ video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
% (video_id, el_type))
- request = compat_urllib_request.Request(video_info_url)
- try:
- video_info_webpage_bytes = compat_urllib_request.urlopen(request).read()
- video_info_webpage = video_info_webpage_bytes.decode('utf-8', 'ignore')
- video_info = compat_parse_qs(video_info_webpage)
- if 'token' in video_info:
- break
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video info webpage: %s' % compat_str(err))
- return
+ video_info_webpage = self._download_webpage(video_info_url, video_id,
+ note=False,
+ errnote='unable to download video info webpage')
+ video_info = compat_parse_qs(video_info_webpage)
+ if 'token' in video_info:
+ break
if 'token' not in video_info:
if 'reason' in video_info:
- self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0])
+ raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0])
else:
- self._downloader.report_error(u'"token" parameter not in video info for unknown reason')
- return
+ raise ExtractorError(u'"token" parameter not in video info for unknown reason')
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
- self._downloader.report_error(u'"rental" videos not supported')
- return
+ raise ExtractorError(u'"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
- self._downloader.report_error(u'unable to extract uploader name')
- return
+ raise ExtractorError(u'Unable to extract uploader name')
video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
# uploader_id
@@ -494,8 +545,7 @@ class YoutubeIE(InfoExtractor):
# title
if 'title' not in video_info:
- self._downloader.report_error(u'unable to extract video title')
- return
+ raise ExtractorError(u'Unable to extract video title')
video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
# thumbnail image
@@ -510,19 +560,18 @@ class YoutubeIE(InfoExtractor):
mobj = re.search(r'id="eow-date.*?>(.*?)', video_webpage, re.DOTALL)
if mobj is not None:
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
- format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
- for expression in format_expressions:
- try:
- upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
- except:
- pass
+ upload_date = unified_strdate(upload_date)
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
video_description = clean_html(video_description)
else:
- video_description = ''
+ fd_mobj = re.search(r'= 1:
- url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
- url_data = [compat_parse_qs(uds) for uds in url_data_strs]
- url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
- url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
+ url_map = {}
+ for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
+ url_data = compat_parse_qs(url_data_str)
+ if 'itag' in url_data and 'url' in url_data:
+ url = url_data['url'][0] + '&signature=' + url_data['sig'][0]
+ if not 'ratebypass' in url: url += '&ratebypass=yes'
+ url_map[url_data['itag'][0]] = url
format_limit = self._downloader.params.get('format_limit', None)
available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
@@ -574,8 +626,7 @@ class YoutubeIE(InfoExtractor):
format_list = available_formats
existing_formats = [x for x in format_list if x in url_map]
if len(existing_formats) == 0:
- self._downloader.report_error(u'no known formats available for video')
- return
+ raise ExtractorError(u'no known formats available for video')
if self._downloader.params.get('listformats', None):
self._print_formats(existing_formats)
return
@@ -595,11 +646,9 @@ class YoutubeIE(InfoExtractor):
video_url_list = [(rf, url_map[rf])]
break
if video_url_list is None:
- self._downloader.report_error(u'requested format not available')
- return
+ raise ExtractorError(u'requested format not available')
else:
- self._downloader.report_error(u'no conn or url_encoded_fmt_stream_map information found in video info')
- return
+ raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info')
results = []
for format_param, video_real_url in video_url_list:
@@ -635,24 +684,9 @@ class MetacafeIE(InfoExtractor):
_FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
IE_NAME = u'metacafe'
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
-
def report_disclaimer(self):
"""Report disclaimer retrieval."""
- self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
-
- def report_age_confirmation(self):
- """Report attempt to confirm age."""
- self._downloader.to_screen(u'[metacafe] Confirming age')
-
- def report_download_webpage(self, video_id):
- """Report webpage download."""
- self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
+ self.to_screen(u'Retrieving disclaimer')
def _real_initialize(self):
# Retrieve disclaimer
@@ -661,8 +695,7 @@ class MetacafeIE(InfoExtractor):
self.report_disclaimer()
disclaimer = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to retrieve disclaimer: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to retrieve disclaimer: %s' % compat_str(err))
# Confirm age
disclaimer_form = {
@@ -674,32 +707,23 @@ class MetacafeIE(InfoExtractor):
self.report_age_confirmation()
disclaimer = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group(1)
# Check if video comes from YouTube
mobj2 = re.match(r'^yt-(.*)$', video_id)
if mobj2 is not None:
- self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
- return
+ return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1), 'Youtube')]
# Retrieve video webpage to extract further information
- request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable retrieve video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage('http://www.metacafe.com/watch/%s/' % video_id, video_id)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
@@ -718,30 +742,25 @@ class MetacafeIE(InfoExtractor):
else:
mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract media URL')
- return
+ raise ExtractorError(u'Unable to extract media URL')
vardict = compat_parse_qs(mobj.group(1))
if 'mediaData' not in vardict:
- self._downloader.report_error(u'unable to extract media URL')
- return
- mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
+ raise ExtractorError(u'Unable to extract media URL')
+ mobj = re.search(r'"mediaURL":"(?Phttp.*?)",(.*?)"key":"(?P.*?)"', vardict['mediaData'][0])
if mobj is None:
- self._downloader.report_error(u'unable to extract media URL')
- return
- mediaURL = mobj.group(1).replace('\\/', '/')
+ raise ExtractorError(u'Unable to extract media URL')
+ mediaURL = mobj.group('mediaURL').replace('\\/', '/')
video_extension = mediaURL[-3:]
- video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
+ video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
mobj = re.search(r'(?im)(.*) - Video', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
video_title = mobj.group(1).decode('utf-8')
mobj = re.search(r'submitter=(.*?);', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract uploader nickname')
- return
+ raise ExtractorError(u'Unable to extract uploader nickname')
video_uploader = mobj.group(1)
return [{
@@ -753,27 +772,17 @@ class MetacafeIE(InfoExtractor):
'ext': video_extension.decode('utf-8'),
}]
-
class DailymotionIE(InfoExtractor):
"""Information Extractor for Dailymotion"""
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
IE_NAME = u'dailymotion'
- _WORKING = False
-
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group(1).split('_')[0].split('?')[0]
@@ -788,23 +797,20 @@ class DailymotionIE(InfoExtractor):
self.report_extraction(video_id)
mobj = re.search(r'\s*var flashvars = (.*)', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract media URL')
- return
+ raise ExtractorError(u'Unable to extract media URL')
flashvars = compat_urllib_parse.unquote(mobj.group(1))
for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
if key in flashvars:
max_quality = key
- self._downloader.to_screen(u'[dailymotion] Using %s' % key)
+ self.to_screen(u'Using %s' % key)
break
else:
- self._downloader.report_error(u'unable to extract video URL')
- return
+ raise ExtractorError(u'Unable to extract video URL')
mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
if mobj is None:
- self._downloader.report_error(u'unable to extract video URL')
- return
+ raise ExtractorError(u'Unable to extract video URL')
video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
@@ -812,8 +818,7 @@ class DailymotionIE(InfoExtractor):
mobj = re.search(r'', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
video_title = unescapeHTML(mobj.group('title'))
video_uploader = None
@@ -846,54 +851,52 @@ class DailymotionIE(InfoExtractor):
class PhotobucketIE(InfoExtractor):
"""Information extractor for photobucket.com."""
- _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+ # TODO: the original _VALID_URL was:
+ # r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+ # Check if it's necessary to keep the old extraction process
+ _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P.*)\.(?P(flv)|(mp4))'
IE_NAME = u'photobucket'
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
-
- def report_download_webpage(self, video_id):
- """Report webpage download."""
- self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
-
def _real_extract(self, url):
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
- video_id = mobj.group(1)
+ video_id = mobj.group('id')
- video_extension = 'flv'
+ video_extension = mobj.group('ext')
# Retrieve video webpage to extract further information
- request = compat_urllib_request.Request(url)
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(url, video_id)
# Extract URL, uploader, and title from webpage
self.report_extraction(video_id)
+ # We try first by looking the javascript code:
+ mobj = re.search(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (?P.*?)\);', webpage)
+ if mobj is not None:
+ info = json.loads(mobj.group('json'))
+ return [{
+ 'id': video_id,
+ 'url': info[u'downloadUrl'],
+ 'uploader': info[u'username'],
+ 'upload_date': datetime.date.fromtimestamp(info[u'creationDate']).strftime('%Y%m%d'),
+ 'title': info[u'title'],
+ 'ext': video_extension,
+ 'thumbnail': info[u'thumbUrl'],
+ }]
+
+ # We try looking in other parts of the webpage
mobj = re.search(r'', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract media URL')
- return
+ raise ExtractorError(u'Unable to extract media URL')
mediaURL = compat_urllib_parse.unquote(mobj.group(1))
video_url = mediaURL
mobj = re.search(r'(.*) video by (.*) - Photobucket', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
video_title = mobj.group(1).decode('utf-8')
video_uploader = mobj.group(2).decode('utf-8')
@@ -909,147 +912,72 @@ class PhotobucketIE(InfoExtractor):
class YahooIE(InfoExtractor):
- """Information extractor for video.yahoo.com."""
-
- _WORKING = False
- # _VALID_URL matches all Yahoo! Video URLs
- # _VPAGE_URL matches only the extractable '/watch/' URLs
- _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
- _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
- IE_NAME = u'video.yahoo'
-
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
+ """Information extractor for screen.yahoo.com."""
+ _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P\d*?)\.html'
- def report_download_webpage(self, video_id):
- """Report webpage download."""
- self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
-
- def _real_extract(self, url, new_video=True):
- # Extract ID from URL
+ def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
-
- video_id = mobj.group(2)
- video_extension = 'flv'
-
- # Rewrite valid but non-extractable URLs as
- # extractable English language /watch/ URLs
- if re.match(self._VPAGE_URL, url) is None:
- request = compat_urllib_request.Request(url)
- try:
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
-
- mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
- if mobj is None:
- self._downloader.report_error(u'Unable to extract id field')
- return
- yahoo_id = mobj.group(1)
-
- mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
- if mobj is None:
- self._downloader.report_error(u'Unable to extract vid field')
- return
- yahoo_vid = mobj.group(1)
-
- url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
- return self._real_extract(url, new_video=False)
-
- # Retrieve video webpage to extract further information
- request = compat_urllib_request.Request(url)
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
-
- # Extract uploader and title from webpage
- self.report_extraction(video_id)
- mobj = re.search(r'', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video title')
- return
- video_title = mobj.group(1).decode('utf-8')
-
- mobj = re.search(r'', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video uploader')
- return
- video_uploader = mobj.group(1).decode('utf-8')
-
- # Extract video thumbnail
- mobj = re.search(r'', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video thumbnail')
- return
- video_thumbnail = mobj.group(1).decode('utf-8')
-
- # Extract video description
- mobj = re.search(r'', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video description')
- return
- video_description = mobj.group(1).decode('utf-8')
- if not video_description:
- video_description = 'No description available.'
-
- # Extract video height and width
- mobj = re.search(r'', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video height')
- return
- yv_video_height = mobj.group(1)
-
- mobj = re.search(r'', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video width')
- return
- yv_video_width = mobj.group(1)
-
- # Retrieve video playlist to extract media URL
- # I'm not completely sure what all these options are, but we
- # seem to need most of them, otherwise the server sends a 401.
- yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents
- yv_bitrate = '700' # according to Wikipedia this is hard-coded
- request = compat_urllib_request.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
- '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
- '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
-
- # Extract media URL from playlist XML
- mobj = re.search(r'.+?)";', webpage)
+
+ if m_id is None:
+ # TODO: Check which url parameters are required
+ info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
+ webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage')
+ info_re = r'''.*?)\]\]>.*
+ .*?)\]\]>.*
+ .*?)\ .*\]\]>.*
+ https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?Pplay_redirect_hls\?clip_id=)?(?:videos?/)?(?P[0-9]+)'
IE_NAME = u'vimeo'
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
-
- def report_download_webpage(self, video_id):
- """Report webpage download."""
- self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)
-
def _real_extract(self, url, new_video=True):
# Extract ID from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('id')
if not mobj.group('proto'):
@@ -1084,13 +1000,7 @@ class VimeoIE(InfoExtractor):
# Retrieve video webpage to extract further information
request = compat_urllib_request.Request(url, None, std_headers)
- try:
- self.report_download_webpage(video_id)
- webpage_bytes = compat_urllib_request.urlopen(request).read()
- webpage = webpage_bytes.decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(request, video_id)
# Now we begin extracting as much information as we can from what we
# retrieved. First we extract the information common to all extractors,
@@ -1102,8 +1012,10 @@ class VimeoIE(InfoExtractor):
config = webpage.split(' = {config:')[1].split(',assets:')[0]
config = json.loads(config)
except:
- self._downloader.report_error(u'unable to extract info section')
- return
+ if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
+ raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
+ else:
+ raise ExtractorError(u'Unable to extract info section')
# Extract title
video_title = config["video"]["title"]
@@ -1118,7 +1030,7 @@ class VimeoIE(InfoExtractor):
# Extract video description
video_description = get_element_by_attribute("itemprop", "description", webpage)
if video_description: video_description = clean_html(video_description)
- else: video_description = ''
+ else: video_description = u''
# Extract upload date
video_upload_date = None
@@ -1149,11 +1061,10 @@ class VimeoIE(InfoExtractor):
video_quality = files[quality][0][2]
video_codec = files[quality][0][0]
video_extension = files[quality][0][1]
- self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
+ self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
break
else:
- self._downloader.report_error(u'no known codec found')
- return
+ raise ExtractorError(u'No known codec found')
video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
%(video_id, sig, timestamp, video_quality, video_codec.upper())
@@ -1179,28 +1090,15 @@ class ArteTvIE(InfoExtractor):
IE_NAME = u'arte.tv'
- def __init__(self, downloader=None):
- InfoExtractor.__init__(self, downloader)
-
- def report_download_webpage(self, video_id):
- """Report webpage download."""
- self._downloader.to_screen(u'[arte.tv] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id)
-
def fetch_webpage(self, url):
request = compat_urllib_request.Request(url)
try:
self.report_download_webpage(url)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
except ValueError as err:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
return webpage
def grep_webpage(self, url, regex, regexFlags, matchTuples):
@@ -1209,13 +1107,11 @@ class ArteTvIE(InfoExtractor):
info = {}
if mobj is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
for (i, key, err) in matchTuples:
if mobj.group(i) is None:
- self._downloader.trouble(err)
- return
+ raise ExtractorError(err)
else:
info[key] = mobj.group(i)
@@ -1228,7 +1124,7 @@ class ArteTvIE(InfoExtractor):
r'src="(.*?/videothek_js.*?\.js)',
0,
[
- (1, 'url', u'ERROR: Invalid URL: %s' % url)
+ (1, 'url', u'Invalid URL: %s' % url)
]
)
http_host = url.split('/')[2]
@@ -1240,9 +1136,9 @@ class ArteTvIE(InfoExtractor):
'(rtmp://.*?)\'',
re.DOTALL,
[
- (1, 'path', u'ERROR: could not extract video path: %s' % url),
- (2, 'player', u'ERROR: could not extract video player: %s' % url),
- (3, 'url', u'ERROR: could not extract video url: %s' % url)
+ (1, 'path', u'could not extract video path: %s' % url),
+ (2, 'player', u'could not extract video player: %s' % url),
+ (3, 'url', u'could not extract video url: %s' % url)
]
)
video_url = u'%s/%s' % (info.get('url'), info.get('path'))
@@ -1254,7 +1150,7 @@ class ArteTvIE(InfoExtractor):
r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
0,
[
- (1, 'url', u'ERROR: Invalid URL: %s' % url)
+ (1, 'url', u'Invalid URL: %s' % url)
]
)
next_url = compat_urllib_parse.unquote(info.get('url'))
@@ -1263,7 +1159,7 @@ class ArteTvIE(InfoExtractor):
r'