self.report_age_confirmation()
age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
def _extract_id(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
try:
video_webpage_bytes = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))
video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
break
if 'token' not in video_info:
if 'reason' in video_info:
- self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0])
+ raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0])
else:
- self._downloader.report_error(u'"token" parameter not in video info for unknown reason')
- return
+ raise ExtractorError(u'"token" parameter not in video info for unknown reason')
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
- self._downloader.report_error(u'"rental" videos not supported')
- return
+ raise ExtractorError(u'"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
- self._downloader.report_error(u'unable to extract uploader name')
- return
+ raise ExtractorError(u'Unable to extract uploader name')
video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
# uploader_id
# title
if 'title' not in video_info:
- self._downloader.report_error(u'unable to extract video title')
- return
+ raise ExtractorError(u'Unable to extract video title')
video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
# thumbnail image
self.report_rtmp_download()
video_url_list = [(None, video_info['conn'][0])]
elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
- url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
- url_data = [compat_parse_qs(uds) for uds in url_data_strs]
- url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
- url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
+ url_map = {}
+ for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
+ url_data = compat_parse_qs(url_data_str)
+ if 'itag' in url_data and 'url' in url_data:
+ url = url_data['url'][0] + '&signature=' + url_data['sig'][0]
+ if not 'ratebypass' in url: url += '&ratebypass=yes'
+ url_map[url_data['itag'][0]] = url
format_limit = self._downloader.params.get('format_limit', None)
available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
self.report_disclaimer()
disclaimer = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to retrieve disclaimer: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to retrieve disclaimer: %s' % compat_str(err))
# Confirm age
disclaimer_form = {
self.report_age_confirmation()
disclaimer = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group(1)
else:
mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract media URL')
- return
+ raise ExtractorError(u'Unable to extract media URL')
vardict = compat_parse_qs(mobj.group(1))
if 'mediaData' not in vardict:
- self._downloader.report_error(u'unable to extract media URL')
- return
+ raise ExtractorError(u'Unable to extract media URL')
mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
if mobj is None:
- self._downloader.report_error(u'unable to extract media URL')
- return
+ raise ExtractorError(u'Unable to extract media URL')
mediaURL = mobj.group('mediaURL').replace('\\/', '/')
video_extension = mediaURL[-3:]
video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
video_title = mobj.group(1).decode('utf-8')
mobj = re.search(r'submitter=(.*?);', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract uploader nickname')
- return
+ raise ExtractorError(u'Unable to extract uploader nickname')
video_uploader = mobj.group(1)
return [{
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group(1).split('_')[0].split('?')[0]
self.report_extraction(video_id)
mobj = re.search(r'\s*var flashvars = (.*)', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract media URL')
- return
+ raise ExtractorError(u'Unable to extract media URL')
flashvars = compat_urllib_parse.unquote(mobj.group(1))
for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
self.to_screen(u'Using %s' % key)
break
else:
- self._downloader.report_error(u'unable to extract video URL')
- return
+ raise ExtractorError(u'Unable to extract video URL')
mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
if mobj is None:
- self._downloader.report_error(u'unable to extract video URL')
- return
+ raise ExtractorError(u'Unable to extract video URL')
video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
video_title = unescapeHTML(mobj.group('title'))
video_uploader = None
class PhotobucketIE(InfoExtractor):
"""Information extractor for photobucket.com."""
- _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+ # TODO: the original _VALID_URL was:
+ # r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+ # Check if it's necessary to keep the old extraction process
+ _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))'
IE_NAME = u'photobucket'
def _real_extract(self, url):
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
- video_id = mobj.group(1)
+ video_id = mobj.group('id')
- video_extension = 'flv'
+ video_extension = mobj.group('ext')
# Retrieve video webpage to extract further information
- request = compat_urllib_request.Request(url)
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(url, video_id)
# Extract URL, uploader, and title from webpage
self.report_extraction(video_id)
+ # We try first by looking the javascript code:
+ mobj = re.search(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (?P<json>.*?)\);', webpage)
+ if mobj is not None:
+ info = json.loads(mobj.group('json'))
+ return [{
+ 'id': video_id,
+ 'url': info[u'downloadUrl'],
+ 'uploader': info[u'username'],
+ 'upload_date': datetime.date.fromtimestamp(info[u'creationDate']).strftime('%Y%m%d'),
+ 'title': info[u'title'],
+ 'ext': video_extension,
+ 'thumbnail': info[u'thumbUrl'],
+ }]
+
+ # We try looking in other parts of the webpage
mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract media URL')
- return
+ raise ExtractorError(u'Unable to extract media URL')
mediaURL = compat_urllib_parse.unquote(mobj.group(1))
video_url = mediaURL
mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
video_title = mobj.group(1).decode('utf-8')
video_uploader = mobj.group(2).decode('utf-8')
class YahooIE(InfoExtractor):
- """Information extractor for video.yahoo.com."""
+ """Information extractor for screen.yahoo.com."""
+ _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P<id>\d*?)\.html'
- _WORKING = False
- # _VALID_URL matches all Yahoo! Video URLs
- # _VPAGE_URL matches only the extractable '/watch/' URLs
- _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
- _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
- IE_NAME = u'video.yahoo'
-
- def _real_extract(self, url, new_video=True):
- # Extract ID from URL
+ def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
-
- video_id = mobj.group(2)
- video_extension = 'flv'
-
- # Rewrite valid but non-extractable URLs as
- # extractable English language /watch/ URLs
- if re.match(self._VPAGE_URL, url) is None:
- request = compat_urllib_request.Request(url)
- try:
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
-
- mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
- if mobj is None:
- self._downloader.report_error(u'Unable to extract id field')
- return
- yahoo_id = mobj.group(1)
-
- mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
- if mobj is None:
- self._downloader.report_error(u'Unable to extract vid field')
- return
- yahoo_vid = mobj.group(1)
-
- url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
- return self._real_extract(url, new_video=False)
-
- # Retrieve video webpage to extract further information
- request = compat_urllib_request.Request(url)
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
-
- # Extract uploader and title from webpage
- self.report_extraction(video_id)
- mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video title')
- return
- video_title = mobj.group(1).decode('utf-8')
-
- mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video uploader')
- return
- video_uploader = mobj.group(1).decode('utf-8')
-
- # Extract video thumbnail
- mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video thumbnail')
- return
- video_thumbnail = mobj.group(1).decode('utf-8')
-
- # Extract video description
- mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video description')
- return
- video_description = mobj.group(1).decode('utf-8')
- if not video_description:
- video_description = 'No description available.'
-
- # Extract video height and width
- mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video height')
- return
- yv_video_height = mobj.group(1)
-
- mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
- if mobj is None:
- self._downloader.report_error(u'unable to extract video width')
- return
- yv_video_width = mobj.group(1)
-
- # Retrieve video playlist to extract media URL
- # I'm not completely sure what all these options are, but we
- # seem to need most of them, otherwise the server sends a 401.
- yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents
- yv_bitrate = '700' # according to Wikipedia this is hard-coded
- request = compat_urllib_request.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
- '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
- '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
-
- # Extract media URL from playlist XML
- mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
- if mobj is None:
- self._downloader.report_error(u'Unable to extract media URL')
- return
- video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
- video_url = unescapeHTML(video_url)
-
- return [{
- 'id': video_id.decode('utf-8'),
- 'url': video_url,
- 'uploader': video_uploader,
- 'upload_date': None,
- 'title': video_title,
- 'ext': video_extension.decode('utf-8'),
- 'thumbnail': video_thumbnail.decode('utf-8'),
- 'description': video_description,
- }]
-
+ raise ExtractorError(u'Invalid URL: %s' % url)
+ video_id = mobj.group('id')
+ webpage = self._download_webpage(url, video_id)
+ m_id = re.search(r'YUI\.namespace\("Media"\)\.CONTENT_ID = "(?P<new_id>.+?)";', webpage)
+
+ if m_id is None:
+ # TODO: Check which url parameters are required
+ info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
+ webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage')
+ info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
+ <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
+ <media:pubStart><!\[CDATA\[(?P<date>.*?)\ .*\]\]></media:pubStart>.*
+ <media:content\ medium="image"\ url="(?P<thumb>.*?)"\ name="LARGETHUMB"
+ '''
+ self.report_extraction(video_id)
+ m_info = re.search(info_re, webpage, re.VERBOSE|re.DOTALL)
+ if m_info is None:
+ raise ExtractorError(u'Unable to extract video info')
+ video_title = m_info.group('title')
+ video_description = m_info.group('description')
+ video_thumb = m_info.group('thumb')
+ video_date = m_info.group('date')
+ video_date = datetime.datetime.strptime(video_date, '%m/%d/%Y').strftime('%Y%m%d')
+
+ # TODO: Find a way to get mp4 videos
+ rest_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;element=stream;outputformat=mrss;id=%s;lmsoverride=1;bw=375;dynamicstream=1;cb=83521105;tech=flv,mp4;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
+ webpage = self._download_webpage(rest_url, video_id, u'Downloading video url webpage')
+ m_rest = re.search(r'<media:content url="(?P<url>.*?)" path="(?P<path>.*?)"', webpage)
+ video_url = m_rest.group('url')
+ video_path = m_rest.group('path')
+ if m_rest is None:
+ raise ExtractorError(u'Unable to extract video url')
+
+ else: # We have to use a different method if another id is defined
+ long_id = m_id.group('new_id')
+ info_url = 'http://video.query.yahoo.com/v1/public/yql?q=SELECT%20*%20FROM%20yahoo.media.video.streams%20WHERE%20id%3D%22' + long_id + '%22%20AND%20format%3D%22mp4%2Cflv%22%20AND%20protocol%3D%22rtmp%2Chttp%22%20AND%20plrs%3D%2286Gj0vCaSzV_Iuf6hNylf2%22%20AND%20acctid%3D%22389%22%20AND%20plidl%3D%22%22%20AND%20pspid%3D%22792700001%22%20AND%20offnetwork%3D%22false%22%20AND%20site%3D%22ivy%22%20AND%20lang%3D%22en-US%22%20AND%20region%3D%22US%22%20AND%20override%3D%22none%22%3B&env=prod&format=json&callback=YUI.Env.JSONP.yui_3_8_1_1_1368368376830_335'
+ webpage = self._download_webpage(info_url, video_id, u'Downloading info json')
+ json_str = re.search(r'YUI.Env.JSONP.yui.*?\((.*?)\);', webpage).group(1)
+ info = json.loads(json_str)
+ res = info[u'query'][u'results'][u'mediaObj'][0]
+ stream = res[u'streams'][0]
+ video_path = stream[u'path']
+ video_url = stream[u'host']
+ meta = res[u'meta']
+ video_title = meta[u'title']
+ video_description = meta[u'description']
+ video_thumb = meta[u'thumbnail']
+ video_date = None # I can't find it
+
+ info_dict = {
+ 'id': video_id,
+ 'url': video_url,
+ 'play_path': video_path,
+ 'title':video_title,
+ 'description': video_description,
+ 'thumbnail': video_thumb,
+ 'upload_date': video_date,
+ 'ext': 'flv',
+ }
+ return info_dict
class VimeoIE(InfoExtractor):
"""Information extractor for vimeo.com."""
# Extract ID from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('id')
if not mobj.group('proto'):
config = json.loads(config)
except:
if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
- self._downloader.report_error(u'The author has restricted the access to this video, try with the "--referer" option')
+ raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
else:
- self._downloader.report_error(u'unable to extract info section')
- return
+ raise ExtractorError(u'Unable to extract info section')
# Extract title
video_title = config["video"]["title"]
self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
break
else:
- self._downloader.report_error(u'no known codec found')
- return
+ raise ExtractorError(u'No known codec found')
video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
%(video_id, sig, timestamp, video_quality, video_codec.upper())
self.report_download_webpage(url)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
except ValueError as err:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
return webpage
def grep_webpage(self, url, regex, regexFlags, matchTuples):
info = {}
if mobj is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
for (i, key, err) in matchTuples:
if mobj.group(i) is None:
- self._downloader.report_error(err)
- return
+ raise ExtractorError(err)
else:
info[key] = mobj.group(i)
'id': info.get('id'),
'url': compat_urllib_parse.unquote(info.get('url')),
'uploader': u'arte.tv',
- 'upload_date': info.get('date'),
+ 'upload_date': unified_strdate(info.get('date')),
'title': info.get('title').decode('utf-8'),
'ext': u'mp4',
'format': u'NA',
except ValueError as err:
# since this is the last-resort InfoExtractor, if
# this error is thrown, it'll be thrown here
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
self.report_extraction(video_id)
# Start with something easy: JW Player in SWFObject
# Broaden the search a little bit: JWPlayer JS loader
mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
if mobj is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
# It's possible that one of the regexes
# matched, but returned an empty group:
if mobj.group(1) is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_url = compat_urllib_parse.unquote(mobj.group(1))
video_id = os.path.basename(video_url)
# and so on and so forth; it's just not practical
mobj = re.search(r'<title>(.*)</title>', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
video_title = mobj.group(1)
# video uploader is domain name
mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
if mobj is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
video_uploader = mobj.group(1)
return [{
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
if mobj is None:
- self._downloader.report_error(u'invalid search query "%s"' % query)
- return
+ raise ExtractorError(u'Invalid search query "%s"' % query)
prefix, query = query.split(':')
prefix = prefix[8:]
try:
n = int(prefix)
if n <= 0:
- self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
- return
+ raise ExtractorError(u'Invalid download number %s for query "%s"' % (n, query))
elif n > self._max_youtube_results:
self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
n = self._max_youtube_results
try:
data = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download API page: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to download API page: %s' % compat_str(err))
api_response = json.loads(data)['data']
if not 'items' in api_response:
- self._downloader.report_error(u'[youtube] No video results')
- return
+ raise ExtractorError(u'[youtube] No video results')
new_ids = list(video['id'] for video in api_response['items'])
video_ids += new_ids
class GoogleSearchIE(InfoExtractor):
"""Information Extractor for Google Video search queries."""
- _VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
- _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
- _VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)'
+ _VALID_URL = r'gvsearch(?P<prefix>|\d+|all):(?P<query>[\s\S]+)'
_MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"'
_max_google_results = 1000
IE_NAME = u'video.google:search'
- def report_download_page(self, query, pagenum):
- """Report attempt to download playlist page with given number."""
- query = query.decode(preferredencoding())
- self.to_screen(u'query "%s": Downloading page %s' % (query, pagenum))
-
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
- if mobj is None:
- self._downloader.report_error(u'invalid search query "%s"' % query)
- return
- prefix, query = query.split(':')
- prefix = prefix[8:]
- query = query.encode('utf-8')
+ prefix = mobj.group('prefix')
+ query = mobj.group('query')
if prefix == '':
- self._download_n_results(query, 1)
- return
+ return self._get_n_results(query, 1)
elif prefix == 'all':
- self._download_n_results(query, self._max_google_results)
- return
+ return self._get_n_results(query, self._max_google_results)
else:
- try:
- n = int(prefix)
- if n <= 0:
- self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
- return
- elif n > self._max_google_results:
- self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
- n = self._max_google_results
- self._download_n_results(query, n)
- return
- except ValueError: # parsing prefix as integer fails
- self._download_n_results(query, 1)
- return
-
- def _download_n_results(self, query, n):
- """Downloads a specified number of results for a query"""
+ n = int(prefix)
+ if n <= 0:
+ raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
+ elif n > self._max_google_results:
+ self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
+ n = self._max_google_results
+ return self._get_n_results(query, n)
- video_ids = []
- pagenum = 0
-
- while True:
- self.report_download_page(query, pagenum)
- result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum*10)
- request = compat_urllib_request.Request(result_url)
- try:
- page = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
- return
+ def _get_n_results(self, query, n):
+ """Get a specified number of results for a query"""
- # Extract video identifiers
- for mobj in re.finditer(self._VIDEO_INDICATOR, page):
- video_id = mobj.group(1)
- if video_id not in video_ids:
- video_ids.append(video_id)
- if len(video_ids) == n:
- # Specified n videos reached
- for id in video_ids:
- self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
- return
+ res = {
+ '_type': 'playlist',
+ 'id': query,
+ 'entries': []
+ }
- if re.search(self._MORE_PAGES_INDICATOR, page) is None:
- for id in video_ids:
- self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
- return
+ for pagenum in itertools.count(1):
+ result_url = u'http://video.google.com/videosearch?q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
+ webpage = self._download_webpage(result_url, u'gvsearch:' + query,
+ note='Downloading result page ' + str(pagenum))
- pagenum = pagenum + 1
+ for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
+ e = {
+ '_type': 'url',
+ 'url': mobj.group(1)
+ }
+ res['entries'].append(e)
+ if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
+ return res
class YahooSearchIE(InfoExtractor):
"""Information Extractor for Yahoo! Video search queries."""
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
if mobj is None:
- self._downloader.report_error(u'invalid search query "%s"' % query)
- return
+ raise ExtractorError(u'Invalid search query "%s"' % query)
prefix, query = query.split(':')
prefix = prefix[8:]
try:
n = int(prefix)
if n <= 0:
- self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
- return
+ raise ExtractorError(u'Invalid download number %s for query "%s"' % (n, query))
elif n > self._max_yahoo_results:
self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
n = self._max_yahoo_results
try:
page = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to download webpage: %s' % compat_str(err))
# Extract video identifiers
for mobj in re.finditer(self._VIDEO_INDICATOR, page):
# Extract playlist id
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
- self._downloader.report_error(u'invalid url: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
# Download playlist videos from API
playlist_id = mobj.group(1) or mobj.group(2)
try:
response = json.loads(page)
except ValueError as err:
- self._downloader.report_error(u'Invalid JSON in API response: ' + compat_str(err))
- return
+ raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
if 'feed' not in response:
- self._downloader.report_error(u'Got a malformed response from YouTube API')
- return
+ raise ExtractorError(u'Got a malformed response from YouTube API')
playlist_title = response['feed']['title']['$t']
if 'entry' not in response['feed']:
# Number of videos is a multiple of self._MAX_RESULTS
# Extract channel id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid url: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
# Download channel page
channel_id = mobj.group(1)
# Extract username
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid url: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
username = mobj.group(1)
# Extract username
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid url: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
username = mobj.group(1)
self.report_download_webpage(file_id)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve file webpage: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to retrieve file webpage: %s' % compat_str(err))
# Search for the real file URL
mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
if (mobj is not None) and (mobj.group(1) is not None):
restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
- self._downloader.report_error(u'%s' % restriction_message)
+ raise ExtractorError(u'%s' % restriction_message)
else:
- self._downloader.report_error(u'unable to extract download URL from: %s' % url)
- return
+ raise ExtractorError(u'Unable to extract download URL from: %s' % url)
file_url = mobj.group(1)
file_extension = os.path.splitext(file_url)[1][1:]
# Search for file title
mobj = re.search(r'<b title="(.*?)">', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
file_title = mobj.group(1).decode('utf-8')
return [{
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('ID')
url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
urlp = compat_urllib_parse_urlparse(url)
if urlp.path.startswith('/play/'):
json_code_bytes = urlh.read()
json_code = json_code_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to read video info webpage: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))
try:
json_data = json.loads(json_code)
'user_agent': 'iTunes/10.6.1',
}
except (ValueError,KeyError) as err:
- self._downloader.report_error(u'unable to parse video information: %s' % repr(err))
- return
+ raise ExtractorError(u'Unable to parse video information: %s' % repr(err))
return [info]
def _real_extract(self,url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._download.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group(1)
mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\'',
webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract media URL')
- return
+ raise ExtractorError(u'Unable to extract media URL')
video_url = mobj.group(1) + ('/%s.flv' % video_id)
mobj = re.search('<title>([^<]+)</title>', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
video_title = mobj.group(1)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
showName = mobj.group('showname')
videoId = mobj.group('episode')
try:
config = json.loads(configJSON)
except (ValueError,) as err:
- self._downloader.report_error(u'Invalid JSON in configuration file: ' + compat_str(err))
- return
+ raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err))
playlist = config['playlist']
videoUrl = playlist[1]['url']
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('videoid')
info = {
try:
metaXml = compat_urllib_request.urlopen(xmlUrl).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
mdoc = xml.etree.ElementTree.fromstring(metaXml)
try:
info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
manifest_url = videoNode.findall('./file')[0].text
except IndexError:
- self._downloader.report_error(u'Invalid metadata XML file')
- return
+ raise ExtractorError(u'Invalid metadata XML file')
manifest_url += '?hdcore=2.10.3'
self.report_manifest(video_id)
try:
manifestXml = compat_urllib_request.urlopen(manifest_url).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
adoc = xml.etree.ElementTree.fromstring(manifestXml)
try:
node_id = media_node.attrib['url']
video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
except IndexError as err:
- self._downloader.report_error(u'Invalid manifest file')
- return
+ raise ExtractorError(u'Invalid manifest file')
url_pr = compat_urllib_parse_urlparse(manifest_url)
url = url_pr.scheme + '://' + url_pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group(1)
webpage = self._download_webpage(url, video_id)
# Extract video URL
mobj = re.search(r'flv_url=(.+?)&', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract video url')
- return
+ raise ExtractorError(u'Unable to extract video url')
video_url = compat_urllib_parse.unquote(mobj.group(1))
# Extract title
mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract video title')
- return
+ raise ExtractorError(u'Unable to extract video title')
video_title = mobj.group(1)
# Extract video thumbnail
mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract video thumbnail')
- return
+ raise ExtractorError(u'Unable to extract video thumbnail')
video_thumbnail = mobj.group(0)
info = {
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
# extract uploader (which is in the url)
uploader = mobj.group(1)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
# extract uploader (which is in the url)
uploader = mobj.group(1)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
webpage = self._download_webpage(url, video_id=url)
self.report_extraction(url)
# Extract video URL
mobj = re.search(r"jsclassref ?= ?'([^']*)'", webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract video url')
- return
+ raise ExtractorError(u'Unable to extract video url')
real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
# Extract title
mobj = re.search(r'contentTitle = "(.*?)";', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract video title')
- return
+ raise ExtractorError(u'Unable to extract video title')
video_title = mobj.group(1)
# Extract description
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
# extract uploader & filename from url
uploader = mobj.group(1).decode('utf-8')
file_id = uploader + "-" + mobj.group(2).decode('utf-8')
self.report_download_json(file_url)
jsonData = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve file: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to retrieve file: %s' % compat_str(err))
# parse JSON
json_data = json.loads(jsonData)
break # got it!
else:
if req_format not in formats:
- self._downloader.report_error(u'format is not available')
- return
+ raise ExtractorError(u'Format is not available')
url_list = self.get_urls(formats, req_format)
file_url = self.check_urls(url_list)
try:
metaXml = compat_urllib_request.urlopen(xmlUrl).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
mdoc = xml.etree.ElementTree.fromstring(metaXml)
try:
info['title'] = mdoc.findall('./title')[0].text
info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
except IndexError:
- self._downloader.report_error(u'Invalid metadata XML file')
- return
+ raise ExtractorError(u'Invalid metadata XML file')
info['ext'] = info['url'].rpartition('.')[2]
return [info]
elif mobj.group('course'): # A course page
try:
rootpage = compat_urllib_request.urlopen(rootURL).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download course info page: ' + compat_str(err))
- return
+ raise ExtractorError(u'Unable to download course info page: ' + compat_str(err))
info['title'] = info['id']
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
if not mobj.group('proto'):
url = 'http://' + url
video_id = mobj.group('videoid')
mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract song name')
- return
+ raise ExtractorError(u'Unable to extract song name')
song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract performer')
- return
+ raise ExtractorError(u'Unable to extract performer')
performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
video_title = performer + ' - ' + song_name
mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to mtvn_uri')
- return
+ raise ExtractorError(u'Unable to extract mtvn_uri')
mtvn_uri = mobj.group(1)
mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract content id')
- return
+ raise ExtractorError(u'Unable to extract content id')
content_id = mobj.group(1)
videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri
try:
metadataXml = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video metadata: %s' % compat_str(err))
- return
+ raise ExtractorError(u'Unable to download video metadata: %s' % compat_str(err))
mdoc = xml.etree.ElementTree.fromstring(metadataXml)
renditions = mdoc.findall('.//rendition')
format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate']
video_url = rendition.find('./src').text
except KeyError:
- self._downloader.report_error('Invalid rendition field.')
- return
+ raise ExtractorError(u'Invalid rendition field.')
info = {
'id': video_id,
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('ID')
info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
fileid = config['data'][0]['streamfileids'][format]
keys = [s['k'] for s in config['data'][0]['segs'][format]]
except (UnicodeDecodeError, ValueError, KeyError):
- self._downloader.report_error(u'unable to extract info section')
- return
+ raise ExtractorError(u'Unable to extract info section')
files_info=[]
sid = self._gen_sid()
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group(1)
# Get webpage content
result = re.search(self.VIDEO_URL_RE, webpage)
if result is None:
- self._downloader.report_error(u'unable to extract video url')
- return
+ raise ExtractorError(u'Unable to extract video url')
video_url = compat_urllib_parse.unquote(result.group(1))
result = re.search(self.VIDEO_TITLE_RE, webpage)
if result is None:
- self._downloader.report_error(u'unable to extract video title')
- return
+ raise ExtractorError(u'Unable to extract video title')
video_title = result.group(1)
result = re.search(self.VIDEO_THUMB_RE, webpage)
if result is None:
- self._downloader.report_error(u'unable to extract video thumbnail')
- return
+ raise ExtractorError(u'Unable to extract video thumbnail')
video_thumbnail = result.group(1)
return [{
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'Invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
post_url = mobj.group(0)
video_id = mobj.group(1)
pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
mobj = re.search(pattern, webpage)
if mobj is None:
- self._downloader.report_error(u'unable to extract video page URL')
+ raise ExtractorError(u'Unable to extract video page URL')
video_page = mobj.group(1)
webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
mobj = re.findall(pattern, webpage)
if len(mobj) == 0:
- self._downloader.report_error(u'unable to extract video links')
+ raise ExtractorError(u'Unable to extract video links')
# Sort in resolution
links = sorted(mobj)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group(1)
if video_id.endswith('/index.html'):
api = api_base + '/channel/archives/%s.json' % video_id
elif mobj.group('chapterid'):
chapter_id = mobj.group('chapterid')
- # youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
webpage = self._download_webpage(url, chapter_id)
m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
if not m:
- raise ExtractorError('Cannot find archive of a chapter')
+ raise ExtractorError(u'Cannot find archive of a chapter')
archive_id = m.group(1)
- api = api_base + '/broadcast/by_chapter/%s.json' % chapter_id
- chapter_info_json = self._download_webpage(api, chapter_id,
- note='Downloading chapter information',
- errnote='Chapter information download failed')
+ api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
+ chapter_info_xml = self._download_webpage(api, chapter_id,
+ note=u'Downloading chapter information',
+ errnote=u'Chapter information download failed')
+ doc = xml.etree.ElementTree.fromstring(chapter_info_xml)
+ for a in doc.findall('.//archive'):
+ if archive_id == a.find('./id').text:
+ break
+ else:
+ raise ExtractorError(u'Could not find chapter in chapter information')
+
+ video_url = a.find('./video_file_url').text
+ video_ext = video_url.rpartition('.')[2] if '.' in video_url else u'flv'
+
+ chapter_api_url = u'https://api.twitch.tv/kraken/videos/c' + chapter_id
+ chapter_info_json = self._download_webpage(chapter_api_url, u'c' + chapter_id,
+ note=u'Downloading chapter metadata',
+ errnote=u'Download of chapter metadata failed')
chapter_info = json.loads(chapter_info_json)
- video_info = filter(lambda ci: str(ci['id']) == archive_id, chapter_info)
- video_url = 'TODO:SERVER_NAME' + '/archives/' + vi['file_name'] + '?start=TODO:startid'
+ bracket_start = int(doc.find('.//bracket_start').text)
+ bracket_end = int(doc.find('.//bracket_end').text)
+
+ # TODO determine start (and probably fix up file)
+ # youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
+ #video_url += u'?start=' + TODO:start_timestamp
+ # bracket_start is 13290, but we want 51670615
+ self._downloader.report_warning(u'Chapter detected, but we can just download the whole file. '
+ u'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
- # Result: http://store36.media36.justin.tv/archives/2012-12-2/live_user_firmbelief_1354484906.flv?start=51670615
- # (this may not be playable, may need to craft some additional headers)
- # TODO: title ("GOD", from webpage?)
- # TODO: ext (from vi['file_name'])
- # print(json.dumps(video_info, indent=2))
- # return
- raise NotImplementedError('twitch.tv chapters are not yet supported, sorry (See https://github.com/rg3/youtube-dl/issues/810 )')
+ info = {
+ 'id': u'c' + chapter_id,
+ 'url': video_url,
+ 'ext': video_ext,
+ 'title': chapter_info['title'],
+ 'thumbnail': chapter_info['preview'],
+ 'description': chapter_info['description'],
+ 'uploader': chapter_info['channel']['display_name'],
+ 'uploader_id': chapter_info['channel']['name'],
+ }
+ return [info]
else:
video_id = mobj.group('videoid')
api = api_base + '/broadcast/by_archive/%s.json' % video_id
m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
if not m:
- self._downloader.report_error(u'unable to find video information')
+ raise ExtractorError(u'Unable to find video information')
video_url = unescapeHTML(m.group('url'))
m = re.search(r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>", webpage, flags=re.DOTALL)
if not m:
m = re.search(r'<title>(?P<title>[^<]+?)</title>', webpage)
if not m:
- self._downloader.report_error(u'Cannot find video title')
+ raise ExtractorError(u'Cannot find video title')
title = clean_html(m.group('title'))
m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
video_url = vid.group('videoURL')
video_thumb = thumb.group('thumbnail')
if not video_url:
- self._downloader.report_error(u'Cannot find video url for %s' % video_id)
+ raise ExtractorError(u'Cannot find video url for %s' % video_id)
info = {
'id':video_id,
'url':video_url,
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('videoid')
else:
format = self._specific( req_format, formats )
if result is None:
- self._downloader.report_error(u'requested format not available')
- return
+ raise ExtractorError(u'Requested format not available')
return [format]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('videoid')
video_title = mobj.group('title')
VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
result = re.search(VIDEO_URL_RE, webpage)
if result is None:
- self._downloader.report_error(u'unable to extract video url')
- return
+ raise ExtractorError(u'Unable to extract video url')
video_url = compat_urllib_parse.unquote(result.group('url'))
#Get the uploaded date
VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
result = re.search(VIDEO_UPLOADED_RE, webpage)
if result is None:
- self._downloader.report_error(u'unable to extract video title')
- return
+ raise ExtractorError(u'Unable to extract video title')
upload_date = unified_strdate(result.group('date'))
info = {'id': video_id,
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('videoid')
# extract values from metadata
url_flv_el = metadata.find('url_flv')
if url_flv_el is None:
- self._downloader.report_error(u'unable to extract download url')
- return
+ raise ExtractorError(u'Unable to extract download url')
video_url = url_flv_el.text
extension = os.path.splitext(video_url)[1][1:]
title_el = metadata.find('title')
if title_el is None:
- self._downloader.report_error(u'unable to extract title')
- return
+ raise ExtractorError(u'Unable to extract title')
title = title_el.text
format_id_el = metadata.find('format_id')
if format_id_el is None:
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('video_id')
m = re.search(r'file: "(.*?)",', webpage)
if not m:
- self._downloader.report_error(u'unable to find video url')
- return
+ raise ExtractorError(u'Unable to find video url')
video_url = m.group(1)
m = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', webpage)
if not m:
- self._downloader.report_error(u'Cannot find video title')
+ raise ExtractorError(u'Cannot find video title')
title = unescapeHTML(m.group('title')).replace('LiveLeak.com -', '').strip()
m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
if not streams:
assert '"fsk"' in html
- self._downloader.report_error(u'this video is only available after 8:00 pm')
- return
+ raise ExtractorError(u'This video is only available after 8:00 pm')
# choose default media type and highest quality for now
stream = max([s for s in streams if int(s["media_type"]) == 0],
# We get the link to the free download page
m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
if m_download is None:
- self._downloader.report_error('No free songs founded')
- return
+ raise ExtractorError(u'No free songs found')
+
download_link = m_download.group(1)
id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
webpage, re.MULTILINE|re.DOTALL).group('id')
'ext': video_extension,
'title': video_title,
}]
+
+class InaIE(InfoExtractor):
+ """Information Extractor for Ina.fr"""
+ _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I[0-9]+)/.*'
+ def _real_extract(self,url):
+ mobj = re.match(self._VALID_URL, url)
+
+ video_id = mobj.group('id')
+ mrss_url='http://player.ina.fr/notices/%s.mrss' % video_id
+ video_extension = 'mp4'
+ webpage = self._download_webpage(mrss_url, video_id)
+
+ mobj = re.search(r'<media:player url="(?P<mp4url>http://mp4\.ina\.fr/[^"]+\.mp4)', webpage)
+ if mobj is None:
+ raise ExtractorError(u'Unable to extract media URL')
+ video_url = mobj.group(1)
+
+ mobj = re.search(r'<title><!\[CDATA\[(?P<titre>.*?)]]></title>', webpage)
+ if mobj is None:
+ raise ExtractorError(u'Unable to extract title')
+ video_title = mobj.group(1)
+
+ return [{
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': video_extension,
+ 'title': video_title,
+ }]
def gen_extractors():
""" Return a list of an instance of every supported extractor.
TumblrIE(),
BandcampIE(),
RedTubeIE(),
+ InaIE(),
GenericIE()
]