X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2FInfoExtractors.py;h=3dad82835e1af48caa367cfa06ab8038e5f34678;hb=2069acc6a40136eebceb9f82cda37cf0b5fc855f;hp=b53071c43373b31492fb79d0a479b47af59f8578;hpb=993693aa796fa2d50e0ce3cc7b0d551ffcc4a786;p=youtube-dl

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index b53071c43..3dad82835 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -23,7 +23,7 @@ class InfoExtractor(object):
     Information extractors are the classes that, given a URL, extract
     information about the video (or videos) the URL refers to. This
     information includes the real video URL, the video title, author and
-    others. The information is stored in a dictionary which is then 
+    others. The information is stored in a dictionary which is then
     passed to the FileDownloader. The FileDownloader processes this
     information possibly downloading the video to the file system, among
     other possible outcomes.
@@ -32,16 +32,17 @@ class InfoExtractor(object):
 
     id:             Video identifier.
     url:            Final video URL.
-    uploader:       Nickname of the video uploader, unescaped.
-    upload_date:    Video upload date (YYYYMMDD).
     title:          Video title, unescaped.
     ext:            Video filename extension.
+    uploader:       Full name of the video uploader.
+    upload_date:    Video upload date (YYYYMMDD).
 
     The following fields are optional:
 
     format:         The video format, defaults to ext (used for --get-format)
     thumbnail:      Full URL to a video thumbnail image.
     description:    One-line video description.
+    uploader_id:    Nickname or id of the video uploader.
     player_url:     SWF Player URL (used for rtmpdump).
     subtitles:      The .srt file contents.
     urlhandle:      [internal] The urlHandle to be used to download the file,
@@ -100,6 +101,9 @@ class InfoExtractor(object):
         """Real extraction process. Redefine in subclasses."""
         pass
 
+    @property
+    def IE_NAME(self):
+        return type(self).__name__[:-2]
 
 class YoutubeIE(InfoExtractor):
     """Information extractor for youtube.com."""
@@ -116,7 +120,7 @@ class YoutubeIE(InfoExtractor):
                          |(?:                                             # or the v= param in all its forms
                              (?:watch(?:_popup)?(?:\.php)?)?              # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                              (?:\?|\#!?)                                  # the params delimiter ? or # or #!
-                             (?:.+&)?                                     # any other preceding param (like /?s=tuff&v=xxxx)
+                             (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
                              v=
                          )
                      )?                                                   # optional -> youtube.com/xxxx is OK
@@ -159,7 +163,7 @@ class YoutubeIE(InfoExtractor):
         '44': '480x854',
         '45': '720x1280',
         '46': '1080x1920',
-    }   
+    }
     IE_NAME = u'youtube'
 
     def suitable(self, url):
@@ -219,6 +223,34 @@ class YoutubeIE(InfoExtractor):
             srt += caption + '\n\n'
         return srt
 
+    def _extract_subtitles(self, video_id):
+        self.report_video_subtitles_download(video_id)
+        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
+        try:
+            srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+        srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
+        srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
+        if not srt_lang_list:
+            return (u'WARNING: video has no closed captions', None)
+        if self._downloader.params.get('subtitleslang', False):
+            srt_lang = self._downloader.params.get('subtitleslang')
+        elif 'en' in srt_lang_list:
+            srt_lang = 'en'
+        else:
+            srt_lang = list(srt_lang_list.keys())[0]
+        if not srt_lang in srt_lang_list:
+            return (u'WARNING: no closed captions found in the specified language', None)
+        request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
+        try:
+            srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+        if not srt_xml:
+            return (u'WARNING: unable to download video subtitles', None)
+        return (None, self._closed_captions_xml_to_srt(srt_xml))
+
     def _print_formats(self, formats):
         print('Available formats:')
         for x in formats:
@@ -293,22 +325,25 @@ class YoutubeIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
             return
 
-    def _real_extract(self, url):
-        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
-        mobj = re.search(self._NEXT_URL_RE, url)
-        if mobj:
-            url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
-
-        # Extract video id from URL
+    def _extract_id(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
         video_id = mobj.group(2)
+        return video_id
+
+    def _real_extract(self, url):
+        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
+        mobj = re.search(self._NEXT_URL_RE, url)
+        if mobj:
+            url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
+        video_id = self._extract_id(url)
 
         # Get video webpage
         self.report_video_webpage_download(video_id)
-        request = compat_urllib_request.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
+        url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
+        request = compat_urllib_request.Request(url)
         try:
             video_webpage_bytes = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -356,10 +391,18 @@ class YoutubeIE(InfoExtractor):
 
         # uploader
         if 'author' not in video_info:
-            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+            self._downloader.trouble(u'ERROR: unable to extract uploader name')
             return
         video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
 
+        # uploader_id
+        video_uploader_id = None
+        mobj = re.search(r'', video_webpage)
+        if mobj is not None:
+            video_uploader_id = mobj.group(1)
+        else:
+            self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+
         # title
         if 'title' not in video_info:
             self._downloader.trouble(u'ERROR: unable to extract video title')
@@ -395,35 +438,9 @@ class YoutubeIE(InfoExtractor):
         # closed captions
         video_subtitles = None
         if self._downloader.params.get('writesubtitles', False):
-            try:
-                self.report_video_subtitles_download(video_id)
-                request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
-                try:
-                    srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
-                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
-                srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
-                srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
-                if not srt_lang_list:
-                    raise Trouble(u'WARNING: video has no closed captions')
-                if self._downloader.params.get('subtitleslang', False):
-                    srt_lang = self._downloader.params.get('subtitleslang')
-                elif 'en' in srt_lang_list:
-                    srt_lang = 'en'
-                else:
-                    srt_lang = srt_lang_list.keys()[0]
-                if not srt_lang in srt_lang_list:
-                    raise Trouble(u'WARNING: no closed captions found in the specified language')
-                request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
-                try:
-                    srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
-                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
-                if not srt_xml:
-                    raise Trouble(u'WARNING: unable to download video subtitles')
-                video_subtitles = self._closed_captions_xml_to_srt(srt_xml)
-            except Trouble as trouble:
-                self._downloader.trouble(str(trouble))
+            (srt_error, video_subtitles) = self._extract_subtitles(video_id)
+            if srt_error:
+                self._downloader.trouble(srt_error)
 
         if 'length_seconds' not in video_info:
             self._downloader.trouble(u'WARNING: unable to extract video duration')
@@ -443,7 +460,7 @@ class YoutubeIE(InfoExtractor):
         elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
             url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
             url_data = [compat_parse_qs(uds) for uds in url_data_strs]
-            url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
+            url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
             url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
 
             format_limit = self._downloader.params.get('format_limit', None)
@@ -493,6 +510,7 @@ class YoutubeIE(InfoExtractor):
             'id': video_id,
             'url': video_real_url,
             'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
             'upload_date': upload_date,
             'title': video_title,
             'ext': video_extension,
@@ -988,26 +1006,27 @@ class VimeoIE(InfoExtractor):
         except:
             self._downloader.trouble(u'ERROR: unable to extract info section')
             return
-        
+
         # Extract title
         video_title = config["video"]["title"]
 
-        # Extract uploader
+        # Extract uploader and uploader_id
         video_uploader = config["video"]["owner"]["name"]
+        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1]
 
         # Extract video thumbnail
         video_thumbnail = config["video"]["thumbnail"]
 
         # Extract video description
-        video_description = get_element_by_id("description", webpage)
+        video_description = get_element_by_attribute("itemprop", "description", webpage)
         if video_description: video_description = clean_html(video_description)
         else: video_description = ''
 
         # Extract upload date
         video_upload_date = None
-        mobj = re.search(r'', webpage)
+        mobj = re.search(r' 0:
+        if url_map:
             # Decide which formats to download
             req_format = self._downloader.params.get('format', None)
             format_limit = self._downloader.params.get('format_limit', None)
@@ -2256,7 +2275,7 @@ class MyVideoIE(InfoExtractor):
 
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
-    
+
     def report_download_webpage(self, video_id):
         """Report webpage download."""
         self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
@@ -2310,10 +2329,10 @@ class ComedyCentralIE(InfoExtractor):
     """Information extractor for The Daily Show and Colbert Report """
 
     # urls can be abbreviations like :thedailyshow or :colbert
-    # urls for episodes like: 
+    # urls for episodes like:
    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
    # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
-    # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524 
+    # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
     _VALID_URL = r"""^(:(?Ptds|thedailyshow|cr|colbert|colbertnation|colbertreport)
                       |(https?://)?(www\.)?
                           (?Pthedailyshow|colbertnation)\.com/
@@ -2321,7 +2340,7 @@ class ComedyCentralIE(InfoExtractor):
                           (?P
                               (the-colbert-report-(videos|collections)/(?P[0-9]+)/[^/]*/(?P.*?))
                               |(watch/(?P[^/]*)/(?P.*)))))
-                     $"""
+                     $"""
     IE_NAME = u'comedycentral'
 
     _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
@@ -2425,7 +2444,7 @@ class ComedyCentralIE(InfoExtractor):
                 return
             else:
                 mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
-        
+
         playerUrl_raw = mMovieParams[0][0]
         self.report_player_url(epTitle)
         try:
@@ -2474,7 +2493,7 @@ class ComedyCentralIE(InfoExtractor):
             if len(turls) == 0:
                 self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
                 continue
-            
+
             if self._downloader.params.get('listformats', None):
                 self._print_formats([i[0] for i in turls])
                 return
@@ -2514,7 +2533,7 @@ class ComedyCentralIE(InfoExtractor):
             }
 
             results.append(info)
-            
+
         return results
 
 
@@ -2973,7 +2992,7 @@ class MixcloudIE(InfoExtractor):
                 if file_url is not None:
                     break # got it!
             else:
-                if req_format not in formats.keys():
+                if req_format not in formats:
                     self._downloader.trouble(u'ERROR: format is not available')
                     return
@@ -3078,7 +3097,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
                 assert entry['type'] == 'reference'
                 results += self.extract(entry['url'])
             return results
-        
+
         else: # Root page
             info = {
                 'id': 'Stanford OpenClassroom',
@@ -3152,7 +3171,7 @@ class MTVIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract performer')
             return
         performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
-        video_title = performer + ' - ' + song_name 
+        video_title = performer + ' - ' + song_name
 
         mobj = re.search(r'', webpage)
         if mobj is None:
@@ -3272,7 +3291,7 @@ class YoukuIE(InfoExtractor):
         seed = config['data'][0]['seed']
 
         format = self._downloader.params.get('format', None)
-        supported_format = config['data'][0]['streamfileids'].keys()
+        supported_format = list(config['data'][0]['streamfileids'].keys())
 
         if format is None or format == 'best':
             if 'hd2' in supported_format:
@@ -3581,7 +3600,7 @@ class JustinTVIE(InfoExtractor):
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
             return
-        
+
         response = json.loads(webpage)
         info = []
         for clip in response:
@@ -3604,7 +3623,7 @@ class JustinTVIE(InfoExtractor):
         if mobj is None:
             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
-        
+
         api = 'http://api.justin.tv'
         video_id = mobj.group(mobj.lastindex)
         paged = False
@@ -3614,9 +3633,9 @@ class JustinTVIE(InfoExtractor):
         else:
             api += '/clip/show/%s.json'
         api = api % (video_id,)
-        
+
         self.report_extraction(video_id)
-        
+
         info = []
         offset = 0
         limit = self._JUSTIN_PAGE_LIMIT
@@ -3630,3 +3649,188 @@ class JustinTVIE(InfoExtractor):
                 break
             offset += limit
         return info
+
+class FunnyOrDieIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P[0-9a-f]+)/.*$'
+    IE_NAME = u'FunnyOrDie'
+
+    def report_extraction(self, video_id):
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group('id')
+        self.report_extraction(video_id)
+        try:
+            urlh = compat_urllib_request.urlopen(url)
+            webpage_bytes = urlh.read()
+            webpage = webpage_bytes.decode('utf-8', 'ignore')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            return
+
+        m = re.search(r']*>\s*]*>\s*\s+(?P.*?)</a>", webpage)
+        if not m:
+            self._downloader.trouble(u'Cannot find video title')
+        title = unescapeHTML(m.group('title'))
+
+        m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
+        if m:
+            desc = unescapeHTML(m.group('desc'))
+        else:
+            desc = None
+
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': desc,
+        }
+        return [info]
+
+class TweetReelIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?tweetreel\.com/[?](?P<id>[0-9a-z]+)$'
+
+    def report_extraction(self, video_id):
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group('id')
+        self.report_extraction(video_id)
+        try:
+            urlh = compat_urllib_request.urlopen(url)
+            webpage_bytes = urlh.read()
+            webpage = webpage_bytes.decode('utf-8', 'ignore')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            return
+
+        m = re.search(r'<div id="left" status_id="([0-9]+)">', webpage)
+        if not m:
+            self._downloader.trouble(u'ERROR: Cannot find status ID')
+        status_id = m.group(1)
+
+        m = re.search(r'<div class="tweet_text">(.*?)</div>', webpage, flags=re.DOTALL)
+        if not m:
+            self._downloader.trouble(u'WARNING: Cannot find description')
+        desc = unescapeHTML(re.sub('<a.*?</a>', '', m.group(1))).strip()
+
+        m = re.search(r'<div class="tweet_info">.*?from <a target="_blank" href="https?://twitter.com/(?P<uploader_id>.+?)">(?P<uploader>.+?)</a>', webpage, flags=re.DOTALL)
+        if not m:
+            self._downloader.trouble(u'ERROR: Cannot find uploader')
+        uploader = unescapeHTML(m.group('uploader'))
+        uploader_id = unescapeHTML(m.group('uploader_id'))
+
+        m = re.search(r'<span unixtime="([0-9]+)"', webpage)
+        if not m:
+            self._downloader.trouble(u'ERROR: Cannot find upload date')
+        upload_date = datetime.datetime.fromtimestamp(int(m.group(1))).strftime('%Y%m%d')
+
+        title = desc
+        video_url = 'http://files.tweetreel.com/video/' + status_id + '.mov'
+
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mov',
+            'title': title,
+            'description': desc,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'internal_id': status_id,
+            'upload_date': upload_date
+        }
+        return [info]
+
+class SteamIE(InfoExtractor):
+    _VALID_URL = r"""http://store.steampowered.com/
+                (?P<urltype>video|app)/ #If the page is only for videos or for a game
+                (?P<gameID>\d+)/?
+                (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
+                """
+    IE_NAME = u'Steam'
+
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
+    def report_download_video_page(self, game_id):
+        self._downloader.to_screen(u'[%s] %s: Downloading video page' % (self.IE_NAME, game_id))
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url, re.VERBOSE)
+        urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
+        gameID = m.group('gameID')
+        videourl = 'http://store.steampowered.com/video/%s/' % gameID
+        try:
+            self.report_download_video_page(gameID)
+            urlh = compat_urllib_request.urlopen(videourl)
+            webpage_bytes = urlh.read()
+            webpage = webpage_bytes.decode('utf-8', 'ignore')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            return
+        mweb = re.finditer(urlRE, webpage)
+        namesRE = r'<span class=\"title\">(?P<videoName>[\w:/\.\?=\+\s-]+)</span>'
+        titles = list(re.finditer(namesRE, webpage))
+        videos = []
+        i = 0
+        for vid in mweb:
+            video_id = vid.group('videoID')
+            title = titles[i].group('videoName')
+            video_url=vid.group('videoURL')
+            if not video_url:
+                self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
+            i += 1
+            info = {
+                'id':video_id,
+                'url':video_url,
+                'ext': 'flv',
+                'title': title
+            }
+            videos.append(info)
+        return videos
+
+class UstreamIE(InfoExtractor):
+    _VALID_URL = r'http://www.ustream.tv/recorded/(?P<videoID>\d+)'
+    IE_NAME = u'ustream'
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
+        video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
+        try:
+            urlh = compat_urllib_request.urlopen(url)
+            webpage_bytes = urlh.read()
+            webpage = webpage_bytes.decode('utf-8', 'ignore')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            return
+        m = re.search(r'data-title="(?P<title>.+)"',webpage)
+        title = m.group('title')
+        m = re.search(r'<a class="state" data-content-type="channel" data-content-id="(?P<uploader>\d+)"',webpage)
+        uploader = m.group('uploader')
+        info = {
+                'id':video_id,
+                'url':video_url,
+                'ext': 'flv',
+                'title': title,
+                'uploader': uploader
+            }
+        return [info]
+        pass