X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2FInfoExtractors.py;h=d2b9fbedcedcc91a122cdd23837efb89a198f6be;hb=be8786a6a46f43be54daa85d6c70091948175f34;hp=d02bd29f74d79abd2f194bcf60bab81e2319fa8a;hpb=7df97fb59f7994c32ecf552ee9dfec6c6be3bb1e;p=youtube-dl

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index d02bd29f7..d2b9fbedc 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -182,7 +182,7 @@ class YoutubeIE(InfoExtractor):
             end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
             caption = unescapeHTML(caption)
             caption = unescapeHTML(caption) # double cycle, intentional
-            srt += str(n) + '\n'
+            srt += str(n+1) + '\n'
             srt += start + ' --> ' + end + '\n'
             srt += caption + '\n\n'
         return srt
@@ -366,7 +366,8 @@ class YoutubeIE(InfoExtractor):
                     srt_list = urllib2.urlopen(request).read()
                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                     raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
-                srt_lang_list = re.findall(r'lang_code="([\w\-]+)"', srt_list)
+                srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
+                srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
                 if not srt_lang_list:
                     raise Trouble(u'WARNING: video has no closed captions')
                 if self._downloader.params.get('subtitleslang', False):
@@ -374,14 +375,16 @@ class YoutubeIE(InfoExtractor):
                 elif 'en' in srt_lang_list:
                     srt_lang = 'en'
                 else:
-                    srt_lang = srt_lang_list[0]
+                    srt_lang = srt_lang_list.keys()[0]
                 if not srt_lang in srt_lang_list:
                     raise Trouble(u'WARNING: no closed captions found in the specified language')
-                request = urllib2.Request('http://video.google.com/timedtext?hl=en&lang=%s&v=%s' % (srt_lang, video_id))
+                request = urllib2.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
                 try:
                     srt_xml = urllib2.urlopen(request).read()
                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                     raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
+                if not srt_xml:
+                    raise Trouble(u'WARNING: unable to download video subtitles')
                 video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
             except Trouble as trouble:
                 self._downloader.trouble(trouble[0])
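A note on the subtitle hunks above: the old code kept a flat list of lang_code values, but the new /api/timedtext endpoint takes both a lang and a name parameter, so the patch builds a lang_code -> track-name dict instead. A minimal sketch of that transformation, using a made-up track-list excerpt (the real XML is fetched from YouTube and carries more attributes):

    import re

    # Hypothetical excerpt of the caption track list.
    track_list = '<track id="0" name="" lang_code="en"/><track id="1" name="Deutsch" lang_code="de"/>'

    # Same regex as the patch: findall with two groups yields (name, lang_code) tuples.
    pairs = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', track_list)
    langs = dict((code, name) for (name, code) in pairs)
    assert langs == {'en': '', 'de': 'Deutsch'}

    # Both values then go into the request URL, as in the patched extractor.
    url = 'http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % ('de', langs['de'], 'abc123')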
@@ -442,6 +445,7 @@ class YoutubeIE(InfoExtractor):
             video_extension = self._video_extensions.get(format_param, 'flv')
 
             results.append({
+                'provider': IE_NAME,
                 'id': video_id.decode('utf-8'),
                 'url': video_real_url.decode('utf-8'),
                 'uploader': video_uploader.decode('utf-8'),
@@ -575,6 +579,7 @@ class MetacafeIE(InfoExtractor):
             video_uploader = mobj.group(1)
 
         return [{
+            'provider': IE_NAME,
             'id': video_id.decode('utf-8'),
             'url': video_url.decode('utf-8'),
             'uploader': video_uploader.decode('utf-8'),
@@ -654,6 +659,7 @@ class DailymotionIE(InfoExtractor):
             video_uploader = mobj.group(1)
 
         return [{
+            'provider': IE_NAME,
             'id': video_id.decode('utf-8'),
             'url': video_url.decode('utf-8'),
             'uploader': video_uploader.decode('utf-8'),
@@ -749,6 +755,7 @@ class GoogleIE(InfoExtractor):
             video_thumbnail = ''
 
         return [{
+            'provider': IE_NAME,
             'id': video_id.decode('utf-8'),
             'url': video_url.decode('utf-8'),
             'uploader': u'NA',
@@ -816,6 +823,7 @@ class PhotobucketIE(InfoExtractor):
         video_uploader = mobj.group(2).decode('utf-8')
 
         return [{
+            'provider': IE_NAME,
             'id': video_id.decode('utf-8'),
             'url': video_url.decode('utf-8'),
             'uploader': video_uploader,
@@ -958,6 +966,7 @@ class YahooIE(InfoExtractor):
         video_url = unescapeHTML(video_url)
 
         return [{
+            'provider': IE_NAME,
             'id': video_id.decode('utf-8'),
             'url': video_url,
             'uploader': video_uploader,
@@ -1062,6 +1071,7 @@ class VimeoIE(InfoExtractor):
                     %(video_id, sig, timestamp, quality, video_codec.upper())
 
         return [{
+            'provider': IE_NAME,
             'id': video_id,
             'url': video_url,
             'uploader': video_uploader,
@@ -1210,6 +1220,7 @@ class GenericIE(InfoExtractor):
         video_uploader = mobj.group(1).decode('utf-8')
 
         return [{
+            'provider': IE_NAME,
             'id': video_id.decode('utf-8'),
             'url': video_url.decode('utf-8'),
             'uploader': video_uploader,
@@ -1232,7 +1243,7 @@ class YoutubeSearchIE(InfoExtractor):
         InfoExtractor.__init__(self, downloader)
 
     def report_download_page(self, query, pagenum):
-        """Report attempt to download playlist page with given number."""
+        """Report attempt to download search page with given number."""
         query = query.decode(preferredencoding())
         self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
 
@@ -1468,8 +1479,8 @@ class YoutubePlaylistIE(InfoExtractor):
     _VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
     _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=PL%s&'
-    _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
+    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=(PL)?%s&'
+    _MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
     IE_NAME = u'youtube:playlist'
 
     def __init__(self, downloader=None):
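The two YoutubePlaylistIE constants above track a YouTube markup change: playlist pages now paginate with a yt-uix-pager-next button, and the list= parameter in watch links no longer always carries the PL prefix, so the video-indicator template makes it optional. A quick illustration against fabricated hrefs (not real YouTube markup):

    import re

    playlist_id = 'abcdefgh'
    indicator = r'/watch\?v=(.+?)&list=(PL)?%s&' % playlist_id

    # Matches whether or not the playlist id is PL-prefixed in the page source.
    for href in ('/watch?v=dQw4w9WgXcQ&list=PLabcdefgh&index=1',
                 '/watch?v=dQw4w9WgXcQ&list=abcdefgh&index=1'):
        m = re.search(indicator, href)
        assert m and m.group(1) == 'dQw4w9WgXcQ'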
@@ -1619,6 +1630,98 @@ class YoutubeUserIE(InfoExtractor):
             self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
 
 
+class BlipTVUserIE(InfoExtractor):
+    """Information Extractor for blip.tv users."""
+
+    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
+    _PAGE_SIZE = 12
+    IE_NAME = u'blip.tv:user'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, username, pagenum):
+        """Report attempt to download user page."""
+        self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' %
+                (self.IE_NAME, username, pagenum))
+
+    def _real_extract(self, url):
+        # Extract username
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            return
+
+        username = mobj.group(1)
+
+        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
+
+        request = urllib2.Request(url)
+
+        try:
+            page = urllib2.urlopen(request).read().decode('utf-8')
+            mobj = re.search(r'data-users-id="([^"]+)"', page)
+            page_base = page_base % mobj.group(1)
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+            return
+
+
+        # Download video ids using BlipTV Ajax calls. Result size per
+        # query is limited (currently to 12 videos) so we need to query
+        # page by page until there are no video ids - it means we got
+        # all of them.
+
+        video_ids = []
+        pagenum = 1
+
+        while True:
+            self.report_download_page(username, pagenum)
+
+            request = urllib2.Request( page_base + "&page=" + str(pagenum) )
+
+            try:
+                page = urllib2.urlopen(request).read().decode('utf-8')
+            except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                return
+
+            # Extract video identifiers
+            ids_in_page = []
+
+            for mobj in re.finditer(r'href="/([^"]+)"', page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(unescapeHTML(mobj.group(1)))
+
+            video_ids.extend(ids_in_page)
+
+            # A little optimization - if current page is not
+            # "full", ie. does not contain PAGE_SIZE video ids then
+            # we can assume that this page is the last one - there
+            # are no more ids on further pages - no need to query
+            # again.
+
+            if len(ids_in_page) < self._PAGE_SIZE:
+                break
+
+            pagenum += 1
+
+        all_ids_count = len(video_ids)
+        playliststart = self._downloader.params.get('playliststart', 1) - 1
+        playlistend = self._downloader.params.get('playlistend', -1)
+
+        if playlistend == -1:
+            video_ids = video_ids[playliststart:]
+        else:
+            video_ids = video_ids[playliststart:playlistend]
+
+        self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
+                (self.IE_NAME, username, all_ids_count, len(video_ids)))
+
+        for video_id in video_ids:
+            self._downloader.download([u'http://blip.tv/'+video_id])
+
+
 class DepositFilesIE(InfoExtractor):
     """Information extractor for depositfiles.com"""
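The paging loop in the new BlipTVUserIE is a pattern worth isolating: keep requesting pages until one comes back with fewer than _PAGE_SIZE ids, which has to be the last one. A self-contained sketch of the same logic, where fetch_page is a stand-in for the urllib2 call in the extractor:

    PAGE_SIZE = 12

    def collect_ids(fetch_page):
        """fetch_page(pagenum) -> list of ids found on that page."""
        ids = []
        pagenum = 1
        while True:
            page_ids = fetch_page(pagenum)
            ids.extend(page_ids)
            # A page that is not "full" must be the last one,
            # so there is no need to request any further pages.
            if len(page_ids) < PAGE_SIZE:
                break
            pagenum += 1
        return ids

    # e.g. collect_ids(lambda n: ['a', 'b', 'c'] if n == 1 else []) == ['a', 'b', 'c']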
@@ -1674,6 +1777,7 @@ class DepositFilesIE(InfoExtractor):
         file_title = mobj.group(1).decode('utf-8')
 
         return [{
+            'provider': IE_NAME,
             'id': file_id.decode('utf-8'),
             'url': file_url.decode('utf-8'),
             'uploader': u'NA',
@@ -1878,6 +1982,7 @@ class FacebookIE(InfoExtractor):
             video_extension = self._video_extensions.get(format_param, 'mp4')
 
             results.append({
+                'provider': IE_NAME,
                 'id': video_id.decode('utf-8'),
                 'url': video_real_url.decode('utf-8'),
                 'uploader': video_uploader.decode('utf-8'),
@@ -1917,7 +2022,7 @@ class BlipTVIE(InfoExtractor):
         else:
             cchar = '?'
         json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
-        request = urllib2.Request(json_url)
+        request = urllib2.Request(json_url.encode('utf-8'))
         self.report_extraction(mobj.group(1))
         info = None
         try:
@@ -1929,6 +2034,7 @@ class BlipTVIE(InfoExtractor):
                 ext = ext.replace('.', '')
                 self.report_direct_download(title)
                 info = {
+                    'provider': IE_NAME,
                     'id': title,
                     'url': url,
                     'title': title,
@@ -1960,6 +2066,7 @@ class BlipTVIE(InfoExtractor):
                     ext = umobj.group(1)
 
                 info = {
+                    'provider': IE_NAME,
                     'id': data['item_id'],
                     'url': video_url,
                     'uploader': data['display_name'],
@@ -1975,6 +2082,7 @@ class BlipTVIE(InfoExtractor):
                 self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
                 return
 
+        std_headers['User-Agent'] = 'iTunes/10.6.1'
         return [info]
 
 
@@ -2028,6 +2136,7 @@ class MyVideoIE(InfoExtractor):
         video_title = mobj.group(1)
 
         return [{
+            'provider': IE_NAME,
             'id': video_id,
             'url': video_url,
             'uploader': u'NA',
@@ -2154,6 +2263,7 @@ class ComedyCentralIE(InfoExtractor):
             effTitle = showId + u'-' + epTitle
             info = {
+                'provider': IE_NAME,
                 'id': shortMediaId,
                 'url': video_url,
                 'uploader': showId,
@@ -2193,12 +2303,14 @@ class EscapistIE(InfoExtractor):
         self.report_extraction(showName)
 
         try:
-            webPageBytes = urllib2.urlopen(url).read()
+            webPage = urllib2.urlopen(url)
+            webPageBytes = webPage.read()
+            m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
+            webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
             self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err))
             return
 
-        webPage = webPageBytes.decode('utf-8')
         descMatch = re.search('
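The EscapistIE hunk above stops hard-coding UTF-8 and instead honors the charset declared in the Content-Type response header, falling back to UTF-8 when none is present. A small sketch of the same decoding logic, with invented header values:

    import re

    def decode_body(body_bytes, content_type):
        m = re.match(r'text/html; charset="?([^"]+)"?', content_type)
        return body_bytes.decode(m.group(1) if m else 'utf-8')

    # decode_body(page_bytes, 'text/html; charset=iso-8859-1')  -> decoded as latin-1
    # decode_body(page_bytes, 'text/html')                      -> decoded as utf-8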