X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2FInfoExtractors.py;h=3a6e84ebb10d6e283668b32a5b24d122dd4144c9;hb=0be41ec241d8308378c134d803f6b67b93a6c8de;hp=9cfff153b12fce1c4db4c1420b6f0caab323a8c2;hpb=1ca63e3ae3f7f61a9c38e04eec421faa42a1faee;p=youtube-dl

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 9cfff153b..3a6e84ebb 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -666,7 +666,8 @@ class DailymotionIE(InfoExtractor):
         request.add_header('Cookie', 'family_filter=off')
         try:
             self.report_download_webpage(video_id)
-            webpage = compat_urllib_request.urlopen(request).read()
+            webpage_bytes = compat_urllib_request.urlopen(request).read()
+            webpage = webpage_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
             return
@@ -701,7 +702,7 @@ class DailymotionIE(InfoExtractor):
         if mobj is None:
             self._downloader.trouble(u'ERROR: unable to extract title')
             return
-        video_title = unescapeHTML(mobj.group('title').decode('utf-8'))
+        video_title = unescapeHTML(mobj.group('title'))
 
         video_uploader = None
         mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', webpage)
@@ -721,12 +722,12 @@ class DailymotionIE(InfoExtractor):
             video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
 
         return [{
-            'id':       video_id.decode('utf-8'),
-            'url':      video_url.decode('utf-8'),
-            'uploader': video_uploader.decode('utf-8'),
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': video_uploader,
             'upload_date':  video_upload_date,
             'title':    video_title,
-            'ext':      video_extension.decode('utf-8'),
+            'ext':      video_extension,
         }]
 
 
@@ -1061,7 +1062,8 @@ class VimeoIE(InfoExtractor):
         request = compat_urllib_request.Request(url, None, std_headers)
         try:
             self.report_download_webpage(video_id)
-            webpage = compat_urllib_request.urlopen(request).read()
+            webpage_bytes = compat_urllib_request.urlopen(request).read()
+            webpage = webpage_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
             return
@@ -1089,7 +1091,7 @@ class VimeoIE(InfoExtractor):
         video_thumbnail = config["video"]["thumbnail"]
 
         # Extract video description
-        video_description = get_element_by_id("description", webpage.decode('utf8'))
+        video_description = get_element_by_id("description", webpage)
         if video_description: video_description = clean_html(video_description)
         else: video_description = ''
 
@@ -1407,22 +1409,22 @@ class GenericIE(InfoExtractor):
         if mobj is None:
             self._downloader.trouble(u'ERROR: unable to extract title')
             return
-        video_title = mobj.group(1).decode('utf-8')
+        video_title = mobj.group(1)
 
         # video uploader is domain name
         mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
         if mobj is None:
             self._downloader.trouble(u'ERROR: unable to extract title')
             return
-        video_uploader = mobj.group(1).decode('utf-8')
+        video_uploader = mobj.group(1)
 
         return [{
-            'id':       video_id.decode('utf-8'),
-            'url':      video_url.decode('utf-8'),
+            'id':       video_id,
+            'url':      video_url,
             'uploader': video_uploader,
             'upload_date':  None,
             'title':    video_title,
-            'ext':      video_extension.decode('utf-8'),
+            'ext':      video_extension,
         }]
 
 
@@ -1674,7 +1676,7 @@ class YoutubePlaylistIE(InfoExtractor):
     _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
     _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
     _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&([^&"]+&)*list=.*?%s'
-    _MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
     IE_NAME = u'youtube:playlist'
 
     def __init__(self, downloader=None):
@@ -1713,7 +1715,7 @@ class YoutubePlaylistIE(InfoExtractor):
             url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
             request = compat_urllib_request.Request(url)
             try:
-                page = compat_urllib_request.urlopen(request).read()
+                page = compat_urllib_request.urlopen(request).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return
@@ -1725,10 +1727,12 @@ class YoutubePlaylistIE(InfoExtractor):
                     ids_in_page.append(mobj.group(1))
             video_ids.extend(ids_in_page)
 
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+            if self._MORE_PAGES_INDICATOR not in page:
                 break
             pagenum = pagenum + 1
 
+        total = len(video_ids)
+
         playliststart = self._downloader.params.get('playliststart', 1) - 1
         playlistend = self._downloader.params.get('playlistend', -1)
         if playlistend == -1:
@@ -1736,6 +1740,11 @@ class YoutubePlaylistIE(InfoExtractor):
         else:
             video_ids = video_ids[playliststart:playlistend]
 
+        if len(video_ids) == total:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
+        else:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+
         for id in video_ids:
             self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
         return
@@ -1746,7 +1755,7 @@ class YoutubeChannelIE(InfoExtractor):
 
     _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$"
     _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
-    _MORE_PAGES_INDICATOR = r'yt-uix-button-content">Next' # TODO
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
     IE_NAME = u'youtube:channel'
 
     def report_download_page(self, channel_id, pagenum):
@@ -1770,7 +1779,7 @@ class YoutubeChannelIE(InfoExtractor):
             url = self._TEMPLATE_URL % (channel_id, pagenum)
             request = compat_urllib_request.Request(url)
             try:
-                page = compat_urllib_request.urlopen(request).read()
+                page = compat_urllib_request.urlopen(request).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return
@@ -1782,10 +1791,12 @@ class YoutubeChannelIE(InfoExtractor):
                     ids_in_page.append(mobj.group(1))
             video_ids.extend(ids_in_page)
 
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+            if self._MORE_PAGES_INDICATOR not in page:
                 break
             pagenum = pagenum + 1
 
+        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
+
         for id in video_ids:
             self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
         return
@@ -2262,7 +2273,7 @@ class BlipTVIE(InfoExtractor):
         else:
             cchar = '?'
         json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
-        request = compat_urllib_request.Request(json_url.encode('utf-8'))
+        request = compat_urllib_request.Request(json_url)
         self.report_extraction(mobj.group(1))
         info = None
         try:
@@ -2287,7 +2298,8 @@ class BlipTVIE(InfoExtractor):
             return
         if info is None: # Regular URL
             try:
-                json_code = urlh.read()
+                json_code_bytes = urlh.read()
+                json_code = json_code_bytes.decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
                 return
@@ -2387,7 +2399,19 @@ class MyVideoIE(InfoExtractor):
 class ComedyCentralIE(InfoExtractor):
     """Information extractor for The Daily Show and Colbert Report """
 
-    _VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$'
+    # urls can be abbreviations like :thedailyshow or :colbert
+    # urls for episodes like:
+    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
+    #                     or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
+    #                     or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
+    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
+                      |(https?://)?(www\.)?
+                          (?P<showname>thedailyshow|colbertnation)\.com/
+                         (full-episodes/(?P<episode>.*)|
+                          (?P<clip>
+                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
+                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
+                     $"""
     IE_NAME = u'comedycentral'
 
     _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
@@ -2409,6 +2433,10 @@ class ComedyCentralIE(InfoExtractor):
         '400': '384x216',
     }
 
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
     def report_extraction(self, episode_id):
         self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
 
@@ -2429,7 +2457,7 @@ class ComedyCentralIE(InfoExtractor):
 
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
@@ -2439,14 +2467,21 @@ class ComedyCentralIE(InfoExtractor):
                 url = u'http://www.thedailyshow.com/full-episodes/'
             else:
                 url = u'http://www.colbertnation.com/full-episodes/'
-            mobj = re.match(self._VALID_URL, url)
+            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
             assert mobj is not None
 
-        dlNewest = not mobj.group('episode')
-        if dlNewest:
-            epTitle = mobj.group('showname')
+        if mobj.group('clip'):
+            if mobj.group('showname') == 'thedailyshow':
+                epTitle = mobj.group('tdstitle')
+            else:
+                epTitle = mobj.group('cntitle')
+            dlNewest = False
         else:
-            epTitle = mobj.group('episode')
+            dlNewest = not mobj.group('episode')
+            if dlNewest:
+                epTitle = mobj.group('showname')
+            else:
+                epTitle = mobj.group('episode')
 
         req = compat_urllib_request.Request(url)
         self.report_extraction(epTitle)
@@ -2458,7 +2493,7 @@ class ComedyCentralIE(InfoExtractor):
             return
         if dlNewest:
             url = htmlHandle.geturl()
-            mobj = re.match(self._VALID_URL, url)
+            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
             if mobj is None:
                 self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
                 return
@@ -2467,14 +2502,14 @@ class ComedyCentralIE(InfoExtractor):
                 return
             epTitle = mobj.group('episode')
 
-        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*episode.*?:.*?))"', html)
+        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', html)
Date: (.*?)'),
+            'description': _findProp(r'
(.*?)'),
+        }
+        return [info]