3 from __future__ import unicode_literals
14 from .common import InfoExtractor, SearchInfoExtractor
15 from ..jsinterp import JSInterpreter
16 from ..swfinterp import SWFInterpreter
17 from ..compat import (
20 compat_urllib_parse_unquote,
21 compat_urllib_parse_unquote_plus,
22 compat_urllib_parse_urlencode,
23 compat_urllib_parse_urlparse,
32 get_element_by_attribute,
52 class YoutubeBaseInfoExtractor(InfoExtractor):
53 """Provide base functions for Youtube extractors"""
54 _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
55 _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
56 _NETRC_MACHINE = 'youtube'
57 # If True it will raise an error if no login info is provided
58 _LOGIN_REQUIRED = False
60 def _set_language(self):
62 '.youtube.com', 'PREF', 'f1=50000000&hl=en',
63 # YouTube sets the expire time to about two months
64 expire_time=time.time() + 2 * 30 * 24 * 3600)
66 def _ids_to_results(self, ids):
68 self.url_result(vid_id, 'Youtube', video_id=vid_id)
73 Attempt to log in to YouTube.
74 True is returned if successful or skipped.
75 False is returned if login failed.
77 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
79 (username, password) = self._get_login_info()
80 # No authentication to be performed
82 if self._LOGIN_REQUIRED:
83 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
86 login_page = self._download_webpage(
87 self._LOGIN_URL, None,
88 note='Downloading login page',
89 errnote='unable to fetch login page', fatal=False)
90 if login_page is False:
93 galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
94 login_page, 'Login GALX parameter')
98 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
103 'PersistentCookie': 'yes',
105 'bgresponse': 'js_disabled',
106 'checkConnection': '',
107 'checkedDomains': 'youtube',
114 'service': 'youtube',
119 login_data = urlencode_postdata(login_form_strs)
121 req = sanitized_Request(self._LOGIN_URL, login_data)
122 login_results = self._download_webpage(
124 note='Logging in', errnote='unable to log in', fatal=False)
125 if login_results is False:
128 if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
129 raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
132 # TODO add SMS and phone call support - these require making a request and then prompting the user
134 if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
135 tfa_code = self._get_tfa_info('2-step verification code')
138 self._downloader.report_warning(
139 'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
140 '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
143 tfa_code = remove_start(tfa_code, 'G-')
145 tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
147 tfa_form_strs.update({
152 tfa_data = urlencode_postdata(tfa_form_strs)
154 tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
155 tfa_results = self._download_webpage(
157 note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
159 if tfa_results is False:
162 if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
163 self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
165 if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
166 self._downloader.report_warning('unable to log in - did the page structure change?')
168 if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
169 self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
172 if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
173 self._downloader.report_warning('unable to log in: bad username or password')
177 def _real_initialize(self):
178 if self._downloader is None:
181 if not self._login():
185 class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
186 # Extract entries from page with "Load more" button
187 def _entries(self, page, playlist_id):
188 more_widget_html = content_html = page
189 for page_num in itertools.count(1):
190 for entry in self._process_page(content_html):
193 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
197 more = self._download_json(
198 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
199 'Downloading page #%s' % page_num,
200 transform_source=uppercase_escape)
201 content_html = more['content_html']
202 if not content_html.strip():
203 # Some webpages show a "Load more" button but they don't
206 more_widget_html = more['load_more_widget_html']
209 class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
210 def _process_page(self, content):
211 for video_id, video_title in self.extract_videos_from_page(content):
212 yield self.url_result(video_id, 'Youtube', video_id, video_title)
214 def extract_videos_from_page(self, page):
217 for mobj in re.finditer(self._VIDEO_RE, page):
218 # The link with index 0 is not the first video of the playlist (not sure if still actual)
219 if 'index' in mobj.groupdict() and mobj.group('id') == '0':
221 video_id = mobj.group('id')
222 video_title = unescapeHTML(mobj.group('title'))
224 video_title = video_title.strip()
226 idx = ids_in_page.index(video_id)
227 if video_title and not titles_in_page[idx]:
228 titles_in_page[idx] = video_title
230 ids_in_page.append(video_id)
231 titles_in_page.append(video_title)
232 return zip(ids_in_page, titles_in_page)
235 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
236 def _process_page(self, content):
237 for playlist_id in orderedSet(re.findall(
238 r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
240 yield self.url_result(
241 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
243 def _real_extract(self, url):
244 playlist_id = self._match_id(url)
245 webpage = self._download_webpage(url, playlist_id)
246 title = self._og_search_title(webpage, fatal=False)
247 return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
250 class YoutubeIE(YoutubeBaseInfoExtractor):
251 IE_DESC = 'YouTube.com'
252 _VALID_URL = r"""(?x)^
254 (?:https?://|//) # http(s):// or protocol-independent URL
255 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
256 (?:www\.)?deturl\.com/www\.youtube\.com/|
257 (?:www\.)?pwnyoutube\.com/|
258 (?:www\.)?yourepeat\.com/|
259 tube\.majestyc\.net/|
260 youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
261 (?:.*?\#/)? # handle anchor (#/) redirect urls
262 (?: # the various things that can precede the ID:
263 (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
264 |(?: # or the v= param in all its forms
265 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
266 (?:\?|\#!?) # the params delimiter ? or # or #!
267 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
272 youtu\.be| # just youtu.be/xxxx
273 vid\.plus # or vid.plus/xxxx
275 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
277 )? # all until now is optional -> you can pass the naked ID
278 ([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
279 (?!.*?&list=) # combined list/video URLs are handled by the playlist IE
280 (?(1).+)? # if we found the ID, everything can follow
282 _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
284 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
285 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
286 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
287 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
288 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
289 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
290 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
291 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
292 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
293 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
294 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
295 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
296 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
297 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
298 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
299 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
300 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
301 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
305 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
306 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
307 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
308 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
309 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
310 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
311 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
313 # Apple HTTP Live Streaming
314 '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
315 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
316 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
317 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
318 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
319 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
320 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
321 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
324 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
325 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
326 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
327 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
328 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
329 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
330 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
331 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
332 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
333 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
334 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
337 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
338 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
339 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
342 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
343 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
344 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
345 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
346 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
347 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
348 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9', 'preference': -40},
349 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
350 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
351 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
352 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
353 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
354 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
355 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
356 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
357 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
358 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
359 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
360 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
361 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
362 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
363 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
366 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
367 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
369 # Dash webm audio with opus inside
370 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
371 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
372 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
375 '_rtmp': {'protocol': 'rtmp'},
377 _SUBTITLE_FORMATS = ('ttml', 'vtt')
382 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
386 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
387 'uploader': 'Philipp Hagemeister',
388 'uploader_id': 'phihag',
389 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
390 'upload_date': '20121002',
391 'license': 'Standard YouTube License',
392 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
393 'categories': ['Science & Technology'],
394 'tags': ['youtube-dl'],
396 'dislike_count': int,
402 'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
403 'note': 'Test generic use_cipher_signature video (#897)',
407 'upload_date': '20120506',
408 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
409 'alt_title': 'I Love It (feat. Charli XCX)',
410 'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
411 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
412 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
413 'iconic ep', 'iconic', 'love', 'it'],
414 'uploader': 'Icona Pop',
415 'uploader_id': 'IconaPop',
416 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IconaPop',
417 'license': 'Standard YouTube License',
418 'creator': 'Icona Pop',
422 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
423 'note': 'Test VEVO video with age protection (#956)',
427 'upload_date': '20130703',
428 'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
429 'alt_title': 'Tunnel Vision',
430 'description': 'md5:64249768eec3bc4276236606ea996373',
431 'uploader': 'justintimberlakeVEVO',
432 'uploader_id': 'justintimberlakeVEVO',
433 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
434 'license': 'Standard YouTube License',
435 'creator': 'Justin Timberlake',
440 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
441 'note': 'Embed-only video (#1746)',
445 'upload_date': '20120608',
446 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
447 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
448 'uploader': 'SET India',
449 'uploader_id': 'setindia',
450 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/setindia',
451 'license': 'Standard YouTube License',
456 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
457 'note': 'Use the first video ID in the URL',
461 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
462 'uploader': 'Philipp Hagemeister',
463 'uploader_id': 'phihag',
464 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
465 'upload_date': '20121002',
466 'license': 'Standard YouTube License',
467 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
468 'categories': ['Science & Technology'],
469 'tags': ['youtube-dl'],
471 'dislike_count': int,
474 'skip_download': True,
478 'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
479 'note': '256k DASH audio (format 141) via DASH manifest',
483 'upload_date': '20121002',
484 'uploader_id': '8KVIDEO',
485 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
487 'uploader': '8KVIDEO',
488 'license': 'Standard YouTube License',
489 'title': 'UHDTV TEST 8K VIDEO.mp4'
492 'youtube_include_dash_manifest': True,
496 # DASH manifest with encrypted signature
498 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
502 'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
503 'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
504 'uploader': 'AfrojackVEVO',
505 'uploader_id': 'AfrojackVEVO',
506 'upload_date': '20131011',
507 'license': 'Standard YouTube License',
510 'youtube_include_dash_manifest': True,
514 # JS player signature function name containing $
516 'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
520 'title': 'Taylor Swift - Shake It Off',
521 'alt_title': 'Shake It Off',
522 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
523 'uploader': 'TaylorSwiftVEVO',
524 'uploader_id': 'TaylorSwiftVEVO',
525 'upload_date': '20140818',
526 'license': 'Standard YouTube License',
527 'creator': 'Taylor Swift',
530 'youtube_include_dash_manifest': True,
536 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
540 'upload_date': '20100909',
541 'uploader': 'The Amazing Atheist',
542 'uploader_id': 'TheAmazingAtheist',
543 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
544 'license': 'Standard YouTube License',
545 'title': 'Burning Everyone\'s Koran',
546 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
549 # Normal age-gate video (No vevo, embed allowed)
551 'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
555 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
556 'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
557 'uploader': 'The Witcher',
558 'uploader_id': 'WitcherGame',
559 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
560 'upload_date': '20140605',
561 'license': 'Standard YouTube License',
565 # Age-gate video with encrypted signature
567 'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
571 'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
572 'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
573 'uploader': 'LloydVEVO',
574 'uploader_id': 'LloydVEVO',
575 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
576 'upload_date': '20110629',
577 'license': 'Standard YouTube License',
581 # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
583 'url': '__2ABJjxzNo',
587 'upload_date': '20100430',
588 'uploader_id': 'deadmau5',
589 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/deadmau5',
590 'creator': 'deadmau5',
591 'description': 'md5:12c56784b8032162bb936a5f76d55360',
592 'uploader': 'deadmau5',
593 'license': 'Standard YouTube License',
594 'title': 'Deadmau5 - Some Chords (HD)',
595 'alt_title': 'Some Chords',
597 'expected_warnings': [
598 'DASH manifest missing',
601 # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
603 'url': 'lqQg6PlCWgI',
607 'upload_date': '20150827',
608 'uploader_id': 'olympic',
609 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/olympic',
610 'license': 'Standard YouTube License',
611 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
612 'uploader': 'Olympics',
613 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
616 'skip_download': 'requires avconv',
621 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
625 'stretched_ratio': 16 / 9.,
626 'upload_date': '20110310',
627 'uploader_id': 'AllenMeow',
628 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
629 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
631 'license': 'Standard YouTube License',
632 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
635 # url_encoded_fmt_stream_map is empty string
637 'url': 'qEJwOuvDf7I',
641 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
643 'upload_date': '20150404',
644 'uploader_id': 'spbelect',
645 'uploader': 'Наблюдатели Петербурга',
648 'skip_download': 'requires avconv',
650 'skip': 'This live event has ended.',
652 # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
654 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
658 'title': 'md5:7b81415841e02ecd4313668cde88737a',
659 'description': 'md5:116377fd2963b81ec4ce64b542173306',
660 'upload_date': '20150625',
661 'uploader_id': 'dorappi2000',
662 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
663 'uploader': 'dorappi2000',
664 'license': 'Standard YouTube License',
665 'formats': 'mincount:33',
668 # DASH manifest with segment_list
670 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
671 'md5': '8ce563a1d667b599d21064e982ab9e31',
675 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
676 'uploader': 'Airtek',
677 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
678 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
679 'license': 'Standard YouTube License',
680 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
683 'youtube_include_dash_manifest': True,
684 'format': '135', # bestvideo
688 # Multifeed videos (multiple cameras), URL is for Main Camera
689 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
692 'title': 'teamPGP: Rocket League Noob Stream',
693 'description': 'md5:dc7872fb300e143831327f1bae3af010',
699 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
700 'description': 'md5:dc7872fb300e143831327f1bae3af010',
701 'upload_date': '20150721',
702 'uploader': 'Beer Games Beer',
703 'uploader_id': 'beergamesbeer',
704 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
705 'license': 'Standard YouTube License',
711 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
712 'description': 'md5:dc7872fb300e143831327f1bae3af010',
713 'upload_date': '20150721',
714 'uploader': 'Beer Games Beer',
715 'uploader_id': 'beergamesbeer',
716 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
717 'license': 'Standard YouTube License',
723 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
724 'description': 'md5:dc7872fb300e143831327f1bae3af010',
725 'upload_date': '20150721',
726 'uploader': 'Beer Games Beer',
727 'uploader_id': 'beergamesbeer',
728 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
729 'license': 'Standard YouTube License',
735 'title': 'teamPGP: Rocket League Noob Stream (zim)',
736 'description': 'md5:dc7872fb300e143831327f1bae3af010',
737 'upload_date': '20150721',
738 'uploader': 'Beer Games Beer',
739 'uploader_id': 'beergamesbeer',
740 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
741 'license': 'Standard YouTube License',
745 'skip_download': True,
749 # Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
750 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
753 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
758 'url': 'http://vid.plus/FlRa-iH7PGw',
759 'only_matching': True,
762 # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
763 # Also tests cut-off URL expansion in video description (see
764 # https://github.com/rg3/youtube-dl/issues/1892,
765 # https://github.com/rg3/youtube-dl/issues/8164)
766 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
770 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
771 'alt_title': 'Dark Walk',
772 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
773 'upload_date': '20151119',
774 'uploader_id': 'IronSoulElf',
775 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
776 'uploader': 'IronSoulElf',
777 'license': 'Standard YouTube License',
778 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
781 'skip_download': True,
785 # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
786 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
787 'only_matching': True,
790 # Video with yt:stretch=17:0
791 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
795 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
796 'description': 'md5:ee18a25c350637c8faff806845bddee9',
797 'upload_date': '20151107',
798 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
799 'uploader': 'CH GAMER DROID',
802 'skip_download': True,
806 # Video licensed under Creative Commons
807 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
811 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
812 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
813 'upload_date': '20150127',
814 'uploader_id': 'BerkmanCenter',
815 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
816 'uploader': 'BerkmanCenter',
817 'license': 'Creative Commons Attribution license (reuse allowed)',
820 'skip_download': True,
824 # Channel-like uploader_url
825 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
829 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
830 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
831 'upload_date': '20151119',
832 'uploader': 'Bernie 2016',
833 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
834 'uploader_url': 're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
835 'license': 'Creative Commons Attribution license (reuse allowed)',
838 'skip_download': True,
842 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
843 'only_matching': True,
    def __init__(self, *args, **kwargs):
        super(YoutubeIE, self).__init__(*args, **kwargs)
        # Per-instance cache of extracted signature functions so each player
        # version only has to be downloaded and parsed once.
        self._player_cache = {}
851 def report_video_info_webpage_download(self, video_id):
852 """Report attempt to download video info webpage."""
853 self.to_screen('%s: Downloading video info webpage' % video_id)
855 def report_information_extraction(self, video_id):
856 """Report attempt to extract video information."""
857 self.to_screen('%s: Extracting video information' % video_id)
859 def report_unavailable_format(self, video_id, format):
860 """Report extracted video URL."""
861 self.to_screen('%s: Format %s not available' % (video_id, format))
863 def report_rtmp_download(self):
864 """Indicate the download will use the RTMP protocol."""
865 self.to_screen('RTMP download detected')
867 def _signature_cache_id(self, example_sig):
868 """ Return a string representation of a signature """
869 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
871 def _extract_signature_function(self, video_id, player_url, example_sig):
873 r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
876 raise ExtractorError('Cannot identify player %r' % player_url)
877 player_type = id_m.group('ext')
878 player_id = id_m.group('id')
880 # Read from filesystem cache
881 func_id = '%s_%s_%s' % (
882 player_type, player_id, self._signature_cache_id(example_sig))
883 assert os.path.basename(func_id) == func_id
885 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
886 if cache_spec is not None:
887 return lambda s: ''.join(s[i] for i in cache_spec)
890 'Downloading player %s' % player_url
891 if self._downloader.params.get('verbose') else
892 'Downloading %s player %s' % (player_type, player_id)
894 if player_type == 'js':
895 code = self._download_webpage(
896 player_url, video_id,
898 errnote='Download of %s failed' % player_url)
899 res = self._parse_sig_js(code)
900 elif player_type == 'swf':
901 urlh = self._request_webpage(
902 player_url, video_id,
904 errnote='Download of %s failed' % player_url)
906 res = self._parse_sig_swf(code)
908 assert False, 'Invalid player type %r' % player_type
910 test_string = ''.join(map(compat_chr, range(len(example_sig))))
911 cache_res = res(test_string)
912 cache_spec = [ord(c) for c in cache_res]
914 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
# Print (for --youtube-print-sig-code) Python source equivalent to the
# extracted signature function, expressed as string-slice operations.
# NOTE(review): the grouping loop body is partially missing from this
# excerpt (the step detection between yields is absent).
917 def _print_sig_code(self, func, example_sig):
918 def gen_sig_code(idxs):
# Render a run of indices with constant step as a single slice expression.
919 def _genslice(start, end, step):
920 starts = '' if start == 0 else str(start)
921 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
922 steps = '' if step == 1 else (':%d' % step)
923 return 's[%s%s%s]' % (starts, ends, steps)
926 # Quelch pyflakes warnings - start will be set when step is set
927 start = '(Never used)'
928 for i, prev in zip(idxs[1:], idxs[:-1]):
932 yield _genslice(start, prev, step)
935 if i - prev in [-1, 1]:
944 yield _genslice(start, i, step)
# Record the permutation by running the function over distinct characters.
946 test_string = ''.join(map(compat_chr, range(len(example_sig))))
947 cache_res = func(test_string)
948 cache_spec = [ord(c) for c in cache_res]
949 expr_code = ' + '.join(gen_sig_code(cache_spec))
950 signature_id_tuple = '(%s)' % (
951 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
952 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
953 ' return %s\n') % (signature_id_tuple, expr_code)
954 self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
    """Locate the signature routine inside the player JS and wrap it."""
    funcname = self._search_regex(
        r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
        'Initial JS player signature function name')

    interpreter = JSInterpreter(jscode)
    sig_function = interpreter.extract_function(funcname)

    def decrypt(s):
        # The interpreted function takes its arguments as a list.
        return sig_function([s])

    return decrypt
def _parse_sig_swf(self, file_contents):
    """Extract the decipher routine from a Flash (SWF) player."""
    interpreter = SWFInterpreter(file_contents)
    decipher_class = interpreter.extract_class('SignatureDecipher')
    decipher = interpreter.extract_function(decipher_class, 'decipher')
    # The interpreted function takes its arguments as a list.
    return lambda s: decipher([s])
# NOTE(review): this excerpt is missing the `try:` opener and the final
# `return func(s)` from the original body.
972 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
973 """Turn the encrypted s field into a working signature"""
975 if player_url is None:
976 raise ExtractorError('Cannot decrypt signature without player_url')
# Player URLs are sometimes protocol-relative.
978 if player_url.startswith('//'):
979 player_url = 'https:' + player_url
# Memoize per (player, signature shape) so one player download serves
# all formats of a video.
981 player_id = (player_url, self._signature_cache_id(s))
982 if player_id not in self._player_cache:
983 func = self._extract_signature_function(
984 video_id, player_url, s
986 self._player_cache[player_id] = func
987 func = self._player_cache[player_id]
988 if self._downloader.params.get('youtube_print_sig_code'):
989 self._print_sig_code(func, s)
# Any failure (download, parse, interpretation) is surfaced with a traceback.
991 except Exception as e:
992 tb = traceback.format_exc()
993 raise ExtractorError(
994 'Signature extraction failed: ' + tb, cause=e)
# Fetch the list of uploader-provided subtitle tracks via the timedtext
# list endpoint, returning a mapping of language code -> format dicts.
# NOTE(review): the `try:` opener, the sub_lang_list initialization and
# some dict entries are missing from this excerpt.
996 def _get_subtitles(self, video_id, webpage):
998 subs_doc = self._download_xml(
999 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
1000 video_id, note=False)
# Best-effort: a failed listing only warns, it does not abort extraction.
1001 except ExtractorError as err:
1002 self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
1006 for track in subs_doc.findall('track'):
1007 lang = track.attrib['lang_code']
1008 if lang in sub_lang_list:
# One entry per configured subtitle output format (ext).
1011 for ext in self._SUBTITLE_FORMATS:
1012 params = compat_urllib_parse_urlencode({
1016 'name': track.attrib['name'].encode('utf-8'),
1018 sub_formats.append({
1019 'url': 'https://www.youtube.com/api/timedtext?' + params,
1022 sub_lang_list[lang] = sub_formats
1023 if not sub_lang_list:
1024 self._downloader.report_warning('video doesn\'t have subtitles')
1026 return sub_lang_list
# Locate and parse the ytplayer.config JSON embedded in the watch page.
# Returns the parsed dict, or None on failure (fatal=False below).
# NOTE(review): the tuple opener for `patterns` and the `if config:`
# guard are missing from this excerpt.
1028 def _get_ytplayer_config(self, video_id, webpage):
1030 # User data may contain arbitrary character sequences that may affect
1031 # JSON extraction with regex, e.g. when '};' is contained the second
1032 # regex won't capture the whole JSON. Yet working around by trying more
1033 # concrete regex first keeping in mind proper quoted string handling
1034 # to be implemented in future that will replace this workaround (see
1035 # https://github.com/rg3/youtube-dl/issues/7468,
1036 # https://github.com/rg3/youtube-dl/pull/7599)
1037 r';ytplayer\.config\s*=\s*({.+?});ytplayer',
1038 r';ytplayer\.config\s*=\s*({.+?});',
1040 config = self._search_regex(
1041 patterns, webpage, 'ytplayer.config', default=None)
# uppercase_escape decodes \UXXXX-style escapes before JSON parsing.
1043 return self._parse_json(
1044 uppercase_escape(config), video_id, fatal=False)
# Build the mapping of language -> automatic-caption formats. Two paths:
# a 'ttsurl' player arg (older pages) or caption_tracks +
# caption_translation_languages player args. Warns and falls through on
# any failure rather than aborting extraction.
# NOTE(review): several lines (returns after warnings, dict entries,
# sub_lang_list initializations) are missing from this excerpt.
1046 def _get_automatic_captions(self, video_id, webpage):
1047 """We need the webpage for getting the captions url, pass it as an
1048 argument to speed up the process."""
1049 self.to_screen('%s: Looking for automatic captions' % video_id)
1050 player_config = self._get_ytplayer_config(video_id, webpage)
1051 err_msg = 'Couldn\'t find automatic captions for %s' % video_id
1052 if not player_config:
1053 self._downloader.report_warning(err_msg)
1056 args = player_config['args']
1057 caption_url = args.get('ttsurl')
1059 timestamp = args['timestamp']
1060 # We get the available subtitles
1061 list_params = compat_urllib_parse_urlencode({
1066 list_url = caption_url + '&' + list_params
1067 caption_list = self._download_xml(list_url, video_id)
1068 original_lang_node = caption_list.find('track')
1069 if original_lang_node is None:
1070 self._downloader.report_warning('Video doesn\'t have automatic captions')
1072 original_lang = original_lang_node.attrib['lang_code']
1073 caption_kind = original_lang_node.attrib.get('kind', '')
# Translated caption targets: one URL per target language and format.
1076 for lang_node in caption_list.findall('target'):
1077 sub_lang = lang_node.attrib['lang_code']
1079 for ext in self._SUBTITLE_FORMATS:
1080 params = compat_urllib_parse_urlencode({
1081 'lang': original_lang,
1085 'kind': caption_kind,
1087 sub_formats.append({
1088 'url': caption_url + '&' + params,
1091 sub_lang_list[sub_lang] = sub_formats
1092 return sub_lang_list
1094 # Some videos don't provide ttsurl but rather caption_tracks and
1095 # caption_translation_languages (e.g. 20LmZk1hakA)
1096 caption_tracks = args['caption_tracks']
1097 caption_translation_languages = args['caption_translation_languages']
# Base caption URL comes from the 'u' field of the first caption track.
1098 caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
1099 parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
1100 caption_qs = compat_parse_qs(parsed_caption_url.query)
1103 for lang in caption_translation_languages.split(','):
1104 lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
1105 sub_lang = lang_qs.get('lc', [None])[0]
1109 for ext in self._SUBTITLE_FORMATS:
1111 'tlang': [sub_lang],
# Rebuild the URL with the amended query string (tlang/fmt per target).
1114 sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
1115 query=compat_urllib_parse_urlencode(caption_qs, True)))
1116 sub_formats.append({
1120 sub_lang_list[sub_lang] = sub_formats
1121 return sub_lang_list
1122 # An extractor error can be raised by the download process if there are
1123 # no automatic captions but there are subtitles
1124 except (KeyError, ExtractorError):
1125 self._downloader.report_warning(err_msg)
# Hit the playback-stats URL so YouTube records the video as watched.
# Best-effort: the final download is fatal=False.
# NOTE(review): the early `return`, and the qs.update(...) that injects
# ver/cpn into the query, are missing from this excerpt.
1128 def _mark_watched(self, video_id, video_info):
1129 playback_url = video_info.get('videostats_playback_base_url', [None])[0]
1130 if not playback_url:
1132 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1133 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1135 # cpn generation algorithm is reverse engineered from base.js.
1136 # In fact it works even with dummy cpn.
1137 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
# 16 pseudo-random characters from the 64-symbol alphabet (& 63 masks to 0-63).
1138 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
1144 playback_url = compat_urlparse.urlunparse(
1145 parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1147 self._download_webpage(
1148 playback_url, video_id, 'Marking watched',
1149 'Unable to mark watched', fatal=False)
# Extract the 11-character video id from a watch URL using _VALID_URL.
# NOTE(review): the @classmethod decorator, the `if mobj is None:` guard
# and the final `return video_id` are missing from this excerpt.
1152 def extract_id(cls, url):
1153 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1155 raise ExtractorError('Invalid URL: %s' % url)
1156 video_id = mobj.group(2)
# Map itag -> stream URL by parsing an HLS (m3u8) formats manifest.
# NOTE(review): the url_map initialization, the second argument of
# filter(...) and the final `return url_map` are missing from this excerpt.
1159 def _extract_from_m3u8(self, manifest_url, video_id):
# Keep only non-comment, non-empty lines: those are the media URLs.
1162 def _get_urls(_manifest):
1163 lines = _manifest.split('\n')
1164 urls = filter(lambda l: l and not l.startswith('#'),
1167 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
1168 formats_urls = _get_urls(manifest)
1169 for format_url in formats_urls:
# Each media URL carries its itag in the path, e.g. .../itag/22/...
1170 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
1171 url_map[itag] = format_url
def _extract_annotations(self, video_id):
    """Download the annotations document for the given video id."""
    annotations_url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
    return self._download_webpage(
        annotations_url, video_id,
        note='Searching for annotations.',
        errnote='Unable to download video annotations.')
# Main extraction entry point for a single video: downloads the watch
# page, gathers video_info (from the page config and/or get_video_info),
# resolves formats (RTMP / url_encoded_fmt_stream_map + adaptive_fmts /
# HLS / DASH), decrypts signatures where needed, and returns the info
# dict. Statement order is load-bearing throughout; many original lines
# are missing from this excerpt, so the visible body is not complete.
1178 def _real_extract(self, url):
1179 url, smuggled_data = unsmuggle_url(url, {})
1182 'http' if self._downloader.params.get('prefer_insecure', False)
# Parse start/end times from either the fragment or the query string.
1187 parsed_url = compat_urllib_parse_urlparse(url)
1188 for component in [parsed_url.fragment, parsed_url.query]:
1189 query = compat_parse_qs(component)
1190 if start_time is None and 't' in query:
1191 start_time = parse_duration(query['t'][0])
1192 if start_time is None and 'start' in query:
1193 start_time = parse_duration(query['start'][0])
1194 if end_time is None and 'end' in query:
1195 end_time = parse_duration(query['end'][0])
1197 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
1198 mobj = re.search(self._NEXT_URL_RE, url)
1200 url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1201 video_id = self.extract_id(url)
# Normalize the watch URL; bpctr bypasses the content-rating interstitial.
1204 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1205 video_webpage = self._download_webpage(url, video_id)
1207 # Attempt to extract SWF player URL
1208 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1209 if mobj is not None:
1210 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
# Collect every distinct DASH manifest URL seen in any video_info source.
1216 def add_dash_mpd(video_info):
1217 dash_mpd = video_info.get('dashmpd')
1218 if dash_mpd and dash_mpd[0] not in dash_mpds:
1219 dash_mpds.append(dash_mpd[0])
1222 embed_webpage = None
# Age-gated videos: fetch info via the embed page + get_video_info.
1224 if re.search(r'player-age-gate-content">', video_webpage) is not None:
1226 # We simulate the access to the video from www.youtube.com/v/{video_id}
1227 # this can be viewed without login into Youtube
1228 url = proto + '://www.youtube.com/embed/%s' % video_id
1229 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1230 data = compat_urllib_parse_urlencode({
1231 'video_id': video_id,
1232 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1233 'sts': self._search_regex(
1234 r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1236 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1237 video_info_webpage = self._download_webpage(
1238 video_info_url, video_id,
1239 note='Refetching age-gated info webpage',
1240 errnote='unable to download video info webpage')
1241 video_info = compat_parse_qs(video_info_webpage)
1242 add_dash_mpd(video_info)
1246 # Try looking directly into the video webpage
1247 ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1249 args = ytplayer_config['args']
1250 if args.get('url_encoded_fmt_stream_map'):
1251 # Convert to the same format returned by compat_parse_qs
1252 video_info = dict((k, [v]) for k, v in args.items())
1253 add_dash_mpd(video_info)
1254 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1256 if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1257 # We also try looking in get_video_info since it may contain different dashmpd
1258 # URL that points to a DASH manifest with possibly different itag set (some itags
1259 # are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
1260 # manifest pointed by get_video_info's dashmpd).
1261 # The general idea is to take a union of itags of both DASH manifests (for example
1262 # video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
1263 self.report_video_info_webpage_download(video_id)
1264 for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
1266 '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
1267 % (proto, video_id, el_type))
1268 video_info_webpage = self._download_webpage(
1270 video_id, note=False,
1271 errnote='unable to download video info webpage')
1272 get_video_info = compat_parse_qs(video_info_webpage)
1273 if get_video_info.get('use_cipher_signature') != ['True']:
1274 add_dash_mpd(get_video_info)
1276 video_info = get_video_info
1277 if 'token' in get_video_info:
1278 # Different get_video_info requests may report different results, e.g.
1279 # some may report video unavailability, but some may serve it without
1280 # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
1281 # the original webpage as well as el=info and el=embedded get_video_info
1282 # requests report video unavailability due to geo restriction while
1283 # el=detailpage succeeds and returns valid data). This is probably
1284 # due to YouTube measures against IP ranges of hosting providers.
1285 # Working around by preferring the first succeeded video_info containing
1286 # the token if no such video_info yet was found.
1287 if 'token' not in video_info:
1288 video_info = get_video_info
# A missing 'token' means the video is unavailable; report why if we can.
1290 if 'token' not in video_info:
1291 if 'reason' in video_info:
1292 if 'The uploader has not made this video available in your country.' in video_info['reason']:
1293 regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
1295 raise ExtractorError('YouTube said: This video is available in %s only' % (
1296 ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
1298 raise ExtractorError(
1299 'YouTube said: %s' % video_info['reason'][0],
1300 expected=True, video_id=video_id)
1302 raise ExtractorError(
1303 '"token" parameter not in video info for unknown reason',
1307 if 'title' in video_info:
1308 video_title = video_info['title'][0]
1310 self._downloader.report_warning('Unable to extract video title')
# Description: strip YouTube redirect-link wrappers, then fall back to
# the meta description tag.
1314 video_description = get_element_by_id("eow-description", video_webpage)
1315 if video_description:
1316 video_description = re.sub(r'''(?x)
1318 (?:[a-zA-Z-]+="[^"]+"\s+)*?
1319 (?:title|href)="([^"]+)"\s+
1320 (?:[a-zA-Z-]+="[^"]+"\s+)*?
1321 class="(?:yt-uix-redirect-link|yt-uix-sessionlink[^"]*)"[^>]*>
1324 ''', r'\1', video_description)
1325 video_description = clean_html(video_description)
1327 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
1329 video_description = unescapeHTML(fd_mobj.group(1))
1331 video_description = ''
# Multi-camera live events: return a playlist of the individual feeds
# unless --no-playlist was given or the URL was smuggled single-feed.
1333 if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
1334 if not self._downloader.params.get('noplaylist'):
1337 multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
1338 for feed in multifeed_metadata_list.split(','):
1339 # Unquote should take place before split on comma (,) since textual
1340 # fields may contain comma as well (see
1341 # https://github.com/rg3/youtube-dl/issues/8536)
1342 feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
1344 '_type': 'url_transparent',
1345 'ie_key': 'Youtube',
1347 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1348 {'force_singlefeed': True}),
1349 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1351 feed_ids.append(feed_data['id'][0])
1353 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1354 % (', '.join(feed_ids), video_id))
1355 return self.playlist_result(entries, video_id, video_title, video_description)
1356 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1358 if 'view_count' in video_info:
1359 view_count = int(video_info['view_count'][0])
1363 # Check for "rental" videos
1364 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1365 raise ExtractorError('"rental" videos not supported')
1367 # Start extracting information
1368 self.report_information_extraction(video_id)
# Uploader name is mandatory; id/url are best-effort from the page markup.
1371 if 'author' not in video_info:
1372 raise ExtractorError('Unable to extract uploader name')
1373 video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
1376 video_uploader_id = None
1377 video_uploader_url = None
1379 r'<link itemprop="url" href="(?P<uploader_url>https?://www.youtube.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
1381 if mobj is not None:
1382 video_uploader_id = mobj.group('uploader_id')
1383 video_uploader_url = mobj.group('uploader_url')
1385 self._downloader.report_warning('unable to extract uploader nickname')
1388 # We try first to get a high quality image:
1389 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
1390 video_webpage, re.DOTALL)
1391 if m_thumb is not None:
1392 video_thumbnail = m_thumb.group(1)
1393 elif 'thumbnail_url' not in video_info:
1394 self._downloader.report_warning('unable to extract video thumbnail')
1395 video_thumbnail = None
1396 else: # don't panic if we can't find it
1397 video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
# Upload date: meta tag first, then visible page text as fallback.
1400 upload_date = self._html_search_meta(
1401 'datePublished', video_webpage, 'upload date', default=None)
1403 upload_date = self._search_regex(
1404 [r'(?s)id="eow-date.*?>(.*?)</span>',
1405 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
1406 video_webpage, 'upload date', default=None)
1408 upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
1409 upload_date = unified_strdate(upload_date)
1411 video_license = self._html_search_regex(
1412 r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
1413 video_webpage, 'license', default=None)
# Music metadata box gives alt_title (track) and creator (artist).
1415 m_music = re.search(
1416 r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
1419 video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
1420 video_creator = clean_html(m_music.group('creator'))
1422 video_alt_title = video_creator = None
1424 m_cat_container = self._search_regex(
1425 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
1426 video_webpage, 'categories', default=None)
1428 category = self._html_search_regex(
1429 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
1431 video_categories = None if category is None else [category]
1433 video_categories = None
1436 unescapeHTML(m.group('content'))
1437 for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
# Like/dislike counts scraped from the sentiment buttons; None if absent.
1439 def _extract_count(count_name):
1440 return str_to_int(self._search_regex(
1441 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
1442 % re.escape(count_name),
1443 video_webpage, count_name, default=None))
1445 like_count = _extract_count('like')
1446 dislike_count = _extract_count('dislike')
1449 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1450 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1452 if 'length_seconds' not in video_info:
1453 self._downloader.report_warning('unable to extract video duration')
1454 video_duration = None
1456 video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
1459 video_annotations = None
1460 if self._downloader.params.get('writeannotations', False):
1461 video_annotations = self._extract_annotations(video_id)
# Turn an itag -> URL mapping into youtube-dl format dicts, merging in
# the static per-itag metadata table self._formats.
1463 def _map_to_format_list(urlmap):
1465 for itag, video_real_url in urlmap.items():
1468 'url': video_real_url,
1469 'player_url': player_url,
1471 if itag in self._formats:
1472 dct.update(self._formats[itag])
# Format resolution, three mutually exclusive sources below.
1476 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1477 self.report_rtmp_download()
1479 'format_id': '_rtmp',
1481 'url': video_info['conn'][0],
1482 'player_url': player_url,
1484 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1485 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1486 if 'rtmpe%3Dyes' in encoded_url_map:
1487 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
# fmt_list entries look like 'itag/WIDTHxHEIGHT/...'; build a per-itag
# resolution table to enrich the formats below.
1489 fmt_list = video_info.get('fmt_list', [''])[0]
1491 for fmt in fmt_list.split(','):
1492 spec = fmt.split('/')
1494 width_height = spec[1].split('x')
1495 if len(width_height) == 2:
1496 formats_spec[spec[0]] = {
1497 'resolution': spec[1],
1498 'width': int_or_none(width_height[0]),
1499 'height': int_or_none(width_height[1]),
1502 for url_data_str in encoded_url_map.split(','):
1503 url_data = compat_parse_qs(url_data_str)
1504 if 'itag' not in url_data or 'url' not in url_data:
1506 format_id = url_data['itag'][0]
1507 url = url_data['url'][0]
# 'sig' is a plain signature; 's' is encrypted and needs the player JS/SWF.
1509 if 'sig' in url_data:
1510 url += '&signature=' + url_data['sig'][0]
1511 elif 's' in url_data:
1512 encrypted_sig = url_data['s'][0]
1513 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
1515 jsplayer_url_json = self._search_regex(
1517 embed_webpage if age_gate else video_webpage,
1518 'JS player URL (1)', default=None)
1519 if not jsplayer_url_json and not age_gate:
1520 # We need the embed website after all
1521 if embed_webpage is None:
1522 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1523 embed_webpage = self._download_webpage(
1524 embed_url, video_id, 'Downloading embed webpage')
1525 jsplayer_url_json = self._search_regex(
1526 ASSETS_RE, embed_webpage, 'JS player URL')
1528 player_url = json.loads(jsplayer_url_json)
1529 if player_url is None:
1530 player_url_json = self._search_regex(
1531 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1532 video_webpage, 'age gate player URL')
1533 player_url = json.loads(player_url_json)
# Verbose mode: describe which player build the signature came from.
1535 if self._downloader.params.get('verbose'):
1536 if player_url is None:
1537 player_version = 'unknown'
1538 player_desc = 'unknown'
1540 if player_url.endswith('swf'):
1541 player_version = self._search_regex(
1542 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1543 'flash player', fatal=False)
1544 player_desc = 'flash player %s' % player_version
1546 player_version = self._search_regex(
1547 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
1549 'html5 player', fatal=False)
1550 player_desc = 'html5 player %s' % player_version
1552 parts_sizes = self._signature_cache_id(encrypted_sig)
1553 self.to_screen('{%s} signature length %s, %s' %
1554 (format_id, parts_sizes, player_desc))
1556 signature = self._decrypt_signature(
1557 encrypted_sig, video_id, player_url, age_gate)
1558 url += '&signature=' + signature
1559 if 'ratebypass' not in url:
1560 url += '&ratebypass=yes'
1563 'format_id': format_id,
1565 'player_url': player_url,
1567 if format_id in self._formats:
1568 dct.update(self._formats[format_id])
1569 if format_id in formats_spec:
1570 dct.update(formats_spec[format_id])
1572 # Some itags are not included in DASH manifest thus corresponding formats will
1573 # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1574 # Trying to extract metadata from url_encoded_fmt_stream_map entry.
1575 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1576 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
1579 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1580 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1583 'fps': int_or_none(url_data.get('fps', [None])[0]),
1584 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1586 for key, value in more_fields.items():
# Parse codec info from the MIME type, e.g. 'video/mp4; codecs="avc1, mp4a"'.
1589 type_ = url_data.get('type', [None])[0]
1591 type_split = type_.split(';')
1592 kind_ext = type_split[0].split('/')
1593 if len(kind_ext) == 2:
1595 dct['ext'] = mimetype2ext(type_split[0])
1596 if kind in ('audio', 'video'):
1598 for mobj in re.finditer(
1599 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1600 if mobj.group('key') == 'codecs':
1601 codecs = mobj.group('val')
1604 codecs = codecs.split(',')
1605 if len(codecs) == 2:
1606 acodec, vcodec = codecs[1], codecs[0]
1608 acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
1614 elif video_info.get('hlsvp'):
1615 manifest_url = video_info['hlsvp'][0]
1616 url_map = self._extract_from_m3u8(manifest_url, video_id)
1617 formats = _map_to_format_list(url_map)
1618 # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
1619 for a_format in formats:
1620 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
1622 unavailable_message = self._html_search_regex(
1623 r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
1624 video_webpage, 'unavailable message', default=None)
1625 if unavailable_message:
1626 raise ExtractorError(unavailable_message, expected=True)
1627 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1629 # Look for the DASH manifest
1630 if self._downloader.params.get('youtube_include_dash_manifest', True):
1631 dash_mpd_fatal = True
1632 for mpd_url in dash_mpds:
# Some manifest URLs embed an encrypted signature in a /s/ path segment.
1635 def decrypt_sig(mobj):
1637 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
1638 return '/signature/%s' % dec_s
1640 mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
1642 for df in self._extract_mpd_formats(
1643 mpd_url, video_id, fatal=dash_mpd_fatal,
1644 formats_dict=self._formats):
1645 # Do not overwrite DASH format found in some previous DASH manifest
1646 if df['format_id'] not in dash_formats:
1647 dash_formats[df['format_id']] = df
1648 # Additional DASH manifests may end up in HTTP Error 403 therefore
1649 # allow them to fail without bug report message if we already have
1650 # some DASH manifest succeeded. This is temporary workaround to reduce
1651 # burst of bug reports until we figure out the reason and whether it
1652 # can be fixed at all.
1653 dash_mpd_fatal = False
1654 except (ExtractorError, KeyError) as e:
1655 self.report_warning(
1656 'Skipping DASH manifest: %r' % e, video_id)
1658 # Remove the formats we found through non-DASH, they
1659 # contain less info and it can be wrong, because we use
1660 # fixed values (for example the resolution). See
1661 # https://github.com/rg3/youtube-dl/issues/5774 for an
1663 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1664 formats.extend(dash_formats.values())
1666 # Check for malformed aspect ratio
1667 stretched_m = re.search(
1668 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1671 w = float(stretched_m.group('w'))
1672 h = float(stretched_m.group('h'))
1673 # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
1674 # We will only process correct ratios.
1678 if f.get('vcodec') != 'none':
1679 f['stretched_ratio'] = ratio
1681 self._sort_formats(formats)
1683 self.mark_watched(video_id, video_info)
# Final info dict returned to the downloader.
1687 'uploader': video_uploader,
1688 'uploader_id': video_uploader_id,
1689 'uploader_url': video_uploader_url,
1690 'upload_date': upload_date,
1691 'license': video_license,
1692 'creator': video_creator,
1693 'title': video_title,
1694 'alt_title': video_alt_title,
1695 'thumbnail': video_thumbnail,
1696 'description': video_description,
1697 'categories': video_categories,
1699 'subtitles': video_subtitles,
1700 'automatic_captions': automatic_captions,
1701 'duration': video_duration,
1702 'age_limit': 18 if age_gate else 0,
1703 'annotations': video_annotations,
1704 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1705 'view_count': view_count,
1706 'like_count': like_count,
1707 'dislike_count': dislike_count,
1708 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1711 'start_time': start_time,
1712 'end_time': end_time,
# Extractor for YouTube playlists and mixes. Video-specific playlist URLs
# may be redirected to single-video extraction (--no-playlist); RD/UL ids
# are mixes and use a custom extraction path. Many class-body lines
# (test-dict delimiters, guards, returns) are missing from this excerpt.
1716 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
1717 IE_DESC = 'YouTube.com playlists'
1718 _VALID_URL = r"""(?x)(?:
1723 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
1724 \? (?:.*?[&;])*? (?:p|a|list)=
1728 (?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
1729 # Top tracks, they can also include dots
1734 ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
1736 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
1737 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1738 IE_NAME = 'youtube:playlist'
1740 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1742 'title': 'ytdl test PL',
1743 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1745 'playlist_count': 3,
1747 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1749 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1750 'title': 'YDL_Empty_List',
1752 'playlist_count': 0,
1754 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1755 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1757 'title': '29C3: Not my department',
1758 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1760 'playlist_count': 95,
1762 'note': 'issue #673',
1763 'url': 'PLBB231211A4F62143',
1765 'title': '[OLD]Team Fortress 2 (Class-based LP)',
1766 'id': 'PLBB231211A4F62143',
1768 'playlist_mincount': 26,
1770 'note': 'Large playlist',
1771 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
1773 'title': 'Uploads from Cauchemar',
1774 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
1776 'playlist_mincount': 799,
1778 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1780 'title': 'YDL_safe_search',
1781 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1783 'playlist_count': 2,
1786 'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1787 'playlist_count': 4,
1790 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1793 'note': 'Embedded SWF player',
1794 'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
1795 'playlist_count': 4,
1798 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
1801 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
1802 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
1804 'title': 'Uploads from Interstellar Movie',
1805 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
# NOTE(review): 'playlist_mincout' below looks like a typo for
# 'playlist_mincount' — verify against the test runner's expected keys.
1807 'playlist_mincout': 21,
1810 def _real_initialize(self):
# Mixes are generated per-video; scrape the watch page rather than the
# playlist endpoint.
1813 def _extract_mix(self, playlist_id):
1814 # The mixes are generated from a single video
1815 # the id of the playlist is just 'RD' + video_id
1816 url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
1817 webpage = self._download_webpage(
1818 url, playlist_id, 'Downloading Youtube mix')
1819 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
1821 search_title('playlist-title') or
1822 search_title('title long-title') or
1823 search_title('title'))
1824 title = clean_html(title_span)
# orderedSet deduplicates while preserving first-seen order.
1825 ids = orderedSet(re.findall(
1826 r'''(?xs)data-video-username=".*?".*?
1827 href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
1829 url_results = self._ids_to_results(ids)
1831 return self.playlist_result(url_results, playlist_id, title)
# Regular playlists: download the playlist page, surface YouTube alert
# messages as errors/warnings, then paginate via _entries.
1833 def _extract_playlist(self, playlist_id):
1834 url = self._TEMPLATE_URL % playlist_id
1835 page = self._download_webpage(url, playlist_id)
1837 for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
1838 match = match.strip()
1839 # Check if the playlist exists or is private
1840 if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
1841 raise ExtractorError(
1842 'The playlist doesn\'t exist or is private, use --username or '
1843 '--netrc to access it.',
1845 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
1846 raise ExtractorError(
1847 'Invalid parameters. Maybe URL is incorrect.',
1849 elif re.match(r'[^<]*Choose your language[^<]*', match):
1852 self.report_warning('Youtube gives an alert message: ' + match)
1854 playlist_title = self._html_search_regex(
1855 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
1858 return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
# If the URL also names a single video and --no-playlist was given,
# return a single-video result instead of the playlist.
1860 def _check_download_just_video(self, url, playlist_id):
1861 # Check if it's a video-specific URL
1862 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
1863 if 'v' in query_dict:
1864 video_id = query_dict['v'][0]
1865 if self._downloader.params.get('noplaylist'):
1866 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1867 return self.url_result(video_id, 'Youtube', video_id=video_id)
1869 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
1871 def _real_extract(self, url):
1872 # Extract playlist id
1873 mobj = re.match(self._VALID_URL, url)
1875 raise ExtractorError('Invalid URL: %s' % url)
1876 playlist_id = mobj.group(1) or mobj.group(2)
1878 video = self._check_download_just_video(url, playlist_id)
1882 if playlist_id.startswith('RD') or playlist_id.startswith('UL'):
1883 # Mixes require a custom extraction process
1884 return self._extract_mix(playlist_id)
1886 return self._extract_playlist(playlist_id)
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
    """Extractor for /channel/<id> pages."""
    IE_DESC = 'YouTube.com channels'
    _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
    _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
    _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
    IE_NAME = 'youtube:channel'
    _TESTS = [{
        'note': 'paginated channel',
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
        'playlist_mincount': 91,
        'info_dict': {
            'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'Uploads from lex will',
        },
    }, {
        'note': 'Age restricted channel',
        # from https://www.youtube.com/user/DeusExOfficial
        'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
        'playlist_mincount': 64,
        'info_dict': {
            'id': 'UUs0ifCMCm1icqRbqhUINa0w',
            'title': 'Uploads from Deus Ex',
        },
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific playlists/live extractors.
        return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
                else super(YoutubeChannelIE, cls).suitable(url))

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        url = self._TEMPLATE_URL % channel_id

        # Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
        # Workaround by extracting as a playlist if managed to obtain channel playlist URL
        # otherwise fallback on channel by page extraction
        channel_page = self._download_webpage(
            url + '?view=57', channel_id,
            'Downloading channel page', fatal=False)
        if channel_page is False:
            channel_playlist_id = False
        else:
            channel_playlist_id = self._html_search_meta(
                'channelId', channel_page, 'channel id', default=None)
            if not channel_playlist_id:
                channel_playlist_id = self._search_regex(
                    r'data-(?:channel-external-|yt)id="([^"]+)"',
                    channel_page, 'channel id', default=None)
        if channel_playlist_id and channel_playlist_id.startswith('UC'):
            # 'UU' + channel tail is the canonical uploads playlist id.
            playlist_id = 'UU' + channel_playlist_id[2:]
            return self.url_result(
                compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')

        channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
        autogenerated = re.search(r'''(?x)
                class="[^"]*?(?:
                    channel-header-autogenerated-label|
                    yt-channel-title-autogenerated
                )[^"]*"''', channel_page) is not None

        if autogenerated:
            # The videos are contained in a single page
            # the ajax pages can't be used, they are empty
            entries = [
                self.url_result(
                    video_id, 'Youtube', video_id=video_id,
                    video_title=video_title)
                for video_id, video_title in self.extract_videos_from_page(channel_page)]
            return self.playlist_result(entries, channel_id)

        return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
    """Extractor for /user/<name> pages and the ytuser: keyword."""
    IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
    IE_NAME = 'youtube:user'
    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
        'playlist_mincount': 320,
        'info_dict': {
            'title': 'TheLinuxFoundation',
        },
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with another youtube
        # extractor: this regex is too permissive and it would match anyway.
        other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_ies):
            return False
        else:
            return super(YoutubeUserIE, cls).suitable(url)
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
    """Extractor for /user/<x>/live and /channel/<x>/live URLs."""
    IE_DESC = 'YouTube.com live streams'
    _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+))/live'
    IE_NAME = 'youtube:live'

    _TESTS = [{
        'url': 'http://www.youtube.com/user/TheYoungTurks/live',
        'info_dict': {
            'id': 'a48o2S1cPoo',
            'ext': 'mp4',
            'title': 'The Young Turks - Live Main Show',
            'uploader': 'The Young Turks',
            'uploader_id': 'TheYoungTurks',
            'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
            'upload_date': '20150715',
            'license': 'Standard YouTube License',
            'description': 'md5:438179573adcdff3c97ebb1ee632b891',
            'categories': ['News & Politics'],
            'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Resolve a /live URL to the ongoing stream, else to the channel."""
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        base_url = mobj.group('base_url')
        webpage = self._download_webpage(url, channel_id, fatal=False)
        if webpage:
            page_type = self._og_search_property(
                'type', webpage, 'page type', default=None)
            video_id = self._html_search_meta(
                'videoId', webpage, 'video id', default=None)
            # Only hand off to YoutubeIE when the page really is a video page
            # and exposes a well-formed 11-character video id.
            if page_type == 'video' and video_id and re.match(r'^[0-9A-Za-z_-]{11}$', video_id):
                return self.url_result(video_id, YoutubeIE.ie_key())
        # No live stream right now: fall back to the channel/user page.
        return self.url_result(base_url)
class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
    """Extractor for the /playlists tab of a user or channel."""
    IE_DESC = 'YouTube.com user/channel playlists'
    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
    IE_NAME = 'youtube:playlists'

    _TESTS = [{
        'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
        'playlist_mincount': 4,
        'info_dict': {
            'id': 'ThirstForScience',
            'title': 'Thirst for Science',
        },
    }, {
        # with "Load more" button
        'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
        'playlist_mincount': 70,
        'info_dict': {
            'id': 'igorkle1',
            'title': 'Игорь Клейнер',
        },
    }, {
        'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
        'playlist_mincount': 17,
        'info_dict': {
            'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
            'title': 'Chem Player',
        },
    }]
class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
    """Extractor for the ytsearch: keyword."""
    IE_DESC = 'YouTube.com searches'
    # there doesn't appear to be a real limit, for example if you search for
    # 'python' you get more than 8.000.000 results
    _MAX_RESULTS = float('inf')
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'
    _EXTRA_QUERY_ARGS = {}
    _TESTS = []

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        videos = []
        limit = n

        for pagenum in itertools.count(1):
            url_query = {
                'search_query': query.encode('utf-8'),
                'page': pagenum,
                'spf': 'navigate',
            }
            # Subclasses (e.g. date-ordered search) inject extra parameters.
            url_query.update(self._EXTRA_QUERY_ARGS)
            result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
            data = self._download_json(
                result_url, video_id='query "%s"' % query,
                note='Downloading page %s' % pagenum,
                errnote='Unable to download API page')
            html_content = data[1]['body']['content']

            if 'class="search-message' in html_content:
                raise ExtractorError(
                    '[youtube] No video results', expected=True)

            new_videos = self._ids_to_results(orderedSet(re.findall(
                r'href="/watch\?v=(.{11})', html_content)))
            videos += new_videos
            # Stop on an empty page or once enough results were collected.
            if not new_videos or len(videos) > limit:
                break

        # We may have fetched slightly more than requested; trim to n.
        if len(videos) > n:
            videos = videos[:n]
        return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
    """ytsearchdate: — same as ytsearch:, but newest uploads come first."""
    _SEARCH_KEY = 'ytsearchdate'
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    IE_DESC = 'YouTube.com searches, newest videos first'
    # Extra query parameter that makes YouTube order results by upload date.
    _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(InfoExtractor):
    """Extractor for /results search URLs (scrapes the result page HTML)."""
    IE_DESC = 'YouTube.com search URLs'
    IE_NAME = 'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'title': 'youtube-dl test video',
        },
    }, {
        'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        query = compat_urllib_parse_unquote_plus(mobj.group('query'))

        webpage = self._download_webpage(url, query)
        result_code = self._search_regex(
            r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')

        part_codes = re.findall(
            r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code)
        entries = []
        for part_code in part_codes:
            # Title is best-effort (fatal=False): some lockups lack one.
            part_title = self._html_search_regex(
                [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
            part_url_snippet = self._html_search_regex(
                r'(?s)href="([^"]+)"', part_code, 'item URL')
            part_url = compat_urlparse.urljoin(
                'https://www.youtube.com/', part_url_snippet)
            entries.append({
                '_type': 'url',
                'url': part_url,
                'title': part_title,
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': query,
        }
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
    """Extractor for /show/<name> pages (one playlist per season)."""
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'https://www.youtube.com/show/airdisasters',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        },
    }]

    def _real_extract(self, url):
        # A show is just the /playlists listing of its seasons; delegate to
        # the playlists base extractor with the rewritten URL.
        playlist_id = self._match_id(url)
        return super(YoutubeShowIE, self)._real_extract(
            'https://www.youtube.com/show/%s/playlists' % playlist_id)
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for feed extractors
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    _LOGIN_REQUIRED = True

    @property
    def IE_NAME(self):
        return 'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        page = self._download_webpage(
            'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)

        # The extraction process is the same as for playlists, but the regex
        # for the video ids doesn't contain an index
        ids = []
        more_widget_html = content_html = page
        for page_num in itertools.count(1):
            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)

            # 'recommended' feed has infinite 'load more' and each new portion spins
            # the same videos in (sometimes) slightly different order, so we'll check
            # for unicity and break when portion has no new videos
            # NOTE: materialize into a list -- on Python 3 a filter() object is
            # always truthy and single-use, which would break both the
            # emptiness check and the extend() below.
            new_ids = [video_id for video_id in orderedSet(matches) if video_id not in ids]
            if not new_ids:
                break

            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        return self.playlist_result(
            self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
    """:ytwatchlater — the authenticated user's Watch Later list ('WL')."""
    IE_NAME = 'youtube:watchlater'
    IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'

    _TESTS = [{
        'url': 'https://www.youtube.com/playlist?list=WL',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Respect --no-playlist for watch URLs carrying list=WL.
        video = self._check_download_just_video(url, 'WL')
        if video:
            return video
        return self._extract_playlist('WL')
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    """:ytfav — resolve the logged-in user's favourites to their playlist."""
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        # The favourites page embeds the id of its backing playlist; scrape it
        # and let YoutubePlaylistIE do the actual extraction.
        favourites_page = self._download_webpage(
            'https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        list_id = self._search_regex(
            r'list=(.+?)["&]', favourites_page, 'favourites playlist id')
        return self.url_result(list_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    """:ytrec — the authenticated user's recommended-videos feed."""
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = 'Youtube Recommended videos'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    """:ytsubs — the authenticated user's subscriptions feed."""
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = 'Youtube Subscriptions'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    """:ythistory — the authenticated user's watch-history feed."""
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    # Raw string added: '\.' is an invalid escape in a plain literal and every
    # sibling feed extractor declares _VALID_URL with the r'' prefix.
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    _PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
    """Catch watch URLs whose v= parameter was lost (usually unquoted '&')."""
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''

    _TESTS = [{
        'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'http://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # There is nothing to extract; tell the user how to fix their command.
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)
2335 class YoutubeTruncatedIDIE(InfoExtractor):
2336 IE_NAME = 'youtube:truncated_id'
2337 IE_DESC = False # Do not list
2338 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
2341 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
2342 'only_matching': True,
2345 def _real_extract(self, url):
2346 video_id = self._match_id(url)
2347 raise ExtractorError(
2348 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),