[youtube] Improve age-gated video extraction under 429 error conditions (refs #24283)
youtube-dl / youtube_dl / extractor / youtube.py
1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import random
10 import re
11 import time
12 import traceback
13
14 from .common import InfoExtractor, SearchInfoExtractor
15 from ..jsinterp import JSInterpreter
16 from ..swfinterp import SWFInterpreter
17 from ..compat import (
18     compat_chr,
19     compat_HTTPError,
20     compat_kwargs,
21     compat_parse_qs,
22     compat_urllib_parse_unquote,
23     compat_urllib_parse_unquote_plus,
24     compat_urllib_parse_urlencode,
25     compat_urllib_parse_urlparse,
26     compat_urlparse,
27     compat_str,
28 )
29 from ..utils import (
30     bool_or_none,
31     clean_html,
32     dict_get,
33     error_to_compat_str,
34     extract_attributes,
35     ExtractorError,
36     float_or_none,
37     get_element_by_attribute,
38     get_element_by_id,
39     int_or_none,
40     mimetype2ext,
41     orderedSet,
42     parse_codecs,
43     parse_duration,
44     remove_quotes,
45     remove_start,
46     smuggle_url,
47     str_or_none,
48     str_to_int,
49     try_get,
50     unescapeHTML,
51     unified_strdate,
52     unsmuggle_url,
53     uppercase_escape,
54     url_or_none,
55     urlencode_postdata,
56 )
57
58
59 class YoutubeBaseInfoExtractor(InfoExtractor):
60     """Provide base functions for Youtube extractors"""
61     _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
62     _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
63
64     _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
65     _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
66     _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
67
68     _NETRC_MACHINE = 'youtube'
69     # If True, it will raise an error if no login info is provided
70     _LOGIN_REQUIRED = False
71
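    # Known playlist ID prefixes (e.g. PL: regular playlists, UU: channel
    # uploads, LL: liked videos, RD: mixes, OLAK5uy_: auto-generated album
    # playlists)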
72     _PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
73
74     def _set_language(self):
75         self._set_cookie(
76             '.youtube.com', 'PREF', 'f1=50000000&hl=en',
77             # YouTube sets the expire time to about two months
78             expire_time=time.time() + 2 * 30 * 24 * 3600)
79
80     def _ids_to_results(self, ids):
81         return [
82             self.url_result(vid_id, 'Youtube', video_id=vid_id)
83             for vid_id in ids]
84
85     def _login(self):
86         """
87         Attempt to log in to YouTube.
88         True is returned if successful or skipped.
89         False is returned if login failed.
90
91         If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
92         """
93         username, password = self._get_login_info()
94         # No authentication to be performed
95         if username is None:
96             if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
97                 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
98             return True
99
100         login_page = self._download_webpage(
101             self._LOGIN_URL, None,
102             note='Downloading login page',
103             errnote='unable to fetch login page', fatal=False)
104         if login_page is False:
105             return False
106
107         login_form = self._hidden_inputs(login_page)
108
109         def req(url, f_req, note, errnote):
110             data = login_form.copy()
111             data.update({
112                 'pstMsg': 1,
113                 'checkConnection': 'youtube',
114                 'checkedDomains': 'youtube',
115                 'hl': 'en',
116                 'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
117                 'f.req': json.dumps(f_req),
118                 'flowName': 'GlifWebSignIn',
119                 'flowEntry': 'ServiceLogin',
120                 # TODO: reverse engineer the actual botguard identifier generation algorithm
121                 'bgRequest': '["identifier",""]',
122             })
123             return self._download_json(
124                 url, None, note=note, errnote=errnote,
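                # Google prefixes its JSON responses with an anti-XSSI guard,
                # so everything before the first '[' is stripped below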
125                 transform_source=lambda s: re.sub(r'^[^[]*', '', s),
126                 fatal=False,
127                 data=urlencode_postdata(data), headers={
128                     'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
129                     'Google-Accounts-XSRF': 1,
130                 })
131
132         def warn(message):
133             self._downloader.report_warning(message)
134
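        # The nested lists below mirror the positional 'f.req' JSON payloads
        # used by Google's web sign-in flow; most entries are opaque and are
        # passed through as-is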
135         lookup_req = [
136             username,
137             None, [], None, 'US', None, None, 2, False, True,
138             [
139                 None, None,
140                 [2, 1, None, 1,
141                  'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
142                  None, [], 4],
143                 1, [None, None, []], None, None, None, True
144             ],
145             username,
146         ]
147
148         lookup_results = req(
149             self._LOOKUP_URL, lookup_req,
150             'Looking up account info', 'Unable to look up account info')
151
152         if lookup_results is False:
153             return False
154
155         user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
156         if not user_hash:
157             warn('Unable to extract user hash')
158             return False
159
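        # Second step: post the password together with the user hash obtained
        # from the lookup request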
160         challenge_req = [
161             user_hash,
162             None, 1, None, [1, None, None, None, [password, None, True]],
163             [
164                 None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
165                 1, [None, None, []], None, None, None, True
166             ]]
167
168         challenge_results = req(
169             self._CHALLENGE_URL, challenge_req,
170             'Logging in', 'Unable to log in')
171
172         if challenge_results is False:
173             return False
174
175         login_res = try_get(challenge_results, lambda x: x[0][5], list)
176         if login_res:
177             login_msg = try_get(login_res, lambda x: x[5], compat_str)
178             warn(
179                 'Unable to log in: %s' % ('Invalid password'
180                 if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg))
181             return False
182
183         res = try_get(challenge_results, lambda x: x[0][-1], list)
184         if not res:
185             warn('Unable to extract result entry')
186             return False
187
188         login_challenge = try_get(res, lambda x: x[0][0], list)
189         if login_challenge:
190             challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
191             if challenge_str == 'TWO_STEP_VERIFICATION':
192                 # SEND_SUCCESS - TFA code has been successfully sent to phone
193                 # QUOTA_EXCEEDED - reached the limit of TFA codes
194                 status = try_get(login_challenge, lambda x: x[5], compat_str)
195                 if status == 'QUOTA_EXCEEDED':
196                     warn('Exceeded the limit of TFA codes, try again later')
197                     return False
198
199                 tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
200                 if not tl:
201                     warn('Unable to extract TL')
202                     return False
203
204                 tfa_code = self._get_tfa_info('2-step verification code')
205
206                 if not tfa_code:
207                     warn(
208                         'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
209                         '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
210                     return False
211
212                 tfa_code = remove_start(tfa_code, 'G-')
213
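                # Submit the TOTP code via the same positional 'f.req'
                # payload format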
214                 tfa_req = [
215                     user_hash, None, 2, None,
216                     [
217                         9, None, None, None, None, None, None, None,
218                         [None, tfa_code, True, 2]
219                     ]]
220
221                 tfa_results = req(
222                     self._TFA_URL.format(tl), tfa_req,
223                     'Submitting TFA code', 'Unable to submit TFA code')
224
225                 if tfa_results is False:
226                     return False
227
228                 tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
229                 if tfa_res:
230                     tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
231                     warn(
232                         'Unable to finish TFA: %s' % ('Invalid TFA code'
233                         if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg))
234                     return False
235
236                 check_cookie_url = try_get(
237                     tfa_results, lambda x: x[0][-1][2], compat_str)
238             else:
239                 CHALLENGES = {
240                     'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
241                     'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
242                     'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
243                 }
244                 challenge = CHALLENGES.get(
245                     challenge_str,
246                     '%s returned error %s.' % (self.IE_NAME, challenge_str))
247                 warn('%s\nGo to https://accounts.google.com/, log in and solve a challenge.' % challenge)
248                 return False
249         else:
250             check_cookie_url = try_get(res, lambda x: x[2], compat_str)
251
252         if not check_cookie_url:
253             warn('Unable to extract CheckCookie URL')
254             return False
255
256         check_cookie_results = self._download_webpage(
257             check_cookie_url, None, 'Checking cookie', fatal=False)
258
259         if check_cookie_results is False:
260             return False
261
262         if 'https://myaccount.google.com/' not in check_cookie_results:
263             warn('Unable to log in')
264             return False
265
266         return True
267
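    # Request the legacy (non-Polymer) layout, which the regex-based
    # extraction in this module relies on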
268     def _download_webpage_handle(self, *args, **kwargs):
269         query = kwargs.get('query', {}).copy()
270         query['disable_polymer'] = 'true'
271         kwargs['query'] = query
272         return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
273             *args, **compat_kwargs(kwargs))
274
275     def _real_initialize(self):
276         if self._downloader is None:
277             return
278         self._set_language()
279         if not self._login():
280             return
281
282
283 class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
284     # Extract entries from a page that has a "Load more" button
285     def _entries(self, page, playlist_id):
286         more_widget_html = content_html = page
287         for page_num in itertools.count(1):
288             for entry in self._process_page(content_html):
289                 yield entry
290
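            # The "Load more" button exposes the continuation URL in its
            # data-uix-load-more-href attribute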
291             mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
292             if not mobj:
293                 break
294
295             count = 0
296             retries = 3
297             while count <= retries:
298                 try:
299                     # Downloading the page may result in an intermittent 5xx HTTP error
300                     # that is usually worked around with a retry
301                     more = self._download_json(
302                         'https://youtube.com/%s' % mobj.group('more'), playlist_id,
303                         'Downloading page #%s%s'
304                         % (page_num, ' (retry #%d)' % count if count else ''),
305                         transform_source=uppercase_escape)
306                     break
307                 except ExtractorError as e:
308                     if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
309                         count += 1
310                         if count <= retries:
311                             continue
312                     raise
313
314             content_html = more['content_html']
315             if not content_html.strip():
316                 # Some webpages show a "Load more" button but don't actually
317                 # have any more videos
318                 break
319             more_widget_html = more['load_more_widget_html']
320
321
322 class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
323     def _process_page(self, content):
324         for video_id, video_title in self.extract_videos_from_page(content):
325             yield self.url_result(video_id, 'Youtube', video_id, video_title)
326
327     def extract_videos_from_page_impl(self, video_re, page, ids_in_page, titles_in_page):
328         for mobj in re.finditer(video_re, page):
329             # The link with index 0 is not the first video of the playlist (not sure if this still applies)
330             if 'index' in mobj.groupdict() and mobj.group('id') == '0':
331                 continue
332             video_id = mobj.group('id')
333             video_title = unescapeHTML(
334                 mobj.group('title')) if 'title' in mobj.groupdict() else None
335             if video_title:
336                 video_title = video_title.strip()
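            # The playlist's '► Play all' link also matches the video regex,
            # but its text is not a real video title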
337             if video_title == '► Play all':
338                 video_title = None
339             try:
340                 idx = ids_in_page.index(video_id)
341                 if video_title and not titles_in_page[idx]:
342                     titles_in_page[idx] = video_title
343             except ValueError:
344                 ids_in_page.append(video_id)
345                 titles_in_page.append(video_title)
346
347     def extract_videos_from_page(self, page):
348         ids_in_page = []
349         titles_in_page = []
350         self.extract_videos_from_page_impl(
351             self._VIDEO_RE, page, ids_in_page, titles_in_page)
352         return zip(ids_in_page, titles_in_page)
353
354
355 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
356     def _process_page(self, content):
357         for playlist_id in orderedSet(re.findall(
358                 r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
359                 content)):
360             yield self.url_result(
361                 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
362
363     def _real_extract(self, url):
364         playlist_id = self._match_id(url)
365         webpage = self._download_webpage(url, playlist_id)
366         title = self._og_search_title(webpage, fatal=False)
367         return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
368
369
370 class YoutubeIE(YoutubeBaseInfoExtractor):
371     IE_DESC = 'YouTube.com'
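    # Group 1 captures the optional URL prefix (scheme, host and path up to
    # the ID); the 11-character video ID follows, and (?(1).+)? only allows
    # trailing characters when such a prefix was matched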
372     _VALID_URL = r"""(?x)^
373                      (
374                          (?:https?://|//)                                    # http(s):// or protocol-independent URL
375                          (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
376                             (?:www\.)?deturl\.com/www\.youtube\.com/|
377                             (?:www\.)?pwnyoutube\.com/|
378                             (?:www\.)?hooktube\.com/|
379                             (?:www\.)?yourepeat\.com/|
380                             tube\.majestyc\.net/|
381                             # Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
382                             (?:(?:www|dev)\.)?invidio\.us/|
383                             (?:(?:www|no)\.)?invidiou\.sh/|
384                             (?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
385                             (?:www\.)?invidious\.kabi\.tk/|
386                             (?:www\.)?invidious\.13ad\.de/|
387                             (?:www\.)?invidious\.mastodon\.host/|
388                             (?:www\.)?invidious\.nixnet\.xyz/|
389                             (?:www\.)?invidious\.drycat\.fr/|
390                             (?:www\.)?tube\.poal\.co/|
391                             (?:www\.)?vid\.wxzm\.sx/|
392                             (?:www\.)?yt\.elukerio\.org/|
393                             (?:www\.)?yt\.lelux\.fi/|
394                             (?:www\.)?kgg2m7yk5aybusll\.onion/|
395                             (?:www\.)?qklhadlycap4cnod\.onion/|
396                             (?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
397                             (?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
398                             (?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
399                             (?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
400                             (?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
401                             youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
402                          (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
403                          (?:                                                  # the various things that can precede the ID:
404                              (?:(?:v|embed|e)/(?!videoseries))                # v/ or embed/ or e/
405                              |(?:                                             # or the v= param in all its forms
406                                  (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
407                                  (?:\?|\#!?)                                  # the params delimiter ? or # or #!
408                                  (?:.*?[&;])??                                # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
409                                  v=
410                              )
411                          ))
412                          |(?:
413                             youtu\.be|                                        # just youtu.be/xxxx
414                             vid\.plus|                                        # or vid.plus/xxxx
415                             zwearz\.com/watch|                                # or zwearz.com/watch/xxxx
416                          )/
417                          |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
418                          )
419                      )?                                                       # all until now is optional -> you can pass the naked ID
420                      ([0-9A-Za-z_-]{11})                                      # here it is! the YouTube video ID
421                      (?!.*?\blist=
422                         (?:
423                             %(playlist_id)s|                                  # combined list/video URLs are handled by the playlist IE
424                             WL                                                # WL are handled by the watch later IE
425                         )
426                      )
427                      (?(1).+)?                                                # if we found the ID, everything can follow
428                      $""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
429     _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
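    # Known itag values mapped to static format metadata, used to supplement
    # the format information reported by YouTube itself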
430     _formats = {
431         '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
432         '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
433         '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
434         '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
435         '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
436         '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
437         '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
438         '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
439         # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
440         '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
441         '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
442         '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
443         '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
444         '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
445         '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
446         '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
447         '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
448         '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
449
450
451         # 3D videos
452         '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
453         '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
454         '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
455         '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
456         '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
457         '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
458         '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
459
460         # Apple HTTP Live Streaming
461         '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
462         '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
463         '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
464         '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
465         '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
466         '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
467         '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
468         '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
469
470         # DASH mp4 video
471         '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
472         '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
473         '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
474         '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
475         '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
476         '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'},  # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
477         '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
478         '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
479         '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
480         '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
481         '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
482         '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
483
484         # DASH mp4 audio
485         '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
486         '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
487         '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
488         '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
489         '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
490         '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
491         '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
492
493         # DASH webm
494         '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
495         '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
496         '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
497         '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
498         '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
499         '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
500         '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
501         '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
502         '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
503         '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
504         '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
505         '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
506         '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
507         '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
508         '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
509         # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
510         '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
511         '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
512         '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
513         '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
514         '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
515         '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
516
517         # DASH webm audio
518         '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
519         '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
520
521         # DASH webm audio with opus inside
522         '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
523         '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
524         '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
525
526         # RTMP (unnamed)
527         '_rtmp': {'protocol': 'rtmp'},
528
529         # av01 video only formats sometimes served with "unknown" codecs
530         '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
531         '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
532         '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
533         '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
534     }
535     _SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
536
537     _GEO_BYPASS = False
538
539     IE_NAME = 'youtube'
540     _TESTS = [
541         {
542             'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
543             'info_dict': {
544                 'id': 'BaW_jenozKc',
545                 'ext': 'mp4',
546                 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
547                 'uploader': 'Philipp Hagemeister',
548                 'uploader_id': 'phihag',
549                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
550                 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
551                 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
552                 'upload_date': '20121002',
553                 'description': 'test chars:  "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
554                 'categories': ['Science & Technology'],
555                 'tags': ['youtube-dl'],
556                 'duration': 10,
557                 'view_count': int,
558                 'like_count': int,
559                 'dislike_count': int,
560                 'start_time': 1,
561                 'end_time': 9,
562             }
563         },
564         {
565             'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
566             'note': 'Test generic use_cipher_signature video (#897)',
567             'info_dict': {
568                 'id': 'UxxajLWwzqY',
569                 'ext': 'mp4',
570                 'upload_date': '20120506',
571                 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
572                 'alt_title': 'I Love It (feat. Charli XCX)',
573                 'description': 'md5:19a2f98d9032b9311e686ed039564f63',
574                 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
575                          'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
576                          'iconic ep', 'iconic', 'love', 'it'],
577                 'duration': 180,
578                 'uploader': 'Icona Pop',
579                 'uploader_id': 'IconaPop',
580                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
581                 'creator': 'Icona Pop',
582                 'track': 'I Love It (feat. Charli XCX)',
583                 'artist': 'Icona Pop',
584             }
585         },
586         {
587             'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
588             'note': 'Test VEVO video with age protection (#956)',
589             'info_dict': {
590                 'id': '07FYdnEawAQ',
591                 'ext': 'mp4',
592                 'upload_date': '20130703',
593                 'title': 'Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)',
594                 'alt_title': 'Tunnel Vision',
595                 'description': 'md5:07dab3356cde4199048e4c7cd93471e1',
596                 'duration': 419,
597                 'uploader': 'justintimberlakeVEVO',
598                 'uploader_id': 'justintimberlakeVEVO',
599                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
600                 'creator': 'Justin Timberlake',
601                 'track': 'Tunnel Vision',
602                 'artist': 'Justin Timberlake',
603                 'age_limit': 18,
604             }
605         },
606         {
607             'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
608             'note': 'Embed-only video (#1746)',
609             'info_dict': {
610                 'id': 'yZIXLfi8CZQ',
611                 'ext': 'mp4',
612                 'upload_date': '20120608',
613                 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
614                 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
615                 'uploader': 'SET India',
616                 'uploader_id': 'setindia',
617                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
618                 'age_limit': 18,
619             }
620         },
621         {
622             'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
623             'note': 'Use the first video ID in the URL',
624             'info_dict': {
625                 'id': 'BaW_jenozKc',
626                 'ext': 'mp4',
627                 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
628                 'uploader': 'Philipp Hagemeister',
629                 'uploader_id': 'phihag',
630                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
631                 'upload_date': '20121002',
632                 'description': 'test chars:  "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
633                 'categories': ['Science & Technology'],
634                 'tags': ['youtube-dl'],
635                 'duration': 10,
636                 'view_count': int,
637                 'like_count': int,
638                 'dislike_count': int,
639             },
640             'params': {
641                 'skip_download': True,
642             },
643         },
644         {
645             'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
646             'note': '256k DASH audio (format 141) via DASH manifest',
647             'info_dict': {
648                 'id': 'a9LDPn-MO4I',
649                 'ext': 'm4a',
650                 'upload_date': '20121002',
651                 'uploader_id': '8KVIDEO',
652                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
653                 'description': '',
654                 'uploader': '8KVIDEO',
655                 'title': 'UHDTV TEST 8K VIDEO.mp4'
656             },
657             'params': {
658                 'youtube_include_dash_manifest': True,
659                 'format': '141',
660             },
661             'skip': 'format 141 not served anymore',
662         },
663         # DASH manifest with encrypted signature
664         {
665             'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
666             'info_dict': {
667                 'id': 'IB3lcPjvWLA',
668                 'ext': 'm4a',
669                 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
670                 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
671                 'duration': 244,
672                 'uploader': 'AfrojackVEVO',
673                 'uploader_id': 'AfrojackVEVO',
674                 'upload_date': '20131011',
675             },
676             'params': {
677                 'youtube_include_dash_manifest': True,
678                 'format': '141/bestaudio[ext=m4a]',
679             },
680         },
681         # JS player signature function name containing $
682         {
683             'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
684             'info_dict': {
685                 'id': 'nfWlot6h_JM',
686                 'ext': 'm4a',
687                 'title': 'Taylor Swift - Shake It Off',
688                 'description': 'md5:307195cd21ff7fa352270fe884570ef0',
689                 'duration': 242,
690                 'uploader': 'TaylorSwiftVEVO',
691                 'uploader_id': 'TaylorSwiftVEVO',
692                 'upload_date': '20140818',
693             },
694             'params': {
695                 'youtube_include_dash_manifest': True,
696                 'format': '141/bestaudio[ext=m4a]',
697             },
698         },
699         # Controversial video
700         {
701             'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
702             'info_dict': {
703                 'id': 'T4XJQO3qol8',
704                 'ext': 'mp4',
705                 'duration': 219,
706                 'upload_date': '20100909',
707                 'uploader': 'Amazing Atheist',
708                 'uploader_id': 'TheAmazingAtheist',
709                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
710                 'title': 'Burning Everyone\'s Koran',
711                 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
712             }
713         },
714         # Normal age-gated video (no VEVO, embedding allowed)
715         {
716             'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
717             'info_dict': {
718                 'id': 'HtVdAasjOgU',
719                 'ext': 'mp4',
720                 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
721                 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
722                 'duration': 142,
723                 'uploader': 'The Witcher',
724                 'uploader_id': 'WitcherGame',
725                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
726                 'upload_date': '20140605',
727                 'age_limit': 18,
728             },
729         },
730         # Age-gated video with encrypted signature
731         {
732             'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
733             'info_dict': {
734                 'id': '6kLq3WMV1nU',
735                 'ext': 'mp4',
736                 'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
737                 'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
738                 'duration': 246,
739                 'uploader': 'LloydVEVO',
740                 'uploader_id': 'LloydVEVO',
741                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
742                 'upload_date': '20110629',
743                 'age_limit': 18,
744             },
745         },
746         # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
747         # YouTube Red ad is not captured for creator
748         {
749             'url': '__2ABJjxzNo',
750             'info_dict': {
751                 'id': '__2ABJjxzNo',
752                 'ext': 'mp4',
753                 'duration': 266,
754                 'upload_date': '20100430',
755                 'uploader_id': 'deadmau5',
756                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
757                 'creator': 'Dada Life, deadmau5',
758                 'description': 'md5:12c56784b8032162bb936a5f76d55360',
759                 'uploader': 'deadmau5',
760                 'title': 'Deadmau5 - Some Chords (HD)',
761                 'alt_title': 'This Machine Kills Some Chords',
762             },
763             'expected_warnings': [
764                 'DASH manifest missing',
765             ]
766         },
767         # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
768         {
769             'url': 'lqQg6PlCWgI',
770             'info_dict': {
771                 'id': 'lqQg6PlCWgI',
772                 'ext': 'mp4',
773                 'duration': 6085,
774                 'upload_date': '20150827',
775                 'uploader_id': 'olympic',
776                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
777                 'description': 'HO09  - Women -  GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
778                 'uploader': 'Olympic',
779                 'title': 'Hockey - Women -  GER-AUS - London 2012 Olympic Games',
780             },
781             'params': {
782                 'skip_download': 'requires avconv',
783             }
784         },
785         # Non-square pixels
786         {
787             'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
788             'info_dict': {
789                 'id': '_b-2C3KPAM0',
790                 'ext': 'mp4',
791                 'stretched_ratio': 16 / 9.,
792                 'duration': 85,
793                 'upload_date': '20110310',
794                 'uploader_id': 'AllenMeow',
795                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
796                 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
797                 'uploader': '孫ᄋᄅ',
798                 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
799             },
800         },
801         # url_encoded_fmt_stream_map is an empty string
802         {
803             'url': 'qEJwOuvDf7I',
804             'info_dict': {
805                 'id': 'qEJwOuvDf7I',
806                 'ext': 'webm',
807                 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
808                 'description': '',
809                 'upload_date': '20150404',
810                 'uploader_id': 'spbelect',
811                 'uploader': 'Наблюдатели Петербурга',
812             },
813             'params': {
814                 'skip_download': 'requires avconv',
815             },
816             'skip': 'This live event has ended.',
817         },
818         # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
819         {
820             'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
821             'info_dict': {
822                 'id': 'FIl7x6_3R5Y',
823                 'ext': 'webm',
824                 'title': 'md5:7b81415841e02ecd4313668cde88737a',
825                 'description': 'md5:116377fd2963b81ec4ce64b542173306',
826                 'duration': 220,
827                 'upload_date': '20150625',
828                 'uploader_id': 'dorappi2000',
829                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
830                 'uploader': 'dorappi2000',
831                 'formats': 'mincount:31',
832             },
833             'skip': 'no longer applicable',
834         },
835         # DASH manifest with segment_list
836         {
837             'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
838             'md5': '8ce563a1d667b599d21064e982ab9e31',
839             'info_dict': {
840                 'id': 'CsmdDsKjzN8',
841                 'ext': 'mp4',
842                 'upload_date': '20150501',  # According to '<meta itemprop="datePublished"', but in other places it's 20150510
843                 'uploader': 'Airtek',
844                 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
845                 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
846                 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
847             },
848             'params': {
849                 'youtube_include_dash_manifest': True,
850                 'format': '135',  # bestvideo
851             },
852             'skip': 'This live event has ended.',
853         },
854         {
855             # Multifeed videos (multiple cameras), URL is for Main Camera
856             'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
857             'info_dict': {
858                 'id': 'jqWvoWXjCVs',
859                 'title': 'teamPGP: Rocket League Noob Stream',
860                 'description': 'md5:dc7872fb300e143831327f1bae3af010',
861             },
862             'playlist': [{
863                 'info_dict': {
864                     'id': 'jqWvoWXjCVs',
865                     'ext': 'mp4',
866                     'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
867                     'description': 'md5:dc7872fb300e143831327f1bae3af010',
868                     'duration': 7335,
869                     'upload_date': '20150721',
870                     'uploader': 'Beer Games Beer',
871                     'uploader_id': 'beergamesbeer',
872                     'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
873                     'license': 'Standard YouTube License',
874                 },
875             }, {
876                 'info_dict': {
877                     'id': '6h8e8xoXJzg',
878                     'ext': 'mp4',
879                     'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
880                     'description': 'md5:dc7872fb300e143831327f1bae3af010',
881                     'duration': 7337,
882                     'upload_date': '20150721',
883                     'uploader': 'Beer Games Beer',
884                     'uploader_id': 'beergamesbeer',
885                     'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
886                     'license': 'Standard YouTube License',
887                 },
888             }, {
889                 'info_dict': {
890                     'id': 'PUOgX5z9xZw',
891                     'ext': 'mp4',
892                     'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
893                     'description': 'md5:dc7872fb300e143831327f1bae3af010',
894                     'duration': 7337,
895                     'upload_date': '20150721',
896                     'uploader': 'Beer Games Beer',
897                     'uploader_id': 'beergamesbeer',
898                     'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
899                     'license': 'Standard YouTube License',
900                 },
901             }, {
902                 'info_dict': {
903                     'id': 'teuwxikvS5k',
904                     'ext': 'mp4',
905                     'title': 'teamPGP: Rocket League Noob Stream (zim)',
906                     'description': 'md5:dc7872fb300e143831327f1bae3af010',
907                     'duration': 7334,
908                     'upload_date': '20150721',
909                     'uploader': 'Beer Games Beer',
910                     'uploader_id': 'beergamesbeer',
911                     'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
912                     'license': 'Standard YouTube License',
913                 },
914             }],
915             'params': {
916                 'skip_download': True,
917             },
918             'skip': 'This video is not available.',
919         },
920         {
921             # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
922             'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
923             'info_dict': {
924                 'id': 'gVfLd0zydlo',
925                 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
926             },
927             'playlist_count': 2,
928             'skip': 'Not multifeed anymore',
929         },
930         {
931             'url': 'https://vid.plus/FlRa-iH7PGw',
932             'only_matching': True,
933         },
934         {
935             'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
936             'only_matching': True,
937         },
938         {
939             # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
940             # Also tests cut-off URL expansion in video description (see
941             # https://github.com/ytdl-org/youtube-dl/issues/1892,
942             # https://github.com/ytdl-org/youtube-dl/issues/8164)
943             'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
944             'info_dict': {
945                 'id': 'lsguqyKfVQg',
946                 'ext': 'mp4',
947                 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
948                 'alt_title': 'Dark Walk - Position Music',
949                 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
950                 'duration': 133,
951                 'upload_date': '20151119',
952                 'uploader_id': 'IronSoulElf',
953                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
954                 'uploader': 'IronSoulElf',
955                 'creator': 'Todd Haberman,  Daniel Law Heath and Aaron Kaplan',
956                 'track': 'Dark Walk - Position Music',
957                 'artist': 'Todd Haberman,  Daniel Law Heath and Aaron Kaplan',
958                 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
959             },
960             'params': {
961                 'skip_download': True,
962             },
963         },
964         {
965             # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
966             'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
967             'only_matching': True,
968         },
969         {
970             # Video with yt:stretch=17:0
971             'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
972             'info_dict': {
973                 'id': 'Q39EVAstoRM',
974                 'ext': 'mp4',
975                 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
976                 'description': 'md5:ee18a25c350637c8faff806845bddee9',
977                 'upload_date': '20151107',
978                 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
979                 'uploader': 'CH GAMER DROID',
980             },
981             'params': {
982                 'skip_download': True,
983             },
984             'skip': 'This video does not exist.',
985         },
986         {
987             # Video licensed under Creative Commons
988             'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
989             'info_dict': {
990                 'id': 'M4gD1WSo5mA',
991                 'ext': 'mp4',
992                 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
993                 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
994                 'duration': 721,
995                 'upload_date': '20150127',
996                 'uploader_id': 'BerkmanCenter',
997                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
998                 'uploader': 'The Berkman Klein Center for Internet & Society',
999                 'license': 'Creative Commons Attribution license (reuse allowed)',
1000             },
1001             'params': {
1002                 'skip_download': True,
1003             },
1004         },
1005         {
1006             # Channel-like uploader_url
1007             'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
1008             'info_dict': {
1009                 'id': 'eQcmzGIKrzg',
1010                 'ext': 'mp4',
1011                 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
1012                 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
1013                 'duration': 4060,
1014                 'upload_date': '20151119',
1015                 'uploader': 'Bernie Sanders',
1016                 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
1017                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
1018                 'license': 'Creative Commons Attribution license (reuse allowed)',
1019             },
1020             'params': {
1021                 'skip_download': True,
1022             },
1023         },
1024         {
1025             'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
1026             'only_matching': True,
1027         },
1028         {
1029             # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
1030             'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
1031             'only_matching': True,
1032         },
1033         {
1034             # Rental video preview
1035             'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
1036             'info_dict': {
1037                 'id': 'uGpuVWrhIzE',
1038                 'ext': 'mp4',
1039                 'title': 'Piku - Trailer',
1040                 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
1041                 'upload_date': '20150811',
1042                 'uploader': 'FlixMatrix',
1043                 'uploader_id': 'FlixMatrixKaravan',
1044                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
1045                 'license': 'Standard YouTube License',
1046             },
1047             'params': {
1048                 'skip_download': True,
1049             },
1050             'skip': 'This video is not available.',
1051         },
1052         {
1053             # YouTube Red video with episode data
1054             'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
1055             'info_dict': {
1056                 'id': 'iqKdEhx-dD4',
1057                 'ext': 'mp4',
1058                 'title': 'Isolation - Mind Field (Ep 1)',
1059                 'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
1060                 'duration': 2085,
1061                 'upload_date': '20170118',
1062                 'uploader': 'Vsauce',
1063                 'uploader_id': 'Vsauce',
1064                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
1065                 'series': 'Mind Field',
1066                 'season_number': 1,
1067                 'episode_number': 1,
1068             },
1069             'params': {
1070                 'skip_download': True,
1071             },
1072             'expected_warnings': [
1073                 'Skipping DASH manifest',
1074             ],
1075         },
1076         {
1077             # The following content has been identified by the YouTube community
1078             # as inappropriate or offensive to some audiences.
1079             'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
1080             'info_dict': {
1081                 'id': '6SJNVb0GnPI',
1082                 'ext': 'mp4',
1083                 'title': 'Race Differences in Intelligence',
1084                 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
1085                 'duration': 965,
1086                 'upload_date': '20140124',
1087                 'uploader': 'New Century Foundation',
1088                 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
1089                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
1090             },
1091             'params': {
1092                 'skip_download': True,
1093             },
1094         },
1095         {
1096             # itag 212
1097             'url': '1t24XAntNCY',
1098             'only_matching': True,
1099         },
1100         {
1101             # geo restricted to JP
1102             'url': 'sJL6WA-aGkQ',
1103             'only_matching': True,
1104         },
1105         {
1106             'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
1107             'only_matching': True,
1108         },
1109         {
1110             'url': 'https://invidio.us/watch?v=BaW_jenozKc',
1111             'only_matching': True,
1112         },
1113         {
1114             # DRM protected
1115             'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
1116             'only_matching': True,
1117         },
1118         {
1119             # Video with unsupported adaptive stream type formats
1120             'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
1121             'info_dict': {
1122                 'id': 'Z4Vy8R84T1U',
1123                 'ext': 'mp4',
1124                 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
1125                 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
1126                 'duration': 433,
1127                 'upload_date': '20130923',
1128                 'uploader': 'Amelia Putri Harwita',
1129                 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
1130                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
1131                 'formats': 'maxcount:10',
1132             },
1133             'params': {
1134                 'skip_download': True,
1135                 'youtube_include_dash_manifest': False,
1136             },
1137             'skip': 'not actual anymore',
1138         },
1139         {
1140             # Youtube Music Auto-generated description
1141             'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
1142             'info_dict': {
1143                 'id': 'MgNrAu2pzNs',
1144                 'ext': 'mp4',
1145                 'title': 'Voyeur Girl',
1146                 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
1147                 'upload_date': '20190312',
1148                 'uploader': 'Stephen - Topic',
1149                 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
1150                 'artist': 'Stephen',
1151                 'track': 'Voyeur Girl',
1152                 'album': 'it\'s too much love to know my dear',
1153                 'release_date': '20190313',
1154                 'release_year': 2019,
1155             },
1156             'params': {
1157                 'skip_download': True,
1158             },
1159         },
1160         {
1161             # Youtube Music Auto-generated description
1162             # Retrieve 'artist' field from 'Artist:' in video description
1163             # when it is present on youtube music video
1164             'url': 'https://www.youtube.com/watch?v=k0jLE7tTwjY',
1165             'info_dict': {
1166                 'id': 'k0jLE7tTwjY',
1167                 'ext': 'mp4',
1168                 'title': 'Latch Feat. Sam Smith',
1169                 'description': 'md5:3cb1e8101a7c85fcba9b4fb41b951335',
1170                 'upload_date': '20150110',
1171                 'uploader': 'Various Artists - Topic',
1172                 'uploader_id': 'UCNkEcmYdjrH4RqtNgh7BZ9w',
1173                 'artist': 'Disclosure',
1174                 'track': 'Latch Feat. Sam Smith',
1175                 'album': 'Latch Featuring Sam Smith',
1176                 'release_date': '20121008',
1177                 'release_year': 2012,
1178             },
1179             'params': {
1180                 'skip_download': True,
1181             },
1182         },
1183         {
1184             # Youtube Music Auto-generated description
1185             # handle multiple artists on youtube music video
1186             'url': 'https://www.youtube.com/watch?v=74qn0eJSjpA',
1187             'info_dict': {
1188                 'id': '74qn0eJSjpA',
1189                 'ext': 'mp4',
1190                 'title': 'Eastside',
1191                 'description': 'md5:290516bb73dcbfab0dcc4efe6c3de5f2',
1192                 'upload_date': '20180710',
1193                 'uploader': 'Benny Blanco - Topic',
1194                 'uploader_id': 'UCzqz_ksRu_WkIzmivMdIS7A',
1195                 'artist': 'benny blanco, Halsey, Khalid',
1196                 'track': 'Eastside',
1197                 'album': 'Eastside',
1198                 'release_date': '20180713',
1199                 'release_year': 2018,
1200             },
1201             'params': {
1202                 'skip_download': True,
1203             },
1204         },
1205         {
1206             # Youtube Music Auto-generated description
1207             # handle youtube music video with release_year and no release_date
1208             'url': 'https://www.youtube.com/watch?v=-hcAI0g-f5M',
1209             'info_dict': {
1210                 'id': '-hcAI0g-f5M',
1211                 'ext': 'mp4',
1212                 'title': 'Put It On Me',
1213                 'description': 'md5:f6422397c07c4c907c6638e1fee380a5',
1214                 'upload_date': '20180426',
1215                 'uploader': 'Matt Maeson - Topic',
1216                 'uploader_id': 'UCnEkIGqtGcQMLk73Kp-Q5LQ',
1217                 'artist': 'Matt Maeson',
1218                 'track': 'Put It On Me',
1219                 'album': 'The Hearse',
1220                 'release_date': None,
1221                 'release_year': 2018,
1222             },
1223             'params': {
1224                 'skip_download': True,
1225             },
1226         },
1227         {
1228             'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
1229             'only_matching': True,
1230         },
1231     ]
1232
1233     def __init__(self, *args, **kwargs):
1234         super(YoutubeIE, self).__init__(*args, **kwargs)
1235         self._player_cache = {}
1236
1237     def report_video_info_webpage_download(self, video_id):
1238         """Report attempt to download video info webpage."""
1239         self.to_screen('%s: Downloading video info webpage' % video_id)
1240
1241     def report_information_extraction(self, video_id):
1242         """Report attempt to extract video information."""
1243         self.to_screen('%s: Extracting video information' % video_id)
1244
1245     def report_unavailable_format(self, video_id, format):
1246         """Report extracted video URL."""
1247         self.to_screen('%s: Format %s not available' % (video_id, format))
1248
1249     def report_rtmp_download(self):
1250         """Indicate the download will use the RTMP protocol."""
1251         self.to_screen('RTMP download detected')
1252
1253     def _signature_cache_id(self, example_sig):
1254         """ Return a string representation of a signature """
1255         return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
1256
1257     def _extract_signature_function(self, video_id, player_url, example_sig):
1258         id_m = re.match(
1259             r'.*?[-.](?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2,3}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
1260             player_url)
1261         if not id_m:
1262             raise ExtractorError('Cannot identify player %r' % player_url)
1263         player_type = id_m.group('ext')
1264         player_id = id_m.group('id')
1265
1266         # Read from filesystem cache
1267         func_id = '%s_%s_%s' % (
1268             player_type, player_id, self._signature_cache_id(example_sig))
1269         assert os.path.basename(func_id) == func_id
1270
1271         cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
1272         if cache_spec is not None:
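                 # A cached spec is just a list of source indices into the scrambled
                 # signature; a minimal sketch of how it is applied, with a made-up
                 # spec of [3, 0, 2, 1]:
                 #     >>> ''.join('abcd'[i] for i in [3, 0, 2, 1])
                 #     'dacb'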
1273             return lambda s: ''.join(s[i] for i in cache_spec)
1274
1275         download_note = (
1276             'Downloading player %s' % player_url
1277             if self._downloader.params.get('verbose') else
1278             'Downloading %s player %s' % (player_type, player_id)
1279         )
1280         if player_type == 'js':
1281             code = self._download_webpage(
1282                 player_url, video_id,
1283                 note=download_note,
1284                 errnote='Download of %s failed' % player_url)
1285             res = self._parse_sig_js(code)
1286         elif player_type == 'swf':
1287             urlh = self._request_webpage(
1288                 player_url, video_id,
1289                 note=download_note,
1290                 errnote='Download of %s failed' % player_url)
1291             code = urlh.read()
1292             res = self._parse_sig_swf(code)
1293         else:
1294             assert False, 'Invalid player type %r' % player_type
1295
1296         test_string = ''.join(map(compat_chr, range(len(example_sig))))
1297         cache_res = res(test_string)
1298         cache_spec = [ord(c) for c in cache_res]
1299
1300         self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
1301         return res
1302
1303     def _print_sig_code(self, func, example_sig):
1304         def gen_sig_code(idxs):
1305             def _genslice(start, end, step):
1306                 starts = '' if start == 0 else str(start)
1307                 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
1308                 steps = '' if step == 1 else (':%d' % step)
1309                 return 's[%s%s%s]' % (starts, ends, steps)
1310
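                 # e.g. consecutive indices 2, 3, 4, 5 render as 's[2:6]', a descending run
                 # such as 9, 8, 7 renders as 's[9:6:-1]', and isolated indices as 's[i]'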
1311             step = None
1312             # Quell pyflakes warnings - start will be set when step is set
1313             start = '(Never used)'
1314             for i, prev in zip(idxs[1:], idxs[:-1]):
1315                 if step is not None:
1316                     if i - prev == step:
1317                         continue
1318                     yield _genslice(start, prev, step)
1319                     step = None
1320                     continue
1321                 if i - prev in [-1, 1]:
1322                     step = i - prev
1323                     start = prev
1324                     continue
1325                 else:
1326                     yield 's[%d]' % prev
1327             if step is None:
1328                 yield 's[%d]' % i
1329             else:
1330                 yield _genslice(start, i, step)
1331
1332         test_string = ''.join(map(compat_chr, range(len(example_sig))))
1333         cache_res = func(test_string)
1334         cache_spec = [ord(c) for c in cache_res]
1335         expr_code = ' + '.join(gen_sig_code(cache_spec))
1336         signature_id_tuple = '(%s)' % (
1337             ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
1338         code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
1339                 '    return %s\n') % (signature_id_tuple, expr_code)
1340         self.to_screen('Extracted signature function:\n' + code)
1341
1342     def _parse_sig_js(self, jscode):
1343         funcname = self._search_regex(
1344             (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1345              r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1346              r'\b(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
1347              r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
1348              # Obsolete patterns
1349              r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1350              r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
1351              r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1352              r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1353              r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1354              r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1355              r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1356              r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
1357             jscode, 'Initial JS player signature function name', group='sig')
1358
1359         jsi = JSInterpreter(jscode)
1360         initial_function = jsi.extract_function(funcname)
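             # The extracted JS function takes the scrambled signature as its single
             # argument, hence the one-element argument list in the wrapper below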
1361         return lambda s: initial_function([s])
1362
1363     def _parse_sig_swf(self, file_contents):
1364         swfi = SWFInterpreter(file_contents)
1365         TARGET_CLASSNAME = 'SignatureDecipher'
1366         searched_class = swfi.extract_class(TARGET_CLASSNAME)
1367         initial_function = swfi.extract_function(searched_class, 'decipher')
1368         return lambda s: initial_function([s])
1369
1370     def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
1371         """Turn the encrypted s field into a working signature"""
1372
1373         if player_url is None:
1374             raise ExtractorError('Cannot decrypt signature without player_url')
1375
1376         if player_url.startswith('//'):
1377             player_url = 'https:' + player_url
1378         elif not re.match(r'https?://', player_url):
1379             player_url = compat_urlparse.urljoin(
1380                 'https://www.youtube.com', player_url)
1381         try:
1382             player_id = (player_url, self._signature_cache_id(s))
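                 # The extracted function is cached per (player URL, signature layout) so
                 # other formats of the same video can reuse it without refetching the player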
1383             if player_id not in self._player_cache:
1384                 func = self._extract_signature_function(
1385                     video_id, player_url, s
1386                 )
1387                 self._player_cache[player_id] = func
1388             func = self._player_cache[player_id]
1389             if self._downloader.params.get('youtube_print_sig_code'):
1390                 self._print_sig_code(func, s)
1391             return func(s)
1392         except Exception as e:
1393             tb = traceback.format_exc()
1394             raise ExtractorError(
1395                 'Signature extraction failed: ' + tb, cause=e)
1396
1397     def _get_subtitles(self, video_id, webpage):
1398         try:
1399             subs_doc = self._download_xml(
1400                 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
1401                 video_id, note=False)
1402         except ExtractorError as err:
1403             self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
1404             return {}
1405
1406         sub_lang_list = {}
1407         for track in subs_doc.findall('track'):
1408             lang = track.attrib['lang_code']
1409             if lang in sub_lang_list:
1410                 continue
1411             sub_formats = []
1412             for ext in self._SUBTITLE_FORMATS:
1413                 params = compat_urllib_parse_urlencode({
1414                     'lang': lang,
1415                     'v': video_id,
1416                     'fmt': ext,
1417                     'name': track.attrib['name'].encode('utf-8'),
1418                 })
1419                 sub_formats.append({
1420                     'url': 'https://www.youtube.com/api/timedtext?' + params,
1421                     'ext': ext,
1422                 })
1423             sub_lang_list[lang] = sub_formats
1424         if not sub_lang_list:
1425             self._downloader.report_warning('video doesn\'t have subtitles')
1426             return {}
1427         return sub_lang_list
1428
1429     def _get_ytplayer_config(self, video_id, webpage):
1430         patterns = (
1431             # User data may contain arbitrary character sequences that may affect
1432             # JSON extraction with regex, e.g. when '};' is contained the second
1433             # regex won't capture the whole JSON. Work around this by trying the more
1434             # concrete regex first; proper quoted string handling, to be implemented
1435             # later, should eventually replace this workaround (see
1436             # https://github.com/ytdl-org/youtube-dl/issues/7468,
1437             # https://github.com/ytdl-org/youtube-dl/pull/7599)
1438             r';ytplayer\.config\s*=\s*({.+?});ytplayer',
1439             r';ytplayer\.config\s*=\s*({.+?});',
1440         )
1441         config = self._search_regex(
1442             patterns, webpage, 'ytplayer.config', default=None)
1443         if config:
1444             return self._parse_json(
1445                 uppercase_escape(config), video_id, fatal=False)
1446
1447     def _get_automatic_captions(self, video_id, webpage):
1448         """We need the webpage for getting the captions url, pass it as an
1449            argument to speed up the process."""
1450         self.to_screen('%s: Looking for automatic captions' % video_id)
1451         player_config = self._get_ytplayer_config(video_id, webpage)
1452         err_msg = 'Couldn\'t find automatic captions for %s' % video_id
1453         if not player_config:
1454             self._downloader.report_warning(err_msg)
1455             return {}
1456         try:
1457             args = player_config['args']
1458             caption_url = args.get('ttsurl')
1459             if caption_url:
1460                 timestamp = args['timestamp']
1461                 # We get the available subtitles
1462                 list_params = compat_urllib_parse_urlencode({
1463                     'type': 'list',
1464                     'tlangs': 1,
1465                     'asrs': 1,
1466                 })
1467                 list_url = caption_url + '&' + list_params
1468                 caption_list = self._download_xml(list_url, video_id)
1469                 original_lang_node = caption_list.find('track')
1470                 if original_lang_node is None:
1471                     self._downloader.report_warning('Video doesn\'t have automatic captions')
1472                     return {}
1473                 original_lang = original_lang_node.attrib['lang_code']
1474                 caption_kind = original_lang_node.attrib.get('kind', '')
1475
1476                 sub_lang_list = {}
1477                 for lang_node in caption_list.findall('target'):
1478                     sub_lang = lang_node.attrib['lang_code']
1479                     sub_formats = []
1480                     for ext in self._SUBTITLE_FORMATS:
1481                         params = compat_urllib_parse_urlencode({
1482                             'lang': original_lang,
1483                             'tlang': sub_lang,
1484                             'fmt': ext,
1485                             'ts': timestamp,
1486                             'kind': caption_kind,
1487                         })
1488                         sub_formats.append({
1489                             'url': caption_url + '&' + params,
1490                             'ext': ext,
1491                         })
1492                     sub_lang_list[sub_lang] = sub_formats
1493                 return sub_lang_list
1494
1495             def make_captions(sub_url, sub_langs):
1496                 parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
1497                 caption_qs = compat_parse_qs(parsed_sub_url.query)
1498                 captions = {}
1499                 for sub_lang in sub_langs:
1500                     sub_formats = []
1501                     for ext in self._SUBTITLE_FORMATS:
1502                         caption_qs.update({
1503                             'tlang': [sub_lang],
1504                             'fmt': [ext],
1505                         })
1506                         sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
1507                             query=compat_urllib_parse_urlencode(caption_qs, True)))
1508                         sub_formats.append({
1509                             'url': sub_url,
1510                             'ext': ext,
1511                         })
1512                     captions[sub_lang] = sub_formats
1513                 return captions
1514
1515             # New captions format as of 22.06.2017
1516             player_response = args.get('player_response')
1517             if player_response and isinstance(player_response, compat_str):
1518                 player_response = self._parse_json(
1519                     player_response, video_id, fatal=False)
1520                 if player_response:
1521                     renderer = player_response['captions']['playerCaptionsTracklistRenderer']
1522                     base_url = renderer['captionTracks'][0]['baseUrl']
1523                     sub_lang_list = []
1524                     for lang in renderer['translationLanguages']:
1525                         lang_code = lang.get('languageCode')
1526                         if lang_code:
1527                             sub_lang_list.append(lang_code)
1528                     return make_captions(base_url, sub_lang_list)
1529
1530             # Some videos don't provide ttsurl but rather caption_tracks and
1531             # caption_translation_languages (e.g. 20LmZk1hakA)
1532             # Not used anymore as of 22.06.2017
1533             caption_tracks = args['caption_tracks']
1534             caption_translation_languages = args['caption_translation_languages']
1535             caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
1536             sub_lang_list = []
1537             for lang in caption_translation_languages.split(','):
1538                 lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
1539                 sub_lang = lang_qs.get('lc', [None])[0]
1540                 if sub_lang:
1541                     sub_lang_list.append(sub_lang)
1542             return make_captions(caption_url, sub_lang_list)
1543         # An extractor error can be raised by the download process if there are
1544         # no automatic captions but there are subtitles
1545         except (KeyError, IndexError, ExtractorError):
1546             self._downloader.report_warning(err_msg)
1547             return {}
1548
1549     def _mark_watched(self, video_id, video_info, player_response):
1550         playback_url = url_or_none(try_get(
1551             player_response,
1552             lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
1553             video_info, lambda x: x['videostats_playback_base_url'][0]))
1554         if not playback_url:
1555             return
1556         parsed_playback_url = compat_urlparse.urlparse(playback_url)
1557         qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1558
1559         # cpn generation algorithm is reverse engineered from base.js.
1560         # In fact it works even with dummy cpn.
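             # The alphabet below has 64 characters, so masking the random value with 63
             # always produces a valid index into it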
1561         CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1562         cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
1563
1564         qs.update({
1565             'ver': ['2'],
1566             'cpn': [cpn],
1567         })
1568         playback_url = compat_urlparse.urlunparse(
1569             parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1570
1571         self._download_webpage(
1572             playback_url, video_id, 'Marking watched',
1573             'Unable to mark watched', fatal=False)
1574
1575     @staticmethod
1576     def _extract_urls(webpage):
1577         # Embedded YouTube player
1578         entries = [
1579             unescapeHTML(mobj.group('url'))
1580             for mobj in re.finditer(r'''(?x)
1581             (?:
1582                 <iframe[^>]+?src=|
1583                 data-video-url=|
1584                 <embed[^>]+?src=|
1585                 embedSWF\(?:\s*|
1586                 <object[^>]+data=|
1587                 new\s+SWFObject\(
1588             )
1589             (["\'])
1590                 (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
1591                 (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
1592             \1''', webpage)]
1593
1594         # lazyYT YouTube embed
1595         entries.extend(list(map(
1596             unescapeHTML,
1597             re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
1598
1599         # Wordpress "YouTube Video Importer" plugin
1600         matches = re.findall(r'''(?x)<div[^>]+
1601             class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
1602             data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
1603         entries.extend(m[-1] for m in matches)
1604
1605         return entries
1606
1607     @staticmethod
1608     def _extract_url(webpage):
1609         urls = YoutubeIE._extract_urls(webpage)
1610         return urls[0] if urls else None
1611
1612     @classmethod
1613     def extract_id(cls, url):
1614         mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1615         if mobj is None:
1616             raise ExtractorError('Invalid URL: %s' % url)
1617         video_id = mobj.group(2)
1618         return video_id
1619
1620     @staticmethod
1621     def _extract_chapters(description, duration):
1622         if not description:
1623             return None
1624         chapter_lines = re.findall(
1625             r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
1626             description)
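             # Each match is a (chapter line, timestamp) pair taken from old-style
             # descriptions where timestamps are rendered as seekTo() links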
1627         if not chapter_lines:
1628             return None
1629         chapters = []
1630         for next_num, (chapter_line, time_point) in enumerate(
1631                 chapter_lines, start=1):
1632             start_time = parse_duration(time_point)
1633             if start_time is None:
1634                 continue
1635             if start_time > duration:
1636                 break
1637             end_time = (duration if next_num == len(chapter_lines)
1638                         else parse_duration(chapter_lines[next_num][1]))
1639             if end_time is None:
1640                 continue
1641             if end_time > duration:
1642                 end_time = duration
1643             if start_time > end_time:
1644                 break
1645             chapter_title = re.sub(
1646                 r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
1647             chapter_title = re.sub(r'\s+', ' ', chapter_title)
1648             chapters.append({
1649                 'start_time': start_time,
1650                 'end_time': end_time,
1651                 'title': chapter_title,
1652             })
1653         return chapters
1654
1655     def _real_extract(self, url):
1656         url, smuggled_data = unsmuggle_url(url, {})
1657
1658         proto = (
1659             'http' if self._downloader.params.get('prefer_insecure', False)
1660             else 'https')
1661
1662         start_time = None
1663         end_time = None
1664         parsed_url = compat_urllib_parse_urlparse(url)
1665         for component in [parsed_url.fragment, parsed_url.query]:
1666             query = compat_parse_qs(component)
1667             if start_time is None and 't' in query:
1668                 start_time = parse_duration(query['t'][0])
1669             if start_time is None and 'start' in query:
1670                 start_time = parse_duration(query['start'][0])
1671             if end_time is None and 'end' in query:
1672                 end_time = parse_duration(query['end'][0])
1673
1674         # Extract the original video URL from a redirection URL (e.g. age verification) using the next_url parameter
1675         mobj = re.search(self._NEXT_URL_RE, url)
1676         if mobj:
1677             url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1678         video_id = self.extract_id(url)
1679
1680         # Get video webpage
1681         url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1682         video_webpage = self._download_webpage(url, video_id)
1683
1684         # Attempt to extract SWF player URL
1685         mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1686         if mobj is not None:
1687             player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1688         else:
1689             player_url = None
1690
1691         dash_mpds = []
1692
1693         def add_dash_mpd(video_info):
1694             dash_mpd = video_info.get('dashmpd')
1695             if dash_mpd and dash_mpd[0] not in dash_mpds:
1696                 dash_mpds.append(dash_mpd[0])
1697
1698         def add_dash_mpd_pr(pl_response):
1699             dash_mpd = url_or_none(try_get(
1700                 pl_response, lambda x: x['streamingData']['dashManifestUrl'],
1701                 compat_str))
1702             if dash_mpd and dash_mpd not in dash_mpds:
1703                 dash_mpds.append(dash_mpd)
1704
1705         is_live = None
1706         view_count = None
1707
1708         def extract_view_count(v_info):
1709             return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
1710
1711         def extract_token(v_info):
1712             return dict_get(v_info, ('account_playback_token', 'accountPlaybackToken', 'token'))
1713
1714         def extract_player_response(player_response, video_id):
1715             pl_response = str_or_none(player_response)
1716             if not pl_response:
1717                 return
1718             pl_response = self._parse_json(pl_response, video_id, fatal=False)
1719             if isinstance(pl_response, dict):
1720                 add_dash_mpd_pr(pl_response)
1721                 return pl_response
1722
1723         player_response = {}
1724
1725         # Get video info
1726         embed_webpage = None
1727         if re.search(r'player-age-gate-content">', video_webpage) is not None:
1728             age_gate = True
1729             video_info = None
1730             # We simulate access to the video from www.youtube.com/v/{video_id};
1731             # this can be viewed without logging in to Youtube
1732             url = proto + '://www.youtube.com/embed/%s' % video_id
1733             embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1734             data = compat_urllib_parse_urlencode({
1735                 'video_id': video_id,
1736                 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1737                 'sts': self._search_regex(
1738                     r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1739             })
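                 # The sts value scraped from the embed page is passed along, presumably so
                 # that get_video_info returns stream data matching that player version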
1740             video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1741             try:
1742                 video_info_webpage = self._download_webpage(
1743                     video_info_url, video_id,
1744                     note='Refetching age-gated info webpage',
1745                     errnote='unable to download video info webpage')
1746             except ExtractorError:
1747                 video_info_webpage = None
1748             if video_info_webpage:
1749                 video_info = compat_parse_qs(video_info_webpage)
1750                 pl_response = video_info.get('player_response', [None])[0]
1751                 player_response = extract_player_response(pl_response, video_id)
1752                 add_dash_mpd(video_info)
1753                 view_count = extract_view_count(video_info)
1754         else:
1755             age_gate = False
1756             video_info = None
1757             sts = None
1758             # Try looking directly into the video webpage
1759             ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1760             if ytplayer_config:
1761                 args = ytplayer_config['args']
1762                 if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
1763                     # Convert to the same format returned by compat_parse_qs
1764                     video_info = dict((k, [v]) for k, v in args.items())
1765                     add_dash_mpd(video_info)
1766                 # Rental video is not rented but preview is available (e.g.
1767                 # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
1768                 # https://github.com/ytdl-org/youtube-dl/issues/10532)
1769                 if not video_info and args.get('ypc_vid'):
1770                     return self.url_result(
1771                         args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
1772                 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1773                     is_live = True
1774                 sts = ytplayer_config.get('sts')
1775                 if not player_response:
1776                     player_response = extract_player_response(args.get('player_response'), video_id)
1777             if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1778                 add_dash_mpd_pr(player_response)
1779                 # We also try looking in get_video_info since it may contain a different dashmpd
1780                 # URL that points to a DASH manifest with a possibly different itag set (some itags
1781                 # are missing from the DASH manifest pointed to by the webpage's dashmpd, and some
1782                 # from the manifest pointed to by get_video_info's dashmpd).
1783                 # The general idea is to take the union of itags from both DASH manifests (for an
1784                 # example of a video with such 'manifest behavior' see https://github.com/ytdl-org/youtube-dl/issues/6093)
1785                 self.report_video_info_webpage_download(video_id)
1786                 for el in ('embedded', 'detailpage', 'vevo', ''):
1787                     query = {
1788                         'video_id': video_id,
1789                         'ps': 'default',
1790                         'eurl': '',
1791                         'gl': 'US',
1792                         'hl': 'en',
1793                     }
1794                     if el:
1795                         query['el'] = el
1796                     if sts:
1797                         query['sts'] = sts
1798                     try:
1799                         video_info_webpage = self._download_webpage(
1800                             '%s://www.youtube.com/get_video_info' % proto,
1801                             video_id, note=False,
1802                             errnote='unable to download video info webpage',
1803                             query=query)
1804                     except ExtractorError as e:
1805                         # Skip further retries if we get 429 since solving the
1806                         # captcha only unblocks access to the website but
1807                         # not to the get_video_info endpoint
1808                         if isinstance(e.cause, compat_HTTPError) and e.cause.code == 429:
1809                             break
1810                         continue
1811                     if not video_info_webpage:
1812                         continue
1813                     get_video_info = compat_parse_qs(video_info_webpage)
1814                     if not player_response:
1815                         pl_response = get_video_info.get('player_response', [None])[0]
1816                         player_response = extract_player_response(pl_response, video_id)
1817                     add_dash_mpd(get_video_info)
1818                     if view_count is None:
1819                         view_count = extract_view_count(get_video_info)
1820                     if not video_info:
1821                         video_info = get_video_info
1822                     get_token = extract_token(get_video_info)
1823                     if get_token:
1824                         # Different get_video_info requests may report different results, e.g.
1825                         # some may report video unavailability, but some may serve it without
1826                         # any complaint (see https://github.com/ytdl-org/youtube-dl/issues/7362,
1827                         # the original webpage as well as el=info and el=embedded get_video_info
1828                         # requests report video unavailability due to geo restriction while
1829                         # el=detailpage succeeds and returns valid data). This is probably
1830                         # due to YouTube measures against IP ranges of hosting providers.
1831                         # Work around this by preferring the first successful video_info containing
1832                         # the token if no such video_info has been found yet.
1833                         token = extract_token(video_info)
1834                         if not token:
1835                             video_info = get_video_info
1836                         break
1837
1838         def extract_unavailable_message():
1839             messages = []
1840             for tag, kind in (('h1', 'message'), ('div', 'submessage')):
1841                 msg = self._html_search_regex(
1842                     r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
1843                     video_webpage, 'unavailable %s' % kind, default=None)
1844                 if msg:
1845                     messages.append(msg)
1846             if messages:
1847                 return '\n'.join(messages)
1848
1849         if not video_info and not player_response:
1850             unavailable_message = extract_unavailable_message()
1851             if not unavailable_message:
1852                 unavailable_message = 'Unable to extract video data'
1853             raise ExtractorError(
1854                 'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
1855
1856         if not isinstance(video_info, dict):
1857             video_info = {}
1858
1859         video_details = try_get(
1860             player_response, lambda x: x['videoDetails'], dict) or {}
1861
1862         video_title = video_info.get('title', [None])[0] or video_details.get('title')
1863         if not video_title:
1864             self._downloader.report_warning('Unable to extract video title')
1865             video_title = '_'
1866
1867         description_original = video_description = get_element_by_id("eow-description", video_webpage)
1868         if video_description:
1869
1870             def replace_url(m):
1871                 redir_url = compat_urlparse.urljoin(url, m.group(1))
1872                 parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
1873                 if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
1874                     qs = compat_parse_qs(parsed_redir_url.query)
1875                     q = qs.get('q')
1876                     if q and q[0]:
1877                         return q[0]
1878                 return redir_url
1879
1880             description_original = video_description = re.sub(r'''(?x)
1881                 <a\s+
1882                     (?:[a-zA-Z-]+="[^"]*"\s+)*?
1883                     (?:title|href)="([^"]+)"\s+
1884                     (?:[a-zA-Z-]+="[^"]*"\s+)*?
1885                     class="[^"]*"[^>]*>
1886                 [^<]+\.{3}\s*
1887                 </a>
1888             ''', replace_url, video_description)
1889             video_description = clean_html(video_description)
1890         else:
1891             video_description = self._html_search_meta('description', video_webpage) or video_details.get('shortDescription')
1892
1893         if not smuggled_data.get('force_singlefeed', False):
1894             if not self._downloader.params.get('noplaylist'):
1895                 multifeed_metadata_list = try_get(
1896                     player_response,
1897                     lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
1898                     compat_str) or try_get(
1899                     video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
1900                 if multifeed_metadata_list:
1901                     entries = []
1902                     feed_ids = []
1903                     for feed in multifeed_metadata_list.split(','):
1904                         # Unquote should take place before split on comma (,) since textual
1905                         # fields may contain comma as well (see
1906                         # https://github.com/ytdl-org/youtube-dl/issues/8536)
1907                         feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
1908                         entries.append({
1909                             '_type': 'url_transparent',
1910                             'ie_key': 'Youtube',
1911                             'url': smuggle_url(
1912                                 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1913                                 {'force_singlefeed': True}),
1914                             'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1915                         })
1916                         feed_ids.append(feed_data['id'][0])
1917                     self.to_screen(
1918                         'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1919                         % (', '.join(feed_ids), video_id))
1920                     return self.playlist_result(entries, video_id, video_title, video_description)
1921             else:
1922                 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1923
1924         if view_count is None:
1925             view_count = extract_view_count(video_info)
1926         if view_count is None and video_details:
1927             view_count = int_or_none(video_details.get('viewCount'))
1928
1929         if is_live is None:
1930             is_live = bool_or_none(video_details.get('isLive'))
1931
1932         # Check for "rental" videos
1933         if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1934             raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
1935
1936         def _extract_filesize(media_url):
1937             return int_or_none(self._search_regex(
1938                 r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
1939
1940         streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
1941         streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
1942
1943         if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1944             self.report_rtmp_download()
1945             formats = [{
1946                 'format_id': '_rtmp',
1947                 'protocol': 'rtmp',
1948                 'url': video_info['conn'][0],
1949                 'player_url': player_url,
1950             }]
1951         elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
1952             encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1953             if 'rtmpe%3Dyes' in encoded_url_map:
1954                 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
1955             formats = []
1956             formats_spec = {}
1957             fmt_list = video_info.get('fmt_list', [''])[0]
1958             if fmt_list:
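                     # fmt_list entries presumably look like '22/1280x720/...'; only the
                     # leading itag and the WIDTHxHEIGHT component are used here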
1959                 for fmt in fmt_list.split(','):
1960                     spec = fmt.split('/')
1961                     if len(spec) > 1:
1962                         width_height = spec[1].split('x')
1963                         if len(width_height) == 2:
1964                             formats_spec[spec[0]] = {
1965                                 'resolution': spec[1],
1966                                 'width': int_or_none(width_height[0]),
1967                                 'height': int_or_none(width_height[1]),
1968                             }
1969             for fmt in streaming_formats:
1970                 itag = str_or_none(fmt.get('itag'))
1971                 if not itag:
1972                     continue
1973                 quality = fmt.get('quality')
1974                 quality_label = fmt.get('qualityLabel') or quality
1975                 formats_spec[itag] = {
1976                     'asr': int_or_none(fmt.get('audioSampleRate')),
1977                     'filesize': int_or_none(fmt.get('contentLength')),
1978                     'format_note': quality_label,
1979                     'fps': int_or_none(fmt.get('fps')),
1980                     'height': int_or_none(fmt.get('height')),
1981                     # bitrate for itag 43 is always 2147483647
1982                     'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
1983                     'width': int_or_none(fmt.get('width')),
1984                 }
1985
1986             for fmt in streaming_formats:
1987                 if fmt.get('drm_families'):
1988                     continue
1989                 url = url_or_none(fmt.get('url'))
1990
1991                 if not url:
1992                     cipher = fmt.get('cipher')
1993                     if not cipher:
1994                         continue
1995                     url_data = compat_parse_qs(cipher)
1996                     url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
1997                     if not url:
1998                         continue
1999                 else:
2000                     cipher = None
2001                     url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
2002
2003                 stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
2004                 # Unsupported FORMAT_STREAM_TYPE_OTF
2005                 if stream_type == 3:
2006                     continue
2007
2008                 format_id = fmt.get('itag') or url_data['itag'][0]
2009                 if not format_id:
2010                     continue
2011                 format_id = compat_str(format_id)
2012
2013                 if cipher:
2014                     if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
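                             # The watch/embed page carries an "assets" object whose "js" entry
                             # points at the player script needed to decrypt the signature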
2015                         ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
2016                         jsplayer_url_json = self._search_regex(
2017                             ASSETS_RE,
2018                             embed_webpage if age_gate else video_webpage,
2019                             'JS player URL (1)', default=None)
2020                         if not jsplayer_url_json and not age_gate:
2021                             # We need the embed website after all
2022                             if embed_webpage is None:
2023                                 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
2024                                 embed_webpage = self._download_webpage(
2025                                     embed_url, video_id, 'Downloading embed webpage')
2026                             jsplayer_url_json = self._search_regex(
2027                                 ASSETS_RE, embed_webpage, 'JS player URL')
2028
2029                         player_url = json.loads(jsplayer_url_json)
2030                         if player_url is None:
2031                             player_url_json = self._search_regex(
2032                                 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
2033                                 video_webpage, 'age gate player URL')
2034                             player_url = json.loads(player_url_json)
2035
2036                     if 'sig' in url_data:
2037                         url += '&signature=' + url_data['sig'][0]
2038                     elif 's' in url_data:
2039                         encrypted_sig = url_data['s'][0]
2040
2041                         if self._downloader.params.get('verbose'):
2042                             if player_url is None:
2043                                 player_version = 'unknown'
2044                                 player_desc = 'unknown'
2045                             else:
2046                                 if player_url.endswith('swf'):
2047                                     player_version = self._search_regex(
2048                                         r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
2049                                         'flash player', fatal=False)
2050                                     player_desc = 'flash player %s' % player_version
2051                                 else:
2052                                     player_version = self._search_regex(
2053                                         [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
2054                                          r'(?:www|player(?:_ias)?)[-.]([^/]+)(?:/[a-z]{2,3}_[A-Z]{2})?/base\.js'],
2055                                         player_url,
2056                                         'html5 player', fatal=False)
2057                                     player_desc = 'html5 player %s' % player_version
2058
2059                             parts_sizes = self._signature_cache_id(encrypted_sig)
2060                             self.to_screen('{%s} signature length %s, %s' %
2061                                            (format_id, parts_sizes, player_desc))
2062
2063                         signature = self._decrypt_signature(
2064                             encrypted_sig, video_id, player_url, age_gate)
2065                         sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
2066                         url += '&%s=%s' % (sp, signature)
2067                 if 'ratebypass' not in url:
2068                     url += '&ratebypass=yes'
2069
2070                 dct = {
2071                     'format_id': format_id,
2072                     'url': url,
2073                     'player_url': player_url,
2074                 }
2075                 if format_id in self._formats:
2076                     dct.update(self._formats[format_id])
2077                 if format_id in formats_spec:
2078                     dct.update(formats_spec[format_id])
2079
2080                 # Some itags are not included in the DASH manifest, thus the corresponding formats
2081                 # will lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
2082                 # Try to extract metadata from the url_encoded_fmt_stream_map entry instead.
2083                 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
2084                 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
2085
2086                 if width is None:
2087                     width = int_or_none(fmt.get('width'))
2088                 if height is None:
2089                     height = int_or_none(fmt.get('height'))
2090
2091                 filesize = int_or_none(url_data.get(
2092                     'clen', [None])[0]) or _extract_filesize(url)
2093
2094                 quality = url_data.get('quality', [None])[0] or fmt.get('quality')
2095                 quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
2096
2097                 tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
2098                        or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
2099                 fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
2100
2101                 more_fields = {
2102                     'filesize': filesize,
2103                     'tbr': tbr,
2104                     'width': width,
2105                     'height': height,
2106                     'fps': fps,
2107                     'format_note': quality_label or quality,
2108                 }
2109                 for key, value in more_fields.items():
2110                     if value:
2111                         dct[key] = value
2112                 type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
2113                 if type_:
2114                     type_split = type_.split(';')
2115                     kind_ext = type_split[0].split('/')
2116                     if len(kind_ext) == 2:
2117                         kind, _ = kind_ext
2118                         dct['ext'] = mimetype2ext(type_split[0])
2119                         if kind in ('audio', 'video'):
2120                             codecs = None
2121                             for mobj in re.finditer(
2122                                     r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
2123                                 if mobj.group('key') == 'codecs':
2124                                     codecs = mobj.group('val')
2125                                     break
2126                             if codecs:
2127                                 dct.update(parse_codecs(codecs))
2128                 if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
2129                     dct['downloader_options'] = {
2130                         # Youtube throttles chunks >~10M
2131                         'http_chunk_size': 10485760,
2132                     }
2133                 formats.append(dct)
2134         else:
2135             manifest_url = (
2136                 url_or_none(try_get(
2137                     player_response,
2138                     lambda x: x['streamingData']['hlsManifestUrl'],
2139                     compat_str))
2140                 or url_or_none(try_get(
2141                     video_info, lambda x: x['hlsvp'][0], compat_str)))
2142             if manifest_url:
2143                 formats = []
2144                 m3u8_formats = self._extract_m3u8_formats(
2145                     manifest_url, video_id, 'mp4', fatal=False)
2146                 for a_format in m3u8_formats:
2147                     itag = self._search_regex(
2148                         r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
2149                     if itag:
2150                         a_format['format_id'] = itag
2151                         if itag in self._formats:
2152                             dct = self._formats[itag].copy()
2153                             dct.update(a_format)
2154                             a_format = dct
2155                     a_format['player_url'] = player_url
2156                     # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
2157                     a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
2158                     formats.append(a_format)
2159             else:
2160                 error_message = extract_unavailable_message()
2161                 if not error_message:
2162                     error_message = clean_html(try_get(
2163                         player_response, lambda x: x['playabilityStatus']['reason'],
2164                         compat_str))
2165                 if not error_message:
2166                     error_message = clean_html(
2167                         try_get(video_info, lambda x: x['reason'][0], compat_str))
2168                 if error_message:
2169                     raise ExtractorError(error_message, expected=True)
2170                 raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
2171
2172         # uploader
2173         video_uploader = try_get(
2174             video_info, lambda x: x['author'][0],
2175             compat_str) or str_or_none(video_details.get('author'))
2176         if video_uploader:
2177             video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
2178         else:
2179             self._downloader.report_warning('unable to extract uploader name')
2180
2181         # uploader_id
2182         video_uploader_id = None
2183         video_uploader_url = None
2184         mobj = re.search(
2185             r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
2186             video_webpage)
2187         if mobj is not None:
2188             video_uploader_id = mobj.group('uploader_id')
2189             video_uploader_url = mobj.group('uploader_url')
2190         else:
2191             self._downloader.report_warning('unable to extract uploader nickname')
2192
2193         channel_id = (
2194             str_or_none(video_details.get('channelId'))
2195             or self._html_search_meta(
2196                 'channelId', video_webpage, 'channel id', default=None)
2197             or self._search_regex(
2198                 r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
2199                 video_webpage, 'channel id', default=None, group='id'))
2200         channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
2201
2202         # thumbnail image
2203         # First try to get a high-quality image:
2204         m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
2205                             video_webpage, re.DOTALL)
2206         if m_thumb is not None:
2207             video_thumbnail = m_thumb.group(1)
2208         elif 'thumbnail_url' not in video_info:
2209             self._downloader.report_warning('unable to extract video thumbnail')
2210             video_thumbnail = None
2211         else:   # don't panic if we can't find it
2212             video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
2213
2214         # upload date
2215         upload_date = self._html_search_meta(
2216             'datePublished', video_webpage, 'upload date', default=None)
2217         if not upload_date:
2218             upload_date = self._search_regex(
2219                 [r'(?s)id="eow-date.*?>(.*?)</span>',
2220                  r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
2221                 video_webpage, 'upload date', default=None)
2222         upload_date = unified_strdate(upload_date)
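             # unified_strdate normalizes recognized date strings (hypothetically,
             # e.g. 'Jan 1, 2019') to the YYYYMMDD form such as '20190101', and
             # returns None if the format is not recognized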
2223
2224         video_license = self._html_search_regex(
2225             r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
2226             video_webpage, 'license', default=None)
2227
2228         m_music = re.search(
2229             r'''(?x)
2230                 <h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
2231                 <ul[^>]*>\s*
2232                 <li>(?P<title>.+?)
2233                 by (?P<creator>.+?)
2234                 (?:
2235                     \(.+?\)|
2236                     <a[^>]*
2237                         (?:
2238                             \bhref=["\']/red[^>]*>|             # drop possible
2239                             >\s*Listen ad-free with YouTube Red # YouTube Red ad
2240                         )
2241                     .*?
2242                 )?</li
2243             ''',
2244             video_webpage)
2245         if m_music:
2246             video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
2247             video_creator = clean_html(m_music.group('creator'))
2248         else:
2249             video_alt_title = video_creator = None
2250
2251         def extract_meta(field):
2252             return self._html_search_regex(
2253                 r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
2254                 video_webpage, field, default=None)
2255
2256         track = extract_meta('Song')
2257         artist = extract_meta('Artist')
2258         album = extract_meta('Album')
2259
2260         # Youtube Music Auto-generated description
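             # Such descriptions typically look like the following (illustrative
             # sketch of the layout the regex below expects; values are placeholders):
             #   Provided to YouTube by <label>
             #   <track> · <artist>
             #   <album>
             #   ℗ <year> <label>
             #   Released on: <YYYY-MM-DD>
             #   Artist: <artist>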
2261         release_date = release_year = None
2262         if video_description:
2263             mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
2264             if mobj:
2265                 if not track:
2266                     track = mobj.group('track').strip()
2267                 if not artist:
2268                     artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
2269                 if not album:
2270                     album = mobj.group('album').strip()
2271                 release_year = mobj.group('release_year')
2272                 release_date = mobj.group('release_date')
2273                 if release_date:
2274                     release_date = release_date.replace('-', '')
2275                     if not release_year:
2276                         release_year = int(release_date[:4])
2277                 if release_year:
2278                     release_year = int(release_year)
2279
2280         m_episode = re.search(
2281             r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
2282             video_webpage)
2283         if m_episode:
2284             series = unescapeHTML(m_episode.group('series'))
2285             season_number = int(m_episode.group('season'))
2286             episode_number = int(m_episode.group('episode'))
2287         else:
2288             series = season_number = episode_number = None
2289
2290         m_cat_container = self._search_regex(
2291             r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
2292             video_webpage, 'categories', default=None)
2293         if m_cat_container:
2294             category = self._html_search_regex(
2295                 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
2296                 default=None)
2297             video_categories = None if category is None else [category]
2298         else:
2299             video_categories = None
2300
2301         video_tags = [
2302             unescapeHTML(m.group('content'))
2303             for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
2304
2305         def _extract_count(count_name):
2306             return str_to_int(self._search_regex(
2307                 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
2308                 % re.escape(count_name),
2309                 video_webpage, count_name, default=None))
2310
2311         like_count = _extract_count('like')
2312         dislike_count = _extract_count('dislike')
2313
2314         if view_count is None:
2315             view_count = str_to_int(self._search_regex(
2316                 r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
2317                 'view count', default=None))
2318
2319         average_rating = (
2320             float_or_none(video_details.get('averageRating'))
2321             or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
2322
2323         # subtitles
2324         video_subtitles = self.extract_subtitles(video_id, video_webpage)
2325         automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
2326
2327         video_duration = try_get(
2328             video_info, lambda x: int_or_none(x['length_seconds'][0]))
2329         if not video_duration:
2330             video_duration = int_or_none(video_details.get('lengthSeconds'))
2331         if not video_duration:
2332             video_duration = parse_duration(self._html_search_meta(
2333                 'duration', video_webpage, 'video duration'))
2334
2335         # annotations
2336         video_annotations = None
2337         if self._downloader.params.get('writeannotations', False):
2338             xsrf_token = self._search_regex(
2339                 r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
2340                 video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
2341             invideo_url = try_get(
2342                 player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
2343             if xsrf_token and invideo_url:
2344                 xsrf_field_name = self._search_regex(
2345                     r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
2346                     video_webpage, 'xsrf field name',
2347                     group='xsrf_field_name', default='session_token')
2348                 video_annotations = self._download_webpage(
2349                     self._proto_relative_url(invideo_url),
2350                     video_id, note='Downloading annotations',
2351                     errnote='Unable to download video annotations', fatal=False,
2352                     data=urlencode_postdata({xsrf_field_name: xsrf_token}))
2353
2354         chapters = self._extract_chapters(description_original, video_duration)
2355
2356         # Look for the DASH manifest
2357         if self._downloader.params.get('youtube_include_dash_manifest', True):
2358             dash_mpd_fatal = True
2359             for mpd_url in dash_mpds:
2360                 dash_formats = {}
2361                 try:
2362                     def decrypt_sig(mobj):
2363                         s = mobj.group(1)
2364                         dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
2365                         return '/signature/%s' % dec_s
2366
2367                     mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
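                          # e.g. an encrypted-signature path component like
                          # /s/ABCDEF0123... (hypothetical value) is rewritten to
                          # /signature/<decrypted signature> before the MPD is fetched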
2368
2369                     for df in self._extract_mpd_formats(
2370                             mpd_url, video_id, fatal=dash_mpd_fatal,
2371                             formats_dict=self._formats):
2372                         if not df.get('filesize'):
2373                             df['filesize'] = _extract_filesize(df['url'])
2374                         # Do not overwrite DASH format found in some previous DASH manifest
2375                         if df['format_id'] not in dash_formats:
2376                             dash_formats[df['format_id']] = df
2377                         # Additional DASH manifests may result in HTTP Error 403, so
2378                         # allow them to fail without a bug report message if some
2379                         # previous DASH manifest already succeeded. This is a temporary
2380                         # workaround to reduce the burst of bug reports until we figure
2381                         # out the reason and whether it can be fixed at all.
2382                         dash_mpd_fatal = False
2383                 except (ExtractorError, KeyError) as e:
2384                     self.report_warning(
2385                         'Skipping DASH manifest: %r' % e, video_id)
2386                 if dash_formats:
2387                     # Remove the formats we found through non-DASH; they
2388                     # contain less info, which can be wrong because we use
2389                     # fixed values (for example the resolution). See
2390                     # https://github.com/ytdl-org/youtube-dl/issues/5774 for an
2391                     # example.
2392                     formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
2393                     formats.extend(dash_formats.values())
2394
2395         # Check for malformed aspect ratio
2396         stretched_m = re.search(
2397             r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
2398             video_webpage)
2399         if stretched_m:
2400             w = float(stretched_m.group('w'))
2401             h = float(stretched_m.group('h'))
2402             # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
2403             # We will only process correct ratios.
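                 # e.g. yt:stretch=16:9 yields stretched_ratio 16/9 (about 1.78), while
                 # yt:stretch=17:0 (as in the Q39EVAstoRM example above) is skipped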
2404             if w > 0 and h > 0:
2405                 ratio = w / h
2406                 for f in formats:
2407                     if f.get('vcodec') != 'none':
2408                         f['stretched_ratio'] = ratio
2409
2410         if not formats:
2411             token = extract_token(video_info)
2412             if not token:
2413                 if 'reason' in video_info:
2414                     if 'The uploader has not made this video available in your country.' in video_info['reason']:
2415                         regions_allowed = self._html_search_meta(
2416                             'regionsAllowed', video_webpage, default=None)
2417                         countries = regions_allowed.split(',') if regions_allowed else None
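                              # e.g. a regionsAllowed meta value like 'US,CA,GB'
                              # (hypothetical) becomes ['US', 'CA', 'GB']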
2418                         self.raise_geo_restricted(
2419                             msg=video_info['reason'][0], countries=countries)
2420                     reason = video_info['reason'][0]
2421                     if 'Invalid parameters' in reason:
2422                         unavailable_message = extract_unavailable_message()
2423                         if unavailable_message:
2424                             reason = unavailable_message
2425                     raise ExtractorError(
2426                         'YouTube said: %s' % reason,
2427                         expected=True, video_id=video_id)
2428                 else:
2429                     raise ExtractorError(
2430                         '"token" parameter not in video info for unknown reason',
2431                         video_id=video_id)
2432
2433         if not formats and (video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos'])):
2434             raise ExtractorError('This video is DRM protected.', expected=True)
2435
2436         self._sort_formats(formats)
2437
2438         self.mark_watched(video_id, video_info, player_response)
2439
2440         return {
2441             'id': video_id,
2442             'uploader': video_uploader,
2443             'uploader_id': video_uploader_id,
2444             'uploader_url': video_uploader_url,
2445             'channel_id': channel_id,
2446             'channel_url': channel_url,
2447             'upload_date': upload_date,
2448             'license': video_license,
2449             'creator': video_creator or artist,
2450             'title': video_title,
2451             'alt_title': video_alt_title or track,
2452             'thumbnail': video_thumbnail,
2453             'description': video_description,
2454             'categories': video_categories,
2455             'tags': video_tags,
2456             'subtitles': video_subtitles,
2457             'automatic_captions': automatic_captions,
2458             'duration': video_duration,
2459             'age_limit': 18 if age_gate else 0,
2460             'annotations': video_annotations,
2461             'chapters': chapters,
2462             'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
2463             'view_count': view_count,
2464             'like_count': like_count,
2465             'dislike_count': dislike_count,
2466             'average_rating': average_rating,
2467             'formats': formats,
2468             'is_live': is_live,
2469             'start_time': start_time,
2470             'end_time': end_time,
2471             'series': series,
2472             'season_number': season_number,
2473             'episode_number': episode_number,
2474             'track': track,
2475             'artist': artist,
2476             'album': album,
2477             'release_date': release_date,
2478             'release_year': release_year,
2479         }
2480
2481
2482 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
2483     IE_DESC = 'YouTube.com playlists'
2484     _VALID_URL = r"""(?x)(?:
2485                         (?:https?://)?
2486                         (?:\w+\.)?
2487                         (?:
2488                             (?:
2489                                 youtube(?:kids)?\.com|
2490                                 invidio\.us
2491                             )
2492                             /
2493                             (?:
2494                                (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
2495                                \? (?:.*?[&;])*? (?:p|a|list)=
2496                             |  p/
2497                             )|
2498                             youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
2499                         )
2500                         (
2501                             (?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,}
2502                             # Top tracks, they can also include dots
2503                             |(?:MC)[\w\.]*
2504                         )
2505                         .*
2506                      |
2507                         (%(playlist_id)s)
2508                      )""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
2509     _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
2510     _VIDEO_RE_TPL = r'href="\s*/watch\?v=%s(?:&amp;(?:[^"]*?index=(?P<index>\d+))?(?:[^>]+>(?P<title>[^<]+))?)?'
2511     _VIDEO_RE = _VIDEO_RE_TPL % r'(?P<id>[0-9A-Za-z_-]{11})'
2512     IE_NAME = 'youtube:playlist'
2513     _TESTS = [{
2514         'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
2515         'info_dict': {
2516             'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
2517             'uploader': 'Sergey M.',
2518             'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
2519             'title': 'youtube-dl public playlist',
2520         },
2521         'playlist_count': 1,
2522     }, {
2523         'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
2524         'info_dict': {
2525             'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
2526             'uploader': 'Sergey M.',
2527             'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
2528             'title': 'youtube-dl empty playlist',
2529         },
2530         'playlist_count': 0,
2531     }, {
2532         'note': 'Playlist with deleted videos (#651). As a bonus, video #51 appears twice in this list.',
2533         'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
2534         'info_dict': {
2535             'title': '29C3: Not my department',
2536             'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
2537             'uploader': 'Christiaan008',
2538             'uploader_id': 'ChRiStIaAn008',
2539         },
2540         'playlist_count': 96,
2541     }, {
2542         'note': 'issue #673',
2543         'url': 'PLBB231211A4F62143',
2544         'info_dict': {
2545             'title': '[OLD]Team Fortress 2 (Class-based LP)',
2546             'id': 'PLBB231211A4F62143',
2547             'uploader': 'Wickydoo',
2548             'uploader_id': 'Wickydoo',
2549         },
2550         'playlist_mincount': 26,
2551     }, {
2552         'note': 'Large playlist',
2553         'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
2554         'info_dict': {
2555             'title': 'Uploads from Cauchemar',
2556             'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
2557             'uploader': 'Cauchemar',
2558             'uploader_id': 'Cauchemar89',
2559         },
2560         'playlist_mincount': 799,
2561     }, {
2562         'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
2563         'info_dict': {
2564             'title': 'YDL_safe_search',
2565             'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
2566         },
2567         'playlist_count': 2,
2568         'skip': 'This playlist is private',
2569     }, {
2570         'note': 'embedded',
2571         'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
2572         'playlist_count': 4,
2573         'info_dict': {
2574             'title': 'JODA15',
2575             'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
2576             'uploader': 'milan',
2577             'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
2578         }
2579     }, {
2580         'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
2581         'playlist_mincount': 485,
2582         'info_dict': {
2583             'title': '2018 Chinese New Singles (11/6 updated)',
2584             'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
2585             'uploader': 'LBK',
2586             'uploader_id': 'sdragonfang',
2587         }
2588     }, {
2589         'note': 'Embedded SWF player',
2590         'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
2591         'playlist_count': 4,
2592         'info_dict': {
2593             'title': 'JODA7',
2594             'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
2595         },
2596         'skip': 'This playlist does not exist',
2597     }, {
2598         'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
2599         'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
2600         'info_dict': {
2601             'title': 'Uploads from Interstellar Movie',
2602             'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
2603             'uploader': 'Interstellar Movie',
2604             'uploader_id': 'InterstellarMovie1',
2605         },
2606         'playlist_mincount': 21,
2607     }, {
2608         # Playlist URL that does not actually serve a playlist
2609         'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
2610         'info_dict': {
2611             'id': 'FqZTN594JQw',
2612             'ext': 'webm',
2613             'title': "Smiley's People 01 detective, Adventure Series, Action",
2614             'uploader': 'STREEM',
2615             'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
2616             'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
2617             'upload_date': '20150526',
2618             'license': 'Standard YouTube License',
2619             'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
2620             'categories': ['People & Blogs'],
2621             'tags': list,
2622             'view_count': int,
2623             'like_count': int,
2624             'dislike_count': int,
2625         },
2626         'params': {
2627             'skip_download': True,
2628         },
2629         'skip': 'This video is not available.',
2630         'add_ie': [YoutubeIE.ie_key()],
2631     }, {
2632         'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
2633         'info_dict': {
2634             'id': 'yeWKywCrFtk',
2635             'ext': 'mp4',
2636             'title': 'Small Scale Baler and Braiding Rugs',
2637             'uploader': 'Backus-Page House Museum',
2638             'uploader_id': 'backuspagemuseum',
2639             'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
2640             'upload_date': '20161008',
2641             'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
2642             'categories': ['Nonprofits & Activism'],
2643             'tags': list,
2644             'like_count': int,
2645             'dislike_count': int,
2646         },
2647         'params': {
2648             'noplaylist': True,
2649             'skip_download': True,
2650         },
2651     }, {
2652         # https://github.com/ytdl-org/youtube-dl/issues/21844
2653         'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
2654         'info_dict': {
2655             'title': 'Data Analysis with Dr Mike Pound',
2656             'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
2657             'uploader_id': 'Computerphile',
2658             'uploader': 'Computerphile',
2659         },
2660         'playlist_mincount': 11,
2661     }, {
2662         'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
2663         'only_matching': True,
2664     }, {
2665         'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
2666         'only_matching': True,
2667     }, {
2668         # music album playlist
2669         'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
2670         'only_matching': True,
2671     }, {
2672         'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
2673         'only_matching': True,
2674     }, {
2675         'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
2676         'only_matching': True,
2677     }]
2678
2679     def _real_initialize(self):
2680         self._login()
2681
2682     def extract_videos_from_page(self, page):
2683         ids_in_page = []
2684         titles_in_page = []
2685
2686         for item in re.findall(
2687                 r'(<[^>]*\bdata-video-id\s*=\s*["\'][0-9A-Za-z_-]{11}[^>]+>)', page):
2688             attrs = extract_attributes(item)
2689             video_id = attrs['data-video-id']
2690             video_title = unescapeHTML(attrs.get('data-title'))
2691             if video_title:
2692                 video_title = video_title.strip()
2693             ids_in_page.append(video_id)
2694             titles_in_page.append(video_title)
2695
2696         # Fallback with old _VIDEO_RE
2697         self.extract_videos_from_page_impl(
2698             self._VIDEO_RE, page, ids_in_page, titles_in_page)
2699
2700         # Relaxed fallbacks
2701         self.extract_videos_from_page_impl(
2702             r'href="\s*/watch\?v\s*=\s*(?P<id>[0-9A-Za-z_-]{11})', page,
2703             ids_in_page, titles_in_page)
2704         self.extract_videos_from_page_impl(
2705             r'data-video-ids\s*=\s*["\'](?P<id>[0-9A-Za-z_-]{11})', page,
2706             ids_in_page, titles_in_page)
2707
2708         return zip(ids_in_page, titles_in_page)
2709
2710     def _extract_mix(self, playlist_id):
2711         # The mixes are generated from a single video;
2712         # the id of the playlist is just 'RD' + video_id
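             # e.g. a mix URL with list=RDdQw4w9WgXcQ (hypothetical id) is seeded
             # from video 'dQw4w9WgXcQ', i.e. playlist_id[-11:]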
2713         ids = []
2714         last_id = playlist_id[-11:]
2715         for n in itertools.count(1):
2716             url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
2717             webpage = self._download_webpage(
2718                 url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
2719             new_ids = orderedSet(re.findall(
2720                 r'''(?xs)data-video-username=".*?".*?
2721                            href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
2722                 webpage))
2723             # Fetch new pages until all the videos repeat; it seems that
2724             # there are always 51 unique videos.
2725             new_ids = [_id for _id in new_ids if _id not in ids]
2726             if not new_ids:
2727                 break
2728             ids.extend(new_ids)
2729             last_id = ids[-1]
2730
2731         url_results = self._ids_to_results(ids)
2732
2733         search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
2734         title_span = (
2735             search_title('playlist-title')
2736             or search_title('title long-title')
2737             or search_title('title'))
2738         title = clean_html(title_span)
2739
2740         return self.playlist_result(url_results, playlist_id, title)
2741
2742     def _extract_playlist(self, playlist_id):
2743         url = self._TEMPLATE_URL % playlist_id
2744         page = self._download_webpage(url, playlist_id)
2745
2746         # the yt-alert-message now has tabindex attribute (see https://github.com/ytdl-org/youtube-dl/issues/11604)
2747         for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
2748             match = match.strip()
2749             # Check if the playlist exists or is private
2750             mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
2751             if mobj:
2752                 reason = mobj.group('reason')
2753                 message = 'This playlist %s' % reason
2754                 if 'private' in reason:
2755                     message += ', use --username or --netrc to access it'
2756                 message += '.'
2757                 raise ExtractorError(message, expected=True)
2758             elif re.match(r'[^<]*Invalid parameters[^<]*', match):
2759                 raise ExtractorError(
2760                     'Invalid parameters. Maybe URL is incorrect.',
2761                     expected=True)
2762             elif re.match(r'[^<]*Choose your language[^<]*', match):
2763                 continue
2764             else:
2765                 self.report_warning('Youtube gives an alert message: ' + match)
2766
2767         playlist_title = self._html_search_regex(
2768             r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
2769             page, 'title', default=None)
2770
2771         _UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref='
2772         uploader = self._html_search_regex(
2773             r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE,
2774             page, 'uploader', default=None)
2775         mobj = re.search(
2776             r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE,
2777             page)
2778         if mobj:
2779             uploader_id = mobj.group('uploader_id')
2780             uploader_url = compat_urlparse.urljoin(url, mobj.group('path'))
2781         else:
2782             uploader_id = uploader_url = None
2783
2784         has_videos = True
2785
2786         if not playlist_title:
2787             try:
2788                 # Some playlist URLs don't actually serve a playlist (e.g.
2789                 # https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
2790                 next(self._entries(page, playlist_id))
2791             except StopIteration:
2792                 has_videos = False
2793
2794         playlist = self.playlist_result(
2795             self._entries(page, playlist_id), playlist_id, playlist_title)
2796         playlist.update({
2797             'uploader': uploader,
2798             'uploader_id': uploader_id,
2799             'uploader_url': uploader_url,
2800         })
2801
2802         return has_videos, playlist
2803
2804     def _check_download_just_video(self, url, playlist_id):
2805         # Check if it's a video-specific URL
2806         query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
2807         video_id = query_dict.get('v', [None])[0] or self._search_regex(
2808             r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
2809             'video id', default=None)
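             # e.g. https://youtu.be/BaW_jenozKc and
             # https://www.youtube.com/embed/BaW_jenozKc (but not embed/videoseries)
             # both yield video id 'BaW_jenozKc'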
2810         if video_id:
2811             if self._downloader.params.get('noplaylist'):
2812                 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
2813                 return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
2814             else:
2815                 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
2816                 return video_id, None
2817         return None, None
2818
2819     def _real_extract(self, url):
2820         # Extract playlist id
2821         mobj = re.match(self._VALID_URL, url)
2822         if mobj is None:
2823             raise ExtractorError('Invalid URL: %s' % url)
2824         playlist_id = mobj.group(1) or mobj.group(2)
2825
2826         video_id, video = self._check_download_just_video(url, playlist_id)
2827         if video:
2828             return video
2829
2830         if playlist_id.startswith(('RD', 'UL', 'PU')):
2831             # Mixes require a custom extraction process
2832             return self._extract_mix(playlist_id)
2833
2834         has_videos, playlist = self._extract_playlist(playlist_id)
2835         if has_videos or not video_id:
2836             return playlist
2837
2838         # Some playlist URLs don't actually serve a playlist (see
2839         # https://github.com/ytdl-org/youtube-dl/issues/10537).
2840         # Fallback to plain video extraction if there is a video id
2841         # along with playlist id.
2842         return self.url_result(video_id, 'Youtube', video_id=video_id)
2843
2844
2845 class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
2846     IE_DESC = 'YouTube.com channels'
2847     _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie|kids)?\.com|(?:www\.)?invidio\.us)/channel/(?P<id>[0-9A-Za-z_-]+)'
2848     _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
2849     _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
2850     IE_NAME = 'youtube:channel'
2851     _TESTS = [{
2852         'note': 'paginated channel',
2853         'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
2854         'playlist_mincount': 91,
2855         'info_dict': {
2856             'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
2857             'title': 'Uploads from lex will',
2858             'uploader': 'lex will',
2859             'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
2860         }
2861     }, {
2862         'note': 'Age restricted channel',
2863         # from https://www.youtube.com/user/DeusExOfficial
2864         'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
2865         'playlist_mincount': 64,
2866         'info_dict': {
2867             'id': 'UUs0ifCMCm1icqRbqhUINa0w',
2868             'title': 'Uploads from Deus Ex',
2869             'uploader': 'Deus Ex',
2870             'uploader_id': 'DeusExOfficial',
2871         },
2872     }, {
2873         'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
2874         'only_matching': True,
2875     }, {
2876         'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA',
2877         'only_matching': True,
2878     }]
2879
2880     @classmethod
2881     def suitable(cls, url):
2882         return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
2883                 else super(YoutubeChannelIE, cls).suitable(url))
2884
2885     def _build_template_url(self, url, channel_id):
2886         return self._TEMPLATE_URL % channel_id
2887
2888     def _real_extract(self, url):
2889         channel_id = self._match_id(url)
2890
2891         url = self._build_template_url(url, channel_id)
2892
2893         # Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778).
2894         # Work around this by extracting as a playlist if we manage to obtain the channel playlist URL,
2895         # otherwise fall back on page-by-page channel extraction.
2896         channel_page = self._download_webpage(
2897             url + '?view=57', channel_id,
2898             'Downloading channel page', fatal=False)
2899         if channel_page is False:
2900             channel_playlist_id = False
2901         else:
2902             channel_playlist_id = self._html_search_meta(
2903                 'channelId', channel_page, 'channel id', default=None)
2904             if not channel_playlist_id:
2905                 channel_url = self._html_search_meta(
2906                     ('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
2907                     channel_page, 'channel url', default=None)
2908                 if channel_url:
2909                     channel_playlist_id = self._search_regex(
2910                         r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
2911                         channel_url, 'channel id', default=None)
2912         if channel_playlist_id and channel_playlist_id.startswith('UC'):
2913             playlist_id = 'UU' + channel_playlist_id[2:]
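                 # e.g. channel id 'UCKfVa3S1e4PHvxWcwyMMg8w' maps to its uploads
                 # playlist 'UUKfVa3S1e4PHvxWcwyMMg8w' (cf. the 'paginated channel'
                 # test above)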
2914             return self.url_result(
2915                 compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
2916
2917         channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
2918         autogenerated = re.search(r'''(?x)
2919                 class="[^"]*?(?:
2920                     channel-header-autogenerated-label|
2921                     yt-channel-title-autogenerated
2922                 )[^"]*"''', channel_page) is not None
2923
2924         if autogenerated:
2925             # The videos are contained in a single page;
2926             # the ajax pages can't be used because they are empty
2927             entries = [
2928                 self.url_result(
2929                     video_id, 'Youtube', video_id=video_id,
2930                     video_title=video_title)
2931                 for video_id, video_title in self.extract_videos_from_page(channel_page)]
2932             return self.playlist_result(entries, channel_id)
2933
2934         try:
2935             next(self._entries(channel_page, channel_id))
2936         except StopIteration:
2937             alert_message = self._html_search_regex(
2938                 r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
2939                 channel_page, 'alert', default=None, group='alert')
2940             if alert_message:
2941                 raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
2942
2943         return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
2944
2945
2946 class YoutubeUserIE(YoutubeChannelIE):
2947     IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
2948     _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
2949     _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
2950     IE_NAME = 'youtube:user'
2951
2952     _TESTS = [{
2953         'url': 'https://www.youtube.com/user/TheLinuxFoundation',
2954         'playlist_mincount': 320,
2955         'info_dict': {
2956             'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
2957             'title': 'Uploads from The Linux Foundation',
2958             'uploader': 'The Linux Foundation',
2959             'uploader_id': 'TheLinuxFoundation',
2960         }
2961     }, {
2962         # Only available via https://www.youtube.com/c/12minuteathlete/videos
2963         # but not https://www.youtube.com/user/12minuteathlete/videos
2964         'url': 'https://www.youtube.com/c/12minuteathlete/videos',
2965         'playlist_mincount': 249,
2966         'info_dict': {
2967             'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
2968             'title': 'Uploads from 12 Minute Athlete',
2969             'uploader': '12 Minute Athlete',
2970             'uploader_id': 'the12minuteathlete',
2971         }
2972     }, {
2973         'url': 'ytuser:phihag',
2974         'only_matching': True,
2975     }, {
2976         'url': 'https://www.youtube.com/c/gametrailers',
2977         'only_matching': True,
2978     }, {
2979         'url': 'https://www.youtube.com/gametrailers',
2980         'only_matching': True,
2981     }, {
2982         # This channel is not available, geo restricted to JP
2983         'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
2984         'only_matching': True,
2985     }]
2986
2987     @classmethod
2988     def suitable(cls, url):
2989         # Don't return True if the url can be extracted with another youtube
2990         # extractor; this regex is too permissive and would match otherwise.
2991         other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
2992         if any(ie.suitable(url) for ie in other_yt_ies):
2993             return False
2994         else:
2995             return super(YoutubeUserIE, cls).suitable(url)
2996
2997     def _build_template_url(self, url, channel_id):
2998         mobj = re.match(self._VALID_URL, url)
2999         return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
3000
3001
3002 class YoutubeLiveIE(YoutubeBaseInfoExtractor):
3003     IE_DESC = 'YouTube.com live streams'
3004     _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
3005     IE_NAME = 'youtube:live'
3006
3007     _TESTS = [{
3008         'url': 'https://www.youtube.com/user/TheYoungTurks/live',
3009         'info_dict': {
3010             'id': 'a48o2S1cPoo',
3011             'ext': 'mp4',
3012             'title': 'The Young Turks - Live Main Show',
3013             'uploader': 'The Young Turks',
3014             'uploader_id': 'TheYoungTurks',
3015             'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
3016             'upload_date': '20150715',
3017             'license': 'Standard YouTube License',
3018             'description': 'md5:438179573adcdff3c97ebb1ee632b891',
3019             'categories': ['News & Politics'],
3020             'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
3021             'like_count': int,
3022             'dislike_count': int,
3023         },
3024         'params': {
3025             'skip_download': True,
3026         },
3027     }, {
3028         'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
3029         'only_matching': True,
3030     }, {
3031         'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
3032         'only_matching': True,
3033     }, {
3034         'url': 'https://www.youtube.com/TheYoungTurks/live',
3035         'only_matching': True,
3036     }]
3037
3038     def _real_extract(self, url):
3039         mobj = re.match(self._VALID_URL, url)
3040         channel_id = mobj.group('id')
3041         base_url = mobj.group('base_url')
3042         webpage = self._download_webpage(url, channel_id, fatal=False)
3043         if webpage:
3044             page_type = self._og_search_property(
3045                 'type', webpage, 'page type', default='')
3046             video_id = self._html_search_meta(
3047                 'videoId', webpage, 'video id', default=None)
3048             if page_type.startswith('video') and video_id and re.match(
3049                     r'^[0-9A-Za-z_-]{11}$', video_id):
3050                 return self.url_result(video_id, YoutubeIE.ie_key())
3051         return self.url_result(base_url)
3052
3053
3054 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
3055     IE_DESC = 'YouTube.com user/channel playlists'
3056     _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
3057     IE_NAME = 'youtube:playlists'
3058
3059     _TESTS = [{
3060         'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
3061         'playlist_mincount': 4,
3062         'info_dict': {
3063             'id': 'ThirstForScience',
3064             'title': 'ThirstForScience',
3065         },
3066     }, {
3067         # with "Load more" button
3068         'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
3069         'playlist_mincount': 70,
3070         'info_dict': {
3071             'id': 'igorkle1',
3072             'title': 'Игорь Клейнер',
3073         },
3074     }, {
3075         'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
3076         'playlist_mincount': 17,
3077         'info_dict': {
3078             'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
3079             'title': 'Chem Player',
3080         },
3081         'skip': 'Blocked',
3082     }]
3083
3084
3085 class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor):
3086     _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
3087
3088
3089 class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor):
3090     IE_DESC = 'YouTube.com searches'
3091     # There doesn't appear to be a real limit; for example, a search for
3092     # 'python' yields more than 8,000,000 results
3093     _MAX_RESULTS = float('inf')
3094     IE_NAME = 'youtube:search'
3095     _SEARCH_KEY = 'ytsearch'
3096     _EXTRA_QUERY_ARGS = {}
3097     _TESTS = []
3098
3099     def _get_n_results(self, query, n):
3100         """Get a specified number of results for a query"""
3101
3102         videos = []
3103         limit = n
3104
3105         url_query = {
3106             'search_query': query.encode('utf-8'),
3107         }
3108         url_query.update(self._EXTRA_QUERY_ARGS)
3109         result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
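             # e.g. the query 'youtube-dl test video' produces
             # https://www.youtube.com/results?search_query=youtube-dl+test+video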
3110
3111         for pagenum in itertools.count(1):
3112             data = self._download_json(
3113                 result_url, video_id='query "%s"' % query,
3114                 note='Downloading page %s' % pagenum,
3115                 errnote='Unable to download API page',
3116                 query={'spf': 'navigate'})
3117             html_content = data[1]['body']['content']
3118
3119             if 'class="search-message' in html_content:
3120                 raise ExtractorError(
3121                     '[youtube] No video results', expected=True)
3122
3123             new_videos = list(self._process_page(html_content))
3124             videos += new_videos
3125             if not new_videos or len(videos) > limit:
3126                 break
3127             next_link = self._html_search_regex(
3128                 r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
3129                 html_content, 'next link', default=None)
3130             if next_link is None:
3131                 break
3132             result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
3133
3134         if len(videos) > n:
3135             videos = videos[:n]
3136         return self.playlist_result(videos, query)
3137
3138
3139 class YoutubeSearchDateIE(YoutubeSearchIE):
3140     IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
3141     _SEARCH_KEY = 'ytsearchdate'
3142     IE_DESC = 'YouTube.com searches, newest videos first'
3143     _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
3144
3145
3146 class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):
3147     IE_DESC = 'YouTube.com search URLs'
3148     IE_NAME = 'youtube:search_url'
3149     _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
3150     _TESTS = [{
3151         'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
3152         'playlist_mincount': 5,
3153         'info_dict': {
3154             'title': 'youtube-dl test video',
3155         }
3156     }, {
3157         'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
3158         'only_matching': True,
3159     }]
3160
3161     def _real_extract(self, url):
3162         mobj = re.match(self._VALID_URL, url)
3163         query = compat_urllib_parse_unquote_plus(mobj.group('query'))
3164         webpage = self._download_webpage(url, query)
3165         return self.playlist_result(self._process_page(webpage), playlist_title=query)
3166
3167
3168 class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
3169     IE_DESC = 'YouTube.com (multi-season) shows'
3170     _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
3171     IE_NAME = 'youtube:show'
3172     _TESTS = [{
3173         'url': 'https://www.youtube.com/show/airdisasters',
3174         'playlist_mincount': 5,
3175         'info_dict': {
3176             'id': 'airdisasters',
3177             'title': 'Air Disasters',
3178         }
3179     }]
3180
3181     def _real_extract(self, url):
3182         playlist_id = self._match_id(url)
3183         return super(YoutubeShowIE, self)._real_extract(
3184             'https://www.youtube.com/show/%s/playlists' % playlist_id)
3185
3186
3187 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
3188     """
3189     Base class for feed extractors
3190     Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
3191     """
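         # A concrete subclass only needs a URL pattern plus the two attributes
         # named above, e.g. (sketch mirroring YoutubeRecommendedIE below;
         # 'example' is a made-up feed name):
         #
         #     class YoutubeExampleFeedIE(YoutubeFeedsInfoExtractor):
         #         _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/example|:ytexample'
         #         _FEED_NAME = 'example'
         #         _PLAYLIST_TITLE = 'Youtube Example Feed'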
3192     _LOGIN_REQUIRED = True
3193
3194     @property
3195     def IE_NAME(self):
3196         return 'youtube:%s' % self._FEED_NAME
3197
3198     def _real_initialize(self):
3199         self._login()
3200
3201     def _entries(self, page):
3202         # The extraction process is the same as for playlists, but the regex
3203         # for the video ids doesn't contain an index
3204         ids = []
3205         more_widget_html = content_html = page
3206         for page_num in itertools.count(1):
3207             matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
3208
3209             # The 'recommended' feed has an infinite 'load more' and each new portion
3210             # serves the same videos in (sometimes) slightly different order, so check
3211             # for uniqueness and break when a portion has no new videos
3212             new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
3213             if not new_ids:
3214                 break
3215
3216             ids.extend(new_ids)
3217
3218             for entry in self._ids_to_results(new_ids):
3219                 yield entry
3220
3221             mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
3222             if not mobj:
3223                 break
3224
3225             more = self._download_json(
3226                 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
3227                 'Downloading page #%s' % page_num,
3228                 transform_source=uppercase_escape)
3229             content_html = more['content_html']
3230             more_widget_html = more['load_more_widget_html']
3231
3232     def _real_extract(self, url):
3233         page = self._download_webpage(
3234             'https://www.youtube.com/feed/%s' % self._FEED_NAME,
3235             self._PLAYLIST_TITLE)
3236         return self.playlist_result(
3237             self._entries(page), playlist_title=self._PLAYLIST_TITLE)
3238
3239
3240 class YoutubeWatchLaterIE(YoutubePlaylistIE):
3241     IE_NAME = 'youtube:watchlater'
3242     IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
3243     _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
3244
3245     _TESTS = [{
3246         'url': 'https://www.youtube.com/playlist?list=WL',
3247         'only_matching': True,
3248     }, {
3249         'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
3250         'only_matching': True,
3251     }]
3252
3253     def _real_extract(self, url):
3254         _, video = self._check_download_just_video(url, 'WL')
3255         if video:
3256             return video
3257         _, playlist = self._extract_playlist('WL')
3258         return playlist
3259
3260
3261 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
3262     IE_NAME = 'youtube:favorites'
3263     IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
3264     _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
3265     _LOGIN_REQUIRED = True
3266
3267     def _real_extract(self, url):
3268         webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
3269         playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
3270         return self.url_result(playlist_id, 'YoutubePlaylist')
3271
3272
3273 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
3274     IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
3275     _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
3276     _FEED_NAME = 'recommended'
3277     _PLAYLIST_TITLE = 'Youtube Recommended videos'
3278
3279
3280 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
3281     IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
3282     _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
3283     _FEED_NAME = 'subscriptions'
3284     _PLAYLIST_TITLE = 'Youtube Subscriptions'
3285
3286
3287 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
3288     IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
3289     _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
3290     _FEED_NAME = 'history'
3291     _PLAYLIST_TITLE = 'Youtube History'
3292
3293
3294 class YoutubeTruncatedURLIE(InfoExtractor):
3295     IE_NAME = 'youtube:truncated_url'
3296     IE_DESC = False  # Do not list
3297     _VALID_URL = r'''(?x)
3298         (?:https?://)?
3299         (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
3300         (?:watch\?(?:
3301             feature=[a-z_]+|
3302             annotation_id=annotation_[^&]+|
3303             x-yt-cl=[0-9]+|
3304             hl=[^&]*|
3305             t=[0-9]+
3306         )?
3307         |
3308             attribution_link\?a=[^&]+
3309         )
3310         $
3311     '''
3312
3313     _TESTS = [{
3314         'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
3315         'only_matching': True,
3316     }, {
3317         'url': 'https://www.youtube.com/watch?',
3318         'only_matching': True,
3319     }, {
3320         'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
3321         'only_matching': True,
3322     }, {
3323         'url': 'https://www.youtube.com/watch?feature=foo',
3324         'only_matching': True,
3325     }, {
3326         'url': 'https://www.youtube.com/watch?hl=en-GB',
3327         'only_matching': True,
3328     }, {
3329         'url': 'https://www.youtube.com/watch?t=2372',
3330         'only_matching': True,
3331     }]
3332
3333     def _real_extract(self, url):
3334         raise ExtractorError(
3335             'Did you forget to quote the URL? Remember that & is a meta '
3336             'character in most shells, so you want to put the URL in quotes, '
3337             'like  youtube-dl '
3338             '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
3339             ' or simply  youtube-dl BaW_jenozKc  .',
3340             expected=True)
3341
3342
3343 class YoutubeTruncatedIDIE(InfoExtractor):
3344     IE_NAME = 'youtube:truncated_id'
3345     IE_DESC = False  # Do not list
3346     _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
3347
3348     _TESTS = [{
3349         'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
3350         'only_matching': True,
3351     }]
3352
3353     def _real_extract(self, url):
3354         video_id = self._match_id(url)
3355         raise ExtractorError(
3356             'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
3357             expected=True)