[youtube] Improve extraction in 429 error conditions (closes #24283)
youtube_dl/extractor/youtube.py
1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import random
10 import re
11 import time
12 import traceback
13
14 from .common import InfoExtractor, SearchInfoExtractor
15 from ..jsinterp import JSInterpreter
16 from ..swfinterp import SWFInterpreter
17 from ..compat import (
18     compat_chr,
19     compat_HTTPError,
20     compat_kwargs,
21     compat_parse_qs,
22     compat_urllib_parse_unquote,
23     compat_urllib_parse_unquote_plus,
24     compat_urllib_parse_urlencode,
25     compat_urllib_parse_urlparse,
26     compat_urlparse,
27     compat_str,
28 )
29 from ..utils import (
30     bool_or_none,
31     clean_html,
32     dict_get,
33     error_to_compat_str,
34     extract_attributes,
35     ExtractorError,
36     float_or_none,
37     get_element_by_attribute,
38     get_element_by_id,
39     int_or_none,
40     mimetype2ext,
41     orderedSet,
42     parse_codecs,
43     parse_duration,
44     remove_quotes,
45     remove_start,
46     smuggle_url,
47     str_or_none,
48     str_to_int,
49     try_get,
50     unescapeHTML,
51     unified_strdate,
52     unsmuggle_url,
53     uppercase_escape,
54     url_or_none,
55     urlencode_postdata,
56 )
57
58
59 class YoutubeBaseInfoExtractor(InfoExtractor):
60     """Provide base functions for Youtube extractors"""
61     _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
62     _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
63
64     _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
65     _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
66     _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
67
68     _NETRC_MACHINE = 'youtube'
69     # If True, an error is raised when no login info is provided
70     _LOGIN_REQUIRED = False
71
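    # A playlist ID is one of the known prefixes (PL, LL, EC, UU, FL, RD, UL,
    # TL, PU or OLAK5uy_) followed by at least 10 characters of [0-9A-Za-z-_]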
72     _PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
73
74     def _set_language(self):
75         self._set_cookie(
76             '.youtube.com', 'PREF', 'f1=50000000&hl=en',
77             # YouTube sets the expire time to about two months
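            # (2 * 30 * 24 * 3600 seconds = 5184000 seconds, i.e. roughly 60 days)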
78             expire_time=time.time() + 2 * 30 * 24 * 3600)
79
80     def _ids_to_results(self, ids):
81         return [
82             self.url_result(vid_id, 'Youtube', video_id=vid_id)
83             for vid_id in ids]
84
85     def _login(self):
86         """
87         Attempt to log in to YouTube.
88         Returns True if login succeeded or was skipped (no credentials provided).
89         Returns False if login failed.
90
91         If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
92         """
93         username, password = self._get_login_info()
94         # No authentication to be performed
95         if username is None:
96             if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
97                 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
98             return True
99
100         login_page = self._download_webpage(
101             self._LOGIN_URL, None,
102             note='Downloading login page',
103             errnote='unable to fetch login page', fatal=False)
104         if login_page is False:
105             return False
106
107         login_form = self._hidden_inputs(login_page)
108
109         def req(url, f_req, note, errnote):
110             data = login_form.copy()
111             data.update({
112                 'pstMsg': 1,
113                 'checkConnection': 'youtube',
114                 'checkedDomains': 'youtube',
115                 'hl': 'en',
116                 'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
117                 'f.req': json.dumps(f_req),
118                 'flowName': 'GlifWebSignIn',
119                 'flowEntry': 'ServiceLogin',
120                 # TODO: reverse engineer the actual botguard identifier generation algorithm
121                 'bgRequest': '["identifier",""]',
122             })
123             return self._download_json(
124                 url, None, note=note, errnote=errnote,
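                # The response body carries a short prefix (apparently XSSI
                # protection) before the JSON array; strip everything up to
                # the first '[' before parsing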
125                 transform_source=lambda s: re.sub(r'^[^[]*', '', s),
126                 fatal=False,
127                 data=urlencode_postdata(data), headers={
128                     'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
129                     'Google-Accounts-XSRF': 1,
130                 })
131
132         def warn(message):
133             self._downloader.report_warning(message)
134
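        # Opaque positional payload mirroring the web sign-in flow; note that
        # the username appears both near the start and at the end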
135         lookup_req = [
136             username,
137             None, [], None, 'US', None, None, 2, False, True,
138             [
139                 None, None,
140                 [2, 1, None, 1,
141                  'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
142                  None, [], 4],
143                 1, [None, None, []], None, None, None, True
144             ],
145             username,
146         ]
147
148         lookup_results = req(
149             self._LOOKUP_URL, lookup_req,
150             'Looking up account info', 'Unable to look up account info')
151
152         if lookup_results is False:
153             return False
154
155         user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
156         if not user_hash:
157             warn('Unable to extract user hash')
158             return False
159
160         challenge_req = [
161             user_hash,
162             None, 1, None, [1, None, None, None, [password, None, True]],
163             [
164                 None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
165                 1, [None, None, []], None, None, None, True
166             ]]
167
168         challenge_results = req(
169             self._CHALLENGE_URL, challenge_req,
170             'Logging in', 'Unable to log in')
171
172         if challenge_results is False:
173             return False
174
175         login_res = try_get(challenge_results, lambda x: x[0][5], list)
176         if login_res:
177             login_msg = try_get(login_res, lambda x: x[5], compat_str)
178             warn('Unable to log in: %s' % (
179                 'Invalid password' if login_msg == 'INCORRECT_ANSWER_ENTERED'
180                 else login_msg))
181             return False
182
183         res = try_get(challenge_results, lambda x: x[0][-1], list)
184         if not res:
185             warn('Unable to extract result entry')
186             return False
187
188         login_challenge = try_get(res, lambda x: x[0][0], list)
189         if login_challenge:
190             challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
191             if challenge_str == 'TWO_STEP_VERIFICATION':
192                 # SEND_SUCCESS - TFA code has been successfully sent to phone
193                 # QUOTA_EXCEEDED - reached the limit of TFA codes
194                 status = try_get(login_challenge, lambda x: x[5], compat_str)
195                 if status == 'QUOTA_EXCEEDED':
196                     warn('Exceeded the limit of TFA codes, try later')
197                     return False
198
199                 tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
200                 if not tl:
201                     warn('Unable to extract TL')
202                     return False
203
204                 tfa_code = self._get_tfa_info('2-step verification code')
205
206                 if not tfa_code:
207                     warn(
208                         'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
209                         ' (Note that only TOTP (Google Authenticator App) codes work at this time.)')
210                     return False
211
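                # Codes are often entered with the 'G-' prefix that Google
                # displays (e.g. 'G-123456'); strip it before submitting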
212                 tfa_code = remove_start(tfa_code, 'G-')
213
214                 tfa_req = [
215                     user_hash, None, 2, None,
216                     [
217                         9, None, None, None, None, None, None, None,
218                         [None, tfa_code, True, 2]
219                     ]]
220
221                 tfa_results = req(
222                     self._TFA_URL.format(tl), tfa_req,
223                     'Submitting TFA code', 'Unable to submit TFA code')
224
225                 if tfa_results is False:
226                     return False
227
228                 tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
229                 if tfa_res:
230                     tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
231                     warn('Unable to finish TFA: %s' % (
232                         'Invalid TFA code' if tfa_msg == 'INCORRECT_ANSWER_ENTERED'
233                         else tfa_msg))
234                     return False
235
236                 check_cookie_url = try_get(
237                     tfa_results, lambda x: x[0][-1][2], compat_str)
238             else:
239                 CHALLENGES = {
240                     'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
241                     'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
242                     'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
243                 }
244                 challenge = CHALLENGES.get(
245                     challenge_str,
246                     '%s returned error %s.' % (self.IE_NAME, challenge_str))
247                 warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
248                 return False
249         else:
250             check_cookie_url = try_get(res, lambda x: x[2], compat_str)
251
252         if not check_cookie_url:
253             warn('Unable to extract CheckCookie URL')
254             return False
255
256         check_cookie_results = self._download_webpage(
257             check_cookie_url, None, 'Checking cookie', fatal=False)
258
259         if check_cookie_results is False:
260             return False
261
262         if 'https://myaccount.google.com/' not in check_cookie_results:
263             warn('Unable to log in')
264             return False
265
266         return True
267
268     def _download_webpage_handle(self, *args, **kwargs):
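        # Append disable_polymer=true to every request so that YouTube serves
        # the old (non-Polymer) layout which the extraction code expects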
269         query = kwargs.get('query', {}).copy()
270         query['disable_polymer'] = 'true'
271         kwargs['query'] = query
272         return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
273             *args, **compat_kwargs(kwargs))
274
275     def _real_initialize(self):
276         if self._downloader is None:
277             return
278         self._set_language()
279         if not self._login():
280             return
281
282
283 class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
284     # Extract entries from page with "Load more" button
285     def _entries(self, page, playlist_id):
286         more_widget_html = content_html = page
287         for page_num in itertools.count(1):
288             for entry in self._process_page(content_html):
289                 yield entry
290
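            # The continuation URL is taken from the "Load more" button's
            # data-uix-load-more-href attribute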
291             mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
292             if not mobj:
293                 break
294
295             count = 0
296             retries = 3
297             while count <= retries:
298                 try:
299                     # Downloading a page may result in an intermittent 5xx HTTP
300                     # error that is usually worked around with a retry
301                     more = self._download_json(
302                         'https://youtube.com/%s' % mobj.group('more'), playlist_id,
303                         'Downloading page #%s%s'
304                         % (page_num, ' (retry #%d)' % count if count else ''),
305                         transform_source=uppercase_escape)
306                     break
307                 except ExtractorError as e:
308                     if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
309                         count += 1
310                         if count <= retries:
311                             continue
312                     raise
313
314             content_html = more['content_html']
315             if not content_html.strip():
316                 # Some webpages show a "Load more" button but they don't
317                 # have more videos
318                 break
319             more_widget_html = more['load_more_widget_html']
320
321
322 class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
323     def _process_page(self, content):
324         for video_id, video_title in self.extract_videos_from_page(content):
325             yield self.url_result(video_id, 'Youtube', video_id, video_title)
326
327     def extract_videos_from_page_impl(self, video_re, page, ids_in_page, titles_in_page):
328         for mobj in re.finditer(video_re, page):
329             # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
330             if 'index' in mobj.groupdict() and mobj.group('id') == '0':
331                 continue
332             video_id = mobj.group('id')
333             video_title = unescapeHTML(
334                 mobj.group('title')) if 'title' in mobj.groupdict() else None
335             if video_title:
336                 video_title = video_title.strip()
337             if video_title == '► Play all':
338                 video_title = None
339             try:
340                 idx = ids_in_page.index(video_id)
341                 if video_title and not titles_in_page[idx]:
342                     titles_in_page[idx] = video_title
343             except ValueError:
344                 ids_in_page.append(video_id)
345                 titles_in_page.append(video_title)
346
347     def extract_videos_from_page(self, page):
348         ids_in_page = []
349         titles_in_page = []
350         self.extract_videos_from_page_impl(
351             self._VIDEO_RE, page, ids_in_page, titles_in_page)
352         return zip(ids_in_page, titles_in_page)
353
354
355 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
356     def _process_page(self, content):
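        # Playlist links appear as '/playlist?list=...' anchors inside
        # 'yt-lockup-title' headings on the (non-Polymer) playlists page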
357         for playlist_id in orderedSet(re.findall(
358                 r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
359                 content)):
360             yield self.url_result(
361                 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
362
363     def _real_extract(self, url):
364         playlist_id = self._match_id(url)
365         webpage = self._download_webpage(url, playlist_id)
366         title = self._og_search_title(webpage, fatal=False)
367         return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
368
369
370 class YoutubeIE(YoutubeBaseInfoExtractor):
371     IE_DESC = 'YouTube.com'
372     _VALID_URL = r"""(?x)^
373                      (
374                          (?:https?://|//)                                    # http(s):// or protocol-independent URL
375                          (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
376                             (?:www\.)?deturl\.com/www\.youtube\.com/|
377                             (?:www\.)?pwnyoutube\.com/|
378                             (?:www\.)?hooktube\.com/|
379                             (?:www\.)?yourepeat\.com/|
380                             tube\.majestyc\.net/|
381                             # Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
382                             (?:(?:www|dev)\.)?invidio\.us/|
383                             (?:(?:www|no)\.)?invidiou\.sh/|
384                             (?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
385                             (?:www\.)?invidious\.kabi\.tk/|
386                             (?:www\.)?invidious\.13ad\.de/|
387                             (?:www\.)?invidious\.mastodon\.host/|
388                             (?:www\.)?invidious\.nixnet\.xyz/|
389                             (?:www\.)?invidious\.drycat\.fr/|
390                             (?:www\.)?tube\.poal\.co/|
391                             (?:www\.)?vid\.wxzm\.sx/|
392                             (?:www\.)?yt\.elukerio\.org/|
393                             (?:www\.)?yt\.lelux\.fi/|
394                             (?:www\.)?kgg2m7yk5aybusll\.onion/|
395                             (?:www\.)?qklhadlycap4cnod\.onion/|
396                             (?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
397                             (?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
398                             (?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
399                             (?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
400                             (?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
401                             youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
402                          (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
403                          (?:                                                  # the various things that can precede the ID:
404                              (?:(?:v|embed|e)/(?!videoseries))                # v/ or embed/ or e/
405                              |(?:                                             # or the v= param in all its forms
406                                  (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
407                                  (?:\?|\#!?)                                  # the params delimiter ? or # or #!
408                                  (?:.*?[&;])??                                # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
409                                  v=
410                              )
411                          ))
412                          |(?:
413                             youtu\.be|                                        # just youtu.be/xxxx
414                             vid\.plus|                                        # or vid.plus/xxxx
415                             zwearz\.com/watch|                                # or zwearz.com/watch/xxxx
416                          )/
417                          |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
418                          )
419                      )?                                                       # all until now is optional -> you can pass the naked ID
420                      ([0-9A-Za-z_-]{11})                                      # here it is! the YouTube video ID
421                      (?!.*?\blist=
422                         (?:
423                             %(playlist_id)s|                                  # combined list/video URLs are handled by the playlist IE
424                             WL                                                # WL are handled by the watch later IE
425                         )
426                      )
427                      (?(1).+)?                                                # if we found the ID, everything can follow
428                      $""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
429     _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
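    # Known itag values mapped to their static characteristics (container,
    # resolution, codecs); used to fill in details for formats found during
    # extraction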
430     _formats = {
431         '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
432         '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
433         '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
434         '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
435         '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
436         '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
437         '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
438         '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
439         # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
440         '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
441         '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
442         '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
443         '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
444         '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
445         '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
446         '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
447         '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
448         '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
449
450
451         # 3D videos
452         '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
453         '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
454         '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
455         '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
456         '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
457         '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
458         '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
459
460         # Apple HTTP Live Streaming
461         '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
462         '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
463         '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
464         '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
465         '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
466         '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
467         '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
468         '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
469
470         # DASH mp4 video
471         '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
472         '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
473         '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
474         '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
475         '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
476         '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'},  # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
477         '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
478         '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
479         '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
480         '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
481         '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
482         '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
483
484         # DASH mp4 audio
485         '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
486         '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
487         '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
488         '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
489         '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
490         '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
491         '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
492
493         # DASH webm
494         '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
495         '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
496         '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
497         '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
498         '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
499         '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
500         '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
501         '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
502         '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
503         '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
504         '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
505         '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
506         '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
507         '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
508         '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
509         # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
510         '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
511         '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
512         '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
513         '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
514         '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
515         '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
516
517         # DASH webm audio
518         '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
519         '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
520
521         # DASH webm audio with opus inside
522         '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
523         '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
524         '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
525
526         # RTMP (unnamed)
527         '_rtmp': {'protocol': 'rtmp'},
528
529         # av01 video only formats sometimes served with "unknown" codecs
530         '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
531         '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
532         '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
533         '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
534     }
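    # Subtitle formats requested when extracting subtitles, presumably passed
    # as the 'fmt' parameter of YouTube's timedtext endpoint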
535     _SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
536
537     _GEO_BYPASS = False
538
539     IE_NAME = 'youtube'
540     _TESTS = [
541         {
542             'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
543             'info_dict': {
544                 'id': 'BaW_jenozKc',
545                 'ext': 'mp4',
546                 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
547                 'uploader': 'Philipp Hagemeister',
548                 'uploader_id': 'phihag',
549                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
550                 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
551                 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
552                 'upload_date': '20121002',
553                 'description': 'test chars:  "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
554                 'categories': ['Science & Technology'],
555                 'tags': ['youtube-dl'],
556                 'duration': 10,
557                 'view_count': int,
558                 'like_count': int,
559                 'dislike_count': int,
560                 'start_time': 1,
561                 'end_time': 9,
562             }
563         },
564         {
565             'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
566             'note': 'Test generic use_cipher_signature video (#897)',
567             'info_dict': {
568                 'id': 'UxxajLWwzqY',
569                 'ext': 'mp4',
570                 'upload_date': '20120506',
571                 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
572                 'alt_title': 'I Love It (feat. Charli XCX)',
573                 'description': 'md5:19a2f98d9032b9311e686ed039564f63',
574                 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
575                          'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
576                          'iconic ep', 'iconic', 'love', 'it'],
577                 'duration': 180,
578                 'uploader': 'Icona Pop',
579                 'uploader_id': 'IconaPop',
580                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
581                 'creator': 'Icona Pop',
582                 'track': 'I Love It (feat. Charli XCX)',
583                 'artist': 'Icona Pop',
584             }
585         },
586         {
587             'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
588             'note': 'Test VEVO video with age protection (#956)',
589             'info_dict': {
590                 'id': '07FYdnEawAQ',
591                 'ext': 'mp4',
592                 'upload_date': '20130703',
593                 'title': 'Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)',
594                 'alt_title': 'Tunnel Vision',
595                 'description': 'md5:07dab3356cde4199048e4c7cd93471e1',
596                 'duration': 419,
597                 'uploader': 'justintimberlakeVEVO',
598                 'uploader_id': 'justintimberlakeVEVO',
599                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
600                 'creator': 'Justin Timberlake',
601                 'track': 'Tunnel Vision',
602                 'artist': 'Justin Timberlake',
603                 'age_limit': 18,
604             }
605         },
606         {
607             'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
608             'note': 'Embed-only video (#1746)',
609             'info_dict': {
610                 'id': 'yZIXLfi8CZQ',
611                 'ext': 'mp4',
612                 'upload_date': '20120608',
613                 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
614                 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
615                 'uploader': 'SET India',
616                 'uploader_id': 'setindia',
617                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
618                 'age_limit': 18,
619             }
620         },
621         {
622             'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
623             'note': 'Use the first video ID in the URL',
624             'info_dict': {
625                 'id': 'BaW_jenozKc',
626                 'ext': 'mp4',
627                 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
628                 'uploader': 'Philipp Hagemeister',
629                 'uploader_id': 'phihag',
630                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
631                 'upload_date': '20121002',
632                 'description': 'test chars:  "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
633                 'categories': ['Science & Technology'],
634                 'tags': ['youtube-dl'],
635                 'duration': 10,
636                 'view_count': int,
637                 'like_count': int,
638                 'dislike_count': int,
639             },
640             'params': {
641                 'skip_download': True,
642             },
643         },
644         {
645             'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
646             'note': '256k DASH audio (format 141) via DASH manifest',
647             'info_dict': {
648                 'id': 'a9LDPn-MO4I',
649                 'ext': 'm4a',
650                 'upload_date': '20121002',
651                 'uploader_id': '8KVIDEO',
652                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
653                 'description': '',
654                 'uploader': '8KVIDEO',
655                 'title': 'UHDTV TEST 8K VIDEO.mp4'
656             },
657             'params': {
658                 'youtube_include_dash_manifest': True,
659                 'format': '141',
660             },
661             'skip': 'format 141 not served anymore',
662         },
663         # DASH manifest with encrypted signature
664         {
665             'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
666             'info_dict': {
667                 'id': 'IB3lcPjvWLA',
668                 'ext': 'm4a',
669                 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
670                 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
671                 'duration': 244,
672                 'uploader': 'AfrojackVEVO',
673                 'uploader_id': 'AfrojackVEVO',
674                 'upload_date': '20131011',
675             },
676             'params': {
677                 'youtube_include_dash_manifest': True,
678                 'format': '141/bestaudio[ext=m4a]',
679             },
680         },
681         # JS player signature function name containing $
682         {
683             'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
684             'info_dict': {
685                 'id': 'nfWlot6h_JM',
686                 'ext': 'm4a',
687                 'title': 'Taylor Swift - Shake It Off',
688                 'description': 'md5:307195cd21ff7fa352270fe884570ef0',
689                 'duration': 242,
690                 'uploader': 'TaylorSwiftVEVO',
691                 'uploader_id': 'TaylorSwiftVEVO',
692                 'upload_date': '20140818',
693             },
694             'params': {
695                 'youtube_include_dash_manifest': True,
696                 'format': '141/bestaudio[ext=m4a]',
697             },
698         },
699         # Controversial video
700         {
701             'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
702             'info_dict': {
703                 'id': 'T4XJQO3qol8',
704                 'ext': 'mp4',
705                 'duration': 219,
706                 'upload_date': '20100909',
707                 'uploader': 'Amazing Atheist',
708                 'uploader_id': 'TheAmazingAtheist',
709                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
710                 'title': 'Burning Everyone\'s Koran',
711                 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
712             }
713         },
714         # Normal age-gate video (No vevo, embed allowed)
715         {
716             'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
717             'info_dict': {
718                 'id': 'HtVdAasjOgU',
719                 'ext': 'mp4',
720                 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
721                 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
722                 'duration': 142,
723                 'uploader': 'The Witcher',
724                 'uploader_id': 'WitcherGame',
725                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
726                 'upload_date': '20140605',
727                 'age_limit': 18,
728             },
729         },
730         # Age-gate video with encrypted signature
731         {
732             'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
733             'info_dict': {
734                 'id': '6kLq3WMV1nU',
735                 'ext': 'mp4',
736                 'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
737                 'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
738                 'duration': 246,
739                 'uploader': 'LloydVEVO',
740                 'uploader_id': 'LloydVEVO',
741                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
742                 'upload_date': '20110629',
743                 'age_limit': 18,
744             },
745         },
746         # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
747         # YouTube Red ad is not captured for creator
748         {
749             'url': '__2ABJjxzNo',
750             'info_dict': {
751                 'id': '__2ABJjxzNo',
752                 'ext': 'mp4',
753                 'duration': 266,
754                 'upload_date': '20100430',
755                 'uploader_id': 'deadmau5',
756                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
757                 'creator': 'Dada Life, deadmau5',
758                 'description': 'md5:12c56784b8032162bb936a5f76d55360',
759                 'uploader': 'deadmau5',
760                 'title': 'Deadmau5 - Some Chords (HD)',
761                 'alt_title': 'This Machine Kills Some Chords',
762             },
763             'expected_warnings': [
764                 'DASH manifest missing',
765             ]
766         },
767         # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
768         {
769             'url': 'lqQg6PlCWgI',
770             'info_dict': {
771                 'id': 'lqQg6PlCWgI',
772                 'ext': 'mp4',
773                 'duration': 6085,
774                 'upload_date': '20150827',
775                 'uploader_id': 'olympic',
776                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
777                 'description': 'HO09  - Women -  GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
778                 'uploader': 'Olympic',
779                 'title': 'Hockey - Women -  GER-AUS - London 2012 Olympic Games',
780             },
781             'params': {
782                 'skip_download': 'requires avconv',
783             }
784         },
785         # Non-square pixels
786         {
787             'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
788             'info_dict': {
789                 'id': '_b-2C3KPAM0',
790                 'ext': 'mp4',
791                 'stretched_ratio': 16 / 9.,
792                 'duration': 85,
793                 'upload_date': '20110310',
794                 'uploader_id': 'AllenMeow',
795                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
796                 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
797                 'uploader': '孫ᄋᄅ',
798                 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
799             },
800         },
801         # url_encoded_fmt_stream_map is an empty string
802         {
803             'url': 'qEJwOuvDf7I',
804             'info_dict': {
805                 'id': 'qEJwOuvDf7I',
806                 'ext': 'webm',
807                 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
808                 'description': '',
809                 'upload_date': '20150404',
810                 'uploader_id': 'spbelect',
811                 'uploader': 'Наблюдатели Петербурга',
812             },
813             'params': {
814                 'skip_download': 'requires avconv',
815             },
816             'skip': 'This live event has ended.',
817         },
818         # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
819         {
820             'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
821             'info_dict': {
822                 'id': 'FIl7x6_3R5Y',
823                 'ext': 'webm',
824                 'title': 'md5:7b81415841e02ecd4313668cde88737a',
825                 'description': 'md5:116377fd2963b81ec4ce64b542173306',
826                 'duration': 220,
827                 'upload_date': '20150625',
828                 'uploader_id': 'dorappi2000',
829                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
830                 'uploader': 'dorappi2000',
831                 'formats': 'mincount:31',
832             },
833             'skip': 'no longer applicable',
834         },
835         # DASH manifest with segment_list
836         {
837             'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
838             'md5': '8ce563a1d667b599d21064e982ab9e31',
839             'info_dict': {
840                 'id': 'CsmdDsKjzN8',
841                 'ext': 'mp4',
842                 'upload_date': '20150501',  # According to '<meta itemprop="datePublished"', but in other places it's 20150510
843                 'uploader': 'Airtek',
844                 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
845                 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
846                 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
847             },
848             'params': {
849                 'youtube_include_dash_manifest': True,
850                 'format': '135',  # bestvideo
851             },
852             'skip': 'This live event has ended.',
853         },
854         {
855             # Multifeed videos (multiple cameras), URL is for Main Camera
856             'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
857             'info_dict': {
858                 'id': 'jqWvoWXjCVs',
859                 'title': 'teamPGP: Rocket League Noob Stream',
860                 'description': 'md5:dc7872fb300e143831327f1bae3af010',
861             },
862             'playlist': [{
863                 'info_dict': {
864                     'id': 'jqWvoWXjCVs',
865                     'ext': 'mp4',
866                     'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
867                     'description': 'md5:dc7872fb300e143831327f1bae3af010',
868                     'duration': 7335,
869                     'upload_date': '20150721',
870                     'uploader': 'Beer Games Beer',
871                     'uploader_id': 'beergamesbeer',
872                     'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
873                     'license': 'Standard YouTube License',
874                 },
875             }, {
876                 'info_dict': {
877                     'id': '6h8e8xoXJzg',
878                     'ext': 'mp4',
879                     'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
880                     'description': 'md5:dc7872fb300e143831327f1bae3af010',
881                     'duration': 7337,
882                     'upload_date': '20150721',
883                     'uploader': 'Beer Games Beer',
884                     'uploader_id': 'beergamesbeer',
885                     'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
886                     'license': 'Standard YouTube License',
887                 },
888             }, {
889                 'info_dict': {
890                     'id': 'PUOgX5z9xZw',
891                     'ext': 'mp4',
892                     'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
893                     'description': 'md5:dc7872fb300e143831327f1bae3af010',
894                     'duration': 7337,
895                     'upload_date': '20150721',
896                     'uploader': 'Beer Games Beer',
897                     'uploader_id': 'beergamesbeer',
898                     'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
899                     'license': 'Standard YouTube License',
900                 },
901             }, {
902                 'info_dict': {
903                     'id': 'teuwxikvS5k',
904                     'ext': 'mp4',
905                     'title': 'teamPGP: Rocket League Noob Stream (zim)',
906                     'description': 'md5:dc7872fb300e143831327f1bae3af010',
907                     'duration': 7334,
908                     'upload_date': '20150721',
909                     'uploader': 'Beer Games Beer',
910                     'uploader_id': 'beergamesbeer',
911                     'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
912                     'license': 'Standard YouTube License',
913                 },
914             }],
915             'params': {
916                 'skip_download': True,
917             },
918             'skip': 'This video is not available.',
919         },
920         {
921             # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
922             'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
923             'info_dict': {
924                 'id': 'gVfLd0zydlo',
925                 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
926             },
927             'playlist_count': 2,
928             'skip': 'Not multifeed anymore',
929         },
930         {
931             'url': 'https://vid.plus/FlRa-iH7PGw',
932             'only_matching': True,
933         },
934         {
935             'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
936             'only_matching': True,
937         },
938         {
939             # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
940             # Also tests cut-off URL expansion in video description (see
941             # https://github.com/ytdl-org/youtube-dl/issues/1892,
942             # https://github.com/ytdl-org/youtube-dl/issues/8164)
943             'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
944             'info_dict': {
945                 'id': 'lsguqyKfVQg',
946                 'ext': 'mp4',
947                 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
948                 'alt_title': 'Dark Walk - Position Music',
949                 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
950                 'duration': 133,
951                 'upload_date': '20151119',
952                 'uploader_id': 'IronSoulElf',
953                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
954                 'uploader': 'IronSoulElf',
955                 'creator': 'Todd Haberman,  Daniel Law Heath and Aaron Kaplan',
956                 'track': 'Dark Walk - Position Music',
957                 'artist': 'Todd Haberman,  Daniel Law Heath and Aaron Kaplan',
958                 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
959             },
960             'params': {
961                 'skip_download': True,
962             },
963         },
964         {
965             # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
966             'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
967             'only_matching': True,
968         },
969         {
970             # Video with yt:stretch=17:0
971             'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
972             'info_dict': {
973                 'id': 'Q39EVAstoRM',
974                 'ext': 'mp4',
975                 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
976                 'description': 'md5:ee18a25c350637c8faff806845bddee9',
977                 'upload_date': '20151107',
978                 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
979                 'uploader': 'CH GAMER DROID',
980             },
981             'params': {
982                 'skip_download': True,
983             },
984             'skip': 'This video does not exist.',
985         },
986         {
987             # Video licensed under Creative Commons
988             'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
989             'info_dict': {
990                 'id': 'M4gD1WSo5mA',
991                 'ext': 'mp4',
992                 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
993                 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
994                 'duration': 721,
995                 'upload_date': '20150127',
996                 'uploader_id': 'BerkmanCenter',
997                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
998                 'uploader': 'The Berkman Klein Center for Internet & Society',
999                 'license': 'Creative Commons Attribution license (reuse allowed)',
1000             },
1001             'params': {
1002                 'skip_download': True,
1003             },
1004         },
1005         {
1006             # Channel-like uploader_url
1007             'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
1008             'info_dict': {
1009                 'id': 'eQcmzGIKrzg',
1010                 'ext': 'mp4',
1011                 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
1012                 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
1013                 'duration': 4060,
1014                 'upload_date': '20151119',
1015                 'uploader': 'Bernie Sanders',
1016                 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
1017                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
1018                 'license': 'Creative Commons Attribution license (reuse allowed)',
1019             },
1020             'params': {
1021                 'skip_download': True,
1022             },
1023         },
1024         {
1025             'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
1026             'only_matching': True,
1027         },
1028         {
1029             # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
1030             'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
1031             'only_matching': True,
1032         },
1033         {
1034             # Rental video preview
1035             'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
1036             'info_dict': {
1037                 'id': 'uGpuVWrhIzE',
1038                 'ext': 'mp4',
1039                 'title': 'Piku - Trailer',
1040                 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
1041                 'upload_date': '20150811',
1042                 'uploader': 'FlixMatrix',
1043                 'uploader_id': 'FlixMatrixKaravan',
1044                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
1045                 'license': 'Standard YouTube License',
1046             },
1047             'params': {
1048                 'skip_download': True,
1049             },
1050             'skip': 'This video is not available.',
1051         },
1052         {
1053             # YouTube Red video with episode data
1054             'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
1055             'info_dict': {
1056                 'id': 'iqKdEhx-dD4',
1057                 'ext': 'mp4',
1058                 'title': 'Isolation - Mind Field (Ep 1)',
1059                 'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
1060                 'duration': 2085,
1061                 'upload_date': '20170118',
1062                 'uploader': 'Vsauce',
1063                 'uploader_id': 'Vsauce',
1064                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
1065                 'series': 'Mind Field',
1066                 'season_number': 1,
1067                 'episode_number': 1,
1068             },
1069             'params': {
1070                 'skip_download': True,
1071             },
1072             'expected_warnings': [
1073                 'Skipping DASH manifest',
1074             ],
1075         },
1076         {
1077             # The following content has been identified by the YouTube community
1078             # as inappropriate or offensive to some audiences.
1079             'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
1080             'info_dict': {
1081                 'id': '6SJNVb0GnPI',
1082                 'ext': 'mp4',
1083                 'title': 'Race Differences in Intelligence',
1084                 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
1085                 'duration': 965,
1086                 'upload_date': '20140124',
1087                 'uploader': 'New Century Foundation',
1088                 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
1089                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
1090             },
1091             'params': {
1092                 'skip_download': True,
1093             },
1094         },
1095         {
1096             # itag 212
1097             'url': '1t24XAntNCY',
1098             'only_matching': True,
1099         },
1100         {
1101             # geo restricted to JP
1102             'url': 'sJL6WA-aGkQ',
1103             'only_matching': True,
1104         },
1105         {
1106             'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
1107             'only_matching': True,
1108         },
1109         {
1110             'url': 'https://invidio.us/watch?v=BaW_jenozKc',
1111             'only_matching': True,
1112         },
1113         {
1114             # DRM protected
1115             'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
1116             'only_matching': True,
1117         },
1118         {
1119             # Video with unsupported adaptive stream type formats
1120             'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
1121             'info_dict': {
1122                 'id': 'Z4Vy8R84T1U',
1123                 'ext': 'mp4',
1124                 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
1125                 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
1126                 'duration': 433,
1127                 'upload_date': '20130923',
1128                 'uploader': 'Amelia Putri Harwita',
1129                 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
1130                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
1131                 'formats': 'maxcount:10',
1132             },
1133             'params': {
1134                 'skip_download': True,
1135                 'youtube_include_dash_manifest': False,
1136             },
1137             'skip': 'not relevant anymore',
1138         },
1139         {
1140             # Youtube Music Auto-generated description
1141             'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
1142             'info_dict': {
1143                 'id': 'MgNrAu2pzNs',
1144                 'ext': 'mp4',
1145                 'title': 'Voyeur Girl',
1146                 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
1147                 'upload_date': '20190312',
1148                 'uploader': 'Stephen - Topic',
1149                 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
1150                 'artist': 'Stephen',
1151                 'track': 'Voyeur Girl',
1152                 'album': 'it\'s too much love to know my dear',
1153                 'release_date': '20190313',
1154                 'release_year': 2019,
1155             },
1156             'params': {
1157                 'skip_download': True,
1158             },
1159         },
1160         {
1161             # Youtube Music Auto-generated description
1162             # Retrieve 'artist' field from 'Artist:' in video description
1163             # when it is present on youtube music video
1164             'url': 'https://www.youtube.com/watch?v=k0jLE7tTwjY',
1165             'info_dict': {
1166                 'id': 'k0jLE7tTwjY',
1167                 'ext': 'mp4',
1168                 'title': 'Latch Feat. Sam Smith',
1169                 'description': 'md5:3cb1e8101a7c85fcba9b4fb41b951335',
1170                 'upload_date': '20150110',
1171                 'uploader': 'Various Artists - Topic',
1172                 'uploader_id': 'UCNkEcmYdjrH4RqtNgh7BZ9w',
1173                 'artist': 'Disclosure',
1174                 'track': 'Latch Feat. Sam Smith',
1175                 'album': 'Latch Featuring Sam Smith',
1176                 'release_date': '20121008',
1177                 'release_year': 2012,
1178             },
1179             'params': {
1180                 'skip_download': True,
1181             },
1182         },
1183         {
1184             # Youtube Music Auto-generated description
1185             # handle multiple artists on youtube music video
1186             'url': 'https://www.youtube.com/watch?v=74qn0eJSjpA',
1187             'info_dict': {
1188                 'id': '74qn0eJSjpA',
1189                 'ext': 'mp4',
1190                 'title': 'Eastside',
1191                 'description': 'md5:290516bb73dcbfab0dcc4efe6c3de5f2',
1192                 'upload_date': '20180710',
1193                 'uploader': 'Benny Blanco - Topic',
1194                 'uploader_id': 'UCzqz_ksRu_WkIzmivMdIS7A',
1195                 'artist': 'benny blanco, Halsey, Khalid',
1196                 'track': 'Eastside',
1197                 'album': 'Eastside',
1198                 'release_date': '20180713',
1199                 'release_year': 2018,
1200             },
1201             'params': {
1202                 'skip_download': True,
1203             },
1204         },
1205         {
1206             # Youtube Music Auto-generated description
1207             # handle youtube music video with release_year and no release_date
1208             'url': 'https://www.youtube.com/watch?v=-hcAI0g-f5M',
1209             'info_dict': {
1210                 'id': '-hcAI0g-f5M',
1211                 'ext': 'mp4',
1212                 'title': 'Put It On Me',
1213                 'description': 'md5:f6422397c07c4c907c6638e1fee380a5',
1214                 'upload_date': '20180426',
1215                 'uploader': 'Matt Maeson - Topic',
1216                 'uploader_id': 'UCnEkIGqtGcQMLk73Kp-Q5LQ',
1217                 'artist': 'Matt Maeson',
1218                 'track': 'Put It On Me',
1219                 'album': 'The Hearse',
1220                 'release_date': None,
1221                 'release_year': 2018,
1222             },
1223             'params': {
1224                 'skip_download': True,
1225             },
1226         },
1227         {
1228             'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
1229             'only_matching': True,
1230         },
1231     ]
1232
1233     def __init__(self, *args, **kwargs):
1234         super(YoutubeIE, self).__init__(*args, **kwargs)
1235         self._player_cache = {}
1236
1237     def report_video_info_webpage_download(self, video_id):
1238         """Report attempt to download video info webpage."""
1239         self.to_screen('%s: Downloading video info webpage' % video_id)
1240
1241     def report_information_extraction(self, video_id):
1242         """Report attempt to extract video information."""
1243         self.to_screen('%s: Extracting video information' % video_id)
1244
1245     def report_unavailable_format(self, video_id, format):
1246         """Report that the requested format is not available."""
1247         self.to_screen('%s: Format %s not available' % (video_id, format))
1248
1249     def report_rtmp_download(self):
1250         """Indicate the download will use the RTMP protocol."""
1251         self.to_screen('RTMP download detected')
1252
1253     def _signature_cache_id(self, example_sig):
1254         """Return a string representation of a signature"""
1255         return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
1256
1257     def _extract_signature_function(self, video_id, player_url, example_sig):
1258         id_m = re.match(
1259             r'.*?[-.](?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2,3}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
1260             player_url)
1261         if not id_m:
1262             raise ExtractorError('Cannot identify player %r' % player_url)
1263         player_type = id_m.group('ext')
1264         player_id = id_m.group('id')
1265
1266         # Read from filesystem cache
1267         func_id = '%s_%s_%s' % (
1268             player_type, player_id, self._signature_cache_id(example_sig))
1269         assert os.path.basename(func_id) == func_id
1270
1271         cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
1272         if cache_spec is not None:
1273             return lambda s: ''.join(s[i] for i in cache_spec)
1274
1275         download_note = (
1276             'Downloading player %s' % player_url
1277             if self._downloader.params.get('verbose') else
1278             'Downloading %s player %s' % (player_type, player_id)
1279         )
1280         if player_type == 'js':
1281             code = self._download_webpage(
1282                 player_url, video_id,
1283                 note=download_note,
1284                 errnote='Download of %s failed' % player_url)
1285             res = self._parse_sig_js(code)
1286         elif player_type == 'swf':
1287             urlh = self._request_webpage(
1288                 player_url, video_id,
1289                 note=download_note,
1290                 errnote='Download of %s failed' % player_url)
1291             code = urlh.read()
1292             res = self._parse_sig_swf(code)
1293         else:
1294             assert False, 'Invalid player type %r' % player_type
1295
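        # Derive a cacheable spec by applying the function to a string of
        # unique characters and recording, for each output position, the index
        # of the input character that ends up there; the cached spec can then
        # redo the transformation without re-downloading the player.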
1296         test_string = ''.join(map(compat_chr, range(len(example_sig))))
1297         cache_res = res(test_string)
1298         cache_spec = [ord(c) for c in cache_res]
1299
1300         self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
1301         return res
1302
1303     def _print_sig_code(self, func, example_sig):
1304         def gen_sig_code(idxs):
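            # Collapse runs of consecutive indices (step +1 or -1) into slice
            # expressions and emit single 's[i]' lookups for isolated indices.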
1305             def _genslice(start, end, step):
1306                 starts = '' if start == 0 else str(start)
1307                 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
1308                 steps = '' if step == 1 else (':%d' % step)
1309                 return 's[%s%s%s]' % (starts, ends, steps)
1310
1311             step = None
1312             # Silence pyflakes warnings - start will be set when step is set
1313             start = '(Never used)'
1314             for i, prev in zip(idxs[1:], idxs[:-1]):
1315                 if step is not None:
1316                     if i - prev == step:
1317                         continue
1318                     yield _genslice(start, prev, step)
1319                     step = None
1320                     continue
1321                 if i - prev in [-1, 1]:
1322                     step = i - prev
1323                     start = prev
1324                     continue
1325                 else:
1326                     yield 's[%d]' % prev
1327             if step is None:
1328                 yield 's[%d]' % i
1329             else:
1330                 yield _genslice(start, i, step)
1331
1332         test_string = ''.join(map(compat_chr, range(len(example_sig))))
1333         cache_res = func(test_string)
1334         cache_spec = [ord(c) for c in cache_res]
1335         expr_code = ' + '.join(gen_sig_code(cache_spec))
1336         signature_id_tuple = '(%s)' % (
1337             ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
1338         code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
1339                 '    return %s\n') % (signature_id_tuple, expr_code)
1340         self.to_screen('Extracted signature function:\n' + code)
1341
1342     def _parse_sig_js(self, jscode):
1343         funcname = self._search_regex(
1344             (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1345              r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1346              r'\b(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
1347              r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
1348              # Obsolete patterns
1349              r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1350              r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
1351              r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1352              r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1353              r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1354              r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1355              r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1356              r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
1357             jscode, 'Initial JS player signature function name', group='sig')
1358
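        # Extract the named function from the player code and evaluate it with
        # the bundled JS interpreter; it takes the scrambled signature as its
        # only argument.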
1359         jsi = JSInterpreter(jscode)
1360         initial_function = jsi.extract_function(funcname)
1361         return lambda s: initial_function([s])
1362
1363     def _parse_sig_swf(self, file_contents):
1364         swfi = SWFInterpreter(file_contents)
1365         TARGET_CLASSNAME = 'SignatureDecipher'
1366         searched_class = swfi.extract_class(TARGET_CLASSNAME)
1367         initial_function = swfi.extract_function(searched_class, 'decipher')
1368         return lambda s: initial_function([s])
1369
1370     def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
1371         """Turn the encrypted s field into a working signature"""
1372
1373         if player_url is None:
1374             raise ExtractorError('Cannot decrypt signature without player_url')
1375
1376         if player_url.startswith('//'):
1377             player_url = 'https:' + player_url
1378         elif not re.match(r'https?://', player_url):
1379             player_url = compat_urlparse.urljoin(
1380                 'https://www.youtube.com', player_url)
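        # Signature functions are cached per (player URL, signature layout), so
        # each player is downloaded and parsed at most once per layout.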
1381         try:
1382             player_id = (player_url, self._signature_cache_id(s))
1383             if player_id not in self._player_cache:
1384                 func = self._extract_signature_function(
1385                     video_id, player_url, s
1386                 )
1387                 self._player_cache[player_id] = func
1388             func = self._player_cache[player_id]
1389             if self._downloader.params.get('youtube_print_sig_code'):
1390                 self._print_sig_code(func, s)
1391             return func(s)
1392         except Exception as e:
1393             tb = traceback.format_exc()
1394             raise ExtractorError(
1395                 'Signature extraction failed: ' + tb, cause=e)
1396
1397     def _get_subtitles(self, video_id, webpage):
1398         try:
1399             subs_doc = self._download_xml(
1400                 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
1401                 video_id, note=False)
1402         except ExtractorError as err:
1403             self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
1404             return {}
1405
1406         sub_lang_list = {}
1407         for track in subs_doc.findall('track'):
1408             lang = track.attrib['lang_code']
1409             if lang in sub_lang_list:
1410                 continue
1411             sub_formats = []
1412             for ext in self._SUBTITLE_FORMATS:
1413                 params = compat_urllib_parse_urlencode({
1414                     'lang': lang,
1415                     'v': video_id,
1416                     'fmt': ext,
1417                     'name': track.attrib['name'].encode('utf-8'),
1418                 })
1419                 sub_formats.append({
1420                     'url': 'https://www.youtube.com/api/timedtext?' + params,
1421                     'ext': ext,
1422                 })
1423             sub_lang_list[lang] = sub_formats
1424         if not sub_lang_list:
1425             self._downloader.report_warning('video doesn\'t have subtitles')
1426             return {}
1427         return sub_lang_list
1428
1429     def _get_ytplayer_config(self, video_id, webpage):
1430         patterns = (
1431             # User data may contain arbitrary character sequences that break
1432             # regex-based JSON extraction, e.g. if '};' appears in it the second
1433             # regex won't capture the whole JSON. Work around this by trying the
1434             # more specific regex first; proper quoted-string handling, to be
1435             # implemented in the future, will replace this workaround (see
1436             # https://github.com/ytdl-org/youtube-dl/issues/7468,
1437             # https://github.com/ytdl-org/youtube-dl/pull/7599)
1438             r';ytplayer\.config\s*=\s*({.+?});ytplayer',
1439             r';ytplayer\.config\s*=\s*({.+?});',
1440         )
1441         config = self._search_regex(
1442             patterns, webpage, 'ytplayer.config', default=None)
1443         if config:
1444             return self._parse_json(
1445                 uppercase_escape(config), video_id, fatal=False)
1446
1447     def _get_automatic_captions(self, video_id, webpage):
1448         """We need the webpage for getting the captions URL, so pass it as an
1449            argument to speed up the process."""
1450         self.to_screen('%s: Looking for automatic captions' % video_id)
1451         player_config = self._get_ytplayer_config(video_id, webpage)
1452         err_msg = 'Couldn\'t find automatic captions for %s' % video_id
1453         if not player_config:
1454             self._downloader.report_warning(err_msg)
1455             return {}
1456         try:
1457             args = player_config['args']
1458             caption_url = args.get('ttsurl')
1459             if caption_url:
1460                 timestamp = args['timestamp']
1461                 # We get the available subtitles
1462                 list_params = compat_urllib_parse_urlencode({
1463                     'type': 'list',
1464                     'tlangs': 1,
1465                     'asrs': 1,
1466                 })
1467                 list_url = caption_url + '&' + list_params
1468                 caption_list = self._download_xml(list_url, video_id)
1469                 original_lang_node = caption_list.find('track')
1470                 if original_lang_node is None:
1471                     self._downloader.report_warning('Video doesn\'t have automatic captions')
1472                     return {}
1473                 original_lang = original_lang_node.attrib['lang_code']
1474                 caption_kind = original_lang_node.attrib.get('kind', '')
1475
1476                 sub_lang_list = {}
1477                 for lang_node in caption_list.findall('target'):
1478                     sub_lang = lang_node.attrib['lang_code']
1479                     sub_formats = []
1480                     for ext in self._SUBTITLE_FORMATS:
1481                         params = compat_urllib_parse_urlencode({
1482                             'lang': original_lang,
1483                             'tlang': sub_lang,
1484                             'fmt': ext,
1485                             'ts': timestamp,
1486                             'kind': caption_kind,
1487                         })
1488                         sub_formats.append({
1489                             'url': caption_url + '&' + params,
1490                             'ext': ext,
1491                         })
1492                     sub_lang_list[sub_lang] = sub_formats
1493                 return sub_lang_list
1494
1495             def make_captions(sub_url, sub_langs):
1496                 parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
1497                 caption_qs = compat_parse_qs(parsed_sub_url.query)
1498                 captions = {}
1499                 for sub_lang in sub_langs:
1500                     sub_formats = []
1501                     for ext in self._SUBTITLE_FORMATS:
1502                         caption_qs.update({
1503                             'tlang': [sub_lang],
1504                             'fmt': [ext],
1505                         })
1506                         sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
1507                             query=compat_urllib_parse_urlencode(caption_qs, True)))
1508                         sub_formats.append({
1509                             'url': sub_url,
1510                             'ext': ext,
1511                         })
1512                     captions[sub_lang] = sub_formats
1513                 return captions
1514
1515             # New captions format as of 22.06.2017
1516             player_response = args.get('player_response')
1517             if player_response and isinstance(player_response, compat_str):
1518                 player_response = self._parse_json(
1519                     player_response, video_id, fatal=False)
1520                 if player_response:
1521                     renderer = player_response['captions']['playerCaptionsTracklistRenderer']
1522                     base_url = renderer['captionTracks'][0]['baseUrl']
1523                     sub_lang_list = []
1524                     for lang in renderer['translationLanguages']:
1525                         lang_code = lang.get('languageCode')
1526                         if lang_code:
1527                             sub_lang_list.append(lang_code)
1528                     return make_captions(base_url, sub_lang_list)
1529
1530             # Some videos don't provide ttsurl but rather caption_tracks and
1531             # caption_translation_languages (e.g. 20LmZk1hakA)
1532             # No longer used as of 22.06.2017
1533             caption_tracks = args['caption_tracks']
1534             caption_translation_languages = args['caption_translation_languages']
1535             caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
1536             sub_lang_list = []
1537             for lang in caption_translation_languages.split(','):
1538                 lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
1539                 sub_lang = lang_qs.get('lc', [None])[0]
1540                 if sub_lang:
1541                     sub_lang_list.append(sub_lang)
1542             return make_captions(caption_url, sub_lang_list)
1543         # An extractor error can be raised by the download process if there are
1544         # no automatic captions but there are subtitles
1545         except (KeyError, IndexError, ExtractorError):
1546             self._downloader.report_warning(err_msg)
1547             return {}
1548
1549     def _mark_watched(self, video_id, video_info, player_response):
1550         playback_url = url_or_none(try_get(
1551             player_response,
1552             lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
1553             video_info, lambda x: x['videostats_playback_base_url'][0]))
1554         if not playback_url:
1555             return
1556         parsed_playback_url = compat_urlparse.urlparse(playback_url)
1557         qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1558
1559         # The cpn generation algorithm is reverse engineered from base.js.
1560         # In fact it works even with a dummy cpn.
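        # The cpn (commonly referred to as the client playback nonce) is 16
        # characters drawn from a 64-symbol alphabet, i.e. 6 random bits each.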
1561         CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1562         cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
1563
1564         qs.update({
1565             'ver': ['2'],
1566             'cpn': [cpn],
1567         })
1568         playback_url = compat_urlparse.urlunparse(
1569             parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1570
1571         self._download_webpage(
1572             playback_url, video_id, 'Marking watched',
1573             'Unable to mark watched', fatal=False)
1574
1575     @staticmethod
1576     def _extract_urls(webpage):
1577         # Embedded YouTube player
1578         entries = [
1579             unescapeHTML(mobj.group('url'))
1580             for mobj in re.finditer(r'''(?x)
1581             (?:
1582                 <iframe[^>]+?src=|
1583                 data-video-url=|
1584                 <embed[^>]+?src=|
1585                 embedSWF\(?:\s*|
1586                 <object[^>]+data=|
1587                 new\s+SWFObject\(
1588             )
1589             (["\'])
1590                 (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
1591                 (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
1592             \1''', webpage)]
1593
1594         # lazyYT YouTube embed
1595         entries.extend(list(map(
1596             unescapeHTML,
1597             re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
1598
1599         # Wordpress "YouTube Video Importer" plugin
1600         matches = re.findall(r'''(?x)<div[^>]+
1601             class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
1602             data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
1603         entries.extend(m[-1] for m in matches)
1604
1605         return entries
1606
1607     @staticmethod
1608     def _extract_url(webpage):
1609         urls = YoutubeIE._extract_urls(webpage)
1610         return urls[0] if urls else None
1611
1612     @classmethod
1613     def extract_id(cls, url):
1614         mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1615         if mobj is None:
1616             raise ExtractorError('Invalid URL: %s' % url)
1617         video_id = mobj.group(2)
1618         return video_id
1619
1620     @staticmethod
1621     def _extract_chapters(description, duration):
1622         if not description:
1623             return None
1624         chapter_lines = re.findall(
1625             r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
1626             description)
1627         if not chapter_lines:
1628             return None
1629         chapters = []
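        # Each chapter runs from its own timestamp to the start of the next
        # chapter, or to the end of the video for the last one; entries with
        # implausible timestamps are skipped or clamped to the video duration.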
1630         for next_num, (chapter_line, time_point) in enumerate(
1631                 chapter_lines, start=1):
1632             start_time = parse_duration(time_point)
1633             if start_time is None:
1634                 continue
1635             if start_time > duration:
1636                 break
1637             end_time = (duration if next_num == len(chapter_lines)
1638                         else parse_duration(chapter_lines[next_num][1]))
1639             if end_time is None:
1640                 continue
1641             if end_time > duration:
1642                 end_time = duration
1643             if start_time > end_time:
1644                 break
1645             chapter_title = re.sub(
1646                 r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
1647             chapter_title = re.sub(r'\s+', ' ', chapter_title)
1648             chapters.append({
1649                 'start_time': start_time,
1650                 'end_time': end_time,
1651                 'title': chapter_title,
1652             })
1653         return chapters
1654
1655     def _real_extract(self, url):
1656         url, smuggled_data = unsmuggle_url(url, {})
1657
1658         proto = (
1659             'http' if self._downloader.params.get('prefer_insecure', False)
1660             else 'https')
1661
1662         start_time = None
1663         end_time = None
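        # Start and end times may be specified in either the URL fragment or
        # the query string ('t', 'start' and 'end' parameters).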
1664         parsed_url = compat_urllib_parse_urlparse(url)
1665         for component in [parsed_url.fragment, parsed_url.query]:
1666             query = compat_parse_qs(component)
1667             if start_time is None and 't' in query:
1668                 start_time = parse_duration(query['t'][0])
1669             if start_time is None and 'start' in query:
1670                 start_time = parse_duration(query['start'][0])
1671             if end_time is None and 'end' in query:
1672                 end_time = parse_duration(query['end'][0])
1673
1674         # Extract the original video URL from a redirect URL (e.g. age verification) using the next_url parameter
1675         mobj = re.search(self._NEXT_URL_RE, url)
1676         if mobj:
1677             url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1678         video_id = self.extract_id(url)
1679
1680         # Get video webpage
1681         url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1682         video_webpage = self._download_webpage(url, video_id)
1683
1684         # Attempt to extract SWF player URL
1685         mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1686         if mobj is not None:
1687             player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1688         else:
1689             player_url = None
1690
1691         dash_mpds = []
1692
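        # Collect DASH manifest URLs from both the legacy video info and the
        # player response, deduplicating them along the way.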
1693         def add_dash_mpd(video_info):
1694             dash_mpd = video_info.get('dashmpd')
1695             if dash_mpd and dash_mpd[0] not in dash_mpds:
1696                 dash_mpds.append(dash_mpd[0])
1697
1698         def add_dash_mpd_pr(pl_response):
1699             dash_mpd = url_or_none(try_get(
1700                 pl_response, lambda x: x['streamingData']['dashManifestUrl'],
1701                 compat_str))
1702             if dash_mpd and dash_mpd not in dash_mpds:
1703                 dash_mpds.append(dash_mpd)
1704
1705         is_live = None
1706         view_count = None
1707
1708         def extract_view_count(v_info):
1709             return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
1710
1711         def extract_token(v_info):
1712             return dict_get(v_info, ('account_playback_token', 'accountPlaybackToken', 'token'))
1713
1714         def extract_player_response(player_response, video_id):
1715             pl_response = str_or_none(player_response)
1716             if not pl_response:
1717                 return
1718             pl_response = self._parse_json(pl_response, video_id, fatal=False)
1719             if isinstance(pl_response, dict):
1720                 add_dash_mpd_pr(pl_response)
1721                 return pl_response
1722
1723         player_response = {}
1724
1725         # Get video info
1726         embed_webpage = None
1727         if re.search(r'player-age-gate-content">', video_webpage) is not None:
1728             age_gate = True
1729             # We simulate access to the video from www.youtube.com/v/{video_id};
1730             # this page can be viewed without logging in to YouTube
1731             url = proto + '://www.youtube.com/embed/%s' % video_id
1732             embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1733             data = compat_urllib_parse_urlencode({
1734                 'video_id': video_id,
1735                 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1736                 'sts': self._search_regex(
1737                     r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1738             })
1739             video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1740             video_info_webpage = self._download_webpage(
1741                 video_info_url, video_id,
1742                 note='Refetching age-gated info webpage',
1743                 errnote='unable to download video info webpage')
1744             video_info = compat_parse_qs(video_info_webpage)
1745             pl_response = video_info.get('player_response', [None])[0]
1746             player_response = extract_player_response(pl_response, video_id)
1747             add_dash_mpd(video_info)
1748             view_count = extract_view_count(video_info)
1749         else:
1750             age_gate = False
1751             video_info = None
1752             sts = None
1753             # Try looking directly into the video webpage
1754             ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1755             if ytplayer_config:
1756                 args = ytplayer_config['args']
1757                 if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
1758                     # Convert to the same format returned by compat_parse_qs
1759                     video_info = dict((k, [v]) for k, v in args.items())
1760                     add_dash_mpd(video_info)
1761                 # The rental video is not rented but a preview is available (e.g.
1762                 # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
1763                 # https://github.com/ytdl-org/youtube-dl/issues/10532)
1764                 if not video_info and args.get('ypc_vid'):
1765                     return self.url_result(
1766                         args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
1767                 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1768                     is_live = True
1769                 sts = ytplayer_config.get('sts')
1770                 if not player_response:
1771                     player_response = extract_player_response(args.get('player_response'), video_id)
1772             if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1773                 add_dash_mpd_pr(player_response)
1774                 # We also try looking in get_video_info since it may contain a different dashmpd
1775                 # URL that points to a DASH manifest with a possibly different itag set (some itags
1776                 # are missing from the DASH manifest pointed to by the webpage's dashmpd, others from
1777                 # the DASH manifest pointed to by get_video_info's dashmpd).
1778                 # The general idea is to take the union of itags of both DASH manifests (for an example
1779                 # of a video with such 'manifest behavior' see https://github.com/ytdl-org/youtube-dl/issues/6093)
1780                 self.report_video_info_webpage_download(video_id)
1781                 for el in ('embedded', 'detailpage', 'vevo', ''):
1782                     query = {
1783                         'video_id': video_id,
1784                         'ps': 'default',
1785                         'eurl': '',
1786                         'gl': 'US',
1787                         'hl': 'en',
1788                     }
1789                     if el:
1790                         query['el'] = el
1791                     if sts:
1792                         query['sts'] = sts
1793                     try:
1794                         video_info_webpage = self._download_webpage(
1795                             '%s://www.youtube.com/get_video_info' % proto,
1796                             video_id, note=False,
1797                             errnote='unable to download video info webpage',
1798                             query=query)
1799                     except ExtractorError as e:
1800                         # Skip further retries if we get HTTP 429 since solving the
1801                         # captcha only unblocks access to the website but
1802                         # not to the get_video_info endpoint
1803                         if isinstance(e.cause, compat_HTTPError) and e.cause.code == 429:
1804                             break
1805                         continue
1806                     if not video_info_webpage:
1807                         continue
1808                     get_video_info = compat_parse_qs(video_info_webpage)
1809                     if not player_response:
1810                         pl_response = get_video_info.get('player_response', [None])[0]
1811                         player_response = extract_player_response(pl_response, video_id)
1812                     add_dash_mpd(get_video_info)
1813                     if view_count is None:
1814                         view_count = extract_view_count(get_video_info)
1815                     if not video_info:
1816                         video_info = get_video_info
1817                     get_token = extract_token(get_video_info)
1818                     if get_token:
1819                         # Different get_video_info requests may report different results, e.g.
1820                         # some may report the video as unavailable while others serve it without
1821                         # any complaint (see https://github.com/ytdl-org/youtube-dl/issues/7362:
1822                         # the original webpage as well as the el=info and el=embedded get_video_info
1823                         # requests report the video as unavailable due to geo restriction while
1824                         # el=detailpage succeeds and returns valid data). This is probably
1825                         # due to YouTube countermeasures against IP ranges of hosting providers.
1826                         # Work around this by preferring the first successful video_info that
1827                         # contains the token, if no such video_info has been found yet.
1828                         token = extract_token(video_info)
1829                         if not token:
1830                             video_info = get_video_info
1831                         break
1832
1833         def extract_unavailable_message():
1834             messages = []
1835             for tag, kind in (('h1', 'message'), ('div', 'submessage')):
1836                 msg = self._html_search_regex(
1837                     r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
1838                     video_webpage, 'unavailable %s' % kind, default=None)
1839                 if msg:
1840                     messages.append(msg)
1841             if messages:
1842                 return '\n'.join(messages)
1843
1844         if not video_info and not player_response:
1845             unavailable_message = extract_unavailable_message()
1846             if not unavailable_message:
1847                 unavailable_message = 'Unable to extract video data'
1848             raise ExtractorError(
1849                 'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
1850
1851         if not isinstance(video_info, dict):
1852             video_info = {}
1853
1854         video_details = try_get(
1855             player_response, lambda x: x['videoDetails'], dict) or {}
1856
1857         video_title = video_info.get('title', [None])[0] or video_details.get('title')
1858         if not video_title:
1859             self._downloader.report_warning('Unable to extract video title')
1860             video_title = '_'
1861
1862         description_original = video_description = get_element_by_id('eow-description', video_webpage)
1863         if video_description:
1864
1865             def replace_url(m):
1866                 redir_url = compat_urlparse.urljoin(url, m.group(1))
1867                 parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
1868                 if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
1869                     qs = compat_parse_qs(parsed_redir_url.query)
1870                     q = qs.get('q')
1871                     if q and q[0]:
1872                         return q[0]
1873                 return redir_url
1874
1875             description_original = video_description = re.sub(r'''(?x)
1876                 <a\s+
1877                     (?:[a-zA-Z-]+="[^"]*"\s+)*?
1878                     (?:title|href)="([^"]+)"\s+
1879                     (?:[a-zA-Z-]+="[^"]*"\s+)*?
1880                     class="[^"]*"[^>]*>
1881                 [^<]+\.{3}\s*
1882                 </a>
1883             ''', replace_url, video_description)
1884             video_description = clean_html(video_description)
1885         else:
1886             video_description = self._html_search_meta('description', video_webpage) or video_details.get('shortDescription')
1887
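        # Multi-camera ("multifeed") videos list their individual feeds in the
        # metadata; unless --no-playlist is given, each feed is returned as a
        # separate playlist entry handled again by this extractor.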
1888         if not smuggled_data.get('force_singlefeed', False):
1889             if not self._downloader.params.get('noplaylist'):
1890                 multifeed_metadata_list = try_get(
1891                     player_response,
1892                     lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
1893                     compat_str) or try_get(
1894                     video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
1895                 if multifeed_metadata_list:
1896                     entries = []
1897                     feed_ids = []
1898                     for feed in multifeed_metadata_list.split(','):
1899                         # Unquote should take place before split on comma (,) since textual
1900                         # fields may contain comma as well (see
1901                         # https://github.com/ytdl-org/youtube-dl/issues/8536)
1902                         feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
1903                         entries.append({
1904                             '_type': 'url_transparent',
1905                             'ie_key': 'Youtube',
1906                             'url': smuggle_url(
1907                                 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1908                                 {'force_singlefeed': True}),
1909                             'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1910                         })
1911                         feed_ids.append(feed_data['id'][0])
1912                     self.to_screen(
1913                         'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1914                         % (', '.join(feed_ids), video_id))
1915                     return self.playlist_result(entries, video_id, video_title, video_description)
1916             else:
1917                 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1918
1919         if view_count is None:
1920             view_count = extract_view_count(video_info)
1921         if view_count is None and video_details:
1922             view_count = int_or_none(video_details.get('viewCount'))
1923
1924         if is_live is None:
1925             is_live = bool_or_none(video_details.get('isLive'))
1926
1927         # Check for "rental" videos
1928         if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1929             raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
1930
1931         def _extract_filesize(media_url):
1932             return int_or_none(self._search_regex(
1933                 r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
1934
1935         streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
1936         streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
1937
1938         if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1939             self.report_rtmp_download()
1940             formats = [{
1941                 'format_id': '_rtmp',
1942                 'protocol': 'rtmp',
1943                 'url': video_info['conn'][0],
1944                 'player_url': player_url,
1945             }]
1946         elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
1947             encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1948             if 'rtmpe%3Dyes' in encoded_url_map:
1949                 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
1950             formats = []
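            # Gather per-itag metadata (resolution, filesize, fps, bitrate, ...)
            # from fmt_list and the player response formats; it is merged into
            # the extracted formats below when they lack this information.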
1951             formats_spec = {}
1952             fmt_list = video_info.get('fmt_list', [''])[0]
1953             if fmt_list:
1954                 for fmt in fmt_list.split(','):
1955                     spec = fmt.split('/')
1956                     if len(spec) > 1:
1957                         width_height = spec[1].split('x')
1958                         if len(width_height) == 2:
1959                             formats_spec[spec[0]] = {
1960                                 'resolution': spec[1],
1961                                 'width': int_or_none(width_height[0]),
1962                                 'height': int_or_none(width_height[1]),
1963                             }
1964             for fmt in streaming_formats:
1965                 itag = str_or_none(fmt.get('itag'))
1966                 if not itag:
1967                     continue
1968                 quality = fmt.get('quality')
1969                 quality_label = fmt.get('qualityLabel') or quality
1970                 formats_spec[itag] = {
1971                     'asr': int_or_none(fmt.get('audioSampleRate')),
1972                     'filesize': int_or_none(fmt.get('contentLength')),
1973                     'format_note': quality_label,
1974                     'fps': int_or_none(fmt.get('fps')),
1975                     'height': int_or_none(fmt.get('height')),
1976                     # bitrate for itag 43 is always 2147483647
1977                     'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
1978                     'width': int_or_none(fmt.get('width')),
1979                 }
1980
1981             for fmt in streaming_formats:
1982                 if fmt.get('drm_families'):
1983                     continue
1984                 url = url_or_none(fmt.get('url'))
1985
1986                 if not url:
1987                     cipher = fmt.get('cipher')
1988                     if not cipher:
1989                         continue
1990                     url_data = compat_parse_qs(cipher)
1991                     url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
1992                     if not url:
1993                         continue
1994                 else:
1995                     cipher = None
1996                     url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
1997
1998                 stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
1999                 # Unsupported FORMAT_STREAM_TYPE_OTF
2000                 if stream_type == 3:
2001                     continue
2002
2003                 format_id = fmt.get('itag') or url_data['itag'][0]
2004                 if not format_id:
2005                     continue
2006                 format_id = compat_str(format_id)
2007
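                # Ciphered formats carry a scrambled signature ('s') that has to
                # be deciphered with the JS/SWF player and appended to the URL
                # as 'signature' (or the parameter name given by 'sp').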
2008                 if cipher:
2009                     if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
2010                         ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
2011                         jsplayer_url_json = self._search_regex(
2012                             ASSETS_RE,
2013                             embed_webpage if age_gate else video_webpage,
2014                             'JS player URL (1)', default=None)
2015                         if not jsplayer_url_json and not age_gate:
2016                             # We need the embed webpage after all
2017                             if embed_webpage is None:
2018                                 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
2019                                 embed_webpage = self._download_webpage(
2020                                     embed_url, video_id, 'Downloading embed webpage')
2021                             jsplayer_url_json = self._search_regex(
2022                                 ASSETS_RE, embed_webpage, 'JS player URL')
2023
2024                         player_url = json.loads(jsplayer_url_json)
2025                         if player_url is None:
2026                             player_url_json = self._search_regex(
2027                                 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
2028                                 video_webpage, 'age gate player URL')
2029                             player_url = json.loads(player_url_json)
2030
2031                     if 'sig' in url_data:
2032                         url += '&signature=' + url_data['sig'][0]
2033                     elif 's' in url_data:
2034                         encrypted_sig = url_data['s'][0]
2035
2036                         if self._downloader.params.get('verbose'):
2037                             if player_url is None:
2038                                 player_version = 'unknown'
2039                                 player_desc = 'unknown'
2040                             else:
2041                                 if player_url.endswith('swf'):
2042                                     player_version = self._search_regex(
2043                                         r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
2044                                         'flash player', fatal=False)
2045                                     player_desc = 'flash player %s' % player_version
2046                                 else:
2047                                     player_version = self._search_regex(
2048                                         [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
2049                                          r'(?:www|player(?:_ias)?)[-.]([^/]+)(?:/[a-z]{2,3}_[A-Z]{2})?/base\.js'],
2050                                         player_url,
2051                                         'html5 player', fatal=False)
2052                                     player_desc = 'html5 player %s' % player_version
2053
2054                             parts_sizes = self._signature_cache_id(encrypted_sig)
2055                             self.to_screen('{%s} signature length %s, %s' %
2056                                            (format_id, parts_sizes, player_desc))
2057
2058                         signature = self._decrypt_signature(
2059                             encrypted_sig, video_id, player_url, age_gate)
2060                         sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
2061                         url += '&%s=%s' % (sp, signature)
2062                 if 'ratebypass' not in url:
2063                     url += '&ratebypass=yes'
2064
2065                 dct = {
2066                     'format_id': format_id,
2067                     'url': url,
2068                     'player_url': player_url,
2069                 }
2070                 if format_id in self._formats:
2071                     dct.update(self._formats[format_id])
2072                 if format_id in formats_spec:
2073                     dct.update(formats_spec[format_id])
2074
2075                 # Some itags are not included in the DASH manifest, so the corresponding formats
2076                 # will lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
2077                 # Try to extract metadata from the url_encoded_fmt_stream_map entry instead.
2078                 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
2079                 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
2080
2081                 if width is None:
2082                     width = int_or_none(fmt.get('width'))
2083                 if height is None:
2084                     height = int_or_none(fmt.get('height'))
2085
2086                 filesize = int_or_none(url_data.get(
2087                     'clen', [None])[0]) or _extract_filesize(url)
2088
2089                 quality = url_data.get('quality', [None])[0] or fmt.get('quality')
2090                 quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
2091
2092                 tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
2093                        or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
2094                 fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
2095
2096                 more_fields = {
2097                     'filesize': filesize,
2098                     'tbr': tbr,
2099                     'width': width,
2100                     'height': height,
2101                     'fps': fps,
2102                     'format_note': quality_label or quality,
2103                 }
2104                 for key, value in more_fields.items():
2105                     if value:
2106                         dct[key] = value
2107                 type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
2108                 if type_:
2109                     type_split = type_.split(';')
2110                     kind_ext = type_split[0].split('/')
2111                     if len(kind_ext) == 2:
2112                         kind, _ = kind_ext
2113                         dct['ext'] = mimetype2ext(type_split[0])
2114                         if kind in ('audio', 'video'):
2115                             codecs = None
2116                             for mobj in re.finditer(
2117                                     r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
2118                                 if mobj.group('key') == 'codecs':
2119                                     codecs = mobj.group('val')
2120                                     break
2121                             if codecs:
2122                                 dct.update(parse_codecs(codecs))
2123                 if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
2124                     dct['downloader_options'] = {
2125                         # Youtube throttles chunks >~10M
2126                         'http_chunk_size': 10485760,
2127                     }
2128                 formats.append(dct)
2129         else:
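            # Neither RTMP nor progressive/adaptive formats were found: fall
            # back to the HLS manifest (used for live streams) or surface the
            # reason the video is unavailable.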
2130             manifest_url = (
2131                 url_or_none(try_get(
2132                     player_response,
2133                     lambda x: x['streamingData']['hlsManifestUrl'],
2134                     compat_str))
2135                 or url_or_none(try_get(
2136                     video_info, lambda x: x['hlsvp'][0], compat_str)))
2137             if manifest_url:
2138                 formats = []
2139                 m3u8_formats = self._extract_m3u8_formats(
2140                     manifest_url, video_id, 'mp4', fatal=False)
2141                 for a_format in m3u8_formats:
2142                     itag = self._search_regex(
2143                         r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
2144                     if itag:
2145                         a_format['format_id'] = itag
2146                         if itag in self._formats:
2147                             dct = self._formats[itag].copy()
2148                             dct.update(a_format)
2149                             a_format = dct
2150                     a_format['player_url'] = player_url
2151                     # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
2152                     a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
2153                     formats.append(a_format)
2154             else:
2155                 error_message = extract_unavailable_message()
2156                 if not error_message:
2157                     error_message = clean_html(try_get(
2158                         player_response, lambda x: x['playabilityStatus']['reason'],
2159                         compat_str))
2160                 if not error_message:
2161                     error_message = clean_html(
2162                         try_get(video_info, lambda x: x['reason'][0], compat_str))
2163                 if error_message:
2164                     raise ExtractorError(error_message, expected=True)
2165                 raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
2166
2167         # uploader
2168         video_uploader = try_get(
2169             video_info, lambda x: x['author'][0],
2170             compat_str) or str_or_none(video_details.get('author'))
2171         if video_uploader:
2172             video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
2173         else:
2174             self._downloader.report_warning('unable to extract uploader name')
2175
2176         # uploader_id
2177         video_uploader_id = None
2178         video_uploader_url = None
2179         mobj = re.search(
2180             r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
2181             video_webpage)
2182         if mobj is not None:
2183             video_uploader_id = mobj.group('uploader_id')
2184             video_uploader_url = mobj.group('uploader_url')
2185         else:
2186             self._downloader.report_warning('unable to extract uploader nickname')
2187
2188         channel_id = (
2189             str_or_none(video_details.get('channelId'))
2190             or self._html_search_meta(
2191                 'channelId', video_webpage, 'channel id', default=None)
2192             or self._search_regex(
2193                 r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
2194                 video_webpage, 'channel id', default=None, group='id'))
2195         channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
2196
2197         # thumbnail image
2198         # We try first to get a high quality image:
2199         m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
2200                             video_webpage, re.DOTALL)
2201         if m_thumb is not None:
2202             video_thumbnail = m_thumb.group(1)
2203         elif 'thumbnail_url' not in video_info:
2204             self._downloader.report_warning('unable to extract video thumbnail')
2205             video_thumbnail = None
2206         else:   # don't panic if we can't find it
2207             video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
2208
2209         # upload date
2210         upload_date = self._html_search_meta(
2211             'datePublished', video_webpage, 'upload date', default=None)
2212         if not upload_date:
2213             upload_date = self._search_regex(
2214                 [r'(?s)id="eow-date.*?>(.*?)</span>',
2215                  r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
2216                 video_webpage, 'upload date', default=None)
2217         upload_date = unified_strdate(upload_date)
2218
2219         video_license = self._html_search_regex(
2220             r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
2221             video_webpage, 'license', default=None)
2222
2223         m_music = re.search(
2224             r'''(?x)
2225                 <h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
2226                 <ul[^>]*>\s*
2227                 <li>(?P<title>.+?)
2228                 by (?P<creator>.+?)
2229                 (?:
2230                     \(.+?\)|
2231                     <a[^>]*
2232                         (?:
2233                             \bhref=["\']/red[^>]*>|             # drop possible
2234                             >\s*Listen ad-free with YouTube Red # YouTube Red ad
2235                         )
2236                     .*?
2237                 )?</li
2238             ''',
2239             video_webpage)
2240         if m_music:
2241             video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
2242             video_creator = clean_html(m_music.group('creator'))
2243         else:
2244             video_alt_title = video_creator = None
2245
2246         def extract_meta(field):
2247             return self._html_search_regex(
2248                 r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
2249                 video_webpage, field, default=None)
2250
2251         track = extract_meta('Song')
2252         artist = extract_meta('Artist')
2253         album = extract_meta('Album')
2254
2255         # Youtube Music Auto-generated description
2256         release_date = release_year = None
2257         if video_description:
2258             mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
2259             if mobj:
2260                 if not track:
2261                     track = mobj.group('track').strip()
2262                 if not artist:
2263                     artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
2264                 if not album:
2265                     album = mobj.group('album').strip()
2266                 release_year = mobj.group('release_year')
2267                 release_date = mobj.group('release_date')
2268                 if release_date:
2269                     release_date = release_date.replace('-', '')
2270                     if not release_year:
2271                         release_year = int(release_date[:4])
2272                 if release_year:
2273                     release_year = int(release_year)
2274
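        # Illustrative sketch of the auto-generated description layout the regex
        # above targets (all values below are made up):
        #
        #   Provided to YouTube by Some Label
        #
        #   Some Track · Some Artist
        #
        #   Some Album
        #
        #   ℗ 2019 Some Label
        #
        #   Released on: 2019-05-01
        #
        #   Artist: Some Artist
        #
        # which would yield track='Some Track', artist='Some Artist',
        # album='Some Album', release_year=2019 and release_date='20190501'.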
2275         m_episode = re.search(
2276             r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
2277             video_webpage)
2278         if m_episode:
2279             series = unescapeHTML(m_episode.group('series'))
2280             season_number = int(m_episode.group('season'))
2281             episode_number = int(m_episode.group('episode'))
2282         else:
2283             series = season_number = episode_number = None
2284
2285         m_cat_container = self._search_regex(
2286             r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
2287             video_webpage, 'categories', default=None)
2288         if m_cat_container:
2289             category = self._html_search_regex(
2290                 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
2291                 default=None)
2292             video_categories = None if category is None else [category]
2293         else:
2294             video_categories = None
2295
2296         video_tags = [
2297             unescapeHTML(m.group('content'))
2298             for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
2299
2300         def _extract_count(count_name):
2301             return str_to_int(self._search_regex(
2302                 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
2303                 % re.escape(count_name),
2304                 video_webpage, count_name, default=None))
2305
2306         like_count = _extract_count('like')
2307         dislike_count = _extract_count('dislike')
2308
2309         if view_count is None:
2310             view_count = str_to_int(self._search_regex(
2311                 r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
2312                 'view count', default=None))
2313
2314         average_rating = (
2315             float_or_none(video_details.get('averageRating'))
2316             or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
2317
2318         # subtitles
2319         video_subtitles = self.extract_subtitles(video_id, video_webpage)
2320         automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
2321
2322         video_duration = try_get(
2323             video_info, lambda x: int_or_none(x['length_seconds'][0]))
2324         if not video_duration:
2325             video_duration = int_or_none(video_details.get('lengthSeconds'))
2326         if not video_duration:
2327             video_duration = parse_duration(self._html_search_meta(
2328                 'duration', video_webpage, 'video duration'))
2329
2330         # annotations
2331         video_annotations = None
2332         if self._downloader.params.get('writeannotations', False):
2333             xsrf_token = self._search_regex(
2334                 r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
2335                 video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
2336             invideo_url = try_get(
2337                 player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
2338             if xsrf_token and invideo_url:
2339                 xsrf_field_name = self._search_regex(
2340                     r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
2341                     video_webpage, 'xsrf field name',
2342                     group='xsrf_field_name', default='session_token')
2343                 video_annotations = self._download_webpage(
2344                     self._proto_relative_url(invideo_url),
2345                     video_id, note='Downloading annotations',
2346                     errnote='Unable to download video annotations', fatal=False,
2347                     data=urlencode_postdata({xsrf_field_name: xsrf_token}))
2348
2349         chapters = self._extract_chapters(description_original, video_duration)
2350
2351         # Look for the DASH manifest
2352         if self._downloader.params.get('youtube_include_dash_manifest', True):
2353             dash_mpd_fatal = True
2354             for mpd_url in dash_mpds:
2355                 dash_formats = {}
2356                 try:
2357                     def decrypt_sig(mobj):
2358                         s = mobj.group(1)
2359                         dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
2360                         return '/signature/%s' % dec_s
2361
2362                     mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
2363
2364                     for df in self._extract_mpd_formats(
2365                             mpd_url, video_id, fatal=dash_mpd_fatal,
2366                             formats_dict=self._formats):
2367                         if not df.get('filesize'):
2368                             df['filesize'] = _extract_filesize(df['url'])
2369                         # Do not overwrite DASH format found in some previous DASH manifest
2370                         if df['format_id'] not in dash_formats:
2371                             dash_formats[df['format_id']] = df
2372                         # Additional DASH manifests may end up in HTTP Error 403, therefore
2373                         # allow them to fail without a bug report message if some DASH
2374                         # manifest has already succeeded. This is a temporary workaround to
2375                         # reduce the burst of bug reports until we figure out the reason and
2376                         # whether it can be fixed at all.
2377                         dash_mpd_fatal = False
2378                 except (ExtractorError, KeyError) as e:
2379                     self.report_warning(
2380                         'Skipping DASH manifest: %r' % e, video_id)
2381                 if dash_formats:
2382                     # Remove the formats we found through non-DASH; they
2383                     # contain less info and can be wrong because we use
2384                     # fixed values (for example the resolution). See
2385                     # https://github.com/ytdl-org/youtube-dl/issues/5774 for an
2386                     # example.
2387                     formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
2388                     formats.extend(dash_formats.values())
2389
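        # Illustrative sketch with hypothetical entries: if the regular format
        # map and a DASH manifest both expose itag 137, the DASH variant wins:
        #
        #   formats      = [{'format_id': '137', 'height': 1080}, {'format_id': '18'}]
        #   dash_formats = {'137': {'format_id': '137', 'height': 1080, 'fps': 24}}
        #
        # the filter above drops the non-DASH '137' and keeps '18', then the
        # richer DASH '137' is appended via dash_formats.values().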
2390         # Check for malformed aspect ratio
2391         stretched_m = re.search(
2392             r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
2393             video_webpage)
2394         if stretched_m:
2395             w = float(stretched_m.group('w'))
2396             h = float(stretched_m.group('h'))
2397             # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
2398             # We will only process correct ratios.
2399             if w > 0 and h > 0:
2400                 ratio = w / h
2401                 for f in formats:
2402                     if f.get('vcodec') != 'none':
2403                         f['stretched_ratio'] = ratio
2404
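        # Illustrative note: a tag like 'yt:stretch=16:9' yields ratio 16/9
        # (~1.78), which is attached as 'stretched_ratio' to every video format
        # so that downstream postprocessing can correct the display aspect ratio.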
2405         if not formats:
2406             token = extract_token(video_info)
2407             if not token:
2408                 if 'reason' in video_info:
2409                     if 'The uploader has not made this video available in your country.' in video_info['reason']:
2410                         regions_allowed = self._html_search_meta(
2411                             'regionsAllowed', video_webpage, default=None)
2412                         countries = regions_allowed.split(',') if regions_allowed else None
2413                         self.raise_geo_restricted(
2414                             msg=video_info['reason'][0], countries=countries)
2415                     reason = video_info['reason'][0]
2416                     if 'Invalid parameters' in reason:
2417                         unavailable_message = extract_unavailable_message()
2418                         if unavailable_message:
2419                             reason = unavailable_message
2420                     raise ExtractorError(
2421                         'YouTube said: %s' % reason,
2422                         expected=True, video_id=video_id)
2423                 else:
2424                     raise ExtractorError(
2425                         '"token" parameter not in video info for unknown reason',
2426                         video_id=video_id)
2427
2428         if not formats and (video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos'])):
2429             raise ExtractorError('This video is DRM protected.', expected=True)
2430
2431         self._sort_formats(formats)
2432
2433         self.mark_watched(video_id, video_info, player_response)
2434
2435         return {
2436             'id': video_id,
2437             'uploader': video_uploader,
2438             'uploader_id': video_uploader_id,
2439             'uploader_url': video_uploader_url,
2440             'channel_id': channel_id,
2441             'channel_url': channel_url,
2442             'upload_date': upload_date,
2443             'license': video_license,
2444             'creator': video_creator or artist,
2445             'title': video_title,
2446             'alt_title': video_alt_title or track,
2447             'thumbnail': video_thumbnail,
2448             'description': video_description,
2449             'categories': video_categories,
2450             'tags': video_tags,
2451             'subtitles': video_subtitles,
2452             'automatic_captions': automatic_captions,
2453             'duration': video_duration,
2454             'age_limit': 18 if age_gate else 0,
2455             'annotations': video_annotations,
2456             'chapters': chapters,
2457             'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
2458             'view_count': view_count,
2459             'like_count': like_count,
2460             'dislike_count': dislike_count,
2461             'average_rating': average_rating,
2462             'formats': formats,
2463             'is_live': is_live,
2464             'start_time': start_time,
2465             'end_time': end_time,
2466             'series': series,
2467             'season_number': season_number,
2468             'episode_number': episode_number,
2469             'track': track,
2470             'artist': artist,
2471             'album': album,
2472             'release_date': release_date,
2473             'release_year': release_year,
2474         }
2475
2476
2477 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
2478     IE_DESC = 'YouTube.com playlists'
2479     _VALID_URL = r"""(?x)(?:
2480                         (?:https?://)?
2481                         (?:\w+\.)?
2482                         (?:
2483                             (?:
2484                                 youtube(?:kids)?\.com|
2485                                 invidio\.us
2486                             )
2487                             /
2488                             (?:
2489                                (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
2490                                \? (?:.*?[&;])*? (?:p|a|list)=
2491                             |  p/
2492                             )|
2493                             youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
2494                         )
2495                         (
2496                             (?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,}
2497                             # Top tracks, they can also include dots
2498                             |(?:MC)[\w\.]*
2499                         )
2500                         .*
2501                      |
2502                         (%(playlist_id)s)
2503                      )""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
2504     _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
2505     _VIDEO_RE_TPL = r'href="\s*/watch\?v=%s(?:&amp;(?:[^"]*?index=(?P<index>\d+))?(?:[^>]+>(?P<title>[^<]+))?)?'
2506     _VIDEO_RE = _VIDEO_RE_TPL % r'(?P<id>[0-9A-Za-z_-]{11})'
2507     IE_NAME = 'youtube:playlist'
2508     _TESTS = [{
2509         'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
2510         'info_dict': {
2511             'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
2512             'uploader': 'Sergey M.',
2513             'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
2514             'title': 'youtube-dl public playlist',
2515         },
2516         'playlist_count': 1,
2517     }, {
2518         'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
2519         'info_dict': {
2520             'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
2521             'uploader': 'Sergey M.',
2522             'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
2523             'title': 'youtube-dl empty playlist',
2524         },
2525         'playlist_count': 0,
2526     }, {
2527         'note': 'Playlist with deleted videos (#651). As a bonus, video #51 also appears twice in this list.',
2528         'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
2529         'info_dict': {
2530             'title': '29C3: Not my department',
2531             'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
2532             'uploader': 'Christiaan008',
2533             'uploader_id': 'ChRiStIaAn008',
2534         },
2535         'playlist_count': 96,
2536     }, {
2537         'note': 'issue #673',
2538         'url': 'PLBB231211A4F62143',
2539         'info_dict': {
2540             'title': '[OLD]Team Fortress 2 (Class-based LP)',
2541             'id': 'PLBB231211A4F62143',
2542             'uploader': 'Wickydoo',
2543             'uploader_id': 'Wickydoo',
2544         },
2545         'playlist_mincount': 26,
2546     }, {
2547         'note': 'Large playlist',
2548         'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
2549         'info_dict': {
2550             'title': 'Uploads from Cauchemar',
2551             'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
2552             'uploader': 'Cauchemar',
2553             'uploader_id': 'Cauchemar89',
2554         },
2555         'playlist_mincount': 799,
2556     }, {
2557         'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
2558         'info_dict': {
2559             'title': 'YDL_safe_search',
2560             'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
2561         },
2562         'playlist_count': 2,
2563         'skip': 'This playlist is private',
2564     }, {
2565         'note': 'embedded',
2566         'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
2567         'playlist_count': 4,
2568         'info_dict': {
2569             'title': 'JODA15',
2570             'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
2571             'uploader': 'milan',
2572             'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
2573         }
2574     }, {
2575         'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
2576         'playlist_mincount': 485,
2577         'info_dict': {
2578             'title': '2018 Chinese New Singles (11/6 updated)',
2579             'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
2580             'uploader': 'LBK',
2581             'uploader_id': 'sdragonfang',
2582         }
2583     }, {
2584         'note': 'Embedded SWF player',
2585         'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
2586         'playlist_count': 4,
2587         'info_dict': {
2588             'title': 'JODA7',
2589             'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
2590         },
2591         'skip': 'This playlist does not exist',
2592     }, {
2593         'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
2594         'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
2595         'info_dict': {
2596             'title': 'Uploads from Interstellar Movie',
2597             'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
2598             'uploader': 'Interstellar Movie',
2599             'uploader_id': 'InterstellarMovie1',
2600         },
2601         'playlist_mincount': 21,
2602     }, {
2603         # Playlist URL that does not actually serve a playlist
2604         'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
2605         'info_dict': {
2606             'id': 'FqZTN594JQw',
2607             'ext': 'webm',
2608             'title': "Smiley's People 01 detective, Adventure Series, Action",
2609             'uploader': 'STREEM',
2610             'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
2611             'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
2612             'upload_date': '20150526',
2613             'license': 'Standard YouTube License',
2614             'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
2615             'categories': ['People & Blogs'],
2616             'tags': list,
2617             'view_count': int,
2618             'like_count': int,
2619             'dislike_count': int,
2620         },
2621         'params': {
2622             'skip_download': True,
2623         },
2624         'skip': 'This video is not available.',
2625         'add_ie': [YoutubeIE.ie_key()],
2626     }, {
2627         'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
2628         'info_dict': {
2629             'id': 'yeWKywCrFtk',
2630             'ext': 'mp4',
2631             'title': 'Small Scale Baler and Braiding Rugs',
2632             'uploader': 'Backus-Page House Museum',
2633             'uploader_id': 'backuspagemuseum',
2634             'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
2635             'upload_date': '20161008',
2636             'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
2637             'categories': ['Nonprofits & Activism'],
2638             'tags': list,
2639             'like_count': int,
2640             'dislike_count': int,
2641         },
2642         'params': {
2643             'noplaylist': True,
2644             'skip_download': True,
2645         },
2646     }, {
2647         # https://github.com/ytdl-org/youtube-dl/issues/21844
2648         'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
2649         'info_dict': {
2650             'title': 'Data Analysis with Dr Mike Pound',
2651             'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
2652             'uploader_id': 'Computerphile',
2653             'uploader': 'Computerphile',
2654         },
2655         'playlist_mincount': 11,
2656     }, {
2657         'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
2658         'only_matching': True,
2659     }, {
2660         'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
2661         'only_matching': True,
2662     }, {
2663         # music album playlist
2664         'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
2665         'only_matching': True,
2666     }, {
2667         'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
2668         'only_matching': True,
2669     }, {
2670         'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
2671         'only_matching': True,
2672     }]
2673
2674     def _real_initialize(self):
2675         self._login()
2676
2677     def extract_videos_from_page(self, page):
2678         ids_in_page = []
2679         titles_in_page = []
2680
2681         for item in re.findall(
2682                 r'(<[^>]*\bdata-video-id\s*=\s*["\'][0-9A-Za-z_-]{11}[^>]+>)', page):
2683             attrs = extract_attributes(item)
2684             video_id = attrs['data-video-id']
2685             video_title = unescapeHTML(attrs.get('data-title'))
2686             if video_title:
2687                 video_title = video_title.strip()
2688             ids_in_page.append(video_id)
2689             titles_in_page.append(video_title)
2690
2691         # Fallback with old _VIDEO_RE
2692         self.extract_videos_from_page_impl(
2693             self._VIDEO_RE, page, ids_in_page, titles_in_page)
2694
2695         # Relaxed fallbacks
2696         self.extract_videos_from_page_impl(
2697             r'href="\s*/watch\?v\s*=\s*(?P<id>[0-9A-Za-z_-]{11})', page,
2698             ids_in_page, titles_in_page)
2699         self.extract_videos_from_page_impl(
2700             r'data-video-ids\s*=\s*["\'](?P<id>[0-9A-Za-z_-]{11})', page,
2701             ids_in_page, titles_in_page)
2702
2703         return zip(ids_in_page, titles_in_page)
2704
2705     def _extract_mix(self, playlist_id):
2706         # The mixes are generated from a single video;
2707         # the id of the playlist is just 'RD' + video_id
2708         ids = []
2709         last_id = playlist_id[-11:]
2710         for n in itertools.count(1):
2711             url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
2712             webpage = self._download_webpage(
2713                 url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
2714             new_ids = orderedSet(re.findall(
2715                 r'''(?xs)data-video-username=".*?".*?
2716                            href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
2717                 webpage))
2718             # Fetch new pages until all the videos are repeated; it seems that
2719             # there are always 51 unique videos.
2720             new_ids = [_id for _id in new_ids if _id not in ids]
2721             if not new_ids:
2722                 break
2723             ids.extend(new_ids)
2724             last_id = ids[-1]
2725
2726         url_results = self._ids_to_results(ids)
2727
2728         search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
2729         title_span = (
2730             search_title('playlist-title')
2731             or search_title('title long-title')
2732             or search_title('title'))
2733         title = clean_html(title_span)
2734
2735         return self.playlist_result(url_results, playlist_id, title)
2736
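    # Illustrative sketch with a hypothetical mix id: the seed video id is
    # embedded as the last 11 characters of the playlist id, which is how
    # _extract_mix bootstraps its first request:
    #
    #   >>> playlist_id = 'RDdQw4w9WgXcQ'
    #   >>> playlist_id[-11:]
    #   'dQw4w9WgXcQ'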
2737     def _extract_playlist(self, playlist_id):
2738         url = self._TEMPLATE_URL % playlist_id
2739         page = self._download_webpage(url, playlist_id)
2740
2741         # the yt-alert-message now has tabindex attribute (see https://github.com/ytdl-org/youtube-dl/issues/11604)
2742         for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
2743             match = match.strip()
2744             # Check if the playlist exists or is private
2745             mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
2746             if mobj:
2747                 reason = mobj.group('reason')
2748                 message = 'This playlist %s' % reason
2749                 if 'private' in reason:
2750                     message += ', use --username or --netrc to access it'
2751                 message += '.'
2752                 raise ExtractorError(message, expected=True)
2753             elif re.match(r'[^<]*Invalid parameters[^<]*', match):
2754                 raise ExtractorError(
2755                     'Invalid parameters. Maybe URL is incorrect.',
2756                     expected=True)
2757             elif re.match(r'[^<]*Choose your language[^<]*', match):
2758                 continue
2759             else:
2760                 self.report_warning('Youtube gives an alert message: ' + match)
2761
2762         playlist_title = self._html_search_regex(
2763             r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
2764             page, 'title', default=None)
2765
2766         _UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref='
2767         uploader = self._html_search_regex(
2768             r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE,
2769             page, 'uploader', default=None)
2770         mobj = re.search(
2771             r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE,
2772             page)
2773         if mobj:
2774             uploader_id = mobj.group('uploader_id')
2775             uploader_url = compat_urlparse.urljoin(url, mobj.group('path'))
2776         else:
2777             uploader_id = uploader_url = None
2778
2779         has_videos = True
2780
2781         if not playlist_title:
2782             try:
2783                 # Some playlist URLs don't actually serve a playlist (e.g.
2784                 # https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
2785                 next(self._entries(page, playlist_id))
2786             except StopIteration:
2787                 has_videos = False
2788
2789         playlist = self.playlist_result(
2790             self._entries(page, playlist_id), playlist_id, playlist_title)
2791         playlist.update({
2792             'uploader': uploader,
2793             'uploader_id': uploader_id,
2794             'uploader_url': uploader_url,
2795         })
2796
2797         return has_videos, playlist
2798
2799     def _check_download_just_video(self, url, playlist_id):
2800         # Check if it's a video-specific URL
2801         query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
2802         video_id = query_dict.get('v', [None])[0] or self._search_regex(
2803             r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
2804             'video id', default=None)
2805         if video_id:
2806             if self._downloader.params.get('noplaylist'):
2807                 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
2808                 return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
2809             else:
2810                 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
2811                 return video_id, None
2812         return None, None
2813
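    # Illustrative sketch with a hypothetical URL: _check_download_just_video
    # only honours an explicit v= query parameter (or a youtu.be/embed path):
    #
    #   >>> url = 'https://www.youtube.com/watch?v=BaW_jenozKc&list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc'
    #   >>> compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)['v'][0]
    #   'BaW_jenozKc'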
2814     def _real_extract(self, url):
2815         # Extract playlist id
2816         mobj = re.match(self._VALID_URL, url)
2817         if mobj is None:
2818             raise ExtractorError('Invalid URL: %s' % url)
2819         playlist_id = mobj.group(1) or mobj.group(2)
2820
2821         video_id, video = self._check_download_just_video(url, playlist_id)
2822         if video:
2823             return video
2824
2825         if playlist_id.startswith(('RD', 'UL', 'PU')):
2826             # Mixes require a custom extraction process
2827             return self._extract_mix(playlist_id)
2828
2829         has_videos, playlist = self._extract_playlist(playlist_id)
2830         if has_videos or not video_id:
2831             return playlist
2832
2833         # Some playlist URLs don't actually serve a playlist (see
2834         # https://github.com/ytdl-org/youtube-dl/issues/10537).
2835         # Fallback to plain video extraction if there is a video id
2836         # along with playlist id.
2837         return self.url_result(video_id, 'Youtube', video_id=video_id)
2838
2839
2840 class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
2841     IE_DESC = 'YouTube.com channels'
2842     _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie|kids)?\.com|(?:www\.)?invidio\.us)/channel/(?P<id>[0-9A-Za-z_-]+)'
2843     _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
2844     _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
2845     IE_NAME = 'youtube:channel'
2846     _TESTS = [{
2847         'note': 'paginated channel',
2848         'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
2849         'playlist_mincount': 91,
2850         'info_dict': {
2851             'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
2852             'title': 'Uploads from lex will',
2853             'uploader': 'lex will',
2854             'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
2855         }
2856     }, {
2857         'note': 'Age restricted channel',
2858         # from https://www.youtube.com/user/DeusExOfficial
2859         'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
2860         'playlist_mincount': 64,
2861         'info_dict': {
2862             'id': 'UUs0ifCMCm1icqRbqhUINa0w',
2863             'title': 'Uploads from Deus Ex',
2864             'uploader': 'Deus Ex',
2865             'uploader_id': 'DeusExOfficial',
2866         },
2867     }, {
2868         'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
2869         'only_matching': True,
2870     }, {
2871         'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA',
2872         'only_matching': True,
2873     }]
2874
2875     @classmethod
2876     def suitable(cls, url):
2877         return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
2878                 else super(YoutubeChannelIE, cls).suitable(url))
2879
2880     def _build_template_url(self, url, channel_id):
2881         return self._TEMPLATE_URL % channel_id
2882
2883     def _real_extract(self, url):
2884         channel_id = self._match_id(url)
2885
2886         url = self._build_template_url(url, channel_id)
2887
2888         # Channel-by-page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778).
2889         # Work around this by extracting as a playlist if we manage to obtain the channel playlist URL;
2890         # otherwise fall back on page-by-page channel extraction.
2891         channel_page = self._download_webpage(
2892             url + '?view=57', channel_id,
2893             'Downloading channel page', fatal=False)
2894         if channel_page is False:
2895             channel_playlist_id = False
2896         else:
2897             channel_playlist_id = self._html_search_meta(
2898                 'channelId', channel_page, 'channel id', default=None)
2899             if not channel_playlist_id:
2900                 channel_url = self._html_search_meta(
2901                     ('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
2902                     channel_page, 'channel url', default=None)
2903                 if channel_url:
2904                     channel_playlist_id = self._search_regex(
2905                         r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
2906                         channel_url, 'channel id', default=None)
2907         if channel_playlist_id and channel_playlist_id.startswith('UC'):
2908             playlist_id = 'UU' + channel_playlist_id[2:]
2909             return self.url_result(
2910                 compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
2911
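        # Illustrative note: a channel id maps onto its auto-generated uploads
        # playlist by swapping the 'UC' prefix for 'UU', e.g. (as in the tests
        # above) 'UCKfVa3S1e4PHvxWcwyMMg8w' -> 'UUKfVa3S1e4PHvxWcwyMMg8w'.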
2912         channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
2913         autogenerated = re.search(r'''(?x)
2914                 class="[^"]*?(?:
2915                     channel-header-autogenerated-label|
2916                     yt-channel-title-autogenerated
2917                 )[^"]*"''', channel_page) is not None
2918
2919         if autogenerated:
2920             # The videos are contained in a single page;
2921             # the ajax pages can't be used because they are empty
2922             entries = [
2923                 self.url_result(
2924                     video_id, 'Youtube', video_id=video_id,
2925                     video_title=video_title)
2926                 for video_id, video_title in self.extract_videos_from_page(channel_page)]
2927             return self.playlist_result(entries, channel_id)
2928
2929         try:
2930             next(self._entries(channel_page, channel_id))
2931         except StopIteration:
2932             alert_message = self._html_search_regex(
2933                 r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
2934                 channel_page, 'alert', default=None, group='alert')
2935             if alert_message:
2936                 raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
2937
2938         return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
2939
2940
2941 class YoutubeUserIE(YoutubeChannelIE):
2942     IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
2943     _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
2944     _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
2945     IE_NAME = 'youtube:user'
2946
2947     _TESTS = [{
2948         'url': 'https://www.youtube.com/user/TheLinuxFoundation',
2949         'playlist_mincount': 320,
2950         'info_dict': {
2951             'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
2952             'title': 'Uploads from The Linux Foundation',
2953             'uploader': 'The Linux Foundation',
2954             'uploader_id': 'TheLinuxFoundation',
2955         }
2956     }, {
2957         # Only available via https://www.youtube.com/c/12minuteathlete/videos
2958         # but not https://www.youtube.com/user/12minuteathlete/videos
2959         'url': 'https://www.youtube.com/c/12minuteathlete/videos',
2960         'playlist_mincount': 249,
2961         'info_dict': {
2962             'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
2963             'title': 'Uploads from 12 Minute Athlete',
2964             'uploader': '12 Minute Athlete',
2965             'uploader_id': 'the12minuteathlete',
2966         }
2967     }, {
2968         'url': 'ytuser:phihag',
2969         'only_matching': True,
2970     }, {
2971         'url': 'https://www.youtube.com/c/gametrailers',
2972         'only_matching': True,
2973     }, {
2974         'url': 'https://www.youtube.com/gametrailers',
2975         'only_matching': True,
2976     }, {
2977         # This channel is not available, geo restricted to JP
2978         'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
2979         'only_matching': True,
2980     }]
2981
2982     @classmethod
2983     def suitable(cls, url):
2984         # Don't return True if the url can be extracted with another youtube
2985         # extractor; the regex is too permissive and it would match.
2986         other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
2987         if any(ie.suitable(url) for ie in other_yt_ies):
2988             return False
2989         else:
2990             return super(YoutubeUserIE, cls).suitable(url)
2991
2992     def _build_template_url(self, url, channel_id):
2993         mobj = re.match(self._VALID_URL, url)
2994         return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
2995
2996
2997 class YoutubeLiveIE(YoutubeBaseInfoExtractor):
2998     IE_DESC = 'YouTube.com live streams'
2999     _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
3000     IE_NAME = 'youtube:live'
3001
3002     _TESTS = [{
3003         'url': 'https://www.youtube.com/user/TheYoungTurks/live',
3004         'info_dict': {
3005             'id': 'a48o2S1cPoo',
3006             'ext': 'mp4',
3007             'title': 'The Young Turks - Live Main Show',
3008             'uploader': 'The Young Turks',
3009             'uploader_id': 'TheYoungTurks',
3010             'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
3011             'upload_date': '20150715',
3012             'license': 'Standard YouTube License',
3013             'description': 'md5:438179573adcdff3c97ebb1ee632b891',
3014             'categories': ['News & Politics'],
3015             'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
3016             'like_count': int,
3017             'dislike_count': int,
3018         },
3019         'params': {
3020             'skip_download': True,
3021         },
3022     }, {
3023         'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
3024         'only_matching': True,
3025     }, {
3026         'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
3027         'only_matching': True,
3028     }, {
3029         'url': 'https://www.youtube.com/TheYoungTurks/live',
3030         'only_matching': True,
3031     }]
3032
3033     def _real_extract(self, url):
3034         mobj = re.match(self._VALID_URL, url)
3035         channel_id = mobj.group('id')
3036         base_url = mobj.group('base_url')
3037         webpage = self._download_webpage(url, channel_id, fatal=False)
3038         if webpage:
3039             page_type = self._og_search_property(
3040                 'type', webpage, 'page type', default='')
3041             video_id = self._html_search_meta(
3042                 'videoId', webpage, 'video id', default=None)
3043             if page_type.startswith('video') and video_id and re.match(
3044                     r'^[0-9A-Za-z_-]{11}$', video_id):
3045                 return self.url_result(video_id, YoutubeIE.ie_key())
3046         return self.url_result(base_url)
3047
3048
3049 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
3050     IE_DESC = 'YouTube.com user/channel playlists'
3051     _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
3052     IE_NAME = 'youtube:playlists'
3053
3054     _TESTS = [{
3055         'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
3056         'playlist_mincount': 4,
3057         'info_dict': {
3058             'id': 'ThirstForScience',
3059             'title': 'ThirstForScience',
3060         },
3061     }, {
3062         # with "Load more" button
3063         'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
3064         'playlist_mincount': 70,
3065         'info_dict': {
3066             'id': 'igorkle1',
3067             'title': 'Игорь Клейнер',
3068         },
3069     }, {
3070         'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
3071         'playlist_mincount': 17,
3072         'info_dict': {
3073             'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
3074             'title': 'Chem Player',
3075         },
3076         'skip': 'Blocked',
3077     }]
3078
3079
3080 class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor):
3081     _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
3082
3083
3084 class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor):
3085     IE_DESC = 'YouTube.com searches'
3086     # There doesn't appear to be a real limit; for example, a search for
3087     # 'python' returns more than 8,000,000 results
3088     _MAX_RESULTS = float('inf')
3089     IE_NAME = 'youtube:search'
3090     _SEARCH_KEY = 'ytsearch'
3091     _EXTRA_QUERY_ARGS = {}
3092     _TESTS = []
3093
3094     def _get_n_results(self, query, n):
3095         """Get a specified number of results for a query"""
3096
3097         videos = []
3098         limit = n
3099
3100         url_query = {
3101             'search_query': query.encode('utf-8'),
3102         }
3103         url_query.update(self._EXTRA_QUERY_ARGS)
3104         result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
3105
3106         for pagenum in itertools.count(1):
3107             data = self._download_json(
3108                 result_url, video_id='query "%s"' % query,
3109                 note='Downloading page %s' % pagenum,
3110                 errnote='Unable to download API page',
3111                 query={'spf': 'navigate'})
3112             html_content = data[1]['body']['content']
3113
3114             if 'class="search-message' in html_content:
3115                 raise ExtractorError(
3116                     '[youtube] No video results', expected=True)
3117
3118             new_videos = list(self._process_page(html_content))
3119             videos += new_videos
3120             if not new_videos or len(videos) > limit:
3121                 break
3122             next_link = self._html_search_regex(
3123                 r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
3124                 html_content, 'next link', default=None)
3125             if next_link is None:
3126                 break
3127             result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
3128
3129         if len(videos) > n:
3130             videos = videos[:n]
3131         return self.playlist_result(videos, query)
3132
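    # Illustrative usage sketch: _SEARCH_KEY follows the standard
    # SearchInfoExtractor convention of '<key><count>:<query>', e.g.
    #
    #   youtube-dl "ytsearch3:youtube-dl test video"
    #
    # downloads the first three search results for that query.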
3133
3134 class YoutubeSearchDateIE(YoutubeSearchIE):
3135     IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
3136     _SEARCH_KEY = 'ytsearchdate'
3137     IE_DESC = 'YouTube.com searches, newest videos first'
3138     _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
3139
3140
3141 class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):
3142     IE_DESC = 'YouTube.com search URLs'
3143     IE_NAME = 'youtube:search_url'
3144     _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
3145     _TESTS = [{
3146         'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
3147         'playlist_mincount': 5,
3148         'info_dict': {
3149             'title': 'youtube-dl test video',
3150         }
3151     }, {
3152         'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
3153         'only_matching': True,
3154     }]
3155
3156     def _real_extract(self, url):
3157         mobj = re.match(self._VALID_URL, url)
3158         query = compat_urllib_parse_unquote_plus(mobj.group('query'))
3159         webpage = self._download_webpage(url, query)
3160         return self.playlist_result(self._process_page(webpage), playlist_title=query)
3161
3162
3163 class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
3164     IE_DESC = 'YouTube.com (multi-season) shows'
3165     _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
3166     IE_NAME = 'youtube:show'
3167     _TESTS = [{
3168         'url': 'https://www.youtube.com/show/airdisasters',
3169         'playlist_mincount': 5,
3170         'info_dict': {
3171             'id': 'airdisasters',
3172             'title': 'Air Disasters',
3173         }
3174     }]
3175
3176     def _real_extract(self, url):
3177         playlist_id = self._match_id(url)
3178         return super(YoutubeShowIE, self)._real_extract(
3179             'https://www.youtube.com/show/%s/playlists' % playlist_id)
3180
3181
3182 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
3183     """
3184     Base class for feed extractors
3185     Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
3186     """
3187     _LOGIN_REQUIRED = True
3188
3189     @property
3190     def IE_NAME(self):
3191         return 'youtube:%s' % self._FEED_NAME
3192
3193     def _real_initialize(self):
3194         self._login()
3195
3196     def _entries(self, page):
3197         # The extraction process is the same as for playlists, but the regex
3198         # for the video ids doesn't contain an index
3199         ids = []
3200         more_widget_html = content_html = page
3201         for page_num in itertools.count(1):
3202             matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
3203
3204             # 'recommended' feed has infinite 'load more' and each new portion spins
3205             # the same videos in (sometimes) slightly different order, so we'll check
3206             # for uniqueness and break when a portion has no new videos
3207             new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
3208             if not new_ids:
3209                 break
3210
3211             ids.extend(new_ids)
3212
3213             for entry in self._ids_to_results(new_ids):
3214                 yield entry
3215
3216             mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
3217             if not mobj:
3218                 break
3219
3220             more = self._download_json(
3221                 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
3222                 'Downloading page #%s' % page_num,
3223                 transform_source=uppercase_escape)
3224             content_html = more['content_html']
3225             more_widget_html = more['load_more_widget_html']
3226
3227     def _real_extract(self, url):
3228         page = self._download_webpage(
3229             'https://www.youtube.com/feed/%s' % self._FEED_NAME,
3230             self._PLAYLIST_TITLE)
3231         return self.playlist_result(
3232             self._entries(page), playlist_title=self._PLAYLIST_TITLE)
3233
3234
3235 class YoutubeWatchLaterIE(YoutubePlaylistIE):
3236     IE_NAME = 'youtube:watchlater'
3237     IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
3238     _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
3239
3240     _TESTS = [{
3241         'url': 'https://www.youtube.com/playlist?list=WL',
3242         'only_matching': True,
3243     }, {
3244         'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
3245         'only_matching': True,
3246     }]
3247
3248     def _real_extract(self, url):
3249         _, video = self._check_download_just_video(url, 'WL')
3250         if video:
3251             return video
3252         _, playlist = self._extract_playlist('WL')
3253         return playlist
3254
3255
3256 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
3257     IE_NAME = 'youtube:favorites'
3258     IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
3259     _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
3260     _LOGIN_REQUIRED = True
3261
3262     def _real_extract(self, url):
3263         webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
3264         playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
3265         return self.url_result(playlist_id, 'YoutubePlaylist')
3266
3267
3268 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
3269     IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
3270     _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
3271     _FEED_NAME = 'recommended'
3272     _PLAYLIST_TITLE = 'Youtube Recommended videos'
3273
3274
3275 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
3276     IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
3277     _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
3278     _FEED_NAME = 'subscriptions'
3279     _PLAYLIST_TITLE = 'Youtube Subscriptions'
3280
3281
3282 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
3283     IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
3284     _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
3285     _FEED_NAME = 'history'
3286     _PLAYLIST_TITLE = 'Youtube History'
3287
3288
3289 class YoutubeTruncatedURLIE(InfoExtractor):
3290     IE_NAME = 'youtube:truncated_url'
3291     IE_DESC = False  # Do not list
3292     _VALID_URL = r'''(?x)
3293         (?:https?://)?
3294         (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
3295         (?:watch\?(?:
3296             feature=[a-z_]+|
3297             annotation_id=annotation_[^&]+|
3298             x-yt-cl=[0-9]+|
3299             hl=[^&]*|
3300             t=[0-9]+
3301         )?
3302         |
3303             attribution_link\?a=[^&]+
3304         )
3305         $
3306     '''
3307
3308     _TESTS = [{
3309         'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
3310         'only_matching': True,
3311     }, {
3312         'url': 'https://www.youtube.com/watch?',
3313         'only_matching': True,
3314     }, {
3315         'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
3316         'only_matching': True,
3317     }, {
3318         'url': 'https://www.youtube.com/watch?feature=foo',
3319         'only_matching': True,
3320     }, {
3321         'url': 'https://www.youtube.com/watch?hl=en-GB',
3322         'only_matching': True,
3323     }, {
3324         'url': 'https://www.youtube.com/watch?t=2372',
3325         'only_matching': True,
3326     }]
3327
3328     def _real_extract(self, url):
3329         raise ExtractorError(
3330             'Did you forget to quote the URL? Remember that & is a meta '
3331             'character in most shells, so you want to put the URL in quotes, '
3332             'like  youtube-dl '
3333             '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
3334             ' or simply  youtube-dl BaW_jenozKc  .',
3335             expected=True)
3336
3337
3338 class YoutubeTruncatedIDIE(InfoExtractor):
3339     IE_NAME = 'youtube:truncated_id'
3340     IE_DESC = False  # Do not list
3341     _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
3342
3343     _TESTS = [{
3344         'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
3345         'only_matching': True,
3346     }]
3347
3348     def _real_extract(self, url):
3349         video_id = self._match_id(url)
3350         raise ExtractorError(
3351             'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
3352             expected=True)