[youtube] Fix uploader id and uploader URL extraction
[youtube-dl] / youtube_dl / extractor / common.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import base64
5 import datetime
6 import hashlib
7 import json
8 import netrc
9 import os
10 import random
11 import re
12 import socket
13 import sys
14 import time
15 import math
16
17 from ..compat import (
18     compat_cookiejar_Cookie,
19     compat_cookies,
20     compat_etree_Element,
21     compat_etree_fromstring,
22     compat_getpass,
23     compat_integer_types,
24     compat_http_client,
25     compat_os_name,
26     compat_str,
27     compat_urllib_error,
28     compat_urllib_parse_unquote,
29     compat_urllib_parse_urlencode,
30     compat_urllib_request,
31     compat_urlparse,
32     compat_xml_parse_error,
33 )
34 from ..downloader.f4m import (
35     get_base_url,
36     remove_encrypted_media,
37 )
38 from ..utils import (
39     NO_DEFAULT,
40     age_restricted,
41     base_url,
42     bug_reports_message,
43     clean_html,
44     compiled_regex_type,
45     determine_ext,
46     determine_protocol,
47     dict_get,
48     error_to_compat_str,
49     ExtractorError,
50     extract_attributes,
51     fix_xml_ampersands,
52     float_or_none,
53     GeoRestrictedError,
54     GeoUtils,
55     int_or_none,
56     js_to_json,
57     JSON_LD_RE,
58     mimetype2ext,
59     orderedSet,
60     parse_bitrate,
61     parse_codecs,
62     parse_duration,
63     parse_iso8601,
64     parse_m3u8_attributes,
65     parse_resolution,
66     RegexNotFoundError,
67     sanitized_Request,
68     sanitize_filename,
69     str_or_none,
70     strip_or_none,
71     unescapeHTML,
72     unified_strdate,
73     unified_timestamp,
74     update_Request,
75     update_url_query,
76     urljoin,
77     url_basename,
78     url_or_none,
79     xpath_element,
80     xpath_text,
81     xpath_with_ns,
82 )
83
84
85 class InfoExtractor(object):
86     """Information Extractor class.
87
88     Information extractors are the classes that, given a URL, extract
89     information about the video (or videos) the URL refers to. This
90     information includes the real video URL, the video title, author and
91     others. The information is stored in a dictionary which is then
92     passed to the YoutubeDL. The YoutubeDL processes this
93     information possibly downloading the video to the file system, among
94     other possible outcomes.
95
96     The type field determines the type of the result.
97     By far the most common value (and the default if _type is missing) is
98     "video", which indicates a single video.
99
100     For a video, the dictionaries must include the following fields:
101
102     id:             Video identifier.
103     title:          Video title, unescaped.
104
105     Additionally, it must contain either a formats entry or a url one:
106
107     formats:        A list of dictionaries for each format available, ordered
108                     from worst to best quality.
109
110                     Potential fields:
111                     * url        The mandatory URL representing the media:
112                                    for plain file media - HTTP URL of this file,
113                                    for RTMP - RTMP URL,
114                                    for HLS - URL of the M3U8 media playlist,
115                                    for HDS - URL of the F4M manifest,
116                                    for DASH
117                                      - HTTP URL to plain file media (in case of
118                                        unfragmented media)
119                                      - URL of the MPD manifest or base URL
120                                        representing the media if MPD manifest
121                                        is parsed from a string (in case of
122                                        fragmented media)
123                                    for MSS - URL of the ISM manifest.
124                     * manifest_url
125                                  The URL of the manifest file in case of
126                                  fragmented media:
127                                    for HLS - URL of the M3U8 master playlist,
128                                    for HDS - URL of the F4M manifest,
129                                    for DASH - URL of the MPD manifest,
130                                    for MSS - URL of the ISM manifest.
131                     * ext        Will be calculated from URL if missing
132                     * format     A human-readable description of the format
133                                  ("mp4 container with h264/opus").
134                                  Calculated from the format_id, width, height.
135                                  and format_note fields if missing.
136                     * format_id  A short description of the format
137                                  ("mp4_h264_opus" or "19").
138                                 Technically optional, but strongly recommended.
139                     * format_note Additional info about the format
140                                  ("3D" or "DASH video")
141                     * width      Width of the video, if known
142                     * height     Height of the video, if known
143                     * resolution Textual description of width and height
144                     * tbr        Average bitrate of audio and video in KBit/s
145                     * abr        Average audio bitrate in KBit/s
146                     * acodec     Name of the audio codec in use
147                     * asr        Audio sampling rate in Hertz
148                     * vbr        Average video bitrate in KBit/s
149                     * fps        Frame rate
150                     * vcodec     Name of the video codec in use
151                     * container  Name of the container format
152                     * filesize   The number of bytes, if known in advance
153                     * filesize_approx  An estimate for the number of bytes
154                     * player_url SWF Player URL (used for rtmpdump).
155                     * protocol   The protocol that will be used for the actual
156                                  download, lower-case.
157                                  "http", "https", "rtsp", "rtmp", "rtmpe",
158                                  "m3u8", "m3u8_native" or "http_dash_segments".
159                     * fragment_base_url
160                                  Base URL for fragments. Each fragment's path
161                                  value (if present) will be relative to
162                                  this URL.
163                     * fragments  A list of fragments of a fragmented media.
164                                  Each fragment entry must contain either an url
165                                  or a path. If an url is present it should be
166                                  considered by a client. Otherwise both path and
167                                  fragment_base_url must be present. Here is
168                                  the list of all potential fields:
169                                  * "url" - fragment's URL
170                                  * "path" - fragment's path relative to
171                                             fragment_base_url
172                                  * "duration" (optional, int or float)
173                                  * "filesize" (optional, int)
174                     * preference Order number of this format. If this field is
175                                  present and not None, the formats get sorted
176                                  by this field, regardless of all other values.
177                                  -1 for default (order by other properties),
178                                  -2 or smaller for less than default.
179                                  < -1000 to hide the format (if there is
180                                     another one which is strictly better)
181                     * language   Language code, e.g. "de" or "en-US".
182                     * language_preference  Is this in the language mentioned in
183                                  the URL?
184                                  10 if it's what the URL is about,
185                                  -1 for default (don't know),
186                                  -10 otherwise, other values reserved for now.
187                     * quality    Order number of the video quality of this
188                                  format, irrespective of the file format.
189                                  -1 for default (order by other properties),
190                                  -2 or smaller for less than default.
191                     * source_preference  Order number for this video source
192                                   (quality takes higher priority)
193                                  -1 for default (order by other properties),
194                                  -2 or smaller for less than default.
195                     * http_headers  A dictionary of additional HTTP headers
196                                  to add to the request.
197                     * stretched_ratio  If given and not 1, indicates that the
198                                  video's pixels are not square.
199                                  width : height ratio as float.
200                     * no_resume  The server does not support resuming the
201                                  (HTTP or RTMP) download. Boolean.
202                     * downloader_options  A dictionary of downloader options as
203                                  described in FileDownloader
204
205     url:            Final video URL.
206     ext:            Video filename extension.
207     format:         The video format, defaults to ext (used for --get-format)
208     player_url:     SWF Player URL (used for rtmpdump).
209
210     The following fields are optional:
211
212     alt_title:      A secondary title of the video.
213     display_id      An alternative identifier for the video, not necessarily
214                     unique, but available before title. Typically, id is
215                     something like "4234987", title "Dancing naked mole rats",
216                     and display_id "dancing-naked-mole-rats"
217     thumbnails:     A list of dictionaries, with the following entries:
218                         * "id" (optional, string) - Thumbnail format ID
219                         * "url"
220                         * "preference" (optional, int) - quality of the image
221                         * "width" (optional, int)
222                         * "height" (optional, int)
223                         * "resolution" (optional, string "{width}x{height}",
224                                         deprecated)
225                         * "filesize" (optional, int)
226     thumbnail:      Full URL to a video thumbnail image.
227     description:    Full video description.
228     uploader:       Full name of the video uploader.
229     license:        License name the video is licensed under.
230     creator:        The creator of the video.
231     release_date:   The date (YYYYMMDD) when the video was released.
232     timestamp:      UNIX timestamp of the moment the video became available.
233     upload_date:    Video upload date (YYYYMMDD).
234                     If not explicitly set, calculated from timestamp.
235     uploader_id:    Nickname or id of the video uploader.
236     uploader_url:   Full URL to a personal webpage of the video uploader.
237     channel:        Full name of the channel the video is uploaded on.
238                     Note that channel fields may or may not repeat uploader
239                     fields. This depends on a particular extractor.
240     channel_id:     Id of the channel.
241     channel_url:    Full URL to a channel webpage.
242     location:       Physical location where the video was filmed.
243     subtitles:      The available subtitles as a dictionary in the format
244                     {tag: subformats}. "tag" is usually a language code, and
245                     "subformats" is a list sorted from lower to higher
246                     preference, each element is a dictionary with the "ext"
247                     entry and one of:
248                         * "data": The subtitles file contents
249                         * "url": A URL pointing to the subtitles file
250                     "ext" will be calculated from URL if missing
251     automatic_captions: Like 'subtitles', used by the YoutubeIE for
252                     automatically generated captions
253     duration:       Length of the video in seconds, as an integer or float.
254     view_count:     How many users have watched the video on the platform.
255     like_count:     Number of positive ratings of the video
256     dislike_count:  Number of negative ratings of the video
257     repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
259     comment_count:  Number of comments on the video
260     comments:       A list of comments, each with one or more of the following
261                     properties (all but one of text or html optional):
262                         * "author" - human-readable name of the comment author
263                         * "author_id" - user ID of the comment author
264                         * "id" - Comment ID
265                         * "html" - Comment as HTML
266                         * "text" - Plain text of the comment
267                         * "timestamp" - UNIX timestamp of comment
268                         * "parent" - ID of the comment this one is replying to.
269                                      Set to "root" to indicate that this is a
270                                      comment to the original video.
271     age_limit:      Age restriction for the video, as an integer (years)
272     webpage_url:    The URL to the video webpage, if given to youtube-dl it
273                     should allow to get the same result again. (It will be set
274                     by YoutubeDL if it's missing)
275     categories:     A list of categories that the video falls in, for example
276                     ["Sports", "Berlin"]
277     tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
278     is_live:        True, False, or None (=unknown). Whether this video is a
279                     live stream that goes on instead of a fixed-length video.
280     start_time:     Time in seconds where the reproduction should start, as
281                     specified in the URL.
282     end_time:       Time in seconds where the reproduction should end, as
283                     specified in the URL.
284     chapters:       A list of dictionaries, with the following entries:
285                         * "start_time" - The start time of the chapter in seconds
286                         * "end_time" - The end time of the chapter in seconds
287                         * "title" (optional, string)
288
289     The following fields should only be used when the video belongs to some logical
290     chapter or section:
291
292     chapter:        Name or title of the chapter the video belongs to.
293     chapter_number: Number of the chapter the video belongs to, as an integer.
294     chapter_id:     Id of the chapter the video belongs to, as a unicode string.
295
296     The following fields should only be used when the video is an episode of some
297     series, programme or podcast:
298
299     series:         Title of the series or programme the video episode belongs to.
300     season:         Title of the season the video episode belongs to.
301     season_number:  Number of the season the video episode belongs to, as an integer.
302     season_id:      Id of the season the video episode belongs to, as a unicode string.
303     episode:        Title of the video episode. Unlike mandatory video title field,
304                     this field should denote the exact title of the video episode
305                     without any kind of decoration.
306     episode_number: Number of the video episode within a season, as an integer.
307     episode_id:     Id of the video episode, as a unicode string.
308
309     The following fields should only be used when the media is a track or a part of
310     a music album:
311
312     track:          Title of the track.
313     track_number:   Number of the track within an album or a disc, as an integer.
314     track_id:       Id of the track (useful in case of custom indexing, e.g. 6.iii),
315                     as a unicode string.
316     artist:         Artist(s) of the track.
317     genre:          Genre(s) of the track.
318     album:          Title of the album the track belongs to.
319     album_type:     Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
320     album_artist:   List of all artists appeared on the album (e.g.
321                     "Ash Borer / Fell Voices" or "Various Artists", useful for splits
322                     and compilations).
323     disc_number:    Number of the disc or other physical medium the track belongs to,
324                     as an integer.
325     release_year:   Year (YYYY) when the album was released.
326
327     Unless mentioned otherwise, the fields should be Unicode strings.
328
329     Unless mentioned otherwise, None is equivalent to absence of information.
330
331
332     _type "playlist" indicates multiple videos.
333     There must be a key "entries", which is a list, an iterable, or a PagedList
334     object, each element of which is a valid dictionary by this specification.
335
336     Additionally, playlists can have "id", "title", "description", "uploader",
337     "uploader_id", "uploader_url" attributes with the same semantics as videos
338     (see above).
339
340
341     _type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
343     It must have an entries key like a playlist and contain all the keys
344     required for a video at the same time.
345
346
347     _type "url" indicates that the video must be extracted from another
348     location, possibly by a different extractor. Its only required key is:
349     "url" - the next URL to extract.
350     The key "ie_key" can be set to the class name (minus the trailing "IE",
351     e.g. "Youtube") if the extractor class is known in advance.
352     Additionally, the dictionary may have any properties of the resolved entity
353     known in advance, for example "title" if the title of the referred video is
354     known ahead of time.
355
356
357     _type "url_transparent" entities have the same specification as "url", but
358     indicate that the given additional information is more precise than the one
359     associated with the resolved URL.
360     This is useful when a site employs a video service that hosts the video and
361     its technical metadata, but that video service does not embed a useful
362     title, description etc.
363
364
365     Subclasses of this one should re-define the _real_initialize() and
366     _real_extract() methods and define a _VALID_URL regexp.
367     Probably, they should also be added to the list of extractors.
368
369     _GEO_BYPASS attribute may be set to False in order to disable
370     geo restriction bypass mechanisms for a particular extractor.
371     Though it won't disable explicit geo restriction bypass based on
372     country code provided with geo_bypass_country.
373
374     _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
375     countries for this extractor. One of these countries will be used by
376     geo restriction bypass mechanism right away in order to bypass
377     geo restriction, of course, if the mechanism is not disabled.
378
379     _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
380     IP blocks in CIDR notation for this extractor. One of these IP blocks
381     will be used by geo restriction bypass mechanism similarly
382     to _GEO_COUNTRIES.
383
384     Finally, the _WORKING attribute should be set to False for broken IEs
385     in order to warn the users and skip the tests.
386     """
387
    # True once _real_initialize() has completed for this instance
    # (see initialize())
    _ready = False
    # Downloader attached via set_downloader(); per the class docstring the
    # extraction result is passed to the YoutubeDL
    _downloader = None
    # Fake source IP used as X-Forwarded-For for geo restriction bypass,
    # or None if no bypass is active
    _x_forwarded_for_ip = None
    # See the class docstring for the semantics of the _GEO_* attributes
    _GEO_BYPASS = True
    _GEO_COUNTRIES = None
    _GEO_IP_BLOCKS = None
    # Set to False for broken extractors to warn users and skip tests
    _WORKING = True
395
396     def __init__(self, downloader=None):
397         """Constructor. Receives an optional downloader."""
398         self._ready = False
399         self._x_forwarded_for_ip = None
400         self.set_downloader(downloader)
401
402     @classmethod
403     def suitable(cls, url):
404         """Receives a URL and returns True if suitable for this IE."""
405
406         # This does not use has/getattr intentionally - we want to know whether
407         # we have cached the regexp for *this* class, whereas getattr would also
408         # match the superclass
409         if '_VALID_URL_RE' not in cls.__dict__:
410             cls._VALID_URL_RE = re.compile(cls._VALID_URL)
411         return cls._VALID_URL_RE.match(url) is not None
412
413     @classmethod
414     def _match_id(cls, url):
415         if '_VALID_URL_RE' not in cls.__dict__:
416             cls._VALID_URL_RE = re.compile(cls._VALID_URL)
417         m = cls._VALID_URL_RE.match(url)
418         assert m
419         return compat_str(m.group('id'))
420
421     @classmethod
422     def working(cls):
423         """Getter method for _WORKING."""
424         return cls._WORKING
425
426     def initialize(self):
427         """Initializes an instance (authentication, etc)."""
428         self._initialize_geo_bypass({
429             'countries': self._GEO_COUNTRIES,
430             'ip_blocks': self._GEO_IP_BLOCKS,
431         })
432         if not self._ready:
433             self._real_initialize()
434             self._ready = True
435
    def _initialize_geo_bypass(self, geo_bypass_context):
        """
        Initialize geo restriction bypass mechanism.

        This method is used to initialize geo bypass mechanism based on faking
        X-Forwarded-For HTTP header. A random country from provided country list
        is selected and a random IP belonging to this country is generated. This
        IP will be passed as X-Forwarded-For HTTP header in all subsequent
        HTTP requests.

        This method will be used for initial geo bypass mechanism initialization
        during the instance initialization with _GEO_COUNTRIES and
        _GEO_IP_BLOCKS.

        You may also manually call it from extractor's code if geo bypass
        information is not available beforehand (e.g. obtained during
        extraction) or due to some other reason. In this case you should pass
        this information in geo bypass context passed as first argument. It may
        contain following fields:

        countries:  List of geo unrestricted countries (similar
                    to _GEO_COUNTRIES)
        ip_blocks:  List of geo unrestricted IP blocks in CIDR notation
                    (similar to _GEO_IP_BLOCKS)

        """
        # A fake IP is chosen at most once per instance: subsequent calls
        # keep whatever was selected first.
        if not self._x_forwarded_for_ip:

            # Geo bypass mechanism is explicitly disabled by user
            if not self._downloader.params.get('geo_bypass', True):
                return

            if not geo_bypass_context:
                geo_bypass_context = {}

            # Backward compatibility: previously _initialize_geo_bypass
            # expected a list of countries, some 3rd party code may still use
            # it this way
            if isinstance(geo_bypass_context, (list, tuple)):
                geo_bypass_context = {
                    'countries': geo_bypass_context,
                }

            # The whole point of geo bypass mechanism is to fake IP
            # as X-Forwarded-For HTTP header based on some IP block or
            # country code.

            # Path 1: bypassing based on IP block in CIDR notation

            # Explicit IP block specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            ip_block = self._downloader.params.get('geo_bypass_ip_block', None)

            # Otherwise use random IP block from geo bypass context but only
            # if extractor is known as geo bypassable
            if not ip_block:
                ip_blocks = geo_bypass_context.get('ip_blocks')
                if self._GEO_BYPASS and ip_blocks:
                    ip_block = random.choice(ip_blocks)

            if ip_block:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
                if self._downloader.params.get('verbose', False):
                    self._downloader.to_screen(
                        '[debug] Using fake IP %s as X-Forwarded-For.'
                        % self._x_forwarded_for_ip)
                return

            # Path 2: bypassing based on country code

            # Explicit country code specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            country = self._downloader.params.get('geo_bypass_country', None)

            # Otherwise use random country code from geo bypass context but
            # only if extractor is known as geo bypassable
            if not country:
                countries = geo_bypass_context.get('countries')
                if self._GEO_BYPASS and countries:
                    country = random.choice(countries)

            if country:
                # NOTE(review): random_ipv4 is passed a country code here
                # (not a CIDR block as in path 1) -- presumably it accepts
                # both; confirm against GeoUtils in utils.
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
                if self._downloader.params.get('verbose', False):
                    self._downloader.to_screen(
                        '[debug] Using fake IP %s (%s) as X-Forwarded-For.'
                        % (self._x_forwarded_for_ip, country.upper()))
523
    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            # At most two attempts: the second one happens with a faked
            # X-Forwarded-For IP after a geo restriction error.
            for _ in range(2):
                try:
                    self.initialize()
                    ie_result = self._real_extract(url)
                    if self._x_forwarded_for_ip:
                        # Propagate the faked IP so downstream requests can
                        # keep using the same X-Forwarded-For value
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        except ExtractorError:
            # Already a meaningful extractor failure; re-raise unchanged.
            # This clause must come before the generic handlers below.
            raise
        except compat_http_client.IncompleteRead as e:
            # Network-level failure: expected=True marks it as not a bug
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            # Likely a site layout change broke the extractor
            raise ExtractorError('An extractor error has occurred.', cause=e)
544
545     def __maybe_fake_ip_and_retry(self, countries):
546         if (not self._downloader.params.get('geo_bypass_country', None)
547                 and self._GEO_BYPASS
548                 and self._downloader.params.get('geo_bypass', True)
549                 and not self._x_forwarded_for_ip
550                 and countries):
551             country_code = random.choice(countries)
552             self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
553             if self._x_forwarded_for_ip:
554                 self.report_warning(
555                     'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
556                     % (self._x_forwarded_for_ip, country_code.upper()))
557                 return True
558         return False
559
560     def set_downloader(self, downloader):
561         """Sets the downloader for this IE."""
562         self._downloader = downloader
563
564     def _real_initialize(self):
565         """Real initialization process. Redefine in subclasses."""
566         pass
567
568     def _real_extract(self, url):
569         """Real extraction process. Redefine in subclasses."""
570         pass
571
572     @classmethod
573     def ie_key(cls):
574         """A string for getting the InfoExtractor with get_info_extractor"""
575         return compat_str(cls.__name__[:-2])
576
577     @property
578     def IE_NAME(self):
579         return compat_str(type(self).__name__[:-2])
580
581     @staticmethod
582     def __can_accept_status_code(err, expected_status):
583         assert isinstance(err, compat_urllib_error.HTTPError)
584         if expected_status is None:
585             return False
586         if isinstance(expected_status, compat_integer_types):
587             return err.code == expected_status
588         elif isinstance(expected_status, (list, tuple)):
589             return err.code in expected_status
590         elif callable(expected_status):
591             return expected_status(err.code) is True
592         else:
593             assert False
594
    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
        """
        Return the response handle.

        See _download_webpage docstring for arguments specification.
        """
        # note=None prints the default download message, note=False is
        # silent, any other note is shown (prefixed with video_id if given)
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))

        # Some sites check X-Forwarded-For HTTP header in order to figure out
        # the origin of the client behind proxy. This allows bypassing geo
        # restriction by faking this header's value to IP that belongs to some
        # geo unrestricted country. We will do so once we encounter any
        # geo restriction error.
        if self._x_forwarded_for_ip:
            # Never override a header the caller set explicitly
            if 'X-Forwarded-For' not in headers:
                headers['X-Forwarded-For'] = self._x_forwarded_for_ip

        # Normalize url_or_request into a request carrying data/headers/query
        if isinstance(url_or_request, compat_urllib_request.Request):
            url_or_request = update_Request(
                url_or_request, data=data, headers=headers, query=query)
        else:
            if query:
                url_or_request = update_url_query(url_or_request, query)
            if data is not None or headers:
                url_or_request = sanitized_Request(url_or_request, data, headers)
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            # HTTP errors with a status the caller expects are not failures:
            # hand back the response body for further processing
            if isinstance(err, compat_urllib_error.HTTPError):
                if self.__can_accept_status_code(err, expected_status):
                    # Retain reference to error to prevent file object from
                    # being closed before it can be read. Works around the
                    # effects of <https://bugs.python.org/issue15002>
                    # introduced in Python 3.4.1.
                    err.fp._error = err
                    return err.fp

            # errnote=False means fail silently
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False
649
650     def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
651         """
652         Return a tuple (page content as string, URL handle).
653
654         See _download_webpage docstring for arguments specification.
655         """
656         # Strip hashes from the URL (#1038)
657         if isinstance(url_or_request, (compat_str, str)):
658             url_or_request = url_or_request.partition('#')[0]
659
660         urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
661         if urlh is False:
662             assert not fatal
663             return False
664         content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
665         return (content, urlh)
666
667     @staticmethod
668     def _guess_encoding_from_content(content_type, webpage_bytes):
669         m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
670         if m:
671             encoding = m.group(1)
672         else:
673             m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
674                           webpage_bytes[:1024])
675             if m:
676                 encoding = m.group(1).decode('ascii')
677             elif webpage_bytes.startswith(b'\xff\xfe'):
678                 encoding = 'utf-16'
679             else:
680                 encoding = 'utf-8'
681
682         return encoding
683
    def __check_blocked(self, content):
        """Raise ExtractorError (expected=True) if the downloaded page is a
        known censorship/filtering interstitial (Websense, Indian censorship,
        Russian RKN blocklist) instead of real content."""
        first_block = content[:512]
        # NOTE(review): the Websense title is searched in the full content but
        # the 'Websense' marker only within the first 512 bytes — presumably
        # to keep the common case cheap; confirm before changing.
        if ('<title>Access to this site is blocked</title>' in content
                and 'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                # Collapse newlines so the quoted message stays on one line
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
                and 'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)
711
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        """Read the response body from urlh and decode it to text.

        Honours the 'dump_intermediate_pages' (base64 dump to screen) and
        'write_pages' (save raw body to a .dump file) downloader params.
        `prefix` bytes, when given, are prepended before decoding; `encoding`
        overrides the automatic charset detection.  Raises ExtractorError via
        __check_blocked when the page is a known censorship interstitial.
        """
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            self.to_screen('Dumping request to ' + urlh.geturl())
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            basen = '%s_%s' % (video_id, urlh.geturl())
            if len(basen) > 240:
                # Keep the filename under filesystem limits; replace the tail
                # with a hash so long URLs still produce unique names.
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            # Unknown codec name — fall back to UTF-8 rather than fail
            content = webpage_bytes.decode('utf-8', 'replace')

        self.__check_blocked(content)

        return content
748
749     def _download_webpage(
750             self, url_or_request, video_id, note=None, errnote=None,
751             fatal=True, tries=1, timeout=5, encoding=None, data=None,
752             headers={}, query={}, expected_status=None):
753         """
754         Return the data of the page as a string.
755
756         Arguments:
757         url_or_request -- plain text URL as a string or
758             a compat_urllib_request.Requestobject
759         video_id -- Video/playlist/item identifier (string)
760
761         Keyword arguments:
762         note -- note printed before downloading (string)
763         errnote -- note printed in case of an error (string)
764         fatal -- flag denoting whether error should be considered fatal,
765             i.e. whether it should cause ExtractionError to be raised,
766             otherwise a warning will be reported and extraction continued
767         tries -- number of tries
768         timeout -- sleep interval between tries
769         encoding -- encoding for a page content decoding, guessed automatically
770             when not explicitly specified
771         data -- POST data (bytes)
772         headers -- HTTP headers (dict)
773         query -- URL query (dict)
774         expected_status -- allows to accept failed HTTP requests (non 2xx
775             status code) by explicitly specifying a set of accepted status
776             codes. Can be any of the following entities:
777                 - an integer type specifying an exact failed status code to
778                   accept
779                 - a list or a tuple of integer types specifying a list of
780                   failed status codes to accept
781                 - a callable accepting an actual failed status code and
782                   returning True if it should be accepted
783             Note that this argument does not affect success status codes (2xx)
784             which are always accepted.
785         """
786
787         success = False
788         try_count = 0
789         while success is False:
790             try:
791                 res = self._download_webpage_handle(
792                     url_or_request, video_id, note, errnote, fatal,
793                     encoding=encoding, data=data, headers=headers, query=query,
794                     expected_status=expected_status)
795                 success = True
796             except compat_http_client.IncompleteRead as e:
797                 try_count += 1
798                 if try_count >= tries:
799                     raise e
800                 self._sleep(timeout, video_id)
801         if res is False:
802             return res
803         else:
804             content, _ = res
805             return content
806
807     def _download_xml_handle(
808             self, url_or_request, video_id, note='Downloading XML',
809             errnote='Unable to download XML', transform_source=None,
810             fatal=True, encoding=None, data=None, headers={}, query={},
811             expected_status=None):
812         """
813         Return a tuple (xml as an compat_etree_Element, URL handle).
814
815         See _download_webpage docstring for arguments specification.
816         """
817         res = self._download_webpage_handle(
818             url_or_request, video_id, note, errnote, fatal=fatal,
819             encoding=encoding, data=data, headers=headers, query=query,
820             expected_status=expected_status)
821         if res is False:
822             return res
823         xml_string, urlh = res
824         return self._parse_xml(
825             xml_string, video_id, transform_source=transform_source,
826             fatal=fatal), urlh
827
828     def _download_xml(
829             self, url_or_request, video_id,
830             note='Downloading XML', errnote='Unable to download XML',
831             transform_source=None, fatal=True, encoding=None,
832             data=None, headers={}, query={}, expected_status=None):
833         """
834         Return the xml as an compat_etree_Element.
835
836         See _download_webpage docstring for arguments specification.
837         """
838         res = self._download_xml_handle(
839             url_or_request, video_id, note=note, errnote=errnote,
840             transform_source=transform_source, fatal=fatal, encoding=encoding,
841             data=data, headers=headers, query=query,
842             expected_status=expected_status)
843         return res if res is False else res[0]
844
845     def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
846         if transform_source:
847             xml_string = transform_source(xml_string)
848         try:
849             return compat_etree_fromstring(xml_string.encode('utf-8'))
850         except compat_xml_parse_error as ve:
851             errmsg = '%s: Failed to parse XML ' % video_id
852             if fatal:
853                 raise ExtractorError(errmsg, cause=ve)
854             else:
855                 self.report_warning(errmsg + str(ve))
856
857     def _download_json_handle(
858             self, url_or_request, video_id, note='Downloading JSON metadata',
859             errnote='Unable to download JSON metadata', transform_source=None,
860             fatal=True, encoding=None, data=None, headers={}, query={},
861             expected_status=None):
862         """
863         Return a tuple (JSON object, URL handle).
864
865         See _download_webpage docstring for arguments specification.
866         """
867         res = self._download_webpage_handle(
868             url_or_request, video_id, note, errnote, fatal=fatal,
869             encoding=encoding, data=data, headers=headers, query=query,
870             expected_status=expected_status)
871         if res is False:
872             return res
873         json_string, urlh = res
874         return self._parse_json(
875             json_string, video_id, transform_source=transform_source,
876             fatal=fatal), urlh
877
878     def _download_json(
879             self, url_or_request, video_id, note='Downloading JSON metadata',
880             errnote='Unable to download JSON metadata', transform_source=None,
881             fatal=True, encoding=None, data=None, headers={}, query={},
882             expected_status=None):
883         """
884         Return the JSON object as a dict.
885
886         See _download_webpage docstring for arguments specification.
887         """
888         res = self._download_json_handle(
889             url_or_request, video_id, note=note, errnote=errnote,
890             transform_source=transform_source, fatal=fatal, encoding=encoding,
891             data=data, headers=headers, query=query,
892             expected_status=expected_status)
893         return res if res is False else res[0]
894
895     def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
896         if transform_source:
897             json_string = transform_source(json_string)
898         try:
899             return json.loads(json_string)
900         except ValueError as ve:
901             errmsg = '%s: Failed to parse JSON ' % video_id
902             if fatal:
903                 raise ExtractorError(errmsg, cause=ve)
904             else:
905                 self.report_warning(errmsg + str(ve))
906
907     def report_warning(self, msg, video_id=None):
908         idstr = '' if video_id is None else '%s: ' % video_id
909         self._downloader.report_warning(
910             '[%s] %s%s' % (self.IE_NAME, idstr, msg))
911
912     def to_screen(self, msg):
913         """Print msg to screen, prefixing it with '[ie_name]'"""
914         self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
915
916     def report_extraction(self, id_or_name):
917         """Report information extraction."""
918         self.to_screen('%s: Extracting information' % id_or_name)
919
920     def report_download_webpage(self, video_id):
921         """Report webpage download."""
922         self.to_screen('%s: Downloading webpage' % video_id)
923
    def report_age_confirmation(self):
        """Report an attempt to confirm age (for age-gated content)."""
        self.to_screen('Confirming age')
927
    def report_login(self):
        """Report an attempt to log in to the site."""
        self.to_screen('Logging in')
931
932     @staticmethod
933     def raise_login_required(msg='This video is only available for registered users'):
934         raise ExtractorError(
935             '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
936             expected=True)
937
    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
        # Raise an expected geo-restriction error; `countries` is forwarded to
        # GeoRestrictedError (presumably country codes the video is available
        # from — confirm against GeoRestrictedError's definition).
        raise GeoRestrictedError(msg, countries=countries)
941
942     # Methods for following #608
943     @staticmethod
944     def url_result(url, ie=None, video_id=None, video_title=None):
945         """Returns a URL that points to a page that should be processed"""
946         # TODO: ie should be the class used for getting the info
947         video_info = {'_type': 'url',
948                       'url': url,
949                       'ie_key': ie}
950         if video_id is not None:
951             video_info['id'] = video_id
952         if video_title is not None:
953             video_info['title'] = video_title
954         return video_info
955
956     def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
957         urls = orderedSet(
958             self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
959             for m in matches)
960         return self.playlist_result(
961             urls, playlist_id=playlist_id, playlist_title=playlist_title)
962
963     @staticmethod
964     def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
965         """Returns a playlist"""
966         video_info = {'_type': 'playlist',
967                       'entries': entries}
968         if playlist_id:
969             video_info['id'] = playlist_id
970         if playlist_title:
971             video_info['title'] = playlist_title
972         if playlist_description:
973             video_info['description'] = playlist_description
974         return video_info
975
976     def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
977         """
978         Perform a regex search on the given string, using a single or a list of
979         patterns returning the first matching group.
980         In case of failure return a default value or raise a WARNING or a
981         RegexNotFoundError, depending on fatal, specifying the field name.
982         """
983         if isinstance(pattern, (str, compat_str, compiled_regex_type)):
984             mobj = re.search(pattern, string, flags)
985         else:
986             for p in pattern:
987                 mobj = re.search(p, string, flags)
988                 if mobj:
989                     break
990
991         if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
992             _name = '\033[0;34m%s\033[0m' % name
993         else:
994             _name = name
995
996         if mobj:
997             if group is None:
998                 # return the first matching group
999                 return next(g for g in mobj.groups() if g is not None)
1000             else:
1001                 return mobj.group(group)
1002         elif default is not NO_DEFAULT:
1003             return default
1004         elif fatal:
1005             raise RegexNotFoundError('Unable to extract %s' % _name)
1006         else:
1007             self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
1008             return None
1009
1010     def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
1011         """
1012         Like _search_regex, but strips HTML tags and unescapes entities.
1013         """
1014         res = self._search_regex(pattern, string, name, default, fatal, flags, group)
1015         if res:
1016             return clean_html(res).strip()
1017         else:
1018             return res
1019
1020     def _get_netrc_login_info(self, netrc_machine=None):
1021         username = None
1022         password = None
1023         netrc_machine = netrc_machine or self._NETRC_MACHINE
1024
1025         if self._downloader.params.get('usenetrc', False):
1026             try:
1027                 info = netrc.netrc().authenticators(netrc_machine)
1028                 if info is not None:
1029                     username = info[0]
1030                     password = info[2]
1031                 else:
1032                     raise netrc.NetrcParseError(
1033                         'No authenticators for %s' % netrc_machine)
1034             except (IOError, netrc.NetrcParseError) as err:
1035                 self._downloader.report_warning(
1036                     'parsing .netrc: %s' % error_to_compat_str(err))
1037
1038         return username, password
1039
1040     def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
1041         """
1042         Get the login info as (username, password)
1043         First look for the manually specified credentials using username_option
1044         and password_option as keys in params dictionary. If no such credentials
1045         available look in the netrc file using the netrc_machine or _NETRC_MACHINE
1046         value.
1047         If there's no info available, return (None, None)
1048         """
1049         if self._downloader is None:
1050             return (None, None)
1051
1052         downloader_params = self._downloader.params
1053
1054         # Attempt to use provided username and password or .netrc data
1055         if downloader_params.get(username_option) is not None:
1056             username = downloader_params[username_option]
1057             password = downloader_params[password_option]
1058         else:
1059             username, password = self._get_netrc_login_info(netrc_machine)
1060
1061         return username, password
1062
1063     def _get_tfa_info(self, note='two-factor verification code'):
1064         """
1065         Get the two-factor authentication info
1066         TODO - asking the user will be required for sms/phone verify
1067         currently just uses the command line option
1068         If there's no info available, return None
1069         """
1070         if self._downloader is None:
1071             return None
1072         downloader_params = self._downloader.params
1073
1074         if downloader_params.get('twofactor') is not None:
1075             return downloader_params['twofactor']
1076
1077         return compat_getpass('Type %s and press [Return]: ' % note)
1078
1079     # Helper functions for extracting OpenGraph info
1080     @staticmethod
1081     def _og_regexes(prop):
1082         content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
1083         property_re = (r'(?:name|property)=(?:\'og[:-]%(prop)s\'|"og[:-]%(prop)s"|\s*og[:-]%(prop)s\b)'
1084                        % {'prop': re.escape(prop)})
1085         template = r'<meta[^>]+?%s[^>]+?%s'
1086         return [
1087             template % (property_re, content_re),
1088             template % (content_re, property_re),
1089         ]
1090
    @staticmethod
    def _meta_regex(prop):
        # Build a case-insensitive, verbose-mode regex matching a <meta> tag
        # whose itemprop/name/property/id/http-equiv attribute equals `prop`
        # (optionally quoted), capturing its content attribute value in the
        # 'content' named group.
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
1096
1097     def _og_search_property(self, prop, html, name=None, **kargs):
1098         if not isinstance(prop, (list, tuple)):
1099             prop = [prop]
1100         if name is None:
1101             name = 'OpenGraph %s' % prop[0]
1102         og_regexes = []
1103         for p in prop:
1104             og_regexes.extend(self._og_regexes(p))
1105         escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
1106         if escaped is None:
1107             return None
1108         return unescapeHTML(escaped)
1109
    def _og_search_thumbnail(self, html, **kargs):
        """Return the og:image value as 'thumbnail URL', or None (non-fatal)."""
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
1112
    def _og_search_description(self, html, **kargs):
        """Return the og:description value, or None (non-fatal)."""
        return self._og_search_property('description', html, fatal=False, **kargs)
1115
    def _og_search_title(self, html, **kargs):
        """Return the og:title value; fatal unless overridden via kargs."""
        return self._og_search_property('title', html, **kargs)
1118
1119     def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
1120         regexes = self._og_regexes('video') + self._og_regexes('video:url')
1121         if secure:
1122             regexes = self._og_regexes('video:secure_url') + regexes
1123         return self._html_search_regex(regexes, html, name, **kargs)
1124
    def _og_search_url(self, html, **kargs):
        """Return the og:url value; fatal unless overridden via kargs."""
        return self._og_search_property('url', html, **kargs)
1127
1128     def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
1129         if not isinstance(name, (list, tuple)):
1130             name = [name]
1131         if display_name is None:
1132             display_name = name[0]
1133         return self._html_search_regex(
1134             [self._meta_regex(n) for n in name],
1135             html, display_name, fatal=fatal, group='content', **kwargs)
1136
    def _dc_search_uploader(self, html):
        """Return the Dublin Core creator (dc.creator meta tag) as uploader."""
        return self._html_search_meta('dc.creator', html, 'uploader')
1139
1140     def _rta_search(self, html):
1141         # See http://www.rtalabel.org/index.php?content=howtofaq#single
1142         if re.search(r'(?ix)<meta\s+name="rating"\s+'
1143                      r'     content="RTA-5042-1996-1400-1577-RTA"',
1144                      html):
1145             return 18
1146         return 0
1147
1148     def _media_rating_search(self, html):
1149         # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
1150         rating = self._html_search_meta('rating', html)
1151
1152         if not rating:
1153             return None
1154
1155         RATING_TABLE = {
1156             'safe for kids': 0,
1157             'general': 8,
1158             '14 years': 14,
1159             'mature': 17,
1160             'restricted': 19,
1161         }
1162         return RATING_TABLE.get(rating.lower())
1163
1164     def _family_friendly_search(self, html):
1165         # See http://schema.org/VideoObject
1166         family_friendly = self._html_search_meta(
1167             'isFamilyFriendly', html, default=None)
1168
1169         if not family_friendly:
1170             return None
1171
1172         RATING_TABLE = {
1173             '1': 0,
1174             'true': 0,
1175             '0': 18,
1176             'false': 18,
1177         }
1178         return RATING_TABLE.get(family_friendly.lower())
1179
    def _twitter_search_player(self, html):
        """Return the Twitter card player URL (twitter:player meta tag)."""
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')
1183
    def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
        """Find all JSON-LD blocks in html and merge them into an info dict.

        Accepts `default` and `fatal` kwargs with _search_regex semantics:
        passing `default` implies fatal=False; otherwise failure raises
        RegexNotFoundError when fatal, or warns and returns {}.
        """
        json_ld_list = list(re.finditer(JSON_LD_RE, html))
        default = kwargs.get('default', NO_DEFAULT)
        # JSON-LD may be malformed and thus `fatal` should be respected.
        # At the same time `default` may be passed that assumes `fatal=False`
        # for _search_regex. Let's simulate the same behavior here as well.
        fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
        json_ld = []
        for mobj in json_ld_list:
            json_ld_item = self._parse_json(
                mobj.group('json_ld'), video_id, fatal=fatal)
            if not json_ld_item:
                continue
            # A script block may hold a single object or an array of objects
            if isinstance(json_ld_item, dict):
                json_ld.append(json_ld_item)
            elif isinstance(json_ld_item, (list, tuple)):
                json_ld.extend(json_ld_item)
        if json_ld:
            json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
        if json_ld:
            return json_ld
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract JSON-LD')
        else:
            self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
            return {}
1212
    def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
        """Convert already-extracted JSON-LD data into an info dict.

        json_ld may be a JSON string, a dict or a list/tuple of dicts.
        Entries without an '@context' key are skipped; when expected_type is
        given, entries of a different '@type' are skipped too.  Returns a
        dict with all None values filtered out.
        """
        if isinstance(json_ld, compat_str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}
        if not isinstance(json_ld, (list, tuple, dict)):
            return info
        if isinstance(json_ld, dict):
            json_ld = [json_ld]

        # Maps schema.org interactionType suffixes to info-dict count kinds
        INTERACTION_TYPE_MAP = {
            'CommentAction': 'comment',
            'AgreeAction': 'like',
            'DisagreeAction': 'dislike',
            'LikeAction': 'like',
            'DislikeAction': 'dislike',
            'ListenAction': 'view',
            'WatchAction': 'view',
            'ViewAction': 'view',
        }

        def extract_interaction_statistic(e):
            # Fill *_count fields from schema.org InteractionCounter entries;
            # the first occurrence of each count kind wins.
            interaction_statistic = e.get('interactionStatistic')
            if not isinstance(interaction_statistic, list):
                return
            for is_e in interaction_statistic:
                if not isinstance(is_e, dict):
                    continue
                if is_e.get('@type') != 'InteractionCounter':
                    continue
                interaction_type = is_e.get('interactionType')
                if not isinstance(interaction_type, compat_str):
                    continue
                interaction_count = int_or_none(is_e.get('userInteractionCount'))
                if interaction_count is None:
                    continue
                count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
                if not count_kind:
                    continue
                count_key = '%s_count' % count_kind
                if info.get(count_key) is not None:
                    continue
                info[count_key] = interaction_count

        def extract_video_object(e):
            # Populate the common fields from a schema.org VideoObject
            assert e['@type'] == 'VideoObject'
            info.update({
                'url': url_or_none(e.get('contentUrl')),
                'title': unescapeHTML(e.get('name')),
                'description': unescapeHTML(e.get('description')),
                'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
                'duration': parse_duration(e.get('duration')),
                'timestamp': unified_timestamp(e.get('uploadDate')),
                'filesize': float_or_none(e.get('contentSize')),
                'tbr': int_or_none(e.get('bitrate')),
                'width': int_or_none(e.get('width')),
                'height': int_or_none(e.get('height')),
                'view_count': int_or_none(e.get('interactionCount')),
            })
            extract_interaction_statistic(e)

        for e in json_ld:
            if '@context' in e:
                item_type = e.get('@type')
                if expected_type is not None and expected_type != item_type:
                    continue
                if item_type in ('TVEpisode', 'Episode'):
                    episode_name = unescapeHTML(e.get('name'))
                    info.update({
                        'episode': episode_name,
                        'episode_number': int_or_none(e.get('episodeNumber')),
                        'description': unescapeHTML(e.get('description')),
                    })
                    if not info.get('title') and episode_name:
                        info['title'] = episode_name
                    part_of_season = e.get('partOfSeason')
                    if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
                        info.update({
                            'season': unescapeHTML(part_of_season.get('name')),
                            'season_number': int_or_none(part_of_season.get('seasonNumber')),
                        })
                    part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                    if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
                        info['series'] = unescapeHTML(part_of_series.get('name'))
                elif item_type == 'Movie':
                    info.update({
                        'title': unescapeHTML(e.get('name')),
                        'description': unescapeHTML(e.get('description')),
                        'duration': parse_duration(e.get('duration')),
                        'timestamp': unified_timestamp(e.get('dateCreated')),
                    })
                elif item_type in ('Article', 'NewsArticle'):
                    info.update({
                        'timestamp': parse_iso8601(e.get('datePublished')),
                        'title': unescapeHTML(e.get('headline')),
                        'description': unescapeHTML(e.get('articleBody')),
                    })
                elif item_type == 'VideoObject':
                    extract_video_object(e)
                    if expected_type is None:
                        continue
                    else:
                        break
                video = e.get('video')
                if isinstance(video, dict) and video.get('@type') == 'VideoObject':
                    extract_video_object(video)
                # Without an expected type, keep scanning further entries;
                # with one, the first accepted entry wins
                if expected_type is None:
                    continue
                else:
                    break
        return dict((k, v) for k, v in info.items() if v is not None)
1325
1326     @staticmethod
1327     def _hidden_inputs(html):
1328         html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
1329         hidden_inputs = {}
1330         for input in re.findall(r'(?i)(<input[^>]+>)', html):
1331             attrs = extract_attributes(input)
1332             if not input:
1333                 continue
1334             if attrs.get('type') not in ('hidden', 'submit'):
1335                 continue
1336             name = attrs.get('name') or attrs.get('id')
1337             value = attrs.get('value')
1338             if name and value is not None:
1339                 hidden_inputs[name] = value
1340         return hidden_inputs
1341
1342     def _form_hidden_inputs(self, form_id, html):
1343         form = self._search_regex(
1344             r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
1345             html, '%s form' % form_id, group='form')
1346         return self._hidden_inputs(form)
1347
1348     def _sort_formats(self, formats, field_preference=None):
1349         if not formats:
1350             raise ExtractorError('No video formats found')
1351
1352         for f in formats:
1353             # Automatically determine tbr when missing based on abr and vbr (improves
1354             # formats sorting in some cases)
1355             if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
1356                 f['tbr'] = f['abr'] + f['vbr']
1357
1358         def _formats_key(f):
1359             # TODO remove the following workaround
1360             from ..utils import determine_ext
1361             if not f.get('ext') and 'url' in f:
1362                 f['ext'] = determine_ext(f['url'])
1363
1364             if isinstance(field_preference, (list, tuple)):
1365                 return tuple(
1366                     f.get(field)
1367                     if f.get(field) is not None
1368                     else ('' if field == 'format_id' else -1)
1369                     for field in field_preference)
1370
1371             preference = f.get('preference')
1372             if preference is None:
1373                 preference = 0
1374                 if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
1375                     preference -= 0.5
1376
1377             protocol = f.get('protocol') or determine_protocol(f)
1378             proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
1379
1380             if f.get('vcodec') == 'none':  # audio only
1381                 preference -= 50
1382                 if self._downloader.params.get('prefer_free_formats'):
1383                     ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
1384                 else:
1385                     ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
1386                 ext_preference = 0
1387                 try:
1388                     audio_ext_preference = ORDER.index(f['ext'])
1389                 except ValueError:
1390                     audio_ext_preference = -1
1391             else:
1392                 if f.get('acodec') == 'none':  # video only
1393                     preference -= 40
1394                 if self._downloader.params.get('prefer_free_formats'):
1395                     ORDER = ['flv', 'mp4', 'webm']
1396                 else:
1397                     ORDER = ['webm', 'flv', 'mp4']
1398                 try:
1399                     ext_preference = ORDER.index(f['ext'])
1400                 except ValueError:
1401                     ext_preference = -1
1402                 audio_ext_preference = 0
1403
1404             return (
1405                 preference,
1406                 f.get('language_preference') if f.get('language_preference') is not None else -1,
1407                 f.get('quality') if f.get('quality') is not None else -1,
1408                 f.get('tbr') if f.get('tbr') is not None else -1,
1409                 f.get('filesize') if f.get('filesize') is not None else -1,
1410                 f.get('vbr') if f.get('vbr') is not None else -1,
1411                 f.get('height') if f.get('height') is not None else -1,
1412                 f.get('width') if f.get('width') is not None else -1,
1413                 proto_preference,
1414                 ext_preference,
1415                 f.get('abr') if f.get('abr') is not None else -1,
1416                 audio_ext_preference,
1417                 f.get('fps') if f.get('fps') is not None else -1,
1418                 f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
1419                 f.get('source_preference') if f.get('source_preference') is not None else -1,
1420                 f.get('format_id') if f.get('format_id') is not None else '',
1421             )
1422         formats.sort(key=_formats_key)
1423
1424     def _check_formats(self, formats, video_id):
1425         if formats:
1426             formats[:] = filter(
1427                 lambda f: self._is_valid_url(
1428                     f['url'], video_id,
1429                     item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
1430                 formats)
1431
1432     @staticmethod
1433     def _remove_duplicate_formats(formats):
1434         format_urls = set()
1435         unique_formats = []
1436         for f in formats:
1437             if f['url'] not in format_urls:
1438                 format_urls.add(f['url'])
1439                 unique_formats.append(f)
1440         formats[:] = unique_formats
1441
1442     def _is_valid_url(self, url, video_id, item='video', headers={}):
1443         url = self._proto_relative_url(url, scheme='http:')
1444         # For now assume non HTTP(S) URLs always valid
1445         if not (url.startswith('http://') or url.startswith('https://')):
1446             return True
1447         try:
1448             self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
1449             return True
1450         except ExtractorError:
1451             self.to_screen(
1452                 '%s: %s URL is invalid, skipping' % (video_id, item))
1453             return False
1454
1455     def http_scheme(self):
1456         """ Either "http:" or "https:", depending on the user's preferences """
1457         return (
1458             'http:'
1459             if self._downloader.params.get('prefer_insecure', False)
1460             else 'https:')
1461
1462     def _proto_relative_url(self, url, scheme=None):
1463         if url is None:
1464             return url
1465         if url.startswith('//'):
1466             if scheme is None:
1467                 scheme = self.http_scheme()
1468             return scheme + url
1469         else:
1470             return url
1471
1472     def _sleep(self, timeout, video_id, msg_template=None):
1473         if msg_template is None:
1474             msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
1475         msg = msg_template % {'video_id': video_id, 'timeout': timeout}
1476         self.to_screen(msg)
1477         time.sleep(timeout)
1478
1479     def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
1480                              transform_source=lambda s: fix_xml_ampersands(s).strip(),
1481                              fatal=True, m3u8_id=None, data=None, headers={}, query={}):
1482         manifest = self._download_xml(
1483             manifest_url, video_id, 'Downloading f4m manifest',
1484             'Unable to download f4m manifest',
1485             # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
1486             # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
1487             transform_source=transform_source,
1488             fatal=fatal, data=data, headers=headers, query=query)
1489
1490         if manifest is False:
1491             return []
1492
1493         return self._parse_f4m_formats(
1494             manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
1495             transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
1496
    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        """Parse an f4m (HDS) manifest XML element into a list of format dicts.

        manifest is the parsed XML root; manifest_url is used to resolve
        relative media URLs and becomes the format URL. Returns [] for
        unusable manifests (DRM-protected, invalid, or media-less).
        """
        if not isinstance(manifest, compat_etree_Element) and not fatal:
            return []

        # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            # Fall back to the 2.0 manifest namespace
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from final formats
        # rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats

        manifest_base_url = get_base_url(manifest)

        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)

        vcodec = None
        mime_type = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'base URL', default=None)
        if mime_type and mime_type.startswith('audio/'):
            # audio-only manifest
            vcodec = 'none'

        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            # Fall back to the media index when no bitrate is declared
            format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources.  See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                            'vcodec': vcodec,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                'manifest_url': manifest_url,
                # Bootstrapped (stream-level) manifests carry FLV fragments
                'ext': 'flv' if bootstrap_info is not None else None,
                'protocol': 'f4m',
                'tbr': tbr,
                'width': width,
                'height': height,
                'vcodec': vcodec,
                'preference': preference,
            })
        return formats
1597
1598     def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
1599         return {
1600             'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
1601             'url': m3u8_url,
1602             'ext': ext,
1603             'protocol': 'm3u8',
1604             'preference': preference - 100 if preference else -100,
1605             'resolution': 'multiple',
1606             'format_note': 'Quality selection URL',
1607         }
1608
1609     def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
1610                               entry_protocol='m3u8', preference=None,
1611                               m3u8_id=None, note=None, errnote=None,
1612                               fatal=True, live=False, data=None, headers={},
1613                               query={}):
1614         res = self._download_webpage_handle(
1615             m3u8_url, video_id,
1616             note=note or 'Downloading m3u8 information',
1617             errnote=errnote or 'Failed to download m3u8 information',
1618             fatal=fatal, data=data, headers=headers, query=query)
1619
1620         if res is False:
1621             return []
1622
1623         m3u8_doc, urlh = res
1624         m3u8_url = urlh.geturl()
1625
1626         return self._parse_m3u8_formats(
1627             m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
1628             preference=preference, m3u8_id=m3u8_id, live=live)
1629
    def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
                            entry_protocol='m3u8', preference=None,
                            m3u8_id=None, live=False):
        """Parse an HLS playlist document into a list of format dicts.

        m3u8_url is used to resolve relative URIs and as each format's
        manifest_url. Returns [] for DRM-protected playlists; a media
        playlist is returned as a single format entry.
        """
        if '#EXT-X-FAXS-CM:' in m3u8_doc:  # Adobe Flash Access
            return []

        if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc):  # Apple FairPlay
            return []

        formats = []

        # Resolve possibly-relative URIs against the playlist URL
        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))

        # References:
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
        # 2. https://github.com/ytdl-org/youtube-dl/issues/12211
        # 3. https://github.com/ytdl-org/youtube-dl/issues/18923

        # We should try extracting formats only from master playlists [1, 4.3.4],
        # i.e. playlists that describe available qualities. On the other hand
        # media playlists [1, 4.3.3] should be returned as is since they contain
        # just the media without qualities renditions.
        # Fortunately, master playlist can be easily distinguished from media
        # playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
        # master playlist tags MUST NOT appear in a media playist and vice versa.
        # As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
        # media playlist and MUST NOT appear in master playlist thus we can
        # clearly detect media playlist with this criterion.

        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            return [{
                'url': m3u8_url,
                'format_id': m3u8_id,
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }]

        # GROUP-ID -> list of EXT-X-MEDIA attribute dicts
        groups = {}
        last_stream_inf = {}

        def extract_media(x_media_line):
            # Register an EXT-X-MEDIA rendition and emit a format for
            # VIDEO/AUDIO renditions that carry their own URI.
            media = parse_m3u8_attributes(x_media_line)
            # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
            media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
            if not (media_type and group_id and name):
                return
            groups.setdefault(group_id, []).append(media)
            if media_type not in ('VIDEO', 'AUDIO'):
                return
            media_url = media.get('URI')
            if media_url:
                format_id = []
                for v in (m3u8_id, group_id, name):
                    if v:
                        format_id.append(v)
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(media_url),
                    'manifest_url': m3u8_url,
                    'language': media.get('LANGUAGE'),
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                if media_type == 'AUDIO':
                    f['vcodec'] = 'none'
                formats.append(f)

        def build_stream_name():
            # Despite specification does not mention NAME attribute for
            # EXT-X-STREAM-INF tag it still sometimes may be present (see [1]
            # or vidio test in TestInfoExtractor.test_parse_m3u8_formats)
            # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
            stream_name = last_stream_inf.get('NAME')
            if stream_name:
                return stream_name
            # If there is no NAME in EXT-X-STREAM-INF it will be obtained
            # from corresponding rendition group
            stream_group_id = last_stream_inf.get('VIDEO')
            if not stream_group_id:
                return
            stream_group = groups.get(stream_group_id)
            if not stream_group:
                return stream_group_id
            rendition = stream_group[0]
            return rendition.get('NAME') or stream_group_id

        # parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
        # chance to detect video only formats when EXT-X-STREAM-INF tags
        # precede EXT-X-MEDIA tags in HLS manifest such as [3].
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-MEDIA:'):
                extract_media(line)

        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_stream_inf = parse_m3u8_attributes(line)
            elif line.startswith('#') or not line.strip():
                continue
            else:
                # A non-tag line is the variant URI for the preceding
                # EXT-X-STREAM-INF tag
                tbr = float_or_none(
                    last_stream_inf.get('AVERAGE-BANDWIDTH')
                    or last_stream_inf.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                stream_name = build_stream_name()
                # Bandwidth of live streams may differ over time thus making
                # format_id unpredictable. So it's better to keep provided
                # format_id intact.
                if not live:
                    format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
                manifest_url = format_url(line.strip())
                f = {
                    'format_id': '-'.join(format_id),
                    'url': manifest_url,
                    'manifest_url': m3u8_url,
                    'tbr': tbr,
                    'ext': ext,
                    'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                resolution = last_stream_inf.get('RESOLUTION')
                if resolution:
                    mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                    if mobj:
                        f['width'] = int(mobj.group('width'))
                        f['height'] = int(mobj.group('height'))
                # Unified Streaming Platform
                mobj = re.search(
                    r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                if mobj:
                    abr, vbr = mobj.groups()
                    abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                    f.update({
                        'vbr': vbr,
                        'abr': abr,
                    })
                codecs = parse_codecs(last_stream_inf.get('CODECS'))
                f.update(codecs)
                audio_group_id = last_stream_inf.get('AUDIO')
                # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
                # references a rendition group MUST have a CODECS attribute.
                # However, this is not always respected, for example, [2]
                # contains EXT-X-STREAM-INF tag which references AUDIO
                # rendition group but does not have CODECS and despite
                # referencing an audio group it represents a complete
                # (with audio and video) format. So, for such cases we will
                # ignore references to rendition groups and treat them
                # as complete formats.
                if audio_group_id and codecs and f.get('vcodec') != 'none':
                    audio_group = groups.get(audio_group_id)
                    if audio_group and audio_group[0].get('URI'):
                        # TODO: update acodec for audio only formats with
                        # the same GROUP-ID
                        f['acodec'] = 'none'
                formats.append(f)

                # for DailyMotion
                progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
                if progressive_uri:
                    http_f = f.copy()
                    del http_f['manifest_url']
                    http_f.update({
                        'format_id': f['format_id'].replace('hls-', 'http-'),
                        'protocol': 'http',
                        'url': progressive_uri,
                    })
                    formats.append(http_f)

                last_stream_inf = {}
        return formats
1807
1808     @staticmethod
1809     def _xpath_ns(path, namespace=None):
1810         if not namespace:
1811             return path
1812         out = []
1813         for c in path.split('/'):
1814             if not c or c == '.':
1815                 out.append(c)
1816             else:
1817                 out.append('{%s}%s' % (namespace, c))
1818         return '/'.join(out)
1819
1820     def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
1821         smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
1822
1823         if smil is False:
1824             assert not fatal
1825             return []
1826
1827         namespace = self._parse_smil_namespace(smil)
1828
1829         return self._parse_smil_formats(
1830             smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
1831
1832     def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
1833         smil = self._download_smil(smil_url, video_id, fatal=fatal)
1834         if smil is False:
1835             return {}
1836         return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
1837
1838     def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
1839         return self._download_xml(
1840             smil_url, video_id, 'Downloading SMIL file',
1841             'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
1842
1843     def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
1844         namespace = self._parse_smil_namespace(smil)
1845
1846         formats = self._parse_smil_formats(
1847             smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
1848         subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
1849
1850         video_id = os.path.splitext(url_basename(smil_url))[0]
1851         title = None
1852         description = None
1853         upload_date = None
1854         for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
1855             name = meta.attrib.get('name')
1856             content = meta.attrib.get('content')
1857             if not name or not content:
1858                 continue
1859             if not title and name == 'title':
1860                 title = content
1861             elif not description and name in ('description', 'abstract'):
1862                 description = content
1863             elif not upload_date and name == 'date':
1864                 upload_date = unified_strdate(content)
1865
1866         thumbnails = [{
1867             'id': image.get('type'),
1868             'url': image.get('src'),
1869             'width': int_or_none(image.get('width')),
1870             'height': int_or_none(image.get('height')),
1871         } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
1872
1873         return {
1874             'id': video_id,
1875             'title': title or video_id,
1876             'description': description,
1877             'upload_date': upload_date,
1878             'thumbnails': thumbnails,
1879             'formats': formats,
1880             'subtitles': subtitles,
1881         }
1882
1883     def _parse_smil_namespace(self, smil):
1884         return self._search_regex(
1885             r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
1886
    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        """Extract format dicts from the <video>/<audio> elements of a SMIL document.

        smil_url is the base for relative sources unless the document declares
        a base/httpBase meta. transform_rtmp_url, when given, is called as
        (streamer, play_path) -> (streamer, play_path) to rewrite RTMP
        locations.
        """
        # Resolve the base URL for relative src attributes
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0

        srcs = []
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            # Skip duplicates and sourceless entries
            if not src or src in srcs:
                continue
            srcs.append(src)

            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
            src_url = src_url.strip()

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                # A lone entry usually lacks quality metadata; copy it over
                # from this SMIL element.
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
            elif src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
            elif src_ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    src_url, video_id, mpd_id='dash', fatal=False))
            elif re.search(r'\.ism/[Mm]anifest', src_url):
                formats.extend(self._extract_ism_formats(
                    src_url, video_id, ism_id='mss', fatal=False))
            elif src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })

        return formats
1981
1982     def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
1983         urls = []
1984         subtitles = {}
1985         for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
1986             src = textstream.get('src')
1987             if not src or src in urls:
1988                 continue
1989             urls.append(src)
1990             ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
1991             lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
1992             subtitles.setdefault(lang, []).append({
1993                 'url': src,
1994                 'ext': ext,
1995             })
1996         return subtitles
1997
1998     def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
1999         xspf = self._download_xml(
2000             xspf_url, playlist_id, 'Downloading xpsf playlist',
2001             'Unable to download xspf manifest', fatal=fatal)
2002         if xspf is False:
2003             return []
2004         return self._parse_xspf(
2005             xspf, playlist_id, xspf_url=xspf_url,
2006             xspf_base_url=base_url(xspf_url))
2007
2008     def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
2009         NS_MAP = {
2010             'xspf': 'http://xspf.org/ns/0/',
2011             's1': 'http://static.streamone.nl/player/ns/0',
2012         }
2013
2014         entries = []
2015         for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
2016             title = xpath_text(
2017                 track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
2018             description = xpath_text(
2019                 track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
2020             thumbnail = xpath_text(
2021                 track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
2022             duration = float_or_none(
2023                 xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
2024
2025             formats = []
2026             for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
2027                 format_url = urljoin(xspf_base_url, location.text)
2028                 if not format_url:
2029                     continue
2030                 formats.append({
2031                     'url': format_url,
2032                     'manifest_url': xspf_url,
2033                     'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
2034                     'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
2035                     'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
2036                 })
2037             self._sort_formats(formats)
2038
2039             entries.append({
2040                 'id': playlist_id,
2041                 'title': title,
2042                 'description': description,
2043                 'thumbnail': thumbnail,
2044                 'duration': duration,
2045                 'formats': formats,
2046             })
2047         return entries
2048
2049     def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}, data=None, headers={}, query={}):
2050         res = self._download_xml_handle(
2051             mpd_url, video_id,
2052             note=note or 'Downloading MPD manifest',
2053             errnote=errnote or 'Failed to download MPD manifest',
2054             fatal=fatal, data=data, headers=headers, query=query)
2055         if res is False:
2056             return []
2057         mpd_doc, urlh = res
2058         if mpd_doc is None:
2059             return []
2060         mpd_base_url = base_url(urlh.geturl())
2061
2062         return self._parse_mpd_formats(
2063             mpd_doc, mpd_id=mpd_id, mpd_base_url=mpd_base_url,
2064             formats_dict=formats_dict, mpd_url=mpd_url)
2065
2066     def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
2067         """
2068         Parse formats from MPD manifest.
2069         References:
2070          1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
2071             http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2072          2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
2073         """
2074         if mpd_doc.get('type') == 'dynamic':
2075             return []
2076
2077         namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
2078
2079         def _add_ns(path):
2080             return self._xpath_ns(path, namespace)
2081
2082         def is_drm_protected(element):
2083             return element.find(_add_ns('ContentProtection')) is not None
2084
2085         def extract_multisegment_info(element, ms_parent_info):
2086             ms_info = ms_parent_info.copy()
2087
2088             # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
2089             # common attributes and elements.  We will only extract relevant
2090             # for us.
2091             def extract_common(source):
2092                 segment_timeline = source.find(_add_ns('SegmentTimeline'))
2093                 if segment_timeline is not None:
2094                     s_e = segment_timeline.findall(_add_ns('S'))
2095                     if s_e:
2096                         ms_info['total_number'] = 0
2097                         ms_info['s'] = []
2098                         for s in s_e:
2099                             r = int(s.get('r', 0))
2100                             ms_info['total_number'] += 1 + r
2101                             ms_info['s'].append({
2102                                 't': int(s.get('t', 0)),
2103                                 # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
2104                                 'd': int(s.attrib['d']),
2105                                 'r': r,
2106                             })
2107                 start_number = source.get('startNumber')
2108                 if start_number:
2109                     ms_info['start_number'] = int(start_number)
2110                 timescale = source.get('timescale')
2111                 if timescale:
2112                     ms_info['timescale'] = int(timescale)
2113                 segment_duration = source.get('duration')
2114                 if segment_duration:
2115                     ms_info['segment_duration'] = float(segment_duration)
2116
2117             def extract_Initialization(source):
2118                 initialization = source.find(_add_ns('Initialization'))
2119                 if initialization is not None:
2120                     ms_info['initialization_url'] = initialization.attrib['sourceURL']
2121
2122             segment_list = element.find(_add_ns('SegmentList'))
2123             if segment_list is not None:
2124                 extract_common(segment_list)
2125                 extract_Initialization(segment_list)
2126                 segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
2127                 if segment_urls_e:
2128                     ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
2129             else:
2130                 segment_template = element.find(_add_ns('SegmentTemplate'))
2131                 if segment_template is not None:
2132                     extract_common(segment_template)
2133                     media = segment_template.get('media')
2134                     if media:
2135                         ms_info['media'] = media
2136                     initialization = segment_template.get('initialization')
2137                     if initialization:
2138                         ms_info['initialization'] = initialization
2139                     else:
2140                         extract_Initialization(segment_template)
2141             return ms_info
2142
2143         mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
2144         formats = []
2145         for period in mpd_doc.findall(_add_ns('Period')):
2146             period_duration = parse_duration(period.get('duration')) or mpd_duration
2147             period_ms_info = extract_multisegment_info(period, {
2148                 'start_number': 1,
2149                 'timescale': 1,
2150             })
2151             for adaptation_set in period.findall(_add_ns('AdaptationSet')):
2152                 if is_drm_protected(adaptation_set):
2153                     continue
2154                 adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
2155                 for representation in adaptation_set.findall(_add_ns('Representation')):
2156                     if is_drm_protected(representation):
2157                         continue
2158                     representation_attrib = adaptation_set.attrib.copy()
2159                     representation_attrib.update(representation.attrib)
2160                     # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
2161                     mime_type = representation_attrib['mimeType']
2162                     content_type = mime_type.split('/')[0]
2163                     if content_type == 'text':
2164                         # TODO implement WebVTT downloading
2165                         pass
2166                     elif content_type in ('video', 'audio'):
2167                         base_url = ''
2168                         for element in (representation, adaptation_set, period, mpd_doc):
2169                             base_url_e = element.find(_add_ns('BaseURL'))
2170                             if base_url_e is not None:
2171                                 base_url = base_url_e.text + base_url
2172                                 if re.match(r'^https?://', base_url):
2173                                     break
2174                         if mpd_base_url and not re.match(r'^https?://', base_url):
2175                             if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
2176                                 mpd_base_url += '/'
2177                             base_url = mpd_base_url + base_url
2178                         representation_id = representation_attrib.get('id')
2179                         lang = representation_attrib.get('lang')
2180                         url_el = representation.find(_add_ns('BaseURL'))
2181                         filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
2182                         bandwidth = int_or_none(representation_attrib.get('bandwidth'))
2183                         f = {
2184                             'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
2185                             'manifest_url': mpd_url,
2186                             'ext': mimetype2ext(mime_type),
2187                             'width': int_or_none(representation_attrib.get('width')),
2188                             'height': int_or_none(representation_attrib.get('height')),
2189                             'tbr': float_or_none(bandwidth, 1000),
2190                             'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
2191                             'fps': int_or_none(representation_attrib.get('frameRate')),
2192                             'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
2193                             'format_note': 'DASH %s' % content_type,
2194                             'filesize': filesize,
2195                             'container': mimetype2ext(mime_type) + '_dash',
2196                         }
2197                         f.update(parse_codecs(representation_attrib.get('codecs')))
2198                         representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
2199
2200                         def prepare_template(template_name, identifiers):
2201                             tmpl = representation_ms_info[template_name]
2202                             # First of, % characters outside $...$ templates
2203                             # must be escaped by doubling for proper processing
2204                             # by % operator string formatting used further (see
2205                             # https://github.com/ytdl-org/youtube-dl/issues/16867).
2206                             t = ''
2207                             in_template = False
2208                             for c in tmpl:
2209                                 t += c
2210                                 if c == '$':
2211                                     in_template = not in_template
2212                                 elif c == '%' and not in_template:
2213                                     t += c
2214                             # Next, $...$ templates are translated to their
2215                             # %(...) counterparts to be used with % operator
2216                             t = t.replace('$RepresentationID$', representation_id)
2217                             t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
2218                             t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
2219                             t.replace('$$', '$')
2220                             return t
2221
2222                         # @initialization is a regular template like @media one
2223                         # so it should be handled just the same way (see
2224                         # https://github.com/ytdl-org/youtube-dl/issues/11605)
2225                         if 'initialization' in representation_ms_info:
2226                             initialization_template = prepare_template(
2227                                 'initialization',
2228                                 # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
2229                                 # $Time$ shall not be included for @initialization thus
2230                                 # only $Bandwidth$ remains
2231                                 ('Bandwidth', ))
2232                             representation_ms_info['initialization_url'] = initialization_template % {
2233                                 'Bandwidth': bandwidth,
2234                             }
2235
2236                         def location_key(location):
2237                             return 'url' if re.match(r'^https?://', location) else 'path'
2238
2239                         if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
2240
2241                             media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
2242                             media_location_key = location_key(media_template)
2243
2244                             # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
2245                             # can't be used at the same time
2246                             if '%(Number' in media_template and 's' not in representation_ms_info:
2247                                 segment_duration = None
2248                                 if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
2249                                     segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
2250                                     representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
2251                                 representation_ms_info['fragments'] = [{
2252                                     media_location_key: media_template % {
2253                                         'Number': segment_number,
2254                                         'Bandwidth': bandwidth,
2255                                     },
2256                                     'duration': segment_duration,
2257                                 } for segment_number in range(
2258                                     representation_ms_info['start_number'],
2259                                     representation_ms_info['total_number'] + representation_ms_info['start_number'])]
2260                             else:
2261                                 # $Number*$ or $Time$ in media template with S list available
2262                                 # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
2263                                 # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
2264                                 representation_ms_info['fragments'] = []
2265                                 segment_time = 0
2266                                 segment_d = None
2267                                 segment_number = representation_ms_info['start_number']
2268
2269                                 def add_segment_url():
2270                                     segment_url = media_template % {
2271                                         'Time': segment_time,
2272                                         'Bandwidth': bandwidth,
2273                                         'Number': segment_number,
2274                                     }
2275                                     representation_ms_info['fragments'].append({
2276                                         media_location_key: segment_url,
2277                                         'duration': float_or_none(segment_d, representation_ms_info['timescale']),
2278                                     })
2279
2280                                 for num, s in enumerate(representation_ms_info['s']):
2281                                     segment_time = s.get('t') or segment_time
2282                                     segment_d = s['d']
2283                                     add_segment_url()
2284                                     segment_number += 1
2285                                     for r in range(s.get('r', 0)):
2286                                         segment_time += segment_d
2287                                         add_segment_url()
2288                                         segment_number += 1
2289                                     segment_time += segment_d
2290                         elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
2291                             # No media template
2292                             # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
2293                             # or any YouTube dashsegments video
2294                             fragments = []
2295                             segment_index = 0
2296                             timescale = representation_ms_info['timescale']
2297                             for s in representation_ms_info['s']:
2298                                 duration = float_or_none(s['d'], timescale)
2299                                 for r in range(s.get('r', 0) + 1):
2300                                     segment_uri = representation_ms_info['segment_urls'][segment_index]
2301                                     fragments.append({
2302                                         location_key(segment_uri): segment_uri,
2303                                         'duration': duration,
2304                                     })
2305                                     segment_index += 1
2306                             representation_ms_info['fragments'] = fragments
2307                         elif 'segment_urls' in representation_ms_info:
2308                             # Segment URLs with no SegmentTimeline
2309                             # Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
2310                             # https://github.com/ytdl-org/youtube-dl/pull/14844
2311                             fragments = []
2312                             segment_duration = float_or_none(
2313                                 representation_ms_info['segment_duration'],
2314                                 representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
2315                             for segment_url in representation_ms_info['segment_urls']:
2316                                 fragment = {
2317                                     location_key(segment_url): segment_url,
2318                                 }
2319                                 if segment_duration:
2320                                     fragment['duration'] = segment_duration
2321                                 fragments.append(fragment)
2322                             representation_ms_info['fragments'] = fragments
2323                         # If there is a fragments key available then we correctly recognized fragmented media.
2324                         # Otherwise we will assume unfragmented media with direct access. Technically, such
2325                         # assumption is not necessarily correct since we may simply have no support for
2326                         # some forms of fragmented media renditions yet, but for now we'll use this fallback.
2327                         if 'fragments' in representation_ms_info:
2328                             f.update({
2329                                 # NB: mpd_url may be empty when MPD manifest is parsed from a string
2330                                 'url': mpd_url or base_url,
2331                                 'fragment_base_url': base_url,
2332                                 'fragments': [],
2333                                 'protocol': 'http_dash_segments',
2334                             })
2335                             if 'initialization_url' in representation_ms_info:
2336                                 initialization_url = representation_ms_info['initialization_url']
2337                                 if not f.get('url'):
2338                                     f['url'] = initialization_url
2339                                 f['fragments'].append({location_key(initialization_url): initialization_url})
2340                             f['fragments'].extend(representation_ms_info['fragments'])
2341                         else:
2342                             # Assuming direct URL to unfragmented media.
2343                             f['url'] = base_url
2344
2345                         # According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
2346                         # is not necessarily unique within a Period thus formats with
2347                         # the same `format_id` are quite possible. There are numerous examples
2348                         # of such manifests (see https://github.com/ytdl-org/youtube-dl/issues/15111,
2349                         # https://github.com/ytdl-org/youtube-dl/issues/13919)
2350                         full_info = formats_dict.get(representation_id, {}).copy()
2351                         full_info.update(f)
2352                         formats.append(full_info)
2353                     else:
2354                         self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
2355         return formats
2356
2357     def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
2358         res = self._download_xml_handle(
2359             ism_url, video_id,
2360             note=note or 'Downloading ISM manifest',
2361             errnote=errnote or 'Failed to download ISM manifest',
2362             fatal=fatal, data=data, headers=headers, query=query)
2363         if res is False:
2364             return []
2365         ism_doc, urlh = res
2366         if ism_doc is None:
2367             return []
2368
2369         return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
2370
2371     def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
2372         """
2373         Parse formats from ISM manifest.
2374         References:
2375          1. [MS-SSTR]: Smooth Streaming Protocol,
2376             https://msdn.microsoft.com/en-us/library/ff469518.aspx
2377         """
2378         if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
2379             return []
2380
2381         duration = int(ism_doc.attrib['Duration'])
2382         timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
2383
2384         formats = []
2385         for stream in ism_doc.findall('StreamIndex'):
2386             stream_type = stream.get('Type')
2387             if stream_type not in ('video', 'audio'):
2388                 continue
2389             url_pattern = stream.attrib['Url']
2390             stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
2391             stream_name = stream.get('Name')
2392             for track in stream.findall('QualityLevel'):
2393                 fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
2394                 # TODO: add support for WVC1 and WMAP
2395                 if fourcc not in ('H264', 'AVC1', 'AACL'):
2396                     self.report_warning('%s is not a supported codec' % fourcc)
2397                     continue
2398                 tbr = int(track.attrib['Bitrate']) // 1000
2399                 # [1] does not mention Width and Height attributes. However,
2400                 # they're often present while MaxWidth and MaxHeight are
2401                 # missing, so should be used as fallbacks
2402                 width = int_or_none(track.get('MaxWidth') or track.get('Width'))
2403                 height = int_or_none(track.get('MaxHeight') or track.get('Height'))
2404                 sampling_rate = int_or_none(track.get('SamplingRate'))
2405
2406                 track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
2407                 track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
2408
2409                 fragments = []
2410                 fragment_ctx = {
2411                     'time': 0,
2412                 }
2413                 stream_fragments = stream.findall('c')
2414                 for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
2415                     fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
2416                     fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
2417                     fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
2418                     if not fragment_ctx['duration']:
2419                         try:
2420                             next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
2421                         except IndexError:
2422                             next_fragment_time = duration
2423                         fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
2424                     for _ in range(fragment_repeat):
2425                         fragments.append({
2426                             'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
2427                             'duration': fragment_ctx['duration'] / stream_timescale,
2428                         })
2429                         fragment_ctx['time'] += fragment_ctx['duration']
2430
2431                 format_id = []
2432                 if ism_id:
2433                     format_id.append(ism_id)
2434                 if stream_name:
2435                     format_id.append(stream_name)
2436                 format_id.append(compat_str(tbr))
2437
2438                 formats.append({
2439                     'format_id': '-'.join(format_id),
2440                     'url': ism_url,
2441                     'manifest_url': ism_url,
2442                     'ext': 'ismv' if stream_type == 'video' else 'isma',
2443                     'width': width,
2444                     'height': height,
2445                     'tbr': tbr,
2446                     'asr': sampling_rate,
2447                     'vcodec': 'none' if stream_type == 'audio' else fourcc,
2448                     'acodec': 'none' if stream_type == 'video' else fourcc,
2449                     'protocol': 'ism',
2450                     'fragments': fragments,
2451                     '_download_params': {
2452                         'duration': duration,
2453                         'timescale': stream_timescale,
2454                         'width': width or 0,
2455                         'height': height or 0,
2456                         'fourcc': fourcc,
2457                         'codec_private_data': track.get('CodecPrivateData'),
2458                         'sampling_rate': sampling_rate,
2459                         'channels': int_or_none(track.get('Channels', 2)),
2460                         'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
2461                         'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
2462                     },
2463                 })
2464         return formats
2465
    def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
        """Extract media entries from HTML5 <video>/<audio> (and AMP) tags.

        Returns a list of dicts, one per media tag found in webpage, each
        with 'formats', 'subtitles' and 'thumbnail' keys. Relative URLs are
        resolved against base_url, which is also set as the Referer header
        on every extracted format.
        """
        def absolute_url(item_url):
            # Resolve item_url relative to the page URL
            return urljoin(base_url, item_url)

        def parse_content_type(content_type):
            # Derive {'ext': ..., 'vcodec': ..., 'acodec': ...} from a MIME
            # type such as 'video/mp4; codecs="avc1.42E01E, mp4a.40.2"'
            if not content_type:
                return {}
            ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
            if ctr:
                mimetype, codecs = ctr.groups()
                f = parse_codecs(codecs)
                f['ext'] = mimetype2ext(mimetype)
                return f
            return {}

        # NOTE(review): mutable default argument; type_info is only read
        # here, so this is safe, but a None sentinel would be cleaner.
        def _media_formats(src, cur_media_type, type_info={}):
            # Returns (is_plain_url, formats): manifest URLs (m3u8/mpd) are
            # expanded into multiple formats, plain URLs yield a single one.
            full_url = absolute_url(src)
            ext = type_info.get('ext') or determine_ext(full_url)
            if ext == 'm3u8':
                is_plain_url = False
                formats = self._extract_m3u8_formats(
                    full_url, video_id, ext='mp4',
                    entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                    preference=preference, fatal=False)
            elif ext == 'mpd':
                is_plain_url = False
                formats = self._extract_mpd_formats(
                    full_url, video_id, mpd_id=mpd_id, fatal=False)
            else:
                is_plain_url = True
                formats = [{
                    'url': full_url,
                    'vcodec': 'none' if cur_media_type == 'audio' else None,
                }]
            return is_plain_url, formats

        entries = []
        # amp-video and amp-audio are very similar to their HTML5 counterparts
        # so we will include them right here (see
        # https://www.ampproject.org/docs/reference/components/amp-video)
        # Self-closing tags first (they have no inner content)
        media_tags = [(media_tag, media_type, '')
                      for media_tag, media_type
                      in re.findall(r'(?s)(<(?:amp-)?(video|audio)[^>]*/>)', webpage)]
        media_tags.extend(re.findall(
            # We only allow video|audio followed by a whitespace or '>'.
            # Allowing more characters may end up in significant slow down (see
            # https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
            # http://www.porntrex.com/maps/videositemap.xml).
            r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
        for media_tag, media_type, media_content in media_tags:
            media_info = {
                'formats': [],
                'subtitles': {},
            }
            media_attributes = extract_attributes(media_tag)
            src = strip_or_none(media_attributes.get('src'))
            if src:
                _, formats = _media_formats(src, media_type)
                media_info['formats'].extend(formats)
            media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
            if media_content:
                # Nested <source> tags carry alternative renditions
                for source_tag in re.findall(r'<source[^>]+>', media_content):
                    s_attr = extract_attributes(source_tag)
                    # data-video-src and data-src are non standard but seen
                    # several times in the wild
                    src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
                    if not src:
                        continue
                    f = parse_content_type(s_attr.get('type'))
                    is_plain_url, formats = _media_formats(src, media_type, f)
                    if is_plain_url:
                        # width, height, res, label and title attributes are
                        # all not standard but seen several times in the wild
                        labels = [
                            s_attr.get(lbl)
                            for lbl in ('label', 'title')
                            if str_or_none(s_attr.get(lbl))
                        ]
                        width = int_or_none(s_attr.get('width'))
                        height = (int_or_none(s_attr.get('height'))
                                  or int_or_none(s_attr.get('res')))
                        if not width or not height:
                            # Fall back to resolutions embedded in labels
                            # such as "1280x720" or "720p"
                            for lbl in labels:
                                resolution = parse_resolution(lbl)
                                if not resolution:
                                    continue
                                width = width or resolution.get('width')
                                height = height or resolution.get('height')
                        for lbl in labels:
                            tbr = parse_bitrate(lbl)
                            if tbr:
                                break
                        else:
                            tbr = None
                        f.update({
                            'width': width,
                            'height': height,
                            'tbr': tbr,
                            'format_id': s_attr.get('label') or s_attr.get('title'),
                        })
                        f.update(formats[0])
                        media_info['formats'].append(f)
                    else:
                        media_info['formats'].extend(formats)
                # <track> tags provide subtitles/captions
                for track_tag in re.findall(r'<track[^>]+>', media_content):
                    track_attributes = extract_attributes(track_tag)
                    kind = track_attributes.get('kind')
                    if not kind or kind in ('subtitles', 'captions'):
                        src = strip_or_none(track_attributes.get('src'))
                        if not src:
                            continue
                        lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                        media_info['subtitles'].setdefault(lang, []).append({
                            'url': absolute_url(src),
                        })
            for f in media_info['formats']:
                # Many sites refuse to serve media without the page as Referer
                f.setdefault('http_headers', {})['Referer'] = base_url
            if media_info['formats'] or media_info['subtitles']:
                entries.append(media_info)
        return entries
2586
2587     def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
2588         formats = []
2589         hdcore_sign = 'hdcore=3.7.0'
2590         f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
2591         hds_host = hosts.get('hds')
2592         if hds_host:
2593             f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
2594         if 'hdcore=' not in f4m_url:
2595             f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
2596         f4m_formats = self._extract_f4m_formats(
2597             f4m_url, video_id, f4m_id='hds', fatal=False)
2598         for entry in f4m_formats:
2599             entry.update({'extra_param_to_segment_url': hdcore_sign})
2600         formats.extend(f4m_formats)
2601         m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
2602         hls_host = hosts.get('hls')
2603         if hls_host:
2604             m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
2605         formats.extend(self._extract_m3u8_formats(
2606             m3u8_url, video_id, 'mp4', 'm3u8_native',
2607             m3u8_id='hls', fatal=False))
2608         return formats
2609
2610     def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
2611         query = compat_urlparse.urlparse(url).query
2612         url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
2613         mobj = re.search(
2614             r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
2615         url_base = mobj.group('url')
2616         http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
2617         formats = []
2618
2619         def manifest_url(manifest):
2620             m_url = '%s/%s' % (http_base_url, manifest)
2621             if query:
2622                 m_url += '?%s' % query
2623             return m_url
2624
2625         if 'm3u8' not in skip_protocols:
2626             formats.extend(self._extract_m3u8_formats(
2627                 manifest_url('playlist.m3u8'), video_id, 'mp4',
2628                 m3u8_entry_protocol, m3u8_id='hls', fatal=False))
2629         if 'f4m' not in skip_protocols:
2630             formats.extend(self._extract_f4m_formats(
2631                 manifest_url('manifest.f4m'),
2632                 video_id, f4m_id='hds', fatal=False))
2633         if 'dash' not in skip_protocols:
2634             formats.extend(self._extract_mpd_formats(
2635                 manifest_url('manifest.mpd'),
2636                 video_id, mpd_id='dash', fatal=False))
2637         if re.search(r'(?:/smil:|\.smil)', url_base):
2638             if 'smil' not in skip_protocols:
2639                 rtmp_formats = self._extract_smil_formats(
2640                     manifest_url('jwplayer.smil'),
2641                     video_id, fatal=False)
2642                 for rtmp_format in rtmp_formats:
2643                     rtsp_format = rtmp_format.copy()
2644                     rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
2645                     del rtsp_format['play_path']
2646                     del rtsp_format['ext']
2647                     rtsp_format.update({
2648                         'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
2649                         'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
2650                         'protocol': 'rtsp',
2651                     })
2652                     formats.extend([rtmp_format, rtsp_format])
2653         else:
2654             for protocol in ('rtmp', 'rtsp'):
2655                 if protocol not in skip_protocols:
2656                     formats.append({
2657                         'url': '%s:%s' % (protocol, url_base),
2658                         'format_id': protocol,
2659                         'protocol': protocol,
2660                     })
2661         return formats
2662
2663     def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
2664         mobj = re.search(
2665             r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
2666             webpage)
2667         if mobj:
2668             try:
2669                 jwplayer_data = self._parse_json(mobj.group('options'),
2670                                                  video_id=video_id,
2671                                                  transform_source=transform_source)
2672             except ExtractorError:
2673                 pass
2674             else:
2675                 if isinstance(jwplayer_data, dict):
2676                     return jwplayer_data
2677
2678     def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
2679         jwplayer_data = self._find_jwplayer_data(
2680             webpage, video_id, transform_source=js_to_json)
2681         return self._parse_jwplayer_data(
2682             jwplayer_data, video_id, *args, **kwargs)
2683
    def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                             m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        """Turn a jwplayer setup/options dict into an info dict or playlist.

        Normalizes several historical jwplayer config layouts (flattened
        playlists, single playlist item, flattened sources) before
        extraction. Returns a single entry dict when the playlist contains
        one item, otherwise a playlist result.
        """
        # JWPlayer backward compatibility: flattened playlists
        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
        if 'playlist' not in jwplayer_data:
            jwplayer_data = {'playlist': [jwplayer_data]}

        entries = []

        # JWPlayer backward compatibility: single playlist item
        # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
        if not isinstance(jwplayer_data['playlist'], list):
            jwplayer_data['playlist'] = [jwplayer_data['playlist']]

        for video_data in jwplayer_data['playlist']:
            # JWPlayer backward compatibility: flattened sources
            # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
            if 'sources' not in video_data:
                video_data['sources'] = [video_data]

            this_video_id = video_id or video_data['mediaid']

            formats = self._parse_jwplayer_formats(
                video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
                mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)

            # Collect caption/subtitle tracks keyed by label ('en' fallback)
            subtitles = {}
            tracks = video_data.get('tracks')
            if tracks and isinstance(tracks, list):
                for track in tracks:
                    if not isinstance(track, dict):
                        continue
                    track_kind = track.get('kind')
                    if not track_kind or not isinstance(track_kind, compat_str):
                        continue
                    if track_kind.lower() not in ('captions', 'subtitles'):
                        continue
                    track_url = urljoin(base_url, track.get('file'))
                    if not track_url:
                        continue
                    subtitles.setdefault(track.get('label') or 'en', []).append({
                        'url': self._proto_relative_url(track_url)
                    })

            entry = {
                'id': this_video_id,
                'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
                'description': clean_html(video_data.get('description')),
                'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
                'timestamp': int_or_none(video_data.get('pubdate')),
                'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
                'subtitles': subtitles,
            }
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
            # A lone YouTube URL is delegated to the YouTube extractor
            if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
                entry.update({
                    '_type': 'url_transparent',
                    'url': formats[0]['url'],
                })
            else:
                self._sort_formats(formats)
                entry['formats'] = formats
            entries.append(entry)
        if len(entries) == 1:
            return entries[0]
        else:
            return self.playlist_result(entries)
2751
    def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
                                m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        """Convert a jwplayer 'sources' list into a list of format dicts.

        Duplicate source URLs are skipped. Manifest sources (HLS/DASH/SMIL)
        are expanded via the corresponding extractors; everything else yields
        a single progressive (or RTMP) format.
        """
        urls = []  # source URLs already handled, for de-duplication
        formats = []
        for source in jwplayer_sources_data:
            if not isinstance(source, dict):
                continue
            source_url = urljoin(
                base_url, self._proto_relative_url(source.get('file')))
            if not source_url or source_url in urls:
                continue
            urls.append(source_url)
            source_type = source.get('type') or ''
            ext = mimetype2ext(source_type) or determine_ext(source_url)
            if source_type == 'hls' or ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id=m3u8_id, fatal=False))
            elif source_type == 'dash' or ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    source_url, video_id, mpd_id=mpd_id, fatal=False))
            elif ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    source_url, video_id, fatal=False))
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
            elif source_type.startswith('audio') or ext in (
                    'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
                formats.append({
                    'url': source_url,
                    'vcodec': 'none',
                    'ext': ext,
                })
            else:
                height = int_or_none(source.get('height'))
                if height is None:
                    # Often no height is provided but there is a label in
                    # format like "1080p", "720p SD", or 1080.
                    height = int_or_none(self._search_regex(
                        r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
                        'height', default=None))
                a_format = {
                    'url': source_url,
                    'width': int_or_none(source.get('width')),
                    'height': height,
                    'tbr': int_or_none(source.get('bitrate')),
                    'ext': ext,
                }
                if source_url.startswith('rtmp'):
                    a_format['ext'] = 'flv'
                    # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
                    # of jwplayer.flash.swf
                    # Split the RTMP URL into app URL and play path
                    rtmp_url_parts = re.split(
                        r'((?:mp4|mp3|flv):)', source_url, 1)
                    if len(rtmp_url_parts) == 3:
                        rtmp_url, prefix, play_path = rtmp_url_parts
                        a_format.update({
                            'url': rtmp_url,
                            'play_path': prefix + play_path,
                        })
                    if rtmp_params:
                        a_format.update(rtmp_params)
                formats.append(a_format)
        return formats
2815
2816     def _live_title(self, name):
2817         """ Generate the title for a live video """
2818         now = datetime.datetime.now()
2819         now_str = now.strftime('%Y-%m-%d %H:%M')
2820         return name + ' ' + now_str
2821
2822     def _int(self, v, name, fatal=False, **kwargs):
2823         res = int_or_none(v, **kwargs)
2824         if 'get_attr' in kwargs:
2825             print(getattr(v, kwargs['get_attr']))
2826         if res is None:
2827             msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
2828             if fatal:
2829                 raise ExtractorError(msg)
2830             else:
2831                 self._downloader.report_warning(msg)
2832         return res
2833
2834     def _float(self, v, name, fatal=False, **kwargs):
2835         res = float_or_none(v, **kwargs)
2836         if res is None:
2837             msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
2838             if fatal:
2839                 raise ExtractorError(msg)
2840             else:
2841                 self._downloader.report_warning(msg)
2842         return res
2843
2844     def _set_cookie(self, domain, name, value, expire_time=None, port=None,
2845                     path='/', secure=False, discard=False, rest={}, **kwargs):
2846         cookie = compat_cookiejar_Cookie(
2847             0, name, value, port, port is not None, domain, True,
2848             domain.startswith('.'), path, True, secure, expire_time,
2849             discard, None, None, rest)
2850         self._downloader.cookiejar.set_cookie(cookie)
2851
2852     def _get_cookies(self, url):
2853         """ Return a compat_cookies.SimpleCookie with the cookies for the url """
2854         req = sanitized_Request(url)
2855         self._downloader.cookiejar.add_cookie_header(req)
2856         return compat_cookies.SimpleCookie(req.get_header('Cookie'))
2857
2858     def _apply_first_set_cookie_header(self, url_handle, cookie):
2859         """
2860         Apply first Set-Cookie header instead of the last. Experimental.
2861
2862         Some sites (e.g. [1-3]) may serve two cookies under the same name
2863         in Set-Cookie header and expect the first (old) one to be set rather
2864         than second (new). However, as of RFC6265 the newer one cookie
2865         should be set into cookie store what actually happens.
2866         We will workaround this issue by resetting the cookie to
2867         the first one manually.
2868         1. https://new.vk.com/
2869         2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
2870         3. https://learning.oreilly.com/
2871         """
2872         for header, cookies in url_handle.headers.items():
2873             if header.lower() != 'set-cookie':
2874                 continue
2875             if sys.version_info[0] >= 3:
2876                 cookies = cookies.encode('iso-8859-1')
2877             cookies = cookies.decode('utf-8')
2878             cookie_value = re.search(
2879                 r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
2880             if cookie_value:
2881                 value, domain = cookie_value.groups()
2882                 self._set_cookie(domain, cookie, value)
2883                 break
2884
2885     def get_testcases(self, include_onlymatching=False):
2886         t = getattr(self, '_TEST', None)
2887         if t:
2888             assert not hasattr(self, '_TESTS'), \
2889                 '%s has _TEST and _TESTS' % type(self).__name__
2890             tests = [t]
2891         else:
2892             tests = getattr(self, '_TESTS', [])
2893         for t in tests:
2894             if not include_onlymatching and t.get('only_matching', False):
2895                 continue
2896             t['name'] = type(self).__name__[:-len('IE')]
2897             yield t
2898
2899     def is_suitable(self, age_limit):
2900         """ Test whether the extractor is generally suitable for the given
2901         age limit (i.e. pornographic sites are not, all others usually are) """
2902
2903         any_restricted = False
2904         for tc in self.get_testcases(include_onlymatching=False):
2905             if tc.get('playlist', []):
2906                 tc = tc['playlist'][0]
2907             is_restricted = age_restricted(
2908                 tc.get('info_dict', {}).get('age_limit'), age_limit)
2909             if not is_restricted:
2910                 return True
2911             any_restricted = any_restricted or is_restricted
2912         return not any_restricted
2913
2914     def extract_subtitles(self, *args, **kwargs):
2915         if (self._downloader.params.get('writesubtitles', False)
2916                 or self._downloader.params.get('listsubtitles')):
2917             return self._get_subtitles(*args, **kwargs)
2918         return {}
2919
    def _get_subtitles(self, *args, **kwargs):
        # Subclass hook invoked by extract_subtitles(); must return a dict
        # mapping language codes to lists of subtitle info dicts.
        raise NotImplementedError('This method must be implemented by subclasses')
2922
2923     @staticmethod
2924     def _merge_subtitle_items(subtitle_list1, subtitle_list2):
2925         """ Merge subtitle items for one language. Items with duplicated URLs
2926         will be dropped. """
2927         list1_urls = set([item['url'] for item in subtitle_list1])
2928         ret = list(subtitle_list1)
2929         ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
2930         return ret
2931
2932     @classmethod
2933     def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
2934         """ Merge two subtitle dictionaries, language by language. """
2935         ret = dict(subtitle_dict1)
2936         for lang in subtitle_dict2:
2937             ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
2938         return ret
2939
2940     def extract_automatic_captions(self, *args, **kwargs):
2941         if (self._downloader.params.get('writeautomaticsub', False)
2942                 or self._downloader.params.get('listsubtitles')):
2943             return self._get_automatic_captions(*args, **kwargs)
2944         return {}
2945
    def _get_automatic_captions(self, *args, **kwargs):
        # Subclass hook invoked by extract_automatic_captions(); must return
        # a dict mapping language codes to lists of caption info dicts.
        raise NotImplementedError('This method must be implemented by subclasses')
2948
2949     def mark_watched(self, *args, **kwargs):
2950         if (self._downloader.params.get('mark_watched', False)
2951                 and (self._get_login_info()[0] is not None
2952                      or self._downloader.params.get('cookiefile') is not None)):
2953             self._mark_watched(*args, **kwargs)
2954
    def _mark_watched(self, *args, **kwargs):
        # Subclass hook invoked by mark_watched() when watch reporting is
        # enabled and credentials/cookies are available.
        raise NotImplementedError('This method must be implemented by subclasses')
2957
2958     def geo_verification_headers(self):
2959         headers = {}
2960         geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
2961         if geo_verification_proxy:
2962             headers['Ytdl-request-proxy'] = geo_verification_proxy
2963         return headers
2964
2965     def _generic_id(self, url):
2966         return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
2967
2968     def _generic_title(self, url):
2969         return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
2970
2971
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # Prefix is empty (single result), a positive integer, or 'all'
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')

        # Empty prefix: a single result; 'all': as many as the service
        # allows; otherwise a user-requested count capped at _MAX_RESULTS
        if not prefix:
            return self._get_n_results(query, 1)
        if prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)

        n = int(prefix)
        if n <= 0:
            raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
        if n > self._MAX_RESULTS:
            self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
            n = self._MAX_RESULTS
        return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY