from __future__ import unicode_literals

from ..compat import (
    compat_etree_fromstring,
    compat_urllib_parse_urlencode,
    compat_urllib_request,
)
from ..downloader.f4m import remove_encrypted_media
class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the YoutubeDL. The YoutubeDL processes this
    information, possibly downloading the video to the file system, among
    other possible outcomes.

    The type field determines the type of the result.
    By far the most common value (and the default if _type is missing) is
    "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:

    id:             Video identifier.
    title:          Video title, unescaped.

    Additionally, it must contain either a formats entry or a url one:

    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url        Mandatory. The URL of the video file
                    * ext        Will be calculated from URL if missing
                    * format     A human-readable description of the format
                                 ("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
                    * format_id  A short description of the format
                                 ("mp4_h264_opus" or "19").
                                 Technically optional, but strongly recommended.
                    * format_note Additional info about the format
                                 ("3D" or "DASH video")
                    * width      Width of the video, if known
                    * height     Height of the video, if known
                    * resolution Textual description of width and height
                    * tbr        Average bitrate of audio and video in KBit/s
                    * abr        Average audio bitrate in KBit/s
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
                    * vbr        Average video bitrate in KBit/s
                    * fps        Frame rate
                    * vcodec     Name of the video codec in use
                    * container  Name of the container format
                    * filesize   The number of bytes, if known in advance
                    * filesize_approx  An estimate for the number of bytes
                    * player_url SWF Player URL (used for rtmpdump).
                    * protocol   The protocol that will be used for the actual
                                 download, lower-case.
                                 "http", "https", "rtsp", "rtmp", "rtmpe",
                                 "m3u8", "m3u8_native" or "http_dash_segments".
                    * preference Order number of this format. If this field is
                                 present and not None, the formats get sorted
                                 by this field, regardless of all other values.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                                 < -1000 to hide the format (if there is
                                    another one which is strictly better)
                    * language   Language code, e.g. "de" or "en-US".
                    * language_preference  Is this in the language mentioned in
                                 the URL?
                                 10 if it's what the URL is about,
                                 -1 for default (don't know),
                                 -10 otherwise, other values reserved for now.
                    * quality    Order number of the video quality of this
                                 format, irrespective of the file format.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                                 (quality takes higher priority)
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * http_headers  A dictionary of additional HTTP headers
                                 to add to the request.
                    * stretched_ratio  If given and not 1, indicates that the
                                 video's pixels are not square.
                                 width : height ratio as float.
                    * no_resume  The server does not support resuming the
                                 (HTTP or RTMP) download. Boolean.

    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).
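
    For illustration only, a minimal single-video result might look like
    this (all field values are made up):

        {
            'id': '4234987',
            'title': 'Dancing naked mole rats',
            'formats': [{
                'url': 'https://example.com/video-360p.mp4',
                'ext': 'mp4',
                'format_id': 'http-360p',
                'height': 360,
            }],
        }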

    The following fields are optional:

    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    license:        License name the video is licensed under.
    creator:        The creator of the video.
    release_date:   The date (YYYYMMDD) when the video was released.
    timestamp:      UNIX timestamp of the moment the video became available.
    upload_date:    Video upload date (YYYYMMDD).
                    If not explicitly set, calculated from timestamp.
    uploader_id:    Nickname or id of the video uploader.
    uploader_url:   Full URL to a personal webpage of the video uploader.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {language: subformats}. "subformats" is a list sorted from
                    lower to higher preference, each element is a dictionary
                    with the "ext" entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    "ext" will be calculated from URL if missing
                    (a worked illustration follows this field list)
    automatic_captions: Like 'subtitles', used by the YoutubeIE for
                    automatically generated captions
    duration:       Length of the video in seconds, as an integer or float.
    view_count:     How many users have watched the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends
                    on the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all optional, but at least one of "text" or
                    "html" must be present):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage; if given to youtube-dl it
                    should allow getting the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    start_time:     Time in seconds where the reproduction should start, as
                    specified in the URL.
    end_time:       Time in seconds where the reproduction should end, as
                    specified in the URL.
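
    For illustration, a subtitles value with a single English WebVTT track
    (hypothetical URL) would be:
        {'en': [{'ext': 'vtt', 'url': 'https://example.com/subs.en.vtt'}]}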

    The following fields should only be used when the video belongs to some logical
    chapter or section:

    chapter:        Name or title of the chapter the video belongs to.
    chapter_number: Number of the chapter the video belongs to, as an integer.
    chapter_id:     Id of the chapter the video belongs to, as a unicode string.

    The following fields should only be used when the video is an episode of some
    series or programme:

    series:         Title of the series or programme the video episode belongs to.
    season:         Title of the season the video episode belongs to.
    season_number:  Number of the season the video episode belongs to, as an integer.
    season_id:      Id of the season the video episode belongs to, as a unicode string.
    episode:        Title of the video episode. Unlike the mandatory video title field,
                    this field should denote the exact title of the video episode
                    without any kind of decoration.
    episode_number: Number of the video episode within a season, as an integer.
    episode_id:     Id of the video episode, as a unicode string.

    The following fields should only be used when the media is a track or a part of
    a music album:

    track:          Title of the track.
    track_number:   Number of the track within an album or a disc, as an integer.
    track_id:       Id of the track (useful in case of custom indexing, e.g. 6.iii),
                    as a unicode string.
    artist:         Artist(s) of the track.
    genre:          Genre(s) of the track.
    album:          Title of the album the track belongs to.
    album_type:     Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
    album_artist:   List of all artists that appeared on the album (e.g.
                    "Ash Borer / Fell Voices" or "Various Artists", useful for splits
                    and compilations).
    disc_number:    Number of the disc or other physical medium the track belongs to,
                    as an integer.
    release_year:   Year (YYYY) when the album was released.

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.


    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "title", "description" and "id" attributes
    with the same semantics as videos (see above).

    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example, multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.

    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.

    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.


    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Typically, they should also be added to the list of extractors.
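
    A minimal subclass sketch (hypothetical site, URL pattern and markup,
    for illustration only):

        class ExampleIE(InfoExtractor):
            _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'

            def _real_extract(self, url):
                video_id = self._match_id(url)
                webpage = self._download_webpage(url, video_id)
                return {
                    'id': video_id,
                    'title': self._og_search_title(webpage),
                    'url': self._og_search_video_url(webpage),
                }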

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self.set_downloader(downloader)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""

        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None

    @classmethod
    def _match_id(cls, url):
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        m = cls._VALID_URL_RE.match(url)
        assert m
        return m.group('id')
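
    # Illustrative (hypothetical) usage of the two helpers above: with
    #     _VALID_URL = r'https?://example\.com/v/(?P<id>[0-9]+)'
    # suitable('https://example.com/v/123') returns True and
    # _match_id('https://example.com/v/123') returns '123'.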

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        self._real_initialize()

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            self.initialize()
            return self._real_extract(url)
        except ExtractorError:
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        return compat_str(cls.__name__[:-2])

    @property
    def IE_NAME(self):
        return compat_str(type(self).__name__[:-2])

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        """ Returns the response handle """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))
        if isinstance(url_or_request, compat_urllib_request.Request):
            url_or_request = update_Request(
                url_or_request, data=data, headers=headers, query=query)
        else:
            if query:
                url_or_request = update_url_query(url_or_request, query)
            if data is not None or headers:
                url_or_request = sanitized_Request(url_or_request, data, headers)
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
        """ Returns a tuple (page content as string, URL handle) """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if encoding is None:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen('Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            basen = '%s_%s' % (video_id, url)
            if len(basen) > 240:
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            content = webpage_bytes.decode('utf-8', 'replace')

        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in content[:512]):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in content[:512]:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)

        return content

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
        """ Returns the data of the page as a string """
        success = False
        try_count = 0
        while success is False:
            try:
                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        if res is False:
            return res
        else:
            content, _ = res
            return content

    def _download_xml(self, url_or_request, video_id,
                      note='Downloading XML', errnote='Unable to download XML',
                      transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
        """Return the xml as an xml.etree.ElementTree.Element"""
        xml_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
        if xml_string is False:
            return xml_string
        if transform_source:
            xml_string = transform_source(xml_string)
        return compat_etree_fromstring(xml_string.encode('utf-8'))

    def _download_json(self, url_or_request, video_id,
                       note='Downloading JSON metadata',
                       errnote='Unable to download JSON metadata',
                       transform_source=None,
                       fatal=True, encoding=None, data=None, headers={}, query={}):
        json_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal,
            encoding=encoding, data=data, headers=headers, query=query)
        if (not fatal) and json_string is False:
            return None
        return self._parse_json(
            json_string, video_id, transform_source=transform_source, fatal=fatal)

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
        if transform_source:
            json_string = transform_source(json_string)
        try:
            return json.loads(json_string)
        except ValueError as ve:
            errmsg = '%s: Failed to parse JSON ' % video_id
            if fatal:
                raise ExtractorError(errmsg, cause=ve)
            else:
                self.report_warning(errmsg + str(ve))

    def report_warning(self, msg, video_id=None):
        idstr = '' if video_id is None else '%s: ' % video_id
        self._downloader.report_warning(
            '[%s] %s%s' % (self.IE_NAME, idstr, msg))

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    @staticmethod
    def raise_login_required(msg='This video is only available for registered users'):
        raise ExtractorError(
            '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
            expected=True)

    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
        raise ExtractorError(
            '%s. You might want to use --proxy to workaround.' % msg,
            expected=True)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None):
        """Returns a URL that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        if video_title is not None:
            video_info['title'] = video_title
        return video_info

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        if playlist_description:
            video_info['description'] = playlist_description
        return video_info

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        RegexNotFoundError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None
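
    # Illustrative (hypothetical) usage of _search_regex:
    #     self._search_regex(r'"views":\s*(\d+)', webpage, 'view count')
    # returns the first captured group (e.g. '1234'); passing default=None
    # makes a failed search return None instead of raising.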

    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if res:
            return clean_html(res).strip()
        else:
            return None

    def _get_login_info(self):
        """
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username') is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err))

        return (username, password)

    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info
        TODO - asking the user will be required for sms/phone verify
        currently just uses the command line option
        If there's no info available, return None
        """
        if self._downloader is None:
            return None
        downloader_params = self._downloader.params

        if downloader_params.get('twofactor') is not None:
            return downloader_params['twofactor']

        return compat_getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
                       % {'prop': re.escape(prop)})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]

    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        if name is None:
            name = 'OpenGraph %s' % prop
        escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)
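
    # For illustration (hypothetical markup): given
    #     <meta property="og:title" content="My Video"/>
    # in html, self._og_search_title(html) returns 'My Video'.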

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, **kargs):
        return self._og_search_property('title', html, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        if display_name is None:
            display_name = name
        return self._html_search_regex(
            self._meta_regex(name),
            html, display_name, fatal=fatal, group='content', **kwargs)

    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    def _rta_search(self, html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r' content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return None

        RATING_TABLE = {
            'safe for kids': 0,
            'general': 8,
            '14 years': 14,
            'mature': 17,
            'restricted': 19,
        }
        return RATING_TABLE.get(rating.lower())

    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta('isFamilyFriendly', html)

        if not family_friendly:
            return None

        RATING_TABLE = {
            '1': 0,
            'true': 0,
            '0': 18,
            'false': 18,
        }
        return RATING_TABLE.get(family_friendly.lower())

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')

    def _search_json_ld(self, html, video_id, **kwargs):
        json_ld = self._search_regex(
            r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
            html, 'JSON-LD', group='json_ld', **kwargs)
        if not json_ld:
            return {}
        return self._json_ld(json_ld, video_id, fatal=kwargs.get('fatal', True))

    def _json_ld(self, json_ld, video_id, fatal=True):
        if isinstance(json_ld, compat_str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}
        if json_ld.get('@context') == 'http://schema.org':
            item_type = json_ld.get('@type')
            if item_type == 'TVEpisode':
                info.update({
                    'episode': unescapeHTML(json_ld.get('name')),
                    'episode_number': int_or_none(json_ld.get('episodeNumber')),
                    'description': unescapeHTML(json_ld.get('description')),
                })
                part_of_season = json_ld.get('partOfSeason')
                if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
                    info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
                part_of_series = json_ld.get('partOfSeries')
                if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
                    info['series'] = unescapeHTML(part_of_series.get('name'))
            elif item_type == 'Article':
                info.update({
                    'timestamp': parse_iso8601(json_ld.get('datePublished')),
                    'title': unescapeHTML(json_ld.get('headline')),
                    'description': unescapeHTML(json_ld.get('articleBody')),
                })
        return dict((k, v) for k, v in info.items() if v is not None)

    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
        hidden_inputs = {}
        for input in re.findall(r'(?i)<input([^>]+)>', html):
            if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
                continue
            name = re.search(r'(?:name|id)=(["\'])(?P<value>.+?)\1', input)
            if not name:
                continue
            value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
            if not value:
                continue
            hidden_inputs[name.group('value')] = value.group('value')
        return hidden_inputs
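
    # Illustrative (hypothetical) example: for the markup
    #     <input type="hidden" name="csrf_token" value="abc123">
    # _hidden_inputs(html) returns {'csrf_token': 'abc123'}.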

    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)

    def _sort_formats(self, formats, field_preference=None):
        if not formats:
            raise ExtractorError('No video formats found')

        for f in formats:
            # Automatically determine tbr when missing based on abr and vbr (improves
            # formats sorting in some cases)
            if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
                f['tbr'] = f['abr'] + f['vbr']

        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])

            if isinstance(field_preference, (list, tuple)):
                return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)

            preference = f.get('preference')
            if preference is None:
                preference = 0
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5

            proto_preference = 0 if determine_protocol(f) in ['http', 'https'] else -0.1

            if f.get('vcodec') == 'none':  # audio only
                preference -= 50
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if f.get('acodec') == 'none':  # video only
                    preference -= 40
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0

            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                proto_preference,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)
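        # Illustrative effect: the resulting order is worst-first, so an
        # audio-only format (vcodec == 'none') sorts below video formats of
        # the same declared preference, and among otherwise comparable
        # formats a higher tbr, height or width sorts later (i.e. better).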

    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    @staticmethod
    def _remove_duplicate_formats(formats):
        format_urls = set()
        unique_formats = []
        for f in formats:
            if f['url'] not in format_urls:
                format_urls.add(f['url'])
                unique_formats.append(f)
        formats[:] = unique_formats

    def _is_valid_url(self, url, video_id, item='video'):
        url = self._proto_relative_url(url, scheme='http:')
        # For now assume non HTTP(S) URLs always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item)
            return True
        except ExtractorError as e:
            if isinstance(e.cause, compat_urllib_error.URLError):
                self.to_screen(
                    '%s: %s URL is invalid, skipping' % (video_id, item))
                return False
            raise

    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self._downloader.params.get('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        if url is None:
            return url
        if url.startswith('//'):
            if scheme is None:
                scheme = self.http_scheme()
            return scheme + url
        else:
            return url

    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)

    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None):
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal)

        if manifest is False:
            return []

        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)

    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from final formats
        # rendition (see https://github.com/rg3/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats
        base_url = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
            'base URL', default=None)
        if base_url:
            base_url = base_url.strip()

        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)
        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources. See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                'ext': 'flv' if bootstrap_info is not None else None,
                'tbr': tbr,
                'width': width,
                'height': height,
                'preference': preference,
            })
        return formats

    def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
        return {
            'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
            'url': m3u8_url,
            'ext': ext,
            'protocol': 'm3u8',
            'preference': preference - 1 if preference else -1,
            'resolution': 'multiple',
            'format_note': 'Quality selection URL',
        }

    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                              entry_protocol='m3u8', preference=None,
                              m3u8_id=None, note=None, errnote=None,
                              fatal=True, live=False):

        formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]

        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))

        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note=note or 'Downloading m3u8 information',
            errnote=errnote or 'Failed to download m3u8 information',
            fatal=fatal)
        if res is False:
            return []
        m3u8_doc, urlh = res
        m3u8_url = urlh.geturl()

        # We should try extracting formats only from master playlists [1], i.e.
        # playlists that describe available qualities. On the other hand media
        # playlists [2] should be returned as is since they contain just the media
        # without qualities renditions.
        # Fortunately, master playlist can be easily distinguished from media
        # playlist based on particular tags availability. As of [1, 2] master
        # playlist tags MUST NOT appear in a media playlist and vice versa.
        # As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
        # and MUST NOT appear in master playlist thus we can clearly detect media
        # playlist with this criterion.
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
        # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
        # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            return [{
                'url': m3u8_url,
                'format_id': m3u8_id,
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }]

        last_info = None
        last_media = None
        kv_rex = re.compile(
            r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_info = {}
                for m in kv_rex.finditer(line):
                    v = m.group('val')
                    if v.startswith('"'):
                        v = v[1:-1]
                    last_info[m.group('key')] = v
            elif line.startswith('#EXT-X-MEDIA:'):
                last_media = {}
                for m in kv_rex.finditer(line):
                    v = m.group('val')
                    if v.startswith('"'):
                        v = v[1:-1]
                    last_media[m.group('key')] = v
            elif line.startswith('#') or not line.strip():
                continue
            else:
                if last_info is None:
                    formats.append({'url': format_url(line)})
                    continue
                tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') not in ('SUBTITLES', 'CLOSED-CAPTIONS') else None
                # Although the specification does not mention the NAME attribute
                # for EXT-X-STREAM-INF, it may still sometimes be present
                stream_name = last_info.get('NAME') or last_media_name
                # Bandwidth of live streams may differ over time thus making
                # format_id unpredictable. So it's better to keep provided
                # format_id intact.
                if not live:
                    format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(line.strip()),
                    'tbr': tbr,
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                resolution = last_info.get('RESOLUTION')
                if resolution:
                    width_str, height_str = resolution.split('x')
                    f['width'] = int(width_str)
                    f['height'] = int(height_str)
                codecs = last_info.get('CODECS')
                if codecs:
                    vcodec, acodec = [None] * 2
                    va_codecs = codecs.split(',')
                    if len(va_codecs) == 1:
                        # Audio only entries usually come with single codec and
                        # no resolution. For more robustness we also check it to
                        # be mp4 audio.
                        if not resolution and va_codecs[0].startswith('mp4a'):
                            vcodec, acodec = 'none', va_codecs[0]
                        else:
                            vcodec = va_codecs[0]
                    else:
                        vcodec, acodec = va_codecs[:2]
                    f.update({
                        'acodec': acodec,
                        'vcodec': vcodec,
                    })
                if last_media is not None:
                    f['m3u8_media'] = last_media
                    last_media = None
                formats.append(f)
        return formats

    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)
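
    # For illustration:
    #     _xpath_ns('./head/meta', 'http://www.w3.org/2005/SMIL21/Language')
    # returns './{http://www.w3.org/2005/SMIL21/Language}head'
    #         '/{http://www.w3.org/2005/SMIL21/Language}meta'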

    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)

        if smil is False:
            assert not fatal
            return []

        namespace = self._parse_smil_namespace(smil)

        return self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)

    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)
        if smil is False:
            return {}
        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)

    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _parse_smil_namespace(self, smil):
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0

        srcs = []
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.append(src)

            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
            src_url = src_url.strip()

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
                continue

            if src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
                continue

            if src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })

        return formats

    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        urls = []
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src or src in urls:
                continue
            urls.append(src)
            ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles

    def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
        xspf = self._download_xml(
            playlist_url, playlist_id, 'Downloading xspf playlist',
            'Unable to download xspf manifest', fatal=fatal)
        if xspf is False:
            return []
        return self._parse_xspf(xspf, playlist_id)

    def _parse_xspf(self, playlist, playlist_id):
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = [{
                'url': location.text,
                'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
            } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
            self._sort_formats(formats)

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries

    def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
        res = self._download_webpage_handle(
            mpd_url, video_id,
            note=note or 'Downloading MPD manifest',
            errnote=errnote or 'Failed to download MPD manifest',
            fatal=fatal)
        if res is False:
            return []
        mpd, urlh = res
        mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()

        return self._parse_mpd_formats(
            compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url, formats_dict=formats_dict)

    def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}):
        if mpd_doc.get('type') == 'dynamic':
            return []

        namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

        def _add_ns(path):
            return self._xpath_ns(path, namespace)

        def is_drm_protected(element):
            return element.find(_add_ns('ContentProtection')) is not None

        def extract_multisegment_info(element, ms_parent_info):
            ms_info = ms_parent_info.copy()
            segment_list = element.find(_add_ns('SegmentList'))
            if segment_list is not None:
                segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
                if segment_urls_e:
                    ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
                initialization = segment_list.find(_add_ns('Initialization'))
                if initialization is not None:
                    ms_info['initialization_url'] = initialization.attrib['sourceURL']
            else:
                segment_template = element.find(_add_ns('SegmentTemplate'))
                if segment_template is not None:
                    start_number = segment_template.get('startNumber')
                    if start_number:
                        ms_info['start_number'] = int(start_number)
                    segment_timeline = segment_template.find(_add_ns('SegmentTimeline'))
                    if segment_timeline is not None:
                        s_e = segment_timeline.findall(_add_ns('S'))
                        if s_e:
                            ms_info['total_number'] = 0
                            for s in s_e:
                                ms_info['total_number'] += 1 + int(s.get('r', '0'))
                    else:
                        timescale = segment_template.get('timescale')
                        if timescale:
                            ms_info['timescale'] = int(timescale)
                        segment_duration = segment_template.get('duration')
                        if segment_duration:
                            ms_info['segment_duration'] = int(segment_duration)
                    media_template = segment_template.get('media')
                    if media_template:
                        ms_info['media_template'] = media_template
                    initialization = segment_template.get('initialization')
                    if initialization:
                        ms_info['initialization_url'] = initialization
                    else:
                        initialization = segment_template.find(_add_ns('Initialization'))
                        if initialization is not None:
                            ms_info['initialization_url'] = initialization.attrib['sourceURL']
            return ms_info

        mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
        formats = []
        for period in mpd_doc.findall(_add_ns('Period')):
            period_duration = parse_duration(period.get('duration')) or mpd_duration
            period_ms_info = extract_multisegment_info(period, {
                'start_number': 1,
                'timescale': 1,
            })
            for adaptation_set in period.findall(_add_ns('AdaptationSet')):
                if is_drm_protected(adaptation_set):
                    continue
                adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
                for representation in adaptation_set.findall(_add_ns('Representation')):
                    if is_drm_protected(representation):
                        continue
                    representation_attrib = adaptation_set.attrib.copy()
                    representation_attrib.update(representation.attrib)
                    # According to page 41 of ISO/IEC 23009-1:2014, @mimeType is mandatory
                    mime_type = representation_attrib['mimeType']
                    content_type = mime_type.split('/')[0]
                    if content_type == 'text':
                        # TODO implement WebVTT downloading
                        pass
                    elif content_type == 'video' or content_type == 'audio':
                        base_url = ''
                        for element in (representation, adaptation_set, period, mpd_doc):
                            base_url_e = element.find(_add_ns('BaseURL'))
                            if base_url_e is not None:
                                base_url = base_url_e.text + base_url
                                if re.match(r'^https?://', base_url):
                                    break
                        if mpd_base_url and not re.match(r'^https?://', base_url):
                            if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
                                mpd_base_url += '/'
                            base_url = mpd_base_url + base_url
                        representation_id = representation_attrib.get('id')
                        lang = representation_attrib.get('lang')
                        url_el = representation.find(_add_ns('BaseURL'))
                        filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                        f = {
                            'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
                            'url': base_url,
                            'ext': mimetype2ext(mime_type),
                            'width': int_or_none(representation_attrib.get('width')),
                            'height': int_or_none(representation_attrib.get('height')),
                            'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
                            'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                            'fps': int_or_none(representation_attrib.get('frameRate')),
                            'vcodec': 'none' if content_type == 'audio' else representation_attrib.get('codecs'),
                            'acodec': 'none' if content_type == 'video' else representation_attrib.get('codecs'),
                            'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                            'format_note': 'DASH %s' % content_type,
                            'filesize': filesize,
                        }
                        representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
                        if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                segment_duration = float(representation_ms_info['segment_duration']) / float(representation_ms_info['timescale'])
                                representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
                            media_template = representation_ms_info['media_template']
                            media_template = media_template.replace('$RepresentationID$', representation_id)
                            media_template = re.sub(r'\$(Number|Bandwidth)\$', r'%(\1)d', media_template)
                            media_template = re.sub(r'\$(Number|Bandwidth)%([^$]+)\$', r'%(\1)\2', media_template)
                            media_template = media_template.replace('$$', '$')
                            representation_ms_info['segment_urls'] = [
                                media_template % {
                                    'Number': segment_number,
                                    'Bandwidth': representation_attrib.get('bandwidth')}
                                for segment_number in range(
                                    representation_ms_info['start_number'],
                                    representation_ms_info['total_number'] + representation_ms_info['start_number'])]
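                            # For illustration: a (hypothetical) template
                            # 'seg-$Number%05d$.m4s' becomes 'seg-%(Number)05d.m4s'
                            # after the substitutions above, and then expands to
                            # 'seg-00001.m4s', 'seg-00002.m4s', and so on here.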
                        if 'segment_urls' in representation_ms_info:
                            f.update({
                                'segment_urls': representation_ms_info['segment_urls'],
                                'protocol': 'http_dash_segments',
                            })
                            if 'initialization_url' in representation_ms_info:
                                initialization_url = representation_ms_info['initialization_url'].replace('$RepresentationID$', representation_id)
                                f.update({
                                    'initialization_url': initialization_url,
                                })
                                if not f.get('url'):
                                    f['url'] = initialization_url
                        try:
                            existing_format = next(
                                fo for fo in formats
                                if fo['format_id'] == representation_id)
                        except StopIteration:
                            full_info = formats_dict.get(representation_id, {}).copy()
                            full_info.update(f)
                            formats.append(full_info)
                        else:
                            existing_format.update(f)
                    else:
                        self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
        return formats

    def _live_title(self, name):
        """ Generate the title for a live video """
        now = datetime.datetime.now()
        now_str = now.strftime('%Y-%m-%d %H:%M')
        return name + ' ' + now_str

    def _int(self, v, name, fatal=False, **kwargs):
        res = int_or_none(v, **kwargs)
        if 'get_attr' in kwargs:
            print(getattr(v, kwargs['get_attr']))
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _float(self, v, name, fatal=False, **kwargs):
        res = float_or_none(v, **kwargs)
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _set_cookie(self, domain, name, value, expire_time=None):
        cookie = compat_cookiejar.Cookie(
            0, name, value, None, None, domain, None,
            None, '/', True, False, expire_time, '', None, None, None)
        self._downloader.cookiejar.set_cookie(cookie)

    def _get_cookies(self, url):
        """ Return a compat_cookies.SimpleCookie with the cookies for the url """
        req = sanitized_Request(url)
        self._downloader.cookiejar.add_cookie_header(req)
        return compat_cookies.SimpleCookie(req.get_header('Cookie'))

    def get_testcases(self, include_onlymatching=False):
        t = getattr(self, '_TEST', None)
        if t:
            assert not hasattr(self, '_TESTS'), \
                '%s has _TEST and _TESTS' % type(self).__name__
            tests = [t]
        else:
            tests = getattr(self, '_TESTS', [])
        for t in tests:
            if not include_onlymatching and t.get('only_matching', False):
                continue
            t['name'] = type(self).__name__[:-len('IE')]
            yield t

    def is_suitable(self, age_limit):
        """ Test whether the extractor is generally suitable for the given
        age limit (i.e. pornographic sites are not, all others usually are) """

        any_restricted = False
        for tc in self.get_testcases(include_onlymatching=False):
            if 'playlist' in tc:
                tc = tc['playlist'][0]
            is_restricted = age_restricted(
                tc.get('info_dict', {}).get('age_limit'), age_limit)
            if not is_restricted:
                return False
            any_restricted = any_restricted or is_restricted
        return not any_restricted

    def extract_subtitles(self, *args, **kwargs):
        if (self._downloader.params.get('writesubtitles', False) or
                self._downloader.params.get('listsubtitles')):
            return self._get_subtitles(*args, **kwargs)
        return {}

    def _get_subtitles(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')

    @staticmethod
    def _merge_subtitle_items(subtitle_list1, subtitle_list2):
        """ Merge subtitle items for one language. Items with duplicated URLs
        will be dropped. """
        list1_urls = set([item['url'] for item in subtitle_list1])
        ret = list(subtitle_list1)
        ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
        return ret

    @classmethod
    def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
        """ Merge two subtitle dictionaries, language by language. """
        ret = dict(subtitle_dict1)
        for lang in subtitle_dict2:
            ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
        return ret
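
    # Illustrative (hypothetical) example:
    #     _merge_subtitles({'en': [a]}, {'en': [b], 'fr': [c]})
    # returns {'en': [a, b], 'fr': [c]}, dropping entries of [b] whose
    # 'url' already appears in [a].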

    def extract_automatic_captions(self, *args, **kwargs):
        if (self._downloader.params.get('writeautomaticsub', False) or
                self._downloader.params.get('listsubtitles')):
            return self._get_automatic_captions(*args, **kwargs)
        return {}

    def _get_automatic_captions(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')

    def mark_watched(self, *args, **kwargs):
        if (self._downloader.params.get('mark_watched', False) and
                (self._get_login_info()[0] is not None or
                    self._downloader.params.get('cookiefile') is not None)):
            self._mark_watched(*args, **kwargs)

    def _mark_watched(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')


class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
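
    # For illustration, with _SEARCH_KEY = 'examplesearch' (hypothetical):
    #     'examplesearch:cats'    -> first result for "cats"
    #     'examplesearch5:cats'   -> first 5 results
    #     'examplesearchall:cats' -> up to _MAX_RESULTS results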

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY