4 from __future__ import unicode_literals
35 import xml.etree.ElementTree
42 compat_etree_fromstring,
45 compat_html_entities_html5,
51 compat_socket_create_connection,
57 compat_urllib_parse_urlencode,
58 compat_urllib_parse_urlparse,
59 compat_urllib_parse_unquote_plus,
60 compat_urllib_request,
def register_socks_protocols():
    """Teach urlparse that SOCKS schemes carry a netloc component.

    In Python < 2.6.5, urlsplit() suffers from bug
    https://bugs.python.org/issue7904: URLs whose scheme is not listed in
    urlparse.uses_netloc are not handled correctly, so each SOCKS scheme
    is appended to that registry exactly once.
    """
    registered = compat_urlparse.uses_netloc
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in registered:
            registered.append(scheme)
80 # This is not clearly defined otherwise
81 compiled_regex_type = type(re.compile(''))
84 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
85 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
86 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
87 'Accept-Encoding': 'gzip, deflate',
88 'Accept-Language': 'en-us,en;q=0.5',
93 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
99 ENGLISH_MONTH_NAMES = [
100 'January', 'February', 'March', 'April', 'May', 'June',
101 'July', 'August', 'September', 'October', 'November', 'December']
104 'en': ENGLISH_MONTH_NAMES,
106 'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
107 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
111 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
112 'flv', 'f4v', 'f4a', 'f4b',
113 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
114 'mkv', 'mka', 'mk3d',
123 'f4f', 'f4m', 'm3u8', 'smil')
125 # needed for sanitizing filenames in restricted mode
126 ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
127 itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
128 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))
151 '%Y-%m-%d %H:%M:%S.%f',
154 '%Y-%m-%dT%H:%M:%SZ',
155 '%Y-%m-%dT%H:%M:%S.%fZ',
156 '%Y-%m-%dT%H:%M:%S.%f0Z',
158 '%Y-%m-%dT%H:%M:%S.%f',
161 '%b %d %Y at %H:%M:%S',
164 DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
165 DATE_FORMATS_DAY_FIRST.extend([
174 DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
175 DATE_FORMATS_MONTH_FIRST.extend([
183 PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
186 def preferredencoding():
187 """Get preferred encoding.
189 Returns the best encoding scheme for the system, based on
190 locale.getpreferredencoding() and some further tweaks.
193 pref = locale.getpreferredencoding()
201 def write_json_file(obj, fn):
202 """ Encode obj as JSON and write it to fn, atomically if possible """
204 fn = encodeFilename(fn)
205 if sys.version_info < (3, 0) and sys.platform != 'win32':
206 encoding = get_filesystem_encoding()
207 # os.path.basename returns a bytes object, but NamedTemporaryFile
208 # will fail if the filename contains non ascii characters unless we
209 # use a unicode object
210 path_basename = lambda f: os.path.basename(fn).decode(encoding)
211 # the same for os.path.dirname
212 path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
214 path_basename = os.path.basename
215 path_dirname = os.path.dirname
219 'prefix': path_basename(fn) + '.',
220 'dir': path_dirname(fn),
224 # In Python 2.x, json.dump expects a bytestream.
225 # In Python 3.x, it writes to a character stream
226 if sys.version_info < (3, 0):
234 tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
239 if sys.platform == 'win32':
240 # Need to remove existing file on Windows, else os.rename raises
241 # WindowsError or FileExistsError.
246 os.rename(tf.name, fn)
255 if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val=None):
    """Find the first element matching xpath[@key] (or xpath[@key='val'])."""
    # Guard against keys that would break the XPath predicate syntax.
    assert re.match(r'^[a-zA-Z_-]+$', key)
    if val is None:
        predicate = '[@%s]' % key
    else:
        predicate = "[@%s='%s']" % (key, val)
    return node.find(xpath + predicate)
262 def find_xpath_attr(node, xpath, key, val=None):
263 for f in node.findall(compat_xpath(xpath)):
264 if key not in f.attrib:
266 if val is None or f.attrib.get(key) == val:
270 # On python2.6 the xml.etree.ElementTree.Element methods don't support
271 # the namespace parameter
274 def xpath_with_ns(path, ns_map):
275 components = [c.split(':') for c in path.split('/')]
279 replaced.append(c[0])
282 replaced.append('{%s}%s' % (ns_map[ns], tag))
283 return '/'.join(replaced)
286 def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
287 def _find_xpath(xpath):
288 return node.find(compat_xpath(xpath))
290 if isinstance(xpath, (str, compat_str)):
291 n = _find_xpath(xpath)
299 if default is not NO_DEFAULT:
302 name = xpath if name is None else name
303 raise ExtractorError('Could not find XML element %s' % name)
309 def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
310 n = xpath_element(node, xpath, name, fatal=fatal, default=default)
311 if n is None or n == default:
314 if default is not NO_DEFAULT:
317 name = xpath if name is None else name
318 raise ExtractorError('Could not find XML element\'s text %s' % name)
324 def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
325 n = find_xpath_attr(node, xpath, key)
327 if default is not NO_DEFAULT:
330 name = '%s[@%s]' % (xpath, key) if name is None else name
331 raise ExtractorError('Could not find XML attribute %s' % name)
def get_element_by_id(id, html):
    """Return the content of the tag carrying the given ID in the passed HTML document."""
    # Convenience wrapper around the generic attribute lookup.
    return get_element_by_attribute('id', id, html)
def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the
    passed HTML document, or None if no tag matches."""
    matches = get_elements_by_class(class_name, html)
    if not matches:
        return None
    return matches[0]
def get_element_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the first tag whose attribute matches value in
    the passed HTML document, or None if no tag matches."""
    matches = get_elements_by_attribute(attribute, value, html, escape_value)
    if not matches:
        return None
    return matches[0]
def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed
    HTML document as a list."""
    # Match class_name as a whole word anywhere inside the class attribute;
    # escape_value=False because the value passed down is already a regex.
    value_re = r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name)
    return get_elements_by_attribute('class', value_re, html, escape_value=False)
360 def get_elements_by_attribute(attribute, value, html, escape_value=True):
361 """Return the content of the tag with the specified attribute in the passed HTML document"""
363 value = re.escape(value) if escape_value else value
366 for m in re.finditer(r'''(?xs)
368 (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
370 (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
374 ''' % (re.escape(attribute), value), html):
375 res = m.group('content')
377 if res.startswith('"') or res.startswith("'"):
380 retlist.append(unescapeHTML(res))
385 class HTMLAttributeParser(compat_HTMLParser):
386 """Trivial HTML parser to gather the attributes for a single element"""
389 compat_HTMLParser.__init__(self)
391 def handle_starttag(self, tag, attrs):
392 self.attrs = dict(attrs)
395 def extract_attributes(html_element):
396 """Given a string for an HTML element such as
398 a="foo" B="bar" c="&98;az" d=boz
399 empty= noval entity="&"
402 Decode and return a dictionary of attributes.
404 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
405 'empty': '', 'noval': None, 'entity': '&',
406 'sq': '"', 'dq': '\''
408 NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
409 but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
411 parser = HTMLAttributeParser()
412 parser.feed(html_element)
417 def clean_html(html):
418 """Clean an HTML snippet into a readable string"""
420 if html is None: # Convenience for sanitizing descriptions etc.
424 html = html.replace('\n', ' ')
425 html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
426 html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
428 html = re.sub('<.*?>', '', html)
429 # Replace html entities
430 html = unescapeHTML(html)
434 def sanitize_open(filename, open_mode):
435 """Try to open the given filename, and slightly tweak it if this fails.
437 Attempts to open the given filename. If this fails, it tries to change
438 the filename slightly, step by step, until it's either able to open it
439 or it fails and raises a final exception, like the standard open()
442 It returns the tuple (stream, definitive_file_name).
446 if sys.platform == 'win32':
448 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
449 return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
450 stream = open(encodeFilename(filename), open_mode)
451 return (stream, filename)
452 except (IOError, OSError) as err:
453 if err.errno in (errno.EACCES,):
456 # In case of error, try to remove win32 forbidden chars
457 alt_filename = sanitize_path(filename)
458 if alt_filename == filename:
461 # An exception here should be caught in the caller
462 stream = open(encodeFilename(alt_filename), open_mode)
463 return (stream, alt_filename)
466 def timeconvert(timestr):
467 """Convert RFC 2822 defined time string into system timestamp"""
469 timetuple = email.utils.parsedate_tz(timestr)
470 if timetuple is not None:
471 timestamp = email.utils.mktime_tz(timetuple)
475 def sanitize_filename(s, restricted=False, is_id=False):
476 """Sanitizes a string so it could be used as part of a filename.
477 If restricted is set, use a stricter subset of allowed characters.
478 Set is_id if this is not an arbitrary string, but an ID that should be kept
481 def replace_insane(char):
482 if restricted and char in ACCENT_CHARS:
483 return ACCENT_CHARS[char]
484 if char == '?' or ord(char) < 32 or ord(char) == 127:
487 return '' if restricted else '\''
489 return '_-' if restricted else ' -'
490 elif char in '\\/|*<>':
492 if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
494 if restricted and ord(char) > 127:
499 s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
500 result = ''.join(map(replace_insane, s))
502 while '__' in result:
503 result = result.replace('__', '_')
504 result = result.strip('_')
505 # Common case of "Foreign band name - English song title"
506 if restricted and result.startswith('-_'):
508 if result.startswith('-'):
509 result = '_' + result[len('-'):]
510 result = result.lstrip('.')
516 def sanitize_path(s):
517 """Sanitizes and normalizes path on Windows"""
518 if sys.platform != 'win32':
520 drive_or_unc, _ = os.path.splitdrive(s)
521 if sys.version_info < (2, 7) and not drive_or_unc:
522 drive_or_unc, _ = os.path.splitunc(s)
523 norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
527 path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
528 for path_part in norm_path]
530 sanitized_path.insert(0, drive_or_unc + os.path.sep)
531 return os.path.join(*sanitized_path)
def sanitize_url(url):
    """Prepend the `http:` scheme to protocol-relative URLs (those starting
    with '//') in order to mitigate failures due to a missing protocol;
    all other URLs are returned unchanged."""
    if url.startswith('//'):
        return 'http:%s' % url
    return url
def sanitized_Request(url, *args, **kwargs):
    """Build a urllib Request after normalizing protocol-relative URLs."""
    safe_url = sanitize_url(url)
    return compat_urllib_request.Request(safe_url, *args, **kwargs)
545 """Expand shell variables and ~"""
546 return os.path.expandvars(compat_expanduser(s))
549 def orderedSet(iterable):
550 """ Remove all duplicates from the input iterable """
558 def _htmlentity_transform(entity_with_semicolon):
559 """Transforms an HTML entity to a character."""
560 entity = entity_with_semicolon[:-1]
562 # Known non-numeric HTML entity
563 if entity in compat_html_entities.name2codepoint:
564 return compat_chr(compat_html_entities.name2codepoint[entity])
566 # TODO: HTML5 allows entities without a semicolon. For example,
567 # 'Éric' should be decoded as 'Éric'.
568 if entity_with_semicolon in compat_html_entities_html5:
569 return compat_html_entities_html5[entity_with_semicolon]
571 mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
573 numstr = mobj.group(1)
574 if numstr.startswith('x'):
576 numstr = '0%s' % numstr
579 # See https://github.com/rg3/youtube-dl/issues/7518
581 return compat_chr(int(numstr, base))
585 # Unknown entity in name, return its literal representation
586 return '&%s;' % entity
592 assert type(s) == compat_str
595 r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
598 def get_subprocess_encoding():
599 if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
600 # For subprocess calls, encode with locale encoding
601 # Refer to http://stackoverflow.com/a/9951851/35070
602 encoding = preferredencoding()
604 encoding = sys.getfilesystemencoding()
610 def encodeFilename(s, for_subprocess=False):
612 @param s The name of the file
615 assert type(s) == compat_str
617 # Python 3 has a Unicode API
618 if sys.version_info >= (3, 0):
621 # Pass '' directly to use Unicode APIs on Windows 2000 and up
622 # (Detecting Windows NT 4 is tricky because 'major >= 4' would
623 # match Windows 9x series as well. Besides, NT 4 is obsolete.)
624 if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
627 # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
628 if sys.platform.startswith('java'):
631 return s.encode(get_subprocess_encoding(), 'ignore')
634 def decodeFilename(b, for_subprocess=False):
636 if sys.version_info >= (3, 0):
639 if not isinstance(b, bytes):
642 return b.decode(get_subprocess_encoding(), 'ignore')
def encodeArgument(s):
    """Encode a subprocess argument the same way file names are encoded."""
    if not isinstance(s, compat_str):
        # Legacy code that still hands us byte strings; promote to text first.
        # TODO: assert against non-text input once all post processors are fixed.
        s = s.decode('ascii')
    return encodeFilename(s, for_subprocess=True)
def decodeArgument(b):
    """Inverse of encodeArgument: decode a subprocess argument to text."""
    return decodeFilename(b, for_subprocess=True)
658 def decodeOption(optval):
661 if isinstance(optval, bytes):
662 optval = optval.decode(preferredencoding())
664 assert isinstance(optval, compat_str)
668 def formatSeconds(secs):
670 return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
672 return '%d:%02d' % (secs // 60, secs % 60)
677 def make_HTTPS_handler(params, **kwargs):
678 opts_no_check_certificate = params.get('nocheckcertificate', False)
679 if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9
680 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
681 if opts_no_check_certificate:
682 context.check_hostname = False
683 context.verify_mode = ssl.CERT_NONE
685 return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
688 # (create_default_context present but HTTPSHandler has no context=)
691 if sys.version_info < (3, 2):
692 return YoutubeDLHTTPSHandler(params, **kwargs)
694 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
695 context.verify_mode = (ssl.CERT_NONE
696 if opts_no_check_certificate
697 else ssl.CERT_REQUIRED)
698 context.set_default_verify_paths()
699 return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
702 def bug_reports_message():
703 if ytdl_is_updateable():
704 update_cmd = 'type youtube-dl -U to update'
706 update_cmd = 'see https://yt-dl.org/update on how to update'
707 msg = '; please report this issue on https://yt-dl.org/bug .'
708 msg += ' Make sure you are using the latest version; %s.' % update_cmd
709 msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
713 class YoutubeDLError(Exception):
714 """Base exception for YoutubeDL errors."""
718 class ExtractorError(YoutubeDLError):
719 """Error during info extraction."""
721 def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
722 """ tb, if given, is the original traceback (so that it can be printed out).
723 If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
726 if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
728 if video_id is not None:
729 msg = video_id + ': ' + msg
731 msg += ' (caused by %r)' % cause
733 msg += bug_reports_message()
734 super(ExtractorError, self).__init__(msg)
737 self.exc_info = sys.exc_info() # preserve original exception
739 self.video_id = video_id
741 def format_traceback(self):
742 if self.traceback is None:
744 return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
    """Raised when no extractor can handle the given URL."""

    def __init__(self, url):
        message = 'Unsupported URL: %s' % url
        # expected=True: an unsupported URL is a normal condition,
        # not a bug in youtube-dl.
        super(UnsupportedError, self).__init__(message, expected=True)
754 class RegexNotFoundError(ExtractorError):
755 """Error when a regex didn't match"""
759 class GeoRestrictedError(ExtractorError):
760 """Geographic restriction Error exception.
762 This exception may be thrown when a video is not available from your
763 geographic location due to geographic restrictions imposed by a website.
765 def __init__(self, msg, countries=None):
766 super(GeoRestrictedError, self).__init__(msg, expected=True)
768 self.countries = countries
771 class DownloadError(YoutubeDLError):
772 """Download Error exception.
774 This exception may be thrown by FileDownloader objects if they are not
775 configured to continue on errors. They will contain the appropriate
779 def __init__(self, msg, exc_info=None):
780 """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
781 super(DownloadError, self).__init__(msg)
782 self.exc_info = exc_info
785 class SameFileError(YoutubeDLError):
786 """Same File exception.
788 This exception will be thrown by FileDownloader objects if they detect
789 multiple files would have to be downloaded to the same file on disk.
794 class PostProcessingError(YoutubeDLError):
795 """Post Processing exception.
797 This exception may be raised by PostProcessor's .run() method to
798 indicate an error in the postprocessing task.
801 def __init__(self, msg):
802 super(PostProcessingError, self).__init__(msg)
806 class MaxDownloadsReached(YoutubeDLError):
807 """ --max-downloads limit has been reached. """
811 class UnavailableVideoError(YoutubeDLError):
812 """Unavailable Format exception.
814 This exception will be thrown when a video is requested
815 in a format that is not available for that video.
820 class ContentTooShortError(YoutubeDLError):
821 """Content Too Short exception.
823 This exception may be raised by FileDownloader objects when a file they
824 download is too small for what the server announced first, indicating
825 the connection was probably interrupted.
828 def __init__(self, downloaded, expected):
829 super(ContentTooShortError, self).__init__(
830 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
833 self.downloaded = downloaded
834 self.expected = expected
837 class XAttrMetadataError(YoutubeDLError):
838 def __init__(self, code=None, msg='Unknown error'):
839 super(XAttrMetadataError, self).__init__(msg)
843 # Parsing code and msg
844 if (self.code in (errno.ENOSPC, errno.EDQUOT) or
845 'No space left' in self.msg or 'Disk quota excedded' in self.msg):
846 self.reason = 'NO_SPACE'
847 elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
848 self.reason = 'VALUE_TOO_LONG'
850 self.reason = 'NOT_SUPPORTED'
853 class XAttrUnavailableError(YoutubeDLError):
857 def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
858 # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
859 # expected HTTP responses to meet HTTP/1.0 or later (see also
860 # https://github.com/rg3/youtube-dl/issues/6727)
861 if sys.version_info < (3, 0):
862 kwargs[b'strict'] = True
863 hc = http_class(*args, **kwargs)
864 source_address = ydl_handler._params.get('source_address')
865 if source_address is not None:
866 sa = (source_address, 0)
867 if hasattr(hc, 'source_address'): # Python 2.7+
868 hc.source_address = sa
870 def _hc_connect(self, *args, **kwargs):
871 sock = compat_socket_create_connection(
872 (self.host, self.port), self.timeout, sa)
874 self.sock = ssl.wrap_socket(
875 sock, self.key_file, self.cert_file,
876 ssl_version=ssl.PROTOCOL_TLSv1)
879 hc.connect = functools.partial(_hc_connect, hc)
def handle_youtubedl_headers(headers):
    """Apply and strip internal Youtubedl-* pseudo headers.

    'Youtubedl-no-compression' removes any Accept-Encoding header (so the
    server sends an uncompressed response) and is itself dropped before
    the real request is made. Without the marker, headers is returned
    unmodified.
    """
    if 'Youtubedl-no-compression' not in headers:
        return headers
    return dict(
        (k, v) for k, v in headers.items()
        if k.lower() != 'accept-encoding' and k != 'Youtubedl-no-compression')
894 class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
895 """Handler for HTTP requests and responses.
897 This class, when installed with an OpenerDirector, automatically adds
898 the standard headers to every HTTP request and handles gzipped and
899 deflated responses from web servers. If compression is to be avoided in
900 a particular request, the original request in the program code only has
901 to include the HTTP header "Youtubedl-no-compression", which will be
902 removed before making the real request.
904 Part of this code was copied from:
906 http://techknack.net/python-urllib2-handlers/
908 Andrew Rowls, the author of that code, agreed to release it to the
912 def __init__(self, params, *args, **kwargs):
913 compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
914 self._params = params
916 def http_open(self, req):
917 conn_class = compat_http_client.HTTPConnection
919 socks_proxy = req.headers.get('Ytdl-socks-proxy')
921 conn_class = make_socks_conn_class(conn_class, socks_proxy)
922 del req.headers['Ytdl-socks-proxy']
924 return self.do_open(functools.partial(
925 _create_http_connection, self, conn_class, False),
931 return zlib.decompress(data, -zlib.MAX_WBITS)
933 return zlib.decompress(data)
935 def http_request(self, req):
936 # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
937 # always respected by websites, some tend to give out URLs with non percent-encoded
938 # non-ASCII characters (see telemb.py, ard.py [#3412])
939 # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
940 # To work around aforementioned issue we will replace request's original URL with
941 # percent-encoded one
942 # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
943 # the code of this workaround has been moved here from YoutubeDL.urlopen()
944 url = req.get_full_url()
945 url_escaped = escape_url(url)
947 # Substitute URL if any change after escaping
948 if url != url_escaped:
949 req = update_Request(req, url=url_escaped)
951 for h, v in std_headers.items():
952 # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
953 # The dict keys are capitalized because of this bug by urllib
954 if h.capitalize() not in req.headers:
957 req.headers = handle_youtubedl_headers(req.headers)
959 if sys.version_info < (2, 7) and '#' in req.get_full_url():
960 # Python 2.6 is brain-dead when it comes to fragments
961 req._Request__original = req._Request__original.partition('#')[0]
962 req._Request__r_type = req._Request__r_type.partition('#')[0]
966 def http_response(self, req, resp):
969 if resp.headers.get('Content-encoding', '') == 'gzip':
970 content = resp.read()
971 gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
973 uncompressed = io.BytesIO(gz.read())
974 except IOError as original_ioerror:
975 # There may be junk add the end of the file
976 # See http://stackoverflow.com/q/4928560/35070 for details
977 for i in range(1, 1024):
979 gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
980 uncompressed = io.BytesIO(gz.read())
985 raise original_ioerror
986 resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
987 resp.msg = old_resp.msg
988 del resp.headers['Content-encoding']
990 if resp.headers.get('Content-encoding', '') == 'deflate':
991 gz = io.BytesIO(self.deflate(resp.read()))
992 resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
993 resp.msg = old_resp.msg
994 del resp.headers['Content-encoding']
995 # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
996 # https://github.com/rg3/youtube-dl/issues/6457).
997 if 300 <= resp.code < 400:
998 location = resp.headers.get('Location')
1000 # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
1001 if sys.version_info >= (3, 0):
1002 location = location.encode('iso-8859-1').decode('utf-8')
1004 location = location.decode('utf-8')
1005 location_escaped = escape_url(location)
1006 if location != location_escaped:
1007 del resp.headers['Location']
1008 if sys.version_info < (3, 0):
1009 location_escaped = location_escaped.encode('utf-8')
1010 resp.headers['Location'] = location_escaped
1013 https_request = http_request
1014 https_response = http_response
1017 def make_socks_conn_class(base_class, socks_proxy):
1018 assert issubclass(base_class, (
1019 compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
1021 url_components = compat_urlparse.urlparse(socks_proxy)
1022 if url_components.scheme.lower() == 'socks5':
1023 socks_type = ProxyType.SOCKS5
1024 elif url_components.scheme.lower() in ('socks', 'socks4'):
1025 socks_type = ProxyType.SOCKS4
1026 elif url_components.scheme.lower() == 'socks4a':
1027 socks_type = ProxyType.SOCKS4A
1029 def unquote_if_non_empty(s):
1032 return compat_urllib_parse_unquote_plus(s)
1036 url_components.hostname, url_components.port or 1080,
1038 unquote_if_non_empty(url_components.username),
1039 unquote_if_non_empty(url_components.password),
1042 class SocksConnection(base_class):
1044 self.sock = sockssocket()
1045 self.sock.setproxy(*proxy_args)
1046 if type(self.timeout) in (int, float):
1047 self.sock.settimeout(self.timeout)
1048 self.sock.connect((self.host, self.port))
1050 if isinstance(self, compat_http_client.HTTPSConnection):
1051 if hasattr(self, '_context'): # Python > 2.6
1052 self.sock = self._context.wrap_socket(
1053 self.sock, server_hostname=self.host)
1055 self.sock = ssl.wrap_socket(self.sock)
1057 return SocksConnection
1060 class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
1061 def __init__(self, params, https_conn_class=None, *args, **kwargs):
1062 compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
1063 self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
1064 self._params = params
1066 def https_open(self, req):
1068 conn_class = self._https_conn_class
1070 if hasattr(self, '_context'): # python > 2.6
1071 kwargs['context'] = self._context
1072 if hasattr(self, '_check_hostname'): # python 3.x
1073 kwargs['check_hostname'] = self._check_hostname
1075 socks_proxy = req.headers.get('Ytdl-socks-proxy')
1077 conn_class = make_socks_conn_class(conn_class, socks_proxy)
1078 del req.headers['Ytdl-socks-proxy']
1080 return self.do_open(functools.partial(
1081 _create_http_connection, self, conn_class, True),
1085 class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
1086 def __init__(self, cookiejar=None):
1087 compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
1089 def http_response(self, request, response):
1090 # Python 2 will choke on next HTTP request in row if there are non-ASCII
1091 # characters in Set-Cookie HTTP header of last response (see
1092 # https://github.com/rg3/youtube-dl/issues/6769).
1093 # In order to at least prevent crashing we will percent encode Set-Cookie
1094 # header before HTTPCookieProcessor starts processing it.
1095 # if sys.version_info < (3, 0) and response.headers:
1096 # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
1097 # set_cookie = response.headers.get(set_cookie_header)
1099 # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
1100 # if set_cookie != set_cookie_escaped:
1101 # del response.headers[set_cookie_header]
1102 # response.headers[set_cookie_header] = set_cookie_escaped
1103 return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
1105 https_request = compat_urllib_request.HTTPCookieProcessor.http_request
1106 https_response = http_response
1109 def extract_timezone(date_str):
1111 r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
1114 timezone = datetime.timedelta()
1116 date_str = date_str[:-len(m.group('tz'))]
1117 if not m.group('sign'):
1118 timezone = datetime.timedelta()
1120 sign = 1 if m.group('sign') == '+' else -1
1121 timezone = datetime.timedelta(
1122 hours=sign * int(m.group('hours')),
1123 minutes=sign * int(m.group('minutes')))
1124 return timezone, date_str
1127 def parse_iso8601(date_str, delimiter='T', timezone=None):
1128 """ Return a UNIX timestamp from the given date """
1130 if date_str is None:
1133 date_str = re.sub(r'\.[0-9]+', '', date_str)
1135 if timezone is None:
1136 timezone, date_str = extract_timezone(date_str)
1139 date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
1140 dt = datetime.datetime.strptime(date_str, date_format) - timezone
1141 return calendar.timegm(dt.timetuple())
def date_formats(day_first=True):
    """Return the list of date formats to try, ordered by day/month preference."""
    if day_first:
        return DATE_FORMATS_DAY_FIRST
    return DATE_FORMATS_MONTH_FIRST
1150 def unified_strdate(date_str, day_first=True):
1151 """Return a string with the date in the format YYYYMMDD"""
1153 if date_str is None:
1157 date_str = date_str.replace(',', ' ')
1158 # Remove AM/PM + timezone
1159 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1160 _, date_str = extract_timezone(date_str)
1162 for expression in date_formats(day_first):
1164 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
1167 if upload_date is None:
1168 timetuple = email.utils.parsedate_tz(date_str)
1171 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
1174 if upload_date is not None:
1175 return compat_str(upload_date)
1178 def unified_timestamp(date_str, day_first=True):
1179 if date_str is None:
1182 date_str = date_str.replace(',', ' ')
1184 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
1185 timezone, date_str = extract_timezone(date_str)
1187 # Remove AM/PM + timezone
1188 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1190 # Remove unrecognized timezones from ISO 8601 alike timestamps
1191 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1193 date_str = date_str[:-len(m.group('tz'))]
1195 for expression in date_formats(day_first):
1197 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
1198 return calendar.timegm(dt.timetuple())
1201 timetuple = email.utils.parsedate_tz(date_str)
1203 return calendar.timegm(timetuple) + pm_delta * 3600
1206 def determine_ext(url, default_ext='unknown_video'):
1209 guess = url.partition('?')[0].rpartition('.')[2]
1210 if re.match(r'^[A-Za-z0-9]+$', guess):
1212 # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
1213 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
1214 return guess.rstrip('/')
def subtitles_filename(filename, sub_lang, sub_format):
    """Derive the subtitle file name for a media file: the media extension
    is replaced by '<sub_lang>.<sub_format>'."""
    base = filename.rsplit('.', 1)[0]
    return '.'.join((base, sub_lang, sub_format))
def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        unit = match.group('unit')
        # A bad approximation?
        elif unit == 'year':
        # The unit is passed as a datetime.timedelta keyword argument
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    # Plain YYYYMMDD date
    return datetime.datetime.strptime(date_str, '%Y%m%d').date()
def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        # Pass non-matching input through unchanged instead of implicitly
        # returning None.
        return date_str
class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
            # No lower bound: open the range at the minimum representable date
            self.start = datetime.datetime.min.date()
            self.end = date_from_str(end)
            # No upper bound: open the range at the maximum representable date
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

        # Human-readable "start - end" representation
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    # Decode a bytes result using the locale's preferred encoding
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes.wintypes

        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
    if fileno not in WIN_OUTPUT_IDS:

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b'GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b'GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        # A handle is only a real console when it is a character device
        # and GetConsoleMode succeeds on it
        if handle == INVALID_HANDLE_VALUE or handle is None:
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):

    def next_nonbmp_pos(s):
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:

        # Write at most 1024 chars, stopping before the first non-BMP char
        # (the count == 2 path below handles one surrogate pair at a time)
        count = min(next_nonbmp_pos(s), 1024)

        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            assert written.value > 0
            s = s[written.value:]
def write_string(s, out=None, encoding=None):
    """Write the unicode string *s* to *out*, encoding for byte streams."""
    assert type(s) == compat_str

    # On Windows, prefer writing through the console API when possible
    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
    elif hasattr(out, 'buffer'):
        # Text stream with an underlying binary buffer: encode explicitly
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
def bytes_to_intlist(bs):
    """Convert a bytes-like object into a list of integer byte values."""
    if isinstance(bs[0], int):  # Python 3
        # Python 2: elements are 1-char strings, convert via ord()
        return [ord(c) for c in bs]
def intlist_to_bytes(xs):
    """Pack a list of integer byte values back into a bytes object."""
    return compat_struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes

    class OVERLAPPED(ctypes.Structure):
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    # Lock the whole file: low/high dwords of the byte count
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        # 0x2 == LOCKFILE_EXCLUSIVE_LOCK
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

    # Some platforms, such as Jython, are missing fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)

        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)
class locked_file(object):
    """Context-managed file wrapper that holds a lock while the file is open."""

    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)

    def __enter__(self):
        # Readers take a shared lock, writers an exclusive one
        exclusive = self.mode != 'r'
            _lock_file(self.f, exclusive)

    def __exit__(self, etype, value, traceback):
            _unlock_file(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
def get_filesystem_encoding():
    """Return the file system encoding, defaulting to utf-8 when unknown."""
    fs_encoding = sys.getfilesystemencoding()
    if fs_encoding is None:
        return 'utf-8'
    return fs_encoding
def shell_quote(args):
    """Quote *args* for safe use on a shell command line."""
    encoding = get_filesystem_encoding()
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    # Merge with any data already smuggled into the URL
    url, idata = unsmuggle_url(url, {})
    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    # The payload rides in the URL fragment
    return url + '#' + sdata
def unsmuggle_url(smug_url, default=None):
    """Extract data hidden by smuggle_url(); yields (url, data-or-default)."""
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
def format_bytes(bytes):
    """Format a byte count as a human-readable string, e.g. '1.00MiB'."""
    if type(bytes) is str:
        bytes = float(bytes)
        # 1024-based exponent selects the binary suffix below
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)
def lookup_unit_table(unit_table, s):
    """Parse '<number> <unit>' from *s* using the *unit_table* multipliers."""
    units_re = '|'.join(re.escape(u) for u in unit_table)
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    # Comma is accepted as a decimal separator
    num_str = m.group('num').replace(',', '.')
    mult = unit_table[m.group('unit')]
    return int(float(num_str) * mult)
def parse_filesize(s):
    """Parse a human-readable file size (e.g. '5 MiB') into bytes."""

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,

    return lookup_unit_table(_UNIT_TABLE, s)
    # (body of parse_count) Plain digit/percent-style strings parse directly;
    # everything else goes through the suffix table ('k', 'm', ...)
    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    return lookup_unit_table(_UNIT_TABLE, s)
def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    # Fall back to English month names for unknown languages
    month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])

        # 1-based month number
        return month_names.index(name) + 1
def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        three-letter abbreviation """

        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    # Only bare ampersands are escaped; existing entities and char refs stay
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
def setproctitle(title):
    """Set the process name via libc prctl (best effort, Linux only)."""
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):

        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    # LoadLibrary in Windows Python 2.7.13 only expects
    # a bytestring, but since unicode_literals turns
    # every string into a unicode string, it fails.
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
        # 15 == PR_SET_NAME
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
def remove_start(s, start):
    """Strip a leading *start* from *s* when present; None passes through."""
    if s is None or not s.startswith(start):
        return s
    return s[len(start):]
def remove_end(s, end):
    """Strip a trailing *end* from *s* when present; None passes through.

    Guards against an empty *end*: the old `s[:-len(end)]` slice evaluated to
    `s[:0]` and wiped the whole string when len(end) == 0.
    """
    if s is None or not end or not s.endswith(end):
        return s
    return s[:-len(end)]
def remove_quotes(s):
    """Strip one matching pair of surrounding quotes from *s*, if present."""
    if s is None or len(s) < 2:
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
def url_basename(url):
    """Return the final path segment of *url*, ignoring query and fragment."""
    path = compat_urlparse.urlparse(url).path
    segments = path.strip('/').split('/')
    return segments[-1]
    # (body of base_url) Greedy match keeps everything up to the last '/'
    # before any query/fragment; NOTE(review): raises AttributeError when the
    # URL has no trailing slash after the host — confirm callers rely on that
    return re.match(r'https?://[^?#&]+/', url).group()
def urljoin(base, path):
    """Join *base* and *path* with extra validation of both arguments."""
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not isinstance(path, compat_str) or not path:
    # Already-absolute (or protocol-relative) paths need no joining
    if re.match(r'^(?:https?:)?//', path):
    if isinstance(base, bytes):
        base = base.decode('utf-8')
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
    return compat_urlparse.urljoin(base, path)
class HEADRequest(compat_urllib_request.Request):
    # Request subclass whose HTTP verb is HEAD instead of GET/POST
    def get_method(self):
class PUTRequest(compat_urllib_request.Request):
    # Request subclass whose HTTP verb is PUT instead of GET/POST
    def get_method(self):
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """Convert *v* (optionally the attribute *get_attr* of it) to an int
    scaled by invscale/scale; return *default* on failure."""
            v = getattr(v, get_attr, None)
        return int(v) * invscale // scale
def str_or_none(v, default=None):
    """Convert *v* to compat_str, or return *default* when v is None."""
    if v is None:
        return default
    return compat_str(v)
def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    # Drop thousands separators ("," or ".") and plus signs before converting
    int_str = re.sub(r'[,\.\+]', '', int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
    """Convert *v* to a float scaled by invscale/scale; *default* on failure."""
        return float(v) * invscale / scale
def strip_or_none(v):
    """Return v.strip(), passing None through unchanged."""
    if v is None:
        return None
    return v.strip()
def parse_duration(s):
    """Parse a duration ('1:23:45', '2h 3m 4s', '90 min', ...) into seconds."""
    if not isinstance(s, compat_basestring):

    days, hours, mins, secs, ms = [None] * 5
    # First try the colon form [[[DD:]HH:]MM:]SS[.ms]
    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
        days, hours, mins, secs, ms = m.groups()
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
            days, hours, mins, secs, ms = m.groups()
            # Last resort: fractional "X hours" / "Y mins"
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
                hours, mins = m.groups()

        duration += float(secs)
        duration += float(mins) * 60
        duration += float(hours) * 60 * 60
        duration += float(days) * 24 * 60 * 60
        duration += float(ms)
def prepend_extension(filename, ext, expected_real_ext=None):
    """Insert *ext* before the real extension; append it instead when the
    real extension doesn't match *expected_real_ext* (if given)."""
    name, real_ext = os.path.splitext(filename)
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))
def replace_extension(filename, ext, expected_real_ext=None):
    """Replace the extension with *ext*; the old one is only stripped when it
    matches *expected_real_ext* (if given)."""
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
        # NOTE(review): mutable default is harmless here — args is only read
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if youtube-dl is run in the background.
        # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)
def detect_exe_version(output, version_re=None, unrecognized='present'):
    """Extract a version string from executable *output* via *version_re*."""
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
class PagedList(object):
    """Base class for lazily-fetched paged result lists."""
        # This is only useful for tests
        return len(self.getslice())
class OnDemandPagedList(PagedList):
    """PagedList that fetches pages on demand, optionally caching them."""

    def __init__(self, pagefunc, pagesize, use_cache=False):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._use_cache = use_cache

    def getslice(self, start=0, end=None):
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:

                page_results = self._cache.get(pagenum)
            if page_results is None:
                page_results = list(self._pagefunc(pagenum))
                self._cache[pagenum] = page_results

                # Offset of the slice start inside the current page
                start % self._pagesize
                if firstid <= start < nextfirstid

                # Offset of the slice end inside the current page
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
class InAdvancePagedList(PagedList):
    """PagedList whose total page count is known in advance."""

    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        start_page = start // self._pagesize
            self._pagecount if end is None else (end // self._pagesize + 1))
        # Elements of the first page that precede the requested start
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
                page = page[skip_elems:]
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                    page = page[:only_more]
def uppercase_escape(s):
    """Decode \\UXXXXXXXX escape sequences contained in *s*."""
    unicode_escape = codecs.getdecoder('unicode_escape')
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
def lowercase_escape(s):
    """Decode \\uXXXX escape sequences contained in *s*."""
    unicode_escape = codecs.getdecoder('unicode_escape')
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    # Reserved and sub-delimiter characters are deliberately left unescaped
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        # The hostname is IDNA-encoded rather than percent-escaped
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
def read_batch_urls(batch_fd):
    """Read URLs from a batch file object, skipping comment lines; closes it."""
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        # Lines starting with '#', ';' or ']' are treated as comments
        if url.startswith(('#', ';', ']')):

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
    """URL-encode POST data and return it as ASCII bytes."""
    encoded = compat_urllib_parse_urlencode(*args, **kargs)
    return encoded.encode('ascii')
def update_url_query(url, query):
    """Return *url* with its query string merged with the *query* dict."""
    parsed_url = compat_urlparse.urlparse(url)
    qs = compat_parse_qs(parsed_url.query)
    return compat_urlparse.urlunparse(parsed_url._replace(
        query=compat_urllib_parse_urlencode(qs, True)))
def update_Request(req, url=None, data=None, headers={}, query={}):
    """Clone *req*, optionally replacing url/data and merging headers/query."""
    req_headers = req.headers.copy()
    req_headers.update(headers)
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    req_get_method = req.get_method()
    # Preserve the request class so the HTTP verb survives the copy
    if req_get_method == 'HEAD':
        req_type = HEADRequest
    elif req_get_method == 'PUT':
        req_type = PUTRequest
        req_type = compat_urllib_request.Request
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
def _multipart_encode_impl(data, boundary):
    """Build a multipart/form-data body; raises ValueError if the boundary
    occurs inside the encoded data."""
    content_type = 'multipart/form-data; boundary=%s' % boundary

    for k, v in data.items():
        out += b'--' + boundary.encode('ascii') + b'\r\n'
        if isinstance(k, compat_str):
            k = k.encode('utf-8')
        if isinstance(v, compat_str):
            v = v.encode('utf-8')
        # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
        # suggests sending UTF-8 directly. Firefox sends UTF-8, too
        content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
        if boundary.encode('ascii') in content:
            raise ValueError('Boundary overlaps with data')

    out += b'--' + boundary.encode('ascii') + b'--\r\n'

    return out, content_type
def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data

    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified a Unicode object, it's used as the boundary. Otherwise
        a random boundary is generated.

    Reference: https://tools.ietf.org/html/rfc7578
    '''
    has_specified_boundary = boundary is not None

        if boundary is None:
            # Random boundary, regenerated on collision with the data
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))

            out, content_type = _multipart_encode_impl(data, boundary)
            if has_specified_boundary:

    return out, content_type
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    """Return the first usable value for one key or a sequence of keys."""
    if isinstance(key_or_keys, (list, tuple)):
        for key in key_or_keys:
            # Skip missing/None entries (and falsy ones unless told otherwise)
            if key not in d or d[key] is None or skip_false_values and not d[key]:
    return d.get(key_or_keys, default)
def try_get(src, getter, expected_type=None):
    """Apply getter callable(s) to *src*, swallowing common lookup errors;
    optionally require the result to be of *expected_type*."""
    if not isinstance(getter, (list, tuple)):
        except (AttributeError, KeyError, TypeError, IndexError):
            if expected_type is None or isinstance(v, expected_type):
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    # NOTE: the default *encoding* is evaluated once, at import time
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
# TV parental guideline label -> age limit mapping
TV_PARENTAL_GUIDELINES = {
def parse_age_limit(s):
    """Parse an age limit from an int, a 'NN+' string, a US rating or a TV
    parental guideline label."""
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
        return int(m.group('age'))
        return US_RATINGS[s]
    return TV_PARENTAL_GUIDELINES.get(s)
def strip_jsonp(code):
    """Strip a JSONP wrapper, keeping only the callback payload."""
        (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]+)
        (?:\s*&&\s*(?P=func_name))?
        \s*\(\s*(?P<callback_data>.*)\);?
        \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)
def js_to_json(code):
    """Convert a JavaScript object/value literal into valid JSON text."""
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),

        if v in ('true', 'false', 'null'):
        elif v.startswith('/*') or v.startswith('//') or v == ',':

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
            }.get(m.group(0), m.group(0)), v[1:-1])

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
                i = int(im.group(1), base)
                # Hex/octal keys become quoted decimal strings
                return '"%d":' % i if v.endswith(':') else '%d' % i

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        [a-zA-Z_][.a-zA-Z_0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
        # The index in the list serves as the quality score
        return quality_ids.index(qid)
# Default output filename template: "<title>-<id>.<ext>"
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
def limit_length(s, length):
    """ Add ellipses to overly long strings """
        # Truncate so the result, ellipses included, fits in *length*
        return s[:length - len(ELLIPSES)] + ELLIPSES
def version_tuple(v):
    """Split a version string such as '2017.1.2' or '1.0-3' into an int tuple."""
    parts = re.split(r'[-.]', v)
    return tuple(map(int, parts))
def is_outdated_version(version, limit, assume_new=True):
    """True when *version* is older than *limit*; *assume_new* decides the
    answer when a version string can't be compared."""
        return not assume_new
        return version_tuple(version) < version_tuple(limit)
        return not assume_new
def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter

    # Updatable when running from a zip bundle or a frozen executable
    if isinstance(globals().get('__loader__'), zipimporter):
        return True
    return hasattr(sys, 'frozen')
def args_to_str(args):
    # Get a short string representation for a subprocess command
    quoted = [compat_shlex_quote(a) for a in args]
    return ' '.join(quoted)
def error_to_compat_str(err):
    """Stringify an exception, decoding byte messages on Python 2."""
    # On python 2 error byte string must be decoded with proper
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        err_str = err_str.decode(preferredencoding())
def mimetype2ext(mt):
    """Map a MIME type string to a file extension."""
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',

    # Keep only the subtype, without parameters, lower-cased
    _, _, res = mt.rpartition('/')
    res = res.split(';')[0].strip().lower()

        'smptett+xml': 'tt',
        'x-mp4-fragmented': 'mp4',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'vnd.ms-sstr+xml': 'ism',
def parse_codecs(codecs_str):
    """Split an RFC 6381 codecs string into vcodec/acodec fields."""
    # http://tools.ietf.org/html/rfc6381
    splited_codecs = list(filter(None, map(
        lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
    vcodec, acodec = None, None
    for full_codec in splited_codecs:
        # The leading dotted component identifies the codec family
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        if len(splited_codecs) == 2:
        elif len(splited_codecs) == 1:
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
def urlhandle_detect_ext(url_handle):
    """Guess a file extension for a response, preferring Content-Disposition
    over Content-Type."""
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
            e = determine_ext(m.group('filename'), default_ext=None)

    return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
    """Wrap *data* (bytes) in a base64-encoded data: URI of *mime_type*."""
    encoded = base64.b64encode(data).decode('ascii')
    return 'data:%s;base64,%s' % (mime_type, encoded)
def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """

    # Decode according to a recognized BOM first, longest BOM patterns first
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
    """Determine the download protocol for a format dict."""
    protocol = info_dict.get('protocol')
    if protocol is not None:

    url = info_dict['url']
    # Scheme-based detection first
    if url.startswith('rtmp'):
    elif url.startswith('mms'):
    elif url.startswith('rtsp'):

    # Then extension-based detection (m3u8/f4m, handled in elided branches)
    ext = determine_ext(url)

    return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    table = [header_row] + data
    column_widths = [max(len(compat_str(cell)) for cell in column)
                     for column in zip(*table)]
    fmt = ' '.join('%-' + compat_str(width + 1) + 's'
                   for width in column_widths[:-1]) + '%s'
    return '\n'.join(fmt % tuple(row) for row in table)
def _match_one(filter_part, dct):
    """Evaluate one '<key><op><value>' or unary '!<key>'/'<key>' filter
    expression against the dict *dct*."""
    COMPARISON_OPERATORS = {
    operator_rex = re.compile(r'''(?x)\s*
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
        op = COMPARISON_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if (m.group('quotedstrval') is not None or
                m.group('strval') is not None or
                # If the original field is a string and matching comparisonvalue is
                # a number we should respect the origin of the original field
                # and process comparison value as a string (see
                # https://github.com/rg3/youtube-dl/issues/11082).
                actual_value is not None and m.group('intval') is not None and
                isinstance(actual_value, compat_str)):
            if m.group('op') not in ('=', '!='):
                    'Operator %s does not support string values!' % m.group('op'))
            comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
            quote = m.group('quote')
            if quote is not None:
                # Unescape the quote character inside the quoted value
                comparison_value = comparison_value.replace(r'\%s' % quote, quote)
                comparison_value = int(m.group('intval'))
                # Not a plain integer: try to parse it as a size/count value
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        if actual_value is None:
            # A '?' after the operator lets missing values pass the filter
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)

        '': lambda v: v is not None,
        '!': lambda v: v is None,
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """

    # All '&'-separated parts must match
        _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
def match_filter_func(filter_str):
    """Build a match-filter callable: returns None when a video passes the
    filter, or a human-readable skip message otherwise."""
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
def parse_dfxp_time_expr(time_expr):
    """Parse a TTML time expression ('12.3s' or 'HH:MM:SS[.mmm]') to seconds."""

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
        # A trailing ':'-separated component is folded in as a decimal part
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
    """Format a number of seconds as an SRT timecode (HH:MM:SS,mmm)."""
    hrs = seconds / 3600
    mins = (seconds % 3600) / 60
    secs = seconds % 60
    msecs = (seconds % 1) * 1000
    return '%02d:%02d:%02d,%03d' % (hrs, mins, secs, msecs)
def dfxp2srt(dfxp_data):
    """Convert DFXP/TTML subtitle markup into SRT text."""
    # Older TTML namespaces that are rewritten to the current ones below
    LEGACY_NAMESPACES = (
        ('http://www.w3.org/ns/ttml', [
            'http://www.w3.org/2004/11/ttaf1',
            'http://www.w3.org/2006/04/ttaf1',
            'http://www.w3.org/2006/10/ttaf1',
        ('http://www.w3.org/ns/ttml#styling', [
            'http://www.w3.org/ns/ttml#style',

    SUPPORTED_STYLING = [

    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'tts': 'http://www.w3.org/ns/ttml#styling',

    class TTMLPElementParser(object):
        # NOTE(review): mutable class attributes are shared across instances;
        # appears safe since a fresh parser is built per node in parse_node()
        _unclosed_elements = []
        _applied_styles = []

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), 'br'):
                unclosed_elements = []
                # Effective style: defaults, then element's style id, then
                # inline tts:* attributes
                element_style_id = attrib.get('style')
                style.update(default_style)
                if element_style_id:
                    style.update(styles.get(element_style_id, {}))
                for prop in SUPPORTED_STYLING:
                    prop_val = attrib.get(_x('tts:' + prop))
                        style[prop] = prop_val
                    for k, v in sorted(style.items()):
                        if self._applied_styles and self._applied_styles[-1].get(k) == v:
                            font += ' color="%s"' % v
                        elif k == 'fontSize':
                            font += ' size="%s"' % v
                        elif k == 'fontFamily':
                            font += ' face="%s"' % v
                        elif k == 'fontWeight' and v == 'bold':
                            unclosed_elements.append('b')
                        elif k == 'fontStyle' and v == 'italic':
                            unclosed_elements.append('i')
                        elif k == 'textDecoration' and v == 'underline':
                            unclosed_elements.append('u')
                        self._out += '<font' + font + '>'
                        unclosed_elements.append('font')
                    if self._applied_styles:
                        applied_style.update(self._applied_styles[-1])
                    applied_style.update(style)
                    self._applied_styles.append(applied_style)
                self._unclosed_elements.append(unclosed_elements)

            if tag not in (_x('ttml:br'), 'br'):
                unclosed_elements = self._unclosed_elements.pop()
                for element in reversed(unclosed_elements):
                    self._out += '</%s>' % element
                if unclosed_elements and self._applied_styles:
                    self._applied_styles.pop()

        def data(self, data):

            return self._out.strip()

    def parse_node(node):
        # Re-serialize the node and run it through the styling parser above
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    for k, v in LEGACY_NAMESPACES:
            dfxp_data = dfxp_data.replace(ns, k)

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')

        raise ValueError('Invalid dfxp/TTML subtitle')

        # Resolve style inheritance; repeats until parents are available
        for style in dfxp.findall(_x('.//ttml:style')):
            style_id = style.get('id')
            parent_style_id = style.get('style')
                if parent_style_id not in styles:
                styles[style_id] = styles[parent_style_id].copy()
            for prop in SUPPORTED_STYLING:
                prop_val = style.get(_x('tts:' + prop))
                    styles.setdefault(style_id, {})[prop] = prop_val

    # body/div level styles become the default style
    for p in ('body', 'div'):
        ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
        style = styles.get(ele.get('style'))
            default_style.update(style)

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
# Turn params[param] into a ['--opt', value] argv pair; returns [] when the
# value is None.  NOTE(review): the guard line between get() and the
# compat_str() call is missing from this excerpt.
2723 def cli_option(params, command_option, param):
2724 param = params.get(param)
# Coerce the value to a text string for the command line.
2726 param = compat_str(param)
2727 return [command_option, param] if param is not None else []
# Map a boolean option to CLI syntax: either a single '--opt<sep>true'-style
# argument (when `separator` is given) or the two-item ['--opt', 'true'] form.
2730 def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
2731 param = params.get(param)
# NOTE(review): assert is stripped under `python -O`; callers must pass a bool.
2732 assert isinstance(param, bool)
# Joined single-argument form (the `if separator:` guard is elided here).
2734 return [command_option + separator + (true_value if param else false_value)]
2735 return [command_option, true_value if param else false_value]
def cli_valueless_option(params, command_option, param, expected_value=True):
    """Emit [command_option] when params[param] equals expected_value, else []."""
    current = params.get(param)
    if current == expected_value:
        return [command_option]
    return []
# Fetch a list-valued parameter of extra CLI arguments.
# NOTE(review): `default=[]` is a shared mutable default -- safe only while
# callers never mutate the returned list; the None-guard lines between get()
# and the assert are elided from this excerpt.
2743 def cli_configuration_args(params, param, default=[]):
2744 ex_args = params.get(param)
2747 assert isinstance(ex_args, list)
# Two-way mapping between ISO 639-1 (2-letter) and ISO 639-2/T (3-letter)
# language codes.  The _lang_map table itself is elided from this excerpt.
2751 class ISO639Utils(object):
2752 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
# NOTE(review): the @classmethod decorators are elided from this excerpt.
2941 def short2long(cls, code):
2942 """Convert language code from ISO 639-1 to ISO 639-2/T"""
# Only the first two letters are significant (tolerates 'en-US'-style input).
2943 return cls._lang_map.get(code[:2])
2946 def long2short(cls, code):
2947 """Convert language code from ISO 639-2/T to ISO 639-1"""
# Reverse lookup by scanning the map (the `return short_name` line is elided).
2948 for short_name, long_name in cls._lang_map.items():
2949 if long_name == code:
# ISO 3166 alpha-2 country code -> full English country name lookup.
# NOTE(review): the `_country_map = {` opener, many entries, and the closing
# brace are elided from this non-contiguous excerpt.
2953 class ISO3166Utils(object):
2954 # From http://data.okfn.org/data/core/country-list
2956 'AF': 'Afghanistan',
2957 'AX': 'Åland Islands',
2960 'AS': 'American Samoa',
2965 'AG': 'Antigua and Barbuda',
2982 'BO': 'Bolivia, Plurinational State of',
2983 'BQ': 'Bonaire, Sint Eustatius and Saba',
2984 'BA': 'Bosnia and Herzegovina',
2986 'BV': 'Bouvet Island',
2988 'IO': 'British Indian Ocean Territory',
2989 'BN': 'Brunei Darussalam',
2991 'BF': 'Burkina Faso',
2997 'KY': 'Cayman Islands',
2998 'CF': 'Central African Republic',
3002 'CX': 'Christmas Island',
3003 'CC': 'Cocos (Keeling) Islands',
3007 'CD': 'Congo, the Democratic Republic of the',
3008 'CK': 'Cook Islands',
3010 'CI': 'Côte d\'Ivoire',
3015 'CZ': 'Czech Republic',
3019 'DO': 'Dominican Republic',
3022 'SV': 'El Salvador',
3023 'GQ': 'Equatorial Guinea',
3027 'FK': 'Falkland Islands (Malvinas)',
3028 'FO': 'Faroe Islands',
3032 'GF': 'French Guiana',
3033 'PF': 'French Polynesia',
3034 'TF': 'French Southern Territories',
3049 'GW': 'Guinea-Bissau',
3052 'HM': 'Heard Island and McDonald Islands',
3053 'VA': 'Holy See (Vatican City State)',
3060 'IR': 'Iran, Islamic Republic of',
3063 'IM': 'Isle of Man',
3073 'KP': 'Korea, Democratic People\'s Republic of',
3074 'KR': 'Korea, Republic of',
3077 'LA': 'Lao People\'s Democratic Republic',
3083 'LI': 'Liechtenstein',
3087 'MK': 'Macedonia, the Former Yugoslav Republic of',
3094 'MH': 'Marshall Islands',
3100 'FM': 'Micronesia, Federated States of',
3101 'MD': 'Moldova, Republic of',
3112 'NL': 'Netherlands',
3113 'NC': 'New Caledonia',
3114 'NZ': 'New Zealand',
3119 'NF': 'Norfolk Island',
3120 'MP': 'Northern Mariana Islands',
3125 'PS': 'Palestine, State of',
3127 'PG': 'Papua New Guinea',
3130 'PH': 'Philippines',
3134 'PR': 'Puerto Rico',
3138 'RU': 'Russian Federation',
3140 'BL': 'Saint Barthélemy',
3141 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
3142 'KN': 'Saint Kitts and Nevis',
3143 'LC': 'Saint Lucia',
3144 'MF': 'Saint Martin (French part)',
3145 'PM': 'Saint Pierre and Miquelon',
3146 'VC': 'Saint Vincent and the Grenadines',
3149 'ST': 'Sao Tome and Principe',
3150 'SA': 'Saudi Arabia',
3154 'SL': 'Sierra Leone',
3156 'SX': 'Sint Maarten (Dutch part)',
3159 'SB': 'Solomon Islands',
3161 'ZA': 'South Africa',
3162 'GS': 'South Georgia and the South Sandwich Islands',
3163 'SS': 'South Sudan',
3168 'SJ': 'Svalbard and Jan Mayen',
3171 'CH': 'Switzerland',
3172 'SY': 'Syrian Arab Republic',
3173 'TW': 'Taiwan, Province of China',
3175 'TZ': 'Tanzania, United Republic of',
3177 'TL': 'Timor-Leste',
3181 'TT': 'Trinidad and Tobago',
3184 'TM': 'Turkmenistan',
3185 'TC': 'Turks and Caicos Islands',
3189 'AE': 'United Arab Emirates',
3190 'GB': 'United Kingdom',
3191 'US': 'United States',
3192 'UM': 'United States Minor Outlying Islands',
3196 'VE': 'Venezuela, Bolivarian Republic of',
3198 'VG': 'Virgin Islands, British',
3199 'VI': 'Virgin Islands, U.S.',
3200 'WF': 'Wallis and Futuna',
3201 'EH': 'Western Sahara',
# NOTE(review): the @classmethod decorator is elided from this excerpt.
3208 def short2full(cls, code):
3209 """Convert an ISO 3166-2 country code to the corresponding full name"""
# upper() makes the lookup case-insensitive; unknown codes return None.
3210 return cls._country_map.get(code.upper())
# Per-country representative IPv4 CIDR blocks, used to fabricate a plausible
# source address for geo-restriction bypass.  NOTE(review): the
# `_country_ip_map = {` opener, the closing brace, and the @classmethod
# decorator on random_ipv4 are elided from this non-contiguous excerpt.
3213 class GeoUtils(object):
3214 # Major IPv4 address blocks per country
3216 'AD': '85.94.160.0/19',
3217 'AE': '94.200.0.0/13',
3218 'AF': '149.54.0.0/17',
3219 'AG': '209.59.64.0/18',
3220 'AI': '204.14.248.0/21',
3221 'AL': '46.99.0.0/16',
3222 'AM': '46.70.0.0/15',
3223 'AO': '105.168.0.0/13',
3224 'AP': '159.117.192.0/21',
3225 'AR': '181.0.0.0/12',
3226 'AS': '202.70.112.0/20',
3227 'AT': '84.112.0.0/13',
3228 'AU': '1.128.0.0/11',
3229 'AW': '181.41.0.0/18',
3230 'AZ': '5.191.0.0/16',
3231 'BA': '31.176.128.0/17',
3232 'BB': '65.48.128.0/17',
3233 'BD': '114.130.0.0/16',
3235 'BF': '129.45.128.0/17',
3236 'BG': '95.42.0.0/15',
3237 'BH': '37.131.0.0/17',
3238 'BI': '154.117.192.0/18',
3239 'BJ': '137.255.0.0/16',
3240 'BL': '192.131.134.0/24',
3241 'BM': '196.12.64.0/18',
3242 'BN': '156.31.0.0/16',
3243 'BO': '161.56.0.0/16',
3244 'BQ': '161.0.80.0/20',
3245 'BR': '152.240.0.0/12',
3246 'BS': '24.51.64.0/18',
3247 'BT': '119.2.96.0/19',
3248 'BW': '168.167.0.0/16',
3249 'BY': '178.120.0.0/13',
3250 'BZ': '179.42.192.0/18',
3251 'CA': '99.224.0.0/11',
3252 'CD': '41.243.0.0/16',
3253 'CF': '196.32.200.0/21',
3254 'CG': '197.214.128.0/17',
3255 'CH': '85.0.0.0/13',
3256 'CI': '154.232.0.0/14',
3257 'CK': '202.65.32.0/19',
3258 'CL': '152.172.0.0/14',
3259 'CM': '165.210.0.0/15',
3260 'CN': '36.128.0.0/10',
3261 'CO': '181.240.0.0/12',
3262 'CR': '201.192.0.0/12',
3263 'CU': '152.206.0.0/15',
3264 'CV': '165.90.96.0/19',
3265 'CW': '190.88.128.0/17',
3266 'CY': '46.198.0.0/15',
3267 'CZ': '88.100.0.0/14',
3269 'DJ': '197.241.0.0/17',
3270 'DK': '87.48.0.0/12',
3271 'DM': '192.243.48.0/20',
3272 'DO': '152.166.0.0/15',
3273 'DZ': '41.96.0.0/12',
3274 'EC': '186.68.0.0/15',
3275 'EE': '90.190.0.0/15',
3276 'EG': '156.160.0.0/11',
3277 'ER': '196.200.96.0/20',
3278 'ES': '88.0.0.0/11',
3279 'ET': '196.188.0.0/14',
3280 'EU': '2.16.0.0/13',
3281 'FI': '91.152.0.0/13',
3282 'FJ': '144.120.0.0/16',
3283 'FM': '119.252.112.0/20',
3284 'FO': '88.85.32.0/19',
3286 'GA': '41.158.0.0/15',
3288 'GD': '74.122.88.0/21',
3289 'GE': '31.146.0.0/16',
3290 'GF': '161.22.64.0/18',
3291 'GG': '62.68.160.0/19',
3292 'GH': '45.208.0.0/14',
3293 'GI': '85.115.128.0/19',
3294 'GL': '88.83.0.0/19',
3295 'GM': '160.182.0.0/15',
3296 'GN': '197.149.192.0/18',
3297 'GP': '104.250.0.0/19',
3298 'GQ': '105.235.224.0/20',
3299 'GR': '94.64.0.0/13',
3300 'GT': '168.234.0.0/16',
3301 'GU': '168.123.0.0/16',
3302 'GW': '197.214.80.0/20',
3303 'GY': '181.41.64.0/18',
3304 'HK': '113.252.0.0/14',
3305 'HN': '181.210.0.0/16',
3306 'HR': '93.136.0.0/13',
3307 'HT': '148.102.128.0/17',
3308 'HU': '84.0.0.0/14',
3309 'ID': '39.192.0.0/10',
3310 'IE': '87.32.0.0/12',
3311 'IL': '79.176.0.0/13',
3312 'IM': '5.62.80.0/20',
3313 'IN': '117.192.0.0/10',
3314 'IO': '203.83.48.0/21',
3315 'IQ': '37.236.0.0/14',
3316 'IR': '2.176.0.0/12',
3317 'IS': '82.221.0.0/16',
3318 'IT': '79.0.0.0/10',
3319 'JE': '87.244.64.0/18',
3320 'JM': '72.27.0.0/17',
3321 'JO': '176.29.0.0/16',
3322 'JP': '126.0.0.0/8',
3323 'KE': '105.48.0.0/12',
3324 'KG': '158.181.128.0/17',
3325 'KH': '36.37.128.0/17',
3326 'KI': '103.25.140.0/22',
3327 'KM': '197.255.224.0/20',
3328 'KN': '198.32.32.0/19',
3329 'KP': '175.45.176.0/22',
3330 'KR': '175.192.0.0/10',
3331 'KW': '37.36.0.0/14',
3332 'KY': '64.96.0.0/15',
3333 'KZ': '2.72.0.0/13',
3334 'LA': '115.84.64.0/18',
3335 'LB': '178.135.0.0/16',
3336 'LC': '192.147.231.0/24',
3337 'LI': '82.117.0.0/19',
3338 'LK': '112.134.0.0/15',
3339 'LR': '41.86.0.0/19',
3340 'LS': '129.232.0.0/17',
3341 'LT': '78.56.0.0/13',
3342 'LU': '188.42.0.0/16',
3343 'LV': '46.109.0.0/16',
3344 'LY': '41.252.0.0/14',
3345 'MA': '105.128.0.0/11',
3346 'MC': '88.209.64.0/18',
3347 'MD': '37.246.0.0/16',
3348 'ME': '178.175.0.0/17',
3349 'MF': '74.112.232.0/21',
3350 'MG': '154.126.0.0/17',
3351 'MH': '117.103.88.0/21',
3352 'MK': '77.28.0.0/15',
3353 'ML': '154.118.128.0/18',
3354 'MM': '37.111.0.0/17',
3355 'MN': '49.0.128.0/17',
3356 'MO': '60.246.0.0/16',
3357 'MP': '202.88.64.0/20',
3358 'MQ': '109.203.224.0/19',
3359 'MR': '41.188.64.0/18',
3360 'MS': '208.90.112.0/22',
3361 'MT': '46.11.0.0/16',
3362 'MU': '105.16.0.0/12',
3363 'MV': '27.114.128.0/18',
3364 'MW': '105.234.0.0/16',
3365 'MX': '187.192.0.0/11',
3366 'MY': '175.136.0.0/13',
3367 'MZ': '197.218.0.0/15',
3368 'NA': '41.182.0.0/16',
3369 'NC': '101.101.0.0/18',
3370 'NE': '197.214.0.0/18',
3371 'NF': '203.17.240.0/22',
3372 'NG': '105.112.0.0/12',
3373 'NI': '186.76.0.0/15',
3374 'NL': '145.96.0.0/11',
3375 'NO': '84.208.0.0/13',
3376 'NP': '36.252.0.0/15',
3377 'NR': '203.98.224.0/19',
3378 'NU': '49.156.48.0/22',
3379 'NZ': '49.224.0.0/14',
3380 'OM': '5.36.0.0/15',
3381 'PA': '186.72.0.0/15',
3382 'PE': '186.160.0.0/14',
3383 'PF': '123.50.64.0/18',
3384 'PG': '124.240.192.0/19',
3385 'PH': '49.144.0.0/13',
3386 'PK': '39.32.0.0/11',
3387 'PL': '83.0.0.0/11',
3388 'PM': '70.36.0.0/20',
3389 'PR': '66.50.0.0/16',
3390 'PS': '188.161.0.0/16',
3391 'PT': '85.240.0.0/13',
3392 'PW': '202.124.224.0/20',
3393 'PY': '181.120.0.0/14',
3394 'QA': '37.210.0.0/15',
3395 'RE': '139.26.0.0/16',
3396 'RO': '79.112.0.0/13',
3397 'RS': '178.220.0.0/14',
3398 'RU': '5.136.0.0/13',
3399 'RW': '105.178.0.0/15',
3400 'SA': '188.48.0.0/13',
3401 'SB': '202.1.160.0/19',
3402 'SC': '154.192.0.0/11',
3403 'SD': '154.96.0.0/13',
3404 'SE': '78.64.0.0/12',
3405 'SG': '152.56.0.0/14',
3406 'SI': '188.196.0.0/14',
3407 'SK': '78.98.0.0/15',
3408 'SL': '197.215.0.0/17',
3409 'SM': '89.186.32.0/19',
3410 'SN': '41.82.0.0/15',
3411 'SO': '197.220.64.0/19',
3412 'SR': '186.179.128.0/17',
3413 'SS': '105.235.208.0/21',
3414 'ST': '197.159.160.0/19',
3415 'SV': '168.243.0.0/16',
3416 'SX': '190.102.0.0/20',
3418 'SZ': '41.84.224.0/19',
3419 'TC': '65.255.48.0/20',
3420 'TD': '154.68.128.0/19',
3421 'TG': '196.168.0.0/14',
3422 'TH': '171.96.0.0/13',
3423 'TJ': '85.9.128.0/18',
3424 'TK': '27.96.24.0/21',
3425 'TL': '180.189.160.0/20',
3426 'TM': '95.85.96.0/19',
3427 'TN': '197.0.0.0/11',
3428 'TO': '175.176.144.0/21',
3429 'TR': '78.160.0.0/11',
3430 'TT': '186.44.0.0/15',
3431 'TV': '202.2.96.0/19',
3432 'TW': '120.96.0.0/11',
3433 'TZ': '156.156.0.0/14',
3434 'UA': '93.72.0.0/13',
3435 'UG': '154.224.0.0/13',
3437 'UY': '167.56.0.0/13',
3438 'UZ': '82.215.64.0/18',
3439 'VA': '212.77.0.0/19',
3440 'VC': '24.92.144.0/20',
3441 'VE': '186.88.0.0/13',
3442 'VG': '172.103.64.0/18',
3443 'VI': '146.226.0.0/16',
3444 'VN': '14.160.0.0/11',
3445 'VU': '202.80.32.0/20',
3446 'WF': '117.20.32.0/21',
3447 'WS': '202.4.32.0/19',
3448 'YE': '134.35.0.0/16',
3449 'YT': '41.242.116.0/22',
3450 'ZA': '41.0.0.0/11',
3451 'ZM': '165.56.0.0/13',
3452 'ZW': '41.85.192.0/19',
# Return a random IPv4 address (dotted-quad string) inside the country's
# block.  NOTE(review): the None-guard for unknown codes is elided here.
3456 def random_ipv4(cls, code):
3457 block = cls._country_ip_map.get(code.upper())
3460 addr, preflen = block.split('/')
# Network base as a 32-bit int; OR-ing in the inverted prefix mask yields
# the highest address of the block.
3461 addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
3462 addr_max = addr_min | (0xffffffff >> int(preflen))
3463 return compat_str(socket.inet_ntoa(
3464 compat_struct_pack('!L', random.randint(addr_min, addr_max))))
# urllib ProxyHandler that lets each individual request select its own proxy
# via the internal 'Ytdl-request-proxy' header.
3467 class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
3468 def __init__(self, proxies=None):
3469 # Set default handlers
# Lambda defaults bind `type`/`meth` at definition time, so each scheme gets
# its own handler (avoids the late-binding-closure pitfall).
3470 for type in ('http', 'https'):
3471 setattr(self, '%s_open' % type,
3472 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
3473 meth(r, proxy, type))
3474 return compat_urllib_request.ProxyHandler.__init__(self, proxies)
3476 def proxy_open(self, req, proxy, type):
# A per-request proxy header overrides the handler default and is stripped
# before the request goes out.  NOTE(review): the line assigning req_proxy
# to `proxy` is elided from this excerpt.
3477 req_proxy = req.headers.get('Ytdl-request-proxy')
3478 if req_proxy is not None:
3480 del req.headers['Ytdl-request-proxy']
3482 if proxy == '__noproxy__':
3483 return None # No Proxy
# SOCKS proxies are only signalled via a header here; the http(s) handlers
# perform the actual socket wrapping.
3484 if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
3485 req.add_header('Ytdl-socks-proxy', proxy)
3486 # youtube-dl's http/https handlers do wrapping the socket with socks
3488 return compat_urllib_request.ProxyHandler.proxy_open(
3489 self, req, proxy, type)
3492 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
3493 # released into Public Domain
3494 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
# NOTE(review): several interior lines (the pack loop header, break/else
# branches) are elided from this excerpt.
3496 def long_to_bytes(n, blocksize=0):
3497 """long_to_bytes(n:long, blocksize:int) : string
3498 Convert a long integer to a byte string.
3500 If optional blocksize is given and greater than zero, pad the front of the
3501 byte string with binary zeros so that the length is a multiple of
3504 # after much testing, this algorithm was deemed to be the fastest
# Emit 32 bits at a time, big-endian, most significant chunk first.
3508 s = compat_struct_pack('>I', n & 0xffffffff) + s
3510 # strip off leading zeros
3511 for i in range(len(s)):
3512 if s[i] != b'\000'[0]:
3515 # only happens when n == 0
3519 # add back some pad bytes. this could be done more efficiently w.r.t. the
3520 # de-padding being done above, but sigh...
3521 if blocksize > 0 and len(s) % blocksize:
3522 s = (blocksize - len(s) % blocksize) * b'\000' + s
# NOTE(review): the accumulator initialization and length computation lines
# are elided from this excerpt.
3526 def bytes_to_long(s):
3527 """bytes_to_long(string) : long
3528 Convert a byte string to a long integer.
3530 This is (essentially) the inverse of long_to_bytes().
# Left-pad to a multiple of 4 bytes so the string unpacks as whole
# big-endian 32-bit words.
3535 extra = (4 - length % 4)
3536 s = b'\000' * extra + s
3537 length = length + extra
3538 for i in range(0, length, 4):
3539 acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''
    # Interpret the reversed bytes as one big integer (OHDave treats the
    # input as little-endian), then do textbook modular exponentiation.
    message = int(binascii.hexlify(data[::-1]), 16)
    return '%x' % pow(message, exponent, modulus)
def pkcs1pad(data, length):
    """
    Pad input data with the PKCS#1 type 2 (encryption) scheme.

    @param {int[]} data   input data, one int (0-255) per byte
    @param {int} length   target length of the padded block
    @returns {int[]} padded data

    Raises ValueError when the data cannot fit: PKCS#1 needs at least
    11 bytes of overhead (2 header bytes, >= 8 padding bytes, 1 zero
    delimiter).
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    # RFC 8017 (PKCS #1 v2.2), EME-PKCS1-v1_5: the padding string PS must be
    # pseudo-random *nonzero* octets.  randint(0, 254) could emit zero bytes,
    # which a decoder would treat as the end-of-padding delimiter and thereby
    # truncate the message; use the full nonzero range 1..255 instead.
    pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data
# Encode integer `num` in base `n`, using `table` as the digit alphabet
# (defaults to a prefix of the 62-character 0-9a-zA-Z table).
# NOTE(review): the loop header and num-reduction lines are elided here.
3575 def encode_base_n(num, n, table=None):
3576 FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
3578 table = FULL_TABLE[:n]
3581 raise ValueError('base %d exceeds table length %d' % (n, len(table)))
# Repeated-division loop, building the digit string right-to-left.
3588 ret = table[num % n] + ret
# Unpack "p.a.c.k.e.r."-style obfuscated JavaScript: rebuild the symbol table
# (encoded word -> original token) and substitute it back into the code.
# NOTE(review): the int() conversions, loop header, and the re.sub call head
# are elided from this excerpt.
3593 def decode_packed_codes(code):
3594 mobj = re.search(PACKED_CODES_RE, code)
3595 obfucasted_code, base, count, symbols = mobj.groups()
3598 symbols = symbols.split('|')
# Each counter value re-encoded in the packer's base is the lookup key; an
# empty symbol means the word stands for itself.
3603 base_n_count = encode_base_n(count, base)
3604 symbol_table[base_n_count] = symbols[count] or base_n_count
3607 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
# Parse an M3U8 attribute list (KEY=value pairs; values optionally quoted so
# they may contain commas) into a dict.  NOTE(review): the dict initializer
# and the quote-stripping/assignment lines are elided from this excerpt.
3611 def parse_m3u8_attributes(attrib):
3613 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
3614 if val.startswith('"'):
def urshift(val, n):
    """Unsigned (logical) right shift of `val` by `n`, 32-bit semantics.

    Python's >> is arithmetic (sign-preserving); adding 2**32 to a negative
    value first reproduces JavaScript's >>> operator.
    """
    if val < 0:
        val += 0x100000000
    return val >> n
3624 # Based on png2str() written by @gdkchan and improved by @yokrysty
3625 # Originally posted at https://github.com/rg3/youtube-dl/issues/9706
# Decode a PNG into (width, height, pixels) where pixels is a list of rows of
# channel byte values.  NOTE(review): this excerpt is non-contiguous -- the
# chunk-collection loop body, pixel-size/stride setup, and parts of the Paeth
# predictor are elided, so the text below is not runnable as shown.
3626 def decode_png(png_data):
3627 # Reference: https://www.w3.org/TR/PNG/
3628 header = png_data[8:]
# Validate the 8-byte PNG signature and that the first chunk is IHDR.
3630 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
3631 raise IOError('Not a valid PNG file.')
# Big-endian integer reader keyed on field width (1, 2 or 4 bytes).
3633 int_map = {1: '>B', 2: '>H', 4: '>I'}
3634 unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
# Walk the chunk stream: 4-byte length, 4-byte type, payload, 4-byte CRC.
3639 length = unpack_integer(header[:4])
3642 chunk_type = header[:4]
3645 chunk_data = header[:length]
3646 header = header[length:]
3648 header = header[4:] # Skip CRC
3656 ihdr = chunks[0]['data']
3658 width = unpack_integer(ihdr[:4])
3659 height = unpack_integer(ihdr[4:8])
# Concatenate all IDAT payloads before inflating.
3663 for chunk in chunks:
3664 if chunk['type'] == b'IDAT':
3665 idat += chunk['data']
3668 raise IOError('Unable to read PNG data.')
3670 decompressed_data = bytearray(zlib.decompress(idat))
3675 def _get_pixel(idx):
# Each scanline is 1 filter-type byte followed by `stride` data bytes.
3680 for y in range(height):
3681 basePos = y * (1 + stride)
3682 filter_type = decompressed_data[basePos]
3686 pixels.append(current_row)
3688 for x in range(stride):
3689 color = decompressed_data[1 + basePos + x]
3690 basex = y * stride + x
# Reconstruction neighbours: left (same row), up (previous row).
3695 left = _get_pixel(basex - 3)
3697 up = _get_pixel(basex - stride)
# Undo the per-scanline filter (PNG filter types 1-4).
3699 if filter_type == 1: # Sub
3700 color = (color + left) & 0xff
3701 elif filter_type == 2: # Up
3702 color = (color + up) & 0xff
3703 elif filter_type == 3: # Average
3704 color = (color + ((left + up) >> 1)) & 0xff
3705 elif filter_type == 4: # Paeth
3711 c = _get_pixel(basex - stride - 3)
3719 if pa <= pb and pa <= pc:
3720 color = (color + a) & 0xff
3722 color = (color + b) & 0xff
3724 color = (color + c) & 0xff
3726 current_row.append(color)
3728 return width, height, pixels
# Set an extended file attribute, trying (in order): the pyxattr/xattr Python
# modules, NTFS Alternate Data Streams on Windows, then the setfattr/xattr
# CLI tools.  Raises XAttrMetadataError on failure and XAttrUnavailableError
# when no mechanism is available.  NOTE(review): this excerpt is
# non-contiguous -- the try/except scaffolding around the imports and writes
# is partly elided, so the text below is not runnable as shown.
3731 def write_xattr(path, key, value):
3732 # This mess below finds the best xattr tool for the job
3734 # try the pyxattr module...
# Both the 'pyxattr' and 'xattr' PyPI modules import as `xattr`; the
# presence of a module-level set() distinguishes pyxattr.
3737 if hasattr(xattr, 'set'): # pyxattr
3738 # Unicode arguments are not supported in python-pyxattr until
3740 # See https://github.com/rg3/youtube-dl/issues/5498
3741 pyxattr_required_version = '0.5.0'
3742 if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
3743 # TODO: fallback to CLI tools
3744 raise XAttrUnavailableError(
3745 'python-pyxattr is detected but is too old. '
3746 'youtube-dl requires %s or above while your version is %s. '
3747 'Falling back to other xattr implementations' % (
3748 pyxattr_required_version, xattr.__version__))
3750 setxattr = xattr.set
3752 setxattr = xattr.setxattr
3755 setxattr(path, key, value)
3756 except EnvironmentError as e:
3757 raise XAttrMetadataError(e.errno, e.strerror)
3760 if compat_os_name == 'nt':
3761 # Write xattrs to NTFS Alternate Data Streams:
3762 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
# ':' is the ADS separator, so it must not appear in the attribute key.
3763 assert ':' not in key
3764 assert os.path.exists(path)
3766 ads_fn = path + ':' + key
3768 with open(ads_fn, 'wb') as f:
3770 except EnvironmentError as e:
3771 raise XAttrMetadataError(e.errno, e.strerror)
# Fall back to external CLI tools on other platforms.
3773 user_has_setfattr = check_executable('setfattr', ['--version'])
3774 user_has_xattr = check_executable('xattr', ['-h'])
3776 if user_has_setfattr or user_has_xattr:
3778 value = value.decode('utf-8')
3779 if user_has_setfattr:
3780 executable = 'setfattr'
3781 opts = ['-n', key, '-v', value]
3782 elif user_has_xattr:
3783 executable = 'xattr'
3784 opts = ['-w', key, value]
3786 cmd = ([encodeFilename(executable, True)] +
3787 [encodeArgument(o) for o in opts] +
3788 [encodeFilename(path, True)])
3791 p = subprocess.Popen(
3792 cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
3793 except EnvironmentError as e:
3794 raise XAttrMetadataError(e.errno, e.strerror)
3795 stdout, stderr = p.communicate()
3796 stderr = stderr.decode('utf-8', 'replace')
3797 if p.returncode != 0:
3798 raise XAttrMetadataError(p.returncode, stderr)
3801 # On Unix, and can't find pyxattr, setfattr, or xattr.
3802 if sys.platform.startswith('linux'):
3803 raise XAttrUnavailableError(
3804 "Couldn't find a tool to set the xattrs. "
3805 "Install either the python 'pyxattr' or 'xattr' "
3806 "modules, or the GNU 'attr' package "
3807 "(which contains the 'setfattr' tool).")
3809 raise XAttrUnavailableError(
3810 "Couldn't find a tool to set the xattrs. "
3811 "Install either the python 'xattr' module, "
3812 "or the 'xattr' binary.")
# Build a dict of random birthday components (as strings) keyed by the given
# field names.  NOTE(review): day is drawn 1-31 regardless of month --
# presumably acceptable for fabricated form data; confirm with callers.
# The `return {` opener and closing brace are elided from this excerpt.
3815 def random_birthday(year_field, month_field, day_field):
3817 year_field: str(random.randint(1950, 1995)),
3818 month_field: str(random.randint(1, 12)),
3819 day_field: str(random.randint(1, 31)),