2 # -*- coding: utf-8 -*-
4 from __future__ import unicode_literals
33 import xml.etree.ElementTree
39 compat_etree_fromstring,
44 compat_socket_create_connection,
48 compat_urllib_parse_urlparse,
49 compat_urllib_request,
55 # This is not clearly defined otherwise
56 compiled_regex_type = type(re.compile(''))
59 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)',
60 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
61 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
62 'Accept-Encoding': 'gzip, deflate',
63 'Accept-Language': 'en-us,en;q=0.5',
# Locale-independent English month names (January..December), so date parsing
# (e.g. month_by_name / month_by_abbreviation below) does not depend on the
# user's locale settings.
ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']
74 def preferredencoding():
75 """Get preferred encoding.
77 Returns the best encoding scheme for the system, based on
78 locale.getpreferredencoding() and some further tweaks.
81 pref = locale.getpreferredencoding()
89 def write_json_file(obj, fn):
90 """ Encode obj as JSON and write it to fn, atomically if possible """
92 fn = encodeFilename(fn)
93 if sys.version_info < (3, 0) and sys.platform != 'win32':
94 encoding = get_filesystem_encoding()
95 # os.path.basename returns a bytes object, but NamedTemporaryFile
96 # will fail if the filename contains non ascii characters unless we
97 # use a unicode object
98 path_basename = lambda f: os.path.basename(fn).decode(encoding)
99 # the same for os.path.dirname
100 path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
102 path_basename = os.path.basename
103 path_dirname = os.path.dirname
107 'prefix': path_basename(fn) + '.',
108 'dir': path_dirname(fn),
112 # In Python 2.x, json.dump expects a bytestream.
113 # In Python 3.x, it writes to a character stream
114 if sys.version_info < (3, 0):
122 tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
127 if sys.platform == 'win32':
128 # Need to remove existing file on Windows, else os.rename raises
129 # WindowsError or FileExistsError.
134 os.rename(tf.name, fn)
143 if sys.version_info >= (2, 7):
144 def find_xpath_attr(node, xpath, key, val=None):
145 """ Find the xpath xpath[@key=val] """
146 assert re.match(r'^[a-zA-Z_-]+$', key)
148 assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
149 expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
150 return node.find(expr)
152 def find_xpath_attr(node, xpath, key, val=None):
153 # Here comes the crazy part: In 2.6, if the xpath is a unicode,
154 # .//node does not match if a node is a direct child of . !
155 if isinstance(xpath, compat_str):
156 xpath = xpath.encode('ascii')
158 for f in node.findall(xpath):
159 if key not in f.attrib:
161 if val is None or f.attrib.get(key) == val:
165 # On python2.6 the xml.etree.ElementTree.Element methods don't support
166 # the namespace parameter
169 def xpath_with_ns(path, ns_map):
170 components = [c.split(':') for c in path.split('/')]
174 replaced.append(c[0])
177 replaced.append('{%s}%s' % (ns_map[ns], tag))
178 return '/'.join(replaced)
181 def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
182 def _find_xpath(xpath):
183 if sys.version_info < (2, 7): # Crazy 2.6
184 xpath = xpath.encode('ascii')
185 return node.find(xpath)
187 if isinstance(xpath, (str, compat_str)):
188 n = _find_xpath(xpath)
196 if default is not NO_DEFAULT:
199 name = xpath if name is None else name
200 raise ExtractorError('Could not find XML element %s' % name)
206 def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
207 n = xpath_element(node, xpath, name, fatal=fatal, default=default)
208 if n is None or n == default:
211 if default is not NO_DEFAULT:
214 name = xpath if name is None else name
215 raise ExtractorError('Could not find XML element\'s text %s' % name)
221 def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
222 n = find_xpath_attr(node, xpath, key)
224 if default is not NO_DEFAULT:
227 name = '%s[@%s]' % (xpath, key) if name is None else name
228 raise ExtractorError('Could not find XML attribute %s' % name)
def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    # Thin wrapper: an element's ID is just its "id" attribute, so delegate.
    # NOTE: the parameter name shadows the builtin `id`; kept as-is because
    # renaming would break callers that pass it by keyword.
    return get_element_by_attribute("id", id, html)
239 def get_element_by_attribute(attribute, value, html):
240 """Return the content of the tag with the specified attribute in the passed HTML document"""
242 m = re.search(r'''(?xs)
244 (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
246 (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
250 ''' % (re.escape(attribute), re.escape(value)), html)
254 res = m.group('content')
256 if res.startswith('"') or res.startswith("'"):
259 return unescapeHTML(res)
262 def clean_html(html):
263 """Clean an HTML snippet into a readable string"""
265 if html is None: # Convenience for sanitizing descriptions etc.
269 html = html.replace('\n', ' ')
270 html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
271 html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
273 html = re.sub('<.*?>', '', html)
274 # Replace html entities
275 html = unescapeHTML(html)
279 def sanitize_open(filename, open_mode):
280 """Try to open the given filename, and slightly tweak it if this fails.
282 Attempts to open the given filename. If this fails, it tries to change
283 the filename slightly, step by step, until it's either able to open it
284 or it fails and raises a final exception, like the standard open()
287 It returns the tuple (stream, definitive_file_name).
291 if sys.platform == 'win32':
293 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
294 return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
295 stream = open(encodeFilename(filename), open_mode)
296 return (stream, filename)
297 except (IOError, OSError) as err:
298 if err.errno in (errno.EACCES,):
301 # In case of error, try to remove win32 forbidden chars
302 alt_filename = sanitize_path(filename)
303 if alt_filename == filename:
306 # An exception here should be caught in the caller
307 stream = open(encodeFilename(alt_filename), open_mode)
308 return (stream, alt_filename)
311 def timeconvert(timestr):
312 """Convert RFC 2822 defined time string into system timestamp"""
314 timetuple = email.utils.parsedate_tz(timestr)
315 if timetuple is not None:
316 timestamp = email.utils.mktime_tz(timetuple)
320 def sanitize_filename(s, restricted=False, is_id=False):
321 """Sanitizes a string so it could be used as part of a filename.
322 If restricted is set, use a stricter subset of allowed characters.
323 Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
325 def replace_insane(char):
326 if char == '?' or ord(char) < 32 or ord(char) == 127:
329 return '' if restricted else '\''
331 return '_-' if restricted else ' -'
332 elif char in '\\/|*<>':
334 if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
336 if restricted and ord(char) > 127:
341 s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
342 result = ''.join(map(replace_insane, s))
344 while '__' in result:
345 result = result.replace('__', '_')
346 result = result.strip('_')
347 # Common case of "Foreign band name - English song title"
348 if restricted and result.startswith('-_'):
350 if result.startswith('-'):
351 result = '_' + result[len('-'):]
352 result = result.lstrip('.')
358 def sanitize_path(s):
359 """Sanitizes and normalizes path on Windows"""
360 if sys.platform != 'win32':
362 drive_or_unc, _ = os.path.splitdrive(s)
363 if sys.version_info < (2, 7) and not drive_or_unc:
364 drive_or_unc, _ = os.path.splitunc(s)
365 norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
369 path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part)
370 for path_part in norm_path]
372 sanitized_path.insert(0, drive_or_unc + os.path.sep)
373 return os.path.join(*sanitized_path)
376 def orderedSet(iterable):
377 """ Remove all duplicates from the input iterable """
385 def _htmlentity_transform(entity):
386 """Transforms an HTML entity to a character."""
387 # Known non-numeric HTML entity
388 if entity in compat_html_entities.name2codepoint:
389 return compat_chr(compat_html_entities.name2codepoint[entity])
391 mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
393 numstr = mobj.group(1)
394 if numstr.startswith('x'):
396 numstr = '0%s' % numstr
399 # See https://github.com/rg3/youtube-dl/issues/7518
401 return compat_chr(int(numstr, base))
405 # Unknown entity in name, return its literal representation
406 return ('&%s;' % entity)
412 assert type(s) == compat_str
415 r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s)
418 def get_subprocess_encoding():
419 if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
420 # For subprocess calls, encode with locale encoding
421 # Refer to http://stackoverflow.com/a/9951851/35070
422 encoding = preferredencoding()
424 encoding = sys.getfilesystemencoding()
430 def encodeFilename(s, for_subprocess=False):
432 @param s The name of the file
435 assert type(s) == compat_str
437 # Python 3 has a Unicode API
438 if sys.version_info >= (3, 0):
441 # Pass '' directly to use Unicode APIs on Windows 2000 and up
442 # (Detecting Windows NT 4 is tricky because 'major >= 4' would
443 # match Windows 9x series as well. Besides, NT 4 is obsolete.)
444 if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
447 return s.encode(get_subprocess_encoding(), 'ignore')
450 def decodeFilename(b, for_subprocess=False):
452 if sys.version_info >= (3, 0):
455 if not isinstance(b, bytes):
458 return b.decode(get_subprocess_encoding(), 'ignore')
461 def encodeArgument(s):
462 if not isinstance(s, compat_str):
463 # Legacy code that uses byte strings
464 # Uncomment the following line after fixing all post processors
465 # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
466 s = s.decode('ascii')
467 return encodeFilename(s, True)
470 def decodeArgument(b):
471 return decodeFilename(b, True)
474 def decodeOption(optval):
477 if isinstance(optval, bytes):
478 optval = optval.decode(preferredencoding())
480 assert isinstance(optval, compat_str)
484 def formatSeconds(secs):
486 return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
488 return '%d:%02d' % (secs // 60, secs % 60)
493 def make_HTTPS_handler(params, **kwargs):
494 opts_no_check_certificate = params.get('nocheckcertificate', False)
495 if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9
496 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
497 if opts_no_check_certificate:
498 context.check_hostname = False
499 context.verify_mode = ssl.CERT_NONE
501 return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
504 # (create_default_context present but HTTPSHandler has no context=)
507 if sys.version_info < (3, 2):
508 return YoutubeDLHTTPSHandler(params, **kwargs)
510 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
511 context.verify_mode = (ssl.CERT_NONE
512 if opts_no_check_certificate
513 else ssl.CERT_REQUIRED)
514 context.set_default_verify_paths()
515 return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
518 def bug_reports_message():
519 if ytdl_is_updateable():
520 update_cmd = 'type youtube-dl -U to update'
522 update_cmd = 'see https://yt-dl.org/update on how to update'
523 msg = '; please report this issue on https://yt-dl.org/bug .'
524 msg += ' Make sure you are using the latest version; %s.' % update_cmd
525 msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
529 class ExtractorError(Exception):
530 """Error during info extraction."""
532 def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
533 """ tb, if given, is the original traceback (so that it can be printed out).
534 If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
537 if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
539 if video_id is not None:
540 msg = video_id + ': ' + msg
542 msg += ' (caused by %r)' % cause
544 msg += bug_reports_message()
545 super(ExtractorError, self).__init__(msg)
548 self.exc_info = sys.exc_info() # preserve original exception
550 self.video_id = video_id
552 def format_traceback(self):
553 if self.traceback is None:
555 return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
    """Raised when no extractor is able to handle the given URL."""

    def __init__(self, url):
        message = 'Unsupported URL: %s' % url
        # expected=True: this is a normal user-facing error, not a bug,
        # so no bug-report boilerplate is appended to the message.
        super(UnsupportedError, self).__init__(message, expected=True)
565 class RegexNotFoundError(ExtractorError):
566 """Error when a regex didn't match"""
570 class DownloadError(Exception):
571 """Download Error exception.
573 This exception may be thrown by FileDownloader objects if they are not
574 configured to continue on errors. They will contain the appropriate
    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        # Preserved so callers can inspect or re-raise the underlying error.
        self.exc_info = exc_info
584 class SameFileError(Exception):
585 """Same File exception.
587 This exception will be thrown by FileDownloader objects if they detect
588 multiple files would have to be downloaded to the same file on disk.
593 class PostProcessingError(Exception):
594 """Post Processing exception.
596 This exception may be raised by PostProcessor's .run() method to
597 indicate an error in the postprocessing task.
600 def __init__(self, msg):
604 class MaxDownloadsReached(Exception):
605 """ --max-downloads limit has been reached. """
609 class UnavailableVideoError(Exception):
610 """Unavailable Format exception.
612 This exception will be thrown when a video is requested
613 in a format that is not available for that video.
618 class ContentTooShortError(Exception):
619 """Content Too Short exception.
621 This exception may be raised by FileDownloader objects when a file they
622 download is too small for what the server announced first, indicating
623 the connection was probably interrupted.
    def __init__(self, downloaded, expected):
        # Both are byte counts: what was actually received vs. what the
        # server's headers announced.
        self.downloaded = downloaded
        self.expected = expected
632 def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
633 # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
634 # expected HTTP responses to meet HTTP/1.0 or later (see also
635 # https://github.com/rg3/youtube-dl/issues/6727)
636 if sys.version_info < (3, 0):
637 kwargs[b'strict'] = True
638 hc = http_class(*args, **kwargs)
639 source_address = ydl_handler._params.get('source_address')
640 if source_address is not None:
641 sa = (source_address, 0)
642 if hasattr(hc, 'source_address'): # Python 2.7+
643 hc.source_address = sa
645 def _hc_connect(self, *args, **kwargs):
646 sock = compat_socket_create_connection(
647 (self.host, self.port), self.timeout, sa)
649 self.sock = ssl.wrap_socket(
650 sock, self.key_file, self.cert_file,
651 ssl_version=ssl.PROTOCOL_TLSv1)
654 hc.connect = functools.partial(_hc_connect, hc)
659 class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
660 """Handler for HTTP requests and responses.
662 This class, when installed with an OpenerDirector, automatically adds
663 the standard headers to every HTTP request and handles gzipped and
664 deflated responses from web servers. If compression is to be avoided in
665 a particular request, the original request in the program code only has
666 to include the HTTP header "Youtubedl-No-Compression", which will be
667 removed before making the real request.
669 Part of this code was copied from:
671 http://techknack.net/python-urllib2-handlers/
673 Andrew Rowls, the author of that code, agreed to release it to the
    def __init__(self, params, *args, **kwargs):
        # params is the options mapping; it is read later via
        # self._params.get(...) (e.g. 'source_address' in
        # _create_http_connection). Remaining args go to the base HTTPHandler.
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params
681 def http_open(self, req):
682 return self.do_open(functools.partial(
683 _create_http_connection, self, compat_http_client.HTTPConnection, False),
689 return zlib.decompress(data, -zlib.MAX_WBITS)
691 return zlib.decompress(data)
694 def addinfourl_wrapper(stream, headers, url, code):
695 if hasattr(compat_urllib_request.addinfourl, 'getcode'):
696 return compat_urllib_request.addinfourl(stream, headers, url, code)
697 ret = compat_urllib_request.addinfourl(stream, headers, url)
701 def http_request(self, req):
702 # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
703 # always respected by websites, some tend to give out URLs with non percent-encoded
704 # non-ASCII characters (see telemb.py, ard.py [#3412])
705 # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
706 # To work around aforementioned issue we will replace request's original URL with
707 # percent-encoded one
708 # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
709 # the code of this workaround has been moved here from YoutubeDL.urlopen()
710 url = req.get_full_url()
711 url_escaped = escape_url(url)
713 # Substitute URL if any change after escaping
714 if url != url_escaped:
715 req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
717 url_escaped, data=req.data, headers=req.headers,
718 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
719 new_req.timeout = req.timeout
722 for h, v in std_headers.items():
723 # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
724 # The dict keys are capitalized because of this bug by urllib
725 if h.capitalize() not in req.headers:
727 if 'Youtubedl-no-compression' in req.headers:
728 if 'Accept-encoding' in req.headers:
729 del req.headers['Accept-encoding']
730 del req.headers['Youtubedl-no-compression']
732 if sys.version_info < (2, 7) and '#' in req.get_full_url():
733 # Python 2.6 is brain-dead when it comes to fragments
734 req._Request__original = req._Request__original.partition('#')[0]
735 req._Request__r_type = req._Request__r_type.partition('#')[0]
739 def http_response(self, req, resp):
742 if resp.headers.get('Content-encoding', '') == 'gzip':
743 content = resp.read()
744 gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
746 uncompressed = io.BytesIO(gz.read())
747 except IOError as original_ioerror:
748 # There may be junk add the end of the file
749 # See http://stackoverflow.com/q/4928560/35070 for details
750 for i in range(1, 1024):
752 gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
753 uncompressed = io.BytesIO(gz.read())
758 raise original_ioerror
759 resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
760 resp.msg = old_resp.msg
762 if resp.headers.get('Content-encoding', '') == 'deflate':
763 gz = io.BytesIO(self.deflate(resp.read()))
764 resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
765 resp.msg = old_resp.msg
766 # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
767 # https://github.com/rg3/youtube-dl/issues/6457).
768 if 300 <= resp.code < 400:
769 location = resp.headers.get('Location')
771 # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
772 if sys.version_info >= (3, 0):
773 location = location.encode('iso-8859-1').decode('utf-8')
774 location_escaped = escape_url(location)
775 if location != location_escaped:
776 del resp.headers['Location']
777 resp.headers['Location'] = location_escaped
780 https_request = http_request
781 https_response = http_response
784 class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        # Allow injecting a custom HTTPS connection class; fall back to the
        # compat HTTPSConnection otherwise.
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        # Options mapping, read via self._params.get(...) when opening
        # connections (see _create_http_connection).
        self._params = params
790 def https_open(self, req):
792 if hasattr(self, '_context'): # python > 2.6
793 kwargs['context'] = self._context
794 if hasattr(self, '_check_hostname'): # python 3.x
795 kwargs['check_hostname'] = self._check_hostname
796 return self.do_open(functools.partial(
797 _create_http_connection, self, self._https_conn_class, True),
801 class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        # Plain pass-through; the stdlib base class creates a default
        # CookieJar when cookiejar is None.
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
805 def http_response(self, request, response):
806 # Python 2 will choke on next HTTP request in row if there are non-ASCII
807 # characters in Set-Cookie HTTP header of last response (see
808 # https://github.com/rg3/youtube-dl/issues/6769).
809 # In order to at least prevent crashing we will percent encode Set-Cookie
810 # header before HTTPCookieProcessor starts processing it.
811 # if sys.version_info < (3, 0) and response.headers:
812 # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
813 # set_cookie = response.headers.get(set_cookie_header)
815 # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
816 # if set_cookie != set_cookie_escaped:
817 # del response.headers[set_cookie_header]
818 # response.headers[set_cookie_header] = set_cookie_escaped
819 return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
821 https_request = compat_urllib_request.HTTPCookieProcessor.http_request
822 https_response = http_response
825 def parse_iso8601(date_str, delimiter='T', timezone=None):
826 """ Return a UNIX timestamp from the given date """
831 date_str = re.sub(r'\.[0-9]+', '', date_str)
835 r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
838 timezone = datetime.timedelta()
840 date_str = date_str[:-len(m.group(0))]
841 if not m.group('sign'):
842 timezone = datetime.timedelta()
844 sign = 1 if m.group('sign') == '+' else -1
845 timezone = datetime.timedelta(
846 hours=sign * int(m.group('hours')),
847 minutes=sign * int(m.group('minutes')))
849 date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
850 dt = datetime.datetime.strptime(date_str, date_format) - timezone
851 return calendar.timegm(dt.timetuple())
856 def unified_strdate(date_str, day_first=True):
857 """Return a string with the date in the format YYYYMMDD"""
863 date_str = date_str.replace(',', ' ')
864 # %z (UTC offset) is only supported in python>=3.2
865 if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str):
866 date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
867 # Remove AM/PM + timezone
868 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
870 format_expressions = [
875 '%b %dst %Y %I:%M%p',
876 '%b %dnd %Y %I:%M%p',
877 '%b %dth %Y %I:%M%p',
883 '%Y-%m-%d %H:%M:%S.%f',
886 '%Y-%m-%dT%H:%M:%SZ',
887 '%Y-%m-%dT%H:%M:%S.%fZ',
888 '%Y-%m-%dT%H:%M:%S.%f0Z',
890 '%Y-%m-%dT%H:%M:%S.%f',
894 format_expressions.extend([
902 format_expressions.extend([
909 for expression in format_expressions:
911 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
914 if upload_date is None:
915 timetuple = email.utils.parsedate_tz(date_str)
917 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
918 if upload_date is not None:
919 return compat_str(upload_date)
922 def determine_ext(url, default_ext='unknown_video'):
925 guess = url.partition('?')[0].rpartition('.')[2]
926 if re.match(r'^[A-Za-z0-9]+$', guess):
def subtitles_filename(filename, sub_lang, sub_format):
    """Derive a subtitle filename: the media file's stem plus
    '.<language>.<format>' (e.g. 'video.mp4' -> 'video.en.vtt')."""
    # rsplit keeps everything before the LAST dot; a name without any dot
    # is used unchanged as the stem.
    stem = filename.rsplit('.', 1)[0]
    return '.'.join((stem, sub_lang, sub_format))
936 def date_from_str(date_str):
938 Return a datetime object from a string in the format YYYYMMDD or
939 (now|today)[+-][0-9](day|week|month|year)(s)?"""
940 today = datetime.date.today()
941 if date_str in ('now', 'today'):
943 if date_str == 'yesterday':
944 return today - datetime.timedelta(days=1)
945 match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
946 if match is not None:
947 sign = match.group('sign')
948 time = int(match.group('time'))
951 unit = match.group('unit')
952 # A bad aproximation?
960 delta = datetime.timedelta(**{unit: time})
962 return datetime.datetime.strptime(date_str, "%Y%m%d").date()
965 def hyphenate_date(date_str):
967 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
968 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
969 if match is not None:
970 return '-'.join(match.groups())
975 class DateRange(object):
976 """Represents a time interval between two dates"""
978 def __init__(self, start=None, end=None):
979 """start and end must be strings in the format accepted by date"""
980 if start is not None:
981 self.start = date_from_str(start)
983 self.start = datetime.datetime.min.date()
985 self.end = date_from_str(end)
987 self.end = datetime.datetime.max.date()
988 if self.start > self.end:
989 raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
993 """Returns a range that only contains the given day"""
    def __contains__(self, date):
        """Check if the date is in the range"""
        # Accept either a datetime.date or any string understood by
        # date_from_str() ('YYYYMMDD', 'now', 'today', relative forms).
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        # Inclusive on both ends.
        return self.start <= date <= self.end
1003 return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
1006 def platform_name():
1007 """ Returns the platform name as a compat_str """
1008 res = platform.platform()
1009 if isinstance(res, bytes):
1010 res = res.decode(preferredencoding())
1012 assert isinstance(res, compat_str)
1016 def _windows_write_string(s, out):
1017 """ Returns True if the string was written using special methods,
1018 False if it has yet to be written out."""
1019 # Adapted from http://stackoverflow.com/a/3259271/35070
1022 import ctypes.wintypes
1030 fileno = out.fileno()
1031 except AttributeError:
1032 # If the output stream doesn't have a fileno, it's virtual
1034 except io.UnsupportedOperation:
1035 # Some strange Windows pseudo files?
1037 if fileno not in WIN_OUTPUT_IDS:
1040 GetStdHandle = ctypes.WINFUNCTYPE(
1041 ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
1042 (b"GetStdHandle", ctypes.windll.kernel32))
1043 h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
1045 WriteConsoleW = ctypes.WINFUNCTYPE(
1046 ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
1047 ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
1048 ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
1049 written = ctypes.wintypes.DWORD(0)
1051 GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
1052 FILE_TYPE_CHAR = 0x0002
1053 FILE_TYPE_REMOTE = 0x8000
1054 GetConsoleMode = ctypes.WINFUNCTYPE(
1055 ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
1056 ctypes.POINTER(ctypes.wintypes.DWORD))(
1057 (b"GetConsoleMode", ctypes.windll.kernel32))
1058 INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
1060 def not_a_console(handle):
1061 if handle == INVALID_HANDLE_VALUE or handle is None:
1063 return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
1064 GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
1066 if not_a_console(h):
1069 def next_nonbmp_pos(s):
1071 return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
1072 except StopIteration:
1076 count = min(next_nonbmp_pos(s), 1024)
1078 ret = WriteConsoleW(
1079 h, s, count if count else 2, ctypes.byref(written), None)
1081 raise OSError('Failed to write string')
1082 if not count: # We just wrote a non-BMP character
1083 assert written.value == 2
1086 assert written.value > 0
1087 s = s[written.value:]
1091 def write_string(s, out=None, encoding=None):
1094 assert type(s) == compat_str
1096 if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
1097 if _windows_write_string(s, out):
1100 if ('b' in getattr(out, 'mode', '') or
1101 sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
1102 byt = s.encode(encoding or preferredencoding(), 'ignore')
1104 elif hasattr(out, 'buffer'):
1105 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
1106 byt = s.encode(enc, 'ignore')
1107 out.buffer.write(byt)
1113 def bytes_to_intlist(bs):
1116 if isinstance(bs[0], int): # Python 3
1119 return [ord(c) for c in bs]
1122 def intlist_to_bytes(xs):
1125 return struct_pack('%dB' % len(xs), *xs)
1128 # Cross-platform file locking
1129 if sys.platform == 'win32':
1130 import ctypes.wintypes
1133 class OVERLAPPED(ctypes.Structure):
1135 ('Internal', ctypes.wintypes.LPVOID),
1136 ('InternalHigh', ctypes.wintypes.LPVOID),
1137 ('Offset', ctypes.wintypes.DWORD),
1138 ('OffsetHigh', ctypes.wintypes.DWORD),
1139 ('hEvent', ctypes.wintypes.HANDLE),
1142 kernel32 = ctypes.windll.kernel32
1143 LockFileEx = kernel32.LockFileEx
1144 LockFileEx.argtypes = [
1145 ctypes.wintypes.HANDLE, # hFile
1146 ctypes.wintypes.DWORD, # dwFlags
1147 ctypes.wintypes.DWORD, # dwReserved
1148 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1149 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1150 ctypes.POINTER(OVERLAPPED) # Overlapped
1152 LockFileEx.restype = ctypes.wintypes.BOOL
1153 UnlockFileEx = kernel32.UnlockFileEx
1154 UnlockFileEx.argtypes = [
1155 ctypes.wintypes.HANDLE, # hFile
1156 ctypes.wintypes.DWORD, # dwReserved
1157 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1158 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1159 ctypes.POINTER(OVERLAPPED) # Overlapped
1161 UnlockFileEx.restype = ctypes.wintypes.BOOL
1162 whole_low = 0xffffffff
1163 whole_high = 0x7fffffff
    def _lock_file(f, exclusive):
        # Lock the whole file (offset 0, length whole_low/whole_high) through
        # the Win32 LockFileEx API; 0x2 is LOCKFILE_EXCLUSIVE_LOCK, 0x0 takes
        # a shared lock.
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        # Stash the OVERLAPPED pointer on the file object: it must stay alive
        # for the lock's lifetime and _unlock_file() passes the same structure
        # back to UnlockFileEx.
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())
    def _unlock_file(f):
        # Must only be called after _lock_file(): it relies on the OVERLAPPED
        # pointer stashed there to release the same byte range.
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
1186 def _lock_file(f, exclusive):
1187 fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
    def _unlock_file(f):
        # Release the advisory lock (shared or exclusive) taken by _lock_file().
        fcntl.flock(f, fcntl.LOCK_UN)
1193 class locked_file(object):
1194 def __init__(self, filename, mode, encoding=None):
1195 assert mode in ['r', 'a', 'w']
1196 self.f = io.open(filename, mode, encoding=encoding)
1199 def __enter__(self):
1200 exclusive = self.mode != 'r'
1202 _lock_file(self.f, exclusive)
1208 def __exit__(self, etype, value, traceback):
1210 _unlock_file(self.f)
    def write(self, *args):
        # Delegate to the wrapped file object; callers are expected to use
        # the context-manager protocol so the file lock is held here.
        return self.f.write(*args)
    def read(self, *args):
        # Delegate to the wrapped file object; callers are expected to use
        # the context-manager protocol so the file lock is held here.
        return self.f.read(*args)
def get_filesystem_encoding():
    """Return sys.getfilesystemencoding(), falling back to 'utf-8' when the
    interpreter reports None (possible on some Python 2 setups)."""
    fs_encoding = sys.getfilesystemencoding()
    if fs_encoding is None:
        return 'utf-8'
    return fs_encoding
1229 def shell_quote(args):
1231 encoding = get_filesystem_encoding()
1233 if isinstance(a, bytes):
1234 # We may get a filename encoded with 'encodeFilename'
1235 a = a.decode(encoding)
1236 quoted_args.append(pipes.quote(a))
1237 return ' '.join(quoted_args)
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """
    # JSON-encode the payload and tuck it into the fragment, where servers
    # never see it; unsmuggle_url() reverses this.
    payload = json.dumps(data)
    query = compat_urllib_parse.urlencode({'__youtubedl_smuggle': payload})
    return '%s#%s' % (url, query)
1248 def unsmuggle_url(smug_url, default=None):
1249 if '#__youtubedl_smuggle' not in smug_url:
1250 return smug_url, default
1251 url, _, sdata = smug_url.rpartition('#')
1252 jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
1253 data = json.loads(jsond)
def format_bytes(bytes):
    """Format a byte count as a human-readable string ('1.00KiB').

    Accepts int/float/str; returns 'N/A' for None.
    """
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    # Guard: math.log(0) raises ValueError, so treat 0 specially.
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)
def parse_filesize(s):
    """Parse a human-readable file size ('5.5MiB', '1,42 GB') into bytes.

    Returns None for None input or when no size expression is found.
    """
    if s is None:
        return None

    # Unit multipliers: 'XiB' is binary (IEC), 'XB'/'Xb' decimal (SI).
    # The lower-case forms are of course incorrect and inofficial,
    # but we support those too (treated as binary, matching upstream usage).
    _UNIT_TABLE = {'B': 1, 'b': 1}
    for exp, prefix in enumerate('KMGTPEZY', 1):
        _UNIT_TABLE[prefix + 'iB'] = 1024 ** exp
        _UNIT_TABLE[prefix + 'B'] = 1000 ** exp
        _UNIT_TABLE[prefix.lower() + 'B'] = 1024 ** exp
        _UNIT_TABLE[prefix + 'b'] = 1000 ** exp

    units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE)
    m = re.search(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s)
    if not m:
        return None

    # Accept ',' as decimal separator (e.g. '1,42 GB').
    num_str = m.group('num').replace(',', '.')
    mult = _UNIT_TABLE[m.group('unit')]
    return int(float(num_str) * mult)
def month_by_name(name):
    """ Return the number of a month by (locale-independently) English name """
    try:
        return ENGLISH_MONTH_NAMES.index(name) + 1
    except ValueError:
        # Restores the guard elided in this excerpt: unknown name -> None.
        return None
def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviation (e.g. 'Jan' -> 1) """
    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        # Restores the guard elided in this excerpt: unknown abbrev -> None.
        return None
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML, leaving existing entities
    (named, decimal and hex character references) untouched."""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)
def setproctitle(title):
    """Best-effort: rename the current process via Linux prctl(PR_SET_NAME).

    Silently does nothing on non-Linux systems or exotic libc builds.
    """
    assert isinstance(title, compat_str)
    try:
        libc = ctypes.cdll.LoadLibrary("libc.so.6")
    except OSError:
        return  # No libc / not Linux -- skip quietly
    title_bytes = title.encode('utf-8')
    # Fix: allocate one extra byte so the buffer is NUL-terminated; prctl
    # expects a C string and a non-terminated buffer can leak garbage bytes.
    buf = ctypes.create_string_buffer(len(title_bytes) + 1)
    buf.value = title_bytes
    try:
        # 15 == PR_SET_NAME
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
def remove_start(s, start):
    """Strip *start* from the beginning of *s* if present, else return *s*."""
    if s.startswith(start):
        return s[len(start):]
    # Restores the fall-through elided in this excerpt (was returning None).
    return s
def remove_end(s, end):
    """Strip *end* from the end of *s* if present, else return *s*."""
    # The `end` truthiness guard fixes the empty-suffix case: s[:-0] would
    # wrongly return '' instead of s.
    if end and s.endswith(end):
        return s[:-len(end)]
    return s
def url_basename(url):
    """Return the last path component of *url* ('' when the path is empty)."""
    parsed_path = compat_urlparse.urlparse(url).path
    segments = parsed_path.strip('/').split('/')
    return segments[-1]
class HEADRequest(compat_urllib_request.Request):
    """A Request subclass that always issues an HTTP HEAD request."""
    def get_method(self):
        # Restores the method body elided in this excerpt.
        return 'HEAD'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """Return int(v) * invscale // scale, or *default* when v is None.

    When *get_attr* is given, the named attribute of v is converted instead
    of v itself (missing attribute -> default).
    """
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    return default if v is None else int(v) * invscale // scale
def str_or_none(v, default=None):
    """Return compat_str(v), or *default* when v is None."""
    if v is None:
        return default
    return compat_str(v)
def str_to_int(int_str):
    """ A more relaxed version of int_or_none: strips thousands separators
    (',', '.') and '+' signs before converting; None stays None. """
    if int_str is None:
        return None
    int_str = re.sub(r'[,\.\+]', '', int_str)
    return int(int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
    """Return float(v) * invscale / scale, or *default* when v is None."""
    if v is None:
        return default
    return float(v) * invscale / scale
def parse_duration(s):
    # Parse a human-readable duration ('12', '1:23', '3 min', '2.5 hours',
    # '1:02:03.05') into a number of seconds.
    # NOTE(review): this excerpt elides several lines (the regex
    # prologue/epilogue, the match guard, `res = 0` and the final return);
    # only the visible fragments are documented.
    if not isinstance(s, compat_basestring):
            (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*|
            (?P<only_hours>[0-9.]+)\s*(?:hours?)|
            \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*|
                (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)?
                (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*
            (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
        (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?
    # 'N min' / 'N hours' forms short-circuit with a scaled float.
    if m.group('only_mins'):
        return float_or_none(m.group('only_mins'), invscale=60)
    if m.group('only_hours'):
        return float_or_none(m.group('only_hours'), invscale=60 * 60)
        res += int(m.group('secs'))
    if m.group('mins_reversed'):
        res += int(m.group('mins_reversed')) * 60
        res += int(m.group('mins')) * 60
    if m.group('hours'):
        res += int(m.group('hours')) * 60 * 60
    if m.group('hours_reversed'):
        res += int(m.group('hours_reversed')) * 60 * 60
        res += int(m.group('days')) * 24 * 60 * 60
        res += float(m.group('ms'))
def prepend_extension(filename, ext, expected_real_ext=None):
    """Insert *ext* before the real extension ('a.mp4' -> 'a.temp.mp4').

    When *expected_real_ext* is given and does not match the actual
    extension, *ext* is appended after the whole filename instead.
    """
    name, real_ext = os.path.splitext(filename)
    # Restores the `return (` elided in this excerpt.
    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))
def replace_extension(filename, ext, expected_real_ext=None):
    """Replace the extension of *filename* with *ext*.

    When *expected_real_ext* is given and does not match the actual
    extension, *ext* is appended to the whole filename instead.
    """
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)
def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    # NOTE: the mutable default `args=[]` is never mutated, so it is safe;
    # kept for signature compatibility.
    try:
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        # Restores the guard elided in this excerpt: missing binary -> False.
        return False
    return exe
def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    # NOTE: `args` default is never mutated; kept for signature compatibility.
    try:
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        # Restores the guard elided in this excerpt: missing binary -> False.
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)
def detect_exe_version(output, version_re=None, unrecognized='present'):
    """Extract a version string from a binary's *output*.

    Uses *version_re* (default: 'version <token>'); returns *unrecognized*
    when no version can be found.
    """
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    # Restores the tail elided in this excerpt.
    if m:
        return m.group(1)
    else:
        return unrecognized
class PagedList(object):
    """Abstract base class for lazily paged result lists."""
    def getslice(self, start=0, end=None):
        # Restores the abstract method elided in this excerpt; defaults allow
        # the zero-argument call in __len__.
        raise NotImplementedError('This method must be implemented by subclasses')

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())
class OnDemandPagedList(PagedList):
    # PagedList that calls *pagefunc* lazily, page by page, as getslice()
    # walks forward.
    # NOTE(review): this excerpt elides several lines (the `res = []`
    # initialisation, the startv/endv assignments with their `else` branches,
    # `break` statements and the final return); only the visible code is
    # documented here.
    def __init__(self, pagefunc, pagesize):
        self._pagefunc = pagefunc  # callable: page number -> iterable of entries
        self._pagesize = pagesize  # entries per page

    def getslice(self, start=0, end=None):
        for pagenum in itertools.count(start // self._pagesize):
            # Index range [firstid, nextfirstid) covered by this page.
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:

            page_results = list(self._pagefunc(pagenum))
                start % self._pagesize
                if firstid <= start < nextfirstid
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
class InAdvancePagedList(PagedList):
    # PagedList variant where the total page count is known up front.
    # NOTE(review): this excerpt elides several lines (the `res = []`
    # initialisation, the `end_page = min(` opener, the skip_elems guard and
    # reset, `break`, the append and the final return); only visible code is
    # documented.
    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc    # callable: page number -> iterable
        self._pagecount = pagecount  # total number of pages available
        self._pagesize = pagesize    # entries per page

    def getslice(self, start=0, end=None):
        start_page = start // self._pagesize
            self._pagecount if end is None else (end // self._pagesize + 1))
        # Offset of `start` inside the first fetched page.
        skip_elems = start - start_page * self._pagesize
        # Remaining number of wanted entries (None = unbounded).
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
                page = page[skip_elems:]
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                    page = page[:only_more]
def uppercase_escape(s):
    """Decode '\\UXXXXXXXX' escape sequences embedded in *s*."""
    unicode_escape = codecs.getdecoder('unicode_escape')
    # Restores the `return re.sub(` elided in this excerpt.
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)
def lowercase_escape(s):
    """Decode '\\uXXXX' escape sequences embedded in *s*."""
    unicode_escape = codecs.getdecoder('unicode_escape')
    # Restores the `return re.sub(` elided in this excerpt.
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    # Python 2's quote() needs a byte string for non-ASCII input.
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    safe_chars = b"%/;:@&=+$,!~*'()?#[]"
    return compat_urllib_parse.quote(s, safe_chars)
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    # Restores the `.geturl()` call elided in this excerpt: without it the
    # function would return a ParseResult instead of a string.
    return url_parsed._replace(
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()
# Feature probe: on Python 2.6 (and some 2.7 builds) struct.pack() rejects
# unicode format strings, so encoding wrappers are installed instead of the
# plain struct functions.
# NOTE(review): the `try:` / `except TypeError:` / `else:` scaffolding is
# elided in this excerpt.
struct.pack('!I', 0)
    # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
    def struct_pack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.pack(spec, *args)

    def struct_unpack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.unpack(spec, *args)
struct_pack = struct.pack
struct_unpack = struct.unpack
def read_batch_urls(batch_fd):
    """Read a batch file and return its list of URLs.

    Decodes bytes lines, strips a UTF-8 BOM and whitespace, and skips
    empty/comment lines ('#', ';', ']' prefixes). Closes *batch_fd*.
    """
    # Restores the inner helper's def line and returns elided in this excerpt.
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
    """URL-encode the given arguments for use as a POST body (ASCII bytes)."""
    encoded = compat_urllib_parse.urlencode(*args, **kargs)
    return encoded.encode('ascii')
def encode_dict(d, encoding='utf-8'):
    """Return a copy of *d* with both keys and values encoded to bytes."""
    return {k.encode(encoding): v.encode(encoding) for k, v in d.items()}
def parse_age_limit(s):
    """Parse an age limit like '18' or '16+' into an int.

    Falls back to the US_RATINGS table for MPAA-style strings; None in,
    None out.
    """
    # Restores the None guard elided in this excerpt.
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else US_RATINGS.get(s, None)
def strip_jsonp(code):
    """Strip a JSONP callback wrapper ('cb({...});') and return the payload."""
    # Restores the `return re.sub(` elided in this excerpt.
    return re.sub(
        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
def js_to_json(code):
    # Convert a JavaScript object literal into valid JSON: re-quote
    # single-quoted strings, quote bare identifiers and drop trailing commas.
    # NOTE(review): this excerpt elides several lines of the inner fix_kv
    # helper (its def line, the group extraction, the escape-mapping dict
    # body and the returns) and the final return; only visible code is
    # documented.
    # JSON keywords pass through unchanged.
        if v in ('true', 'false', 'null'):
        if v.startswith('"'):
            v = re.sub(r"\\'", "'", v[1:-1])
        elif v.startswith("'"):
            # Re-escape a single-quoted string for double quoting.
            v = re.sub(r"\\\\|\\'|\"", lambda m: {
    # Tokenise strings and bare identifiers, rewriting each via fix_kv.
    res = re.sub(r'''(?x)
        "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'|
        [a-zA-Z_][.a-zA-Z_0-9]*
    # Remove trailing commas before ']' or '}'.
    res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res)
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    # Restores the closure scaffolding elided in this excerpt.
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            # Unknown quality sorts below everything known.
            return -1
    return q
# Default output filename template: "<title>-<id>.<ext>".
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
def limit_length(s, length):
    """ Add ellipses to overly long strings """
    # Restores the guards elided in this excerpt.
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s
def version_tuple(v):
    """Split a version string on '.' / '-' and return a tuple of ints."""
    parts = re.split(r'[-.]', v)
    return tuple(map(int, parts))
def is_outdated_version(version, limit, assume_new=True):
    """Return True when *version* is older than *limit*.

    Empty/unparsable versions yield `not assume_new` (i.e. assumed new by
    default).
    """
    # Restores the guard and try/except scaffolding elided in this excerpt.
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new
def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter

    loaded_from_zip = isinstance(globals().get('__loader__'), zipimporter)
    is_frozen = hasattr(sys, 'frozen')
    return loaded_from_zip or is_frozen
def args_to_str(args):
    # Get a short string representation for a subprocess command
    quoted = [shlex_quote(a) for a in args]
    return ' '.join(quoted)
def mimetype2ext(mt):
    """Map a MIME type to a file extension, defaulting to the subtype itself."""
    _, _, res = mt.rpartition('/')

    # NOTE(review): most of this mapping table is elided in the excerpt;
    # rebuilt from the visible entry plus common companions -- verify
    # against the full table upstream.
    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'srt': 'srt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-wmv': 'wmv',
    }.get(res, res)
def urlhandle_detect_ext(url_handle):
    """Guess a file extension for *url_handle* from its response headers.

    Prefers the Content-Disposition filename, falls back to Content-Type.
    """
    try:
        url_handle.headers
        getheader = lambda h: url_handle.headers[h]
    except AttributeError:  # Python < 3
        getheader = url_handle.info().getheader

    # Restores the guard scaffolding elided in this excerpt.
    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
    """Build a base64 data: URI for the given bytes and MIME type."""
    b64 = base64.b64encode(data).decode('ascii')
    return 'data:%s;base64,%s' % (mime_type, b64)
def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        # Restores the `return False` elided in this excerpt.
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    # Try BOM-indicated encodings first (longest BOMs before their prefixes).
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        # No BOM: assume UTF-8 with lossy decoding.
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
    """Infer the download protocol for a format dict.

    Honours an explicit 'protocol' key, then the URL scheme/extension.
    """
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = info_dict['url']
    # Restores the returns elided in this excerpt.
    if url.startswith('rtmp'):
        return 'rtmp'
    elif url.startswith('mms'):
        return 'mms'
    elif url.startswith('rtsp'):
        return 'rtsp'

    ext = determine_ext(url)
    if ext == 'm3u8':
        return 'm3u8'
    elif ext == 'f4m':
        return 'f4m'

    return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    rows = [header_row] + data
    # Widest cell per column determines that column's padding.
    widths = []
    for col in zip(*rows):
        widths.append(max(len(compat_str(v)) for v in col))
    fmt_parts = ['%-' + compat_str(w + 1) + 's' for w in widths[:-1]]
    format_str = ' '.join(fmt_parts) + '%s'
    rendered = [format_str % tuple(row) for row in rows]
    return '\n'.join(rendered)
def _match_one(filter_part, dct):
    # Evaluate a single filter expression (e.g. "duration > 600", "!is_live")
    # against dict *dct*.
    # NOTE(review): this excerpt elides several lines (the comparison and
    # unary operator tables' bodies, `if m:` guards, raise lines and
    # try/except around int parsing); only visible code is documented.
    COMPARISON_OPERATORS = {
    operator_rex = re.compile(r'''(?x)\s*
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
        op = COMPARISON_OPERATORS[m.group('op')]
        if m.group('strval') is not None:
            # Only equality operators make sense for string comparisons.
            if m.group('op') not in ('=', '!='):
                    'Operator %s does not support string values!' % m.group('op'))
            comparison_value = m.group('strval')
                comparison_value = int(m.group('intval'))
                # Fall back to parsing suffixed sizes ("500k", "1.2MiB").
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        actual_value = dct.get(m.group('key'))
        if actual_value is None:
            # Missing key: match only when the '?' suffix was given.
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)

        '': lambda v: v is not None,
        '!': lambda v: v is None,
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
    # Restores the `return all(` elided in this excerpt: every '&'-separated
    # part must match.
    return all(
        _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
def match_filter_func(filter_str):
    """Build a --match-filter callback: returns None when *info_dict* passes
    *filter_str*, otherwise a human-readable skip message."""
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        else:
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    # Restores the `return _match_func` elided in this excerpt.
    return _match_func
def parse_dfxp_time_expr(time_expr):
    """Parse a DFXP/TTML time expression ('3.5s' or 'HH:MM:SS.mmm') into
    seconds; returns None for empty/unrecognised input."""
    if not time_expr:
        return

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3))
def srt_subtitles_timecode(seconds):
    """Format a second count as an SRT timecode ('HH:MM:SS,mmm')."""
    hours = seconds / 3600
    minutes = (seconds % 3600) / 60
    secs = seconds % 60
    millis = (seconds % 1) * 1000
    return '%02d:%02d:%02d,%03d' % (hours, minutes, secs, millis)
def dfxp2srt(dfxp_data):
    # Convert DFXP/TTML subtitle markup into SRT text.
    # NOTE(review): this excerpt elides several lines (the ns_map closing,
    # the child-iteration loop header, `else:`, `out = []`, the index/text
    # arguments of the append and the final return); only visible code is
    # documented.
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',

    def parse_node(node):
        # Flatten a <p>/<span> node into plain text, turning <br> into '\n'.
        str_or_empty = functools.partial(str_or_none, default='')

        out = str_or_empty(node.text)
            if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                out += '\n' + str_or_empty(child.tail)
            elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'):
                out += str_or_empty(parse_node(child))
                # Unknown element: keep its serialized markup verbatim.
                out += str_or_empty(xml.etree.ElementTree.tostring(child))

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))

    # Paragraphs may be namespaced (ttml/ttaf1) or bare.
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib['begin'])
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
            # No explicit end: derive it from the duration attribute.
            end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur'])
        out.append('%d\n%s --> %s\n%s\n\n' % (
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
def cli_option(params, command_option, param):
    """Return [command_option, value] when params[param] is set, else []."""
    value = params.get(param)
    if value is None:
        return []
    return [command_option, value]
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    """Render a boolean param as CLI args, joined by *separator* if given."""
    param = params.get(param)
    assert isinstance(param, bool)
    # Restores the `if separator:` elided in this excerpt.
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]
def cli_valueless_option(params, command_option, param, expected_value=True):
    """Return [command_option] when params[param] equals *expected_value*."""
    value = params.get(param)
    if value == expected_value:
        return [command_option]
    return []
def cli_configuration_args(params, param, default=[]):
    """Return the list of extra CLI args stored in params[param], or *default*."""
    ex_args = params.get(param)
    # Restores the guard and return elided in this excerpt.
    if ex_args is None:
        return default
    assert isinstance(ex_args, list)
    return ex_args
class ISO639Utils(object):
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    # NOTE(review): the _lang_map table, the @classmethod decorators and the
    # `return short_name` line are elided in this excerpt.
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        return cls._lang_map.get(code[:2])

    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        # Reverse lookup over the (elided) _lang_map table.
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
class ISO3166Utils(object):
    # From http://data.okfn.org/data/core/country-list
    # Maps upper-case ISO 3166-1 alpha-2 codes to English country names.
    # NOTE(review): the `_country_map = {` opener and many entries are elided
    # in this excerpt; only the visible entries appear below.
    # NOTE(review): the 'AX' entry looks mojibake-encoded ('Ã…land' instead
    # of 'Åland') -- worth fixing at the data source; left untouched here.
        'AF': 'Afghanistan',
        'AX': 'Ã…land Islands',
        'AS': 'American Samoa',
        'AG': 'Antigua and Barbuda',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BV': 'Bouvet Island',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BF': 'Burkina Faso',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CI': 'Côte d\'Ivoire',
        'CZ': 'Czech Republic',
        'DO': 'Dominican Republic',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GW': 'Guinea-Bissau',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'IR': 'Iran, Islamic Republic of',
        'IM': 'Isle of Man',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'LA': 'Lao People\'s Democratic Republic',
        'LI': 'Liechtenstein',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MH': 'Marshall Islands',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'PS': 'Palestine, State of',
        'PG': 'Papua New Guinea',
        'PH': 'Philippines',
        'PR': 'Puerto Rico',
        'RU': 'Russian Federation',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SL': 'Sierra Leone',
        'SX': 'Sint Maarten (Dutch part)',
        'SB': 'Solomon Islands',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'SJ': 'Svalbard and Jan Mayen',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TZ': 'Tanzania, United Republic of',
        'TL': 'Timor-Leste',
        'TT': 'Trinidad and Tobago',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',

    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    """ProxyHandler that honours a per-request 'Ytdl-request-proxy' header.

    A proxy value of '__noproxy__' forces a direct connection.
    """
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            # Fix: adopt the per-request proxy before stripping the internal
            # header (the assignment was elided in this excerpt, so the
            # override never took effect).
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)