# Merge branch 'pr/#10268' of https://github.com/lkho/youtube-dl into lkho-pr/#10268
# [youtube-dl] / youtube_dl / YoutubeDL.py
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import absolute_import, unicode_literals
5
6 import collections
7 import contextlib
8 import copy
9 import datetime
10 import errno
11 import fileinput
12 import io
13 import itertools
14 import json
15 import locale
16 import operator
17 import os
18 import platform
19 import re
20 import shutil
21 import subprocess
22 import socket
23 import sys
24 import time
25 import tokenize
26 import traceback
27
28 from .compat import (
29     compat_basestring,
30     compat_cookiejar,
31     compat_expanduser,
32     compat_get_terminal_size,
33     compat_http_client,
34     compat_kwargs,
35     compat_os_name,
36     compat_str,
37     compat_tokenize_tokenize,
38     compat_urllib_error,
39     compat_urllib_request,
40     compat_urllib_request_DataHandler,
41 )
42 from .utils import (
43     age_restricted,
44     args_to_str,
45     ContentTooShortError,
46     date_from_str,
47     DateRange,
48     DEFAULT_OUTTMPL,
49     determine_ext,
50     determine_protocol,
51     DownloadError,
52     encode_compat_str,
53     encodeFilename,
54     error_to_compat_str,
55     ExtractorError,
56     format_bytes,
57     formatSeconds,
58     locked_file,
59     make_HTTPS_handler,
60     MaxDownloadsReached,
61     PagedList,
62     parse_filesize,
63     PerRequestProxyHandler,
64     platform_name,
65     PostProcessingError,
66     preferredencoding,
67     prepend_extension,
68     register_socks_protocols,
69     render_table,
70     replace_extension,
71     SameFileError,
72     sanitize_filename,
73     sanitize_path,
74     sanitize_url,
75     sanitized_Request,
76     std_headers,
77     subtitles_filename,
78     UnavailableVideoError,
79     url_basename,
80     version_tuple,
81     write_json_file,
82     write_string,
83     YoutubeDLCookieProcessor,
84     YoutubeDLHandler,
85 )
86 from .cache import Cache
87 from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
88 from .downloader import get_suitable_downloader
89 from .downloader.rtmp import rtmpdump_version
90 from .postprocessor import (
91     FFmpegFixupM3u8PP,
92     FFmpegFixupM4aPP,
93     FFmpegFixupStretchedPP,
94     FFmpegMergerPP,
95     FFmpegPostProcessor,
96     get_postprocessor,
97 )
98 from .version import __version__
99
100 if compat_os_name == 'nt':
101     import ctypes
102
103
104 class YoutubeDL(object):
105     """YoutubeDL class.
106
    YoutubeDL objects are the ones responsible for downloading the
108     actual video file and writing it to disk if the user has requested
109     it, among some other tasks. In most cases there should be one per
110     program. As, given a video URL, the downloader doesn't know how to
111     extract all the needed information, task that InfoExtractors do, it
112     has to pass the URL to one of them.
113
114     For this, YoutubeDL objects have a method that allows
115     InfoExtractors to be registered in a given order. When it is passed
116     a URL, the YoutubeDL object handles it to the first InfoExtractor it
117     finds that reports being able to handle it. The InfoExtractor extracts
118     all the information about the video or videos the URL refers to, and
119     YoutubeDL process the extracted information, possibly using a File
120     Downloader to download the video.
121
122     YoutubeDL objects accept a lot of parameters. In order not to saturate
123     the object constructor with arguments, it receives a dictionary of
124     options instead. These options are available through the params
125     attribute for the InfoExtractors to use. The YoutubeDL also
126     registers itself as the downloader in charge for the InfoExtractors
127     that are added to it, so this is a "mutual registration".
128
129     Available options:
130
131     username:          Username for authentication purposes.
132     password:          Password for authentication purposes.
133     videopassword:     Password for accessing a video.
134     usenetrc:          Use netrc for authentication instead.
135     verbose:           Print additional info to stdout.
136     quiet:             Do not print messages to stdout.
137     no_warnings:       Do not print out anything for warnings.
138     forceurl:          Force printing final URL.
139     forcetitle:        Force printing title.
140     forceid:           Force printing ID.
141     forcethumbnail:    Force printing thumbnail URL.
142     forcedescription:  Force printing description.
143     forcefilename:     Force printing final filename.
144     forceduration:     Force printing duration.
145     forcejson:         Force printing info_dict as JSON.
146     dump_single_json:  Force printing the info_dict of the whole playlist
147                        (or video) as a single JSON line.
148     simulate:          Do not download the video files.
149     format:            Video format code. See options.py for more information.
150     outtmpl:           Template for output names.
151     restrictfilenames: Do not allow "&" and spaces in file names
152     ignoreerrors:      Do not stop on download errors.
153     force_generic_extractor: Force downloader to use the generic extractor
154     nooverwrites:      Prevent overwriting files.
155     playliststart:     Playlist item to start at.
156     playlistend:       Playlist item to end at.
157     playlist_items:    Specific indices of playlist to download.
158     playlistreverse:   Download playlist items in reverse order.
159     matchtitle:        Download only matching titles.
160     rejecttitle:       Reject downloads for matching titles.
161     logger:            Log messages to a logging.Logger instance.
162     logtostderr:       Log messages to stderr instead of stdout.
163     writedescription:  Write the video description to a .description file
164     writeinfojson:     Write the video description to a .info.json file
165     writeannotations:  Write the video annotations to a .annotations.xml file
166     writethumbnail:    Write the thumbnail image to a file
167     write_all_thumbnails:  Write all thumbnail formats to files
168     writesubtitles:    Write the video subtitles to a file
169     writeautomaticsub: Write the automatically generated subtitles to a file
170     allsubtitles:      Downloads all the subtitles of the video
171                        (requires writesubtitles or writeautomaticsub)
172     listsubtitles:     Lists all available subtitles for the video
173     subtitlesformat:   The format code for subtitles
174     subtitleslangs:    List of languages of the subtitles to download
175     keepvideo:         Keep the video file after post-processing
176     daterange:         A DateRange object, download only if the upload_date is in the range.
177     skip_download:     Skip the actual download of the video file
178     cachedir:          Location of the cache files in the filesystem.
179                        False to disable filesystem cache.
180     noplaylist:        Download single video instead of a playlist if in doubt.
181     age_limit:         An integer representing the user's age in years.
182                        Unsuitable videos for the given age are skipped.
183     min_views:         An integer representing the minimum view count the video
184                        must have in order to not be skipped.
185                        Videos without view count information are always
186                        downloaded. None for no limit.
187     max_views:         An integer representing the maximum view count.
188                        Videos that are more popular than that are not
189                        downloaded.
190                        Videos without view count information are always
191                        downloaded. None for no limit.
192     download_archive:  File name of a file where all downloads are recorded.
193                        Videos already present in the file are not downloaded
194                        again.
195     cookiefile:        File name where cookies should be read from and dumped to.
196     nocheckcertificate:Do not verify SSL certificates
197     prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
198                        At the moment, this is only supported by YouTube.
199     proxy:             URL of the proxy server to use
200     geo_verification_proxy:  URL of the proxy to use for IP address verification
201                        on geo-restricted sites. (Experimental)
202     socket_timeout:    Time to wait for unresponsive hosts, in seconds
203     bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
205     debug_printtraffic:Print out sent and received HTTP traffic
206     include_ads:       Download ads as well
207     default_search:    Prepend this string if an input url is not valid.
208                        'auto' for elaborate guessing
209     encoding:          Use this encoding instead of the system-specified.
210     extract_flat:      Do not resolve URLs, return the immediate result.
211                        Pass in 'in_playlist' to only show this behavior for
212                        playlist items.
213     postprocessors:    A list of dictionaries, each with an entry
214                        * key:  The name of the postprocessor. See
215                                youtube_dl/postprocessor/__init__.py for a list.
216                        as well as any further keyword arguments for the
217                        postprocessor.
218     progress_hooks:    A list of functions that get called on download
219                        progress, with a dictionary with the entries
220                        * status: One of "downloading", "error", or "finished".
221                                  Check this first and ignore unknown values.
222
223                        If status is one of "downloading", or "finished", the
224                        following properties may also be present:
225                        * filename: The final filename (always present)
226                        * tmpfilename: The filename we're currently writing to
227                        * downloaded_bytes: Bytes on disk
228                        * total_bytes: Size of the whole file, None if unknown
229                        * total_bytes_estimate: Guess of the eventual file size,
230                                                None if unavailable.
231                        * elapsed: The number of seconds since download started.
232                        * eta: The estimated time in seconds, None if unknown
233                        * speed: The download speed in bytes/second, None if
234                                 unknown
235                        * fragment_index: The counter of the currently
236                                          downloaded video fragment.
237                        * fragment_count: The number of fragments (= individual
238                                          files that will be merged)
239
240                        Progress hooks are guaranteed to be called at least once
241                        (with status "finished") if the download is successful.
242     merge_output_format: Extension to use when merging formats.
243     fixup:             Automatically correct known faults of the file.
244                        One of:
245                        - "never": do nothing
246                        - "warn": only emit a warning
247                        - "detect_or_warn": check whether we can do anything
248                                            about it, warn otherwise (default)
249     source_address:    (Experimental) Client-side IP address to bind to.
250     call_home:         Boolean, true iff we are allowed to contact the
251                        youtube-dl servers for debugging.
252     sleep_interval:    Number of seconds to sleep before each download when
253                        used alone or a lower bound of a range for randomized
254                        sleep before each download (minimum possible number
255                        of seconds to sleep) when used along with
256                        max_sleep_interval.
257     max_sleep_interval:Upper bound of a range for randomized sleep before each
258                        download (maximum possible number of seconds to sleep).
259                        Must only be used along with sleep_interval.
260                        Actual sleep time will be a random float from range
261                        [sleep_interval; max_sleep_interval].
262     listformats:       Print an overview of available video formats and exit.
263     list_thumbnails:   Print a table of all thumbnails and exit.
264     match_filter:      A function that gets called with the info_dict of
265                        every video.
266                        If it returns a message, the video is ignored.
267                        If it returns None, the video is downloaded.
268                        match_filter_func in utils.py is one example for this.
269     no_color:          Do not emit color codes in output.
270
271     The following options determine which downloader is picked:
272     external_downloader: Executable of the external downloader to call.
273                        None or unset for standard (built-in) downloader.
274     hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
275                        if True, otherwise use ffmpeg/avconv if False, otherwise
276                        use downloader suggested by extractor if None.
277
278     The following parameters are not used by YoutubeDL itself, they are used by
279     the downloader (see youtube_dl/downloader/common.py):
280     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
281     noresizebuffer, retries, continuedl, noprogress, consoletitle,
282     xattr_set_filesize, external_downloader_args, hls_use_mpegts.
283
284     The following options are used by the post processors:
285     prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
286                        otherwise prefer avconv.
287     postprocessor_args: A list of additional command-line arguments for the
288                         postprocessor.
289     """
290
291     params = None
292     _ies = []
293     _pps = []
294     _download_retcode = None
295     _num_downloads = None
296     _screen_file = None
297
298     def __init__(self, params=None, auto_init=True):
299         """Create a FileDownloader object with the given options."""
300         if params is None:
301             params = {}
302         self._ies = []
303         self._ies_instances = {}
304         self._pps = []
305         self._progress_hooks = []
306         self._download_retcode = 0
307         self._num_downloads = 0
308         self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
309         self._err_file = sys.stderr
310         self.params = {
311             # Default parameters
312             'nocheckcertificate': False,
313         }
314         self.params.update(params)
315         self.cache = Cache(self)
316
317         if self.params.get('cn_verification_proxy') is not None:
318             self.report_warning('--cn-verification-proxy is deprecated. Use --geo-verification-proxy instead.')
319             if self.params.get('geo_verification_proxy') is None:
320                 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
321
322         if params.get('bidi_workaround', False):
323             try:
324                 import pty
325                 master, slave = pty.openpty()
326                 width = compat_get_terminal_size().columns
327                 if width is None:
328                     width_args = []
329                 else:
330                     width_args = ['-w', str(width)]
331                 sp_kwargs = dict(
332                     stdin=subprocess.PIPE,
333                     stdout=slave,
334                     stderr=self._err_file)
335                 try:
336                     self._output_process = subprocess.Popen(
337                         ['bidiv'] + width_args, **sp_kwargs
338                     )
339                 except OSError:
340                     self._output_process = subprocess.Popen(
341                         ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
342                 self._output_channel = os.fdopen(master, 'rb')
343             except OSError as ose:
344                 if ose.errno == errno.ENOENT:
345                     self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that  fribidi  is an executable file in one of the directories in your $PATH.')
346                 else:
347                     raise
348
349         if (sys.version_info >= (3,) and sys.platform != 'win32' and
350                 sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
351                 not params.get('restrictfilenames', False)):
352             # On Python 3, the Unicode filesystem API will throw errors (#1474)
353             self.report_warning(
354                 'Assuming --restrict-filenames since file system encoding '
355                 'cannot encode all characters. '
356                 'Set the LC_ALL environment variable to fix this.')
357             self.params['restrictfilenames'] = True
358
359         if isinstance(params.get('outtmpl'), bytes):
360             self.report_warning(
361                 'Parameter outtmpl is bytes, but should be a unicode string. '
362                 'Put  from __future__ import unicode_literals  at the top of your code file or consider switching to Python 3.x.')
363
364         self._setup_opener()
365
366         if auto_init:
367             self.print_debug_header()
368             self.add_default_info_extractors()
369
370         for pp_def_raw in self.params.get('postprocessors', []):
371             pp_class = get_postprocessor(pp_def_raw['key'])
372             pp_def = dict(pp_def_raw)
373             del pp_def['key']
374             pp = pp_class(self, **compat_kwargs(pp_def))
375             self.add_post_processor(pp)
376
377         for ph in self.params.get('progress_hooks', []):
378             self.add_progress_hook(ph)
379
380         register_socks_protocols()
381
382     def warn_if_short_id(self, argv):
383         # short YouTube ID starting with dash?
384         idxs = [
385             i for i, a in enumerate(argv)
386             if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
387         if idxs:
388             correct_argv = (
389                 ['youtube-dl'] +
390                 [a for i, a in enumerate(argv) if i not in idxs] +
391                 ['--'] + [argv[i] for i in idxs]
392             )
393             self.report_warning(
394                 'Long argument string detected. '
395                 'Use -- to separate parameters and URLs, like this:\n%s\n' %
396                 args_to_str(correct_argv))
397
398     def add_info_extractor(self, ie):
399         """Add an InfoExtractor object to the end of the list."""
400         self._ies.append(ie)
401         if not isinstance(ie, type):
402             self._ies_instances[ie.ie_key()] = ie
403             ie.set_downloader(self)
404
405     def get_info_extractor(self, ie_key):
406         """
407         Get an instance of an IE with name ie_key, it will try to get one from
408         the _ies list, if there's no instance it will create a new one and add
409         it to the extractor list.
410         """
411         ie = self._ies_instances.get(ie_key)
412         if ie is None:
413             ie = get_info_extractor(ie_key)()
414             self.add_info_extractor(ie)
415         return ie
416
417     def add_default_info_extractors(self):
418         """
419         Add the InfoExtractors returned by gen_extractors to the end of the list
420         """
421         for ie in gen_extractor_classes():
422             self.add_info_extractor(ie)
423
424     def add_post_processor(self, pp):
425         """Add a PostProcessor object to the end of the chain."""
426         self._pps.append(pp)
427         pp.set_downloader(self)
428
429     def add_progress_hook(self, ph):
430         """Add the progress hook (currently only for the file downloader)"""
431         self._progress_hooks.append(ph)
432
433     def _bidi_workaround(self, message):
434         if not hasattr(self, '_output_channel'):
435             return message
436
437         assert hasattr(self, '_output_process')
438         assert isinstance(message, compat_str)
439         line_count = message.count('\n') + 1
440         self._output_process.stdin.write((message + '\n').encode('utf-8'))
441         self._output_process.stdin.flush()
442         res = ''.join(self._output_channel.readline().decode('utf-8')
443                       for _ in range(line_count))
444         return res[:-len('\n')]
445
446     def to_screen(self, message, skip_eol=False):
447         """Print message to stdout if not in quiet mode."""
448         return self.to_stdout(message, skip_eol, check_quiet=True)
449
450     def _write_string(self, s, out=None):
451         write_string(s, out=out, encoding=self.params.get('encoding'))
452
453     def to_stdout(self, message, skip_eol=False, check_quiet=False):
454         """Print message to stdout if not in quiet mode."""
455         if self.params.get('logger'):
456             self.params['logger'].debug(message)
457         elif not check_quiet or not self.params.get('quiet', False):
458             message = self._bidi_workaround(message)
459             terminator = ['\n', ''][skip_eol]
460             output = message + terminator
461
462             self._write_string(output, self._screen_file)
463
464     def to_stderr(self, message):
465         """Print message to stderr."""
466         assert isinstance(message, compat_str)
467         if self.params.get('logger'):
468             self.params['logger'].error(message)
469         else:
470             message = self._bidi_workaround(message)
471             output = message + '\n'
472             self._write_string(output, self._err_file)
473
474     def to_console_title(self, message):
475         if not self.params.get('consoletitle', False):
476             return
477         if compat_os_name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
478             # c_wchar_p() might not be necessary if `message` is
479             # already of type unicode()
480             ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
481         elif 'TERM' in os.environ:
482             self._write_string('\033]0;%s\007' % message, self._screen_file)
483
484     def save_console_title(self):
485         if not self.params.get('consoletitle', False):
486             return
487         if 'TERM' in os.environ:
488             # Save the title on stack
489             self._write_string('\033[22;0t', self._screen_file)
490
491     def restore_console_title(self):
492         if not self.params.get('consoletitle', False):
493             return
494         if 'TERM' in os.environ:
495             # Restore the title from stack
496             self._write_string('\033[23;0t', self._screen_file)
497
498     def __enter__(self):
499         self.save_console_title()
500         return self
501
502     def __exit__(self, *args):
503         self.restore_console_title()
504
505         if self.params.get('cookiefile') is not None:
506             self.cookiejar.save()
507
508     def trouble(self, message=None, tb=None):
509         """Determine action to take when a download problem appears.
510
511         Depending on if the downloader has been configured to ignore
512         download errors or not, this method may throw an exception or
513         not when errors are found, after printing the message.
514
515         tb, if given, is additional traceback information.
516         """
517         if message is not None:
518             self.to_stderr(message)
519         if self.params.get('verbose'):
520             if tb is None:
521                 if sys.exc_info()[0]:  # if .trouble has been called from an except block
522                     tb = ''
523                     if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
524                         tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
525                     tb += encode_compat_str(traceback.format_exc())
526                 else:
527                     tb_data = traceback.format_list(traceback.extract_stack())
528                     tb = ''.join(tb_data)
529             self.to_stderr(tb)
530         if not self.params.get('ignoreerrors', False):
531             if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
532                 exc_info = sys.exc_info()[1].exc_info
533             else:
534                 exc_info = sys.exc_info()
535             raise DownloadError(message, exc_info)
536         self._download_retcode = 1
537
538     def report_warning(self, message):
539         '''
540         Print the message to stderr, it will be prefixed with 'WARNING:'
541         If stderr is a tty file the 'WARNING:' will be colored
542         '''
543         if self.params.get('logger') is not None:
544             self.params['logger'].warning(message)
545         else:
546             if self.params.get('no_warnings'):
547                 return
548             if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
549                 _msg_header = '\033[0;33mWARNING:\033[0m'
550             else:
551                 _msg_header = 'WARNING:'
552             warning_message = '%s %s' % (_msg_header, message)
553             self.to_stderr(warning_message)
554
555     def report_error(self, message, tb=None):
556         '''
557         Do the same as trouble, but prefixes the message with 'ERROR:', colored
558         in red if stderr is a tty file.
559         '''
560         if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
561             _msg_header = '\033[0;31mERROR:\033[0m'
562         else:
563             _msg_header = 'ERROR:'
564         error_message = '%s %s' % (_msg_header, message)
565         self.trouble(error_message, tb)
566
567     def report_file_already_downloaded(self, file_name):
568         """Report file has already been fully downloaded."""
569         try:
570             self.to_screen('[download] %s has already been downloaded' % file_name)
571         except UnicodeEncodeError:
572             self.to_screen('[download] The file has already been downloaded')
573
574     def prepare_filename(self, info_dict):
575         """Generate the output filename."""
576         try:
577             template_dict = dict(info_dict)
578
579             template_dict['epoch'] = int(time.time())
580             autonumber_size = self.params.get('autonumber_size')
581             if autonumber_size is None:
582                 autonumber_size = 5
583             autonumber_templ = '%0' + str(autonumber_size) + 'd'
584             template_dict['autonumber'] = autonumber_templ % self._num_downloads
585             if template_dict.get('playlist_index') is not None:
586                 template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index'])
587             if template_dict.get('resolution') is None:
588                 if template_dict.get('width') and template_dict.get('height'):
589                     template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
590                 elif template_dict.get('height'):
591                     template_dict['resolution'] = '%sp' % template_dict['height']
592                 elif template_dict.get('width'):
593                     template_dict['resolution'] = '%dx?' % template_dict['width']
594
595             sanitize = lambda k, v: sanitize_filename(
596                 compat_str(v),
597                 restricted=self.params.get('restrictfilenames'),
598                 is_id=(k == 'id'))
599             template_dict = dict((k, sanitize(k, v))
600                                  for k, v in template_dict.items()
601                                  if v is not None and not isinstance(v, (list, tuple, dict)))
602             template_dict = collections.defaultdict(lambda: 'NA', template_dict)
603
604             outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
605             tmpl = compat_expanduser(outtmpl)
606             filename = tmpl % template_dict
607             # Temporary fix for #4787
608             # 'Treat' all problem characters by passing filename through preferredencoding
609             # to workaround encoding issues with subprocess on python2 @ Windows
610             if sys.version_info < (3, 0) and sys.platform == 'win32':
611                 filename = encodeFilename(filename, True).decode(preferredencoding())
612             return sanitize_path(filename)
613         except ValueError as err:
614             self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
615             return None
616
617     def _match_entry(self, info_dict, incomplete):
618         """ Returns None iff the file should be downloaded """
619
620         video_title = info_dict.get('title', info_dict.get('id', 'video'))
621         if 'title' in info_dict:
622             # This can happen when we're just evaluating the playlist
623             title = info_dict['title']
624             matchtitle = self.params.get('matchtitle', False)
625             if matchtitle:
626                 if not re.search(matchtitle, title, re.IGNORECASE):
627                     return '"' + title + '" title did not match pattern "' + matchtitle + '"'
628             rejecttitle = self.params.get('rejecttitle', False)
629             if rejecttitle:
630                 if re.search(rejecttitle, title, re.IGNORECASE):
631                     return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
632         date = info_dict.get('upload_date')
633         if date is not None:
634             dateRange = self.params.get('daterange', DateRange())
635             if date not in dateRange:
636                 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
637         view_count = info_dict.get('view_count')
638         if view_count is not None:
639             min_views = self.params.get('min_views')
640             if min_views is not None and view_count < min_views:
641                 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
642             max_views = self.params.get('max_views')
643             if max_views is not None and view_count > max_views:
644                 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
645         if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
646             return 'Skipping "%s" because it is age restricted' % video_title
647         if self.in_download_archive(info_dict):
648             return '%s has already been recorded in archive' % video_title
649
650         if not incomplete:
651             match_filter = self.params.get('match_filter')
652             if match_filter is not None:
653                 ret = match_filter(info_dict)
654                 if ret is not None:
655                     return ret
656
657         return None
658
659     @staticmethod
660     def add_extra_info(info_dict, extra_info):
661         '''Set the keys from extra_info in info dict if they are missing'''
662         for key, value in extra_info.items():
663             info_dict.setdefault(key, value)
664
665     def extract_info(self, url, download=True, ie_key=None, extra_info={},
666                      process=True, force_generic_extractor=False):
667         '''
668         Returns a list with a dictionary for each video we find.
669         If 'download', also downloads the videos.
670         extra_info is a dict containing the extra values to add to each result
671         '''
672
673         if not ie_key and force_generic_extractor:
674             ie_key = 'Generic'
675
676         if ie_key:
677             ies = [self.get_info_extractor(ie_key)]
678         else:
679             ies = self._ies
680
681         for ie in ies:
682             if not ie.suitable(url):
683                 continue
684
685             ie = self.get_info_extractor(ie.ie_key())
686             if not ie.working():
687                 self.report_warning('The program functionality for this site has been marked as broken, '
688                                     'and will probably not work.')
689
690             try:
691                 ie_result = ie.extract(url)
692                 if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
693                     break
694                 if isinstance(ie_result, list):
695                     # Backwards compatibility: old IE result format
696                     ie_result = {
697                         '_type': 'compat_list',
698                         'entries': ie_result,
699                     }
700                 self.add_default_extra_info(ie_result, ie, url)
701                 if process:
702                     return self.process_ie_result(ie_result, download, extra_info)
703                 else:
704                     return ie_result
705             except ExtractorError as e:  # An error we somewhat expected
706                 self.report_error(compat_str(e), e.format_traceback())
707                 break
708             except MaxDownloadsReached:
709                 raise
710             except Exception as e:
711                 if self.params.get('ignoreerrors', False):
712                     self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
713                     break
714                 else:
715                     raise
716         else:
717             self.report_error('no suitable InfoExtractor for URL %s' % url)
718
719     def add_default_extra_info(self, ie_result, ie, url):
720         self.add_extra_info(ie_result, {
721             'extractor': ie.IE_NAME,
722             'webpage_url': url,
723             'webpage_url_basename': url_basename(url),
724             'extractor_key': ie.ie_key(),
725         })
726
    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie(may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        # Dispatch on the extractor-reported type: plain videos are handed to
        # process_video_result, everything else is a reference resolved here
        # (possibly recursively).
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(ie_result['url'])
            extract_flat = self.params.get('extract_flat', False)
            # With --flat-playlist (extract_flat is True), or
            # extract_flat == 'in_playlist' while inside a playlist, do not
            # resolve the URL any further; return the reference as-is.
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
                    extract_flat is True):
                if self.params.get('forcejson', False):
                    self.to_stdout(json.dumps(ie_result))
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            return self.process_video_result(ie_result, download=download)
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(ie_result['url'],
                                     download,
                                     ie_key=ie_result.get('ie_key'),
                                     extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # Non-None properties from the embedding page override the
            # target's, except the reference fields that describe the target
            # itself (_type, url, ie_key).
            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url', 'ie_key'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)

            # Guard against infinite recursion if the target is itself
            # url_transparent.
            assert new_result.get('_type') != 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type == 'playlist' or result_type == 'multi_video':
            # We process each entry in the playlist
            playlist = ie_result.get('title') or ie_result.get('id')
            self.to_screen('[download] Downloading playlist: %s' % playlist)

            playlist_results = []

            # --playlist-start is 1-based on the command line; 0-based here.
            playliststart = self.params.get('playliststart', 1) - 1
            playlistend = self.params.get('playlistend')
            # For backwards compatibility, interpret -1 as whole list
            if playlistend == -1:
                playlistend = None

            playlistitems_str = self.params.get('playlist_items')
            playlistitems = None
            if playlistitems_str is not None:
                def iter_playlistitems(format):
                    # Expand e.g. '1-3,7' into the 1-based indices 1, 2, 3, 7.
                    for string_segment in format.split(','):
                        if '-' in string_segment:
                            start, end = string_segment.split('-')
                            for item in range(int(start), int(end) + 1):
                                yield int(item)
                        else:
                            yield int(string_segment)
                playlistitems = iter_playlistitems(playlistitems_str)

            ie_entries = ie_result['entries']
            if isinstance(ie_entries, list):
                n_all_entries = len(ie_entries)
                if playlistitems:
                    # Out-of-range requested items are silently dropped here.
                    entries = [
                        ie_entries[i - 1] for i in playlistitems
                        if -n_all_entries <= i - 1 < n_all_entries]
                else:
                    entries = ie_entries[playliststart:playlistend]
                n_entries = len(entries)
                self.to_screen(
                    '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
                    (ie_result['extractor'], playlist, n_all_entries, n_entries))
            elif isinstance(ie_entries, PagedList):
                if playlistitems:
                    # Fetch each requested item as a one-element slice.
                    entries = []
                    for item in playlistitems:
                        entries.extend(ie_entries.getslice(
                            item - 1, item
                        ))
                else:
                    entries = ie_entries.getslice(
                        playliststart, playlistend)
                n_entries = len(entries)
                self.to_screen(
                    '[%s] playlist %s: Downloading %d videos' %
                    (ie_result['extractor'], playlist, n_entries))
            else:  # iterable
                if playlistitems:
                    # NOTE(review): unlike the list branch above, indices are
                    # not range-checked here, so an out-of-range value in
                    # --playlist-items raises IndexError.
                    entry_list = list(ie_entries)
                    entries = [entry_list[i - 1] for i in playlistitems]
                else:
                    entries = list(itertools.islice(
                        ie_entries, playliststart, playlistend))
                n_entries = len(entries)
                self.to_screen(
                    '[%s] playlist %s: Downloading %d videos' %
                    (ie_result['extractor'], playlist, n_entries))

            if self.params.get('playlistreverse', False):
                entries = entries[::-1]

            for i, entry in enumerate(entries, 1):
                self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
                # NOTE(review): with --playlist-items, i + playliststart may
                # not match the entry's real index in the original playlist —
                # confirm before relying on playlist_index for such runs.
                extra = {
                    'n_entries': n_entries,
                    'playlist': playlist,
                    'playlist_id': ie_result.get('id'),
                    'playlist_title': ie_result.get('title'),
                    'playlist_index': i + playliststart,
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                }

                # incomplete=True: only filters that do not need full video
                # info may reject the entry before extraction.
                reason = self._match_entry(entry, incomplete=True)
                if reason is not None:
                    self.to_screen('[download] ' + reason)
                    continue

                entry_result = self.process_ie_result(entry,
                                                      download=download,
                                                      extra_info=extra)
                playlist_results.append(entry_result)
            ie_result['entries'] = playlist_results
            self.to_screen('[download] Finished downloading playlist: %s' % playlist)
            return ie_result
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                # Propagate the playlist-level metadata to each entry.
                self.add_extra_info(
                    r,
                    {
                        'extractor': ie_result['extractor'],
                        'webpage_url': ie_result['webpage_url'],
                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
                        'extractor_key': ie_result['extractor_key'],
                    }
                )
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
891
892     def _build_format_filter(self, filter_spec):
893         " Returns a function to filter the formats according to the filter_spec "
894
895         OPERATORS = {
896             '<': operator.lt,
897             '<=': operator.le,
898             '>': operator.gt,
899             '>=': operator.ge,
900             '=': operator.eq,
901             '!=': operator.ne,
902         }
903         operator_rex = re.compile(r'''(?x)\s*
904             (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps)
905             \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
906             (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
907             $
908             ''' % '|'.join(map(re.escape, OPERATORS.keys())))
909         m = operator_rex.search(filter_spec)
910         if m:
911             try:
912                 comparison_value = int(m.group('value'))
913             except ValueError:
914                 comparison_value = parse_filesize(m.group('value'))
915                 if comparison_value is None:
916                     comparison_value = parse_filesize(m.group('value') + 'B')
917                 if comparison_value is None:
918                     raise ValueError(
919                         'Invalid value %r in format specification %r' % (
920                             m.group('value'), filter_spec))
921             op = OPERATORS[m.group('op')]
922
923         if not m:
924             STR_OPERATORS = {
925                 '=': operator.eq,
926                 '!=': operator.ne,
927                 '^=': lambda attr, value: attr.startswith(value),
928                 '$=': lambda attr, value: attr.endswith(value),
929                 '*=': lambda attr, value: value in attr,
930             }
931             str_operator_rex = re.compile(r'''(?x)
932                 \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
933                 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
934                 \s*(?P<value>[a-zA-Z0-9._-]+)
935                 \s*$
936                 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
937             m = str_operator_rex.search(filter_spec)
938             if m:
939                 comparison_value = m.group('value')
940                 op = STR_OPERATORS[m.group('op')]
941
942         if not m:
943             raise ValueError('Invalid filter specification %r' % filter_spec)
944
945         def _filter(f):
946             actual_value = f.get(m.group('key'))
947             if actual_value is None:
948                 return m.group('none_inclusive')
949             return op(actual_value, comparison_value)
950         return _filter
951
    def build_format_selector(self, format_spec):
        """Compile a --format specification (e.g. 'bestvideo[height<=720]+bestaudio/best')
        into a selector function.

        The returned callable takes a ctx dict with keys 'formats' and
        'incomplete_formats' and yields the selected format dicts.
        """
        def syntax_error(note, start):
            # Build (not raise) a SyntaxError with a caret pointing at
            # column start[1] of the original spec.
            message = (
                'Invalid format specification: '
                '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        # Node types of the parsed selector tree.
        PICKFIRST = 'PICKFIRST'  # 'a/b': first alternative that yields formats
        MERGE = 'MERGE'  # 'video+audio': merge a pair of formats
        SINGLE = 'SINGLE'  # plain name: 'best', an extension, a format_id, ...
        GROUP = 'GROUP'  # parenthesized sub-expression
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        def _parse_filter(tokens):
            # Consume tokens up to the closing ']' and return the raw
            # filter string for _build_format_filter.
            filter_parts = []
            for type, string, start, _, _ in tokens:
                if type == tokenize.OP and string == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings
            # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string, start, end, line in tokens:
                if type == tokenize.OP and string == '[':
                    # Flush any accumulated name before entering a filter.
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string, start, end, line in tokens:
                        yield type, string, start, end, line
                        if type == tokenize.OP and string == ']':
                            break
                elif type == tokenize.OP and string in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    # Glue adjacent name/number/other-op tokens into one NAME.
                    if not last_string:
                        last_string = string
                        last_start = start
                        last_end = end
                    else:
                        last_string += string
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            # Recursive-descent parse of a comma-separated selector list;
            # the inside_* flags tell it which delimiters end the current
            # sub-expression (handed back via restore_last_token).
            selectors = []
            current_selector = None
            for type, string, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string, [])
                elif type == tokenize.OP:
                    if string == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string == ',':
                        tokens.restore_last_token()
                        break
                    elif string == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string == '[':
                        # A bare '[...]' filter implies 'best' as the base.
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string == '+':
                        video_selector = current_selector
                        audio_selector = _parse_format_selection(tokens, inside_merge=True)
                        if not video_selector or not audio_selector:
                            raise syntax_error('"+" must be between two format selectors', start)
                        current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
                    else:
                        raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _build_selector_function(selector):
            # Turn a parsed selector tree (or list of trees) into a
            # generator function over ctx.
            if isinstance(selector, list):
                # A comma list: concatenate the output of each selector.
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        for format in f(ctx):
                            yield format
                return selector_function
            elif selector.type == GROUP:
                selector_function = _build_selector_function(selector.selector)
            elif selector.type == PICKFIRST:
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    # First alternative that yields at least one format wins.
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []
            elif selector.type == SINGLE:
                format_spec = selector.selector

                def selector_function(ctx):
                    formats = list(ctx['formats'])
                    if not formats:
                        return
                    if format_spec == 'all':
                        for f in formats:
                            yield f
                    elif format_spec in ['best', 'worst', None]:
                        # The formats list is expected sorted worst-to-best,
                        # hence 'best' is the last entry.
                        format_idx = 0 if format_spec == 'worst' else -1
                        audiovideo_formats = [
                            f for f in formats
                            if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
                        if audiovideo_formats:
                            yield audiovideo_formats[format_idx]
                        # for extractors with incomplete formats (audio only (soundcloud)
                        # or video only (imgur)) we will fallback to best/worst
                        # {video,audio}-only format
                        elif ctx['incomplete_formats']:
                            yield formats[format_idx]
                    elif format_spec == 'bestaudio':
                        audio_formats = [
                            f for f in formats
                            if f.get('vcodec') == 'none']
                        if audio_formats:
                            yield audio_formats[-1]
                    elif format_spec == 'worstaudio':
                        audio_formats = [
                            f for f in formats
                            if f.get('vcodec') == 'none']
                        if audio_formats:
                            yield audio_formats[0]
                    elif format_spec == 'bestvideo':
                        video_formats = [
                            f for f in formats
                            if f.get('acodec') == 'none']
                        if video_formats:
                            yield video_formats[-1]
                    elif format_spec == 'worstvideo':
                        video_formats = [
                            f for f in formats
                            if f.get('acodec') == 'none']
                        if video_formats:
                            yield video_formats[0]
                    else:
                        # Either an extension shorthand or an exact
                        # format_id match; yields the last (best) match.
                        extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
                        if format_spec in extensions:
                            filter_f = lambda f: f['ext'] == format_spec
                        else:
                            filter_f = lambda f: f['format_id'] == format_spec
                        matches = list(filter(filter_f, formats))
                        if matches:
                            yield matches[-1]
            elif selector.type == MERGE:
                def _merge(formats_info):
                    # Combine a (video, audio) pair into a single
                    # pseudo-format carrying 'requested_formats' for the
                    # downloader; reports an error and returns None when the
                    # pair is not video-first or is video-only on both sides.
                    format_1, format_2 = [f['format_id'] for f in formats_info]
                    # The first format must contain the video and the
                    # second the audio
                    if formats_info[0].get('vcodec') == 'none':
                        self.report_error('The first format must '
                                          'contain the video, try using '
                                          '"-f %s+%s"' % (format_2, format_1))
                        return
                    # Formats must be opposite (video+audio)
                    if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
                        self.report_error(
                            'Both formats %s and %s are video-only, you must specify "-f video+audio"'
                            % (format_1, format_2))
                        return
                    output_ext = (
                        formats_info[0]['ext']
                        if self.params.get('merge_output_format') is None
                        else self.params['merge_output_format'])
                    return {
                        'requested_formats': formats_info,
                        'format': '%s+%s' % (formats_info[0].get('format'),
                                             formats_info[1].get('format')),
                        'format_id': '%s+%s' % (formats_info[0].get('format_id'),
                                                formats_info[1].get('format_id')),
                        'width': formats_info[0].get('width'),
                        'height': formats_info[0].get('height'),
                        'resolution': formats_info[0].get('resolution'),
                        'fps': formats_info[0].get('fps'),
                        'vcodec': formats_info[0].get('vcodec'),
                        'vbr': formats_info[0].get('vbr'),
                        'stretched_ratio': formats_info[0].get('stretched_ratio'),
                        'acodec': formats_info[1].get('acodec'),
                        'abr': formats_info[1].get('abr'),
                        'ext': output_ext,
                    }
                video_selector, audio_selector = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    # Deep-copy ctx so each sub-selector sees pristine formats.
                    for pair in itertools.product(
                            video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
                        yield _merge(pair)

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                # Apply the '[...]' filters before running the selector itself.
                ctx_copy = copy.deepcopy(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        stream = io.BytesIO(format_spec.encode('utf-8'))
        try:
            tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator(object):
            # Token stream supporting a one-token pushback
            # (restore_last_token), needed by the recursive-descent parser.
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__  # Python 2 iterator protocol alias

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)
1216
1217     def _calc_headers(self, info_dict):
1218         res = std_headers.copy()
1219
1220         add_headers = info_dict.get('http_headers')
1221         if add_headers:
1222             res.update(add_headers)
1223
1224         cookies = self._calc_cookies(info_dict)
1225         if cookies:
1226             res['Cookie'] = cookies
1227
1228         return res
1229
1230     def _calc_cookies(self, info_dict):
1231         pr = sanitized_Request(info_dict['url'])
1232         self.cookiejar.add_cookie_header(pr)
1233         return pr.get_header('Cookie')
1234
1235     def process_video_result(self, info_dict, download=True):
1236         assert info_dict.get('_type', 'video') == 'video'
1237
1238         if 'id' not in info_dict:
1239             raise ExtractorError('Missing "id" field in extractor result')
1240         if 'title' not in info_dict:
1241             raise ExtractorError('Missing "title" field in extractor result')
1242
1243         if not isinstance(info_dict['id'], compat_str):
1244             self.report_warning('"id" field is not a string - forcing string conversion')
1245             info_dict['id'] = compat_str(info_dict['id'])
1246
1247         if 'playlist' not in info_dict:
1248             # It isn't part of a playlist
1249             info_dict['playlist'] = None
1250             info_dict['playlist_index'] = None
1251
1252         thumbnails = info_dict.get('thumbnails')
1253         if thumbnails is None:
1254             thumbnail = info_dict.get('thumbnail')
1255             if thumbnail:
1256                 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
1257         if thumbnails:
1258             thumbnails.sort(key=lambda t: (
1259                 t.get('preference'), t.get('width'), t.get('height'),
1260                 t.get('id'), t.get('url')))
1261             for i, t in enumerate(thumbnails):
1262                 t['url'] = sanitize_url(t['url'])
1263                 if t.get('width') and t.get('height'):
1264                     t['resolution'] = '%dx%d' % (t['width'], t['height'])
1265                 if t.get('id') is None:
1266                     t['id'] = '%d' % i
1267
1268         if self.params.get('list_thumbnails'):
1269             self.list_thumbnails(info_dict)
1270             return
1271
1272         thumbnail = info_dict.get('thumbnail')
1273         if thumbnail:
1274             info_dict['thumbnail'] = sanitize_url(thumbnail)
1275         elif thumbnails:
1276             info_dict['thumbnail'] = thumbnails[-1]['url']
1277
1278         if 'display_id' not in info_dict and 'id' in info_dict:
1279             info_dict['display_id'] = info_dict['id']
1280
1281         if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
1282             # Working around out-of-range timestamp values (e.g. negative ones on Windows,
1283             # see http://bugs.python.org/issue1646728)
1284             try:
1285                 upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
1286                 info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
1287             except (ValueError, OverflowError, OSError):
1288                 pass
1289
1290         # Auto generate title fields corresponding to the *_number fields when missing
1291         # in order to always have clean titles. This is very common for TV series.
1292         for field in ('chapter', 'season', 'episode'):
1293             if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
1294                 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
1295
1296         subtitles = info_dict.get('subtitles')
1297         if subtitles:
1298             for _, subtitle in subtitles.items():
1299                 for subtitle_format in subtitle:
1300                     if subtitle_format.get('url'):
1301                         subtitle_format['url'] = sanitize_url(subtitle_format['url'])
1302                     if 'ext' not in subtitle_format:
1303                         subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
1304
1305         if self.params.get('listsubtitles', False):
1306             if 'automatic_captions' in info_dict:
1307                 self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions')
1308             self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
1309             return
1310         info_dict['requested_subtitles'] = self.process_subtitles(
1311             info_dict['id'], subtitles,
1312             info_dict.get('automatic_captions'))
1313
1314         # We now pick which formats have to be downloaded
1315         if info_dict.get('formats') is None:
1316             # There's only one format available
1317             formats = [info_dict]
1318         else:
1319             formats = info_dict['formats']
1320
1321         if not formats:
1322             raise ExtractorError('No video formats found!')
1323
1324         formats_dict = {}
1325
1326         # We check that all the formats have the format and format_id fields
1327         for i, format in enumerate(formats):
1328             if 'url' not in format:
1329                 raise ExtractorError('Missing "url" key in result (index %d)' % i)
1330
1331             format['url'] = sanitize_url(format['url'])
1332
1333             if format.get('format_id') is None:
1334                 format['format_id'] = compat_str(i)
1335             else:
1336                 # Sanitize format_id from characters used in format selector expression
1337                 format['format_id'] = re.sub('[\s,/+\[\]()]', '_', format['format_id'])
1338             format_id = format['format_id']
1339             if format_id not in formats_dict:
1340                 formats_dict[format_id] = []
1341             formats_dict[format_id].append(format)
1342
1343         # Make sure all formats have unique format_id
1344         for format_id, ambiguous_formats in formats_dict.items():
1345             if len(ambiguous_formats) > 1:
1346                 for i, format in enumerate(ambiguous_formats):
1347                     format['format_id'] = '%s-%d' % (format_id, i)
1348
1349         for i, format in enumerate(formats):
1350             if format.get('format') is None:
1351                 format['format'] = '{id} - {res}{note}'.format(
1352                     id=format['format_id'],
1353                     res=self.format_resolution(format),
1354                     note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
1355                 )
1356             # Automatically determine file extension if missing
1357             if 'ext' not in format:
1358                 format['ext'] = determine_ext(format['url']).lower()
1359             # Automatically determine protocol if missing (useful for format
1360             # selection purposes)
1361             if 'protocol' not in format:
1362                 format['protocol'] = determine_protocol(format)
1363             # Add HTTP headers, so that external programs can use them from the
1364             # json output
1365             full_format_info = info_dict.copy()
1366             full_format_info.update(format)
1367             format['http_headers'] = self._calc_headers(full_format_info)
1368
1369         # TODO Central sorting goes here
1370
1371         if formats[0] is not info_dict:
1372             # only set the 'formats' fields if the original info_dict list them
1373             # otherwise we end up with a circular reference, the first (and unique)
1374             # element in the 'formats' field in info_dict is info_dict itself,
1375             # which can't be exported to json
1376             info_dict['formats'] = formats
1377         if self.params.get('listformats'):
1378             self.list_formats(info_dict)
1379             return
1380
1381         req_format = self.params.get('format')
1382         if req_format is None:
1383             req_format_list = []
1384             if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and
1385                     not info_dict.get('is_live')):
1386                 merger = FFmpegMergerPP(self)
1387                 if merger.available and merger.can_merge():
1388                     req_format_list.append('bestvideo+bestaudio')
1389             req_format_list.append('best')
1390             req_format = '/'.join(req_format_list)
1391         format_selector = self.build_format_selector(req_format)
1392
1393         # While in format selection we may need to have an access to the original
1394         # format set in order to calculate some metrics or do some processing.
1395         # For now we need to be able to guess whether original formats provided
1396         # by extractor are incomplete or not (i.e. whether extractor provides only
1397         # video-only or audio-only formats) for proper formats selection for
1398         # extractors with such incomplete formats (see
1399         # https://github.com/rg3/youtube-dl/pull/5556).
1400         # Since formats may be filtered during format selection and may not match
1401         # the original formats the results may be incorrect. Thus original formats
1402         # or pre-calculated metrics should be passed to format selection routines
1403         # as well.
1404         # We will pass a context object containing all necessary additional data
1405         # instead of just formats.
1406         # This fixes incorrect format selection issue (see
1407         # https://github.com/rg3/youtube-dl/issues/10083).
1408         incomplete_formats = (
1409             # All formats are video-only or
1410             all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or
1411             # all formats are audio-only
1412             all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
1413
1414         ctx = {
1415             'formats': formats,
1416             'incomplete_formats': incomplete_formats,
1417         }
1418
1419         formats_to_download = list(format_selector(ctx))
1420         if not formats_to_download:
1421             raise ExtractorError('requested format not available',
1422                                  expected=True)
1423
1424         if download:
1425             if len(formats_to_download) > 1:
1426                 self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
1427             for format in formats_to_download:
1428                 new_info = dict(info_dict)
1429                 new_info.update(format)
1430                 self.process_info(new_info)
1431         # We update the info dict with the best quality format (backwards compatibility)
1432         info_dict.update(formats_to_download[-1])
1433         return info_dict
1434
1435     def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
1436         """Select the requested subtitles and their format"""
1437         available_subs = {}
1438         if normal_subtitles and self.params.get('writesubtitles'):
1439             available_subs.update(normal_subtitles)
1440         if automatic_captions and self.params.get('writeautomaticsub'):
1441             for lang, cap_info in automatic_captions.items():
1442                 if lang not in available_subs:
1443                     available_subs[lang] = cap_info
1444
1445         if (not self.params.get('writesubtitles') and not
1446                 self.params.get('writeautomaticsub') or not
1447                 available_subs):
1448             return None
1449
1450         if self.params.get('allsubtitles', False):
1451             requested_langs = available_subs.keys()
1452         else:
1453             if self.params.get('subtitleslangs', False):
1454                 requested_langs = self.params.get('subtitleslangs')
1455             elif 'en' in available_subs:
1456                 requested_langs = ['en']
1457             else:
1458                 requested_langs = [list(available_subs.keys())[0]]
1459
1460         formats_query = self.params.get('subtitlesformat', 'best')
1461         formats_preference = formats_query.split('/') if formats_query else []
1462         subs = {}
1463         for lang in requested_langs:
1464             formats = available_subs.get(lang)
1465             if formats is None:
1466                 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
1467                 continue
1468             for ext in formats_preference:
1469                 if ext == 'best':
1470                     f = formats[-1]
1471                     break
1472                 matches = list(filter(lambda f: f['ext'] == ext, formats))
1473                 if matches:
1474                     f = matches[-1]
1475                     break
1476             else:
1477                 f = formats[-1]
1478                 self.report_warning(
1479                     'No subtitle format found matching "%s" for language %s, '
1480                     'using %s' % (formats_query, lang, f['ext']))
1481             subs[lang] = f
1482         return subs
1483
    def process_info(self, info_dict):
        """Process a single resolved IE result.

        Handles everything after format selection for one video: match
        filters, forced printings (--get-*), writing of description /
        annotations / subtitles / info-json / thumbnails, the download itself
        (including downloading and merging separate video+audio formats),
        post-download fixups and the download-archive record.
        """

        assert info_dict.get('_type', 'video') == 'video'

        # Honor --max-downloads before doing any work on this entry.
        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None:
            if self._num_downloads >= int(max_downloads):
                raise MaxDownloadsReached()

        # Keep the untruncated title as 'fulltitle'; filenames use a title
        # capped at 200 characters (197 + '...').
        info_dict['fulltitle'] = info_dict['title']
        if len(info_dict['title']) > 200:
            info_dict['title'] = info_dict['title'][:197] + '...'

        if 'format' not in info_dict:
            info_dict['format'] = info_dict['ext']

        # --match-filter / date range / archive checks; a non-None reason
        # means this entry is skipped.
        reason = self._match_entry(info_dict, incomplete=False)
        if reason is not None:
            self.to_screen('[download] ' + reason)
            return

        self._num_downloads += 1

        info_dict['_filename'] = filename = self.prepare_filename(info_dict)

        # Forced printings (--get-title, --get-id, --get-url, ...)
        if self.params.get('forcetitle', False):
            self.to_stdout(info_dict['fulltitle'])
        if self.params.get('forceid', False):
            self.to_stdout(info_dict['id'])
        if self.params.get('forceurl', False):
            if info_dict.get('requested_formats') is not None:
                for f in info_dict['requested_formats']:
                    self.to_stdout(f['url'] + f.get('play_path', ''))
            else:
                # For RTMP URLs, also include the playpath
                self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
        if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
            self.to_stdout(info_dict['thumbnail'])
        if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
            self.to_stdout(info_dict['description'])
        if self.params.get('forcefilename', False) and filename is not None:
            self.to_stdout(filename)
        if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
            self.to_stdout(formatSeconds(info_dict['duration']))
        if self.params.get('forceformat', False):
            self.to_stdout(info_dict['format'])
        if self.params.get('forcejson', False):
            self.to_stdout(json.dumps(info_dict))

        # Do nothing else if in simulate mode
        if self.params.get('simulate', False):
            return

        if filename is None:
            return

        # Create the destination directory up front so that all side files
        # (description, subtitles, info-json, ...) have somewhere to go.
        try:
            dn = os.path.dirname(sanitize_path(encodeFilename(filename)))
            if dn and not os.path.exists(dn):
                os.makedirs(dn)
        except (OSError, IOError) as err:
            self.report_error('unable to create directory ' + error_to_compat_str(err))
            return

        # --write-description
        if self.params.get('writedescription', False):
            descfn = replace_extension(filename, 'description', info_dict.get('ext'))
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
                self.to_screen('[info] Video description is already present')
            elif info_dict.get('description') is None:
                self.report_warning('There\'s no description to write.')
            else:
                try:
                    self.to_screen('[info] Writing video description to: ' + descfn)
                    with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                        descfile.write(info_dict['description'])
                except (OSError, IOError):
                    self.report_error('Cannot write description file ' + descfn)
                    return

        # --write-annotations
        if self.params.get('writeannotations', False):
            annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext'))
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    # 'annotations' missing or None — treat as nothing to write
                    self.report_warning('There are no annotations to write.')
                except (OSError, IOError):
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                       self.params.get('writeautomaticsub')])

        if subtitles_are_requested and info_dict.get('requested_subtitles'):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            subtitles = info_dict['requested_subtitles']
            ie = self.get_info_extractor(info_dict['extractor_key'])
            for sub_lang, sub_info in subtitles.items():
                sub_format = sub_info['ext']
                # Prefer inline subtitle data; otherwise fetch from the URL
                if sub_info.get('data') is not None:
                    sub_data = sub_info['data']
                else:
                    try:
                        sub_data = ie._download_webpage(
                            sub_info['url'], info_dict['id'], note=False)
                    except ExtractorError as err:
                        self.report_warning('Unable to download subtitle for "%s": %s' %
                                            (sub_lang, error_to_compat_str(err.cause)))
                        continue
                try:
                    sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                    if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                        # (sic: the 'already_present' underscore is part of the
                        # released output string, so it is kept as-is)
                        self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
                    else:
                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
                            subfile.write(sub_data)
                except (OSError, IOError):
                    self.report_error('Cannot write subtitles file ' + sub_filename)
                    return

        # --write-info-json
        if self.params.get('writeinfojson', False):
            infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
                self.to_screen('[info] Video description metadata is already present')
            else:
                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                try:
                    # Volatile 'requested_*' keys are stripped before dumping
                    write_json_file(self.filter_requested_info(info_dict), infofn)
                except (OSError, IOError):
                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                    return

        self._write_thumbnails(info_dict, filename)

        if not self.params.get('skip_download', False):
            try:
                # Helper: pick a suitable FileDownloader for this format and
                # run it with the configured progress hooks attached.
                def dl(name, info):
                    fd = get_suitable_downloader(info, self.params)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get('verbose'):
                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
                    return fd.download(name, info)

                if info_dict.get('requested_formats') is not None:
                    # Separate video+audio formats: download each, then merge
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self)
                    if not merger.available:
                        postprocessors = []
                        self.report_warning('You have requested multiple '
                                            'formats but ffmpeg or avconv are not installed.'
                                            ' The formats won\'t be merged.')
                    else:
                        postprocessors = [merger]

                    # True when the (video, audio) pair can share a container
                    # without remuxing into mkv.
                    def compatible_formats(formats):
                        video, audio = formats
                        # Check extension
                        # NOTE(review): the names look swapped here (video_ext is
                        # read from audio and vice versa); harmless because the
                        # check below is symmetric — confirm before relying on
                        # these names individually.
                        video_ext, audio_ext = audio.get('ext'), video.get('ext')
                        if video_ext and audio_ext:
                            COMPATIBLE_EXTS = (
                                ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v'),
                                # NOTE(review): ('webm') is a plain string, not a
                                # 1-tuple, so the 'in' test below degenerates to a
                                # substring check; works for the exact ext 'webm'
                                # but should probably read ('webm',).
                                ('webm')
                            )
                            for exts in COMPATIBLE_EXTS:
                                if video_ext in exts and audio_ext in exts:
                                    return True
                        # TODO: Check acodec/vcodec
                        return False

                    filename_real_ext = os.path.splitext(filename)[1][1:]
                    filename_wo_ext = (
                        os.path.splitext(filename)[0]
                        if filename_real_ext == info_dict['ext']
                        else filename)
                    requested_formats = info_dict['requested_formats']
                    if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
                        info_dict['ext'] = 'mkv'
                        self.report_warning(
                            'Requested formats are incompatible for merge and will be merged into mkv.')
                    # Ensure filename always has a correct extension for successful merge
                    filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
                    if os.path.exists(encodeFilename(filename)):
                        self.to_screen(
                            '[download] %s has already been downloaded and '
                            'merged' % filename)
                    else:
                        # Download each requested format to an 'f<format_id>'-
                        # prefixed temporary file; the merger consumes them.
                        for f in requested_formats:
                            new_info = dict(info_dict)
                            new_info.update(f)
                            fname = self.prepare_filename(new_info)
                            fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext'])
                            downloaded.append(fname)
                            partial_success = dl(fname, new_info)
                            success = success and partial_success
                        info_dict['__postprocessors'] = postprocessors
                        info_dict['__files_to_merge'] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

            if success and filename != '-':
                # Fixup content
                fixup_policy = self.params.get('fixup')
                if fixup_policy is None:
                    fixup_policy = 'detect_or_warn'

                INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'

                # Fixup 1: non-uniform pixel (stretched) aspect ratio
                stretched_ratio = info_dict.get('stretched_ratio')
                if stretched_ratio is not None and stretched_ratio != 1:
                    if fixup_policy == 'warn':
                        self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
                            info_dict['id'], stretched_ratio))
                    elif fixup_policy == 'detect_or_warn':
                        stretched_pp = FFmpegFixupStretchedPP(self)
                        if stretched_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(stretched_pp)
                        else:
                            self.report_warning(
                                '%s: Non-uniform pixel ratio (%s). %s'
                                % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                # Fixup 2: DASH m4a container compatibility
                if (info_dict.get('requested_formats') is None and
                        info_dict.get('container') == 'm4a_dash'):
                    if fixup_policy == 'warn':
                        self.report_warning(
                            '%s: writing DASH m4a. '
                            'Only some players support this container.'
                            % info_dict['id'])
                    elif fixup_policy == 'detect_or_warn':
                        fixup_pp = FFmpegFixupM4aPP(self)
                        if fixup_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(fixup_pp)
                        else:
                            self.report_warning(
                                '%s: writing DASH m4a. '
                                'Only some players support this container. %s'
                                % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                # Fixup 3: malformed AAC bitstream from HLS downloads.
                # Precedence is intentional: m3u8_native always, plain m3u8
                # only when the native HLS downloader was preferred.
                if (info_dict.get('protocol') == 'm3u8_native' or
                        info_dict.get('protocol') == 'm3u8' and
                        self.params.get('hls_prefer_native')):
                    if fixup_policy == 'warn':
                        self.report_warning('%s: malformated aac bitstream.' % (
                            info_dict['id']))
                    elif fixup_policy == 'detect_or_warn':
                        fixup_pp = FFmpegFixupM3u8PP(self)
                        if fixup_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(fixup_pp)
                        else:
                            self.report_warning(
                                '%s: malformated aac bitstream. %s'
                                % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                try:
                    self.post_process(filename, info_dict)
                except (PostProcessingError) as err:
                    self.report_error('postprocessing: %s' % str(err))
                    return
                self.record_download_archive(info_dict)
1771
1772     def download(self, url_list):
1773         """Download a given list of URLs."""
1774         outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
1775         if (len(url_list) > 1 and
1776                 '%' not in outtmpl and
1777                 self.params.get('max_downloads') != 1):
1778             raise SameFileError(outtmpl)
1779
1780         for url in url_list:
1781             try:
1782                 # It also downloads the videos
1783                 res = self.extract_info(
1784                     url, force_generic_extractor=self.params.get('force_generic_extractor', False))
1785             except UnavailableVideoError:
1786                 self.report_error('unable to download video')
1787             except MaxDownloadsReached:
1788                 self.to_screen('[info] Maximum number of downloaded files reached.')
1789                 raise
1790             else:
1791                 if self.params.get('dump_single_json', False):
1792                     self.to_stdout(json.dumps(res))
1793
1794         return self._download_retcode
1795
1796     def download_with_info_file(self, info_filename):
1797         with contextlib.closing(fileinput.FileInput(
1798                 [info_filename], mode='r',
1799                 openhook=fileinput.hook_encoded('utf-8'))) as f:
1800             # FileInput doesn't have a read method, we can't call json.load
1801             info = self.filter_requested_info(json.loads('\n'.join(f)))
1802         try:
1803             self.process_ie_result(info, download=True)
1804         except DownloadError:
1805             webpage_url = info.get('webpage_url')
1806             if webpage_url is not None:
1807                 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
1808                 return self.download([webpage_url])
1809             else:
1810                 raise
1811         return self._download_retcode
1812
1813     @staticmethod
1814     def filter_requested_info(info_dict):
1815         return dict(
1816             (k, v) for k, v in info_dict.items()
1817             if k not in ['requested_formats', 'requested_subtitles'])
1818
1819     def post_process(self, filename, ie_info):
1820         """Run all the postprocessors on the given file."""
1821         info = dict(ie_info)
1822         info['filepath'] = filename
1823         pps_chain = []
1824         if ie_info.get('__postprocessors') is not None:
1825             pps_chain.extend(ie_info['__postprocessors'])
1826         pps_chain.extend(self._pps)
1827         for pp in pps_chain:
1828             files_to_delete = []
1829             try:
1830                 files_to_delete, info = pp.run(info)
1831             except PostProcessingError as e:
1832                 self.report_error(e.msg)
1833             if files_to_delete and not self.params.get('keepvideo', False):
1834                 for old_filename in files_to_delete:
1835                     self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
1836                     try:
1837                         os.remove(encodeFilename(old_filename))
1838                     except (IOError, OSError):
1839                         self.report_warning('Unable to remove downloaded original file')
1840
1841     def _make_archive_id(self, info_dict):
1842         # Future-proof against any change in case
1843         # and backwards compatibility with prior versions
1844         extractor = info_dict.get('extractor_key')
1845         if extractor is None:
1846             if 'id' in info_dict:
1847                 extractor = info_dict.get('ie_key')  # key in a playlist
1848         if extractor is None:
1849             return None  # Incomplete video information
1850         return extractor.lower() + ' ' + info_dict['id']
1851
1852     def in_download_archive(self, info_dict):
1853         fn = self.params.get('download_archive')
1854         if fn is None:
1855             return False
1856
1857         vid_id = self._make_archive_id(info_dict)
1858         if vid_id is None:
1859             return False  # Incomplete video information
1860
1861         try:
1862             with locked_file(fn, 'r', encoding='utf-8') as archive_file:
1863                 for line in archive_file:
1864                     if line.strip() == vid_id:
1865                         return True
1866         except IOError as ioe:
1867             if ioe.errno != errno.ENOENT:
1868                 raise
1869         return False
1870
1871     def record_download_archive(self, info_dict):
1872         fn = self.params.get('download_archive')
1873         if fn is None:
1874             return
1875         vid_id = self._make_archive_id(info_dict)
1876         assert vid_id
1877         with locked_file(fn, 'a', encoding='utf-8') as archive_file:
1878             archive_file.write(vid_id + '\n')
1879
1880     @staticmethod
1881     def format_resolution(format, default='unknown'):
1882         if format.get('vcodec') == 'none':
1883             return 'audio only'
1884         if format.get('resolution') is not None:
1885             return format['resolution']
1886         if format.get('height') is not None:
1887             if format.get('width') is not None:
1888                 res = '%sx%s' % (format['width'], format['height'])
1889             else:
1890                 res = '%sp' % format['height']
1891         elif format.get('width') is not None:
1892             res = '%dx?' % format['width']
1893         else:
1894             res = default
1895         return res
1896
1897     def _format_note(self, fdict):
1898         res = ''
1899         if fdict.get('ext') in ['f4f', 'f4m']:
1900             res += '(unsupported) '
1901         if fdict.get('language'):
1902             if res:
1903                 res += ' '
1904             res += '[%s] ' % fdict['language']
1905         if fdict.get('format_note') is not None:
1906             res += fdict['format_note'] + ' '
1907         if fdict.get('tbr') is not None:
1908             res += '%4dk ' % fdict['tbr']
1909         if fdict.get('container') is not None:
1910             if res:
1911                 res += ', '
1912             res += '%s container' % fdict['container']
1913         if (fdict.get('vcodec') is not None and
1914                 fdict.get('vcodec') != 'none'):
1915             if res:
1916                 res += ', '
1917             res += fdict['vcodec']
1918             if fdict.get('vbr') is not None:
1919                 res += '@'
1920         elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
1921             res += 'video@'
1922         if fdict.get('vbr') is not None:
1923             res += '%4dk' % fdict['vbr']
1924         if fdict.get('fps') is not None:
1925             if res:
1926                 res += ', '
1927             res += '%sfps' % fdict['fps']
1928         if fdict.get('acodec') is not None:
1929             if res:
1930                 res += ', '
1931             if fdict['acodec'] == 'none':
1932                 res += 'video only'
1933             else:
1934                 res += '%-5s' % fdict['acodec']
1935         elif fdict.get('abr') is not None:
1936             if res:
1937                 res += ', '
1938             res += 'audio'
1939         if fdict.get('abr') is not None:
1940             res += '@%3dk' % fdict['abr']
1941         if fdict.get('asr') is not None:
1942             res += ' (%5dHz)' % fdict['asr']
1943         if fdict.get('filesize') is not None:
1944             if res:
1945                 res += ', '
1946             res += format_bytes(fdict['filesize'])
1947         elif fdict.get('filesize_approx') is not None:
1948             if res:
1949                 res += ', '
1950             res += '~' + format_bytes(fdict['filesize_approx'])
1951         return res
1952
1953     def list_formats(self, info_dict):
1954         formats = info_dict.get('formats', [info_dict])
1955         table = [
1956             [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)]
1957             for f in formats
1958             if f.get('preference') is None or f['preference'] >= -1000]
1959         if len(formats) > 1:
1960             table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'
1961
1962         header_line = ['format code', 'extension', 'resolution', 'note']
1963         self.to_screen(
1964             '[info] Available formats for %s:\n%s' %
1965             (info_dict['id'], render_table(header_line, table)))
1966
1967     def list_thumbnails(self, info_dict):
1968         thumbnails = info_dict.get('thumbnails')
1969         if not thumbnails:
1970             self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
1971             return
1972
1973         self.to_screen(
1974             '[info] Thumbnails for %s:' % info_dict['id'])
1975         self.to_screen(render_table(
1976             ['ID', 'width', 'height', 'URL'],
1977             [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
1978
1979     def list_subtitles(self, video_id, subtitles, name='subtitles'):
1980         if not subtitles:
1981             self.to_screen('%s has no %s' % (video_id, name))
1982             return
1983         self.to_screen(
1984             'Available %s for %s:' % (name, video_id))
1985         self.to_screen(render_table(
1986             ['Language', 'formats'],
1987             [[lang, ', '.join(f['ext'] for f in reversed(formats))]
1988                 for lang, formats in subtitles.items()]))
1989
1990     def urlopen(self, req):
1991         """ Start an HTTP download """
1992         if isinstance(req, compat_basestring):
1993             req = sanitized_Request(req)
1994         return self._opener.open(req, timeout=self._socket_timeout)
1995
    def print_debug_header(self):
        """Write debug information (encodings, versions, proxies) to the
        debug stream; no-op unless the 'verbose' param is set."""
        if not self.params.get('verbose'):
            return

        if type('') is not compat_str:
            # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
            self.report_warning(
                'Your Python is broken! Update to a newer and supported version')

        # sys.stdout may lack an 'encoding' attribute (e.g. when replaced)
        stdout_encoding = getattr(
            sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
        encoding_str = (
            '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
                locale.getpreferredencoding(),
                sys.getfilesystemencoding(),
                stdout_encoding,
                self.get_encoding()))
        write_string(encoding_str, encoding=None)

        self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
        if _LAZY_LOADER:
            self._write_string('[debug] Lazy loading extractors enabled' + '\n')
        try:
            # Best-effort: report the git revision when running from a checkout
            sp = subprocess.Popen(
                ['git', 'rev-parse', '--short', 'HEAD'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                cwd=os.path.dirname(os.path.abspath(__file__)))
            out, err = sp.communicate()
            out = out.decode().strip()
            if re.match('[0-9a-f]+', out):
                self._write_string('[debug] Git HEAD: ' + out + '\n')
        except Exception:
            try:
                # Python 2 only - clear the pending exception state
                sys.exc_clear()
            except Exception:
                pass
        self._write_string('[debug] Python version %s - %s\n' % (
            platform.python_version(), platform_name()))

        exe_versions = FFmpegPostProcessor.get_versions(self)
        exe_versions['rtmpdump'] = rtmpdump_version()
        # Only report helpers whose version could actually be determined
        exe_str = ', '.join(
            '%s %s' % (exe, v)
            for exe, v in sorted(exe_versions.items())
            if v
        )
        if not exe_str:
            exe_str = 'none'
        self._write_string('[debug] exe versions: %s\n' % exe_str)

        # Collect proxies from every opener handler that exposes them
        proxy_map = {}
        for handler in self._opener.handlers:
            if hasattr(handler, 'proxies'):
                proxy_map.update(handler.proxies)
        self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

        if self.params.get('call_home', False):
            # Opt-in: contact yt-dl.org to report the public IP and check
            # whether a newer release is available
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
            self._write_string('[debug] Public IP address: %s\n' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode('utf-8')
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)
2062
    def _setup_opener(self):
        """Build the urllib opener used for all HTTP(S) requests, wiring in
        cookie handling, proxies, debug tracing, data: URLs and a disabled
        file: handler.  Stores the result in self._opener."""
        timeout_val = self.params.get('socket_timeout')
        # Default socket timeout: 600 seconds
        self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')

        if opts_cookiefile is None:
            # In-memory cookie jar, not persisted to disk
            self.cookiejar = compat_cookiejar.CookieJar()
        else:
            opts_cookiefile = compat_expanduser(opts_cookiefile)
            self.cookiejar = compat_cookiejar.MozillaCookieJar(
                opts_cookiefile)
            # Only load when readable - the file may not exist yet
            if os.access(opts_cookiefile, os.R_OK):
                self.cookiejar.load()

        cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
        if opts_proxy is not None:
            if opts_proxy == '':
                # An empty --proxy explicitly disables all proxies
                proxies = {}
            else:
                proxies = {'http': opts_proxy, 'https': opts_proxy}
        else:
            # No explicit proxy: fall back to environment settings
            proxies = compat_urllib_request.getproxies()
            # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']
        proxy_handler = PerRequestProxyHandler(proxies)

        # debuglevel=1 makes the handlers print raw HTTP traffic
        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
        data_handler = compat_urllib_request_DataHandler()

        # When passing our own FileHandler instance, build_opener won't add the
        # default FileHandler and allows us to disable the file protocol, which
        # can be used for malicious purposes (see
        # https://github.com/rg3/youtube-dl/issues/8227)
        file_handler = compat_urllib_request.FileHandler()

        def file_open(*args, **kwargs):
            raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
        file_handler.file_open = file_open

        opener = compat_urllib_request.build_opener(
            proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler)

        # Delete the default user-agent header, which would otherwise apply in
        # cases where our custom HTTP handler doesn't come into play
        # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
        opener.addheaders = []
        self._opener = opener
2115
2116     def encode(self, s):
2117         if isinstance(s, bytes):
2118             return s  # Already encoded
2119
2120         try:
2121             return s.encode(self.get_encoding())
2122         except UnicodeEncodeError as err:
2123             err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
2124             raise
2125
2126     def get_encoding(self):
2127         encoding = self.params.get('encoding')
2128         if encoding is None:
2129             encoding = preferredencoding()
2130         return encoding
2131
    def _write_thumbnails(self, info_dict, filename):
        """Download thumbnail(s) and save them next to *filename*.

        With the 'writethumbnail' param only the last thumbnail in the list
        is saved (presumably the best one - confirm against the extractor's
        sort order); with 'write_all_thumbnails' every thumbnail is saved.
        Each saved path is recorded in t['filename'].  Download failures
        only produce a warning, never an exception.
        """
        if self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails')
            if thumbnails:
                thumbnails = [thumbnails[-1]]
        elif self.params.get('write_all_thumbnails', False):
            thumbnails = info_dict.get('thumbnails')
        else:
            return

        if not thumbnails:
            # No thumbnails present, so return immediately
            return

        for t in thumbnails:
            thumb_ext = determine_ext(t['url'], 'jpg')
            # Disambiguate the filenames/messages only when saving several
            suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
            thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
            t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext

            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
                self.to_screen('[%s] %s: Thumbnail %sis already present' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id))
            else:
                self.to_screen('[%s] %s: Downloading thumbnail %s...' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id))
                try:
                    uf = self.urlopen(t['url'])
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                                   (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    self.report_warning('Unable to download thumbnail "%s": %s' %
                                        (t['url'], error_to_compat_str(err)))