Merge branch 'master' of github.com:rg3/youtube-dl
[youtube-dl] / youtube_dl / YoutubeDL.py
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import absolute_import, unicode_literals
5
6 import collections
7 import datetime
8 import errno
9 import io
10 import itertools
11 import json
12 import locale
13 import operator
14 import os
15 import platform
16 import re
17 import shutil
18 import subprocess
19 import socket
20 import sys
21 import time
22 import traceback
23
24 if os.name == 'nt':
25     import ctypes
26
27 from .compat import (
28     compat_basestring,
29     compat_cookiejar,
30     compat_expanduser,
31     compat_http_client,
32     compat_kwargs,
33     compat_str,
34     compat_urllib_error,
35     compat_urllib_request,
36 )
37 from .utils import (
38     escape_url,
39     ContentTooShortError,
40     date_from_str,
41     DateRange,
42     DEFAULT_OUTTMPL,
43     determine_ext,
44     DownloadError,
45     encodeFilename,
46     ExtractorError,
47     format_bytes,
48     formatSeconds,
49     get_term_width,
50     locked_file,
51     make_HTTPS_handler,
52     MaxDownloadsReached,
53     PagedList,
54     parse_filesize,
55     PostProcessingError,
56     platform_name,
57     preferredencoding,
58     render_table,
59     SameFileError,
60     sanitize_filename,
61     std_headers,
62     subtitles_filename,
63     takewhile_inclusive,
64     UnavailableVideoError,
65     url_basename,
66     version_tuple,
67     write_json_file,
68     write_string,
69     YoutubeDLHandler,
70     prepend_extension,
71     args_to_str,
72     age_restricted,
73 )
74 from .cache import Cache
75 from .extractor import get_info_extractor, gen_extractors
76 from .downloader import get_suitable_downloader
77 from .downloader.rtmp import rtmpdump_version
78 from .postprocessor import (
79     FFmpegFixupM4aPP,
80     FFmpegFixupStretchedPP,
81     FFmpegMergerPP,
82     FFmpegPostProcessor,
83     get_postprocessor,
84 )
85 from .version import __version__
86
87
88 class YoutubeDL(object):
89     """YoutubeDL class.
90
91     YoutubeDL objects are the ones responsible of downloading the
92     actual video file and writing it to disk if the user has requested
93     it, among some other tasks. In most cases there should be one per
94     program. As, given a video URL, the downloader doesn't know how to
95     extract all the needed information (a task that the InfoExtractors do), it
96     has to pass the URL to one of them.
97
98     For this, YoutubeDL objects have a method that allows
99     InfoExtractors to be registered in a given order. When it is passed
100     a URL, the YoutubeDL object hands it to the first InfoExtractor it
101     finds that reports being able to handle it. The InfoExtractor extracts
102     all the information about the video or videos the URL refers to, and
103     YoutubeDL process the extracted information, possibly using a File
104     Downloader to download the video.
105
106     YoutubeDL objects accept a lot of parameters. In order not to saturate
107     the object constructor with arguments, it receives a dictionary of
108     options instead. These options are available through the params
109     attribute for the InfoExtractors to use. The YoutubeDL also
110     registers itself as the downloader in charge for the InfoExtractors
111     that are added to it, so this is a "mutual registration".
112
113     Available options:
114
115     username:          Username for authentication purposes.
116     password:          Password for authentication purposes.
117     videopassword:     Password for accessing a video.
118     usenetrc:          Use netrc for authentication instead.
119     verbose:           Print additional info to stdout.
120     quiet:             Do not print messages to stdout.
121     no_warnings:       Do not print out anything for warnings.
122     forceurl:          Force printing final URL.
123     forcetitle:        Force printing title.
124     forceid:           Force printing ID.
125     forcethumbnail:    Force printing thumbnail URL.
126     forcedescription:  Force printing description.
127     forcefilename:     Force printing final filename.
128     forceduration:     Force printing duration.
129     forcejson:         Force printing info_dict as JSON.
130     dump_single_json:  Force printing the info_dict of the whole playlist
131                        (or video) as a single JSON line.
132     simulate:          Do not download the video files.
133     format:            Video format code. See options.py for more information.
134     format_limit:      Highest quality format to try.
135     outtmpl:           Template for output names.
136     restrictfilenames: Do not allow "&" and spaces in file names
137     ignoreerrors:      Do not stop on download errors.
138     nooverwrites:      Prevent overwriting files.
139     playliststart:     Playlist item to start at.
140     playlistend:       Playlist item to end at.
141     playlist_items:    Specific indices of playlist to download.
142     playlistreverse:   Download playlist items in reverse order.
143     matchtitle:        Download only matching titles.
144     rejecttitle:       Reject downloads for matching titles.
145     logger:            Log messages to a logging.Logger instance.
146     logtostderr:       Log messages to stderr instead of stdout.
147     writedescription:  Write the video description to a .description file
148     writeinfojson:     Write the video description to a .info.json file
149     writeannotations:  Write the video annotations to a .annotations.xml file
150     writethumbnail:    Write the thumbnail image to a file
151     write_all_thumbnails:  Write all thumbnail formats to files
152     writesubtitles:    Write the video subtitles to a file
153     writeautomaticsub: Write the automatic subtitles to a file
154     allsubtitles:      Downloads all the subtitles of the video
155                        (requires writesubtitles or writeautomaticsub)
156     listsubtitles:     Lists all available subtitles for the video
157     subtitlesformat:   Subtitle format [srt/sbv/vtt] (default=srt)
158     subtitleslangs:    List of languages of the subtitles to download
159     keepvideo:         Keep the video file after post-processing
160     daterange:         A DateRange object, download only if the upload_date is in the range.
161     skip_download:     Skip the actual download of the video file
162     cachedir:          Location of the cache files in the filesystem.
163                        False to disable filesystem cache.
164     noplaylist:        Download single video instead of a playlist if in doubt.
165     age_limit:         An integer representing the user's age in years.
166                        Unsuitable videos for the given age are skipped.
167     min_views:         An integer representing the minimum view count the video
168                        must have in order to not be skipped.
169                        Videos without view count information are always
170                        downloaded. None for no limit.
171     max_views:         An integer representing the maximum view count.
172                        Videos that are more popular than that are not
173                        downloaded.
174                        Videos without view count information are always
175                        downloaded. None for no limit.
176     download_archive:  File name of a file where all downloads are recorded.
177                        Videos already present in the file are not downloaded
178                        again.
179     cookiefile:        File name where cookies should be read from and dumped to.
180     nocheckcertificate:Do not verify SSL certificates
181     prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
182                        At the moment, this is only supported by YouTube.
183     proxy:             URL of the proxy server to use
184     socket_timeout:    Time to wait for unresponsive hosts, in seconds
185     bidi_workaround:   Work around buggy terminals without bidirectional text
186                        support, using fribidi
187     debug_printtraffic:Print out sent and received HTTP traffic
188     include_ads:       Download ads as well
189     default_search:    Prepend this string if an input url is not valid.
190                        'auto' for elaborate guessing
191     encoding:          Use this encoding instead of the system-specified.
192     extract_flat:      Do not resolve URLs, return the immediate result.
193                        Pass in 'in_playlist' to only show this behavior for
194                        playlist items.
195     postprocessors:    A list of dictionaries, each with an entry
196                        * key:  The name of the postprocessor. See
197                                youtube_dl/postprocessor/__init__.py for a list.
198                        as well as any further keyword arguments for the
199                        postprocessor.
200     progress_hooks:    A list of functions that get called on download
201                        progress, with a dictionary with the entries
202                        * status: One of "downloading" and "finished".
203                                  Check this first and ignore unknown values.
204
205                        If status is one of "downloading" or "finished", the
206                        following properties may also be present:
207                        * filename: The final filename (always present)
208                        * downloaded_bytes: Bytes on disk
209                        * total_bytes: Size of the whole file, None if unknown
210                        * tmpfilename: The filename we're currently writing to
211                        * eta: The estimated time in seconds, None if unknown
212                        * speed: The download speed in bytes/second, None if
213                                 unknown
214
215                        Progress hooks are guaranteed to be called at least once
216                        (with status "finished") if the download is successful.
217     merge_output_format: Extension to use when merging formats.
218     fixup:             Automatically correct known faults of the file.
219                        One of:
220                        - "never": do nothing
221                        - "warn": only emit a warning
222                        - "detect_or_warn": check whether we can do anything
223                                            about it, warn otherwise (default)
224     source_address:    (Experimental) Client-side IP address to bind to.
225     call_home:         Boolean, true iff we are allowed to contact the
226                        youtube-dl servers for debugging.
227     sleep_interval:    Number of seconds to sleep before each download.
228     external_downloader:  Executable of the external downloader to call.
229     listformats:       Print an overview of available video formats and exit.
230     list_thumbnails:   Print a table of all thumbnails and exit.
231
232
233     The following parameters are not used by YoutubeDL itself, they are used by
234     the FileDownloader:
235     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
236     noresizebuffer, retries, continuedl, noprogress, consoletitle,
237     xattr_set_filesize.
238
239     The following options are used by the post processors:
240     prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
241                        otherwise prefer avconv.
242     exec_cmd:          Arbitrary command to run after downloading
243     """
244
245     params = None
246     _ies = []
247     _pps = []
248     _download_retcode = None
249     _num_downloads = None
250     _screen_file = None
251
252     def __init__(self, params=None, auto_init=True):
253         """Create a FileDownloader object with the given options."""
254         if params is None:
255             params = {}
256         self._ies = []
257         self._ies_instances = {}
258         self._pps = []
259         self._progress_hooks = []
260         self._download_retcode = 0
261         self._num_downloads = 0
262         self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
263         self._err_file = sys.stderr
264         self.params = params
265         self.cache = Cache(self)
266
267         if params.get('bidi_workaround', False):
268             try:
269                 import pty
270                 master, slave = pty.openpty()
271                 width = get_term_width()
272                 if width is None:
273                     width_args = []
274                 else:
275                     width_args = ['-w', str(width)]
276                 sp_kwargs = dict(
277                     stdin=subprocess.PIPE,
278                     stdout=slave,
279                     stderr=self._err_file)
280                 try:
281                     self._output_process = subprocess.Popen(
282                         ['bidiv'] + width_args, **sp_kwargs
283                     )
284                 except OSError:
285                     self._output_process = subprocess.Popen(
286                         ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
287                 self._output_channel = os.fdopen(master, 'rb')
288             except OSError as ose:
289                 if ose.errno == 2:
290                     self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that  fribidi  is an executable file in one of the directories in your $PATH.')
291                 else:
292                     raise
293
294         if (sys.version_info >= (3,) and sys.platform != 'win32' and
295                 sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
296                 and not params.get('restrictfilenames', False)):
297             # On Python 3, the Unicode filesystem API will throw errors (#1474)
298             self.report_warning(
299                 'Assuming --restrict-filenames since file system encoding '
300                 'cannot encode all characters. '
301                 'Set the LC_ALL environment variable to fix this.')
302             self.params['restrictfilenames'] = True
303
304         if '%(stitle)s' in self.params.get('outtmpl', ''):
305             self.report_warning('%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')
306
307         self._setup_opener()
308
309         if auto_init:
310             self.print_debug_header()
311             self.add_default_info_extractors()
312
313         for pp_def_raw in self.params.get('postprocessors', []):
314             pp_class = get_postprocessor(pp_def_raw['key'])
315             pp_def = dict(pp_def_raw)
316             del pp_def['key']
317             pp = pp_class(self, **compat_kwargs(pp_def))
318             self.add_post_processor(pp)
319
320         for ph in self.params.get('progress_hooks', []):
321             self.add_progress_hook(ph)
322
323     def warn_if_short_id(self, argv):
324         # short YouTube ID starting with dash?
325         idxs = [
326             i for i, a in enumerate(argv)
327             if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
328         if idxs:
329             correct_argv = (
330                 ['youtube-dl'] +
331                 [a for i, a in enumerate(argv) if i not in idxs] +
332                 ['--'] + [argv[i] for i in idxs]
333             )
334             self.report_warning(
335                 'Long argument string detected. '
336                 'Use -- to separate parameters and URLs, like this:\n%s\n' %
337                 args_to_str(correct_argv))
338
339     def add_info_extractor(self, ie):
340         """Add an InfoExtractor object to the end of the list."""
341         self._ies.append(ie)
342         self._ies_instances[ie.ie_key()] = ie
343         ie.set_downloader(self)
344
345     def get_info_extractor(self, ie_key):
346         """
347         Get an instance of an IE with name ie_key, it will try to get one from
348         the _ies list, if there's no instance it will create a new one and add
349         it to the extractor list.
350         """
351         ie = self._ies_instances.get(ie_key)
352         if ie is None:
353             ie = get_info_extractor(ie_key)()
354             self.add_info_extractor(ie)
355         return ie
356
357     def add_default_info_extractors(self):
358         """
359         Add the InfoExtractors returned by gen_extractors to the end of the list
360         """
361         for ie in gen_extractors():
362             self.add_info_extractor(ie)
363
364     def add_post_processor(self, pp):
365         """Add a PostProcessor object to the end of the chain."""
366         self._pps.append(pp)
367         pp.set_downloader(self)
368
369     def add_progress_hook(self, ph):
370         """Add the progress hook (currently only for the file downloader)"""
371         self._progress_hooks.append(ph)
372
373     def _bidi_workaround(self, message):
374         if not hasattr(self, '_output_channel'):
375             return message
376
377         assert hasattr(self, '_output_process')
378         assert isinstance(message, compat_str)
379         line_count = message.count('\n') + 1
380         self._output_process.stdin.write((message + '\n').encode('utf-8'))
381         self._output_process.stdin.flush()
382         res = ''.join(self._output_channel.readline().decode('utf-8')
383                       for _ in range(line_count))
384         return res[:-len('\n')]
385
386     def to_screen(self, message, skip_eol=False):
387         """Print message to stdout if not in quiet mode."""
388         return self.to_stdout(message, skip_eol, check_quiet=True)
389
390     def _write_string(self, s, out=None):
391         write_string(s, out=out, encoding=self.params.get('encoding'))
392
393     def to_stdout(self, message, skip_eol=False, check_quiet=False):
394         """Print message to stdout if not in quiet mode."""
395         if self.params.get('logger'):
396             self.params['logger'].debug(message)
397         elif not check_quiet or not self.params.get('quiet', False):
398             message = self._bidi_workaround(message)
399             terminator = ['\n', ''][skip_eol]
400             output = message + terminator
401
402             self._write_string(output, self._screen_file)
403
404     def to_stderr(self, message):
405         """Print message to stderr."""
406         assert isinstance(message, compat_str)
407         if self.params.get('logger'):
408             self.params['logger'].error(message)
409         else:
410             message = self._bidi_workaround(message)
411             output = message + '\n'
412             self._write_string(output, self._err_file)
413
414     def to_console_title(self, message):
415         if not self.params.get('consoletitle', False):
416             return
417         if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
418             # c_wchar_p() might not be necessary if `message` is
419             # already of type unicode()
420             ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
421         elif 'TERM' in os.environ:
422             self._write_string('\033]0;%s\007' % message, self._screen_file)
423
424     def save_console_title(self):
425         if not self.params.get('consoletitle', False):
426             return
427         if 'TERM' in os.environ:
428             # Save the title on stack
429             self._write_string('\033[22;0t', self._screen_file)
430
431     def restore_console_title(self):
432         if not self.params.get('consoletitle', False):
433             return
434         if 'TERM' in os.environ:
435             # Restore the title from stack
436             self._write_string('\033[23;0t', self._screen_file)
437
438     def __enter__(self):
439         self.save_console_title()
440         return self
441
442     def __exit__(self, *args):
443         self.restore_console_title()
444
445         if self.params.get('cookiefile') is not None:
446             self.cookiejar.save()
447
448     def trouble(self, message=None, tb=None):
449         """Determine action to take when a download problem appears.
450
451         Depending on if the downloader has been configured to ignore
452         download errors or not, this method may throw an exception or
453         not when errors are found, after printing the message.
454
455         tb, if given, is additional traceback information.
456         """
457         if message is not None:
458             self.to_stderr(message)
459         if self.params.get('verbose'):
460             if tb is None:
461                 if sys.exc_info()[0]:  # if .trouble has been called from an except block
462                     tb = ''
463                     if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
464                         tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
465                     tb += compat_str(traceback.format_exc())
466                 else:
467                     tb_data = traceback.format_list(traceback.extract_stack())
468                     tb = ''.join(tb_data)
469             self.to_stderr(tb)
470         if not self.params.get('ignoreerrors', False):
471             if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
472                 exc_info = sys.exc_info()[1].exc_info
473             else:
474                 exc_info = sys.exc_info()
475             raise DownloadError(message, exc_info)
476         self._download_retcode = 1
477
478     def report_warning(self, message):
479         '''
480         Print the message to stderr, it will be prefixed with 'WARNING:'
481         If stderr is a tty file the 'WARNING:' will be colored
482         '''
483         if self.params.get('logger') is not None:
484             self.params['logger'].warning(message)
485         else:
486             if self.params.get('no_warnings'):
487                 return
488             if self._err_file.isatty() and os.name != 'nt':
489                 _msg_header = '\033[0;33mWARNING:\033[0m'
490             else:
491                 _msg_header = 'WARNING:'
492             warning_message = '%s %s' % (_msg_header, message)
493             self.to_stderr(warning_message)
494
495     def report_error(self, message, tb=None):
496         '''
497         Do the same as trouble, but prefixes the message with 'ERROR:', colored
498         in red if stderr is a tty file.
499         '''
500         if self._err_file.isatty() and os.name != 'nt':
501             _msg_header = '\033[0;31mERROR:\033[0m'
502         else:
503             _msg_header = 'ERROR:'
504         error_message = '%s %s' % (_msg_header, message)
505         self.trouble(error_message, tb)
506
507     def report_file_already_downloaded(self, file_name):
508         """Report file has already been fully downloaded."""
509         try:
510             self.to_screen('[download] %s has already been downloaded' % file_name)
511         except UnicodeEncodeError:
512             self.to_screen('[download] The file has already been downloaded')
513
514     def prepare_filename(self, info_dict):
515         """Generate the output filename."""
516         try:
517             template_dict = dict(info_dict)
518
519             template_dict['epoch'] = int(time.time())
520             autonumber_size = self.params.get('autonumber_size')
521             if autonumber_size is None:
522                 autonumber_size = 5
523             autonumber_templ = '%0' + str(autonumber_size) + 'd'
524             template_dict['autonumber'] = autonumber_templ % self._num_downloads
525             if template_dict.get('playlist_index') is not None:
526                 template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index'])
527             if template_dict.get('resolution') is None:
528                 if template_dict.get('width') and template_dict.get('height'):
529                     template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
530                 elif template_dict.get('height'):
531                     template_dict['resolution'] = '%sp' % template_dict['height']
532                 elif template_dict.get('width'):
533                     template_dict['resolution'] = '?x%d' % template_dict['width']
534
535             sanitize = lambda k, v: sanitize_filename(
536                 compat_str(v),
537                 restricted=self.params.get('restrictfilenames'),
538                 is_id=(k == 'id'))
539             template_dict = dict((k, sanitize(k, v))
540                                  for k, v in template_dict.items()
541                                  if v is not None)
542             template_dict = collections.defaultdict(lambda: 'NA', template_dict)
543
544             outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
545             tmpl = compat_expanduser(outtmpl)
546             filename = tmpl % template_dict
547             # Temporary fix for #4787
548             # 'Treat' all problem characters by passing filename through preferredencoding
549             # to workaround encoding issues with subprocess on python2 @ Windows
550             if sys.version_info < (3, 0) and sys.platform == 'win32':
551                 filename = encodeFilename(filename, True).decode(preferredencoding())
552             return filename
553         except ValueError as err:
554             self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
555             return None
556
557     def _match_entry(self, info_dict):
558         """ Returns None iff the file should be downloaded """
559
560         video_title = info_dict.get('title', info_dict.get('id', 'video'))
561         if 'title' in info_dict:
562             # This can happen when we're just evaluating the playlist
563             title = info_dict['title']
564             matchtitle = self.params.get('matchtitle', False)
565             if matchtitle:
566                 if not re.search(matchtitle, title, re.IGNORECASE):
567                     return '"' + title + '" title did not match pattern "' + matchtitle + '"'
568             rejecttitle = self.params.get('rejecttitle', False)
569             if rejecttitle:
570                 if re.search(rejecttitle, title, re.IGNORECASE):
571                     return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
572         date = info_dict.get('upload_date', None)
573         if date is not None:
574             dateRange = self.params.get('daterange', DateRange())
575             if date not in dateRange:
576                 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
577         view_count = info_dict.get('view_count', None)
578         if view_count is not None:
579             min_views = self.params.get('min_views')
580             if min_views is not None and view_count < min_views:
581                 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
582             max_views = self.params.get('max_views')
583             if max_views is not None and view_count > max_views:
584                 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
585         if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
586             return 'Skipping "%s" because it is age restricted' % title
587         if self.in_download_archive(info_dict):
588             return '%s has already been recorded in archive' % video_title
589         return None
590
591     @staticmethod
592     def add_extra_info(info_dict, extra_info):
593         '''Set the keys from extra_info in info dict if they are missing'''
594         for key, value in extra_info.items():
595             info_dict.setdefault(key, value)
596
597     def extract_info(self, url, download=True, ie_key=None, extra_info={},
598                      process=True):
599         '''
600         Returns a list with a dictionary for each video we find.
601         If 'download', also downloads the videos.
602         extra_info is a dict containing the extra values to add to each result
603          '''
604
605         if ie_key:
606             ies = [self.get_info_extractor(ie_key)]
607         else:
608             ies = self._ies
609
610         for ie in ies:
611             if not ie.suitable(url):
612                 continue
613
614             if not ie.working():
615                 self.report_warning('The program functionality for this site has been marked as broken, '
616                                     'and will probably not work.')
617
618             try:
619                 ie_result = ie.extract(url)
620                 if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
621                     break
622                 if isinstance(ie_result, list):
623                     # Backwards compatibility: old IE result format
624                     ie_result = {
625                         '_type': 'compat_list',
626                         'entries': ie_result,
627                     }
628                 self.add_default_extra_info(ie_result, ie, url)
629                 if process:
630                     return self.process_ie_result(ie_result, download, extra_info)
631                 else:
632                     return ie_result
633             except ExtractorError as de:  # An error we somewhat expected
634                 self.report_error(compat_str(de), de.format_traceback())
635                 break
636             except MaxDownloadsReached:
637                 raise
638             except Exception as e:
639                 if self.params.get('ignoreerrors', False):
640                     self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
641                     break
642                 else:
643                     raise
644         else:
645             self.report_error('no suitable InfoExtractor for URL %s' % url)
646
647     def add_default_extra_info(self, ie_result, ie, url):
648         self.add_extra_info(ie_result, {
649             'extractor': ie.IE_NAME,
650             'webpage_url': url,
651             'webpage_url_basename': url_basename(url),
652             'extractor_key': ie.ie_key(),
653         })
654
    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.

        NOTE(review): 'extra_info' is a mutable default argument; it is only
        read here, never mutated, so this is safe as long as that holds.
        """

        # Dispatch on the result type; plain videos are the default.
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            extract_flat = self.params.get('extract_flat', False)
            # --flat-playlist: do not resolve URL references any further.
            # 'in_playlist' only flattens entries that came from a playlist
            # (signalled by 'playlist' being present in extra_info).
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
                    extract_flat is True):
                if self.params.get('forcejson', False):
                    self.to_stdout(json.dumps(ie_result))
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            return self.process_video_result(ie_result, download=download)
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(ie_result['url'],
                                     download,
                                     ie_key=ie_result.get('ie_key'),
                                     extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # Non-None fields from the embedding page take precedence over
            # the freshly extracted ones, except '_type' and 'url', which
            # must come from the new extraction.
            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)

            # A nested url_transparent would recurse indefinitely.
            assert new_result.get('_type') != 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type == 'playlist' or result_type == 'multi_video':
            # We process each entry in the playlist
            playlist = ie_result.get('title', None) or ie_result.get('id', None)
            self.to_screen('[download] Downloading playlist: %s' % playlist)

            playlist_results = []

            # playliststart is converted to a 0-based slice start.
            playliststart = self.params.get('playliststart', 1) - 1
            playlistend = self.params.get('playlistend', None)
            # For backwards compatibility, interpret -1 as whole list
            if playlistend == -1:
                playlistend = None

            playlistitems_str = self.params.get('playlist_items', None)
            playlistitems = None
            if playlistitems_str is not None:
                def iter_playlistitems(format):
                    # Expand a spec such as '1-3,7' into 1, 2, 3, 7 (1-based).
                    for string_segment in format.split(','):
                        if '-' in string_segment:
                            start, end = string_segment.split('-')
                            for item in range(int(start), int(end) + 1):
                                yield int(item)
                        else:
                            yield int(string_segment)
                # NOTE: this is a generator, so it is always truthy below and
                # may only be consumed once per branch.
                playlistitems = iter_playlistitems(playlistitems_str)

            # Entries may be a concrete list, a lazily-paged PagedList, or
            # any other iterable; each case slices differently.
            ie_entries = ie_result['entries']
            if isinstance(ie_entries, list):
                n_all_entries = len(ie_entries)
                if playlistitems:
                    entries = [ie_entries[i - 1] for i in playlistitems]
                else:
                    entries = ie_entries[playliststart:playlistend]
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
                    (ie_result['extractor'], playlist, n_all_entries, n_entries))
            elif isinstance(ie_entries, PagedList):
                if playlistitems:
                    entries = []
                    for item in playlistitems:
                        entries.extend(ie_entries.getslice(
                            item - 1, item
                        ))
                else:
                    entries = ie_entries.getslice(
                        playliststart, playlistend)
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Downloading %d videos" %
                    (ie_result['extractor'], playlist, n_entries))
            else:  # iterable
                if playlistitems:
                    # Materialize the whole iterable so 1-based indexing works.
                    entry_list = list(ie_entries)
                    entries = [entry_list[i - 1] for i in playlistitems]
                else:
                    entries = list(itertools.islice(
                        ie_entries, playliststart, playlistend))
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Downloading %d videos" %
                    (ie_result['extractor'], playlist, n_entries))

            if self.params.get('playlistreverse', False):
                entries = entries[::-1]

            for i, entry in enumerate(entries, 1):
                self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
                extra = {
                    'n_entries': n_entries,
                    'playlist': playlist,
                    'playlist_id': ie_result.get('id'),
                    'playlist_title': ie_result.get('title'),
                    # NOTE(review): with --playlist-items this is the position
                    # within the selected entries offset by playliststart, not
                    # the item's original playlist position — confirm intended.
                    'playlist_index': i + playliststart,
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                }

                # Skip entries rejected by --match-title, date range, etc.
                reason = self._match_entry(entry)
                if reason is not None:
                    self.to_screen('[download] ' + reason)
                    continue

                entry_result = self.process_ie_result(entry,
                                                      download=download,
                                                      extra_info=extra)
                playlist_results.append(entry_result)
            ie_result['entries'] = playlist_results
            return ie_result
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                # Backfill the standard metadata on each legacy entry.
                self.add_extra_info(
                    r,
                    {
                        'extractor': ie_result['extractor'],
                        'webpage_url': ie_result['webpage_url'],
                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
                        'extractor_key': ie_result['extractor_key'],
                    }
                )
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
816
817     def _apply_format_filter(self, format_spec, available_formats):
818         " Returns a tuple of the remaining format_spec and filtered formats "
819
820         OPERATORS = {
821             '<': operator.lt,
822             '<=': operator.le,
823             '>': operator.gt,
824             '>=': operator.ge,
825             '=': operator.eq,
826             '!=': operator.ne,
827         }
828         operator_rex = re.compile(r'''(?x)\s*\[
829             (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps)
830             \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
831             (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
832             \]$
833             ''' % '|'.join(map(re.escape, OPERATORS.keys())))
834         m = operator_rex.search(format_spec)
835         if m:
836             try:
837                 comparison_value = int(m.group('value'))
838             except ValueError:
839                 comparison_value = parse_filesize(m.group('value'))
840                 if comparison_value is None:
841                     comparison_value = parse_filesize(m.group('value') + 'B')
842                 if comparison_value is None:
843                     raise ValueError(
844                         'Invalid value %r in format specification %r' % (
845                             m.group('value'), format_spec))
846             op = OPERATORS[m.group('op')]
847
848         if not m:
849             STR_OPERATORS = {
850                 '=': operator.eq,
851                 '!=': operator.ne,
852             }
853             str_operator_rex = re.compile(r'''(?x)\s*\[
854                 \s*(?P<key>ext|acodec|vcodec|container|protocol)
855                 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
856                 \s*(?P<value>[a-zA-Z0-9_-]+)
857                 \s*\]$
858                 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
859             m = str_operator_rex.search(format_spec)
860             if m:
861                 comparison_value = m.group('value')
862                 op = STR_OPERATORS[m.group('op')]
863
864         if not m:
865             raise ValueError('Invalid format specification %r' % format_spec)
866
867         def _filter(f):
868             actual_value = f.get(m.group('key'))
869             if actual_value is None:
870                 return m.group('none_inclusive')
871             return op(actual_value, comparison_value)
872         new_formats = [f for f in available_formats if _filter(f)]
873
874         new_format_spec = format_spec[:-len(m.group(0))]
875         if not new_format_spec:
876             new_format_spec = 'best'
877
878         return (new_format_spec, new_formats)
879
880     def select_format(self, format_spec, available_formats):
881         while format_spec.endswith(']'):
882             format_spec, available_formats = self._apply_format_filter(
883                 format_spec, available_formats)
884         if not available_formats:
885             return None
886
887         if format_spec == 'best' or format_spec is None:
888             return available_formats[-1]
889         elif format_spec == 'worst':
890             return available_formats[0]
891         elif format_spec == 'bestaudio':
892             audio_formats = [
893                 f for f in available_formats
894                 if f.get('vcodec') == 'none']
895             if audio_formats:
896                 return audio_formats[-1]
897         elif format_spec == 'worstaudio':
898             audio_formats = [
899                 f for f in available_formats
900                 if f.get('vcodec') == 'none']
901             if audio_formats:
902                 return audio_formats[0]
903         elif format_spec == 'bestvideo':
904             video_formats = [
905                 f for f in available_formats
906                 if f.get('acodec') == 'none']
907             if video_formats:
908                 return video_formats[-1]
909         elif format_spec == 'worstvideo':
910             video_formats = [
911                 f for f in available_formats
912                 if f.get('acodec') == 'none']
913             if video_formats:
914                 return video_formats[0]
915         else:
916             extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
917             if format_spec in extensions:
918                 filter_f = lambda f: f['ext'] == format_spec
919             else:
920                 filter_f = lambda f: f['format_id'] == format_spec
921             matches = list(filter(filter_f, available_formats))
922             if matches:
923                 return matches[-1]
924         return None
925
926     def _calc_headers(self, info_dict):
927         res = std_headers.copy()
928
929         add_headers = info_dict.get('http_headers')
930         if add_headers:
931             res.update(add_headers)
932
933         cookies = self._calc_cookies(info_dict)
934         if cookies:
935             res['Cookie'] = cookies
936
937         return res
938
939     def _calc_cookies(self, info_dict):
940         class _PseudoRequest(object):
941             def __init__(self, url):
942                 self.url = url
943                 self.headers = {}
944                 self.unverifiable = False
945
946             def add_unredirected_header(self, k, v):
947                 self.headers[k] = v
948
949             def get_full_url(self):
950                 return self.url
951
952             def is_unverifiable(self):
953                 return self.unverifiable
954
955             def has_header(self, h):
956                 return h in self.headers
957
958             def get_header(self, h, default=None):
959                 return self.headers.get(h, default)
960
961         pr = _PseudoRequest(info_dict['url'])
962         self.cookiejar.add_cookie_header(pr)
963         return pr.headers.get('Cookie')
964
    def process_video_result(self, info_dict, download=True):
        """
        Validate and normalize a single video result, select the requested
        format(s) per self.params['format'] and, if 'download', pass each
        selected format to process_info().

        Returns the info_dict updated in place with the last selected format,
        or None when only a listing (--list-formats / --list-thumbnails) was
        requested.

        Raises ExtractorError on missing mandatory fields, empty format lists
        or when no format satisfies the request.
        """
        assert info_dict.get('_type', 'video') == 'video'

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result')
        if 'title' not in info_dict:
            raise ExtractorError('Missing "title" field in extractor result')

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        # Normalize a lone 'thumbnail' field into the 'thumbnails' list.
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if thumbnails:
            # Sort worst-first so the last entry is the preferred thumbnail;
            # None preferences sort before numeric ones.
            thumbnails.sort(key=lambda t: (
                t.get('preference'), t.get('width'), t.get('height'),
                t.get('id'), t.get('url')))
            for i, t in enumerate(thumbnails):
                if 'width' in t and 'height' in t:
                    t['resolution'] = '%dx%d' % (t['width'], t['height'])
                if t.get('id') is None:
                    t['id'] = '%d' % i

        if thumbnails and 'thumbnail' not in info_dict:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if 'display_id' not in info_dict and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        # Derive upload_date (YYYYMMDD, UTC) from a numeric timestamp.
        if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
            # Working around negative timestamps in Windows
            # (see http://bugs.python.org/issue1646728)
            if info_dict['timestamp'] < 0 and os.name == 'nt':
                info_dict['timestamp'] = 0
            upload_date = datetime.datetime.utcfromtimestamp(
                info_dict['timestamp'])
            info_dict['upload_date'] = upload_date.strftime('%Y%m%d')

        # These extractors handle format selection themselves
        if info_dict['extractor'] in ['Youku']:
            if download:
                self.process_info(info_dict)
            return info_dict

        # We now pick which formats have to be downloaded
        if info_dict.get('formats') is None:
            # There's only one format available
            formats = [info_dict]
        else:
            formats = info_dict['formats']

        if not formats:
            raise ExtractorError('No video formats found!')

        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            if 'url' not in format:
                raise ExtractorError('Missing "url" key in result (index %d)' % i)

            if format.get('format_id') is None:
                format['format_id'] = compat_str(i)
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
                )
            # Automatically determine file extension if missing
            if 'ext' not in format:
                format['ext'] = determine_ext(format['url']).lower()
            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)

        # --format-limit: keep formats up to and including the given one
        # (formats are assumed sorted worst-first).
        format_limit = self.params.get('format_limit', None)
        if format_limit:
            formats = list(takewhile_inclusive(
                lambda f: f['format_id'] != format_limit, formats
            ))

        # TODO Central sorting goes here

        if formats[0] is not info_dict:
            # only set the 'formats' field if the original info_dict lists them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats
        if self.params.get('listformats'):
            self.list_formats(info_dict)
            return
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
            return

        req_format = self.params.get('format')
        if req_format is None:
            req_format = 'best'
        formats_to_download = []
        # The -1 is for supporting YoutubeIE
        if req_format in ('-1', 'all'):
            formats_to_download = formats
        else:
            for rfstr in req_format.split(','):
                # We can accept formats requested in the format: 34/5/best, we pick
                # the first that is available, starting from left
                req_formats = rfstr.split('/')
                for rf in req_formats:
                    if re.match(r'.+?\+.+?', rf) is not None:
                        # Two formats have been requested like '137+139'
                        format_1, format_2 = rf.split('+')
                        formats_info = (self.select_format(format_1, formats),
                                        self.select_format(format_2, formats))
                        if all(formats_info):
                            # The first format must contain the video and the
                            # second the audio
                            if formats_info[0].get('vcodec') == 'none':
                                self.report_error('The first format must '
                                                  'contain the video, try using '
                                                  '"-f %s+%s"' % (format_2, format_1))
                                return
                            output_ext = (
                                formats_info[0]['ext']
                                if self.params.get('merge_output_format') is None
                                else self.params['merge_output_format'])
                            # Synthesize a combined entry: video-related fields
                            # from the first format, audio fields from the second.
                            selected_format = {
                                'requested_formats': formats_info,
                                'format': '%s+%s' % (formats_info[0].get('format'),
                                                     formats_info[1].get('format')),
                                'format_id': '%s+%s' % (formats_info[0].get('format_id'),
                                                        formats_info[1].get('format_id')),
                                'width': formats_info[0].get('width'),
                                'height': formats_info[0].get('height'),
                                'resolution': formats_info[0].get('resolution'),
                                'fps': formats_info[0].get('fps'),
                                'vcodec': formats_info[0].get('vcodec'),
                                'vbr': formats_info[0].get('vbr'),
                                'stretched_ratio': formats_info[0].get('stretched_ratio'),
                                'acodec': formats_info[1].get('acodec'),
                                'abr': formats_info[1].get('abr'),
                                'ext': output_ext,
                            }
                        else:
                            selected_format = None
                    else:
                        selected_format = self.select_format(rf, formats)
                    if selected_format is not None:
                        formats_to_download.append(selected_format)
                        break
        if not formats_to_download:
            raise ExtractorError('requested format not available',
                                 expected=True)

        if download:
            if len(formats_to_download) > 1:
                self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
            for format in formats_to_download:
                new_info = dict(info_dict)
                new_info.update(format)
                self.process_info(new_info)
        # We update the info dict with the best quality format (backwards compatibility)
        info_dict.update(formats_to_download[-1])
        return info_dict
1135
1136     def process_info(self, info_dict):
1137         """Process a single resolved IE result."""
1138
1139         assert info_dict.get('_type', 'video') == 'video'
1140
1141         max_downloads = self.params.get('max_downloads')
1142         if max_downloads is not None:
1143             if self._num_downloads >= int(max_downloads):
1144                 raise MaxDownloadsReached()
1145
1146         info_dict['fulltitle'] = info_dict['title']
1147         if len(info_dict['title']) > 200:
1148             info_dict['title'] = info_dict['title'][:197] + '...'
1149
1150         # Keep for backwards compatibility
1151         info_dict['stitle'] = info_dict['title']
1152
1153         if 'format' not in info_dict:
1154             info_dict['format'] = info_dict['ext']
1155
1156         reason = self._match_entry(info_dict)
1157         if reason is not None:
1158             self.to_screen('[download] ' + reason)
1159             return
1160
1161         self._num_downloads += 1
1162
1163         info_dict['_filename'] = filename = self.prepare_filename(info_dict)
1164
1165         # Forced printings
1166         if self.params.get('forcetitle', False):
1167             self.to_stdout(info_dict['fulltitle'])
1168         if self.params.get('forceid', False):
1169             self.to_stdout(info_dict['id'])
1170         if self.params.get('forceurl', False):
1171             if info_dict.get('requested_formats') is not None:
1172                 for f in info_dict['requested_formats']:
1173                     self.to_stdout(f['url'] + f.get('play_path', ''))
1174             else:
1175                 # For RTMP URLs, also include the playpath
1176                 self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
1177         if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
1178             self.to_stdout(info_dict['thumbnail'])
1179         if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
1180             self.to_stdout(info_dict['description'])
1181         if self.params.get('forcefilename', False) and filename is not None:
1182             self.to_stdout(filename)
1183         if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
1184             self.to_stdout(formatSeconds(info_dict['duration']))
1185         if self.params.get('forceformat', False):
1186             self.to_stdout(info_dict['format'])
1187         if self.params.get('forcejson', False):
1188             self.to_stdout(json.dumps(info_dict))
1189
1190         # Do nothing else if in simulate mode
1191         if self.params.get('simulate', False):
1192             return
1193
1194         if filename is None:
1195             return
1196
1197         try:
1198             dn = os.path.dirname(encodeFilename(filename))
1199             if dn and not os.path.exists(dn):
1200                 os.makedirs(dn)
1201         except (OSError, IOError) as err:
1202             self.report_error('unable to create directory ' + compat_str(err))
1203             return
1204
1205         if self.params.get('writedescription', False):
1206             descfn = filename + '.description'
1207             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
1208                 self.to_screen('[info] Video description is already present')
1209             elif info_dict.get('description') is None:
1210                 self.report_warning('There\'s no description to write.')
1211             else:
1212                 try:
1213                     self.to_screen('[info] Writing video description to: ' + descfn)
1214                     with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
1215                         descfile.write(info_dict['description'])
1216                 except (OSError, IOError):
1217                     self.report_error('Cannot write description file ' + descfn)
1218                     return
1219
1220         if self.params.get('writeannotations', False):
1221             annofn = filename + '.annotations.xml'
1222             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
1223                 self.to_screen('[info] Video annotations are already present')
1224             else:
1225                 try:
1226                     self.to_screen('[info] Writing video annotations to: ' + annofn)
1227                     with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
1228                         annofile.write(info_dict['annotations'])
1229                 except (KeyError, TypeError):
1230                     self.report_warning('There are no annotations to write.')
1231                 except (OSError, IOError):
1232                     self.report_error('Cannot write annotations file: ' + annofn)
1233                     return
1234
1235         subtitles_are_requested = any([self.params.get('writesubtitles', False),
1236                                        self.params.get('writeautomaticsub')])
1237
1238         if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
1239             # subtitles download errors are already managed as troubles in relevant IE
1240             # that way it will silently go on when used with unsupporting IE
1241             subtitles = info_dict['subtitles']
1242             sub_format = self.params.get('subtitlesformat', 'srt')
1243             for sub_lang in subtitles.keys():
1244                 sub = subtitles[sub_lang]
1245                 if sub is None:
1246                     continue
1247                 try:
1248                     sub_filename = subtitles_filename(filename, sub_lang, sub_format)
1249                     if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
1250                         self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
1251                     else:
1252                         self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
1253                         with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
1254                             subfile.write(sub)
1255                 except (OSError, IOError):
1256                     self.report_error('Cannot write subtitles file ' + sub_filename)
1257                     return
1258
1259         if self.params.get('writeinfojson', False):
1260             infofn = os.path.splitext(filename)[0] + '.info.json'
1261             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
1262                 self.to_screen('[info] Video description metadata is already present')
1263             else:
1264                 self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
1265                 try:
1266                     write_json_file(info_dict, infofn)
1267                 except (OSError, IOError):
1268                     self.report_error('Cannot write metadata to JSON file ' + infofn)
1269                     return
1270
1271         self._write_thumbnails(info_dict, filename)
1272
1273         if not self.params.get('skip_download', False):
1274             try:
1275                 def dl(name, info):
1276                     fd = get_suitable_downloader(info, self.params)(self, self.params)
1277                     for ph in self._progress_hooks:
1278                         fd.add_progress_hook(ph)
1279                     if self.params.get('verbose'):
1280                         self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
1281                     return fd.download(name, info)
1282
1283                 if info_dict.get('requested_formats') is not None:
1284                     downloaded = []
1285                     success = True
1286                     merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
1287                     if not merger._executable:
1288                         postprocessors = []
1289                         self.report_warning('You have requested multiple '
1290                                             'formats but ffmpeg or avconv are not installed.'
1291                                             ' The formats won\'t be merged')
1292                     else:
1293                         postprocessors = [merger]
1294                     for f in info_dict['requested_formats']:
1295                         new_info = dict(info_dict)
1296                         new_info.update(f)
1297                         fname = self.prepare_filename(new_info)
1298                         fname = prepend_extension(fname, 'f%s' % f['format_id'])
1299                         downloaded.append(fname)
1300                         partial_success = dl(fname, new_info)
1301                         success = success and partial_success
1302                     info_dict['__postprocessors'] = postprocessors
1303                     info_dict['__files_to_merge'] = downloaded
1304                 else:
1305                     # Just a single file
1306                     success = dl(filename, info_dict)
1307             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
1308                 self.report_error('unable to download video data: %s' % str(err))
1309                 return
1310             except (OSError, IOError) as err:
1311                 raise UnavailableVideoError(err)
1312             except (ContentTooShortError, ) as err:
1313                 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
1314                 return
1315
1316             if success:
1317                 # Fixup content
1318                 fixup_policy = self.params.get('fixup')
1319                 if fixup_policy is None:
1320                     fixup_policy = 'detect_or_warn'
1321
1322                 stretched_ratio = info_dict.get('stretched_ratio')
1323                 if stretched_ratio is not None and stretched_ratio != 1:
1324                     if fixup_policy == 'warn':
1325                         self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
1326                             info_dict['id'], stretched_ratio))
1327                     elif fixup_policy == 'detect_or_warn':
1328                         stretched_pp = FFmpegFixupStretchedPP(self)
1329                         if stretched_pp.available:
1330                             info_dict.setdefault('__postprocessors', [])
1331                             info_dict['__postprocessors'].append(stretched_pp)
1332                         else:
1333                             self.report_warning(
1334                                 '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % (
1335                                     info_dict['id'], stretched_ratio))
1336                     else:
1337                         assert fixup_policy in ('ignore', 'never')
1338
1339                 if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash':
1340                     if fixup_policy == 'warn':
1341                         self.report_warning('%s: writing DASH m4a. Only some players support this container.' % (
1342                             info_dict['id']))
1343                     elif fixup_policy == 'detect_or_warn':
1344                         fixup_pp = FFmpegFixupM4aPP(self)
1345                         if fixup_pp.available:
1346                             info_dict.setdefault('__postprocessors', [])
1347                             info_dict['__postprocessors'].append(fixup_pp)
1348                         else:
1349                             self.report_warning(
1350                                 '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.' % (
1351                                     info_dict['id']))
1352                     else:
1353                         assert fixup_policy in ('ignore', 'never')
1354
1355                 try:
1356                     self.post_process(filename, info_dict)
1357                 except (PostProcessingError) as err:
1358                     self.report_error('postprocessing: %s' % str(err))
1359                     return
1360                 self.record_download_archive(info_dict)
1361
1362     def download(self, url_list):
1363         """Download a given list of URLs."""
1364         outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
1365         if (len(url_list) > 1 and
1366                 '%' not in outtmpl
1367                 and self.params.get('max_downloads') != 1):
1368             raise SameFileError(outtmpl)
1369
1370         for url in url_list:
1371             try:
1372                 # It also downloads the videos
1373                 res = self.extract_info(url)
1374             except UnavailableVideoError:
1375                 self.report_error('unable to download video')
1376             except MaxDownloadsReached:
1377                 self.to_screen('[info] Maximum number of downloaded files reached.')
1378                 raise
1379             else:
1380                 if self.params.get('dump_single_json', False):
1381                     self.to_stdout(json.dumps(res))
1382
1383         return self._download_retcode
1384
1385     def download_with_info_file(self, info_filename):
1386         with io.open(info_filename, 'r', encoding='utf-8') as f:
1387             info = json.load(f)
1388         try:
1389             self.process_ie_result(info, download=True)
1390         except DownloadError:
1391             webpage_url = info.get('webpage_url')
1392             if webpage_url is not None:
1393                 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
1394                 return self.download([webpage_url])
1395             else:
1396                 raise
1397         return self._download_retcode
1398
1399     def post_process(self, filename, ie_info):
1400         """Run all the postprocessors on the given file."""
1401         info = dict(ie_info)
1402         info['filepath'] = filename
1403         pps_chain = []
1404         if ie_info.get('__postprocessors') is not None:
1405             pps_chain.extend(ie_info['__postprocessors'])
1406         pps_chain.extend(self._pps)
1407         for pp in pps_chain:
1408             keep_video = None
1409             old_filename = info['filepath']
1410             try:
1411                 keep_video_wish, info = pp.run(info)
1412                 if keep_video_wish is not None:
1413                     if keep_video_wish:
1414                         keep_video = keep_video_wish
1415                     elif keep_video is None:
1416                         # No clear decision yet, let IE decide
1417                         keep_video = keep_video_wish
1418             except PostProcessingError as e:
1419                 self.report_error(e.msg)
1420             if keep_video is False and not self.params.get('keepvideo', False):
1421                 try:
1422                     self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
1423                     os.remove(encodeFilename(old_filename))
1424                 except (IOError, OSError):
1425                     self.report_warning('Unable to remove downloaded video file')
1426
1427     def _make_archive_id(self, info_dict):
1428         # Future-proof against any change in case
1429         # and backwards compatibility with prior versions
1430         extractor = info_dict.get('extractor_key')
1431         if extractor is None:
1432             if 'id' in info_dict:
1433                 extractor = info_dict.get('ie_key')  # key in a playlist
1434         if extractor is None:
1435             return None  # Incomplete video information
1436         return extractor.lower() + ' ' + info_dict['id']
1437
1438     def in_download_archive(self, info_dict):
1439         fn = self.params.get('download_archive')
1440         if fn is None:
1441             return False
1442
1443         vid_id = self._make_archive_id(info_dict)
1444         if vid_id is None:
1445             return False  # Incomplete video information
1446
1447         try:
1448             with locked_file(fn, 'r', encoding='utf-8') as archive_file:
1449                 for line in archive_file:
1450                     if line.strip() == vid_id:
1451                         return True
1452         except IOError as ioe:
1453             if ioe.errno != errno.ENOENT:
1454                 raise
1455         return False
1456
1457     def record_download_archive(self, info_dict):
1458         fn = self.params.get('download_archive')
1459         if fn is None:
1460             return
1461         vid_id = self._make_archive_id(info_dict)
1462         assert vid_id
1463         with locked_file(fn, 'a', encoding='utf-8') as archive_file:
1464             archive_file.write(vid_id + '\n')
1465
1466     @staticmethod
1467     def format_resolution(format, default='unknown'):
1468         if format.get('vcodec') == 'none':
1469             return 'audio only'
1470         if format.get('resolution') is not None:
1471             return format['resolution']
1472         if format.get('height') is not None:
1473             if format.get('width') is not None:
1474                 res = '%sx%s' % (format['width'], format['height'])
1475             else:
1476                 res = '%sp' % format['height']
1477         elif format.get('width') is not None:
1478             res = '?x%d' % format['width']
1479         else:
1480             res = default
1481         return res
1482
    def _format_note(self, fdict):
        """Build a short human-readable note for one format dict.

        Pieces (format note, bitrates, container, codecs, fps, sample rate,
        filesize) are appended in a fixed order; ', ' separators are only
        inserted once some text has already accumulated.
        """
        res = ''
        # f4f/f4m (Adobe HDS) downloads are not supported
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported) '
        if fdict.get('format_note') is not None:
            res += fdict['format_note'] + ' '
        if fdict.get('tbr') is not None:
            res += '%4dk ' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None and
                fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            # '@' glues the codec name to the video bitrate appended below
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            # No video codec known but separate audio/video bitrates exist
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            res += ', %sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            # Audio bitrate without a codec name
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            # '~' marks the size as an estimate
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res
1532
1533     def list_formats(self, info_dict):
1534         def line(format, idlen=20):
1535             return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
1536                 format['format_id'],
1537                 format['ext'],
1538                 self.format_resolution(format),
1539                 self._format_note(format),
1540             ))
1541
1542         formats = info_dict.get('formats', [info_dict])
1543         idlen = max(len('format code'),
1544                     max(len(f['format_id']) for f in formats))
1545         formats_s = [
1546             line(f, idlen) for f in formats
1547             if f.get('preference') is None or f['preference'] >= -1000]
1548         if len(formats) > 1:
1549             formats_s[-1] += (' ' if self._format_note(formats[-1]) else '') + '(best)'
1550
1551         header_line = line({
1552             'format_id': 'format code', 'ext': 'extension',
1553             'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
1554         self.to_screen(
1555             '[info] Available formats for %s:\n%s\n%s' %
1556             (info_dict['id'], header_line, '\n'.join(formats_s)))
1557
1558     def list_thumbnails(self, info_dict):
1559         thumbnails = info_dict.get('thumbnails')
1560         if not thumbnails:
1561             tn_url = info_dict.get('thumbnail')
1562             if tn_url:
1563                 thumbnails = [{'id': '0', 'url': tn_url}]
1564             else:
1565                 self.to_screen(
1566                     '[info] No thumbnails present for %s' % info_dict['id'])
1567                 return
1568
1569         self.to_screen(
1570             '[info] Thumbnails for %s:' % info_dict['id'])
1571         self.to_screen(render_table(
1572             ['ID', 'width', 'height', 'URL'],
1573             [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
1574
1575     def urlopen(self, req):
1576         """ Start an HTTP download """
1577
1578         # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
1579         # always respected by websites, some tend to give out URLs with non percent-encoded
1580         # non-ASCII characters (see telemb.py, ard.py [#3412])
1581         # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
1582         # To work around aforementioned issue we will replace request's original URL with
1583         # percent-encoded one
1584         req_is_string = isinstance(req, compat_basestring)
1585         url = req if req_is_string else req.get_full_url()
1586         url_escaped = escape_url(url)
1587
1588         # Substitute URL if any change after escaping
1589         if url != url_escaped:
1590             if req_is_string:
1591                 req = url_escaped
1592             else:
1593                 req = compat_urllib_request.Request(
1594                     url_escaped, data=req.data, headers=req.headers,
1595                     origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
1596
1597         return self._opener.open(req, timeout=self._socket_timeout)
1598
1599     def print_debug_header(self):
1600         if not self.params.get('verbose'):
1601             return
1602
1603         if type('') is not compat_str:
1604             # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
1605             self.report_warning(
1606                 'Your Python is broken! Update to a newer and supported version')
1607
1608         stdout_encoding = getattr(
1609             sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
1610         encoding_str = (
1611             '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
1612                 locale.getpreferredencoding(),
1613                 sys.getfilesystemencoding(),
1614                 stdout_encoding,
1615                 self.get_encoding()))
1616         write_string(encoding_str, encoding=None)
1617
1618         self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
1619         try:
1620             sp = subprocess.Popen(
1621                 ['git', 'rev-parse', '--short', 'HEAD'],
1622                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1623                 cwd=os.path.dirname(os.path.abspath(__file__)))
1624             out, err = sp.communicate()
1625             out = out.decode().strip()
1626             if re.match('[0-9a-f]+', out):
1627                 self._write_string('[debug] Git HEAD: ' + out + '\n')
1628         except:
1629             try:
1630                 sys.exc_clear()
1631             except:
1632                 pass
1633         self._write_string('[debug] Python version %s - %s\n' % (
1634             platform.python_version(), platform_name()))
1635
1636         exe_versions = FFmpegPostProcessor.get_versions()
1637         exe_versions['rtmpdump'] = rtmpdump_version()
1638         exe_str = ', '.join(
1639             '%s %s' % (exe, v)
1640             for exe, v in sorted(exe_versions.items())
1641             if v
1642         )
1643         if not exe_str:
1644             exe_str = 'none'
1645         self._write_string('[debug] exe versions: %s\n' % exe_str)
1646
1647         proxy_map = {}
1648         for handler in self._opener.handlers:
1649             if hasattr(handler, 'proxies'):
1650                 proxy_map.update(handler.proxies)
1651         self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
1652
1653         if self.params.get('call_home', False):
1654             ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
1655             self._write_string('[debug] Public IP address: %s\n' % ipaddr)
1656             latest_version = self.urlopen(
1657                 'https://yt-dl.org/latest/version').read().decode('utf-8')
1658             if version_tuple(latest_version) > version_tuple(__version__):
1659                 self.report_warning(
1660                     'You are using an outdated version (newest version: %s)! '
1661                     'See https://yt-dl.org/update if you need help updating.' %
1662                     latest_version)
1663
1664     def _setup_opener(self):
1665         timeout_val = self.params.get('socket_timeout')
1666         self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
1667
1668         opts_cookiefile = self.params.get('cookiefile')
1669         opts_proxy = self.params.get('proxy')
1670
1671         if opts_cookiefile is None:
1672             self.cookiejar = compat_cookiejar.CookieJar()
1673         else:
1674             self.cookiejar = compat_cookiejar.MozillaCookieJar(
1675                 opts_cookiefile)
1676             if os.access(opts_cookiefile, os.R_OK):
1677                 self.cookiejar.load()
1678
1679         cookie_processor = compat_urllib_request.HTTPCookieProcessor(
1680             self.cookiejar)
1681         if opts_proxy is not None:
1682             if opts_proxy == '':
1683                 proxies = {}
1684             else:
1685                 proxies = {'http': opts_proxy, 'https': opts_proxy}
1686         else:
1687             proxies = compat_urllib_request.getproxies()
1688             # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
1689             if 'http' in proxies and 'https' not in proxies:
1690                 proxies['https'] = proxies['http']
1691         proxy_handler = compat_urllib_request.ProxyHandler(proxies)
1692
1693         debuglevel = 1 if self.params.get('debug_printtraffic') else 0
1694         https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
1695         ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
1696         opener = compat_urllib_request.build_opener(
1697             https_handler, proxy_handler, cookie_processor, ydlh)
1698         # Delete the default user-agent header, which would otherwise apply in
1699         # cases where our custom HTTP handler doesn't come into play
1700         # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
1701         opener.addheaders = []
1702         self._opener = opener
1703
1704     def encode(self, s):
1705         if isinstance(s, bytes):
1706             return s  # Already encoded
1707
1708         try:
1709             return s.encode(self.get_encoding())
1710         except UnicodeEncodeError as err:
1711             err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
1712             raise
1713
1714     def get_encoding(self):
1715         encoding = self.params.get('encoding')
1716         if encoding is None:
1717             encoding = preferredencoding()
1718         return encoding
1719
1720     def _write_thumbnails(self, info_dict, filename):
1721         if self.params.get('writethumbnail', False):
1722             thumbnails = info_dict.get('thumbnails')
1723             if thumbnails:
1724                 thumbnails = [thumbnails[-1]]
1725         elif self.params.get('write_all_thumbnails', False):
1726             thumbnails = info_dict.get('thumbnails')
1727         else:
1728             return
1729
1730         if not thumbnails:
1731             # No thumbnails present, so return immediately
1732             return
1733
1734         for t in thumbnails:
1735             thumb_ext = determine_ext(t['url'], 'jpg')
1736             suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
1737             thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
1738             thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext
1739
1740             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
1741                 self.to_screen('[%s] %s: Thumbnail %sis already present' %
1742                                (info_dict['extractor'], info_dict['id'], thumb_display_id))
1743             else:
1744                 self.to_screen('[%s] %s: Downloading thumbnail %s...' %
1745                                (info_dict['extractor'], info_dict['id'], thumb_display_id))
1746                 try:
1747                     uf = self.urlopen(t['url'])
1748                     with open(thumb_filename, 'wb') as thumbf:
1749                         shutil.copyfileobj(uf, thumbf)
1750                     self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
1751                                    (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
1752                 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
1753                     self.report_warning('Unable to download thumbnail "%s": %s' %
1754                                         (t['url'], compat_str(err)))