[YoutubeDL] _calc_cookies: add get_header method to _PseudoRequest (#4861)
[youtube-dl] / youtube_dl / YoutubeDL.py
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import absolute_import, unicode_literals
5
6 import collections
7 import datetime
8 import errno
9 import io
10 import itertools
11 import json
12 import locale
13 import operator
14 import os
15 import platform
16 import re
17 import shutil
18 import subprocess
19 import socket
20 import sys
21 import time
22 import traceback
23
24 if os.name == 'nt':
25     import ctypes
26
27 from .compat import (
28     compat_basestring,
29     compat_cookiejar,
30     compat_expanduser,
31     compat_http_client,
32     compat_kwargs,
33     compat_str,
34     compat_urllib_error,
35     compat_urllib_request,
36 )
37 from .utils import (
38     escape_url,
39     ContentTooShortError,
40     date_from_str,
41     DateRange,
42     DEFAULT_OUTTMPL,
43     determine_ext,
44     DownloadError,
45     encodeFilename,
46     ExtractorError,
47     format_bytes,
48     formatSeconds,
49     get_term_width,
50     locked_file,
51     make_HTTPS_handler,
52     MaxDownloadsReached,
53     PagedList,
54     parse_filesize,
55     PostProcessingError,
56     platform_name,
57     preferredencoding,
58     render_table,
59     SameFileError,
60     sanitize_filename,
61     std_headers,
62     subtitles_filename,
63     takewhile_inclusive,
64     UnavailableVideoError,
65     url_basename,
66     version_tuple,
67     write_json_file,
68     write_string,
69     YoutubeDLHandler,
70     prepend_extension,
71     args_to_str,
72     age_restricted,
73 )
74 from .cache import Cache
75 from .extractor import get_info_extractor, gen_extractors
76 from .downloader import get_suitable_downloader
77 from .downloader.rtmp import rtmpdump_version
78 from .postprocessor import (
79     FFmpegFixupM4aPP,
80     FFmpegFixupStretchedPP,
81     FFmpegMergerPP,
82     FFmpegPostProcessor,
83     get_postprocessor,
84 )
85 from .version import __version__
86
87
88 class YoutubeDL(object):
89     """YoutubeDL class.
90
91     YoutubeDL objects are the ones responsible for downloading the
92     actual video file and writing it to disk if the user has requested
93     it, among some other tasks. In most cases there should be one per
94     program. As, given a video URL, the downloader doesn't know how to
95     extract all the needed information, task that InfoExtractors do, it
96     has to pass the URL to one of them.
97
98     For this, YoutubeDL objects have a method that allows
99     InfoExtractors to be registered in a given order. When it is passed
100     a URL, the YoutubeDL object hands it to the first InfoExtractor it
101     finds that reports being able to handle it. The InfoExtractor extracts
102     all the information about the video or videos the URL refers to, and
103     YoutubeDL processes the extracted information, possibly using a File
104     Downloader to download the video.
105
106     YoutubeDL objects accept a lot of parameters. In order not to saturate
107     the object constructor with arguments, it receives a dictionary of
108     options instead. These options are available through the params
109     attribute for the InfoExtractors to use. The YoutubeDL also
110     registers itself as the downloader in charge for the InfoExtractors
111     that are added to it, so this is a "mutual registration".
112
113     Available options:
114
115     username:          Username for authentication purposes.
116     password:          Password for authentication purposes.
117     videopassword:     Password for accessing a video.
118     usenetrc:          Use netrc for authentication instead.
119     verbose:           Print additional info to stdout.
120     quiet:             Do not print messages to stdout.
121     no_warnings:       Do not print out anything for warnings.
122     forceurl:          Force printing final URL.
123     forcetitle:        Force printing title.
124     forceid:           Force printing ID.
125     forcethumbnail:    Force printing thumbnail URL.
126     forcedescription:  Force printing description.
127     forcefilename:     Force printing final filename.
128     forceduration:     Force printing duration.
129     forcejson:         Force printing info_dict as JSON.
130     dump_single_json:  Force printing the info_dict of the whole playlist
131                        (or video) as a single JSON line.
132     simulate:          Do not download the video files.
133     format:            Video format code. See options.py for more information.
134     format_limit:      Highest quality format to try.
135     outtmpl:           Template for output names.
136     restrictfilenames: Do not allow "&" and spaces in file names
137     ignoreerrors:      Do not stop on download errors.
138     nooverwrites:      Prevent overwriting files.
139     playliststart:     Playlist item to start at.
140     playlistend:       Playlist item to end at.
141     playlist_items:    Specific indices of playlist to download.
142     playlistreverse:   Download playlist items in reverse order.
143     matchtitle:        Download only matching titles.
144     rejecttitle:       Reject downloads for matching titles.
145     logger:            Log messages to a logging.Logger instance.
146     logtostderr:       Log messages to stderr instead of stdout.
147     writedescription:  Write the video description to a .description file
148     writeinfojson:     Write the video description to a .info.json file
149     writeannotations:  Write the video annotations to a .annotations.xml file
150     writethumbnail:    Write the thumbnail image to a file
151     write_all_thumbnails:  Write all thumbnail formats to files
152     writesubtitles:    Write the video subtitles to a file
153     writeautomaticsub: Write the automatic subtitles to a file
154     allsubtitles:      Downloads all the subtitles of the video
155                        (requires writesubtitles or writeautomaticsub)
156     listsubtitles:     Lists all available subtitles for the video
157     subtitlesformat:   Subtitle format [srt/sbv/vtt] (default=srt)
158     subtitleslangs:    List of languages of the subtitles to download
159     keepvideo:         Keep the video file after post-processing
160     daterange:         A DateRange object, download only if the upload_date is in the range.
161     skip_download:     Skip the actual download of the video file
162     cachedir:          Location of the cache files in the filesystem.
163                        False to disable filesystem cache.
164     noplaylist:        Download single video instead of a playlist if in doubt.
165     age_limit:         An integer representing the user's age in years.
166                        Unsuitable videos for the given age are skipped.
167     min_views:         An integer representing the minimum view count the video
168                        must have in order to not be skipped.
169                        Videos without view count information are always
170                        downloaded. None for no limit.
171     max_views:         An integer representing the maximum view count.
172                        Videos that are more popular than that are not
173                        downloaded.
174                        Videos without view count information are always
175                        downloaded. None for no limit.
176     download_archive:  File name of a file where all downloads are recorded.
177                        Videos already present in the file are not downloaded
178                        again.
179     cookiefile:        File name where cookies should be read from and dumped to.
180     nocheckcertificate:Do not verify SSL certificates
181     prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
182                        At the moment, this is only supported by YouTube.
183     proxy:             URL of the proxy server to use
184     socket_timeout:    Time to wait for unresponsive hosts, in seconds
185     bidi_workaround:   Work around buggy terminals without bidirectional text
186                        support, using fribidi
187     debug_printtraffic:Print out sent and received HTTP traffic
188     include_ads:       Download ads as well
189     default_search:    Prepend this string if an input url is not valid.
190                        'auto' for elaborate guessing
191     encoding:          Use this encoding instead of the system-specified.
192     extract_flat:      Do not resolve URLs, return the immediate result.
193                        Pass in 'in_playlist' to only show this behavior for
194                        playlist items.
195     postprocessors:    A list of dictionaries, each with an entry
196                        * key:  The name of the postprocessor. See
197                                youtube_dl/postprocessor/__init__.py for a list.
198                        as well as any further keyword arguments for the
199                        postprocessor.
200     progress_hooks:    A list of functions that get called on download
201                        progress, with a dictionary with the entries
202                        * status: One of "downloading" and "finished".
203                                  Check this first and ignore unknown values.
204
205                        If status is one of "downloading" or "finished", the
206                        following properties may also be present:
207                        * filename: The final filename (always present)
208                        * downloaded_bytes: Bytes on disk
209                        * total_bytes: Size of the whole file, None if unknown
210                        * tmpfilename: The filename we're currently writing to
211                        * eta: The estimated time in seconds, None if unknown
212                        * speed: The download speed in bytes/second, None if
213                                 unknown
214
215                        Progress hooks are guaranteed to be called at least once
216                        (with status "finished") if the download is successful.
217     merge_output_format: Extension to use when merging formats.
218     fixup:             Automatically correct known faults of the file.
219                        One of:
220                        - "never": do nothing
221                        - "warn": only emit a warning
222                        - "detect_or_warn": check whether we can do anything
223                                            about it, warn otherwise (default)
224     source_address:    (Experimental) Client-side IP address to bind to.
225     call_home:         Boolean, true iff we are allowed to contact the
226                        youtube-dl servers for debugging.
227     sleep_interval:    Number of seconds to sleep before each download.
228     external_downloader:  Executable of the external downloader to call.
229     listformats:       Print an overview of available video formats and exit.
230     list_thumbnails:   Print a table of all thumbnails and exit.
231
232
233     The following parameters are not used by YoutubeDL itself, they are used by
234     the FileDownloader:
235     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
236     noresizebuffer, retries, continuedl, noprogress, consoletitle,
237     xattr_set_filesize.
238
239     The following options are used by the post processors:
240     prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
241                        otherwise prefer avconv.
242     exec_cmd:          Arbitrary command to run after downloading
243     """
244
    # Class-level defaults; each is rebound per-instance in __init__ (the
    # class attributes only document the expected shape of the state).
    params = None  # dict of user options (see the class docstring)
    _ies = []  # registered InfoExtractor instances, in priority order
    _pps = []  # chain of PostProcessor instances
    _download_retcode = None  # accumulated process exit code (0 = success)
    _num_downloads = None  # number of files downloaded in this session
    _screen_file = None  # stream used for screen output (stdout or stderr)
    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.

        params:    dict of options (see the class docstring); stored as
                   self.params and consulted throughout.
        auto_init: when True, print the debug header and register the
                   default info extractors immediately.
        """
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        # Screen output goes to stderr instead of stdout when 'logtostderr'
        # is set (e.g. so video data can be piped through stdout).
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = params
        self.cache = Cache(self)

        if params.get('bidi_workaround', False):
            # Route screen output through an external bidi filter (bidiv or
            # fribidi) connected via a pty; see _bidi_workaround().
            try:
                import pty
                master, slave = pty.openpty()
                width = get_term_width()
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    # bidiv not available; fall back to fribidi
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == 2:  # ENOENT: neither executable was found
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that  fribidi  is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.version_info >= (3,) and sys.platform != 'win32' and
                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # On Python 3, the Unicode filesystem API will throw errors (#1474)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        if '%(stitle)s' in self.params.get('outtmpl', ''):
            self.report_warning('%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')

        self._setup_opener()

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        # Instantiate and register the configured post-processors; each entry
        # is a dict whose 'key' selects the class and whose remaining items
        # become constructor keyword arguments.
        for pp_def_raw in self.params.get('postprocessors', []):
            pp_class = get_postprocessor(pp_def_raw['key'])
            pp_def = dict(pp_def_raw)
            del pp_def['key']
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)
322
323     def warn_if_short_id(self, argv):
324         # short YouTube ID starting with dash?
325         idxs = [
326             i for i, a in enumerate(argv)
327             if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
328         if idxs:
329             correct_argv = (
330                 ['youtube-dl'] +
331                 [a for i, a in enumerate(argv) if i not in idxs] +
332                 ['--'] + [argv[i] for i in idxs]
333             )
334             self.report_warning(
335                 'Long argument string detected. '
336                 'Use -- to separate parameters and URLs, like this:\n%s\n' %
337                 args_to_str(correct_argv))
338
339     def add_info_extractor(self, ie):
340         """Add an InfoExtractor object to the end of the list."""
341         self._ies.append(ie)
342         self._ies_instances[ie.ie_key()] = ie
343         ie.set_downloader(self)
344
345     def get_info_extractor(self, ie_key):
346         """
347         Get an instance of an IE with name ie_key, it will try to get one from
348         the _ies list, if there's no instance it will create a new one and add
349         it to the extractor list.
350         """
351         ie = self._ies_instances.get(ie_key)
352         if ie is None:
353             ie = get_info_extractor(ie_key)()
354             self.add_info_extractor(ie)
355         return ie
356
357     def add_default_info_extractors(self):
358         """
359         Add the InfoExtractors returned by gen_extractors to the end of the list
360         """
361         for ie in gen_extractors():
362             self.add_info_extractor(ie)
363
364     def add_post_processor(self, pp):
365         """Add a PostProcessor object to the end of the chain."""
366         self._pps.append(pp)
367         pp.set_downloader(self)
368
369     def add_progress_hook(self, ph):
370         """Add the progress hook (currently only for the file downloader)"""
371         self._progress_hooks.append(ph)
372
373     def _bidi_workaround(self, message):
374         if not hasattr(self, '_output_channel'):
375             return message
376
377         assert hasattr(self, '_output_process')
378         assert isinstance(message, compat_str)
379         line_count = message.count('\n') + 1
380         self._output_process.stdin.write((message + '\n').encode('utf-8'))
381         self._output_process.stdin.flush()
382         res = ''.join(self._output_channel.readline().decode('utf-8')
383                       for _ in range(line_count))
384         return res[:-len('\n')]
385
386     def to_screen(self, message, skip_eol=False):
387         """Print message to stdout if not in quiet mode."""
388         return self.to_stdout(message, skip_eol, check_quiet=True)
389
390     def _write_string(self, s, out=None):
391         write_string(s, out=out, encoding=self.params.get('encoding'))
392
393     def to_stdout(self, message, skip_eol=False, check_quiet=False):
394         """Print message to stdout if not in quiet mode."""
395         if self.params.get('logger'):
396             self.params['logger'].debug(message)
397         elif not check_quiet or not self.params.get('quiet', False):
398             message = self._bidi_workaround(message)
399             terminator = ['\n', ''][skip_eol]
400             output = message + terminator
401
402             self._write_string(output, self._screen_file)
403
404     def to_stderr(self, message):
405         """Print message to stderr."""
406         assert isinstance(message, compat_str)
407         if self.params.get('logger'):
408             self.params['logger'].error(message)
409         else:
410             message = self._bidi_workaround(message)
411             output = message + '\n'
412             self._write_string(output, self._err_file)
413
    def to_console_title(self, message):
        """Set the terminal/console window title to message.

        No-op unless the 'consoletitle' option is enabled. On Windows this
        uses the Win32 console API; elsewhere an xterm-style OSC 0 escape
        sequence is written when a TERM environment is present.
        """
        if not self.params.get('consoletitle', False):
            return
        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
            # c_wchar_p() might not be necessary if `message` is
            # already of type unicode()
            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            # OSC 0 sets both the icon name and the window title
            self._write_string('\033]0;%s\007' % message, self._screen_file)
423
424     def save_console_title(self):
425         if not self.params.get('consoletitle', False):
426             return
427         if 'TERM' in os.environ:
428             # Save the title on stack
429             self._write_string('\033[22;0t', self._screen_file)
430
431     def restore_console_title(self):
432         if not self.params.get('consoletitle', False):
433             return
434         if 'TERM' in os.environ:
435             # Restore the title from stack
436             self._write_string('\033[23;0t', self._screen_file)
437
    def __enter__(self):
        """Context-manager entry: save the console title so __exit__ can
        restore it; returns self."""
        self.save_console_title()
        return self
441
    def __exit__(self, *args):
        """Context-manager exit: restore the console title and, when a
        cookie file is configured, persist the cookie jar to it."""
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save()
447
    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    # Wrapper exceptions (e.g. ExtractorError) may carry the
                    # exc_info of their original cause; include it first.
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += compat_str(traceback.format_exc())
                else:
                    # Not inside an exception handler: dump the current stack
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            # Re-raise with the wrapped exception's exc_info when available,
            # so DownloadError points at the real cause.
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        # Errors are being ignored: record failure in the exit code instead
        self._download_retcode = 1
477
478     def report_warning(self, message):
479         '''
480         Print the message to stderr, it will be prefixed with 'WARNING:'
481         If stderr is a tty file the 'WARNING:' will be colored
482         '''
483         if self.params.get('logger') is not None:
484             self.params['logger'].warning(message)
485         else:
486             if self.params.get('no_warnings'):
487                 return
488             if self._err_file.isatty() and os.name != 'nt':
489                 _msg_header = '\033[0;33mWARNING:\033[0m'
490             else:
491                 _msg_header = 'WARNING:'
492             warning_message = '%s %s' % (_msg_header, message)
493             self.to_stderr(warning_message)
494
495     def report_error(self, message, tb=None):
496         '''
497         Do the same as trouble, but prefixes the message with 'ERROR:', colored
498         in red if stderr is a tty file.
499         '''
500         if self._err_file.isatty() and os.name != 'nt':
501             _msg_header = '\033[0;31mERROR:\033[0m'
502         else:
503             _msg_header = 'ERROR:'
504         error_message = '%s %s' % (_msg_header, message)
505         self.trouble(error_message, tb)
506
507     def report_file_already_downloaded(self, file_name):
508         """Report file has already been fully downloaded."""
509         try:
510             self.to_screen('[download] %s has already been downloaded' % file_name)
511         except UnicodeEncodeError:
512             self.to_screen('[download] The file has already been downloaded')
513
    def prepare_filename(self, info_dict):
        """Generate the output filename by expanding the 'outtmpl' template
        with a sanitized copy of info_dict; returns None on template errors."""
        try:
            template_dict = dict(info_dict)

            template_dict['epoch'] = int(time.time())
            autonumber_size = self.params.get('autonumber_size')
            if autonumber_size is None:
                autonumber_size = 5
            # %(autonumber)s is zero-padded to autonumber_size digits
            autonumber_templ = '%0' + str(autonumber_size) + 'd'
            template_dict['autonumber'] = autonumber_templ % self._num_downloads
            if template_dict.get('playlist_index') is not None:
                # Zero-pad the playlist index to the width of the playlist size
                template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index'])
            if template_dict.get('resolution') is None:
                # Synthesize a resolution string from whatever dimensions we have
                if template_dict.get('width') and template_dict.get('height'):
                    template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
                elif template_dict.get('height'):
                    template_dict['resolution'] = '%sp' % template_dict['height']
                elif template_dict.get('width'):
                    template_dict['resolution'] = '?x%d' % template_dict['width']

            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id'))
            # Sanitize every value, drop Nones, then default any field the
            # template references but the dict lacks to the string 'NA'
            template_dict = dict((k, sanitize(k, v))
                                 for k, v in template_dict.items()
                                 if v is not None)
            template_dict = collections.defaultdict(lambda: 'NA', template_dict)

            outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
            tmpl = compat_expanduser(outtmpl)
            filename = tmpl % template_dict
            # Temporary fix for #4787
            # 'Treat' all problem characters by passing filename through preferredencoding
            # to workaround encoding issues with subprocess on python2 @ Windows
            if sys.version_info < (3, 0) and sys.platform == 'win32':
                filename = encodeFilename(filename, True).decode(preferredencoding())
            return filename
        except ValueError as err:
            # Raised by the % expansion for malformed templates
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None
556
557     def _match_entry(self, info_dict):
558         """ Returns None iff the file should be downloaded """
559
560         video_title = info_dict.get('title', info_dict.get('id', 'video'))
561         if 'title' in info_dict:
562             # This can happen when we're just evaluating the playlist
563             title = info_dict['title']
564             matchtitle = self.params.get('matchtitle', False)
565             if matchtitle:
566                 if not re.search(matchtitle, title, re.IGNORECASE):
567                     return '"' + title + '" title did not match pattern "' + matchtitle + '"'
568             rejecttitle = self.params.get('rejecttitle', False)
569             if rejecttitle:
570                 if re.search(rejecttitle, title, re.IGNORECASE):
571                     return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
572         date = info_dict.get('upload_date', None)
573         if date is not None:
574             dateRange = self.params.get('daterange', DateRange())
575             if date not in dateRange:
576                 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
577         view_count = info_dict.get('view_count', None)
578         if view_count is not None:
579             min_views = self.params.get('min_views')
580             if min_views is not None and view_count < min_views:
581                 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
582             max_views = self.params.get('max_views')
583             if max_views is not None and view_count > max_views:
584                 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
585         if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
586             return 'Skipping "%s" because it is age restricted' % title
587         if self.in_download_archive(info_dict):
588             return '%s has already been recorded in archive' % video_title
589         return None
590
591     @staticmethod
592     def add_extra_info(info_dict, extra_info):
593         '''Set the keys from extra_info in info dict if they are missing'''
594         for key, value in extra_info.items():
595             info_dict.setdefault(key, value)
596
597     def extract_info(self, url, download=True, ie_key=None, extra_info={},
598                      process=True):
599         '''
600         Returns a list with a dictionary for each video we find.
601         If 'download', also downloads the videos.
602         extra_info is a dict containing the extra values to add to each result
603          '''
604
605         if ie_key:
606             ies = [self.get_info_extractor(ie_key)]
607         else:
608             ies = self._ies
609
610         for ie in ies:
611             if not ie.suitable(url):
612                 continue
613
614             if not ie.working():
615                 self.report_warning('The program functionality for this site has been marked as broken, '
616                                     'and will probably not work.')
617
618             try:
619                 ie_result = ie.extract(url)
620                 if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
621                     break
622                 if isinstance(ie_result, list):
623                     # Backwards compatibility: old IE result format
624                     ie_result = {
625                         '_type': 'compat_list',
626                         'entries': ie_result,
627                     }
628                 self.add_default_extra_info(ie_result, ie, url)
629                 if process:
630                     return self.process_ie_result(ie_result, download, extra_info)
631                 else:
632                     return ie_result
633             except ExtractorError as de:  # An error we somewhat expected
634                 self.report_error(compat_str(de), de.format_traceback())
635                 break
636             except MaxDownloadsReached:
637                 raise
638             except Exception as e:
639                 if self.params.get('ignoreerrors', False):
640                     self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
641                     break
642                 else:
643                     raise
644         else:
645             self.report_error('no suitable InfoExtractor for URL %s' % url)
646
647     def add_default_extra_info(self, ie_result, ie, url):
648         self.add_extra_info(ie_result, {
649             'extractor': ie.IE_NAME,
650             'webpage_url': url,
651             'webpage_url_basename': url_basename(url),
652             'extractor_key': ie.ie_key(),
653         })
654
    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie(may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.

        Dispatches on ie_result['_type']: 'video' results are handed to
        process_video_result(); 'url'/'url_transparent' are re-extracted;
        'playlist'/'multi_video' entries are processed recursively;
        'compat_list' is a deprecated flat list of entries.

        NOTE(review): extra_info's mutable default dict is shared across
        calls; it is only read here, but confirm add_extra_info() does not
        mutate its second argument.
        """

        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            # With --flat-playlist, entries inside a playlist are returned
            # unresolved instead of being extracted.
            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
                    extract_flat is True):
                if self.params.get('forcejson', False):
                    self.to_stdout(json.dumps(ie_result))
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            return self.process_video_result(ie_result, download=download)
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(ie_result['url'],
                                     download,
                                     ie_key=ie_result.get('ie_key'),
                                     extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # Non-None fields from the embedding result override the
            # re-extracted info, except '_type' and 'url' which must come
            # from the fresh extraction.
            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)

            assert new_result.get('_type') != 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type == 'playlist' or result_type == 'multi_video':
            # We process each entry in the playlist
            playlist = ie_result.get('title', None) or ie_result.get('id', None)
            self.to_screen('[download] Downloading playlist: %s' % playlist)

            playlist_results = []

            # --playlist-start is 1-based on the command line
            playliststart = self.params.get('playliststart', 1) - 1
            playlistend = self.params.get('playlistend', None)
            # For backwards compatibility, interpret -1 as whole list
            if playlistend == -1:
                playlistend = None

            # --playlist-items, e.g. '1-3,7,10-13' (1-based indices).
            # NOTE: the generator below can only be consumed once.
            playlistitems_str = self.params.get('playlist_items', None)
            playlistitems = None
            if playlistitems_str is not None:
                def iter_playlistitems(format):
                    # NOTE(review): 'format' shadows the builtin here
                    for string_segment in format.split(','):
                        if '-' in string_segment:
                            start, end = string_segment.split('-')
                            for item in range(int(start), int(end) + 1):
                                yield int(item)
                        else:
                            yield int(string_segment)
                playlistitems = iter_playlistitems(playlistitems_str)

            ie_entries = ie_result['entries']
            if isinstance(ie_entries, list):
                n_all_entries = len(ie_entries)
                if playlistitems:
                    entries = [ie_entries[i - 1] for i in playlistitems]
                else:
                    entries = ie_entries[playliststart:playlistend]
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
                    (ie_result['extractor'], playlist, n_all_entries, n_entries))
            elif isinstance(ie_entries, PagedList):
                # Lazily-paged playlist: fetch only the requested slices
                if playlistitems:
                    entries = []
                    for item in playlistitems:
                        entries.extend(ie_entries.getslice(
                            item - 1, item
                        ))
                else:
                    entries = ie_entries.getslice(
                        playliststart, playlistend)
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Downloading %d videos" %
                    (ie_result['extractor'], playlist, n_entries))
            else:  # iterable
                if playlistitems:
                    entry_list = list(ie_entries)
                    entries = [entry_list[i - 1] for i in playlistitems]
                else:
                    entries = list(itertools.islice(
                        ie_entries, playliststart, playlistend))
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Downloading %d videos" %
                    (ie_result['extractor'], playlist, n_entries))

            if self.params.get('playlistreverse', False):
                entries = entries[::-1]

            for i, entry in enumerate(entries, 1):
                self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
                extra = {
                    'n_entries': n_entries,
                    'playlist': playlist,
                    'playlist_id': ie_result.get('id'),
                    'playlist_title': ie_result.get('title'),
                    'playlist_index': i + playliststart,
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                }

                # Skip entries rejected by --match-title etc.
                reason = self._match_entry(entry)
                if reason is not None:
                    self.to_screen('[download] ' + reason)
                    continue

                entry_result = self.process_ie_result(entry,
                                                      download=download,
                                                      extra_info=extra)
                playlist_results.append(entry_result)
            ie_result['entries'] = playlist_results
            return ie_result
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(
                    r,
                    {
                        'extractor': ie_result['extractor'],
                        'webpage_url': ie_result['webpage_url'],
                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
                        'extractor_key': ie_result['extractor_key'],
                    }
                )
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
816
817     def _apply_format_filter(self, format_spec, available_formats):
818         " Returns a tuple of the remaining format_spec and filtered formats "
819
820         OPERATORS = {
821             '<': operator.lt,
822             '<=': operator.le,
823             '>': operator.gt,
824             '>=': operator.ge,
825             '=': operator.eq,
826             '!=': operator.ne,
827         }
828         operator_rex = re.compile(r'''(?x)\s*\[
829             (?P<key>width|height|tbr|abr|vbr|filesize|fps)
830             \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
831             (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
832             \]$
833             ''' % '|'.join(map(re.escape, OPERATORS.keys())))
834         m = operator_rex.search(format_spec)
835         if not m:
836             raise ValueError('Invalid format specification %r' % format_spec)
837
838         try:
839             comparison_value = int(m.group('value'))
840         except ValueError:
841             comparison_value = parse_filesize(m.group('value'))
842             if comparison_value is None:
843                 comparison_value = parse_filesize(m.group('value') + 'B')
844             if comparison_value is None:
845                 raise ValueError(
846                     'Invalid value %r in format specification %r' % (
847                         m.group('value'), format_spec))
848         op = OPERATORS[m.group('op')]
849
850         def _filter(f):
851             actual_value = f.get(m.group('key'))
852             if actual_value is None:
853                 return m.group('none_inclusive')
854             return op(actual_value, comparison_value)
855         new_formats = [f for f in available_formats if _filter(f)]
856
857         new_format_spec = format_spec[:-len(m.group(0))]
858         if not new_format_spec:
859             new_format_spec = 'best'
860
861         return (new_format_spec, new_formats)
862
863     def select_format(self, format_spec, available_formats):
864         while format_spec.endswith(']'):
865             format_spec, available_formats = self._apply_format_filter(
866                 format_spec, available_formats)
867         if not available_formats:
868             return None
869
870         if format_spec == 'best' or format_spec is None:
871             return available_formats[-1]
872         elif format_spec == 'worst':
873             return available_formats[0]
874         elif format_spec == 'bestaudio':
875             audio_formats = [
876                 f for f in available_formats
877                 if f.get('vcodec') == 'none']
878             if audio_formats:
879                 return audio_formats[-1]
880         elif format_spec == 'worstaudio':
881             audio_formats = [
882                 f for f in available_formats
883                 if f.get('vcodec') == 'none']
884             if audio_formats:
885                 return audio_formats[0]
886         elif format_spec == 'bestvideo':
887             video_formats = [
888                 f for f in available_formats
889                 if f.get('acodec') == 'none']
890             if video_formats:
891                 return video_formats[-1]
892         elif format_spec == 'worstvideo':
893             video_formats = [
894                 f for f in available_formats
895                 if f.get('acodec') == 'none']
896             if video_formats:
897                 return video_formats[0]
898         else:
899             extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
900             if format_spec in extensions:
901                 filter_f = lambda f: f['ext'] == format_spec
902             else:
903                 filter_f = lambda f: f['format_id'] == format_spec
904             matches = list(filter(filter_f, available_formats))
905             if matches:
906                 return matches[-1]
907         return None
908
909     def _calc_headers(self, info_dict):
910         res = std_headers.copy()
911
912         add_headers = info_dict.get('http_headers')
913         if add_headers:
914             res.update(add_headers)
915
916         cookies = self._calc_cookies(info_dict)
917         if cookies:
918             res['Cookie'] = cookies
919
920         return res
921
922     def _calc_cookies(self, info_dict):
923         class _PseudoRequest(object):
924             def __init__(self, url):
925                 self.url = url
926                 self.headers = {}
927                 self.unverifiable = False
928
929             def add_unredirected_header(self, k, v):
930                 self.headers[k] = v
931
932             def get_full_url(self):
933                 return self.url
934
935             def is_unverifiable(self):
936                 return self.unverifiable
937
938             def has_header(self, h):
939                 return h in self.headers
940
941             def get_header(self, h, default=None):
942                 return self.headers.get(h, default)
943
944         pr = _PseudoRequest(info_dict['url'])
945         self.cookiejar.add_cookie_header(pr)
946         return pr.headers.get('Cookie')
947
    def process_video_result(self, info_dict, download=True):
        """Normalize a resolved 'video' result (defaults, thumbnails,
        upload_date, per-format fields), select the requested format(s)
        and, if 'download', hand each one to process_info().
        Returns the (mutated) info_dict, or None after --list-formats /
        --list-thumbnails."""
        assert info_dict.get('_type', 'video') == 'video'

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result')
        if 'title' not in info_dict:
            raise ExtractorError('Missing "title" field in extractor result')

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        # Normalize a single 'thumbnail' into the 'thumbnails' list and sort
        # the list worst-to-best (by preference, then dimensions).
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if thumbnails:
            thumbnails.sort(key=lambda t: (
                t.get('preference'), t.get('width'), t.get('height'),
                t.get('id'), t.get('url')))
            for i, t in enumerate(thumbnails):
                if 'width' in t and 'height' in t:
                    t['resolution'] = '%dx%d' % (t['width'], t['height'])
                if t.get('id') is None:
                    t['id'] = '%d' % i

        if thumbnails and 'thumbnail' not in info_dict:
            # After sorting, the best thumbnail is the last one
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if 'display_id' not in info_dict and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
            # Working around negative timestamps in Windows
            # (see http://bugs.python.org/issue1646728)
            if info_dict['timestamp'] < 0 and os.name == 'nt':
                info_dict['timestamp'] = 0
            upload_date = datetime.datetime.utcfromtimestamp(
                info_dict['timestamp'])
            info_dict['upload_date'] = upload_date.strftime('%Y%m%d')

        # These extractors handle format selection themselves
        if info_dict['extractor'] in ['Youku']:
            if download:
                self.process_info(info_dict)
            return info_dict

        # We now pick which formats have to be downloaded
        if info_dict.get('formats') is None:
            # There's only one format available
            formats = [info_dict]
        else:
            formats = info_dict['formats']

        if not formats:
            raise ExtractorError('No video formats found!')

        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            if 'url' not in format:
                raise ExtractorError('Missing "url" key in result (index %d)' % i)

            if format.get('format_id') is None:
                format['format_id'] = compat_str(i)
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
                )
            # Automatically determine file extension if missing
            if 'ext' not in format:
                format['ext'] = determine_ext(format['url']).lower()
            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)

        # --format-limit: keep formats up to and including the matching one
        format_limit = self.params.get('format_limit', None)
        if format_limit:
            formats = list(takewhile_inclusive(
                lambda f: f['format_id'] != format_limit, formats
            ))

        # TODO Central sorting goes here

        if formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats
        if self.params.get('listformats'):
            self.list_formats(info_dict)
            return
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
            return

        req_format = self.params.get('format')
        if req_format is None:
            req_format = 'best'
        formats_to_download = []
        # The -1 is for supporting YoutubeIE
        if req_format in ('-1', 'all'):
            formats_to_download = formats
        else:
            for rfstr in req_format.split(','):
                # We can accept formats requested in the format: 34/5/best, we pick
                # the first that is available, starting from left
                req_formats = rfstr.split('/')
                for rf in req_formats:
                    if re.match(r'.+?\+.+?', rf) is not None:
                        # Two formats have been requested like '137+139'
                        format_1, format_2 = rf.split('+')
                        formats_info = (self.select_format(format_1, formats),
                                        self.select_format(format_2, formats))
                        if all(formats_info):
                            # The first format must contain the video and the
                            # second the audio
                            if formats_info[0].get('vcodec') == 'none':
                                self.report_error('The first format must '
                                                  'contain the video, try using '
                                                  '"-f %s+%s"' % (format_2, format_1))
                                return
                            # The merged file keeps the video container's
                            # extension unless --merge-output-format says so
                            output_ext = (
                                formats_info[0]['ext']
                                if self.params.get('merge_output_format') is None
                                else self.params['merge_output_format'])
                            selected_format = {
                                'requested_formats': formats_info,
                                'format': '%s+%s' % (formats_info[0].get('format'),
                                                     formats_info[1].get('format')),
                                'format_id': '%s+%s' % (formats_info[0].get('format_id'),
                                                        formats_info[1].get('format_id')),
                                'width': formats_info[0].get('width'),
                                'height': formats_info[0].get('height'),
                                'resolution': formats_info[0].get('resolution'),
                                'fps': formats_info[0].get('fps'),
                                'vcodec': formats_info[0].get('vcodec'),
                                'vbr': formats_info[0].get('vbr'),
                                'stretched_ratio': formats_info[0].get('stretched_ratio'),
                                'acodec': formats_info[1].get('acodec'),
                                'abr': formats_info[1].get('abr'),
                                'ext': output_ext,
                            }
                        else:
                            selected_format = None
                    else:
                        selected_format = self.select_format(rf, formats)
                    if selected_format is not None:
                        formats_to_download.append(selected_format)
                        break
        if not formats_to_download:
            raise ExtractorError('requested format not available',
                                 expected=True)

        if download:
            if len(formats_to_download) > 1:
                self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
            for format in formats_to_download:
                new_info = dict(info_dict)
                new_info.update(format)
                self.process_info(new_info)
        # We update the info dict with the best quality format (backwards compatibility)
        info_dict.update(formats_to_download[-1])
        return info_dict
1118
    def process_info(self, info_dict):
        """Process a single resolved IE result: honour --max-downloads,
        forced printings and --simulate, write side files (description,
        annotations, subtitles, info JSON, thumbnails), download the media
        (merging multi-format requests), apply fixups and postprocessors,
        and record the download archive entry."""

        assert info_dict.get('_type', 'video') == 'video'

        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None:
            if self._num_downloads >= int(max_downloads):
                raise MaxDownloadsReached()

        # Keep the full title around, but cap the working title's length
        info_dict['fulltitle'] = info_dict['title']
        if len(info_dict['title']) > 200:
            info_dict['title'] = info_dict['title'][:197] + '...'

        # Keep for backwards compatibility
        info_dict['stitle'] = info_dict['title']

        if 'format' not in info_dict:
            info_dict['format'] = info_dict['ext']

        # --match-title / --date filters etc.; reason is a human-readable
        # skip message or None
        reason = self._match_entry(info_dict)
        if reason is not None:
            self.to_screen('[download] ' + reason)
            return

        self._num_downloads += 1

        info_dict['_filename'] = filename = self.prepare_filename(info_dict)

        # Forced printings
        if self.params.get('forcetitle', False):
            self.to_stdout(info_dict['fulltitle'])
        if self.params.get('forceid', False):
            self.to_stdout(info_dict['id'])
        if self.params.get('forceurl', False):
            if info_dict.get('requested_formats') is not None:
                for f in info_dict['requested_formats']:
                    self.to_stdout(f['url'] + f.get('play_path', ''))
            else:
                # For RTMP URLs, also include the playpath
                self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
        if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
            self.to_stdout(info_dict['thumbnail'])
        if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
            self.to_stdout(info_dict['description'])
        if self.params.get('forcefilename', False) and filename is not None:
            self.to_stdout(filename)
        if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
            self.to_stdout(formatSeconds(info_dict['duration']))
        if self.params.get('forceformat', False):
            self.to_stdout(info_dict['format'])
        if self.params.get('forcejson', False):
            self.to_stdout(json.dumps(info_dict))

        # Do nothing else if in simulate mode
        if self.params.get('simulate', False):
            return

        if filename is None:
            return

        try:
            dn = os.path.dirname(encodeFilename(filename))
            if dn and not os.path.exists(dn):
                os.makedirs(dn)
        except (OSError, IOError) as err:
            self.report_error('unable to create directory ' + compat_str(err))
            return

        if self.params.get('writedescription', False):
            descfn = filename + '.description'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
                self.to_screen('[info] Video description is already present')
            elif info_dict.get('description') is None:
                self.report_warning('There\'s no description to write.')
            else:
                try:
                    self.to_screen('[info] Writing video description to: ' + descfn)
                    with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                        descfile.write(info_dict['description'])
                except (OSError, IOError):
                    self.report_error('Cannot write description file ' + descfn)
                    return

        if self.params.get('writeannotations', False):
            annofn = filename + '.annotations.xml'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    # 'annotations' missing or not a string
                    self.report_warning('There are no annotations to write.')
                except (OSError, IOError):
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                       self.params.get('writeautomaticsub')])

        if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            subtitles = info_dict['subtitles']
            sub_format = self.params.get('subtitlesformat', 'srt')
            for sub_lang in subtitles.keys():
                sub = subtitles[sub_lang]
                if sub is None:
                    continue
                try:
                    sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                    if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                        self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
                    else:
                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                            subfile.write(sub)
                except (OSError, IOError):
                    self.report_error('Cannot write subtitles file ' + sub_filename)
                    return

        if self.params.get('writeinfojson', False):
            infofn = os.path.splitext(filename)[0] + '.info.json'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
                self.to_screen('[info] Video description metadata is already present')
            else:
                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                try:
                    write_json_file(info_dict, infofn)
                except (OSError, IOError):
                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                    return

        self._write_thumbnails(info_dict, filename)

        if not self.params.get('skip_download', False):
            try:
                def dl(name, info):
                    # Pick and run the right downloader (HTTP, RTMP, HLS,
                    # ...) for this format
                    fd = get_suitable_downloader(info, self.params)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get('verbose'):
                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
                    return fd.download(name, info)

                if info_dict.get('requested_formats') is not None:
                    # '-f video+audio': download each format to its own
                    # 'f<id>'-suffixed file, then merge with ffmpeg/avconv
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
                    if not merger._executable:
                        postprocessors = []
                        self.report_warning('You have requested multiple '
                                            'formats but ffmpeg or avconv are not installed.'
                                            ' The formats won\'t be merged')
                    else:
                        postprocessors = [merger]
                    for f in info_dict['requested_formats']:
                        new_info = dict(info_dict)
                        new_info.update(f)
                        fname = self.prepare_filename(new_info)
                        fname = prepend_extension(fname, 'f%s' % f['format_id'])
                        downloaded.append(fname)
                        partial_success = dl(fname, new_info)
                        success = success and partial_success
                    info_dict['__postprocessors'] = postprocessors
                    info_dict['__files_to_merge'] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error('unable to download video data: %s' % str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

            if success:
                # Fixup content
                fixup_policy = self.params.get('fixup')
                if fixup_policy is None:
                    fixup_policy = 'detect_or_warn'

                # Fix videos with a non-square pixel aspect ratio
                stretched_ratio = info_dict.get('stretched_ratio')
                if stretched_ratio is not None and stretched_ratio != 1:
                    if fixup_policy == 'warn':
                        self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
                            info_dict['id'], stretched_ratio))
                    elif fixup_policy == 'detect_or_warn':
                        stretched_pp = FFmpegFixupStretchedPP(self)
                        if stretched_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(stretched_pp)
                        else:
                            self.report_warning(
                                '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % (
                                    info_dict['id'], stretched_ratio))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                # Fix malformed DASH m4a files
                if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash':
                    if fixup_policy == 'warn':
                        self.report_warning('%s: writing DASH m4a. Only some players support this container.' % (
                            info_dict['id']))
                    elif fixup_policy == 'detect_or_warn':
                        fixup_pp = FFmpegFixupM4aPP(self)
                        if fixup_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(fixup_pp)
                        else:
                            self.report_warning(
                                '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.' % (
                                    info_dict['id']))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                try:
                    self.post_process(filename, info_dict)
                except (PostProcessingError) as err:
                    self.report_error('postprocessing: %s' % str(err))
                    return
                self.record_download_archive(info_dict)
1344
1345     def download(self, url_list):
1346         """Download a given list of URLs."""
1347         outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
1348         if (len(url_list) > 1 and
1349                 '%' not in outtmpl
1350                 and self.params.get('max_downloads') != 1):
1351             raise SameFileError(outtmpl)
1352
1353         for url in url_list:
1354             try:
1355                 # It also downloads the videos
1356                 res = self.extract_info(url)
1357             except UnavailableVideoError:
1358                 self.report_error('unable to download video')
1359             except MaxDownloadsReached:
1360                 self.to_screen('[info] Maximum number of downloaded files reached.')
1361                 raise
1362             else:
1363                 if self.params.get('dump_single_json', False):
1364                     self.to_stdout(json.dumps(res))
1365
1366         return self._download_retcode
1367
1368     def download_with_info_file(self, info_filename):
1369         with io.open(info_filename, 'r', encoding='utf-8') as f:
1370             info = json.load(f)
1371         try:
1372             self.process_ie_result(info, download=True)
1373         except DownloadError:
1374             webpage_url = info.get('webpage_url')
1375             if webpage_url is not None:
1376                 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
1377                 return self.download([webpage_url])
1378             else:
1379                 raise
1380         return self._download_retcode
1381
    def post_process(self, filename, ie_info):
        """Run all the postprocessors on the given file."""
        # Work on a copy so the caller's info dict is not mutated.
        info = dict(ie_info)
        info['filepath'] = filename
        pps_chain = []
        # Per-video postprocessors (e.g. format fixups attached during
        # download) run before the user-configured ones in self._pps.
        if ie_info.get('__postprocessors') is not None:
            pps_chain.extend(ie_info['__postprocessors'])
        pps_chain.extend(self._pps)
        for pp in pps_chain:
            # keep_video: None = undecided, True = keep the input file,
            # False = the input file may be deleted (subject to --keep-video).
            keep_video = None
            old_filename = info['filepath']
            try:
                # pp.run returns (keep_video_wish, updated_info); the updated
                # info (possibly with a new 'filepath') feeds the next pp.
                keep_video_wish, info = pp.run(info)
                if keep_video_wish is not None:
                    if keep_video_wish:
                        keep_video = keep_video_wish
                    elif keep_video is None:
                        # No clear decision yet, let IE decide
                        keep_video = keep_video_wish
            except PostProcessingError as e:
                # A failing postprocessor is reported but does not abort the chain.
                self.report_error(e.msg)
            if keep_video is False and not self.params.get('keepvideo', False):
                try:
                    self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
                    os.remove(encodeFilename(old_filename))
                except (IOError, OSError):
                    self.report_warning('Unable to remove downloaded video file')
1409
1410     def _make_archive_id(self, info_dict):
1411         # Future-proof against any change in case
1412         # and backwards compatibility with prior versions
1413         extractor = info_dict.get('extractor_key')
1414         if extractor is None:
1415             if 'id' in info_dict:
1416                 extractor = info_dict.get('ie_key')  # key in a playlist
1417         if extractor is None:
1418             return None  # Incomplete video information
1419         return extractor.lower() + ' ' + info_dict['id']
1420
1421     def in_download_archive(self, info_dict):
1422         fn = self.params.get('download_archive')
1423         if fn is None:
1424             return False
1425
1426         vid_id = self._make_archive_id(info_dict)
1427         if vid_id is None:
1428             return False  # Incomplete video information
1429
1430         try:
1431             with locked_file(fn, 'r', encoding='utf-8') as archive_file:
1432                 for line in archive_file:
1433                     if line.strip() == vid_id:
1434                         return True
1435         except IOError as ioe:
1436             if ioe.errno != errno.ENOENT:
1437                 raise
1438         return False
1439
1440     def record_download_archive(self, info_dict):
1441         fn = self.params.get('download_archive')
1442         if fn is None:
1443             return
1444         vid_id = self._make_archive_id(info_dict)
1445         assert vid_id
1446         with locked_file(fn, 'a', encoding='utf-8') as archive_file:
1447             archive_file.write(vid_id + '\n')
1448
1449     @staticmethod
1450     def format_resolution(format, default='unknown'):
1451         if format.get('vcodec') == 'none':
1452             return 'audio only'
1453         if format.get('resolution') is not None:
1454             return format['resolution']
1455         if format.get('height') is not None:
1456             if format.get('width') is not None:
1457                 res = '%sx%s' % (format['width'], format['height'])
1458             else:
1459                 res = '%sp' % format['height']
1460         elif format.get('width') is not None:
1461             res = '?x%d' % format['width']
1462         else:
1463             res = default
1464         return res
1465
    def _format_note(self, fdict):
        """Build the human-readable 'note' column for one format dict.

        Assembles codec, bitrate, fps, sample-rate and filesize details
        into a single comma-separated string; returns '' when nothing
        is known. The ', ' separator is only inserted when something has
        already been appended, so statement order matters throughout.
        """
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported) '
        if fdict.get('format_note') is not None:
            res += fdict['format_note'] + ' '
        if fdict.get('tbr') is not None:
            res += '%4dk ' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None and
                fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                # '@' joins the codec name with the bitrate appended below
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            # Video bitrate known but video codec unknown
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            res += ', %sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            # Audio bitrate follows the codec (or 'audio') label
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            # '~' marks an approximate size
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res
1515
1516     def list_formats(self, info_dict):
1517         def line(format, idlen=20):
1518             return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
1519                 format['format_id'],
1520                 format['ext'],
1521                 self.format_resolution(format),
1522                 self._format_note(format),
1523             ))
1524
1525         formats = info_dict.get('formats', [info_dict])
1526         idlen = max(len('format code'),
1527                     max(len(f['format_id']) for f in formats))
1528         formats_s = [
1529             line(f, idlen) for f in formats
1530             if f.get('preference') is None or f['preference'] >= -1000]
1531         if len(formats) > 1:
1532             formats_s[0] += (' ' if self._format_note(formats[0]) else '') + '(worst)'
1533             formats_s[-1] += (' ' if self._format_note(formats[-1]) else '') + '(best)'
1534
1535         header_line = line({
1536             'format_id': 'format code', 'ext': 'extension',
1537             'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
1538         self.to_screen(
1539             '[info] Available formats for %s:\n%s\n%s' %
1540             (info_dict['id'], header_line, '\n'.join(formats_s)))
1541
1542     def list_thumbnails(self, info_dict):
1543         thumbnails = info_dict.get('thumbnails')
1544         if not thumbnails:
1545             tn_url = info_dict.get('thumbnail')
1546             if tn_url:
1547                 thumbnails = [{'id': '0', 'url': tn_url}]
1548             else:
1549                 self.to_screen(
1550                     '[info] No thumbnails present for %s' % info_dict['id'])
1551                 return
1552
1553         self.to_screen(
1554             '[info] Thumbnails for %s:' % info_dict['id'])
1555         self.to_screen(render_table(
1556             ['ID', 'width', 'height', 'URL'],
1557             [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
1558
1559     def urlopen(self, req):
1560         """ Start an HTTP download """
1561
1562         # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
1563         # always respected by websites, some tend to give out URLs with non percent-encoded
1564         # non-ASCII characters (see telemb.py, ard.py [#3412])
1565         # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
1566         # To work around aforementioned issue we will replace request's original URL with
1567         # percent-encoded one
1568         req_is_string = isinstance(req, compat_basestring)
1569         url = req if req_is_string else req.get_full_url()
1570         url_escaped = escape_url(url)
1571
1572         # Substitute URL if any change after escaping
1573         if url != url_escaped:
1574             if req_is_string:
1575                 req = url_escaped
1576             else:
1577                 req = compat_urllib_request.Request(
1578                     url_escaped, data=req.data, headers=req.headers,
1579                     origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
1580
1581         return self._opener.open(req, timeout=self._socket_timeout)
1582
1583     def print_debug_header(self):
1584         if not self.params.get('verbose'):
1585             return
1586
1587         if type('') is not compat_str:
1588             # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
1589             self.report_warning(
1590                 'Your Python is broken! Update to a newer and supported version')
1591
1592         stdout_encoding = getattr(
1593             sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
1594         encoding_str = (
1595             '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
1596                 locale.getpreferredencoding(),
1597                 sys.getfilesystemencoding(),
1598                 stdout_encoding,
1599                 self.get_encoding()))
1600         write_string(encoding_str, encoding=None)
1601
1602         self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
1603         try:
1604             sp = subprocess.Popen(
1605                 ['git', 'rev-parse', '--short', 'HEAD'],
1606                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1607                 cwd=os.path.dirname(os.path.abspath(__file__)))
1608             out, err = sp.communicate()
1609             out = out.decode().strip()
1610             if re.match('[0-9a-f]+', out):
1611                 self._write_string('[debug] Git HEAD: ' + out + '\n')
1612         except:
1613             try:
1614                 sys.exc_clear()
1615             except:
1616                 pass
1617         self._write_string('[debug] Python version %s - %s\n' % (
1618             platform.python_version(), platform_name()))
1619
1620         exe_versions = FFmpegPostProcessor.get_versions()
1621         exe_versions['rtmpdump'] = rtmpdump_version()
1622         exe_str = ', '.join(
1623             '%s %s' % (exe, v)
1624             for exe, v in sorted(exe_versions.items())
1625             if v
1626         )
1627         if not exe_str:
1628             exe_str = 'none'
1629         self._write_string('[debug] exe versions: %s\n' % exe_str)
1630
1631         proxy_map = {}
1632         for handler in self._opener.handlers:
1633             if hasattr(handler, 'proxies'):
1634                 proxy_map.update(handler.proxies)
1635         self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
1636
1637         if self.params.get('call_home', False):
1638             ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
1639             self._write_string('[debug] Public IP address: %s\n' % ipaddr)
1640             latest_version = self.urlopen(
1641                 'https://yt-dl.org/latest/version').read().decode('utf-8')
1642             if version_tuple(latest_version) > version_tuple(__version__):
1643                 self.report_warning(
1644                     'You are using an outdated version (newest version: %s)! '
1645                     'See https://yt-dl.org/update if you need help updating.' %
1646                     latest_version)
1647
1648     def _setup_opener(self):
1649         timeout_val = self.params.get('socket_timeout')
1650         self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
1651
1652         opts_cookiefile = self.params.get('cookiefile')
1653         opts_proxy = self.params.get('proxy')
1654
1655         if opts_cookiefile is None:
1656             self.cookiejar = compat_cookiejar.CookieJar()
1657         else:
1658             self.cookiejar = compat_cookiejar.MozillaCookieJar(
1659                 opts_cookiefile)
1660             if os.access(opts_cookiefile, os.R_OK):
1661                 self.cookiejar.load()
1662
1663         cookie_processor = compat_urllib_request.HTTPCookieProcessor(
1664             self.cookiejar)
1665         if opts_proxy is not None:
1666             if opts_proxy == '':
1667                 proxies = {}
1668             else:
1669                 proxies = {'http': opts_proxy, 'https': opts_proxy}
1670         else:
1671             proxies = compat_urllib_request.getproxies()
1672             # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
1673             if 'http' in proxies and 'https' not in proxies:
1674                 proxies['https'] = proxies['http']
1675         proxy_handler = compat_urllib_request.ProxyHandler(proxies)
1676
1677         debuglevel = 1 if self.params.get('debug_printtraffic') else 0
1678         https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
1679         ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
1680         opener = compat_urllib_request.build_opener(
1681             https_handler, proxy_handler, cookie_processor, ydlh)
1682         # Delete the default user-agent header, which would otherwise apply in
1683         # cases where our custom HTTP handler doesn't come into play
1684         # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
1685         opener.addheaders = []
1686         self._opener = opener
1687
1688     def encode(self, s):
1689         if isinstance(s, bytes):
1690             return s  # Already encoded
1691
1692         try:
1693             return s.encode(self.get_encoding())
1694         except UnicodeEncodeError as err:
1695             err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
1696             raise
1697
1698     def get_encoding(self):
1699         encoding = self.params.get('encoding')
1700         if encoding is None:
1701             encoding = preferredencoding()
1702         return encoding
1703
    def _write_thumbnails(self, info_dict, filename):
        """Save thumbnail images next to the video file.

        Honors the 'writethumbnail' (single image) and
        'write_all_thumbnails' params; does nothing when neither is set.
        """
        if self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails')
            if thumbnails:
                # Only the last entry is written -- presumably the best
                # quality one; depends on extractor-side ordering (confirm).
                thumbnails = [thumbnails[-1]]
        elif self.params.get('write_all_thumbnails', False):
            thumbnails = info_dict.get('thumbnails')
        else:
            return

        if not thumbnails:
            # No thumbnails present, so return immediately
            return

        for t in thumbnails:
            thumb_ext = determine_ext(t['url'], 'jpg')
            # Suffix and display id only matter when several thumbnails are written
            suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
            thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
            thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext

            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
                self.to_screen('[%s] %s: Thumbnail %sis already present' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id))
            else:
                self.to_screen('[%s] %s: Downloading thumbnail %s...' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id))
                try:
                    uf = self.urlopen(t['url'])
                    with open(thumb_filename, 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                                   (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    # A failed thumbnail download is not fatal; warn and continue
                    self.report_warning('Unable to download thumbnail "%s": %s' %
                                        (t['url'], compat_str(err)))