Appease pyflakes8-3
[youtube-dl] / youtube_dl / YoutubeDL.py
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import absolute_import, unicode_literals
5
6 import collections
7 import datetime
8 import errno
9 import io
10 import itertools
11 import json
12 import locale
13 import operator
14 import os
15 import platform
16 import re
17 import shutil
18 import subprocess
19 import socket
20 import sys
21 import time
22 import traceback
23
24 if os.name == 'nt':
25     import ctypes
26
27 from .compat import (
28     compat_basestring,
29     compat_cookiejar,
30     compat_expanduser,
31     compat_http_client,
32     compat_kwargs,
33     compat_str,
34     compat_urllib_error,
35     compat_urllib_request,
36 )
37 from .utils import (
38     escape_url,
39     ContentTooShortError,
40     date_from_str,
41     DateRange,
42     DEFAULT_OUTTMPL,
43     determine_ext,
44     DownloadError,
45     encodeFilename,
46     ExtractorError,
47     format_bytes,
48     formatSeconds,
49     get_term_width,
50     locked_file,
51     make_HTTPS_handler,
52     MaxDownloadsReached,
53     PagedList,
54     parse_filesize,
55     PostProcessingError,
56     platform_name,
57     preferredencoding,
58     render_table,
59     SameFileError,
60     sanitize_filename,
61     std_headers,
62     subtitles_filename,
63     takewhile_inclusive,
64     UnavailableVideoError,
65     url_basename,
66     version_tuple,
67     write_json_file,
68     write_string,
69     YoutubeDLHandler,
70     prepend_extension,
71     args_to_str,
72     age_restricted,
73 )
74 from .cache import Cache
75 from .extractor import get_info_extractor, gen_extractors
76 from .downloader import get_suitable_downloader
77 from .downloader.rtmp import rtmpdump_version
78 from .postprocessor import (
79     FFmpegFixupM4aPP,
80     FFmpegFixupStretchedPP,
81     FFmpegMergerPP,
82     FFmpegPostProcessor,
83     get_postprocessor,
84 )
85 from .version import __version__
86
87
88 class YoutubeDL(object):
89     """YoutubeDL class.
90
91     YoutubeDL objects are the ones responsible of downloading the
92     actual video file and writing it to disk if the user has requested
93     it, among some other tasks. In most cases there should be one per
94     program. As, given a video URL, the downloader doesn't know how to
95     extract all the needed information, task that InfoExtractors do, it
96     has to pass the URL to one of them.
97
98     For this, YoutubeDL objects have a method that allows
99     InfoExtractors to be registered in a given order. When it is passed
100     a URL, the YoutubeDL object handles it to the first InfoExtractor it
101     finds that reports being able to handle it. The InfoExtractor extracts
102     all the information about the video or videos the URL refers to, and
103     YoutubeDL process the extracted information, possibly using a File
104     Downloader to download the video.
105
106     YoutubeDL objects accept a lot of parameters. In order not to saturate
107     the object constructor with arguments, it receives a dictionary of
108     options instead. These options are available through the params
109     attribute for the InfoExtractors to use. The YoutubeDL also
110     registers itself as the downloader in charge for the InfoExtractors
111     that are added to it, so this is a "mutual registration".
112
113     Available options:
114
115     username:          Username for authentication purposes.
116     password:          Password for authentication purposes.
117     videopassword:     Password for accessing a video.
118     usenetrc:          Use netrc for authentication instead.
119     verbose:           Print additional info to stdout.
120     quiet:             Do not print messages to stdout.
121     no_warnings:       Do not print out anything for warnings.
122     forceurl:          Force printing final URL.
123     forcetitle:        Force printing title.
124     forceid:           Force printing ID.
125     forcethumbnail:    Force printing thumbnail URL.
126     forcedescription:  Force printing description.
127     forcefilename:     Force printing final filename.
128     forceduration:     Force printing duration.
129     forcejson:         Force printing info_dict as JSON.
130     dump_single_json:  Force printing the info_dict of the whole playlist
131                        (or video) as a single JSON line.
132     simulate:          Do not download the video files.
133     format:            Video format code. See options.py for more information.
134     format_limit:      Highest quality format to try.
135     outtmpl:           Template for output names.
136     restrictfilenames: Do not allow "&" and spaces in file names
137     ignoreerrors:      Do not stop on download errors.
138     nooverwrites:      Prevent overwriting files.
139     playliststart:     Playlist item to start at.
140     playlistend:       Playlist item to end at.
141     playlist_items:    Specific indices of playlist to download.
142     playlistreverse:   Download playlist items in reverse order.
143     matchtitle:        Download only matching titles.
144     rejecttitle:       Reject downloads for matching titles.
145     logger:            Log messages to a logging.Logger instance.
146     logtostderr:       Log messages to stderr instead of stdout.
147     writedescription:  Write the video description to a .description file
148     writeinfojson:     Write the video description to a .info.json file
149     writeannotations:  Write the video annotations to a .annotations.xml file
150     writethumbnail:    Write the thumbnail image to a file
151     write_all_thumbnails:  Write all thumbnail formats to files
152     writesubtitles:    Write the video subtitles to a file
153     writeautomaticsub: Write the automatic subtitles to a file
154     allsubtitles:      Downloads all the subtitles of the video
155                        (requires writesubtitles or writeautomaticsub)
156     listsubtitles:     Lists all available subtitles for the video
157     subtitlesformat:   Subtitle format [srt/sbv/vtt] (default=srt)
158     subtitleslangs:    List of languages of the subtitles to download
159     keepvideo:         Keep the video file after post-processing
160     daterange:         A DateRange object, download only if the upload_date is in the range.
161     skip_download:     Skip the actual download of the video file
162     cachedir:          Location of the cache files in the filesystem.
163                        False to disable filesystem cache.
164     noplaylist:        Download single video instead of a playlist if in doubt.
165     age_limit:         An integer representing the user's age in years.
166                        Unsuitable videos for the given age are skipped.
167     min_views:         An integer representing the minimum view count the video
168                        must have in order to not be skipped.
169                        Videos without view count information are always
170                        downloaded. None for no limit.
171     max_views:         An integer representing the maximum view count.
172                        Videos that are more popular than that are not
173                        downloaded.
174                        Videos without view count information are always
175                        downloaded. None for no limit.
176     download_archive:  File name of a file where all downloads are recorded.
177                        Videos already present in the file are not downloaded
178                        again.
179     cookiefile:        File name where cookies should be read from and dumped to.
180     nocheckcertificate:Do not verify SSL certificates
181     prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
182                        At the moment, this is only supported by YouTube.
183     proxy:             URL of the proxy server to use
184     socket_timeout:    Time to wait for unresponsive hosts, in seconds
185     bidi_workaround:   Work around buggy terminals without bidirectional text
186                        support, using fribidi
187     debug_printtraffic:Print out sent and received HTTP traffic
188     include_ads:       Download ads as well
189     default_search:    Prepend this string if an input url is not valid.
190                        'auto' for elaborate guessing
191     encoding:          Use this encoding instead of the system-specified.
192     extract_flat:      Do not resolve URLs, return the immediate result.
193                        Pass in 'in_playlist' to only show this behavior for
194                        playlist items.
195     postprocessors:    A list of dictionaries, each with an entry
196                        * key:  The name of the postprocessor. See
197                                youtube_dl/postprocessor/__init__.py for a list.
198                        as well as any further keyword arguments for the
199                        postprocessor.
200     progress_hooks:    A list of functions that get called on download
201                        progress, with a dictionary with the entries
202                        * status: One of "downloading" and "finished".
203                                  Check this first and ignore unknown values.
204
205                        If status is one of "downloading" or "finished", the
206                        following properties may also be present:
207                        * filename: The final filename (always present)
208                        * downloaded_bytes: Bytes on disk
209                        * total_bytes: Size of the whole file, None if unknown
210                        * tmpfilename: The filename we're currently writing to
211                        * eta: The estimated time in seconds, None if unknown
212                        * speed: The download speed in bytes/second, None if
213                                 unknown
214
215                        Progress hooks are guaranteed to be called at least once
216                        (with status "finished") if the download is successful.
217     merge_output_format: Extension to use when merging formats.
218     fixup:             Automatically correct known faults of the file.
219                        One of:
220                        - "never": do nothing
221                        - "warn": only emit a warning
222                        - "detect_or_warn": check whether we can do anything
223                                            about it, warn otherwise (default)
224     source_address:    (Experimental) Client-side IP address to bind to.
225     call_home:         Boolean, true iff we are allowed to contact the
226                        youtube-dl servers for debugging.
227     sleep_interval:    Number of seconds to sleep before each download.
228     external_downloader:  Executable of the external downloader to call.
229     listformats:       Print an overview of available video formats and exit.
230     list_thumbnails:   Print a table of all thumbnails and exit.
231
232
233     The following parameters are not used by YoutubeDL itself, they are used by
234     the FileDownloader:
235     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
236     noresizebuffer, retries, continuedl, noprogress, consoletitle,
237     xattr_set_filesize.
238
239     The following options are used by the post processors:
240     prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
241                        otherwise prefer avconv.
242     exec_cmd:          Arbitrary command to run after downloading
243     """
244
    # Class-level defaults; each is replaced with an instance attribute
    # in __init__ (the mutable lists are never mutated at class level).
    params = None  # option dictionary supplied by the caller
    _ies = []  # registered InfoExtractor objects, in priority order
    _pps = []  # registered postprocessor objects
    _download_retcode = None  # process exit code reflecting download failures
    _num_downloads = None  # number of files downloaded by this instance
    _screen_file = None  # stream for screen output (stdout or stderr)
251
    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.

        params:    dictionary of options (see the class docstring);
                   defaults to an empty dict.
        auto_init: when True, print the debug header and register the
                   default info extractors immediately.
        """
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        # The boolean 'logtostderr' indexes this pair, selecting where
        # screen output goes.
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = params
        self.cache = Cache(self)

        if params.get('bidi_workaround', False):
            try:
                import pty
                # Pipe our output through an external bidi filter; a pty is
                # used so the child keeps behaving as if it wrote to a tty.
                master, slave = pty.openpty()
                width = get_term_width()
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    # bidiv is missing; fall back to fribidi.
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == 2:
                    # errno 2 (ENOENT): neither executable exists on $PATH.
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that  fribidi  is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.version_info >= (3,) and sys.platform != 'win32' and
                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # On Python 3, the Unicode filesystem API will throw errors (#1474)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        if '%(stitle)s' in self.params.get('outtmpl', ''):
            self.report_warning('%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')

        self._setup_opener()

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        # Instantiate and attach the configured postprocessors; 'key' names
        # the class, every remaining entry becomes a constructor kwarg.
        for pp_def_raw in self.params.get('postprocessors', []):
            pp_class = get_postprocessor(pp_def_raw['key'])
            pp_def = dict(pp_def_raw)
            del pp_def['key']
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)
322
323     def warn_if_short_id(self, argv):
324         # short YouTube ID starting with dash?
325         idxs = [
326             i for i, a in enumerate(argv)
327             if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
328         if idxs:
329             correct_argv = (
330                 ['youtube-dl'] +
331                 [a for i, a in enumerate(argv) if i not in idxs] +
332                 ['--'] + [argv[i] for i in idxs]
333             )
334             self.report_warning(
335                 'Long argument string detected. '
336                 'Use -- to separate parameters and URLs, like this:\n%s\n' %
337                 args_to_str(correct_argv))
338
339     def add_info_extractor(self, ie):
340         """Add an InfoExtractor object to the end of the list."""
341         self._ies.append(ie)
342         self._ies_instances[ie.ie_key()] = ie
343         ie.set_downloader(self)
344
345     def get_info_extractor(self, ie_key):
346         """
347         Get an instance of an IE with name ie_key, it will try to get one from
348         the _ies list, if there's no instance it will create a new one and add
349         it to the extractor list.
350         """
351         ie = self._ies_instances.get(ie_key)
352         if ie is None:
353             ie = get_info_extractor(ie_key)()
354             self.add_info_extractor(ie)
355         return ie
356
357     def add_default_info_extractors(self):
358         """
359         Add the InfoExtractors returned by gen_extractors to the end of the list
360         """
361         for ie in gen_extractors():
362             self.add_info_extractor(ie)
363
364     def add_post_processor(self, pp):
365         """Add a PostProcessor object to the end of the chain."""
366         self._pps.append(pp)
367         pp.set_downloader(self)
368
369     def add_progress_hook(self, ph):
370         """Add the progress hook (currently only for the file downloader)"""
371         self._progress_hooks.append(ph)
372
373     def _bidi_workaround(self, message):
374         if not hasattr(self, '_output_channel'):
375             return message
376
377         assert hasattr(self, '_output_process')
378         assert isinstance(message, compat_str)
379         line_count = message.count('\n') + 1
380         self._output_process.stdin.write((message + '\n').encode('utf-8'))
381         self._output_process.stdin.flush()
382         res = ''.join(self._output_channel.readline().decode('utf-8')
383                       for _ in range(line_count))
384         return res[:-len('\n')]
385
386     def to_screen(self, message, skip_eol=False):
387         """Print message to stdout if not in quiet mode."""
388         return self.to_stdout(message, skip_eol, check_quiet=True)
389
390     def _write_string(self, s, out=None):
391         write_string(s, out=out, encoding=self.params.get('encoding'))
392
393     def to_stdout(self, message, skip_eol=False, check_quiet=False):
394         """Print message to stdout if not in quiet mode."""
395         if self.params.get('logger'):
396             self.params['logger'].debug(message)
397         elif not check_quiet or not self.params.get('quiet', False):
398             message = self._bidi_workaround(message)
399             terminator = ['\n', ''][skip_eol]
400             output = message + terminator
401
402             self._write_string(output, self._screen_file)
403
404     def to_stderr(self, message):
405         """Print message to stderr."""
406         assert isinstance(message, compat_str)
407         if self.params.get('logger'):
408             self.params['logger'].error(message)
409         else:
410             message = self._bidi_workaround(message)
411             output = message + '\n'
412             self._write_string(output, self._err_file)
413
414     def to_console_title(self, message):
415         if not self.params.get('consoletitle', False):
416             return
417         if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
418             # c_wchar_p() might not be necessary if `message` is
419             # already of type unicode()
420             ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
421         elif 'TERM' in os.environ:
422             self._write_string('\033]0;%s\007' % message, self._screen_file)
423
424     def save_console_title(self):
425         if not self.params.get('consoletitle', False):
426             return
427         if 'TERM' in os.environ:
428             # Save the title on stack
429             self._write_string('\033[22;0t', self._screen_file)
430
431     def restore_console_title(self):
432         if not self.params.get('consoletitle', False):
433             return
434         if 'TERM' in os.environ:
435             # Restore the title from stack
436             self._write_string('\033[23;0t', self._screen_file)
437
438     def __enter__(self):
439         self.save_console_title()
440         return self
441
442     def __exit__(self, *args):
443         self.restore_console_title()
444
445         if self.params.get('cookiefile') is not None:
446             self.cookiejar.save()
447
448     def trouble(self, message=None, tb=None):
449         """Determine action to take when a download problem appears.
450
451         Depending on if the downloader has been configured to ignore
452         download errors or not, this method may throw an exception or
453         not when errors are found, after printing the message.
454
455         tb, if given, is additional traceback information.
456         """
457         if message is not None:
458             self.to_stderr(message)
459         if self.params.get('verbose'):
460             if tb is None:
461                 if sys.exc_info()[0]:  # if .trouble has been called from an except block
462                     tb = ''
463                     if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
464                         tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
465                     tb += compat_str(traceback.format_exc())
466                 else:
467                     tb_data = traceback.format_list(traceback.extract_stack())
468                     tb = ''.join(tb_data)
469             self.to_stderr(tb)
470         if not self.params.get('ignoreerrors', False):
471             if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
472                 exc_info = sys.exc_info()[1].exc_info
473             else:
474                 exc_info = sys.exc_info()
475             raise DownloadError(message, exc_info)
476         self._download_retcode = 1
477
478     def report_warning(self, message):
479         '''
480         Print the message to stderr, it will be prefixed with 'WARNING:'
481         If stderr is a tty file the 'WARNING:' will be colored
482         '''
483         if self.params.get('logger') is not None:
484             self.params['logger'].warning(message)
485         else:
486             if self.params.get('no_warnings'):
487                 return
488             if self._err_file.isatty() and os.name != 'nt':
489                 _msg_header = '\033[0;33mWARNING:\033[0m'
490             else:
491                 _msg_header = 'WARNING:'
492             warning_message = '%s %s' % (_msg_header, message)
493             self.to_stderr(warning_message)
494
495     def report_error(self, message, tb=None):
496         '''
497         Do the same as trouble, but prefixes the message with 'ERROR:', colored
498         in red if stderr is a tty file.
499         '''
500         if self._err_file.isatty() and os.name != 'nt':
501             _msg_header = '\033[0;31mERROR:\033[0m'
502         else:
503             _msg_header = 'ERROR:'
504         error_message = '%s %s' % (_msg_header, message)
505         self.trouble(error_message, tb)
506
507     def report_file_already_downloaded(self, file_name):
508         """Report file has already been fully downloaded."""
509         try:
510             self.to_screen('[download] %s has already been downloaded' % file_name)
511         except UnicodeEncodeError:
512             self.to_screen('[download] The file has already been downloaded')
513
    def prepare_filename(self, info_dict):
        """Generate the output filename from the 'outtmpl' option.

        Returns the filename string, or None (after reporting an error)
        when the template cannot be rendered.
        """
        try:
            template_dict = dict(info_dict)

            template_dict['epoch'] = int(time.time())
            autonumber_size = self.params.get('autonumber_size')
            if autonumber_size is None:
                autonumber_size = 5
            # e.g. '%05d': zero-padded sequence number for %(autonumber)s
            autonumber_templ = '%0' + str(autonumber_size) + 'd'
            template_dict['autonumber'] = autonumber_templ % self._num_downloads
            if template_dict.get('playlist_index') is not None:
                # Pad the playlist index to the width of the playlist length.
                template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index'])
            if template_dict.get('resolution') is None:
                # Synthesize a 'resolution' field from width/height when absent.
                if template_dict.get('width') and template_dict.get('height'):
                    template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
                elif template_dict.get('height'):
                    template_dict['resolution'] = '%sp' % template_dict['height']
                elif template_dict.get('width'):
                    template_dict['resolution'] = '?x%d' % template_dict['width']

            # Make every value filesystem-safe; the video id gets slightly
            # different treatment (is_id=True).
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id'))
            template_dict = dict((k, sanitize(k, v))
                                 for k, v in template_dict.items()
                                 if v is not None)
            # Fields missing from the template dict render as 'NA' instead
            # of raising KeyError during the % substitution below.
            template_dict = collections.defaultdict(lambda: 'NA', template_dict)

            outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
            tmpl = compat_expanduser(outtmpl)
            filename = tmpl % template_dict
            # Temporary fix for #4787
            # 'Treat' all problem characters by passing filename through preferredencoding
            # to workaround encoding issues with subprocess on python2 @ Windows
            if sys.version_info < (3, 0) and sys.platform == 'win32':
                filename = encodeFilename(filename, True).decode(preferredencoding())
            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None
556
557     def _match_entry(self, info_dict):
558         """ Returns None iff the file should be downloaded """
559
560         video_title = info_dict.get('title', info_dict.get('id', 'video'))
561         if 'title' in info_dict:
562             # This can happen when we're just evaluating the playlist
563             title = info_dict['title']
564             matchtitle = self.params.get('matchtitle', False)
565             if matchtitle:
566                 if not re.search(matchtitle, title, re.IGNORECASE):
567                     return '"' + title + '" title did not match pattern "' + matchtitle + '"'
568             rejecttitle = self.params.get('rejecttitle', False)
569             if rejecttitle:
570                 if re.search(rejecttitle, title, re.IGNORECASE):
571                     return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
572         date = info_dict.get('upload_date', None)
573         if date is not None:
574             dateRange = self.params.get('daterange', DateRange())
575             if date not in dateRange:
576                 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
577         view_count = info_dict.get('view_count', None)
578         if view_count is not None:
579             min_views = self.params.get('min_views')
580             if min_views is not None and view_count < min_views:
581                 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
582             max_views = self.params.get('max_views')
583             if max_views is not None and view_count > max_views:
584                 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
585         if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
586             return 'Skipping "%s" because it is age restricted' % title
587         if self.in_download_archive(info_dict):
588             return '%s has already been recorded in archive' % video_title
589         return None
590
591     @staticmethod
592     def add_extra_info(info_dict, extra_info):
593         '''Set the keys from extra_info in info dict if they are missing'''
594         for key, value in extra_info.items():
595             info_dict.setdefault(key, value)
596
    def extract_info(self, url, download=True, ie_key=None, extra_info={},
                     process=True):
        '''
        Returns a list with a dictionary for each video we find.
        If 'download', also downloads the videos.
        extra_info is a dict containing the extra values to add to each result
         '''

        # NOTE(review): extra_info has a mutable default, but it is only
        # read (never mutated) along this call chain, so the shared dict
        # is harmless in practice.
        if ie_key:
            # Caller pinned a specific extractor: only try that one.
            ies = [self.get_info_extractor(ie_key)]
        else:
            ies = self._ies

        for ie in ies:
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            try:
                ie_result = ie.extract(url)
                if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                    break
                if isinstance(ie_result, list):
                    # Backwards compatibility: old IE result format
                    ie_result = {
                        '_type': 'compat_list',
                        'entries': ie_result,
                    }
                self.add_default_extra_info(ie_result, ie, url)
                if process:
                    return self.process_ie_result(ie_result, download, extra_info)
                else:
                    return ie_result
            except ExtractorError as de:  # An error we somewhat expected
                self.report_error(compat_str(de), de.format_traceback())
                break
            except MaxDownloadsReached:
                # Propagated so the main download loop can stop everything.
                raise
            except Exception as e:
                if self.params.get('ignoreerrors', False):
                    self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
                    break
                else:
                    raise
        else:
            # for/else: the loop completed without break — no extractor
            # accepted the URL.
            self.report_error('no suitable InfoExtractor for URL %s' % url)
646
647     def add_default_extra_info(self, ie_result, ie, url):
648         self.add_extra_info(ie_result, {
649             'extractor': ie.IE_NAME,
650             'webpage_url': url,
651             'webpage_url_basename': url_basename(url),
652             'extractor_key': ie.ie_key(),
653         })
654
    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie(may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.

        extra_info is a dict of additional fields merged into every resolved
        result (e.g. playlist metadata). It is only read here, never mutated,
        so the shared mutable default is harmless in this method.
        """

        # '_type' tells us what kind of reference this result is; plain
        # videos are the default.
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            # With extract_flat enabled, URL references inside playlists are
            # returned as-is instead of being resolved to full video results.
            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
                    extract_flat is True):
                if self.params.get('forcejson', False):
                    self.to_stdout(json.dumps(ie_result))
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            return self.process_video_result(ie_result, download=download)
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(ie_result['url'],
                                     download,
                                     ie_key=ie_result.get('ie_key'),
                                     extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # Non-None fields from the embedding result override the target's,
            # except '_type' and 'url' which must come from the new info.
            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)

            assert new_result.get('_type') != 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type == 'playlist' or result_type == 'multi_video':
            # We process each entry in the playlist
            playlist = ie_result.get('title', None) or ie_result.get('id', None)
            self.to_screen('[download] Downloading playlist: %s' % playlist)

            playlist_results = []

            # 'playliststart' is 1-based in the params; converted to 0-based here.
            playliststart = self.params.get('playliststart', 1) - 1
            playlistend = self.params.get('playlistend', None)
            # For backwards compatibility, interpret -1 as whole list
            if playlistend == -1:
                playlistend = None

            playlistitems_str = self.params.get('playlist_items', None)
            playlistitems = None
            if playlistitems_str is not None:
                # Expand a spec like '1-3,7' into the 1-based indices
                # 1, 2, 3, 7. (NOTE: the 'format' parameter name shadows the
                # builtin; it is the raw playlist_items string.)
                def iter_playlistitems(format):
                    for string_segment in format.split(','):
                        if '-' in string_segment:
                            start, end = string_segment.split('-')
                            for item in range(int(start), int(end) + 1):
                                yield int(item)
                        else:
                            yield int(string_segment)
                playlistitems = iter_playlistitems(playlistitems_str)

            # Entries may be a plain list, a lazily-evaluated PagedList, or
            # any other iterable (e.g. a generator); slice accordingly.
            ie_entries = ie_result['entries']
            if isinstance(ie_entries, list):
                n_all_entries = len(ie_entries)
                if playlistitems:
                    entries = [ie_entries[i - 1] for i in playlistitems]
                else:
                    entries = ie_entries[playliststart:playlistend]
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
                    (ie_result['extractor'], playlist, n_all_entries, n_entries))
            elif isinstance(ie_entries, PagedList):
                if playlistitems:
                    entries = []
                    for item in playlistitems:
                        entries.extend(ie_entries.getslice(
                            item - 1, item
                        ))
                else:
                    entries = ie_entries.getslice(
                        playliststart, playlistend)
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Downloading %d videos" %
                    (ie_result['extractor'], playlist, n_entries))
            else:  # iterable
                if playlistitems:
                    entry_list = list(ie_entries)
                    entries = [entry_list[i - 1] for i in playlistitems]
                else:
                    entries = list(itertools.islice(
                        ie_entries, playliststart, playlistend))
                n_entries = len(entries)
                self.to_screen(
                    "[%s] playlist %s: Downloading %d videos" %
                    (ie_result['extractor'], playlist, n_entries))

            if self.params.get('playlistreverse', False):
                entries = entries[::-1]

            for i, entry in enumerate(entries, 1):
                self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
                # Playlist context propagated into every entry.
                extra = {
                    'n_entries': n_entries,
                    'playlist': playlist,
                    'playlist_id': ie_result.get('id'),
                    'playlist_title': ie_result.get('title'),
                    'playlist_index': i + playliststart,
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                }

                # Skip entries rejected by the download filters;
                # _match_entry returns the human-readable reason.
                reason = self._match_entry(entry)
                if reason is not None:
                    self.to_screen('[download] ' + reason)
                    continue

                entry_result = self.process_ie_result(entry,
                                                      download=download,
                                                      extra_info=extra)
                playlist_results.append(entry_result)
            ie_result['entries'] = playlist_results
            return ie_result
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                # Backfill the standard extra fields on each legacy entry.
                self.add_extra_info(
                    r,
                    {
                        'extractor': ie_result['extractor'],
                        'webpage_url': ie_result['webpage_url'],
                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
                        'extractor_key': ie_result['extractor_key'],
                    }
                )
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
816
817     def _apply_format_filter(self, format_spec, available_formats):
818         " Returns a tuple of the remaining format_spec and filtered formats "
819
820         OPERATORS = {
821             '<': operator.lt,
822             '<=': operator.le,
823             '>': operator.gt,
824             '>=': operator.ge,
825             '=': operator.eq,
826             '!=': operator.ne,
827         }
828         operator_rex = re.compile(r'''(?x)\s*\[
829             (?P<key>width|height|tbr|abr|vbr|filesize|fps)
830             \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
831             (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
832             \]$
833             ''' % '|'.join(map(re.escape, OPERATORS.keys())))
834         m = operator_rex.search(format_spec)
835         if not m:
836             raise ValueError('Invalid format specification %r' % format_spec)
837
838         try:
839             comparison_value = int(m.group('value'))
840         except ValueError:
841             comparison_value = parse_filesize(m.group('value'))
842             if comparison_value is None:
843                 comparison_value = parse_filesize(m.group('value') + 'B')
844             if comparison_value is None:
845                 raise ValueError(
846                     'Invalid value %r in format specification %r' % (
847                         m.group('value'), format_spec))
848         op = OPERATORS[m.group('op')]
849
850         def _filter(f):
851             actual_value = f.get(m.group('key'))
852             if actual_value is None:
853                 return m.group('none_inclusive')
854             return op(actual_value, comparison_value)
855         new_formats = [f for f in available_formats if _filter(f)]
856
857         new_format_spec = format_spec[:-len(m.group(0))]
858         if not new_format_spec:
859             new_format_spec = 'best'
860
861         return (new_format_spec, new_formats)
862
863     def select_format(self, format_spec, available_formats):
864         while format_spec.endswith(']'):
865             format_spec, available_formats = self._apply_format_filter(
866                 format_spec, available_formats)
867         if not available_formats:
868             return None
869
870         if format_spec == 'best' or format_spec is None:
871             return available_formats[-1]
872         elif format_spec == 'worst':
873             return available_formats[0]
874         elif format_spec == 'bestaudio':
875             audio_formats = [
876                 f for f in available_formats
877                 if f.get('vcodec') == 'none']
878             if audio_formats:
879                 return audio_formats[-1]
880         elif format_spec == 'worstaudio':
881             audio_formats = [
882                 f for f in available_formats
883                 if f.get('vcodec') == 'none']
884             if audio_formats:
885                 return audio_formats[0]
886         elif format_spec == 'bestvideo':
887             video_formats = [
888                 f for f in available_formats
889                 if f.get('acodec') == 'none']
890             if video_formats:
891                 return video_formats[-1]
892         elif format_spec == 'worstvideo':
893             video_formats = [
894                 f for f in available_formats
895                 if f.get('acodec') == 'none']
896             if video_formats:
897                 return video_formats[0]
898         else:
899             extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
900             if format_spec in extensions:
901                 filter_f = lambda f: f['ext'] == format_spec
902             else:
903                 filter_f = lambda f: f['format_id'] == format_spec
904             matches = list(filter(filter_f, available_formats))
905             if matches:
906                 return matches[-1]
907         return None
908
909     def _calc_headers(self, info_dict):
910         res = std_headers.copy()
911
912         add_headers = info_dict.get('http_headers')
913         if add_headers:
914             res.update(add_headers)
915
916         cookies = self._calc_cookies(info_dict)
917         if cookies:
918             res['Cookie'] = cookies
919
920         return res
921
922     def _calc_cookies(self, info_dict):
923         class _PseudoRequest(object):
924             def __init__(self, url):
925                 self.url = url
926                 self.headers = {}
927                 self.unverifiable = False
928
929             def add_unredirected_header(self, k, v):
930                 self.headers[k] = v
931
932             def get_full_url(self):
933                 return self.url
934
935             def is_unverifiable(self):
936                 return self.unverifiable
937
938             def has_header(self, h):
939                 return h in self.headers
940
941         pr = _PseudoRequest(info_dict['url'])
942         self.cookiejar.add_cookie_header(pr)
943         return pr.headers.get('Cookie')
944
    def process_video_result(self, info_dict, download=True):
        """Validate and canonicalise a single resolved 'video' result, then
        select the requested format(s) and, if 'download', download them.

        Returns info_dict updated with the last selected format, or None
        when only listing formats/thumbnails was requested.
        Raises ExtractorError when mandatory fields or formats are missing.
        """
        assert info_dict.get('_type', 'video') == 'video'

        # 'id' and 'title' are mandatory for every video result.
        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result')
        if 'title' not in info_dict:
            raise ExtractorError('Missing "title" field in extractor result')

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        # Normalize a single 'thumbnail' URL into the 'thumbnails' list.
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if thumbnails:
            thumbnails.sort(key=lambda t: (
                t.get('preference'), t.get('width'), t.get('height'),
                t.get('id'), t.get('url')))
            for t in thumbnails:
                if 'width' in t and 'height' in t:
                    t['resolution'] = '%dx%d' % (t['width'], t['height'])

        # After sorting, the last thumbnail is the preferred one.
        if thumbnails and 'thumbnail' not in info_dict:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if 'display_id' not in info_dict and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        # Derive upload_date (YYYYMMDD) from the UNIX timestamp if needed.
        if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
            # Working around negative timestamps in Windows
            # (see http://bugs.python.org/issue1646728)
            if info_dict['timestamp'] < 0 and os.name == 'nt':
                info_dict['timestamp'] = 0
            upload_date = datetime.datetime.utcfromtimestamp(
                info_dict['timestamp'])
            info_dict['upload_date'] = upload_date.strftime('%Y%m%d')

        # This extractors handle format selection themselves
        if info_dict['extractor'] in ['Youku']:
            if download:
                self.process_info(info_dict)
            return info_dict

        # We now pick which formats have to be downloaded
        if info_dict.get('formats') is None:
            # There's only one format available
            formats = [info_dict]
        else:
            formats = info_dict['formats']

        if not formats:
            raise ExtractorError('No video formats found!')

        # We check that all the formats have the format and format_id fields
        # (NOTE: 'format' here shadows the builtin; it is a format dict.)
        for i, format in enumerate(formats):
            if 'url' not in format:
                raise ExtractorError('Missing "url" key in result (index %d)' % i)

            if format.get('format_id') is None:
                format['format_id'] = compat_str(i)
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
                )
            # Automatically determine file extension if missing
            if 'ext' not in format:
                format['ext'] = determine_ext(format['url']).lower()
            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)

        # --format-limit: drop everything after (and excluding) the limit.
        format_limit = self.params.get('format_limit', None)
        if format_limit:
            formats = list(takewhile_inclusive(
                lambda f: f['format_id'] != format_limit, formats
            ))

        # TODO Central sorting goes here

        if formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # wich can't be exported to json
            info_dict['formats'] = formats
        # Pure listing modes return early without selecting anything.
        if self.params.get('listformats'):
            self.list_formats(info_dict)
            return
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
            return

        req_format = self.params.get('format')
        if req_format is None:
            req_format = 'best'
        formats_to_download = []
        # The -1 is for supporting YoutubeIE
        if req_format in ('-1', 'all'):
            formats_to_download = formats
        else:
            for rfstr in req_format.split(','):
                # We can accept formats requested in the format: 34/5/best, we pick
                # the first that is available, starting from left
                req_formats = rfstr.split('/')
                for rf in req_formats:
                    if re.match(r'.+?\+.+?', rf) is not None:
                        # Two formats have been requested like '137+139'
                        format_1, format_2 = rf.split('+')
                        formats_info = (self.select_format(format_1, formats),
                                        self.select_format(format_2, formats))
                        if all(formats_info):
                            # The first format must contain the video and the
                            # second the audio
                            if formats_info[0].get('vcodec') == 'none':
                                self.report_error('The first format must '
                                                  'contain the video, try using '
                                                  '"-f %s+%s"' % (format_2, format_1))
                                return
                            output_ext = (
                                formats_info[0]['ext']
                                if self.params.get('merge_output_format') is None
                                else self.params['merge_output_format'])
                            # Synthetic format describing the merged output:
                            # video attributes from the first, audio from the second.
                            selected_format = {
                                'requested_formats': formats_info,
                                'format': rf,
                                'format_id': '%s+%s' % (formats_info[0].get('format_id'),
                                                        formats_info[1].get('format_id')),
                                'width': formats_info[0].get('width'),
                                'height': formats_info[0].get('height'),
                                'resolution': formats_info[0].get('resolution'),
                                'fps': formats_info[0].get('fps'),
                                'vcodec': formats_info[0].get('vcodec'),
                                'vbr': formats_info[0].get('vbr'),
                                'stretched_ratio': formats_info[0].get('stretched_ratio'),
                                'acodec': formats_info[1].get('acodec'),
                                'abr': formats_info[1].get('abr'),
                                'ext': output_ext,
                            }
                        else:
                            selected_format = None
                    else:
                        selected_format = self.select_format(rf, formats)
                    if selected_format is not None:
                        formats_to_download.append(selected_format)
                        break
        if not formats_to_download:
            raise ExtractorError('requested format not available',
                                 expected=True)

        if download:
            if len(formats_to_download) > 1:
                self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
            for format in formats_to_download:
                new_info = dict(info_dict)
                new_info.update(format)
                self.process_info(new_info)
        # We update the info dict with the best quality format (backwards compatibility)
        info_dict.update(formats_to_download[-1])
        return info_dict
1112
    def process_info(self, info_dict):
        """Process a single resolved IE result.

        Handles --max-downloads accounting, forced printings, writing
        description/annotations/subtitles/info-json/thumbnail sidecar files,
        the actual download (including multi-format merging), fixups and
        post-processing.
        """

        assert info_dict.get('_type', 'video') == 'video'

        # Enforce --max-downloads before doing any work on this video.
        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None:
            if self._num_downloads >= int(max_downloads):
                raise MaxDownloadsReached()

        # Keep the untruncated title around; over-long titles are clipped
        # to 200 characters for the output filename.
        info_dict['fulltitle'] = info_dict['title']
        if len(info_dict['title']) > 200:
            info_dict['title'] = info_dict['title'][:197] + '...'

        # Keep for backwards compatibility
        info_dict['stitle'] = info_dict['title']

        if 'format' not in info_dict:
            info_dict['format'] = info_dict['ext']

        # Skip the video when the filters reject it; the returned string
        # is the human-readable reason.
        reason = self._match_entry(info_dict)
        if reason is not None:
            self.to_screen('[download] ' + reason)
            return

        self._num_downloads += 1

        info_dict['_filename'] = filename = self.prepare_filename(info_dict)

        # Forced printings
        if self.params.get('forcetitle', False):
            self.to_stdout(info_dict['fulltitle'])
        if self.params.get('forceid', False):
            self.to_stdout(info_dict['id'])
        if self.params.get('forceurl', False):
            if info_dict.get('requested_formats') is not None:
                for f in info_dict['requested_formats']:
                    self.to_stdout(f['url'] + f.get('play_path', ''))
            else:
                # For RTMP URLs, also include the playpath
                self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
        if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
            self.to_stdout(info_dict['thumbnail'])
        if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
            self.to_stdout(info_dict['description'])
        if self.params.get('forcefilename', False) and filename is not None:
            self.to_stdout(filename)
        if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
            self.to_stdout(formatSeconds(info_dict['duration']))
        if self.params.get('forceformat', False):
            self.to_stdout(info_dict['format'])
        if self.params.get('forcejson', False):
            self.to_stdout(json.dumps(info_dict))

        # Do nothing else if in simulate mode
        if self.params.get('simulate', False):
            return

        if filename is None:
            return

        # Create the destination directory if needed.
        try:
            dn = os.path.dirname(encodeFilename(filename))
            if dn and not os.path.exists(dn):
                os.makedirs(dn)
        except (OSError, IOError) as err:
            self.report_error('unable to create directory ' + compat_str(err))
            return

        # Sidecar file: plain-text description.
        if self.params.get('writedescription', False):
            descfn = filename + '.description'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
                self.to_screen('[info] Video description is already present')
            elif info_dict.get('description') is None:
                self.report_warning('There\'s no description to write.')
            else:
                try:
                    self.to_screen('[info] Writing video description to: ' + descfn)
                    with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                        descfile.write(info_dict['description'])
                except (OSError, IOError):
                    self.report_error('Cannot write description file ' + descfn)
                    return

        # Sidecar file: annotations XML.
        if self.params.get('writeannotations', False):
            annofn = filename + '.annotations.xml'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    # The extractor provided no (usable) annotations.
                    self.report_warning('There are no annotations to write.')
                except (OSError, IOError):
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                       self.params.get('writeautomaticsub')])

        if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            subtitles = info_dict['subtitles']
            sub_format = self.params.get('subtitlesformat', 'srt')
            for sub_lang in subtitles.keys():
                sub = subtitles[sub_lang]
                if sub is None:
                    continue
                try:
                    sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                    if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                        self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
                    else:
                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                            subfile.write(sub)
                except (OSError, IOError):
                    self.report_error('Cannot write subtitles file ' + sub_filename)
                    return

        # Sidecar file: full metadata as JSON.
        if self.params.get('writeinfojson', False):
            infofn = os.path.splitext(filename)[0] + '.info.json'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
                self.to_screen('[info] Video description metadata is already present')
            else:
                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                try:
                    write_json_file(info_dict, infofn)
                except (OSError, IOError):
                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                    return

        self._write_thumbnails(info_dict, filename)

        if not self.params.get('skip_download', False):
            try:
                def dl(name, info):
                    # Pick a downloader for this format and attach the
                    # registered progress hooks before starting.
                    fd = get_suitable_downloader(info, self.params)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get('verbose'):
                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
                    return fd.download(name, info)

                if info_dict.get('requested_formats') is not None:
                    # Several formats were requested (e.g. '137+139'): download
                    # each into its own file and merge them afterwards.
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
                    if not merger._executable:
                        postprocessors = []
                        self.report_warning('You have requested multiple '
                                            'formats but ffmpeg or avconv are not installed.'
                                            ' The formats won\'t be merged')
                    else:
                        postprocessors = [merger]
                    for f in info_dict['requested_formats']:
                        new_info = dict(info_dict)
                        new_info.update(f)
                        fname = self.prepare_filename(new_info)
                        fname = prepend_extension(fname, 'f%s' % f['format_id'])
                        downloaded.append(fname)
                        partial_success = dl(fname, new_info)
                        success = success and partial_success
                    info_dict['__postprocessors'] = postprocessors
                    info_dict['__files_to_merge'] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error('unable to download video data: %s' % str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

            if success:
                # Fixup content
                fixup_policy = self.params.get('fixup')
                if fixup_policy is None:
                    fixup_policy = 'detect_or_warn'

                # Fixup: non-uniform (stretched) pixel ratio.
                stretched_ratio = info_dict.get('stretched_ratio')
                if stretched_ratio is not None and stretched_ratio != 1:
                    if fixup_policy == 'warn':
                        self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
                            info_dict['id'], stretched_ratio))
                    elif fixup_policy == 'detect_or_warn':
                        stretched_pp = FFmpegFixupStretchedPP(self)
                        if stretched_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(stretched_pp)
                        else:
                            self.report_warning(
                                '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % (
                                    info_dict['id'], stretched_ratio))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                # Fixup: DASH m4a container.
                if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash':
                    if fixup_policy == 'warn':
                        self.report_warning('%s: writing DASH m4a. Only some players support this container.' % (
                            info_dict['id']))
                    elif fixup_policy == 'detect_or_warn':
                        fixup_pp = FFmpegFixupM4aPP(self)
                        if fixup_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(fixup_pp)
                        else:
                            self.report_warning(
                                '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.' % (
                                    info_dict['id']))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                try:
                    self.post_process(filename, info_dict)
                except (PostProcessingError) as err:
                    self.report_error('postprocessing: %s' % str(err))
                    return
                self.record_download_archive(info_dict)
1338
1339     def download(self, url_list):
1340         """Download a given list of URLs."""
1341         outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
1342         if (len(url_list) > 1 and
1343                 '%' not in outtmpl
1344                 and self.params.get('max_downloads') != 1):
1345             raise SameFileError(outtmpl)
1346
1347         for url in url_list:
1348             try:
1349                 # It also downloads the videos
1350                 res = self.extract_info(url)
1351             except UnavailableVideoError:
1352                 self.report_error('unable to download video')
1353             except MaxDownloadsReached:
1354                 self.to_screen('[info] Maximum number of downloaded files reached.')
1355                 raise
1356             else:
1357                 if self.params.get('dump_single_json', False):
1358                     self.to_stdout(json.dumps(res))
1359
1360         return self._download_retcode
1361
1362     def download_with_info_file(self, info_filename):
1363         with io.open(info_filename, 'r', encoding='utf-8') as f:
1364             info = json.load(f)
1365         try:
1366             self.process_ie_result(info, download=True)
1367         except DownloadError:
1368             webpage_url = info.get('webpage_url')
1369             if webpage_url is not None:
1370                 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
1371                 return self.download([webpage_url])
1372             else:
1373                 raise
1374         return self._download_retcode
1375
1376     def post_process(self, filename, ie_info):
1377         """Run all the postprocessors on the given file."""
1378         info = dict(ie_info)
1379         info['filepath'] = filename
1380         pps_chain = []
1381         if ie_info.get('__postprocessors') is not None:
1382             pps_chain.extend(ie_info['__postprocessors'])
1383         pps_chain.extend(self._pps)
1384         for pp in pps_chain:
1385             keep_video = None
1386             old_filename = info['filepath']
1387             try:
1388                 keep_video_wish, info = pp.run(info)
1389                 if keep_video_wish is not None:
1390                     if keep_video_wish:
1391                         keep_video = keep_video_wish
1392                     elif keep_video is None:
1393                         # No clear decision yet, let IE decide
1394                         keep_video = keep_video_wish
1395             except PostProcessingError as e:
1396                 self.report_error(e.msg)
1397             if keep_video is False and not self.params.get('keepvideo', False):
1398                 try:
1399                     self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
1400                     os.remove(encodeFilename(old_filename))
1401                 except (IOError, OSError):
1402                     self.report_warning('Unable to remove downloaded video file')
1403
1404     def _make_archive_id(self, info_dict):
1405         # Future-proof against any change in case
1406         # and backwards compatibility with prior versions
1407         extractor = info_dict.get('extractor_key')
1408         if extractor is None:
1409             if 'id' in info_dict:
1410                 extractor = info_dict.get('ie_key')  # key in a playlist
1411         if extractor is None:
1412             return None  # Incomplete video information
1413         return extractor.lower() + ' ' + info_dict['id']
1414
1415     def in_download_archive(self, info_dict):
1416         fn = self.params.get('download_archive')
1417         if fn is None:
1418             return False
1419
1420         vid_id = self._make_archive_id(info_dict)
1421         if vid_id is None:
1422             return False  # Incomplete video information
1423
1424         try:
1425             with locked_file(fn, 'r', encoding='utf-8') as archive_file:
1426                 for line in archive_file:
1427                     if line.strip() == vid_id:
1428                         return True
1429         except IOError as ioe:
1430             if ioe.errno != errno.ENOENT:
1431                 raise
1432         return False
1433
1434     def record_download_archive(self, info_dict):
1435         fn = self.params.get('download_archive')
1436         if fn is None:
1437             return
1438         vid_id = self._make_archive_id(info_dict)
1439         assert vid_id
1440         with locked_file(fn, 'a', encoding='utf-8') as archive_file:
1441             archive_file.write(vid_id + '\n')
1442
1443     @staticmethod
1444     def format_resolution(format, default='unknown'):
1445         if format.get('vcodec') == 'none':
1446             return 'audio only'
1447         if format.get('resolution') is not None:
1448             return format['resolution']
1449         if format.get('height') is not None:
1450             if format.get('width') is not None:
1451                 res = '%sx%s' % (format['width'], format['height'])
1452             else:
1453                 res = '%sp' % format['height']
1454         elif format.get('width') is not None:
1455             res = '?x%d' % format['width']
1456         else:
1457             res = default
1458         return res
1459
1460     def _format_note(self, fdict):
1461         res = ''
1462         if fdict.get('ext') in ['f4f', 'f4m']:
1463             res += '(unsupported) '
1464         if fdict.get('format_note') is not None:
1465             res += fdict['format_note'] + ' '
1466         if fdict.get('tbr') is not None:
1467             res += '%4dk ' % fdict['tbr']
1468         if fdict.get('container') is not None:
1469             if res:
1470                 res += ', '
1471             res += '%s container' % fdict['container']
1472         if (fdict.get('vcodec') is not None and
1473                 fdict.get('vcodec') != 'none'):
1474             if res:
1475                 res += ', '
1476             res += fdict['vcodec']
1477             if fdict.get('vbr') is not None:
1478                 res += '@'
1479         elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
1480             res += 'video@'
1481         if fdict.get('vbr') is not None:
1482             res += '%4dk' % fdict['vbr']
1483         if fdict.get('fps') is not None:
1484             res += ', %sfps' % fdict['fps']
1485         if fdict.get('acodec') is not None:
1486             if res:
1487                 res += ', '
1488             if fdict['acodec'] == 'none':
1489                 res += 'video only'
1490             else:
1491                 res += '%-5s' % fdict['acodec']
1492         elif fdict.get('abr') is not None:
1493             if res:
1494                 res += ', '
1495             res += 'audio'
1496         if fdict.get('abr') is not None:
1497             res += '@%3dk' % fdict['abr']
1498         if fdict.get('asr') is not None:
1499             res += ' (%5dHz)' % fdict['asr']
1500         if fdict.get('filesize') is not None:
1501             if res:
1502                 res += ', '
1503             res += format_bytes(fdict['filesize'])
1504         elif fdict.get('filesize_approx') is not None:
1505             if res:
1506                 res += ', '
1507             res += '~' + format_bytes(fdict['filesize_approx'])
1508         return res
1509
1510     def list_formats(self, info_dict):
1511         def line(format, idlen=20):
1512             return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
1513                 format['format_id'],
1514                 format['ext'],
1515                 self.format_resolution(format),
1516                 self._format_note(format),
1517             ))
1518
1519         formats = info_dict.get('formats', [info_dict])
1520         idlen = max(len('format code'),
1521                     max(len(f['format_id']) for f in formats))
1522         formats_s = [
1523             line(f, idlen) for f in formats
1524             if f.get('preference') is None or f['preference'] >= -1000]
1525         if len(formats) > 1:
1526             formats_s[0] += (' ' if self._format_note(formats[0]) else '') + '(worst)'
1527             formats_s[-1] += (' ' if self._format_note(formats[-1]) else '') + '(best)'
1528
1529         header_line = line({
1530             'format_id': 'format code', 'ext': 'extension',
1531             'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
1532         self.to_screen(
1533             '[info] Available formats for %s:\n%s\n%s' %
1534             (info_dict['id'], header_line, '\n'.join(formats_s)))
1535
1536     def list_thumbnails(self, info_dict):
1537         thumbnails = info_dict.get('thumbnails')
1538         if not thumbnails:
1539             tn_url = info_dict.get('thumbnail')
1540             if tn_url:
1541                 thumbnails = [{'id': '0', 'url': tn_url}]
1542             else:
1543                 self.to_screen(
1544                     '[info] No thumbnails present for %s' % info_dict['id'])
1545                 return
1546
1547         self.to_screen(
1548             '[info] Thumbnails for %s:' % info_dict['id'])
1549         self.to_screen(render_table(
1550             ['ID', 'width', 'height', 'URL'],
1551             [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
1552
1553     def urlopen(self, req):
1554         """ Start an HTTP download """
1555
1556         # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
1557         # always respected by websites, some tend to give out URLs with non percent-encoded
1558         # non-ASCII characters (see telemb.py, ard.py [#3412])
1559         # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
1560         # To work around aforementioned issue we will replace request's original URL with
1561         # percent-encoded one
1562         req_is_string = isinstance(req, compat_basestring)
1563         url = req if req_is_string else req.get_full_url()
1564         url_escaped = escape_url(url)
1565
1566         # Substitute URL if any change after escaping
1567         if url != url_escaped:
1568             if req_is_string:
1569                 req = url_escaped
1570             else:
1571                 req = compat_urllib_request.Request(
1572                     url_escaped, data=req.data, headers=req.headers,
1573                     origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
1574
1575         return self._opener.open(req, timeout=self._socket_timeout)
1576
1577     def print_debug_header(self):
1578         if not self.params.get('verbose'):
1579             return
1580
1581         if type('') is not compat_str:
1582             # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
1583             self.report_warning(
1584                 'Your Python is broken! Update to a newer and supported version')
1585
1586         stdout_encoding = getattr(
1587             sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
1588         encoding_str = (
1589             '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
1590                 locale.getpreferredencoding(),
1591                 sys.getfilesystemencoding(),
1592                 stdout_encoding,
1593                 self.get_encoding()))
1594         write_string(encoding_str, encoding=None)
1595
1596         self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
1597         try:
1598             sp = subprocess.Popen(
1599                 ['git', 'rev-parse', '--short', 'HEAD'],
1600                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1601                 cwd=os.path.dirname(os.path.abspath(__file__)))
1602             out, err = sp.communicate()
1603             out = out.decode().strip()
1604             if re.match('[0-9a-f]+', out):
1605                 self._write_string('[debug] Git HEAD: ' + out + '\n')
1606         except:
1607             try:
1608                 sys.exc_clear()
1609             except:
1610                 pass
1611         self._write_string('[debug] Python version %s - %s\n' % (
1612             platform.python_version(), platform_name()))
1613
1614         exe_versions = FFmpegPostProcessor.get_versions()
1615         exe_versions['rtmpdump'] = rtmpdump_version()
1616         exe_str = ', '.join(
1617             '%s %s' % (exe, v)
1618             for exe, v in sorted(exe_versions.items())
1619             if v
1620         )
1621         if not exe_str:
1622             exe_str = 'none'
1623         self._write_string('[debug] exe versions: %s\n' % exe_str)
1624
1625         proxy_map = {}
1626         for handler in self._opener.handlers:
1627             if hasattr(handler, 'proxies'):
1628                 proxy_map.update(handler.proxies)
1629         self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
1630
1631         if self.params.get('call_home', False):
1632             ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
1633             self._write_string('[debug] Public IP address: %s\n' % ipaddr)
1634             latest_version = self.urlopen(
1635                 'https://yt-dl.org/latest/version').read().decode('utf-8')
1636             if version_tuple(latest_version) > version_tuple(__version__):
1637                 self.report_warning(
1638                     'You are using an outdated version (newest version: %s)! '
1639                     'See https://yt-dl.org/update if you need help updating.' %
1640                     latest_version)
1641
    def _setup_opener(self):
        """Build the urllib opener used by urlopen() and store it on
        self._opener, wiring together cookie handling, proxy handling,
        the HTTPS handler and the custom YoutubeDLHandler."""
        timeout_val = self.params.get('socket_timeout')
        # Fall back to a 600-second socket timeout when none was requested
        self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')

        if opts_cookiefile is None:
            # No cookie file configured: keep cookies in memory only
            self.cookiejar = compat_cookiejar.CookieJar()
        else:
            self.cookiejar = compat_cookiejar.MozillaCookieJar(
                opts_cookiefile)
            # Load existing cookies only when the file is readable; an absent
            # file is not an error (it can be written later)
            if os.access(opts_cookiefile, os.R_OK):
                self.cookiejar.load()

        cookie_processor = compat_urllib_request.HTTPCookieProcessor(
            self.cookiejar)
        if opts_proxy is not None:
            # An explicitly empty proxy option disables all proxies;
            # otherwise use the given proxy for both http and https
            if opts_proxy == '':
                proxies = {}
            else:
                proxies = {'http': opts_proxy, 'https': opts_proxy}
        else:
            # No explicit proxy: pick up the environment's proxy settings
            proxies = compat_urllib_request.getproxies()
            # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']
        proxy_handler = compat_urllib_request.ProxyHandler(proxies)

        # debug_printtraffic makes the HTTP(S) handlers dump raw traffic
        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
        opener = compat_urllib_request.build_opener(
            https_handler, proxy_handler, cookie_processor, ydlh)
        # Delete the default user-agent header, which would otherwise apply in
        # cases where our custom HTTP handler doesn't come into play
        # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
        opener.addheaders = []
        self._opener = opener
1681
1682     def encode(self, s):
1683         if isinstance(s, bytes):
1684             return s  # Already encoded
1685
1686         try:
1687             return s.encode(self.get_encoding())
1688         except UnicodeEncodeError as err:
1689             err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
1690             raise
1691
1692     def get_encoding(self):
1693         encoding = self.params.get('encoding')
1694         if encoding is None:
1695             encoding = preferredencoding()
1696         return encoding
1697
1698     def _write_thumbnails(self, info_dict, filename):
1699         if self.params.get('writethumbnail', False):
1700             thumbnails = info_dict.get('thumbnails')
1701             if thumbnails:
1702                 thumbnails = [thumbnails[-1]]
1703         elif self.params.get('write_all_thumbnails', False):
1704             thumbnails = info_dict.get('thumbnails')
1705         else:
1706             return
1707
1708         if not thumbnails:
1709             # No thumbnails present, so return immediately
1710             return
1711
1712         for t in thumbnails:
1713             thumb_ext = determine_ext(t['url'], 'jpg')
1714             suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
1715             thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
1716             thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext
1717
1718             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
1719                 self.to_screen('[%s] %s: Thumbnail %sis already present' %
1720                                (info_dict['extractor'], info_dict['id'], thumb_display_id))
1721             else:
1722                 self.to_screen('[%s] %s: Downloading thumbnail %s...' %
1723                                (info_dict['extractor'], info_dict['id'], thumb_display_id))
1724                 try:
1725                     uf = self.urlopen(t['url'])
1726                     with open(thumb_filename, 'wb') as thumbf:
1727                         shutil.copyfileobj(uf, thumbf)
1728                     self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
1729                                    (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
1730                 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
1731                     self.report_warning('Unable to download thumbnail "%s": %s' %
1732                                         (t['url'], compat_str(err)))