Merge branch 'tweakers' of https://github.com/robin007bond/youtube-dl into robin007bo...
[youtube-dl] / youtube_dl / YoutubeDL.py
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import absolute_import, unicode_literals
5
6 import collections
7 import datetime
8 import errno
9 import io
10 import itertools
11 import json
12 import locale
13 import operator
14 import os
15 import platform
16 import re
17 import shutil
18 import subprocess
19 import socket
20 import sys
21 import time
22 import traceback
23
24 if os.name == 'nt':
25     import ctypes
26
27 from .compat import (
28     compat_basestring,
29     compat_cookiejar,
30     compat_expanduser,
31     compat_http_client,
32     compat_kwargs,
33     compat_str,
34     compat_urllib_error,
35     compat_urllib_request,
36 )
37 from .utils import (
38     escape_url,
39     ContentTooShortError,
40     date_from_str,
41     DateRange,
42     DEFAULT_OUTTMPL,
43     determine_ext,
44     DownloadError,
45     encodeFilename,
46     ExtractorError,
47     format_bytes,
48     formatSeconds,
49     get_term_width,
50     locked_file,
51     make_HTTPS_handler,
52     MaxDownloadsReached,
53     PagedList,
54     parse_filesize,
55     PostProcessingError,
56     platform_name,
57     preferredencoding,
58     render_table,
59     SameFileError,
60     sanitize_filename,
61     std_headers,
62     subtitles_filename,
63     takewhile_inclusive,
64     UnavailableVideoError,
65     url_basename,
66     version_tuple,
67     write_json_file,
68     write_string,
69     YoutubeDLHandler,
70     prepend_extension,
71     args_to_str,
72     age_restricted,
73 )
74 from .cache import Cache
75 from .extractor import get_info_extractor, gen_extractors
76 from .downloader import get_suitable_downloader
77 from .downloader.rtmp import rtmpdump_version
78 from .postprocessor import (
79     FFmpegFixupM4aPP,
80     FFmpegFixupStretchedPP,
81     FFmpegMergerPP,
82     FFmpegPostProcessor,
83     get_postprocessor,
84 )
85 from .version import __version__
86
87
class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. As, given a video URL, the downloader doesn't know how to
    extract all the needed information, task that InfoExtractors do, it
    has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL process the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceurl:          Force printing final URL.
    forcetitle:        Force printing title.
    forceid:           Force printing ID.
    forcethumbnail:    Force printing thumbnail URL.
    forcedescription:  Force printing description.
    forcefilename:     Force printing final filename.
    forceduration:     Force printing duration.
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    simulate:          Do not download the video files.
    format:            Video format code. See options.py for more information.
    format_limit:      Highest quality format to try.
    outtmpl:           Template for output names.
    restrictfilenames: Do not allow "&" and spaces in file names
    ignoreerrors:      Do not stop on download errors.
    nooverwrites:      Prevent overwriting files.
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    playlist_items:    Specific indices of playlist to download.
    playlistreverse:   Download playlist items in reverse order.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video description to a .info.json file
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    write_all_thumbnails:  Write all thumbnail formats to files
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatic subtitles to a file
    allsubtitles:      Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   Subtitle format [srt/sbv/vtt] (default=srt)
    subtitleslangs:    List of languages of the subtitles to download
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    cookiefile:        File name where cookies should be read from and dumped to.
    nocheckcertificate:Do not verify SSL certificates
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    proxy:             URL of the proxy server to use
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic:Print out sent and received HTTP traffic
    include_ads:       Download ads as well
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               youtube_dl/postprocessor/__init__.py for a list.
                       as well as any further keyword arguments for the
                       postprocessor.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading" and "finished".
                                 Check this first and ignore unknown values.

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * tmpfilename: The filename we're currently writing to
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    merge_output_format: Extension to use when merging formats.
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    (Experimental) Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       youtube-dl servers for debugging.
    sleep_interval:    Number of seconds to sleep before each download.
    external_downloader:  Executable of the external downloader to call.
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.


    The following parameters are not used by YoutubeDL itself, they are used by
    the FileDownloader:
    nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
    noresizebuffer, retries, continuedl, noprogress, consoletitle,
    xattr_set_filesize.

    The following options are used by the post processors:
    prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
                       otherwise prefer avconv.
    exec_cmd:          Arbitrary command to run after downloading
    """

    # Class-level defaults; real values are assigned per instance in __init__().
    params = None  # the options dictionary described above
    _ies = []  # registered InfoExtractor instances, in matching order
    _pps = []  # post-processor chain
    _download_retcode = None  # process exit code (set to 1 on recorded errors)
    _num_downloads = None  # number of files downloaded so far (feeds %(autonumber)s)
    _screen_file = None  # stream used for "screen" output (stdout or stderr)
251
    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.

        params is the options dictionary described in the class docstring.
        When auto_init is true, the debug header is printed and the default
        InfoExtractors are registered immediately.
        """
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        # logtostderr picks index 1 (stderr) as the "screen" stream
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = params
        self.cache = Cache(self)

        if params.get('bidi_workaround', False):
            # Pipe screen output through an external bidi filter ('bidiv',
            # falling back to 'fribidi'), reading its output back via a pty.
            try:
                import pty
                master, slave = pty.openpty()
                width = get_term_width()
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                # errno 2 (ENOENT): no bidi executable found; warn and carry on
                if ose.errno == 2:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that  fribidi  is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.version_info >= (3,) and sys.platform != 'win32' and
                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # On Python 3, the Unicode filesystem API will throw errors (#1474)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        # %(stitle)s was superseded by %(title)s plus --restrict-filenames
        if '%(stitle)s' in self.params.get('outtmpl', ''):
            self.report_warning('%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')

        self._setup_opener()

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        # Instantiate configured postprocessors: each dict's 'key' names the
        # PP class, all remaining entries are passed through as kwargs.
        for pp_def_raw in self.params.get('postprocessors', []):
            pp_class = get_postprocessor(pp_def_raw['key'])
            pp_def = dict(pp_def_raw)
            del pp_def['key']
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)
322
323     def warn_if_short_id(self, argv):
324         # short YouTube ID starting with dash?
325         idxs = [
326             i for i, a in enumerate(argv)
327             if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
328         if idxs:
329             correct_argv = (
330                 ['youtube-dl'] +
331                 [a for i, a in enumerate(argv) if i not in idxs] +
332                 ['--'] + [argv[i] for i in idxs]
333             )
334             self.report_warning(
335                 'Long argument string detected. '
336                 'Use -- to separate parameters and URLs, like this:\n%s\n' %
337                 args_to_str(correct_argv))
338
339     def add_info_extractor(self, ie):
340         """Add an InfoExtractor object to the end of the list."""
341         self._ies.append(ie)
342         self._ies_instances[ie.ie_key()] = ie
343         ie.set_downloader(self)
344
345     def get_info_extractor(self, ie_key):
346         """
347         Get an instance of an IE with name ie_key, it will try to get one from
348         the _ies list, if there's no instance it will create a new one and add
349         it to the extractor list.
350         """
351         ie = self._ies_instances.get(ie_key)
352         if ie is None:
353             ie = get_info_extractor(ie_key)()
354             self.add_info_extractor(ie)
355         return ie
356
357     def add_default_info_extractors(self):
358         """
359         Add the InfoExtractors returned by gen_extractors to the end of the list
360         """
361         for ie in gen_extractors():
362             self.add_info_extractor(ie)
363
364     def add_post_processor(self, pp):
365         """Add a PostProcessor object to the end of the chain."""
366         self._pps.append(pp)
367         pp.set_downloader(self)
368
369     def add_progress_hook(self, ph):
370         """Add the progress hook (currently only for the file downloader)"""
371         self._progress_hooks.append(ph)
372
373     def _bidi_workaround(self, message):
374         if not hasattr(self, '_output_channel'):
375             return message
376
377         assert hasattr(self, '_output_process')
378         assert isinstance(message, compat_str)
379         line_count = message.count('\n') + 1
380         self._output_process.stdin.write((message + '\n').encode('utf-8'))
381         self._output_process.stdin.flush()
382         res = ''.join(self._output_channel.readline().decode('utf-8')
383                       for _ in range(line_count))
384         return res[:-len('\n')]
385
386     def to_screen(self, message, skip_eol=False):
387         """Print message to stdout if not in quiet mode."""
388         return self.to_stdout(message, skip_eol, check_quiet=True)
389
390     def _write_string(self, s, out=None):
391         write_string(s, out=out, encoding=self.params.get('encoding'))
392
393     def to_stdout(self, message, skip_eol=False, check_quiet=False):
394         """Print message to stdout if not in quiet mode."""
395         if self.params.get('logger'):
396             self.params['logger'].debug(message)
397         elif not check_quiet or not self.params.get('quiet', False):
398             message = self._bidi_workaround(message)
399             terminator = ['\n', ''][skip_eol]
400             output = message + terminator
401
402             self._write_string(output, self._screen_file)
403
404     def to_stderr(self, message):
405         """Print message to stderr."""
406         assert isinstance(message, compat_str)
407         if self.params.get('logger'):
408             self.params['logger'].error(message)
409         else:
410             message = self._bidi_workaround(message)
411             output = message + '\n'
412             self._write_string(output, self._err_file)
413
414     def to_console_title(self, message):
415         if not self.params.get('consoletitle', False):
416             return
417         if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
418             # c_wchar_p() might not be necessary if `message` is
419             # already of type unicode()
420             ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
421         elif 'TERM' in os.environ:
422             self._write_string('\033]0;%s\007' % message, self._screen_file)
423
424     def save_console_title(self):
425         if not self.params.get('consoletitle', False):
426             return
427         if 'TERM' in os.environ:
428             # Save the title on stack
429             self._write_string('\033[22;0t', self._screen_file)
430
431     def restore_console_title(self):
432         if not self.params.get('consoletitle', False):
433             return
434         if 'TERM' in os.environ:
435             # Restore the title from stack
436             self._write_string('\033[23;0t', self._screen_file)
437
438     def __enter__(self):
439         self.save_console_title()
440         return self
441
442     def __exit__(self, *args):
443         self.restore_console_title()
444
445         if self.params.get('cookiefile') is not None:
446             self.cookiejar.save()
447
448     def trouble(self, message=None, tb=None):
449         """Determine action to take when a download problem appears.
450
451         Depending on if the downloader has been configured to ignore
452         download errors or not, this method may throw an exception or
453         not when errors are found, after printing the message.
454
455         tb, if given, is additional traceback information.
456         """
457         if message is not None:
458             self.to_stderr(message)
459         if self.params.get('verbose'):
460             if tb is None:
461                 if sys.exc_info()[0]:  # if .trouble has been called from an except block
462                     tb = ''
463                     if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
464                         tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
465                     tb += compat_str(traceback.format_exc())
466                 else:
467                     tb_data = traceback.format_list(traceback.extract_stack())
468                     tb = ''.join(tb_data)
469             self.to_stderr(tb)
470         if not self.params.get('ignoreerrors', False):
471             if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
472                 exc_info = sys.exc_info()[1].exc_info
473             else:
474                 exc_info = sys.exc_info()
475             raise DownloadError(message, exc_info)
476         self._download_retcode = 1
477
478     def report_warning(self, message):
479         '''
480         Print the message to stderr, it will be prefixed with 'WARNING:'
481         If stderr is a tty file the 'WARNING:' will be colored
482         '''
483         if self.params.get('logger') is not None:
484             self.params['logger'].warning(message)
485         else:
486             if self.params.get('no_warnings'):
487                 return
488             if self._err_file.isatty() and os.name != 'nt':
489                 _msg_header = '\033[0;33mWARNING:\033[0m'
490             else:
491                 _msg_header = 'WARNING:'
492             warning_message = '%s %s' % (_msg_header, message)
493             self.to_stderr(warning_message)
494
495     def report_error(self, message, tb=None):
496         '''
497         Do the same as trouble, but prefixes the message with 'ERROR:', colored
498         in red if stderr is a tty file.
499         '''
500         if self._err_file.isatty() and os.name != 'nt':
501             _msg_header = '\033[0;31mERROR:\033[0m'
502         else:
503             _msg_header = 'ERROR:'
504         error_message = '%s %s' % (_msg_header, message)
505         self.trouble(error_message, tb)
506
507     def report_file_already_downloaded(self, file_name):
508         """Report file has already been fully downloaded."""
509         try:
510             self.to_screen('[download] %s has already been downloaded' % file_name)
511         except UnicodeEncodeError:
512             self.to_screen('[download] The file has already been downloaded')
513
514     def prepare_filename(self, info_dict):
515         """Generate the output filename."""
516         try:
517             template_dict = dict(info_dict)
518
519             template_dict['epoch'] = int(time.time())
520             autonumber_size = self.params.get('autonumber_size')
521             if autonumber_size is None:
522                 autonumber_size = 5
523             autonumber_templ = '%0' + str(autonumber_size) + 'd'
524             template_dict['autonumber'] = autonumber_templ % self._num_downloads
525             if template_dict.get('playlist_index') is not None:
526                 template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index'])
527             if template_dict.get('resolution') is None:
528                 if template_dict.get('width') and template_dict.get('height'):
529                     template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
530                 elif template_dict.get('height'):
531                     template_dict['resolution'] = '%sp' % template_dict['height']
532                 elif template_dict.get('width'):
533                     template_dict['resolution'] = '?x%d' % template_dict['width']
534
535             sanitize = lambda k, v: sanitize_filename(
536                 compat_str(v),
537                 restricted=self.params.get('restrictfilenames'),
538                 is_id=(k == 'id'))
539             template_dict = dict((k, sanitize(k, v))
540                                  for k, v in template_dict.items()
541                                  if v is not None)
542             template_dict = collections.defaultdict(lambda: 'NA', template_dict)
543
544             outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
545             tmpl = compat_expanduser(outtmpl)
546             filename = tmpl % template_dict
547             # Temporary fix for #4787
548             # 'Treat' all problem characters by passing filename through preferredencoding
549             # to workaround encoding issues with subprocess on python2 @ Windows
550             if sys.version_info < (3, 0) and sys.platform == 'win32':
551                 filename = encodeFilename(filename, True).decode(preferredencoding())
552             return filename
553         except ValueError as err:
554             self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
555             return None
556
557     def _match_entry(self, info_dict):
558         """ Returns None iff the file should be downloaded """
559
560         video_title = info_dict.get('title', info_dict.get('id', 'video'))
561         if 'title' in info_dict:
562             # This can happen when we're just evaluating the playlist
563             title = info_dict['title']
564             matchtitle = self.params.get('matchtitle', False)
565             if matchtitle:
566                 if not re.search(matchtitle, title, re.IGNORECASE):
567                     return '"' + title + '" title did not match pattern "' + matchtitle + '"'
568             rejecttitle = self.params.get('rejecttitle', False)
569             if rejecttitle:
570                 if re.search(rejecttitle, title, re.IGNORECASE):
571                     return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
572         date = info_dict.get('upload_date', None)
573         if date is not None:
574             dateRange = self.params.get('daterange', DateRange())
575             if date not in dateRange:
576                 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
577         view_count = info_dict.get('view_count', None)
578         if view_count is not None:
579             min_views = self.params.get('min_views')
580             if min_views is not None and view_count < min_views:
581                 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
582             max_views = self.params.get('max_views')
583             if max_views is not None and view_count > max_views:
584                 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
585         if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
586             return 'Skipping "%s" because it is age restricted' % title
587         if self.in_download_archive(info_dict):
588             return '%s has already been recorded in archive' % video_title
589         return None
590
591     @staticmethod
592     def add_extra_info(info_dict, extra_info):
593         '''Set the keys from extra_info in info dict if they are missing'''
594         for key, value in extra_info.items():
595             info_dict.setdefault(key, value)
596
    def extract_info(self, url, download=True, ie_key=None, extra_info={},
                     process=True):
        """Extract information for *url*, optionally downloading the videos.

        Tries each suitable InfoExtractor in registration order (or only
        the one named by *ie_key*).  Returns the result of
        process_ie_result() when *process* is true, the raw ie_result
        otherwise, or None when no extractor matched or extraction failed
        with a reported error.  *extra_info* values are merged into each
        result.

        NOTE(review): extra_info has a mutable default ({}); it is only
        passed through here, never mutated.
        """

        if ie_key:
            ies = [self.get_info_extractor(ie_key)]
        else:
            ies = self._ies

        for ie in ies:
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            try:
                ie_result = ie.extract(url)
                if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                    break
                if isinstance(ie_result, list):
                    # Backwards compatibility: old IE result format
                    ie_result = {
                        '_type': 'compat_list',
                        'entries': ie_result,
                    }
                self.add_default_extra_info(ie_result, ie, url)
                if process:
                    return self.process_ie_result(ie_result, download, extra_info)
                else:
                    return ie_result
            except ExtractorError as de:  # An error we somewhat expected
                self.report_error(compat_str(de), de.format_traceback())
                break
            except MaxDownloadsReached:
                # Propagate: the caller uses this to stop the whole run
                raise
            except Exception as e:
                if self.params.get('ignoreerrors', False):
                    self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
                    break
                else:
                    raise
        else:
            # for/else: the loop ran out of extractors without any claiming the URL
            self.report_error('no suitable InfoExtractor for URL %s' % url)
646
647     def add_default_extra_info(self, ie_result, ie, url):
648         self.add_extra_info(ie_result, {
649             'extractor': ie.IE_NAME,
650             'webpage_url': url,
651             'webpage_url_basename': url_basename(url),
652             'extractor_key': ie.ie_key(),
653         })
654
655     def process_ie_result(self, ie_result, download=True, extra_info={}):
656         """
657         Take the result of the ie(may be modified) and resolve all unresolved
658         references (URLs, playlist items).
659
660         It will also download the videos if 'download'.
661         Returns the resolved ie_result.
662         """
663
664         result_type = ie_result.get('_type', 'video')
665
666         if result_type in ('url', 'url_transparent'):
667             extract_flat = self.params.get('extract_flat', False)
668             if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
669                     extract_flat is True):
670                 if self.params.get('forcejson', False):
671                     self.to_stdout(json.dumps(ie_result))
672                 return ie_result
673
674         if result_type == 'video':
675             self.add_extra_info(ie_result, extra_info)
676             return self.process_video_result(ie_result, download=download)
677         elif result_type == 'url':
678             # We have to add extra_info to the results because it may be
679             # contained in a playlist
680             return self.extract_info(ie_result['url'],
681                                      download,
682                                      ie_key=ie_result.get('ie_key'),
683                                      extra_info=extra_info)
684         elif result_type == 'url_transparent':
685             # Use the information from the embedding page
686             info = self.extract_info(
687                 ie_result['url'], ie_key=ie_result.get('ie_key'),
688                 extra_info=extra_info, download=False, process=False)
689
690             force_properties = dict(
691                 (k, v) for k, v in ie_result.items() if v is not None)
692             for f in ('_type', 'url'):
693                 if f in force_properties:
694                     del force_properties[f]
695             new_result = info.copy()
696             new_result.update(force_properties)
697
698             assert new_result.get('_type') != 'url_transparent'
699
700             return self.process_ie_result(
701                 new_result, download=download, extra_info=extra_info)
702         elif result_type == 'playlist' or result_type == 'multi_video':
703             # We process each entry in the playlist
704             playlist = ie_result.get('title', None) or ie_result.get('id', None)
705             self.to_screen('[download] Downloading playlist: %s' % playlist)
706
707             playlist_results = []
708
709             playliststart = self.params.get('playliststart', 1) - 1
710             playlistend = self.params.get('playlistend', None)
711             # For backwards compatibility, interpret -1 as whole list
712             if playlistend == -1:
713                 playlistend = None
714
715             playlistitems_str = self.params.get('playlist_items', None)
716             playlistitems = None
717             if playlistitems_str is not None:
718                 def iter_playlistitems(format):
719                     for string_segment in format.split(','):
720                         if '-' in string_segment:
721                             start, end = string_segment.split('-')
722                             for item in range(int(start), int(end) + 1):
723                                 yield int(item)
724                         else:
725                             yield int(string_segment)
726                 playlistitems = iter_playlistitems(playlistitems_str)
727
728             ie_entries = ie_result['entries']
729             if isinstance(ie_entries, list):
730                 n_all_entries = len(ie_entries)
731                 if playlistitems:
732                     entries = [ie_entries[i - 1] for i in playlistitems]
733                 else:
734                     entries = ie_entries[playliststart:playlistend]
735                 n_entries = len(entries)
736                 self.to_screen(
737                     "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
738                     (ie_result['extractor'], playlist, n_all_entries, n_entries))
739             elif isinstance(ie_entries, PagedList):
740                 if playlistitems:
741                     entries = []
742                     for item in playlistitems:
743                         entries.extend(ie_entries.getslice(
744                             item - 1, item
745                         ))
746                 else:
747                     entries = ie_entries.getslice(
748                         playliststart, playlistend)
749                 n_entries = len(entries)
750                 self.to_screen(
751                     "[%s] playlist %s: Downloading %d videos" %
752                     (ie_result['extractor'], playlist, n_entries))
753             else:  # iterable
754                 if playlistitems:
755                     entry_list = list(ie_entries)
756                     entries = [entry_list[i - 1] for i in playlistitems]
757                 else:
758                     entries = list(itertools.islice(
759                         ie_entries, playliststart, playlistend))
760                 n_entries = len(entries)
761                 self.to_screen(
762                     "[%s] playlist %s: Downloading %d videos" %
763                     (ie_result['extractor'], playlist, n_entries))
764
765             if self.params.get('playlistreverse', False):
766                 entries = entries[::-1]
767
768             for i, entry in enumerate(entries, 1):
769                 self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
770                 extra = {
771                     'n_entries': n_entries,
772                     'playlist': playlist,
773                     'playlist_id': ie_result.get('id'),
774                     'playlist_title': ie_result.get('title'),
775                     'playlist_index': i + playliststart,
776                     'extractor': ie_result['extractor'],
777                     'webpage_url': ie_result['webpage_url'],
778                     'webpage_url_basename': url_basename(ie_result['webpage_url']),
779                     'extractor_key': ie_result['extractor_key'],
780                 }
781
782                 reason = self._match_entry(entry)
783                 if reason is not None:
784                     self.to_screen('[download] ' + reason)
785                     continue
786
787                 entry_result = self.process_ie_result(entry,
788                                                       download=download,
789                                                       extra_info=extra)
790                 playlist_results.append(entry_result)
791             ie_result['entries'] = playlist_results
792             return ie_result
793         elif result_type == 'compat_list':
794             self.report_warning(
795                 'Extractor %s returned a compat_list result. '
796                 'It needs to be updated.' % ie_result.get('extractor'))
797
798             def _fixup(r):
799                 self.add_extra_info(
800                     r,
801                     {
802                         'extractor': ie_result['extractor'],
803                         'webpage_url': ie_result['webpage_url'],
804                         'webpage_url_basename': url_basename(ie_result['webpage_url']),
805                         'extractor_key': ie_result['extractor_key'],
806                     }
807                 )
808                 return r
809             ie_result['entries'] = [
810                 self.process_ie_result(_fixup(r), download, extra_info)
811                 for r in ie_result['entries']
812             ]
813             return ie_result
814         else:
815             raise Exception('Invalid result type: %s' % result_type)
816
817     def _apply_format_filter(self, format_spec, available_formats):
818         " Returns a tuple of the remaining format_spec and filtered formats "
819
820         OPERATORS = {
821             '<': operator.lt,
822             '<=': operator.le,
823             '>': operator.gt,
824             '>=': operator.ge,
825             '=': operator.eq,
826             '!=': operator.ne,
827         }
828         operator_rex = re.compile(r'''(?x)\s*\[
829             (?P<key>width|height|tbr|abr|vbr|filesize|fps)
830             \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
831             (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
832             \]$
833             ''' % '|'.join(map(re.escape, OPERATORS.keys())))
834         m = operator_rex.search(format_spec)
835         if not m:
836             raise ValueError('Invalid format specification %r' % format_spec)
837
838         try:
839             comparison_value = int(m.group('value'))
840         except ValueError:
841             comparison_value = parse_filesize(m.group('value'))
842             if comparison_value is None:
843                 comparison_value = parse_filesize(m.group('value') + 'B')
844             if comparison_value is None:
845                 raise ValueError(
846                     'Invalid value %r in format specification %r' % (
847                         m.group('value'), format_spec))
848         op = OPERATORS[m.group('op')]
849
850         def _filter(f):
851             actual_value = f.get(m.group('key'))
852             if actual_value is None:
853                 return m.group('none_inclusive')
854             return op(actual_value, comparison_value)
855         new_formats = [f for f in available_formats if _filter(f)]
856
857         new_format_spec = format_spec[:-len(m.group(0))]
858         if not new_format_spec:
859             new_format_spec = 'best'
860
861         return (new_format_spec, new_formats)
862
863     def select_format(self, format_spec, available_formats):
864         while format_spec.endswith(']'):
865             format_spec, available_formats = self._apply_format_filter(
866                 format_spec, available_formats)
867         if not available_formats:
868             return None
869
870         if format_spec == 'best' or format_spec is None:
871             return available_formats[-1]
872         elif format_spec == 'worst':
873             return available_formats[0]
874         elif format_spec == 'bestaudio':
875             audio_formats = [
876                 f for f in available_formats
877                 if f.get('vcodec') == 'none']
878             if audio_formats:
879                 return audio_formats[-1]
880         elif format_spec == 'worstaudio':
881             audio_formats = [
882                 f for f in available_formats
883                 if f.get('vcodec') == 'none']
884             if audio_formats:
885                 return audio_formats[0]
886         elif format_spec == 'bestvideo':
887             video_formats = [
888                 f for f in available_formats
889                 if f.get('acodec') == 'none']
890             if video_formats:
891                 return video_formats[-1]
892         elif format_spec == 'worstvideo':
893             video_formats = [
894                 f for f in available_formats
895                 if f.get('acodec') == 'none']
896             if video_formats:
897                 return video_formats[0]
898         else:
899             extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
900             if format_spec in extensions:
901                 filter_f = lambda f: f['ext'] == format_spec
902             else:
903                 filter_f = lambda f: f['format_id'] == format_spec
904             matches = list(filter(filter_f, available_formats))
905             if matches:
906                 return matches[-1]
907         return None
908
909     def _calc_headers(self, info_dict):
910         res = std_headers.copy()
911
912         add_headers = info_dict.get('http_headers')
913         if add_headers:
914             res.update(add_headers)
915
916         cookies = self._calc_cookies(info_dict)
917         if cookies:
918             res['Cookie'] = cookies
919
920         return res
921
922     def _calc_cookies(self, info_dict):
923         class _PseudoRequest(object):
924             def __init__(self, url):
925                 self.url = url
926                 self.headers = {}
927                 self.unverifiable = False
928
929             def add_unredirected_header(self, k, v):
930                 self.headers[k] = v
931
932             def get_full_url(self):
933                 return self.url
934
935             def is_unverifiable(self):
936                 return self.unverifiable
937
938             def has_header(self, h):
939                 return h in self.headers
940
941         pr = _PseudoRequest(info_dict['url'])
942         self.cookiejar.add_cookie_header(pr)
943         return pr.headers.get('Cookie')
944
    def process_video_result(self, info_dict, download=True):
        """Normalize a single 'video' result (thumbnails, dates, per-format
        defaults), apply the user's format selection, and, if 'download' is
        true, pass each selected format to process_info().
        Returns the info_dict, updated in place with the selected format(s).
        Raises ExtractorError on missing mandatory fields or when no format
        matches the request."""
        assert info_dict.get('_type', 'video') == 'video'

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result')
        if 'title' not in info_dict:
            raise ExtractorError('Missing "title" field in extractor result')

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        # Normalize a single 'thumbnail' URL into the 'thumbnails' list.
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if thumbnails:
            # Sort worst-to-best so the last entry is the preferred one.
            thumbnails.sort(key=lambda t: (
                t.get('preference'), t.get('width'), t.get('height'),
                t.get('id'), t.get('url')))
            for i, t in enumerate(thumbnails):
                if 'width' in t and 'height' in t:
                    t['resolution'] = '%dx%d' % (t['width'], t['height'])
                if t.get('id') is None:
                    t['id'] = '%d' % i

        if thumbnails and 'thumbnail' not in info_dict:
            # Best thumbnail is the last after the sort above.
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if 'display_id' not in info_dict and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        # Derive upload_date (YYYYMMDD) from a raw timestamp if needed.
        if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
            # Working around negative timestamps in Windows
            # (see http://bugs.python.org/issue1646728)
            if info_dict['timestamp'] < 0 and os.name == 'nt':
                info_dict['timestamp'] = 0
            upload_date = datetime.datetime.utcfromtimestamp(
                info_dict['timestamp'])
            info_dict['upload_date'] = upload_date.strftime('%Y%m%d')

        # These extractors handle format selection themselves
        if info_dict['extractor'] in ['Youku']:
            if download:
                self.process_info(info_dict)
            return info_dict

        # We now pick which formats have to be downloaded
        if info_dict.get('formats') is None:
            # There's only one format available
            formats = [info_dict]
        else:
            formats = info_dict['formats']

        if not formats:
            raise ExtractorError('No video formats found!')

        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            if 'url' not in format:
                raise ExtractorError('Missing "url" key in result (index %d)' % i)

            if format.get('format_id') is None:
                format['format_id'] = compat_str(i)
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
                )
            # Automatically determine file extension if missing
            if 'ext' not in format:
                format['ext'] = determine_ext(format['url']).lower()
            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)

        # --max-quality: drop everything after the given format_id
        # (formats are ordered worst to best).
        format_limit = self.params.get('format_limit', None)
        if format_limit:
            formats = list(takewhile_inclusive(
                lambda f: f['format_id'] != format_limit, formats
            ))

        # TODO Central sorting goes here

        if formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats
        if self.params.get('listformats'):
            self.list_formats(info_dict)
            return
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
            return

        req_format = self.params.get('format')
        if req_format is None:
            req_format = 'best'
        formats_to_download = []
        # The -1 is for supporting YoutubeIE
        if req_format in ('-1', 'all'):
            formats_to_download = formats
        else:
            # Comma-separated groups are all downloaded; within a group,
            # slash-separated alternatives are tried left to right.
            for rfstr in req_format.split(','):
                # We can accept formats requested in the format: 34/5/best, we pick
                # the first that is available, starting from left
                req_formats = rfstr.split('/')
                for rf in req_formats:
                    if re.match(r'.+?\+.+?', rf) is not None:
                        # Two formats have been requested like '137+139'
                        format_1, format_2 = rf.split('+')
                        formats_info = (self.select_format(format_1, formats),
                                        self.select_format(format_2, formats))
                        if all(formats_info):
                            # The first format must contain the video and the
                            # second the audio
                            if formats_info[0].get('vcodec') == 'none':
                                self.report_error('The first format must '
                                                  'contain the video, try using '
                                                  '"-f %s+%s"' % (format_2, format_1))
                                return
                            output_ext = (
                                formats_info[0]['ext']
                                if self.params.get('merge_output_format') is None
                                else self.params['merge_output_format'])
                            # Synthesize a merged pseudo-format: video
                            # properties from the first half, audio from
                            # the second.
                            selected_format = {
                                'requested_formats': formats_info,
                                'format': '%s+%s' % (formats_info[0].get('format'),
                                                     formats_info[1].get('format')),
                                'format_id': '%s+%s' % (formats_info[0].get('format_id'),
                                                        formats_info[1].get('format_id')),
                                'width': formats_info[0].get('width'),
                                'height': formats_info[0].get('height'),
                                'resolution': formats_info[0].get('resolution'),
                                'fps': formats_info[0].get('fps'),
                                'vcodec': formats_info[0].get('vcodec'),
                                'vbr': formats_info[0].get('vbr'),
                                'stretched_ratio': formats_info[0].get('stretched_ratio'),
                                'acodec': formats_info[1].get('acodec'),
                                'abr': formats_info[1].get('abr'),
                                'ext': output_ext,
                            }
                        else:
                            selected_format = None
                    else:
                        selected_format = self.select_format(rf, formats)
                    if selected_format is not None:
                        formats_to_download.append(selected_format)
                        break
        if not formats_to_download:
            raise ExtractorError('requested format not available',
                                 expected=True)

        if download:
            if len(formats_to_download) > 1:
                self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
            for format in formats_to_download:
                new_info = dict(info_dict)
                new_info.update(format)
                self.process_info(new_info)
        # We update the info dict with the best quality format (backwards compatibility)
        info_dict.update(formats_to_download[-1])
        return info_dict
1115
    def process_info(self, info_dict):
        """Process a single resolved IE result: honour forced printings and
        simulate mode, write sidecar files (description, annotations,
        subtitles, info JSON, thumbnails), download the media (merging
        multi-format requests), run fixups and postprocessors, and record
        the download in the archive. Raises MaxDownloadsReached when the
        --max-downloads limit is hit."""

        assert info_dict.get('_type', 'video') == 'video'

        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None:
            if self._num_downloads >= int(max_downloads):
                raise MaxDownloadsReached()

        # Preserve the full title; the working title is capped at 200 chars.
        info_dict['fulltitle'] = info_dict['title']
        if len(info_dict['title']) > 200:
            info_dict['title'] = info_dict['title'][:197] + '...'

        # Keep for backwards compatibility
        info_dict['stitle'] = info_dict['title']

        if 'format' not in info_dict:
            info_dict['format'] = info_dict['ext']

        # Apply --match-title/--reject-title/--date and similar filters.
        reason = self._match_entry(info_dict)
        if reason is not None:
            self.to_screen('[download] ' + reason)
            return

        self._num_downloads += 1

        info_dict['_filename'] = filename = self.prepare_filename(info_dict)

        # Forced printings
        if self.params.get('forcetitle', False):
            self.to_stdout(info_dict['fulltitle'])
        if self.params.get('forceid', False):
            self.to_stdout(info_dict['id'])
        if self.params.get('forceurl', False):
            if info_dict.get('requested_formats') is not None:
                for f in info_dict['requested_formats']:
                    self.to_stdout(f['url'] + f.get('play_path', ''))
            else:
                # For RTMP URLs, also include the playpath
                self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
        if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
            self.to_stdout(info_dict['thumbnail'])
        if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
            self.to_stdout(info_dict['description'])
        if self.params.get('forcefilename', False) and filename is not None:
            self.to_stdout(filename)
        if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
            self.to_stdout(formatSeconds(info_dict['duration']))
        if self.params.get('forceformat', False):
            self.to_stdout(info_dict['format'])
        if self.params.get('forcejson', False):
            self.to_stdout(json.dumps(info_dict))

        # Do nothing else if in simulate mode
        if self.params.get('simulate', False):
            return

        if filename is None:
            return

        # Make sure the destination directory exists before writing anything.
        try:
            dn = os.path.dirname(encodeFilename(filename))
            if dn and not os.path.exists(dn):
                os.makedirs(dn)
        except (OSError, IOError) as err:
            self.report_error('unable to create directory ' + compat_str(err))
            return

        if self.params.get('writedescription', False):
            descfn = filename + '.description'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
                self.to_screen('[info] Video description is already present')
            elif info_dict.get('description') is None:
                self.report_warning('There\'s no description to write.')
            else:
                try:
                    self.to_screen('[info] Writing video description to: ' + descfn)
                    with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                        descfile.write(info_dict['description'])
                except (OSError, IOError):
                    self.report_error('Cannot write description file ' + descfn)
                    return

        if self.params.get('writeannotations', False):
            annofn = filename + '.annotations.xml'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                # Missing or non-string 'annotations' is not fatal.
                except (KeyError, TypeError):
                    self.report_warning('There are no annotations to write.')
                except (OSError, IOError):
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                       self.params.get('writeautomaticsub')])

        if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            subtitles = info_dict['subtitles']
            sub_format = self.params.get('subtitlesformat', 'srt')
            for sub_lang in subtitles.keys():
                sub = subtitles[sub_lang]
                if sub is None:
                    continue
                try:
                    sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                    if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                        self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
                    else:
                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                            subfile.write(sub)
                except (OSError, IOError):
                    self.report_error('Cannot write subtitles file ' + sub_filename)
                    return

        if self.params.get('writeinfojson', False):
            infofn = os.path.splitext(filename)[0] + '.info.json'
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
                self.to_screen('[info] Video description metadata is already present')
            else:
                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                try:
                    write_json_file(info_dict, infofn)
                except (OSError, IOError):
                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                    return

        self._write_thumbnails(info_dict, filename)

        if not self.params.get('skip_download', False):
            try:
                def dl(name, info):
                    # Pick the right downloader (HTTP, RTMP, HLS, ...) for
                    # this format and run it with our progress hooks attached.
                    fd = get_suitable_downloader(info, self.params)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get('verbose'):
                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
                    return fd.download(name, info)

                if info_dict.get('requested_formats') is not None:
                    # Multi-format request (e.g. '137+139'): download each
                    # part to its own file, then merge via ffmpeg/avconv.
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
                    if not merger._executable:
                        postprocessors = []
                        self.report_warning('You have requested multiple '
                                            'formats but ffmpeg or avconv are not installed.'
                                            ' The formats won\'t be merged')
                    else:
                        postprocessors = [merger]
                    for f in info_dict['requested_formats']:
                        new_info = dict(info_dict)
                        new_info.update(f)
                        fname = self.prepare_filename(new_info)
                        fname = prepend_extension(fname, 'f%s' % f['format_id'])
                        downloaded.append(fname)
                        partial_success = dl(fname, new_info)
                        success = success and partial_success
                    info_dict['__postprocessors'] = postprocessors
                    info_dict['__files_to_merge'] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error('unable to download video data: %s' % str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

            if success:
                # Fixup content
                fixup_policy = self.params.get('fixup')
                if fixup_policy is None:
                    fixup_policy = 'detect_or_warn'

                # Fix non-uniform pixel aspect ratio if ffmpeg/avconv exists.
                stretched_ratio = info_dict.get('stretched_ratio')
                if stretched_ratio is not None and stretched_ratio != 1:
                    if fixup_policy == 'warn':
                        self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
                            info_dict['id'], stretched_ratio))
                    elif fixup_policy == 'detect_or_warn':
                        stretched_pp = FFmpegFixupStretchedPP(self)
                        if stretched_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(stretched_pp)
                        else:
                            self.report_warning(
                                '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % (
                                    info_dict['id'], stretched_ratio))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                # Repackage DASH m4a into a more compatible container.
                if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash':
                    if fixup_policy == 'warn':
                        self.report_warning('%s: writing DASH m4a. Only some players support this container.' % (
                            info_dict['id']))
                    elif fixup_policy == 'detect_or_warn':
                        fixup_pp = FFmpegFixupM4aPP(self)
                        if fixup_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(fixup_pp)
                        else:
                            self.report_warning(
                                '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.' % (
                                    info_dict['id']))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                try:
                    self.post_process(filename, info_dict)
                except (PostProcessingError) as err:
                    self.report_error('postprocessing: %s' % str(err))
                    return
                self.record_download_archive(info_dict)
1342     def download(self, url_list):
1343         """Download a given list of URLs."""
1344         outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
1345         if (len(url_list) > 1 and
1346                 '%' not in outtmpl
1347                 and self.params.get('max_downloads') != 1):
1348             raise SameFileError(outtmpl)
1349
1350         for url in url_list:
1351             try:
1352                 # It also downloads the videos
1353                 res = self.extract_info(url)
1354             except UnavailableVideoError:
1355                 self.report_error('unable to download video')
1356             except MaxDownloadsReached:
1357                 self.to_screen('[info] Maximum number of downloaded files reached.')
1358                 raise
1359             else:
1360                 if self.params.get('dump_single_json', False):
1361                     self.to_stdout(json.dumps(res))
1362
1363         return self._download_retcode
1364
1365     def download_with_info_file(self, info_filename):
1366         with io.open(info_filename, 'r', encoding='utf-8') as f:
1367             info = json.load(f)
1368         try:
1369             self.process_ie_result(info, download=True)
1370         except DownloadError:
1371             webpage_url = info.get('webpage_url')
1372             if webpage_url is not None:
1373                 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
1374                 return self.download([webpage_url])
1375             else:
1376                 raise
1377         return self._download_retcode
1378
    def post_process(self, filename, ie_info):
        """Run all the postprocessors on the given file.

        filename -- path of the (already downloaded) file to process
        ie_info  -- info dict for the video; it is copied so the caller's
                    dict is not mutated, and 'filepath' is set on the copy
        """
        info = dict(ie_info)
        info['filepath'] = filename
        pps_chain = []
        # Per-video postprocessors (attached under '__postprocessors' by the
        # download step, e.g. merge/fixup PPs) run before the globally
        # configured ones in self._pps.
        if ie_info.get('__postprocessors') is not None:
            pps_chain.extend(ie_info['__postprocessors'])
        pps_chain.extend(self._pps)
        for pp in pps_chain:
            # NOTE(review): keep_video is reset on every iteration, so the
            # keep/delete decision is made per-postprocessor -- confirm intended.
            keep_video = None
            old_filename = info['filepath']
            try:
                # pp.run returns (keep_video_wish, possibly-updated info);
                # a postprocessor may rewrite 'filepath' (e.g. after conversion).
                keep_video_wish, info = pp.run(info)
                if keep_video_wish is not None:
                    if keep_video_wish:
                        keep_video = keep_video_wish
                    elif keep_video is None:
                        # No clear decision yet, let IE decide
                        keep_video = keep_video_wish
            except PostProcessingError as e:
                self.report_error(e.msg)
            if keep_video is False and not self.params.get('keepvideo', False):
                # The PP asked to drop the original and -k was not given:
                # best-effort removal of the pre-processing file.
                try:
                    self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
                    os.remove(encodeFilename(old_filename))
                except (IOError, OSError):
                    self.report_warning('Unable to remove downloaded video file')
1406
1407     def _make_archive_id(self, info_dict):
1408         # Future-proof against any change in case
1409         # and backwards compatibility with prior versions
1410         extractor = info_dict.get('extractor_key')
1411         if extractor is None:
1412             if 'id' in info_dict:
1413                 extractor = info_dict.get('ie_key')  # key in a playlist
1414         if extractor is None:
1415             return None  # Incomplete video information
1416         return extractor.lower() + ' ' + info_dict['id']
1417
1418     def in_download_archive(self, info_dict):
1419         fn = self.params.get('download_archive')
1420         if fn is None:
1421             return False
1422
1423         vid_id = self._make_archive_id(info_dict)
1424         if vid_id is None:
1425             return False  # Incomplete video information
1426
1427         try:
1428             with locked_file(fn, 'r', encoding='utf-8') as archive_file:
1429                 for line in archive_file:
1430                     if line.strip() == vid_id:
1431                         return True
1432         except IOError as ioe:
1433             if ioe.errno != errno.ENOENT:
1434                 raise
1435         return False
1436
1437     def record_download_archive(self, info_dict):
1438         fn = self.params.get('download_archive')
1439         if fn is None:
1440             return
1441         vid_id = self._make_archive_id(info_dict)
1442         assert vid_id
1443         with locked_file(fn, 'a', encoding='utf-8') as archive_file:
1444             archive_file.write(vid_id + '\n')
1445
1446     @staticmethod
1447     def format_resolution(format, default='unknown'):
1448         if format.get('vcodec') == 'none':
1449             return 'audio only'
1450         if format.get('resolution') is not None:
1451             return format['resolution']
1452         if format.get('height') is not None:
1453             if format.get('width') is not None:
1454                 res = '%sx%s' % (format['width'], format['height'])
1455             else:
1456                 res = '%sp' % format['height']
1457         elif format.get('width') is not None:
1458             res = '?x%d' % format['width']
1459         else:
1460             res = default
1461         return res
1462
1463     def _format_note(self, fdict):
1464         res = ''
1465         if fdict.get('ext') in ['f4f', 'f4m']:
1466             res += '(unsupported) '
1467         if fdict.get('format_note') is not None:
1468             res += fdict['format_note'] + ' '
1469         if fdict.get('tbr') is not None:
1470             res += '%4dk ' % fdict['tbr']
1471         if fdict.get('container') is not None:
1472             if res:
1473                 res += ', '
1474             res += '%s container' % fdict['container']
1475         if (fdict.get('vcodec') is not None and
1476                 fdict.get('vcodec') != 'none'):
1477             if res:
1478                 res += ', '
1479             res += fdict['vcodec']
1480             if fdict.get('vbr') is not None:
1481                 res += '@'
1482         elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
1483             res += 'video@'
1484         if fdict.get('vbr') is not None:
1485             res += '%4dk' % fdict['vbr']
1486         if fdict.get('fps') is not None:
1487             res += ', %sfps' % fdict['fps']
1488         if fdict.get('acodec') is not None:
1489             if res:
1490                 res += ', '
1491             if fdict['acodec'] == 'none':
1492                 res += 'video only'
1493             else:
1494                 res += '%-5s' % fdict['acodec']
1495         elif fdict.get('abr') is not None:
1496             if res:
1497                 res += ', '
1498             res += 'audio'
1499         if fdict.get('abr') is not None:
1500             res += '@%3dk' % fdict['abr']
1501         if fdict.get('asr') is not None:
1502             res += ' (%5dHz)' % fdict['asr']
1503         if fdict.get('filesize') is not None:
1504             if res:
1505                 res += ', '
1506             res += format_bytes(fdict['filesize'])
1507         elif fdict.get('filesize_approx') is not None:
1508             if res:
1509                 res += ', '
1510             res += '~' + format_bytes(fdict['filesize_approx'])
1511         return res
1512
1513     def list_formats(self, info_dict):
1514         def line(format, idlen=20):
1515             return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
1516                 format['format_id'],
1517                 format['ext'],
1518                 self.format_resolution(format),
1519                 self._format_note(format),
1520             ))
1521
1522         formats = info_dict.get('formats', [info_dict])
1523         idlen = max(len('format code'),
1524                     max(len(f['format_id']) for f in formats))
1525         formats_s = [
1526             line(f, idlen) for f in formats
1527             if f.get('preference') is None or f['preference'] >= -1000]
1528         if len(formats) > 1:
1529             formats_s[0] += (' ' if self._format_note(formats[0]) else '') + '(worst)'
1530             formats_s[-1] += (' ' if self._format_note(formats[-1]) else '') + '(best)'
1531
1532         header_line = line({
1533             'format_id': 'format code', 'ext': 'extension',
1534             'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
1535         self.to_screen(
1536             '[info] Available formats for %s:\n%s\n%s' %
1537             (info_dict['id'], header_line, '\n'.join(formats_s)))
1538
1539     def list_thumbnails(self, info_dict):
1540         thumbnails = info_dict.get('thumbnails')
1541         if not thumbnails:
1542             tn_url = info_dict.get('thumbnail')
1543             if tn_url:
1544                 thumbnails = [{'id': '0', 'url': tn_url}]
1545             else:
1546                 self.to_screen(
1547                     '[info] No thumbnails present for %s' % info_dict['id'])
1548                 return
1549
1550         self.to_screen(
1551             '[info] Thumbnails for %s:' % info_dict['id'])
1552         self.to_screen(render_table(
1553             ['ID', 'width', 'height', 'URL'],
1554             [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
1555
1556     def urlopen(self, req):
1557         """ Start an HTTP download """
1558
1559         # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
1560         # always respected by websites, some tend to give out URLs with non percent-encoded
1561         # non-ASCII characters (see telemb.py, ard.py [#3412])
1562         # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
1563         # To work around aforementioned issue we will replace request's original URL with
1564         # percent-encoded one
1565         req_is_string = isinstance(req, compat_basestring)
1566         url = req if req_is_string else req.get_full_url()
1567         url_escaped = escape_url(url)
1568
1569         # Substitute URL if any change after escaping
1570         if url != url_escaped:
1571             if req_is_string:
1572                 req = url_escaped
1573             else:
1574                 req = compat_urllib_request.Request(
1575                     url_escaped, data=req.data, headers=req.headers,
1576                     origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
1577
1578         return self._opener.open(req, timeout=self._socket_timeout)
1579
1580     def print_debug_header(self):
1581         if not self.params.get('verbose'):
1582             return
1583
1584         if type('') is not compat_str:
1585             # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
1586             self.report_warning(
1587                 'Your Python is broken! Update to a newer and supported version')
1588
1589         stdout_encoding = getattr(
1590             sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
1591         encoding_str = (
1592             '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
1593                 locale.getpreferredencoding(),
1594                 sys.getfilesystemencoding(),
1595                 stdout_encoding,
1596                 self.get_encoding()))
1597         write_string(encoding_str, encoding=None)
1598
1599         self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
1600         try:
1601             sp = subprocess.Popen(
1602                 ['git', 'rev-parse', '--short', 'HEAD'],
1603                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1604                 cwd=os.path.dirname(os.path.abspath(__file__)))
1605             out, err = sp.communicate()
1606             out = out.decode().strip()
1607             if re.match('[0-9a-f]+', out):
1608                 self._write_string('[debug] Git HEAD: ' + out + '\n')
1609         except:
1610             try:
1611                 sys.exc_clear()
1612             except:
1613                 pass
1614         self._write_string('[debug] Python version %s - %s\n' % (
1615             platform.python_version(), platform_name()))
1616
1617         exe_versions = FFmpegPostProcessor.get_versions()
1618         exe_versions['rtmpdump'] = rtmpdump_version()
1619         exe_str = ', '.join(
1620             '%s %s' % (exe, v)
1621             for exe, v in sorted(exe_versions.items())
1622             if v
1623         )
1624         if not exe_str:
1625             exe_str = 'none'
1626         self._write_string('[debug] exe versions: %s\n' % exe_str)
1627
1628         proxy_map = {}
1629         for handler in self._opener.handlers:
1630             if hasattr(handler, 'proxies'):
1631                 proxy_map.update(handler.proxies)
1632         self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
1633
1634         if self.params.get('call_home', False):
1635             ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
1636             self._write_string('[debug] Public IP address: %s\n' % ipaddr)
1637             latest_version = self.urlopen(
1638                 'https://yt-dl.org/latest/version').read().decode('utf-8')
1639             if version_tuple(latest_version) > version_tuple(__version__):
1640                 self.report_warning(
1641                     'You are using an outdated version (newest version: %s)! '
1642                     'See https://yt-dl.org/update if you need help updating.' %
1643                     latest_version)
1644
1645     def _setup_opener(self):
1646         timeout_val = self.params.get('socket_timeout')
1647         self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
1648
1649         opts_cookiefile = self.params.get('cookiefile')
1650         opts_proxy = self.params.get('proxy')
1651
1652         if opts_cookiefile is None:
1653             self.cookiejar = compat_cookiejar.CookieJar()
1654         else:
1655             self.cookiejar = compat_cookiejar.MozillaCookieJar(
1656                 opts_cookiefile)
1657             if os.access(opts_cookiefile, os.R_OK):
1658                 self.cookiejar.load()
1659
1660         cookie_processor = compat_urllib_request.HTTPCookieProcessor(
1661             self.cookiejar)
1662         if opts_proxy is not None:
1663             if opts_proxy == '':
1664                 proxies = {}
1665             else:
1666                 proxies = {'http': opts_proxy, 'https': opts_proxy}
1667         else:
1668             proxies = compat_urllib_request.getproxies()
1669             # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
1670             if 'http' in proxies and 'https' not in proxies:
1671                 proxies['https'] = proxies['http']
1672         proxy_handler = compat_urllib_request.ProxyHandler(proxies)
1673
1674         debuglevel = 1 if self.params.get('debug_printtraffic') else 0
1675         https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
1676         ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
1677         opener = compat_urllib_request.build_opener(
1678             https_handler, proxy_handler, cookie_processor, ydlh)
1679         # Delete the default user-agent header, which would otherwise apply in
1680         # cases where our custom HTTP handler doesn't come into play
1681         # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
1682         opener.addheaders = []
1683         self._opener = opener
1684
1685     def encode(self, s):
1686         if isinstance(s, bytes):
1687             return s  # Already encoded
1688
1689         try:
1690             return s.encode(self.get_encoding())
1691         except UnicodeEncodeError as err:
1692             err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
1693             raise
1694
1695     def get_encoding(self):
1696         encoding = self.params.get('encoding')
1697         if encoding is None:
1698             encoding = preferredencoding()
1699         return encoding
1700
    def _write_thumbnails(self, info_dict, filename):
        """Download thumbnail image(s) next to the video file.

        Honours the 'writethumbnail' (one thumbnail only) and
        'write_all_thumbnails' (every thumbnail) params; does nothing when
        neither is set or no thumbnails are available.
        """
        if self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails')
            if thumbnails:
                # Keeps only the last entry -- presumably the list is sorted
                # with the best thumbnail last; confirm against the sorter.
                thumbnails = [thumbnails[-1]]
        elif self.params.get('write_all_thumbnails', False):
            thumbnails = info_dict.get('thumbnails')
        else:
            return

        if not thumbnails:
            # No thumbnails present, so return immediately
            return

        for t in thumbnails:
            thumb_ext = determine_ext(t['url'], 'jpg')
            # Only disambiguate file names / log lines when several
            # thumbnails are being written.
            suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
            thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
            thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext

            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
                self.to_screen('[%s] %s: Thumbnail %sis already present' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id))
            else:
                self.to_screen('[%s] %s: Downloading thumbnail %s...' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id))
                try:
                    uf = self.urlopen(t['url'])
                    with open(thumb_filename, 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                                   (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    # Thumbnails are optional: warn and carry on.
                    self.report_warning('Unable to download thumbnail "%s": %s' %
                                        (t['url'], compat_str(err)))