2 # -*- coding: utf-8 -*-
4 from __future__ import absolute_import, unicode_literals
30 compat_urllib_request,
53 UnavailableVideoError,
60 from .extractor import get_info_extractor, gen_extractors
61 from .downloader import get_suitable_downloader
62 from .postprocessor import FFmpegMergerPP
63 from .version import __version__
66 class YoutubeDL(object):
69 YoutubeDL objects are responsible for downloading the actual video file
70 and writing it to disk if the user has requested it, among other tasks.
71 In most cases there should be one per program. Given a video URL, the
72 downloader does not know how to extract all the needed information by
73 itself; that is the task of the InfoExtractors, so it has to pass the
74 URL to one of them.
76 For this, YoutubeDL objects have a method that allows
77 InfoExtractors to be registered in a given order. When it is passed
78 a URL, the YoutubeDL object hands it to the first InfoExtractor that
79 reports being able to handle it. The InfoExtractor extracts all the
80 information about the video or videos the URL refers to, and
81 YoutubeDL processes the extracted information, possibly using a File
82 Downloader to download the video.
84 YoutubeDL objects accept a lot of parameters. In order not to saturate
85 the object constructor with arguments, it receives a dictionary of
86 options instead. These options are available through the params
87 attribute for the InfoExtractors to use. The YoutubeDL also
88 registers itself as the downloader in charge of the InfoExtractors
89 that are added to it, so this is a "mutual registration".
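
A minimal usage sketch (illustrative only; the URL, the option value and the
import path are assumptions, and the available options are documented below):

    from youtube_dl import YoutubeDL

    ydl = YoutubeDL({'outtmpl': '%(title)s-%(id)s.%(ext)s'})
    ydl.add_default_info_extractors()
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])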
93 username: Username for authentication purposes.
94 password: Password for authentication purposes.
95 videopassword: Password for accessing a video.
96 usenetrc: Use netrc for authentication instead.
97 verbose: Print additional info to stdout.
98 quiet: Do not print messages to stdout.
99 no_warnings: Do not print out anything for warnings.
100 forceurl: Force printing final URL.
101 forcetitle: Force printing title.
102 forceid: Force printing ID.
103 forcethumbnail: Force printing thumbnail URL.
104 forcedescription: Force printing description.
105 forcefilename: Force printing final filename.
106 forceduration: Force printing duration.
107 forcejson: Force printing info_dict as JSON.
108 simulate: Do not download the video files.
109 format: Video format code.
110 format_limit: Highest quality format to try.
111 outtmpl: Template for output names.
112 restrictfilenames: Do not allow "&" and spaces in file names
113 ignoreerrors: Do not stop on download errors.
114 nooverwrites: Prevent overwriting files.
115 playliststart: Playlist item to start at.
116 playlistend: Playlist item to end at.
117 matchtitle: Download only matching titles.
118 rejecttitle: Reject downloads for matching titles.
119 logger: Log messages to a logging.Logger instance.
120 logtostderr: Log messages to stderr instead of stdout.
121 writedescription: Write the video description to a .description file
122 writeinfojson: Write the video description to a .info.json file
123 writeannotations: Write the video annotations to a .annotations.xml file
124 writethumbnail: Write the thumbnail image to a file
125 writesubtitles: Write the video subtitles to a file
126 writeautomaticsub: Write the automatic subtitles to a file
127 allsubtitles: Downloads all the subtitles of the video
128 (requires writesubtitles or writeautomaticsub)
129 listsubtitles: Lists all available subtitles for the video
130 subtitlesformat: Subtitle format [srt/sbv/vtt] (default=srt)
131 subtitleslangs: List of languages of the subtitles to download
132 keepvideo: Keep the video file after post-processing
133 daterange: A DateRange object, download only if the upload_date is in the range.
134 skip_download: Skip the actual download of the video file
135 cachedir: Location of the cache files in the filesystem.
136 None to disable filesystem cache.
137 noplaylist: Download single video instead of a playlist if in doubt.
138 age_limit: An integer representing the user's age in years.
139 Videos unsuitable for the given age are skipped.
140 min_views: An integer representing the minimum view count the video
141 must have in order to not be skipped.
142 Videos without view count information are always
143 downloaded. None for no limit.
144 max_views: An integer representing the maximum view count.
145 Videos that are more popular than that are not downloaded.
147 Videos without view count information are always
148 downloaded. None for no limit.
149 download_archive: File name of a file where all downloads are recorded.
150 Videos already present in the file are not downloaded again.
152 cookiefile: File name where cookies should be read from and dumped to.
153 nocheckcertificate:Do not verify SSL certificates
154 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
155 At the moment, this is only supported by YouTube.
156 proxy: URL of the proxy server to use
157 socket_timeout: Time to wait for unresponsive hosts, in seconds
158 bidi_workaround: Work around buggy terminals without bidirectional text
159 support, using fribidi
160 debug_printtraffic:Print out sent and received HTTP traffic
161 include_ads: Download ads as well
162 default_search: Prepend this string if an input URL is not valid.
163 'auto' for elaborate guessing
164 encoding: Use this encoding instead of the system-specified one.
165 extract_flat: Do not resolve URLs, return the immediate result.
167 The following parameters are not used by YoutubeDL itself; they are used by
169 nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
170 noresizebuffer, retries, continuedl, noprogress, consoletitle
172 The following options are used by the post processors:
173 prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available,
174 otherwise prefer avconv.
175 exec_cmd: Arbitrary command to run after downloading
181 _download_retcode = None
182 _num_downloads = None
185 def __init__(self, params=None):
186 """Create a FileDownloader object with the given options."""
190 self._ies_instances = {}
192 self._progress_hooks = []
193 self._download_retcode = 0
194 self._num_downloads = 0
195 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
196 self._err_file = sys.stderr
199 if params.get('bidi_workaround', False):
202 master, slave = pty.openpty()
203 width = get_term_width()
207 width_args = ['-w', str(width)]
209 stdin=subprocess.PIPE,
211 stderr=self._err_file)
213 self._output_process = subprocess.Popen(
214 ['bidiv'] + width_args, **sp_kwargs
217 self._output_process = subprocess.Popen(
218 ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
219 self._output_channel = os.fdopen(master, 'rb')
220 except OSError as ose:
222 self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround. Make sure that fribidi is an executable file in one of the directories in your $PATH.')
226 if (sys.version_info >= (3,) and sys.platform != 'win32' and
227 sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
228 and not params['restrictfilenames']):
229 # On Python 3, the Unicode filesystem API will throw errors (#1474)
231 'Assuming --restrict-filenames since file system encoding '
232 'cannot encode all characters. '
233 'Set the LC_ALL environment variable to fix this.')
234 self.params['restrictfilenames'] = True
236 if '%(stitle)s' in self.params.get('outtmpl', ''):
237 self.report_warning('%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag (which also secures %(uploader)s et al) instead.')
241 def add_info_extractor(self, ie):
242 """Add an InfoExtractor object to the end of the list."""
244 self._ies_instances[ie.ie_key()] = ie
245 ie.set_downloader(self)
247 def get_info_extractor(self, ie_key):
249 Get an instance of an IE with name ie_key; it will try to get one from
250 the _ies list, and if there is no instance it will create a new one and
251 add it to the extractor list.
253 ie = self._ies_instances.get(ie_key)
255 ie = get_info_extractor(ie_key)()
256 self.add_info_extractor(ie)
259 def add_default_info_extractors(self):
261 Add the InfoExtractors returned by gen_extractors to the end of the list
263 for ie in gen_extractors():
264 self.add_info_extractor(ie)
266 def add_post_processor(self, pp):
267 """Add a PostProcessor object to the end of the chain."""
269 pp.set_downloader(self)
271 def add_progress_hook(self, ph):
272 """Add the progress hook (currently only for the file downloader)"""
273 self._progress_hooks.append(ph)
275 def _bidi_workaround(self, message):
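# Feed the message through the external bidi helper ('bidiv' or 'fribidi',
# spawned in __init__) one line at a time and read the reshaped lines back;
# this relies on the helper emitting exactly one output line per input line.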
276 if not hasattr(self, '_output_channel'):
279 assert hasattr(self, '_output_process')
280 assert isinstance(message, compat_str)
281 line_count = message.count('\n') + 1
282 self._output_process.stdin.write((message + '\n').encode('utf-8'))
283 self._output_process.stdin.flush()
284 res = ''.join(self._output_channel.readline().decode('utf-8')
285 for _ in range(line_count))
286 return res[:-len('\n')]
288 def to_screen(self, message, skip_eol=False):
289 """Print message to stdout if not in quiet mode."""
290 return self.to_stdout(message, skip_eol, check_quiet=True)
292 def _write_string(self, s, out=None):
293 write_string(s, out=out, encoding=self.params.get('encoding'))
295 def to_stdout(self, message, skip_eol=False, check_quiet=False):
296 """Print message to stdout if not in quiet mode."""
297 if self.params.get('logger'):
298 self.params['logger'].debug(message)
299 elif not check_quiet or not self.params.get('quiet', False):
300 message = self._bidi_workaround(message)
301 terminator = ['\n', ''][skip_eol]
302 output = message + terminator
304 self._write_string(output, self._screen_file)
306 def to_stderr(self, message):
307 """Print message to stderr."""
308 assert isinstance(message, compat_str)
309 if self.params.get('logger'):
310 self.params['logger'].error(message)
312 message = self._bidi_workaround(message)
313 output = message + '\n'
314 self._write_string(output, self._err_file)
316 def to_console_title(self, message):
317 if not self.params.get('consoletitle', False):
319 if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
320 # c_wchar_p() might not be necessary if `message` is
321 # already of type unicode()
322 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
323 elif 'TERM' in os.environ:
324 self._write_string('\033]0;%s\007' % message, self._screen_file)
326 def save_console_title(self):
327 if not self.params.get('consoletitle', False):
329 if 'TERM' in os.environ:
330 # Save the title on stack
331 self._write_string('\033[22;0t', self._screen_file)
333 def restore_console_title(self):
334 if not self.params.get('consoletitle', False):
336 if 'TERM' in os.environ:
337 # Restore the title from stack
338 self._write_string('\033[23;0t', self._screen_file)
341 self.save_console_title()
344 def __exit__(self, *args):
345 self.restore_console_title()
347 if self.params.get('cookiefile') is not None:
348 self.cookiejar.save()
350 def trouble(self, message=None, tb=None):
351 """Determine action to take when a download problem appears.
353 Depending on whether the downloader has been configured to ignore
354 download errors, this method prints the message and then either raises
355 an exception or returns normally.
357 tb, if given, is additional traceback information.
359 if message is not None:
360 self.to_stderr(message)
361 if self.params.get('verbose'):
363 if sys.exc_info()[0]: # if .trouble has been called from an except block
365 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
366 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
367 tb += compat_str(traceback.format_exc())
369 tb_data = traceback.format_list(traceback.extract_stack())
370 tb = ''.join(tb_data)
372 if not self.params.get('ignoreerrors', False):
373 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
374 exc_info = sys.exc_info()[1].exc_info
376 exc_info = sys.exc_info()
377 raise DownloadError(message, exc_info)
378 self._download_retcode = 1
380 def report_warning(self, message):
382 Print the message to stderr; it will be prefixed with 'WARNING:'.
383 If stderr is a tty file, the 'WARNING:' prefix will be colored.
385 if self.params.get('logger') is not None:
386 self.params['logger'].warning(message)
388 if self.params.get('no_warnings'):
390 if self._err_file.isatty() and os.name != 'nt':
391 _msg_header = '\033[0;33mWARNING:\033[0m'
393 _msg_header = 'WARNING:'
394 warning_message = '%s %s' % (_msg_header, message)
395 self.to_stderr(warning_message)
397 def report_error(self, message, tb=None):
399 Do the same as trouble, but prefixes the message with 'ERROR:', colored
400 in red if stderr is a tty file.
402 if self._err_file.isatty() and os.name != 'nt':
403 _msg_header = '\033[0;31mERROR:\033[0m'
405 _msg_header = 'ERROR:'
406 error_message = '%s %s' % (_msg_header, message)
407 self.trouble(error_message, tb)
409 def report_file_already_downloaded(self, file_name):
410 """Report file has already been fully downloaded."""
412 self.to_screen('[download] %s has already been downloaded' % file_name)
413 except UnicodeEncodeError:
414 self.to_screen('[download] The file has already been downloaded')
416 def prepare_filename(self, info_dict):
417 """Generate the output filename."""
419 template_dict = dict(info_dict)
421 template_dict['epoch'] = int(time.time())
422 autonumber_size = self.params.get('autonumber_size')
423 if autonumber_size is None:
425 autonumber_templ = '%0' + str(autonumber_size) + 'd'
426 template_dict['autonumber'] = autonumber_templ % self._num_downloads
427 if template_dict.get('playlist_index') is not None:
428 template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index'])
429 if template_dict.get('resolution') is None:
430 if template_dict.get('width') and template_dict.get('height'):
431 template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
432 elif template_dict.get('height'):
433 template_dict['resolution'] = '%sp' % template_dict['height']
434 elif template_dict.get('width'):
435 template_dict['resolution'] = '?x%d' % template_dict['width']
437 sanitize = lambda k, v: sanitize_filename(
439 restricted=self.params.get('restrictfilenames'),
441 template_dict = dict((k, sanitize(k, v))
442 for k, v in template_dict.items()
444 template_dict = collections.defaultdict(lambda: 'NA', template_dict)
446 outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
447 tmpl = os.path.expanduser(outtmpl)
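# Illustrative expansion (hypothetical values): assuming DEFAULT_OUTTMPL is
# '%(title)s-%(id)s.%(ext)s', a video titled 'Some title' with id 'abc123'
# and ext 'mp4' yields 'Some title-abc123.mp4'.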
448 filename = tmpl % template_dict
450 except ValueError as err:
451 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
454 def _match_entry(self, info_dict):
455 """ Returns None iff the file should be downloaded """
457 video_title = info_dict.get('title', info_dict.get('id', 'video'))
458 if 'title' in info_dict:
459 # This can happen when we're just evaluating the playlist
460 title = info_dict['title']
461 matchtitle = self.params.get('matchtitle', False)
463 if not re.search(matchtitle, title, re.IGNORECASE):
464 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
465 rejecttitle = self.params.get('rejecttitle', False)
467 if re.search(rejecttitle, title, re.IGNORECASE):
468 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
469 date = info_dict.get('upload_date', None)
471 dateRange = self.params.get('daterange', DateRange())
472 if date not in dateRange:
473 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
474 view_count = info_dict.get('view_count', None)
475 if view_count is not None:
476 min_views = self.params.get('min_views')
477 if min_views is not None and view_count < min_views:
478 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
479 max_views = self.params.get('max_views')
480 if max_views is not None and view_count > max_views:
481 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
482 age_limit = self.params.get('age_limit')
483 if age_limit is not None:
484 actual_age_limit = info_dict.get('age_limit')
485 if actual_age_limit is None:
487 if age_limit < actual_age_limit:
488 return 'Skipping "' + title + '" because it is age restricted'
489 if self.in_download_archive(info_dict):
490 return '%s has already been recorded in archive' % video_title
494 def add_extra_info(info_dict, extra_info):
495 '''Set the keys from extra_info in info dict if they are missing'''
496 for key, value in extra_info.items():
497 info_dict.setdefault(key, value)
499 def extract_info(self, url, download=True, ie_key=None, extra_info={},
502 Returns a list with a dictionary for each video we find.
503 If 'download', also downloads the videos.
504 extra_info is a dict containing the extra values to add to each result
508 ies = [self.get_info_extractor(ie_key)]
513 if not ie.suitable(url):
517 self.report_warning('The program functionality for this site has been marked as broken, '
518 'and will probably not work.')
521 ie_result = ie.extract(url)
522 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
524 if isinstance(ie_result, list):
525 # Backwards compatibility: old IE result format
527 '_type': 'compat_list',
528 'entries': ie_result,
530 self.add_default_extra_info(ie_result, ie, url)
532 return self.process_ie_result(ie_result, download, extra_info)
535 except ExtractorError as de: # An error we somewhat expected
536 self.report_error(compat_str(de), de.format_traceback())
538 except MaxDownloadsReached:
540 except Exception as e:
541 if self.params.get('ignoreerrors', False):
542 self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
547 self.report_error('no suitable InfoExtractor for URL %s' % url)
549 def add_default_extra_info(self, ie_result, ie, url):
550 self.add_extra_info(ie_result, {
551 'extractor': ie.IE_NAME,
553 'webpage_url_basename': url_basename(url),
554 'extractor_key': ie.ie_key(),
557 def process_ie_result(self, ie_result, download=True, extra_info={}):
559 Take the result of the ie (may be modified) and resolve all unresolved
560 references (URLs, playlist items).
562 It will also download the videos if 'download'.
563 Returns the resolved ie_result.
566 result_type = ie_result.get('_type', 'video')
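# result_type is one of 'video', 'url', 'url_transparent', 'playlist' or
# 'compat_list'; each branch below resolves one of these.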
568 if self.params.get('extract_flat', False):
569 if result_type in ('url', 'url_transparent'):
572 if result_type == 'video':
573 self.add_extra_info(ie_result, extra_info)
574 return self.process_video_result(ie_result, download=download)
575 elif result_type == 'url':
576 # We have to add extra_info to the results because it may be
577 # contained in a playlist
578 return self.extract_info(ie_result['url'],
580 ie_key=ie_result.get('ie_key'),
581 extra_info=extra_info)
582 elif result_type == 'url_transparent':
583 # Use the information from the embedding page
584 info = self.extract_info(
585 ie_result['url'], ie_key=ie_result.get('ie_key'),
586 extra_info=extra_info, download=False, process=False)
588 def make_result(embedded_info):
589 new_result = ie_result.copy()
590 for f in ('_type', 'url', 'ext', 'player_url', 'formats',
591 'entries', 'ie_key', 'duration',
592 'subtitles', 'annotations', 'format',
593 'thumbnail', 'thumbnails'):
596 if f in embedded_info:
597 new_result[f] = embedded_info[f]
599 new_result = make_result(info)
601 assert new_result.get('_type') != 'url_transparent'
602 if new_result.get('_type') == 'compat_list':
603 new_result['entries'] = [
604 make_result(e) for e in new_result['entries']]
606 return self.process_ie_result(
607 new_result, download=download, extra_info=extra_info)
608 elif result_type == 'playlist':
609 # We process each entry in the playlist
610 playlist = ie_result.get('title', None) or ie_result.get('id', None)
611 self.to_screen('[download] Downloading playlist: %s' % playlist)
613 playlist_results = []
615 playliststart = self.params.get('playliststart', 1) - 1
616 playlistend = self.params.get('playlistend', None)
617 # For backwards compatibility, interpret -1 as whole list
618 if playlistend == -1:
621 if isinstance(ie_result['entries'], list):
622 n_all_entries = len(ie_result['entries'])
623 entries = ie_result['entries'][playliststart:playlistend]
624 n_entries = len(entries)
626 "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
627 (ie_result['extractor'], playlist, n_all_entries, n_entries))
629 assert isinstance(ie_result['entries'], PagedList)
630 entries = ie_result['entries'].getslice(
631 playliststart, playlistend)
632 n_entries = len(entries)
634 "[%s] playlist %s: Downloading %d videos" %
635 (ie_result['extractor'], playlist, n_entries))
637 for i, entry in enumerate(entries, 1):
638 self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries))
640 'n_entries': n_entries,
641 'playlist': playlist,
642 'playlist_index': i + playliststart,
643 'extractor': ie_result['extractor'],
644 'webpage_url': ie_result['webpage_url'],
645 'webpage_url_basename': url_basename(ie_result['webpage_url']),
646 'extractor_key': ie_result['extractor_key'],
649 reason = self._match_entry(entry)
650 if reason is not None:
651 self.to_screen('[download] ' + reason)
654 entry_result = self.process_ie_result(entry,
657 playlist_results.append(entry_result)
658 ie_result['entries'] = playlist_results
660 elif result_type == 'compat_list':
662 self.add_extra_info(r,
664 'extractor': ie_result['extractor'],
665 'webpage_url': ie_result['webpage_url'],
666 'webpage_url_basename': url_basename(ie_result['webpage_url']),
667 'extractor_key': ie_result['extractor_key'],
670 ie_result['entries'] = [
671 self.process_ie_result(_fixup(r), download, extra_info)
672 for r in ie_result['entries']
676 raise Exception('Invalid result type: %s' % result_type)
678 def select_format(self, format_spec, available_formats):
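# format_spec is a single selector: None/'best', 'worst', 'bestaudio',
# 'worstaudio', 'bestvideo', 'worstvideo', a known extension such as 'mp4',
# or a concrete format_id; available_formats is ordered from worst to best.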
679 if format_spec == 'best' or format_spec is None:
680 return available_formats[-1]
681 elif format_spec == 'worst':
682 return available_formats[0]
683 elif format_spec == 'bestaudio':
685 f for f in available_formats
686 if f.get('vcodec') == 'none']
688 return audio_formats[-1]
689 elif format_spec == 'worstaudio':
691 f for f in available_formats
692 if f.get('vcodec') == 'none']
694 return audio_formats[0]
695 elif format_spec == 'bestvideo':
697 f for f in available_formats
698 if f.get('acodec') == 'none']
700 return video_formats[-1]
701 elif format_spec == 'worstvideo':
703 f for f in available_formats
704 if f.get('acodec') == 'none']
706 return video_formats[0]
708 extensions = ['mp4', 'flv', 'webm', '3gp']
709 if format_spec in extensions:
710 filter_f = lambda f: f['ext'] == format_spec
712 filter_f = lambda f: f['format_id'] == format_spec
713 matches = list(filter(filter_f, available_formats))
718 def process_video_result(self, info_dict, download=True):
719 assert info_dict.get('_type', 'video') == 'video'
721 if 'id' not in info_dict:
722 raise ExtractorError('Missing "id" field in extractor result')
723 if 'title' not in info_dict:
724 raise ExtractorError('Missing "title" field in extractor result')
726 if 'playlist' not in info_dict:
727 # It isn't part of a playlist
728 info_dict['playlist'] = None
729 info_dict['playlist_index'] = None
731 thumbnails = info_dict.get('thumbnails')
733 thumbnails.sort(key=lambda t: (
734 t.get('width'), t.get('height'), t.get('url')))
736 if 'width' in t and 'height' in t:
737 t['resolution'] = '%dx%d' % (t['width'], t['height'])
739 if thumbnails and 'thumbnail' not in info_dict:
740 info_dict['thumbnail'] = thumbnails[-1]['url']
742 if 'display_id' not in info_dict and 'id' in info_dict:
743 info_dict['display_id'] = info_dict['id']
745 if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
746 upload_date = datetime.datetime.utcfromtimestamp(
747 info_dict['timestamp'])
748 info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
750 # These extractors handle format selection themselves
751 if info_dict['extractor'] in ['Youku']:
753 self.process_info(info_dict)
756 # We now pick which formats have to be downloaded
757 if info_dict.get('formats') is None:
758 # There's only one format available
759 formats = [info_dict]
761 formats = info_dict['formats']
764 raise ExtractorError('No video formats found!')
766 # We check that all the formats have the format and format_id fields
767 for i, format in enumerate(formats):
768 if 'url' not in format:
769 raise ExtractorError('Missing "url" key in result (index %d)' % i)
771 if format.get('format_id') is None:
772 format['format_id'] = compat_str(i)
773 if format.get('format') is None:
774 format['format'] = '{id} - {res}{note}'.format(
775 id=format['format_id'],
776 res=self.format_resolution(format),
777 note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
779 # Automatically determine file extension if missing
780 if 'ext' not in format:
781 format['ext'] = determine_ext(format['url']).lower()
783 format_limit = self.params.get('format_limit', None)
785 formats = list(takewhile_inclusive(
786 lambda f: f['format_id'] != format_limit, formats
789 # TODO Central sorting goes here
791 if formats[0] is not info_dict:
792 # only set the 'formats' field if the original info_dict lists them;
793 # otherwise we end up with a circular reference: the first (and only)
794 # element in the 'formats' field in info_dict would be info_dict itself,
795 # which can't be exported to JSON
796 info_dict['formats'] = formats
797 if self.params.get('listformats', None):
798 self.list_formats(info_dict)
801 req_format = self.params.get('format')
802 if req_format is None:
804 formats_to_download = []
805 # The -1 is for supporting YoutubeIE
806 if req_format in ('-1', 'all'):
807 formats_to_download = formats
809 # We can accept formats requested in the form '34/5/best'; we pick
810 # the first one that is available, starting from the left
811 req_formats = req_format.split('/')
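# e.g. a request like '137+139/22/best' (illustrative format ids) first
# tries the merged pair 137+139, then format 22, then the best single format.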
812 for rf in req_formats:
813 if re.match(r'.+?\+.+?', rf) is not None:
814 # Two formats have been requested like '137+139'
815 format_1, format_2 = rf.split('+')
816 formats_info = (self.select_format(format_1, formats),
817 self.select_format(format_2, formats))
818 if all(formats_info):
820 'requested_formats': formats_info,
822 'ext': formats_info[0]['ext'],
825 selected_format = None
827 selected_format = self.select_format(rf, formats)
828 if selected_format is not None:
829 formats_to_download = [selected_format]
831 if not formats_to_download:
832 raise ExtractorError('requested format not available',
836 if len(formats_to_download) > 1:
837 self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
838 for format in formats_to_download:
839 new_info = dict(info_dict)
840 new_info.update(format)
841 self.process_info(new_info)
842 # We update the info dict with the best quality format (backwards compatibility)
843 info_dict.update(formats_to_download[-1])
846 def process_info(self, info_dict):
847 """Process a single resolved IE result."""
849 assert info_dict.get('_type', 'video') == 'video'
851 max_downloads = self.params.get('max_downloads')
852 if max_downloads is not None:
853 if self._num_downloads >= int(max_downloads):
854 raise MaxDownloadsReached()
856 info_dict['fulltitle'] = info_dict['title']
857 if len(info_dict['title']) > 200:
858 info_dict['title'] = info_dict['title'][:197] + '...'
860 # Keep for backwards compatibility
861 info_dict['stitle'] = info_dict['title']
863 if 'format' not in info_dict:
864 info_dict['format'] = info_dict['ext']
866 reason = self._match_entry(info_dict)
867 if reason is not None:
868 self.to_screen('[download] ' + reason)
871 self._num_downloads += 1
873 filename = self.prepare_filename(info_dict)
876 if self.params.get('forcetitle', False):
877 self.to_stdout(info_dict['fulltitle'])
878 if self.params.get('forceid', False):
879 self.to_stdout(info_dict['id'])
880 if self.params.get('forceurl', False):
881 # For RTMP URLs, also include the playpath
882 self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
883 if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
884 self.to_stdout(info_dict['thumbnail'])
885 if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
886 self.to_stdout(info_dict['description'])
887 if self.params.get('forcefilename', False) and filename is not None:
888 self.to_stdout(filename)
889 if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
890 self.to_stdout(formatSeconds(info_dict['duration']))
891 if self.params.get('forceformat', False):
892 self.to_stdout(info_dict['format'])
893 if self.params.get('forcejson', False):
894 info_dict['_filename'] = filename
895 self.to_stdout(json.dumps(info_dict))
897 # Do nothing else if in simulate mode
898 if self.params.get('simulate', False):
905 dn = os.path.dirname(encodeFilename(filename))
906 if dn and not os.path.exists(dn):
908 except (OSError, IOError) as err:
909 self.report_error('unable to create directory ' + compat_str(err))
912 if self.params.get('writedescription', False):
913 descfn = filename + '.description'
914 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
915 self.to_screen('[info] Video description is already present')
918 self.to_screen('[info] Writing video description to: ' + descfn)
919 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
920 descfile.write(info_dict['description'])
921 except (KeyError, TypeError):
922 self.report_warning('There\'s no description to write.')
923 except (OSError, IOError):
924 self.report_error('Cannot write description file ' + descfn)
927 if self.params.get('writeannotations', False):
928 annofn = filename + '.annotations.xml'
929 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
930 self.to_screen('[info] Video annotations are already present')
933 self.to_screen('[info] Writing video annotations to: ' + annofn)
934 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
935 annofile.write(info_dict['annotations'])
936 except (KeyError, TypeError):
937 self.report_warning('There are no annotations to write.')
938 except (OSError, IOError):
939 self.report_error('Cannot write annotations file: ' + annofn)
942 subtitles_are_requested = any([self.params.get('writesubtitles', False),
943 self.params.get('writeautomaticsub')])
945 if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
946 # subtitle download errors are already handled as troubles in the relevant IE,
947 # so this silently carries on when used with an IE that does not support them
948 subtitles = info_dict['subtitles']
949 sub_format = self.params.get('subtitlesformat', 'srt')
950 for sub_lang in subtitles.keys():
951 sub = subtitles[sub_lang]
955 sub_filename = subtitles_filename(filename, sub_lang, sub_format)
956 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
957 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
959 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
960 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
962 except (OSError, IOError):
963 self.report_error('Cannot write subtitles file ' + sub_filename)
966 if self.params.get('writeinfojson', False):
967 infofn = os.path.splitext(filename)[0] + '.info.json'
968 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
969 self.to_screen('[info] Video description metadata is already present')
971 self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
973 write_json_file(info_dict, encodeFilename(infofn))
974 except (OSError, IOError):
975 self.report_error('Cannot write metadata to JSON file ' + infofn)
978 if self.params.get('writethumbnail', False):
979 if info_dict.get('thumbnail') is not None:
980 thumb_format = determine_ext(info_dict['thumbnail'], 'jpg')
981 thumb_filename = os.path.splitext(filename)[0] + '.' + thumb_format
982 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
983 self.to_screen('[%s] %s: Thumbnail is already present' %
984 (info_dict['extractor'], info_dict['id']))
986 self.to_screen('[%s] %s: Downloading thumbnail ...' %
987 (info_dict['extractor'], info_dict['id']))
989 uf = self.urlopen(info_dict['thumbnail'])
990 with open(thumb_filename, 'wb') as thumbf:
991 shutil.copyfileobj(uf, thumbf)
992 self.to_screen('[%s] %s: Writing thumbnail to: %s' %
993 (info_dict['extractor'], info_dict['id'], thumb_filename))
994 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
995 self.report_warning('Unable to download thumbnail "%s": %s' %
996 (info_dict['thumbnail'], compat_str(err)))
998 if not self.params.get('skip_download', False):
999 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
1004 fd = get_suitable_downloader(info)(self, self.params)
1005 for ph in self._progress_hooks:
1006 fd.add_progress_hook(ph)
1007 if self.params.get('verbose'):
1008 self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
1009 return fd.download(name, info)
1010 if info_dict.get('requested_formats') is not None:
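# 'requested_formats' is set when a composite spec such as '137+139' was
# selected in process_video_result; each part is downloaded to its own file
# and merged afterwards by FFmpegMergerPP.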
1013 merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
1014 if not merger._get_executable():
1016 self.report_warning('You have requested multiple '
1017 'formats but ffmpeg or avconv are not installed.'
1018 ' The formats won\'t be merged')
1020 postprocessors = [merger]
1021 for f in info_dict['requested_formats']:
1022 new_info = dict(info_dict)
1024 fname = self.prepare_filename(new_info)
1025 fname = prepend_extension(fname, 'f%s' % f['format_id'])
1026 downloaded.append(fname)
1027 partial_success = dl(fname, new_info)
1028 success = success and partial_success
1029 info_dict['__postprocessors'] = postprocessors
1030 info_dict['__files_to_merge'] = downloaded
1032 # Just a single file
1033 success = dl(filename, info_dict)
1034 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
1035 self.report_error('unable to download video data: %s' % str(err))
1037 except (OSError, IOError) as err:
1038 raise UnavailableVideoError(err)
1039 except (ContentTooShortError, ) as err:
1040 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
1045 self.post_process(filename, info_dict)
1046 except (PostProcessingError) as err:
1047 self.report_error('postprocessing: %s' % str(err))
1050 self.record_download_archive(info_dict)
1052 def download(self, url_list):
1053 """Download a given list of URLs."""
1054 outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
1055 if (len(url_list) > 1 and
1057 and self.params.get('max_downloads') != 1):
1058 raise SameFileError(outtmpl)
1060 for url in url_list:
1062 # It also downloads the videos
1063 self.extract_info(url)
1064 except UnavailableVideoError:
1065 self.report_error('unable to download video')
1066 except MaxDownloadsReached:
1067 self.to_screen('[info] Maximum number of downloaded files reached.')
1070 return self._download_retcode
1072 def download_with_info_file(self, info_filename):
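# info_filename is expected to be a JSON document of a single info_dict,
# such as the .info.json file written by the 'writeinfojson' option.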
1073 with io.open(info_filename, 'r', encoding='utf-8') as f:
1076 self.process_ie_result(info, download=True)
1077 except DownloadError:
1078 webpage_url = info.get('webpage_url')
1079 if webpage_url is not None:
1080 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
1081 return self.download([webpage_url])
1084 return self._download_retcode
1086 def post_process(self, filename, ie_info):
1087 """Run all the postprocessors on the given file."""
1088 info = dict(ie_info)
1089 info['filepath'] = filename
1092 if ie_info.get('__postprocessors') is not None:
1093 pps_chain.extend(ie_info['__postprocessors'])
1094 pps_chain.extend(self._pps)
1095 for pp in pps_chain:
1097 keep_video_wish, new_info = pp.run(info)
1098 if keep_video_wish is not None:
1100 keep_video = keep_video_wish
1101 elif keep_video is None:
1102 # No clear decision yet, let IE decide
1103 keep_video = keep_video_wish
1104 except PostProcessingError as e:
1105 self.report_error(e.msg)
1106 if keep_video is False and not self.params.get('keepvideo', False):
1108 self.to_screen('Deleting original file %s (pass -k to keep)' % filename)
1109 os.remove(encodeFilename(filename))
1110 except (IOError, OSError):
1111 self.report_warning('Unable to remove downloaded video file')
1113 def _make_archive_id(self, info_dict):
1114 # Future-proof against any change in the case of the IE key,
1115 # and keep backwards compatibility with prior versions
1116 extractor = info_dict.get('extractor_key')
1117 if extractor is None:
1118 if 'id' in info_dict:
1119 extractor = info_dict.get('ie_key') # key in a playlist
1120 if extractor is None:
1121 return None # Incomplete video information
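# An archive entry pairs the lowercased extractor key with the video id,
# e.g. 'youtube BaW_jenozKc' (illustrative id).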
1122 return extractor.lower() + ' ' + info_dict['id']
1124 def in_download_archive(self, info_dict):
1125 fn = self.params.get('download_archive')
1129 vid_id = self._make_archive_id(info_dict)
1131 return False # Incomplete video information
1134 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
1135 for line in archive_file:
1136 if line.strip() == vid_id:
1138 except IOError as ioe:
1139 if ioe.errno != errno.ENOENT:
1143 def record_download_archive(self, info_dict):
1144 fn = self.params.get('download_archive')
1147 vid_id = self._make_archive_id(info_dict)
1149 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
1150 archive_file.write(vid_id + '\n')
1153 def format_resolution(format, default='unknown'):
1154 if format.get('vcodec') == 'none':
1156 if format.get('resolution') is not None:
1157 return format['resolution']
1158 if format.get('height') is not None:
1159 if format.get('width') is not None:
1160 res = '%sx%s' % (format['width'], format['height'])
1162 res = '%sp' % format['height']
1163 elif format.get('width') is not None:
1164 res = '?x%d' % format['width']
1169 def _format_note(self, fdict):
1171 if fdict.get('ext') in ['f4f', 'f4m']:
1172 res += '(unsupported) '
1173 if fdict.get('format_note') is not None:
1174 res += fdict['format_note'] + ' '
1175 if fdict.get('tbr') is not None:
1176 res += '%4dk ' % fdict['tbr']
1177 if fdict.get('container') is not None:
1180 res += '%s container' % fdict['container']
1181 if (fdict.get('vcodec') is not None and
1182 fdict.get('vcodec') != 'none'):
1185 res += fdict['vcodec']
1186 if fdict.get('vbr') is not None:
1188 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
1190 if fdict.get('vbr') is not None:
1191 res += '%4dk' % fdict['vbr']
1192 if fdict.get('acodec') is not None:
1195 if fdict['acodec'] == 'none':
1198 res += '%-5s' % fdict['acodec']
1199 elif fdict.get('abr') is not None:
1203 if fdict.get('abr') is not None:
1204 res += '@%3dk' % fdict['abr']
1205 if fdict.get('asr') is not None:
1206 res += ' (%5dHz)' % fdict['asr']
1207 if fdict.get('filesize') is not None:
1210 res += format_bytes(fdict['filesize'])
1211 elif fdict.get('filesize_approx') is not None:
1214 res += '~' + format_bytes(fdict['filesize_approx'])
1217 def list_formats(self, info_dict):
1218 def line(format, idlen=20):
1219 return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
1220 format['format_id'],
1222 self.format_resolution(format),
1223 self._format_note(format),
1226 formats = info_dict.get('formats', [info_dict])
1227 idlen = max(len('format code'),
1228 max(len(f['format_id']) for f in formats))
1229 formats_s = [line(f, idlen) for f in formats]
1230 if len(formats) > 1:
1231 formats_s[0] += (' ' if self._format_note(formats[0]) else '') + '(worst)'
1232 formats_s[-1] += (' ' if self._format_note(formats[-1]) else '') + '(best)'
1234 header_line = line({
1235 'format_id': 'format code', 'ext': 'extension',
1236 'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
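# Each row then looks roughly like
# '22                   mp4       1280x720    hd720 (best)' (illustrative values).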
1237 self.to_screen('[info] Available formats for %s:\n%s\n%s' %
1238 (info_dict['id'], header_line, '\n'.join(formats_s)))
1240 def urlopen(self, req):
1241 """ Start an HTTP download """
1242 return self._opener.open(req, timeout=self._socket_timeout)
1244 def print_debug_header(self):
1245 if not self.params.get('verbose'):
1248 if type('') is not compat_str:
1249 # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
1250 self.report_warning(
1251 'Your Python is broken! Update to a newer and supported version')
1254 '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
1255 locale.getpreferredencoding(),
1256 sys.getfilesystemencoding(),
1257 sys.stdout.encoding,
1258 self.get_encoding()))
1259 write_string(encoding_str, encoding=None)
1261 self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
1263 sp = subprocess.Popen(
1264 ['git', 'rev-parse', '--short', 'HEAD'],
1265 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1266 cwd=os.path.dirname(os.path.abspath(__file__)))
1267 out, err = sp.communicate()
1268 out = out.decode().strip()
1269 if re.match('[0-9a-f]+', out):
1270 self._write_string('[debug] Git HEAD: ' + out + '\n')
1276 self._write_string('[debug] Python version %s - %s' %
1277 (platform.python_version(), platform_name()) + '\n')
1280 for handler in self._opener.handlers:
1281 if hasattr(handler, 'proxies'):
1282 proxy_map.update(handler.proxies)
1283 self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
1285 def _setup_opener(self):
1286 timeout_val = self.params.get('socket_timeout')
1287 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
1289 opts_cookiefile = self.params.get('cookiefile')
1290 opts_proxy = self.params.get('proxy')
1292 if opts_cookiefile is None:
1293 self.cookiejar = compat_cookiejar.CookieJar()
1295 self.cookiejar = compat_cookiejar.MozillaCookieJar(
1297 if os.access(opts_cookiefile, os.R_OK):
1298 self.cookiejar.load()
1300 cookie_processor = compat_urllib_request.HTTPCookieProcessor(
1302 if opts_proxy is not None:
1303 if opts_proxy == '':
1306 proxies = {'http': opts_proxy, 'https': opts_proxy}
1308 proxies = compat_urllib_request.getproxies()
1309 # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
1310 if 'http' in proxies and 'https' not in proxies:
1311 proxies['https'] = proxies['http']
1312 proxy_handler = compat_urllib_request.ProxyHandler(proxies)
1314 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
1315 https_handler = make_HTTPS_handler(
1316 self.params.get('nocheckcertificate', False), debuglevel=debuglevel)
1317 ydlh = YoutubeDLHandler(debuglevel=debuglevel)
1318 opener = compat_urllib_request.build_opener(
1319 https_handler, proxy_handler, cookie_processor, ydlh)
1320 # Delete the default user-agent header, which would otherwise apply in
1321 # cases where our custom HTTP handler doesn't come into play
1322 # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
1323 opener.addheaders = []
1324 self._opener = opener
1326 def encode(self, s):
1327 if isinstance(s, bytes):
1328 return s # Already encoded
1331 return s.encode(self.get_encoding())
1332 except UnicodeEncodeError as err:
1333 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
1336 def get_encoding(self):
1337 encoding = self.params.get('encoding')
1338 if encoding is None:
1339 encoding = preferredencoding()