2 # -*- coding: utf-8 -*-
4 from __future__ import absolute_import
28 compat_urllib_request,
49 UnavailableVideoError,
55 from .extractor import get_info_extractor, gen_extractors
56 from .FileDownloader import FileDownloader
57 from .version import __version__
60 class YoutubeDL(object):
63 YoutubeDL objects are the ones responsible of downloading the
64 actual video file and writing it to disk if the user has requested
65 it, among some other tasks. In most cases there should be one per
66 program. Given a video URL, the downloader does not know how to
67 extract all the needed information itself (that is the task of the
68 InfoExtractors), so it has to pass the URL to one of them.
70 For this, YoutubeDL objects have a method that allows
71 InfoExtractors to be registered in a given order. When it is passed
72 a URL, the YoutubeDL object handles it to the first InfoExtractor it
73 finds that reports being able to handle it. The InfoExtractor extracts
74 all the information about the video or videos the URL refers to, and
75 YoutubeDL process the extracted information, possibly using a File
76 Downloader to download the video.
78 YoutubeDL objects accept a lot of parameters. In order not to saturate
79 the object constructor with arguments, it receives a dictionary of
80 options instead. These options are available through the params
81 attribute for the InfoExtractors to use. The YoutubeDL also
82 registers itself as the downloader in charge for the InfoExtractors
83 that are added to it, so this is a "mutual registration".
87 username: Username for authentication purposes.
88 password: Password for authentication purposes.
89 videopassword: Password for accessing a video.
90 usenetrc: Use netrc for authentication instead.
91 verbose: Print additional info to stdout.
92 quiet: Do not print messages to stdout.
93 forceurl: Force printing final URL.
94 forcetitle: Force printing title.
95 forceid: Force printing ID.
96 forcethumbnail: Force printing thumbnail URL.
97 forcedescription: Force printing description.
98 forcefilename: Force printing final filename.
99 forceduration: Force printing duration.
100 forcejson: Force printing info_dict as JSON.
101 simulate: Do not download the video files.
102 format: Video format code.
103 format_limit: Highest quality format to try.
104 outtmpl: Template for output names.
105 restrictfilenames: Do not allow "&" and spaces in file names
106 ignoreerrors: Do not stop on download errors.
107 nooverwrites: Prevent overwriting files.
108 playliststart: Playlist item to start at.
109 playlistend: Playlist item to end at.
110 matchtitle: Download only matching titles.
111 rejecttitle: Reject downloads for matching titles.
112 logger: Log messages to a logging.Logger instance.
113 logtostderr: Log messages to stderr instead of stdout.
114 writedescription: Write the video description to a .description file
115 writeinfojson: Write the video description to a .info.json file
116 writeannotations: Write the video annotations to a .annotations.xml file
117 writethumbnail: Write the thumbnail image to a file
118 writesubtitles: Write the video subtitles to a file
119 writeautomaticsub: Write the automatic subtitles to a file
120 allsubtitles: Downloads all the subtitles of the video
121 (requires writesubtitles or writeautomaticsub)
122 listsubtitles: Lists all available subtitles for the video
123 subtitlesformat: Subtitle format [srt/sbv/vtt] (default=srt)
124 subtitleslangs: List of languages of the subtitles to download
125 keepvideo: Keep the video file after post-processing
126 daterange: A DateRange object, download only if the upload_date is in the range.
127 skip_download: Skip the actual download of the video file
128 cachedir: Location of the cache files in the filesystem.
129 None to disable filesystem cache.
130 noplaylist: Download single video instead of a playlist if in doubt.
131 age_limit: An integer representing the user's age in years.
132 Unsuitable videos for the given age are skipped.
133 min_views: An integer representing the minimum view count the video
134 must have in order to not be skipped.
135 Videos without view count information are always
136 downloaded. None for no limit.
137 max_views: An integer representing the maximum view count.
138 Videos that are more popular than that are not downloaded.
140 Videos without view count information are always
141 downloaded. None for no limit.
142 download_archive: File name of a file where all downloads are recorded.
143 Videos already present in the file are not downloaded
145 cookiefile: File name where cookies should be read from and dumped to.
146 nocheckcertificate:Do not verify SSL certificates
147 proxy: URL of the proxy server to use
148 socket_timeout: Time to wait for unresponsive hosts, in seconds
149 bidi_workaround: Work around buggy terminals without bidirectional text
150 support, using fribidi
152 The following parameters are not used by YoutubeDL itself, they are used by
154 nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
155 noresizebuffer, retries, continuedl, noprogress, consoletitle
161 _download_retcode = None
162 _num_downloads = None
def __init__(self, params=None):
    """Create a FileDownloader object with the given options.

    params: dict of options (see the class docstring), or None for defaults.
    """
    # Normalize params FIRST: the damaged copy read params.get(...) before
    # the None check, so YoutubeDL() with no params crashed.
    self.params = {} if params is None else params
    self._ies = []                 # InfoExtractors, in registration order
    self._ies_instances = {}       # ie_key() -> InfoExtractor instance
    self._pps = []                 # PostProcessor chain
    self._progress_hooks = []
    self._download_retcode = 0
    self._num_downloads = 0
    self._screen_file = [sys.stdout, sys.stderr][self.params.get('logtostderr', False)]
    self._err_file = sys.stderr

    if self.params.get('bidi_workaround', False):
        try:
            # pty is POSIX-only; a missing fribidi binary raises ENOENT below.
            import pty
            master, slave = pty.openpty()
            width = get_term_width()
            if width is None:
                width_args = []
            else:
                width_args = ['-w', str(width)]
            # NOTE(review): the Popen stdout argument was lost in this copy;
            # reconstructed as the pty slave end — confirm upstream.
            self._fribidi = subprocess.Popen(
                ['fribidi', '-c', 'UTF-8'] + width_args,
                stdin=subprocess.PIPE,
                stdout=slave,
                stderr=self._err_file)
            self._fribidi_channel = os.fdopen(master, 'rb')
        except OSError as ose:
            if ose.errno == errno.ENOENT:
                self.report_warning(u'Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
            else:
                raise

    if (sys.version_info >= (3,) and sys.platform != 'win32' and
            sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
            and not self.params.get('restrictfilenames', False)):
        # On Python 3, the Unicode filesystem API will throw errors (#1474)
        self.report_warning(
            u'Assuming --restrict-filenames since file system encoding '
            u'cannot encode all characters. '
            u'Set the LC_ALL environment variable to fix this.')
        self.params['restrictfilenames'] = True

    self.fd = FileDownloader(self, self.params)

    if '%(stitle)s' in self.params.get('outtmpl', ''):
        self.report_warning(u'%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')
def add_info_extractor(self, ie):
    """Add an InfoExtractor object to the end of the list."""
    # Index the extractor by its key and register ourselves as its downloader.
    key = ie.ie_key()
    self._ies_instances[key] = ie
    ie.set_downloader(self)
def get_info_extractor(self, ie_key):
    """
    Get an instance of an IE with name ie_key, it will try to get one from
    the _ies list, if there's no instance it will create a new one and add
    it to the extractor list.
    """
    ie = self._ies_instances.get(ie_key)
    # The damaged copy lost this guard and the return: it rebuilt the IE on
    # every call (defeating the cache) and returned None.
    if ie is None:
        ie = get_info_extractor(ie_key)()
        self.add_info_extractor(ie)
    return ie
def add_default_info_extractors(self):
    """
    Add the InfoExtractors returned by gen_extractors to the end of the list
    """
    # Docstring delimiters were lost in this copy, leaving bare prose
    # (a syntax error); restored above.
    for ie in gen_extractors():
        self.add_info_extractor(ie)
def add_post_processor(self, pp):
    """Add a PostProcessor object to the end of the chain."""
    # Mutual registration: the processor learns which downloader owns it.
    pp.set_downloader(self)
def _bidi_workaround(self, message):
    """Filter message through the fribidi subprocess set up in __init__.

    If the workaround is not active (no _fribidi_channel), return the
    message unchanged — that fallback return was lost in this copy.
    """
    if not hasattr(self, '_fribidi_channel'):
        return message

    assert type(message) == type(u'')
    line_count = message.count(u'\n') + 1
    self._fribidi.stdin.write((message + u'\n').encode('utf-8'))
    self._fribidi.stdin.flush()
    res = u''.join(self._fribidi_channel.readline().decode('utf-8')
                   for _ in range(line_count))
    # Drop the trailing newline we appended before writing.
    return res[:-len(u'\n')]
def to_screen(self, message, skip_eol=False):
    """Print message to stdout if not in quiet mode."""
    # Same as to_stdout, but honouring the global 'quiet' option.
    return self.to_stdout(message, skip_eol, check_quiet=True)
def to_stdout(self, message, skip_eol=False, check_quiet=False):
    """Print message to stdout if not in quiet mode."""
    logger = self.params.get('logger')
    if logger:
        logger.debug(message)
        return
    # Suppress output only when the caller asked us to honour 'quiet'
    # and the option is actually set.
    if check_quiet and self.params.get('quiet', False):
        return
    message = self._bidi_workaround(message)
    output = message + (u'' if skip_eol else u'\n')
    write_string(output, self._screen_file)
def to_stderr(self, message):
    """Print message to stderr."""
    assert type(message) == type(u'')
    if self.params.get('logger'):
        self.params['logger'].error(message)
    else:
        # The 'else' was lost in this copy, so the message was written to
        # stderr even after the logger had already handled it; restored.
        message = self._bidi_workaround(message)
        output = message + u'\n'
        write_string(output, self._err_file)
def to_console_title(self, message):
    """Set the terminal/console window title, when 'consoletitle' is enabled."""
    if not self.params.get('consoletitle', False):
        # Early return lost in this copy: without it the title was set
        # even when the option is off.
        return
    if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
        # c_wchar_p() might not be necessary if `message` is
        # already of type unicode()
        ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
    elif 'TERM' in os.environ:
        # xterm escape: set window/icon title.
        write_string(u'\033]0;%s\007' % message, self._screen_file)
def save_console_title(self):
    """Push the current console title onto the terminal's title stack."""
    if not self.params.get('consoletitle', False):
        # Restored lost early return for when the option is off.
        return
    if 'TERM' in os.environ:
        # Save the title on stack
        write_string(u'\033[22;0t', self._screen_file)
def restore_console_title(self):
    """Pop the console title saved by save_console_title."""
    if not self.params.get('consoletitle', False):
        # Restored lost early return for when the option is off.
        return
    if 'TERM' in os.environ:
        # Restore the title from stack
        write_string(u'\033[23;0t', self._screen_file)
def __enter__(self):
    # Context-manager entry (the def line was lost in this copy): save the
    # terminal title so __exit__ can restore it, and return self so the
    # object can be used as `with YoutubeDL(...) as ydl:`.
    self.save_console_title()
    return self
def __exit__(self, *args):
    """Context-manager exit: restore the console title and persist cookies."""
    self.restore_console_title()
    cookie_path = self.params.get('cookiefile')
    if cookie_path is not None:
        self.cookiejar.save()
def trouble(self, message=None, tb=None):
    """Determine action to take when a download problem appears.

    Depending on if the downloader has been configured to ignore
    download errors or not, this method may throw an exception or
    not when errors are found, after printing the message.

    tb, if given, is additional traceback information.
    """
    if message is not None:
        self.to_stderr(message)
    if self.params.get('verbose'):
        # Restored lost scaffolding: build tb only when none was supplied;
        # without the `tb = u''` init the `tb +=` below crashed on None.
        if tb is None:
            if sys.exc_info()[0]:  # if .trouble has been called from an except block
                tb = u''
                # Nested exc_info (e.g. DownloadError) carries the original traceback
                if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                    tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                tb += compat_str(traceback.format_exc())
            else:
                tb_data = traceback.format_list(traceback.extract_stack())
                tb = u''.join(tb_data)
        self.to_stderr(tb)
    if not self.params.get('ignoreerrors', False):
        if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
            exc_info = sys.exc_info()[1].exc_info
        else:
            exc_info = sys.exc_info()
        raise DownloadError(message, exc_info)
    self._download_retcode = 1
def report_warning(self, message):
    '''
    Print the message to stderr, it will be prefixed with 'WARNING:'
    If stderr is a tty file the 'WARNING:' will be colored
    '''
    # Restored the lost 'else' so non-tty/Windows output gets the
    # uncolored header instead of falling through undefined.
    if self._err_file.isatty() and os.name != 'nt':
        _msg_header = u'\033[0;33mWARNING:\033[0m'
    else:
        _msg_header = u'WARNING:'
    warning_message = u'%s %s' % (_msg_header, message)
    self.to_stderr(warning_message)
def report_error(self, message, tb=None):
    '''
    Do the same as trouble, but prefixes the message with 'ERROR:', colored
    in red if stderr is a tty file.
    '''
    # Restored the lost 'else' for the uncolored header on non-tty output.
    if self._err_file.isatty() and os.name != 'nt':
        _msg_header = u'\033[0;31mERROR:\033[0m'
    else:
        _msg_header = u'ERROR:'
    error_message = u'%s %s' % (_msg_header, message)
    self.trouble(error_message, tb)
def report_file_already_downloaded(self, file_name):
    """Report file has already been fully downloaded."""
    # Restored the lost 'try:' — the filename may not be encodable to the
    # terminal encoding, in which case we print a generic message.
    try:
        self.to_screen(u'[download] %s has already been downloaded' % file_name)
    except UnicodeEncodeError:
        self.to_screen(u'[download] The file has already been downloaded')
def increment_downloads(self):
    """Increment the ordinal that assigns a number to each file."""
    self._num_downloads = self._num_downloads + 1
def prepare_filename(self, info_dict):
    """Generate the output filename from the outtmpl template.

    Returns the filename, or None if the template was invalid (the error
    is reported, not raised).
    """
    try:
        template_dict = dict(info_dict)

        template_dict['epoch'] = int(time.time())
        autonumber_size = self.params.get('autonumber_size')
        if autonumber_size is None:
            # Default width of the %(autonumber)s field.
            autonumber_size = 5
        autonumber_templ = u'%0' + str(autonumber_size) + u'd'
        template_dict['autonumber'] = autonumber_templ % self._num_downloads
        if template_dict.get('playlist_index') is not None:
            template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']

        # Sanitize every value; ids get the more permissive is_id treatment.
        sanitize = lambda k, v: sanitize_filename(
            compat_str(v),
            restricted=self.params.get('restrictfilenames'),
            is_id=(k == u'id'))
        template_dict = dict((k, sanitize(k, v))
                             for k, v in template_dict.items()
                             if v is not None)
        # Missing template fields render as 'NA' instead of raising.
        template_dict = collections.defaultdict(lambda: u'NA', template_dict)

        tmpl = os.path.expanduser(self.params['outtmpl'])
        filename = tmpl % template_dict
        return filename
    except ValueError as err:
        self.report_error(u'Error in output template: ' + str(err) + u' (encoding: ' + repr(preferredencoding()) + ')')
        return None
def _match_entry(self, info_dict):
    """ Returns None iff the file should be downloaded """

    video_title = info_dict.get('title', info_dict.get('id', u'video'))
    if 'title' in info_dict:
        # This can happen when we're just evaluating the playlist
        title = info_dict['title']
        matchtitle = self.params.get('matchtitle', False)
        if matchtitle:
            if not re.search(matchtitle, title, re.IGNORECASE):
                return u'"' + title + '" title did not match pattern "' + matchtitle + '"'
        rejecttitle = self.params.get('rejecttitle', False)
        if rejecttitle:
            if re.search(rejecttitle, title, re.IGNORECASE):
                return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
    date = info_dict.get('upload_date', None)
    if date is not None:
        dateRange = self.params.get('daterange', DateRange())
        if date not in dateRange:
            return u'%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
    view_count = info_dict.get('view_count', None)
    if view_count is not None:
        min_views = self.params.get('min_views')
        if min_views is not None and view_count < min_views:
            return u'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
        max_views = self.params.get('max_views')
        if max_views is not None and view_count > max_views:
            return u'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
    age_limit = self.params.get('age_limit')
    if age_limit is not None:
        if age_limit < info_dict.get('age_limit', 0):
            # Use video_title: 'title' may be absent, in which case the
            # local `title` would be unbound here (NameError in the original).
            return u'Skipping "' + video_title + '" because it is age restricted'
    if self.in_download_archive(info_dict):
        return u'%s has already been recorded in archive' % video_title
    return None
@staticmethod
def add_extra_info(info_dict, extra_info):
    '''Set the keys from extra_info in info dict if they are missing'''
    # Restored @staticmethod: the function takes no self but is called as
    # self.add_extra_info(result, extra) throughout this file; without the
    # decorator the instance would bind to info_dict.
    for key, value in extra_info.items():
        info_dict.setdefault(key, value)
def extract_info(self, url, download=True, ie_key=None, extra_info={},
                 process=True):
    '''
    Returns a list with a dictionary for each video we find.
    If 'download', also downloads the videos.
    extra_info is a dict containing the extra values to add to each result
    '''
    # extra_info is a mutable default but is only ever read here.

    if ie_key:
        ies = [self.get_info_extractor(ie_key)]
    else:
        # NOTE(review): this trimmed build keeps no ordered _ies list, so we
        # fall back to the registered instances; confirm ordering upstream.
        ies = list(self._ies_instances.values())

    for ie in ies:
        if not ie.suitable(url):
            continue

        if not ie.working():
            self.report_warning(u'The program functionality for this site has been marked as broken, '
                                u'and will probably not work.')

        try:
            ie_result = ie.extract(url)
            if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                break
            if isinstance(ie_result, list):
                # Backwards compatibility: old IE result format
                ie_result = {
                    '_type': 'compat_list',
                    'entries': ie_result,
                }
            self.add_extra_info(ie_result,
                {
                    'extractor': ie.IE_NAME,
                    'webpage_url': url,
                    'webpage_url_basename': url_basename(url),
                    'extractor_key': ie.ie_key(),
                })
            if process:
                return self.process_ie_result(ie_result, download, extra_info)
            else:
                return ie_result
        except ExtractorError as de:  # An error we somewhat expected
            self.report_error(compat_str(de), de.format_traceback())
            break
        except Exception as e:
            if self.params.get('ignoreerrors', False):
                self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
                break
            else:
                raise
    else:
        # No extractor claimed the URL (for-else: loop ran to completion).
        self.report_error(u'no suitable InfoExtractor: %s' % url)
def process_ie_result(self, ie_result, download=True, extra_info={}):
    """
    Take the result of the ie(may be modified) and resolve all unresolved
    references (URLs, playlist items).

    It will also download the videos if 'download'.
    Returns the resolved ie_result.
    """

    result_type = ie_result.get('_type', 'video')  # If not given we suppose it's a video, support the default old system
    if result_type == 'video':
        self.add_extra_info(ie_result, extra_info)
        return self.process_video_result(ie_result, download=download)
    elif result_type == 'url':
        # We have to add extra_info to the results because it may be
        # contained in a playlist
        return self.extract_info(ie_result['url'],
                                 download,
                                 ie_key=ie_result.get('ie_key'),
                                 extra_info=extra_info)
    elif result_type == 'url_transparent':
        # Use the information from the embedding page
        info = self.extract_info(
            ie_result['url'], ie_key=ie_result.get('ie_key'),
            extra_info=extra_info, download=False, process=False)

        def make_result(embedded_info):
            # Merge the embedded info into a copy of our result, preferring
            # the embedded values for the transport-level fields.
            new_result = ie_result.copy()
            for f in ('_type', 'url', 'ext', 'player_url', 'formats',
                      'entries', 'urlhandle', 'ie_key', 'duration',
                      'subtitles', 'annotations', 'format',
                      'thumbnail', 'thumbnails'):
                if f in new_result:
                    del new_result[f]
                if f in embedded_info:
                    new_result[f] = embedded_info[f]
            return new_result
        new_result = make_result(info)

        assert new_result.get('_type') != 'url_transparent'
        if new_result.get('_type') == 'compat_list':
            new_result['entries'] = [
                make_result(e) for e in new_result['entries']]

        return self.process_ie_result(
            new_result, download=download, extra_info=extra_info)
    elif result_type == 'playlist':
        # We process each entry in the playlist
        playlist = ie_result.get('title', None) or ie_result.get('id', None)
        self.to_screen(u'[download] Downloading playlist: %s' % playlist)

        playlist_results = []

        n_all_entries = len(ie_result['entries'])
        playliststart = self.params.get('playliststart', 1) - 1
        playlistend = self.params.get('playlistend', None)
        # For backwards compatibility, interpret -1 as whole list
        if playlistend == -1:
            playlistend = None

        entries = ie_result['entries'][playliststart:playlistend]
        n_entries = len(entries)

        self.to_screen(
            u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
            (ie_result['extractor'], playlist, n_all_entries, n_entries))

        for i, entry in enumerate(entries, 1):
            self.to_screen(u'[download] Downloading video #%s of %s' % (i, n_entries))
            extra = {
                'playlist': playlist,
                'playlist_index': i + playliststart,
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            }

            reason = self._match_entry(entry)
            if reason is not None:
                self.to_screen(u'[download] ' + reason)
                continue

            entry_result = self.process_ie_result(entry,
                                                  download=download,
                                                  extra_info=extra)
            playlist_results.append(entry_result)
        ie_result['entries'] = playlist_results
        return ie_result
    elif result_type == 'compat_list':
        def _fixup(r):
            self.add_extra_info(r,
                {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
            return r
        ie_result['entries'] = [
            self.process_ie_result(_fixup(r), download, extra_info)
            for r in ie_result['entries']
        ]
        return ie_result
    else:
        raise Exception('Invalid result type: %s' % result_type)
def select_format(self, format_spec, available_formats):
    """Pick one format from available_formats (sorted worst-to-best)
    according to format_spec; return None when nothing matches."""
    if format_spec == 'best' or format_spec is None:
        return available_formats[-1]
    elif format_spec == 'worst':
        return available_formats[0]
    else:
        # A bare extension selects the best format with that extension;
        # anything else is matched against format_id.
        extensions = [u'mp4', u'flv', u'webm', u'3gp']
        if format_spec in extensions:
            filter_f = lambda f: f['ext'] == format_spec
        else:
            filter_f = lambda f: f['format_id'] == format_spec
        matches = list(filter(filter_f, available_formats))
        if matches:
            return matches[-1]
    # Restored lost fallthrough: no match means None.
    return None
def process_video_result(self, info_dict, download=True):
    """Resolve format selection for one video result and (if download)
    hand each selected format to process_info.

    Returns info_dict updated with the best selected format."""
    assert info_dict.get('_type', 'video') == 'video'

    if 'playlist' not in info_dict:
        # It isn't part of a playlist
        info_dict['playlist'] = None
        info_dict['playlist_index'] = None

    # These extractors handle format selection themselves
    if info_dict['extractor'] in [u'Youku']:
        if download:
            self.process_info(info_dict)
        return info_dict

    # We now pick which formats have to be downloaded
    if info_dict.get('formats') is None:
        # There's only one format available
        formats = [info_dict]
    else:
        formats = info_dict['formats']

    # We check that all the formats have the format and format_id fields
    for (i, format) in enumerate(formats):
        if format.get('format_id') is None:
            format['format_id'] = compat_str(i)
        if format.get('format') is None:
            format['format'] = u'{id} - {res}{note}'.format(
                id=format['format_id'],
                res=self.format_resolution(format),
                note=u' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
            )
        # Automatically determine file extension if missing
        if 'ext' not in format:
            format['ext'] = determine_ext(format['url'])

    format_limit = self.params.get('format_limit', None)
    if format_limit:
        formats = list(takewhile_inclusive(
            lambda f: f['format_id'] != format_limit, formats
        ))
    if self.params.get('prefer_free_formats'):
        def _free_formats_key(f):
            try:
                ext_ord = [u'flv', u'mp4', u'webm'].index(f['ext'])
            except ValueError:
                ext_ord = -1
            # We only compare the extension if they have the same height and width
            return (f.get('height'), f.get('width'), ext_ord)
        formats = sorted(formats, key=_free_formats_key)

    info_dict['formats'] = formats
    if self.params.get('listformats', None):
        self.list_formats(info_dict)
        return

    req_format = self.params.get('format', 'best')
    if req_format is None:
        req_format = 'best'
    formats_to_download = []
    # The -1 is for supporting YoutubeIE
    if req_format in ('-1', 'all'):
        formats_to_download = formats
    else:
        # We can accept formats requested in the format: 34/5/best, we pick
        # the first that is available, starting from left
        req_formats = req_format.split('/')
        for rf in req_formats:
            selected_format = self.select_format(rf, formats)
            if selected_format is not None:
                formats_to_download = [selected_format]
                break
    if not formats_to_download:
        raise ExtractorError(u'requested format not available',
                             expected=True)

    if download:
        if len(formats_to_download) > 1:
            self.to_screen(u'[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
        for format in formats_to_download:
            new_info = dict(info_dict)
            new_info.update(format)
            self.process_info(new_info)
    # We update the info dict with the best quality format (backwards compatibility)
    info_dict.update(formats_to_download[-1])
    return info_dict
def process_info(self, info_dict):
    """Process a single resolved IE result: forced printings, side files
    (description, annotations, subtitles, info.json, thumbnail) and the
    actual download + post-processing."""

    assert info_dict.get('_type', 'video') == 'video'
    # We increment the download count here to match the previous behaviour.
    self.increment_downloads()

    info_dict['fulltitle'] = info_dict['title']
    if len(info_dict['title']) > 200:
        info_dict['title'] = info_dict['title'][:197] + u'...'

    # Keep for backwards compatibility
    info_dict['stitle'] = info_dict['title']

    if not 'format' in info_dict:
        info_dict['format'] = info_dict['ext']

    reason = self._match_entry(info_dict)
    if reason is not None:
        self.to_screen(u'[download] ' + reason)
        return

    max_downloads = self.params.get('max_downloads')
    if max_downloads is not None:
        if self._num_downloads > int(max_downloads):
            raise MaxDownloadsReached()

    filename = self.prepare_filename(info_dict)

    # Forced printings
    if self.params.get('forcetitle', False):
        self.to_stdout(info_dict['fulltitle'])
    if self.params.get('forceid', False):
        self.to_stdout(info_dict['id'])
    if self.params.get('forceurl', False):
        # For RTMP URLs, also include the playpath
        self.to_stdout(info_dict['url'] + info_dict.get('play_path', u''))
    if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
        self.to_stdout(info_dict['thumbnail'])
    if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
        self.to_stdout(info_dict['description'])
    if self.params.get('forcefilename', False) and filename is not None:
        self.to_stdout(filename)
    if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
        self.to_stdout(formatSeconds(info_dict['duration']))
    if self.params.get('forceformat', False):
        self.to_stdout(info_dict['format'])
    if self.params.get('forcejson', False):
        info_dict['_filename'] = filename
        self.to_stdout(json.dumps(info_dict))

    # Do nothing else if in simulate mode
    if self.params.get('simulate', False):
        return

    if filename is None:
        return

    try:
        dn = os.path.dirname(encodeFilename(filename))
        if dn != '' and not os.path.exists(dn):
            os.makedirs(dn)
    except (OSError, IOError) as err:
        self.report_error(u'unable to create directory ' + compat_str(err))
        return

    if self.params.get('writedescription', False):
        descfn = filename + u'.description'
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
            self.to_screen(u'[info] Video description is already present')
        else:
            try:
                self.to_screen(u'[info] Writing video description to: ' + descfn)
                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(info_dict['description'])
            except (KeyError, TypeError):
                self.report_warning(u'There\'s no description to write.')
            except (OSError, IOError):
                self.report_error(u'Cannot write description file ' + descfn)
                return

    if self.params.get('writeannotations', False):
        annofn = filename + u'.annotations.xml'
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
            self.to_screen(u'[info] Video annotations are already present')
        else:
            try:
                self.to_screen(u'[info] Writing video annotations to: ' + annofn)
                with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                    annofile.write(info_dict['annotations'])
            except (KeyError, TypeError):
                self.report_warning(u'There are no annotations to write.')
            except (OSError, IOError):
                self.report_error(u'Cannot write annotations file: ' + annofn)
                return

    subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                   self.params.get('writeautomaticsub')])

    if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
        # subtitles download errors are already managed as troubles in relevant IE
        # that way it will silently go on when used with unsupporting IE
        subtitles = info_dict['subtitles']
        sub_format = self.params.get('subtitlesformat', 'srt')
        for sub_lang in subtitles.keys():
            sub = subtitles[sub_lang]
            if sub is None:
                continue
            try:
                sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                    self.to_screen(u'[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
                else:
                    self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename)
                    with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                        subfile.write(sub)
            except (OSError, IOError):
                # Fixed: the original reported descfn (the description file)
                # here instead of the subtitle filename.
                self.report_error(u'Cannot write subtitles file ' + sub_filename)
                return

    if self.params.get('writeinfojson', False):
        infofn = os.path.splitext(filename)[0] + u'.info.json'
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
            self.to_screen(u'[info] Video description metadata is already present')
        else:
            self.to_screen(u'[info] Writing video description metadata as JSON to: ' + infofn)
            try:
                json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle'])
                write_json_file(json_info_dict, encodeFilename(infofn))
            except (OSError, IOError):
                self.report_error(u'Cannot write metadata to JSON file ' + infofn)
                return

    if self.params.get('writethumbnail', False):
        if info_dict.get('thumbnail') is not None:
            thumb_format = determine_ext(info_dict['thumbnail'], u'jpg')
            thumb_filename = os.path.splitext(filename)[0] + u'.' + thumb_format
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
                self.to_screen(u'[%s] %s: Thumbnail is already present' %
                               (info_dict['extractor'], info_dict['id']))
            else:
                self.to_screen(u'[%s] %s: Downloading thumbnail ...' %
                               (info_dict['extractor'], info_dict['id']))
                try:
                    uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
                    with open(thumb_filename, 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    self.to_screen(u'[%s] %s: Writing thumbnail to: %s' %
                                   (info_dict['extractor'], info_dict['id'], thumb_filename))
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    self.report_warning(u'Unable to download thumbnail "%s": %s' %
                                        (info_dict['thumbnail'], compat_str(err)))

    if not self.params.get('skip_download', False):
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
            success = True
        else:
            try:
                success = self.fd._do_download(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error(u'unable to download video data: %s' % str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return
    else:
        success = True

    if success:
        try:
            self.post_process(filename, info_dict)
        except (PostProcessingError) as err:
            self.report_error(u'postprocessing: %s' % str(err))
            return

    self.record_download_archive(info_dict)
def download(self, url_list):
    """Download a given list of URLs; returns the process return code."""
    if (len(url_list) > 1 and
            '%' not in self.params['outtmpl']
            and self.params.get('max_downloads') != 1):
        raise SameFileError(self.params['outtmpl'])

    # Restored the lost loop/try scaffolding around extract_info.
    for url in url_list:
        try:
            # It also downloads the videos
            self.extract_info(url)
        except UnavailableVideoError:
            self.report_error(u'unable to download video')
        except MaxDownloadsReached:
            self.to_screen(u'[info] Maximum number of downloaded files reached.')
            raise

    return self._download_retcode
def download_with_info_file(self, info_filename):
    """Process a previously dumped .info.json file (--load-info)."""
    with io.open(info_filename, 'r', encoding='utf-8') as f:
        # Restored lost line: parse the JSON info dict.
        info = json.load(f)
    try:
        self.process_ie_result(info, download=True)
    except DownloadError:
        webpage_url = info.get('webpage_url')
        if webpage_url is not None:
            self.report_warning(u'The info failed to download, trying with "%s"' % webpage_url)
            return self.download([webpage_url])
        else:
            # No URL to retry with: propagate the failure.
            raise
    return self._download_retcode
def post_process(self, filename, ie_info):
    """Run all the postprocessors on the given file."""
    # Work on a copy so the caller's info dict is not mutated.
    info = dict(ie_info)
    info['filepath'] = filename
    keep_video = None
    # NOTE(review): the _pps registry initialization was lost in this
    # trimmed build; default to an empty chain.
    for pp in getattr(self, '_pps', []):
        try:
            keep_video_wish, new_info = pp.run(info)
            if keep_video_wish is not None:
                if keep_video_wish:
                    # This PP wants the video kept: that always wins.
                    keep_video = keep_video_wish
                elif keep_video is None:
                    # No clear decision yet, let IE decide
                    keep_video = keep_video_wish
        except PostProcessingError as e:
            self.report_error(e.msg)
    if keep_video is False and not self.params.get('keepvideo', False):
        try:
            self.to_screen(u'Deleting original file %s (pass -k to keep)' % filename)
            os.remove(encodeFilename(filename))
        except (IOError, OSError):
            self.report_warning(u'Unable to remove downloaded video file')
def _make_archive_id(self, info_dict):
    """Build the '<extractor> <id>' line used in the download archive,
    or None when the info dict is too incomplete."""
    # Future-proof against any change in case
    # and backwards compatibility with prior versions
    key = info_dict.get('extractor_key')
    if key is None and 'id' in info_dict:
        key = info_dict.get('ie_key')  # key in a playlist
    if key is None:
        return None  # Incomplete video information
    return key.lower() + u' ' + info_dict['id']
960 def in_download_archive(self, info_dict):
961 fn = self.params.get('download_archive')
965 vid_id = self._make_archive_id(info_dict)
967 return False # Incomplete video information
970 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
971 for line in archive_file:
972 if line.strip() == vid_id:
974 except IOError as ioe:
975 if ioe.errno != errno.ENOENT:
979 def record_download_archive(self, info_dict):
980 fn = self.params.get('download_archive')
983 vid_id = self._make_archive_id(info_dict)
985 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
986 archive_file.write(vid_id + u'\n')
989 def format_resolution(format, default='unknown'):
990 if format.get('vcodec') == 'none':
992 if format.get('_resolution') is not None:
993 return format['_resolution']
994 if format.get('height') is not None:
995 if format.get('width') is not None:
996 res = u'%sx%s' % (format['width'], format['height'])
998 res = u'%sp' % format['height']
1003 def list_formats(self, info_dict):
1004 def format_note(fdict):
1006 if fdict.get('format_note') is not None:
1007 res += fdict['format_note'] + u' '
1008 if (fdict.get('vcodec') is not None and
1009 fdict.get('vcodec') != 'none'):
1010 res += u'%-5s' % fdict['vcodec']
1011 elif fdict.get('vbr') is not None:
1013 if fdict.get('vbr') is not None:
1014 res += u'@%4dk' % fdict['vbr']
1015 if fdict.get('acodec') is not None:
1018 res += u'%-5s' % fdict['acodec']
1019 elif fdict.get('abr') is not None:
1023 if fdict.get('abr') is not None:
1024 res += u'@%3dk' % fdict['abr']
1025 if fdict.get('filesize') is not None:
1028 res += format_bytes(fdict['filesize'])
1031 def line(format, idlen=20):
1032 return ((u'%-' + compat_str(idlen + 1) + u's%-10s%-12s%s') % (
1033 format['format_id'],
1035 self.format_resolution(format),
1036 format_note(format),
1039 formats = info_dict.get('formats', [info_dict])
1040 idlen = max(len(u'format code'),
1041 max(len(f['format_id']) for f in formats))
1042 formats_s = [line(f, idlen) for f in formats]
1043 if len(formats) > 1:
1044 formats_s[0] += (' ' if format_note(formats[0]) else '') + '(worst)'
1045 formats_s[-1] += (' ' if format_note(formats[-1]) else '') + '(best)'
1047 header_line = line({
1048 'format_id': u'format code', 'ext': u'extension',
1049 '_resolution': u'resolution', 'format_note': u'note'}, idlen=idlen)
1050 self.to_screen(u'[info] Available formats for %s:\n%s\n%s' %
1051 (info_dict['id'], header_line, u"\n".join(formats_s)))
1053 def urlopen(self, req):
1054 """ Start an HTTP download """
1055 return self._opener.open(req)
1057 def print_debug_header(self):
1058 if not self.params.get('verbose'):
1060 write_string(u'[debug] youtube-dl version ' + __version__ + u'\n')
1062 sp = subprocess.Popen(
1063 ['git', 'rev-parse', '--short', 'HEAD'],
1064 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1065 cwd=os.path.dirname(os.path.abspath(__file__)))
1066 out, err = sp.communicate()
1067 out = out.decode().strip()
1068 if re.match('[0-9a-f]+', out):
1069 write_string(u'[debug] Git HEAD: ' + out + u'\n')
1075 write_string(u'[debug] Python version %s - %s' %
1076 (platform.python_version(), platform_name()) + u'\n')
1079 for handler in self._opener.handlers:
1080 if hasattr(handler, 'proxies'):
1081 proxy_map.update(handler.proxies)
1082 write_string(u'[debug] Proxy map: ' + compat_str(proxy_map) + u'\n')
1084 def _setup_opener(self):
1085 timeout_val = self.params.get('socket_timeout')
1086 timeout = 600 if timeout_val is None else float(timeout_val)
1088 opts_cookiefile = self.params.get('cookiefile')
1089 opts_proxy = self.params.get('proxy')
1091 if opts_cookiefile is None:
1092 self.cookiejar = compat_cookiejar.CookieJar()
1094 self.cookiejar = compat_cookiejar.MozillaCookieJar(
1096 if os.access(opts_cookiefile, os.R_OK):
1097 self.cookiejar.load()
1099 cookie_processor = compat_urllib_request.HTTPCookieProcessor(
1101 if opts_proxy is not None:
1102 if opts_proxy == '':
1105 proxies = {'http': opts_proxy, 'https': opts_proxy}
1107 proxies = compat_urllib_request.getproxies()
1108 # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
1109 if 'http' in proxies and 'https' not in proxies:
1110 proxies['https'] = proxies['http']
1111 proxy_handler = compat_urllib_request.ProxyHandler(proxies)
1112 https_handler = make_HTTPS_handler(
1113 self.params.get('nocheckcertificate', False))
1114 opener = compat_urllib_request.build_opener(
1115 https_handler, proxy_handler, cookie_processor, YoutubeDLHandler())
1116 # Delete the default user-agent header, which would otherwise apply in
1117 # cases where our custom HTTP handler doesn't come into play
1118 # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
1119 opener.addheaders = []
1120 self._opener = opener
1122 # TODO remove this global modification
1123 compat_urllib_request.install_opener(opener)
1124 socket.setdefaulttimeout(timeout)