from __future__ import unicode_literals

import errno
import os
import re
import socket
import time

from .common import FileDownloader
from ..compat import (
    compat_str,
    compat_urllib_error,
)
from ..utils import (
    ContentTooShortError,
    encodeFilename,
    int_or_none,
    sanitize_open,
    sanitized_Request,
    write_xattr,
    XAttrMetadataError,
    XAttrUnavailableError,
)


class HttpFD(FileDownloader):
    def real_download(self, filename, info_dict):
        url = info_dict['url']
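
        # DownloadContext is a plain dict with attribute-style access, used to
        # hold all mutable download state (filenames, stream, byte offsets) so
        # the nested helper functions below can read and update it freely.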
        class DownloadContext(dict):
            __getattr__ = dict.get
            __setattr__ = dict.__setitem__
            __delattr__ = dict.__delitem__

        ctx = DownloadContext()
        ctx.filename = filename
        ctx.tmpfilename = self.temp_name(filename)
        ctx.stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        add_headers = info_dict.get('http_headers')
        if add_headers:
            headers.update(add_headers)
        basic_request = sanitized_Request(url, None, headers)
        request = sanitized_Request(url, None, headers)
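
        # In test mode only the first _TEST_FILE_SIZE bytes are fetched; otherwise
        # a non-zero http_chunk_size option causes the file to be requested in
        # ranged chunks of that size instead of in a single request.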
        is_test = self.params.get('test', False)
        chunk_size = self._TEST_FILE_SIZE if is_test else (
            self.params.get('http_chunk_size') or 0)

        ctx.open_mode = 'wb'
        ctx.resume_len = 0
        ctx.data_len = None
        ctx.block_size = self.params.get('buffersize', 1024)
        ctx.start_time = time.time()

        if self.params.get('continuedl', True):
            # Establish possible resume length
            if os.path.isfile(encodeFilename(ctx.tmpfilename)):
                ctx.resume_len = os.path.getsize(
                    encodeFilename(ctx.tmpfilename))

        ctx.is_resume = ctx.resume_len > 0

        count = 0
        retries = self.params.get('retries', 0)
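
        # These exceptions are used purely for control flow between the nested
        # helpers and the retry loop at the bottom of this method: stop with
        # success, retry the connection, or continue with the next ranged chunk.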
        class SucceedDownload(Exception):
            pass

        class RetryDownload(Exception):
            def __init__(self, source_error):
                self.source_error = source_error

        class NextFragment(Exception):
            pass

        def set_range(req, start, end):
            range_header = 'bytes=%d-' % start
            if end:
                range_header += compat_str(end)
            req.add_header('Range', range_header)
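
        # Open the HTTP connection, sending a Range header when resuming or when
        # downloading in chunks, and verify the server's Content-Range response
        # before trusting that a partial download can actually be continued.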
        def establish_connection():
            if ctx.resume_len > 0:
                range_start = ctx.resume_len
                if ctx.is_resume:
                    self.report_resuming_byte(ctx.resume_len)
                ctx.open_mode = 'ab'
            elif chunk_size > 0:
                range_start = 0
            else:
                range_start = None
            ctx.is_resume = False
            range_end = range_start + chunk_size - 1 if chunk_size else None
            if range_end and ctx.data_len is not None and range_end >= ctx.data_len:
                range_end = ctx.data_len - 1
            has_range = range_start is not None
            ctx.has_range = has_range
            if has_range:
                set_range(request, range_start, range_end)
            # Establish connection
            try:
                ctx.data = self.ydl.urlopen(request)
                # When trying to resume, the Content-Range HTTP header of the response
                # has to be checked to match the value of the requested Range HTTP
                # header. This is because some webservers don't support resuming and
                # serve the whole file with no Content-Range set in the response
                # despite the requested Range (see
                # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
                if has_range:
                    content_range = ctx.data.headers.get('Content-Range')
                    if content_range:
                        content_range_m = re.search(r'bytes (\d+)-(\d+)?(?:/(\d+))?', content_range)
                        # Content-Range is present and matches requested Range, resume is possible
                        if content_range_m:
                            if range_start == int(content_range_m.group(1)):
                                content_range_end = int_or_none(content_range_m.group(2))
                                content_len = int_or_none(content_range_m.group(3))
                                accept_content_len = (
                                    # Non-chunked download
                                    not chunk_size or
                                    # Chunked download and requested piece or
                                    # its part is promised to be served
                                    content_range_end == range_end or
                                    content_len < range_end)
                                if accept_content_len:
                                    ctx.data_len = content_len
                                    return
                    # Content-Range is either not present or invalid. Assuming the remote
                    # webserver is trying to send the whole file, resume is not possible,
                    # so wipe the local file and redownload it entirely.
                    self.report_unable_to_resume()
                    ctx.resume_len = 0
                    ctx.open_mode = 'wb'
                ctx.data_len = int_or_none(ctx.data.info().get('Content-length', None))
                return
            except (compat_urllib_error.HTTPError, ) as err:
                if err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        ctx.data = self.ydl.urlopen(basic_request)
                        content_length = ctx.data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation of the above condition: in issue #175 it was revealed
                            # that YouTube sometimes adds or removes a few bytes from the end of
                            # the file, changing the file size slightly and causing problems for
                            # some users. So, as suggested there, the file is considered completely
                            # downloaded if the reported size differs by less than 100 bytes from
                            # the one already on the hard drive.
                            self.report_file_already_downloaded(ctx.filename)
                            self.try_rename(ctx.tmpfilename, ctx.filename)
                            self._hook_progress({
                                'filename': ctx.filename,
                                'status': 'finished',
                                'downloaded_bytes': ctx.resume_len,
                                'total_bytes': ctx.resume_len,
                            })
                            raise SucceedDownload()
                        else:
                            # The length does not match, start the download over
                            self.report_unable_to_resume()
                            ctx.resume_len = 0
                            ctx.open_mode = 'wb'
                            return
                elif err.code == 302:
                    if not chunk_size:
                        raise
                    # HTTP Error 302: the server returned a redirect that would lead to
                    # an infinite loop. This may happen during chunked downloading and
                    # is usually fixed with a retry.
                elif err.code < 500 or err.code >= 600:
                    # Unexpected HTTP error
                    raise
                raise RetryDownload(err)
            except socket.error as err:
                if err.errno != errno.ECONNRESET:
                    raise
                # Connection reset is no problem, just retry
                raise RetryDownload(err)
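
        # download() drains the already-established connection to disk, emitting
        # progress hooks along the way; it returns True on success, returns False
        # on fatal errors and raises RetryDownload or NextFragment when the
        # connection has to be reopened.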
        def download():
            data_len = ctx.data.info().get('Content-length', None)

            # Range HTTP header may be ignored/unsupported by a webserver
            # (e.g. extractor/scivee.py, extractor/bambuser.py).
            # However, for a test we still would like to download just a piece of a file.
            # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
            # the block size when downloading a file.
            if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
                data_len = self._TEST_FILE_SIZE

            if data_len is not None:
                data_len = int(data_len) + ctx.resume_len
                min_data_len = self.params.get('min_filesize')
                max_data_len = self.params.get('max_filesize')
                if min_data_len is not None and data_len < min_data_len:
                    self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                    return False
                if max_data_len is not None and data_len > max_data_len:
                    self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                    return False

            byte_counter = 0 + ctx.resume_len
            block_size = ctx.block_size
            start = time.time()

            # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
            now = None  # needed for slow_down() in the first loop run
            before = start  # start measuring
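
            # On read errors the partially written data is kept: close the file,
            # remember how many bytes are already on disk and ask the outer loop
            # to reopen the connection from that offset.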
            def retry(e):
                if ctx.tmpfilename != '-':
                    ctx.stream.close()
                ctx.stream = None
                ctx.resume_len = os.path.getsize(encodeFilename(ctx.tmpfilename))
                raise RetryDownload(e)
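
            # Main loop: read one block from the connection, write it to disk,
            # then adapt the block size and report progress until the stream is
            # exhausted.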
            while True:
                try:
                    # Download and write
                    data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
                # socket.timeout is a subclass of socket.error but may not have
                # errno set
                except socket.timeout as e:
                    retry(e)
                except socket.error as e:
                    if e.errno not in (errno.ECONNRESET, errno.ETIMEDOUT):
                        raise
                    retry(e)

                byte_counter += len(data_block)

                # exit loop when download is finished
                if len(data_block) == 0:
                    break

                # Open destination file just in time
                if ctx.stream is None:
                    try:
                        ctx.stream, ctx.tmpfilename = sanitize_open(
                            ctx.tmpfilename, ctx.open_mode)
                        assert ctx.stream is not None
                        ctx.filename = self.undo_temp_name(ctx.tmpfilename)
                        self.report_destination(ctx.filename)
                    except (OSError, IOError) as err:
                        self.report_error('unable to open for writing: %s' % str(err))
                        return False

                    if self.params.get('xattr_set_filesize', False) and data_len is not None:
                        try:
                            write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
                        except (XAttrUnavailableError, XAttrMetadataError) as err:
                            self.report_error('unable to set filesize xattr: %s' % str(err))
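
                # Write the block, then enforce the configured rate limit and adapt
                # the read block size to the throughput of the previous iteration.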
                try:
                    ctx.stream.write(data_block)
                except (IOError, OSError) as err:
                    self.to_stderr('\n')
                    self.report_error('unable to write data: %s' % str(err))
                    return False

                # Apply rate limit
                self.slow_down(start, now, byte_counter - ctx.resume_len)

                # end measuring of one loop run
                now = time.time()
                after = now

                # Adjust block size
                if not self.params.get('noresizebuffer', False):
                    block_size = self.best_block_size(after - before, len(data_block))

                before = after

                # Progress message
                speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
                if ctx.data_len is None:
                    eta = None
                else:
                    eta = self.calc_eta(start, time.time(), ctx.data_len - ctx.resume_len, byte_counter - ctx.resume_len)
                self._hook_progress({
                    'status': 'downloading',
                    'downloaded_bytes': byte_counter,
                    'total_bytes': ctx.data_len,
                    'tmpfilename': ctx.tmpfilename,
                    'filename': ctx.filename,
                    'eta': eta,
                    'speed': speed,
                    'elapsed': now - ctx.start_time,
                })
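
                # In test mode stop as soon as the requested test slice is complete.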
                if is_test and byte_counter == data_len:
                    break

            # A chunk was fetched but the file is not complete yet: remember the
            # offset and let the outer loop request the next range.
            if not is_test and chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
                ctx.resume_len = byte_counter
                # ctx.block_size = block_size
                raise NextFragment()

            if ctx.stream is None:
                self.to_stderr('\n')
                self.report_error('Did not get any data blocks')
                return False
            if ctx.tmpfilename != '-':
                ctx.stream.close()

            if data_len is not None and byte_counter != data_len:
                err = ContentTooShortError(byte_counter, int(data_len))
                if count <= retries:
                    retry(err)
                raise err

            self.try_rename(ctx.tmpfilename, ctx.filename)

            # Update file modification time
            if self.params.get('updatetime', True):
                info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.info().get('last-modified', None))

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': byte_counter,
                'filename': ctx.filename,
                'status': 'finished',
                'elapsed': time.time() - ctx.start_time,
            })

            return True
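
        # Retry loop: (re)establish the connection and run the transfer, retrying
        # up to --retries times on RetryDownload and continuing with the next range
        # on NextFragment; SucceedDownload means the file was already complete.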
        while count <= retries:
            try:
                establish_connection()
                return download()
            except RetryDownload as e:
                count += 1
                if count <= retries:
                    self.report_retry(e.source_error, count, retries)
                continue
            except NextFragment:
                continue
            except SucceedDownload:
                return True

        self.report_error('giving up after %s retries' % retries)
        return False