# Merge branch 'master' into vimeo
# [youtube-dl] / youtube-dl
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Author: Ricardo Garcia Gonzalez
4 # Author: Danny Colligan
5 # Author: Benjamin Johnson
6 # Author: Vasyl' Vavrychuk
7 # Author: Witold Baryluk
8 # Author: PaweÅ‚ Paprota
9 # License: Public domain code
10 import cookielib
11 import ctypes
12 import datetime
13 import email.utils
14 import gzip
15 import htmlentitydefs
16 import httplib
17 import locale
18 import math
19 import netrc
20 import os
21 import os.path
22 import re
23 import socket
24 import string
25 import StringIO
26 import subprocess
27 import sys
28 import time
29 import urllib
30 import urllib2
31 import zlib
32
# parse_qs was moved from the cgi module to the urlparse module recently.
# Prefer the new location; fall back to cgi on older Python versions.
try:
        from urlparse import parse_qs
except ImportError:
        from cgi import parse_qs
38
# HTTP headers sent with every request, imitating a desktop Firefox browser
# (some sites serve different or restricted content to unknown user agents).
std_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0b10) Gecko/20100101 Firefox/4.0b10',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'en-us,en;q=0.5',
}

# Characters considered "simple" for titles: ASCII letters and digits, as a
# unicode string (Python 2 str.decode).
simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')
48
def preferredencoding():
        """Get preferred encoding.

        Returns the best encoding scheme for the system, based on
        locale.getpreferredencoding() and some further tweaks.
        """
        # The original wrapped this in a one-shot generator for no observable
        # benefit; a plain try/except has identical behavior.
        try:
                pref = locale.getpreferredencoding()
                # Verify the reported codec exists and can actually encode text;
                # some platforms report bogus or unusable values.
                u'TEST'.encode(pref)
        except (LookupError, TypeError, ValueError):
                # Unknown or unusable encoding: fall back to UTF-8.
                pref = 'UTF-8'
        return pref
64
65 def htmlentity_transform(matchobj):
66         """Transforms an HTML entity to a Unicode character.
67
68         This function receives a match object and is intended to be used with
69         the re.sub() function.
70         """
71         entity = matchobj.group(1)
72
73         # Known non-numeric HTML entity
74         if entity in htmlentitydefs.name2codepoint:
75                 return unichr(htmlentitydefs.name2codepoint[entity])
76
77         # Unicode character
78         mobj = re.match(ur'(?u)#(x?\d+)', entity)
79         if mobj is not None:
80                 numstr = mobj.group(1)
81                 if numstr.startswith(u'x'):
82                         base = 16
83                         numstr = u'0%s' % numstr
84                 else:
85                         base = 10
86                 return unichr(long(numstr, base))
87
88         # Unknown entity in name, return its literal representation
89         return (u'&%s;' % entity)
90
91 def sanitize_title(utitle):
92         """Sanitizes a video title so it could be used as part of a filename."""
93         utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
94         return utitle.replace(unicode(os.sep), u'%')
95
def sanitize_open(filename, open_mode):
        """Try to open the given filename, and slightly tweak it if this fails.

        Attempts to open the given filename. If this fails, it tries to change
        the filename slightly, step by step, until it's either able to open it
        or it fails and raises a final exception, like the standard open()
        function.

        It returns the tuple (stream, definitive_file_name).
        """
        try:
                if filename == u'-':
                        # Special name '-' means standard output. On Windows,
                        # stdout must be switched to binary mode so the video
                        # data is not mangled by newline translation.
                        if sys.platform == 'win32':
                                import msvcrt
                                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
                        return (sys.stdout, filename)
                stream = open(filename, open_mode)
                return (stream, filename)
        except (IOError, OSError), err:
                # In case of error, try to remove win32 forbidden chars
                filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)

                # An exception here should be caught in the caller
                stream = open(filename, open_mode)
                return (stream, filename)
121
def timeconvert(timestr):
        """Convert RFC 2822 defined time string into system timestamp.

        Returns the POSIX timestamp, or None when the string cannot be
        parsed as an RFC 2822 date.
        """
        # Note: re-indented to the file's tab-width-8 style; the original used
        # 4 spaces, inconsistent with every other definition in this module.
        timetuple = email.utils.parsedate_tz(timestr)
        if timetuple is None:
                return None
        return email.utils.mktime_tz(timetuple)
129
class DownloadError(Exception):
        """Download Error exception.

        This exception may be thrown by FileDownloader objects if they are not
        configured to continue on errors. They will contain the appropriate
        error message.
        """
        # Raised from FileDownloader.trouble() when 'ignoreerrors' is off.
        pass
138
class SameFileError(Exception):
        """Same File exception.

        This exception will be thrown by FileDownloader objects if they detect
        multiple files would have to be downloaded to the same file on disk.
        """
        # Raised from FileDownloader.download() when several URLs are given
        # together with a fixed (placeholder-free) output template.
        pass
146
class PostProcessingError(Exception):
        """Post Processing exception.

        This exception may be raised by PostProcessor's .run() method to
        indicate an error in the postprocessing task.
        """
        # Caught in FileDownloader.process_info() and reported via trouble().
        pass
154
class UnavailableVideoError(Exception):
        """Unavailable Format exception.

        This exception will be thrown when a video is requested
        in a format that is not available for that video.
        """
        # Raised from FileDownloader.process_info() on OS/IO errors during the
        # actual data download.
        pass
162
class ContentTooShortError(Exception):
        """Content Too Short exception.

        This exception may be raised by FileDownloader objects when a file they
        download is too small for what the server announced first, indicating
        the connection was probably interrupted.
        """
        # Both in bytes
        downloaded = None   # bytes actually received
        expected = None     # bytes announced by the server

        def __init__(self, downloaded, expected):
                self.downloaded = downloaded
                self.expected = expected
177
class YoutubeDLHandler(urllib2.HTTPHandler):
        """Handler for HTTP requests and responses.

        This class, when installed with an OpenerDirector, automatically adds
        the standard headers to every HTTP request and handles gzipped and
        deflated responses from web servers. If compression is to be avoided in
        a particular request, the original request in the program code only has
        to include the HTTP header "Youtubedl-No-Compression", which will be
        removed before making the real request.

        Part of this code was copied from:

          http://techknack.net/python-urllib2-handlers/

        Andrew Rowls, the author of that code, agreed to release it to the
        public domain.
        """

        @staticmethod
        def deflate(data):
                """Decompress a DEFLATE payload, raw or zlib-wrapped."""
                try:
                        # Negative wbits: raw deflate stream, no zlib header.
                        return zlib.decompress(data, -zlib.MAX_WBITS)
                except zlib.error:
                        # Retry as a zlib-wrapped stream; some servers send this
                        # despite declaring plain "deflate".
                        return zlib.decompress(data)

        @staticmethod
        def addinfourl_wrapper(stream, headers, url, code):
                """Build an addinfourl that carries the HTTP status code, even
                on old Python versions whose addinfourl lacks getcode()."""
                if hasattr(urllib2.addinfourl, 'getcode'):
                        return urllib2.addinfourl(stream, headers, url, code)
                ret = urllib2.addinfourl(stream, headers, url)
                ret.code = code
                return ret

        def http_request(self, req):
                # Force the standard headers, overriding any same-named header
                # already present on the request.
                for h in std_headers:
                        if h in req.headers:
                                del req.headers[h]
                        req.add_header(h, std_headers[h])
                # The marker header disables compression for this request and is
                # stripped so it is never sent on the wire. NOTE(review): the
                # casing used here presumably matches how add_header() stores
                # keys — confirm against urllib2's header capitalization.
                if 'Youtubedl-no-compression' in req.headers:
                        if 'Accept-encoding' in req.headers:
                                del req.headers['Accept-encoding']
                        del req.headers['Youtubedl-no-compression']
                return req

        def http_response(self, req, resp):
                old_resp = resp
                # gzip: wrap the body in a GzipFile, keeping original metadata.
                if resp.headers.get('Content-encoding', '') == 'gzip':
                        gz = gzip.GzipFile(fileobj=StringIO.StringIO(resp.read()), mode='r')
                        resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
                        resp.msg = old_resp.msg
                # deflate: decompress eagerly and serve from a StringIO.
                if resp.headers.get('Content-encoding', '') == 'deflate':
                        gz = StringIO.StringIO(self.deflate(resp.read()))
                        resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
                        resp.msg = old_resp.msg
                return resp
235
236 class FileDownloader(object):
237         """File Downloader class.
238
239         File downloader objects are the ones responsible of downloading the
240         actual video file and writing it to disk if the user has requested
241         it, among some other tasks. In most cases there should be one per
242         program. As, given a video URL, the downloader doesn't know how to
243         extract all the needed information, task that InfoExtractors do, it
244         has to pass the URL to one of them.
245
246         For this, file downloader objects have a method that allows
247         InfoExtractors to be registered in a given order. When it is passed
248         a URL, the file downloader handles it to the first InfoExtractor it
249         finds that reports being able to handle it. The InfoExtractor extracts
250         all the information about the video or videos the URL refers to, and
251         asks the FileDownloader to process the video information, possibly
252         downloading the video.
253
254         File downloaders accept a lot of parameters. In order not to saturate
255         the object constructor with arguments, it receives a dictionary of
256         options instead. These options are available through the params
257         attribute for the InfoExtractors to use. The FileDownloader also
258         registers itself as the downloader in charge for the InfoExtractors
259         that are added to it, so this is a "mutual registration".
260
261         Available options:
262
263         username:         Username for authentication purposes.
264         password:         Password for authentication purposes.
265         usenetrc:         Use netrc for authentication instead.
266         quiet:            Do not print messages to stdout.
267         forceurl:         Force printing final URL.
268         forcetitle:       Force printing title.
269         forcethumbnail:   Force printing thumbnail URL.
270         forcedescription: Force printing description.
271         forcefilename:    Force printing final filename.
272         simulate:         Do not download the video files.
273         format:           Video format code.
274         format_limit:     Highest quality format to try.
275         outtmpl:          Template for output names.
276         ignoreerrors:     Do not stop on download errors.
277         ratelimit:        Download speed limit, in bytes/sec.
278         nooverwrites:     Prevent overwriting files.
279         retries:          Number of times to retry for HTTP error 5xx
280         continuedl:       Try to continue downloads if possible.
281         noprogress:       Do not print the progress bar.
282         playliststart:    Playlist item to start at.
283         playlistend:      Playlist item to end at.
284         logtostderr:      Log messages to stderr instead of stdout.
285         consoletitle:     Display progress in console window's titlebar.
286         nopart:           Do not use temporary .part files.
287         updatetime:       Use the Last-modified header to set output file timestamps.
288         """
289
        # Class-level defaults; real instances overwrite these in __init__.
        params = None              # options dictionary (see class docstring)
        _ies = []                  # registered InfoExtractor objects
        _pps = []                  # registered PostProcessor objects
        _download_retcode = None   # process return code: 0 ok, 1 had errors
        _num_downloads = None      # ordinal assigned to each downloaded file
        _screen_file = None        # stream used by to_screen()

        def __init__(self, params):
                """Create a FileDownloader object with the given options."""
                self._ies = []
                self._pps = []
                self._download_retcode = 0
                self._num_downloads = 0
                # Route normal messages to stderr if requested — useful when
                # the video data itself is being written to stdout.
                self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
                self.params = params
305
306         @staticmethod
307         def pmkdir(filename):
308                 """Create directory components in filename. Similar to Unix "mkdir -p"."""
309                 components = filename.split(os.sep)
310                 aggregate = [os.sep.join(components[0:x]) for x in xrange(1, len(components))]
311                 aggregate = ['%s%s' % (x, os.sep) for x in aggregate] # Finish names with separator
312                 for dir in aggregate:
313                         if not os.path.exists(dir):
314                                 os.mkdir(dir)
315
316         @staticmethod
317         def format_bytes(bytes):
318                 if bytes is None:
319                         return 'N/A'
320                 if type(bytes) is str:
321                         bytes = float(bytes)
322                 if bytes == 0.0:
323                         exponent = 0
324                 else:
325                         exponent = long(math.log(bytes, 1024.0))
326                 suffix = 'bkMGTPEZY'[exponent]
327                 converted = float(bytes) / float(1024**exponent)
328                 return '%.2f%s' % (converted, suffix)
329
330         @staticmethod
331         def calc_percent(byte_counter, data_len):
332                 if data_len is None:
333                         return '---.-%'
334                 return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))
335
336         @staticmethod
337         def calc_eta(start, now, total, current):
338                 if total is None:
339                         return '--:--'
340                 dif = now - start
341                 if current == 0 or dif < 0.001: # One millisecond
342                         return '--:--'
343                 rate = float(current) / dif
344                 eta = long((float(total) - float(current)) / rate)
345                 (eta_mins, eta_secs) = divmod(eta, 60)
346                 if eta_mins > 99:
347                         return '--:--'
348                 return '%02d:%02d' % (eta_mins, eta_secs)
349
350         @staticmethod
351         def calc_speed(start, now, bytes):
352                 dif = now - start
353                 if bytes == 0 or dif < 0.001: # One millisecond
354                         return '%10s' % '---b/s'
355                 return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))
356
357         @staticmethod
358         def best_block_size(elapsed_time, bytes):
359                 new_min = max(bytes / 2.0, 1.0)
360                 new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
361                 if elapsed_time < 0.001:
362                         return long(new_max)
363                 rate = bytes / elapsed_time
364                 if rate > new_max:
365                         return long(new_max)
366                 if rate < new_min:
367                         return long(new_min)
368                 return long(rate)
369
370         @staticmethod
371         def parse_bytes(bytestr):
372                 """Parse a string indicating a byte quantity into a long integer."""
373                 matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
374                 if matchobj is None:
375                         return None
376                 number = float(matchobj.group(1))
377                 multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
378                 return long(round(number * multiplier))
379
        def add_info_extractor(self, ie):
                """Add an InfoExtractor object to the end of the list."""
                self._ies.append(ie)
                # Mutual registration: the extractor needs the downloader to
                # hand back the extracted information.
                ie.set_downloader(self)
384
        def add_post_processor(self, pp):
                """Add a PostProcessor object to the end of the chain."""
                self._pps.append(pp)
                # Mutual registration, mirroring add_info_extractor().
                pp.set_downloader(self)
389
        def to_screen(self, message, skip_eol=False, ignore_encoding_errors=False):
                """Print message to stdout if not in quiet mode."""
                try:
                        if not self.params.get('quiet', False):
                                # skip_eol suppresses the newline so progress
                                # lines can be redrawn in place with '\r'.
                                terminator = [u'\n', u''][skip_eol]
                                print >>self._screen_file, (u'%s%s' % (message, terminator)).encode(preferredencoding()),
                        # NOTE(review): the flush also runs in quiet mode —
                        # presumably harmless, but confirm it is intentional.
                        self._screen_file.flush()
                except (UnicodeEncodeError), err:
                        if not ignore_encoding_errors:
                                raise
400
        def to_stderr(self, message):
                """Print message to stderr, encoded with the system's preferred encoding."""
                print >>sys.stderr, message.encode(preferredencoding())
404
        def to_cons_title(self, message):
                """Set console/terminal window title to message."""
                if not self.params.get('consoletitle', False):
                        return
                if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
                        # c_wchar_p() might not be necessary if `message` is
                        # already of type unicode()
                        ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
                elif 'TERM' in os.environ:
                        # xterm-style escape: OSC 0 sets the icon name and title.
                        sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))
415
416         def fixed_template(self):
417                 """Checks if the output template is fixed."""
418                 return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)
419
        def trouble(self, message=None):
                """Determine action to take when a download problem appears.

                Depending on if the downloader has been configured to ignore
                download errors or not, this method may throw an exception or
                not when errors are found, after printing the message.
                """
                if message is not None:
                        self.to_stderr(message)
                if not self.params.get('ignoreerrors', False):
                        raise DownloadError(message)
                # Errors are being ignored: remember that something failed so
                # download() can still return a non-zero code at the end.
                self._download_retcode = 1
432
433         def slow_down(self, start_time, byte_counter):
434                 """Sleep if the download speed is over the rate limit."""
435                 rate_limit = self.params.get('ratelimit', None)
436                 if rate_limit is None or byte_counter == 0:
437                         return
438                 now = time.time()
439                 elapsed = now - start_time
440                 if elapsed <= 0.0:
441                         return
442                 speed = float(byte_counter) / elapsed
443                 if speed > rate_limit:
444                         time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
445
446         def temp_name(self, filename):
447                 """Returns a temporary filename for the given filename."""
448                 if self.params.get('nopart', False) or filename == u'-' or \
449                                 (os.path.exists(filename) and not os.path.isfile(filename)):
450                         return filename
451                 return filename + u'.part'
452
453         def undo_temp_name(self, filename):
454                 if filename.endswith(u'.part'):
455                         return filename[:-len(u'.part')]
456                 return filename
457
        def try_rename(self, old_filename, new_filename):
                """Rename the temporary file to its final name, reporting failures via trouble()."""
                try:
                        if old_filename == new_filename:
                                return
                        os.rename(old_filename, new_filename)
                except (IOError, OSError), err:
                        self.trouble(u'ERROR: unable to rename file')
465         
466         def try_utime(self, filename, last_modified_hdr):
467                 """Try to set the last-modified time of the given file."""
468                 if last_modified_hdr is None:
469                         return
470                 if not os.path.isfile(filename):
471                         return
472                 timestr = last_modified_hdr
473                 if timestr is None:
474                         return
475                 filetime = timeconvert(timestr)
476                 if filetime is None:
477                         return
478                 try:
479                         os.utime(filename,(time.time(), filetime))
480                 except:
481                         pass
482
        def report_destination(self, filename):
                """Report destination filename."""
                # Encoding errors are ignored: a misprinted name must not abort
                # the download itself.
                self.to_screen(u'[download] Destination: %s' % filename, ignore_encoding_errors=True)
486
487         def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
488                 """Report download progress."""
489                 if self.params.get('noprogress', False):
490                         return
491                 self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
492                                 (percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
493                 self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
494                                 (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
495
        def report_resuming_byte(self, resume_len):
                """Report attempt to resume at given byte."""
                self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
499
        def report_retry(self, count, retries):
                """Report retry in case of HTTP error 5xx"""
                self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
503
        def report_file_already_downloaded(self, file_name):
                """Report file has already been fully downloaded."""
                try:
                        self.to_screen(u'[download] %s has already been downloaded' % file_name)
                except (UnicodeEncodeError), err:
                        # The filename cannot be printed in the console charset;
                        # fall back to a generic message.
                        self.to_screen(u'[download] The file has already been downloaded')
510
        def report_unable_to_resume(self):
                """Report it was impossible to resume download."""
                self.to_screen(u'[download] Unable to resume')
514
515         def report_finish(self):
516                 """Report download finished."""
517                 if self.params.get('noprogress', False):
518                         self.to_screen(u'[download] Download completed')
519                 else:
520                         self.to_screen(u'')
521
        def increment_downloads(self):
                """Increment the ordinal that assigns a number to each file."""
                # Feeds the %(autonumber)s output-template field.
                self._num_downloads += 1
525
        def prepare_filename(self, info_dict):
                """Generate the output filename.

                Returns the rendered 'outtmpl' template, or None (after
                reporting trouble) when the template cannot be filled in.
                """
                try:
                        template_dict = dict(info_dict)
                        # Extra fields available to the template:
                        #   %(epoch)s      - current UNIX timestamp
                        #   %(autonumber)s - zero-padded download ordinal
                        template_dict['epoch'] = unicode(long(time.time()))
                        template_dict['autonumber'] = unicode('%05d' % self._num_downloads)
                        filename = self.params['outtmpl'] % template_dict
                        return filename
                except (ValueError, KeyError), err:
                        self.trouble(u'ERROR: invalid system charset or erroneous output template')
                        return None
537
        def process_info(self, info_dict):
                """Process a single dictionary returned by an InfoExtractor.

                In simulate mode only the requested fields are printed; in
                normal mode the video is downloaded and post-processed.
                """
                filename = self.prepare_filename(info_dict)
                # Do nothing else if in simulate mode
                if self.params.get('simulate', False):
                        # Forced printings
                        if self.params.get('forcetitle', False):
                                print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
                        if self.params.get('forceurl', False):
                                print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
                        if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
                                print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
                        if self.params.get('forcedescription', False) and 'description' in info_dict:
                                print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
                        if self.params.get('forcefilename', False) and filename is not None:
                                print filename.encode(preferredencoding(), 'xmlcharrefreplace')

                        return

                # prepare_filename() reports its own trouble and returns None.
                if filename is None:
                        return
                if self.params.get('nooverwrites', False) and os.path.exists(filename):
                        self.to_stderr(u'WARNING: file exists and will be skipped')
                        return

                try:
                        self.pmkdir(filename)
                except (OSError, IOError), err:
                        self.trouble(u'ERROR: unable to create directories: %s' % str(err))
                        return

                try:
                        success = self._do_download(filename, info_dict['url'].encode('utf-8'), info_dict.get('player_url', None))
                except (OSError, IOError), err:
                        # Local I/O failure: surface as "video unavailable" to
                        # the caller of process_info().
                        raise UnavailableVideoError
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                        self.trouble(u'ERROR: unable to download video data: %s' % str(err))
                        return
                except (ContentTooShortError, ), err:
                        self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                        return

                if success:
                        try:
                                self.post_process(filename, info_dict)
                        except (PostProcessingError), err:
                                self.trouble(u'ERROR: postprocessing: %s' % str(err))
                                return
586
587         def download(self, url_list):
588                 """Download a given list of URLs."""
589                 if len(url_list) > 1 and self.fixed_template():
590                         raise SameFileError(self.params['outtmpl'])
591
592                 for url in url_list:
593                         suitable_found = False
594                         for ie in self._ies:
595                                 # Go to next InfoExtractor if not suitable
596                                 if not ie.suitable(url):
597                                         continue
598
599                                 # Suitable InfoExtractor found
600                                 suitable_found = True
601
602                                 # Extract information from URL and process it
603                                 ie.extract(url)
604
605                                 # Suitable InfoExtractor had been found; go to next URL
606                                 break
607
608                         if not suitable_found:
609                                 self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
610
611                 return self._download_retcode
612
613         def post_process(self, filename, ie_info):
614                 """Run the postprocessing chain on the given file."""
615                 info = dict(ie_info)
616                 info['filepath'] = filename
617                 for pp in self._pps:
618                         info = pp.run(info)
619                         if info is None:
620                                 break
621
        def _download_with_rtmpdump(self, filename, url, player_url):
                """Download an rtmp:// URL by driving the external rtmpdump tool.

                Returns True on success, False on failure (after reporting
                trouble).
                """
                self.report_destination(filename)
                tmpfilename = self.temp_name(filename)

                # Check for rtmpdump first
                try:
                        subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
                except (OSError, IOError):
                        self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
                        return False

                # Download using rtmpdump. rtmpdump returns exit code 2 when
                # the connection was interrumpted and resuming appears to be
                # possible. This is part of rtmpdump's normal usage, AFAIK.
                basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
                retval = subprocess.call(basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)])
                while retval == 2 or retval == 1:
                        prevsize = os.path.getsize(tmpfilename)
                        self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
                        time.sleep(5.0) # This seems to be needed
                        # Retry with resume (-e); '-k 1' is added after exit
                        # code 1.
                        retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
                        cursize = os.path.getsize(tmpfilename)
                        if prevsize == cursize and retval == 1:
                                # A failing run made no progress: stop retrying
                                # instead of looping forever.
                                break
                if retval == 0:
                        self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(tmpfilename))
                        self.try_rename(tmpfilename, filename)
                        return True
                else:
                        self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
                        return False
653
	def _do_download(self, filename, url, player_url):
		"""Download url into filename, dispatching on protocol.

		Returns True on success (including "already fully downloaded"),
		False after a reported error. rtmp URLs are delegated to
		rtmpdump; everything else is fetched with urllib2, with support
		for resuming a partial .part file, connection retries, adaptive
		block sizes, progress reporting and rate limiting.
		"""
		# Check file already present
		if self.params.get('continuedl', False) and os.path.isfile(filename) and not self.params.get('nopart', False):
			self.report_file_already_downloaded(filename)
			return True

		# Attempt to download using rtmpdump
		if url.startswith('rtmp'):
			return self._download_with_rtmpdump(filename, url, player_url)

		tmpfilename = self.temp_name(filename)
		stream = None   # opened lazily, on first received data block
		open_mode = 'wb'

		# Do not include the Accept-Encoding header: the server must send
		# raw bytes so that byte ranges and Content-Length line up with
		# what is on disk.
		headers = {'Youtubedl-no-compression': 'True'}
		basic_request = urllib2.Request(url, None, headers)
		request = urllib2.Request(url, None, headers)

		# Establish possible resume length
		if os.path.isfile(tmpfilename):
			resume_len = os.path.getsize(tmpfilename)
		else:
			resume_len = 0

		# Request parameters in case of being able to resume
		if self.params.get('continuedl', False) and resume_len != 0:
			self.report_resuming_byte(resume_len)
			request.add_header('Range','bytes=%d-' % resume_len)
			open_mode = 'ab'

		count = 0
		retries = self.params.get('retries', 0)
		while count <= retries:
			# Establish connection
			try:
				data = urllib2.urlopen(request)
				break
			except (urllib2.HTTPError, ), err:
				if (err.code < 500 or err.code >= 600) and err.code != 416:
					# Unexpected HTTP error (not a 5xx server error and
					# not 416): do not retry, propagate to the caller.
					raise
				elif err.code == 416:
					# Unable to resume (requested range not satisfiable)
					try:
						# Open the connection again without the range header
						data = urllib2.urlopen(basic_request)
						content_length = data.info()['Content-Length']
					except (urllib2.HTTPError, ), err:
						if err.code < 500 or err.code >= 600:
							raise
					else:
						# Examine the reported length
						if (content_length is not None and
						    (resume_len - 100 < long(content_length) < resume_len + 100)):
							# The file had already been fully downloaded.
							# Explanation to the above condition: in issue #175 it was revealed that
							# YouTube sometimes adds or removes a few bytes from the end of the file,
							# changing the file size slightly and causing problems for some users. So
							# I decided to implement a suggested change and consider the file
							# completely downloaded if the file size differs less than 100 bytes from
							# the one in the hard drive.
							self.report_file_already_downloaded(filename)
							self.try_rename(tmpfilename, filename)
							return True
						else:
							# The length does not match, we start the download over
							self.report_unable_to_resume()
							open_mode = 'wb'
							break
			# Retry
			count += 1
			if count <= retries:
				self.report_retry(count, retries)

		if count > retries:
			self.trouble(u'ERROR: giving up after %s retries' % retries)
			return False

		data_len = data.info().get('Content-length', None)
		if data_len is not None:
			# Account for the bytes already on disk when resuming, so the
			# total matches the final file size.
			data_len = long(data_len) + resume_len
		data_len_str = self.format_bytes(data_len)
		byte_counter = 0 + resume_len
		block_size = 1024
		start = time.time()
		while True:
			# Download and write
			before = time.time()
			data_block = data.read(block_size)
			after = time.time()
			if len(data_block) == 0:
				break
			byte_counter += len(data_block)

			# Open file just in time, so the destination is only created
			# once the first data block has actually arrived.
			if stream is None:
				try:
					(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
					filename = self.undo_temp_name(tmpfilename)
					self.report_destination(filename)
				except (OSError, IOError), err:
					self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
					return False
			try:
				stream.write(data_block)
			except (IOError, OSError), err:
				self.trouble(u'\nERROR: unable to write data: %s' % str(err))
				return False
			# Adapt the next read size to the measured transfer speed.
			block_size = self.best_block_size(after - before, len(data_block))

			# Progress message (percentage, total size, speed, ETA).
			percent_str = self.calc_percent(byte_counter, data_len)
			eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
			speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
			self.report_progress(percent_str, data_len_str, speed_str, eta_str)

			# Apply rate limit
			self.slow_down(start, byte_counter - resume_len)

		# NOTE(review): if the server returned zero data blocks, stream is
		# still None here and close() would raise — presumably never hit in
		# practice; confirm before relying on it.
		stream.close()
		self.report_finish()
		if data_len is not None and byte_counter != data_len:
			raise ContentTooShortError(byte_counter, long(data_len))
		self.try_rename(tmpfilename, filename)

		# Update file modification time
		if self.params.get('updatetime', True):
			self.try_utime(filename, data.info().get('last-modified', None))

		return True
785
class InfoExtractor(object):
	"""Base class for site-specific information extractors.

	An information extractor receives a URL and produces, for each video
	behind it, a dictionary that the FileDownloader then acts upon
	(typically by downloading the video). Every dictionary must carry:

	id:             Video identifier.
	url:            Final video URL.
	uploader:       Nickname of the video uploader.
	title:          Literal title.
	stitle:         Simplified title.
	ext:            Video filename extension.
	format:         Video format.
	player_url:     SWF Player URL (may be None).

	Optional fields, used only by the forced-printing helpers (e.g. when
	youtube-dl backs a video search frontend such as youtube2mp3):

	thumbnail:      Full URL to a video thumbnail image.
	description:    One-line video description.

	Subclasses override _real_initialize(), _real_extract() and the
	static suitable() method, and are normally instantiated and handed
	to the main downloader.
	"""

	# Class-level defaults; __init__ resets both on each instance.
	_ready = False
	_downloader = None

	def __init__(self, downloader=None):
		"""Create the extractor, optionally wiring it to a downloader."""
		self._ready = False
		self.set_downloader(downloader)

	@staticmethod
	def suitable(url):
		"""Return True if this extractor can handle the given URL."""
		return False

	def set_downloader(self, downloader):
		"""Attach the FileDownloader this extractor reports to."""
		self._downloader = downloader

	def initialize(self):
		"""Perform one-time setup (authentication, etc.) lazily."""
		if self._ready:
			return
		self._real_initialize()
		self._ready = True

	def extract(self, url):
		"""Initialize if needed, then extract and return the video info."""
		self.initialize()
		return self._real_extract(url)

	def _real_initialize(self):
		"""Subclass hook for real initialization; default does nothing."""
		pass

	def _real_extract(self, url):
		"""Subclass hook for real extraction; default does nothing."""
		pass
856
class YoutubeIE(InfoExtractor):
	"""Information extractor for youtube.com."""

	# Group 1 matches the (optional) scheme/host/path prefix; group 2 is
	# the video id. The conditional (?(1).+)? only permits trailing
	# characters after the id when a full URL prefix (group 1) matched.
	_VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
	_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
	_LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
	_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
	_NETRC_MACHINE = 'youtube'
	# Listed in order of quality
	_available_formats = ['38', '37', '22', '45', '35', '34', '43', '18', '6', '5', '17', '13']
	# Maps a YouTube "fmt" code to its container extension; codes absent
	# here fall back to 'flv' in _real_extract.
	_video_extensions = {
		'13': '3gp',
		'17': 'mp4',
		'18': 'mp4',
		'22': 'mp4',
		'37': 'mp4',
		'38': 'video', # You actually don't know if this will be MOV, AVI or whatever
		'43': 'webm',
		'45': 'webm',
	}

	@staticmethod
	def suitable(url):
		"""Return True if url matches the YouTube URL pattern."""
		return (re.match(YoutubeIE._VALID_URL, url) is not None)

	def report_lang(self):
		"""Report attempt to set language."""
		self._downloader.to_screen(u'[youtube] Setting language')

	def report_login(self):
		"""Report attempt to log in."""
		self._downloader.to_screen(u'[youtube] Logging in')

	def report_age_confirmation(self):
		"""Report attempt to confirm age."""
		self._downloader.to_screen(u'[youtube] Confirming age')

	def report_video_webpage_download(self, video_id):
		"""Report attempt to download video webpage."""
		self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)

	def report_video_info_webpage_download(self, video_id):
		"""Report attempt to download video info webpage."""
		self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)

	def report_information_extraction(self, video_id):
		"""Report attempt to extract video information."""
		self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)

	def report_unavailable_format(self, video_id, format):
		"""Report that the requested format is not available."""
		self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))

	def report_rtmp_download(self):
		"""Indicate the download will use the RTMP protocol."""
		self._downloader.to_screen(u'[youtube] RTMP download detected')

	def _real_initialize(self):
		"""Set language and, if credentials exist, log in and confirm age.

		Language/login failures are warnings only; an age-confirmation
		failure is reported as an error. All steps are skipped when no
		downloader is attached.
		"""
		if self._downloader is None:
			return

		username = None
		password = None
		downloader_params = self._downloader.params

		# Attempt to use provided username and password or .netrc data
		if downloader_params.get('username', None) is not None:
			username = downloader_params['username']
			password = downloader_params['password']
		elif downloader_params.get('usenetrc', False):
			try:
				info = netrc.netrc().authenticators(self._NETRC_MACHINE)
				if info is not None:
					username = info[0]
					password = info[2]
				else:
					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
			except (IOError, netrc.NetrcParseError), err:
				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
				return

		# Set language (forces English pages so later regexes match)
		request = urllib2.Request(self._LANG_URL)
		try:
			self.report_lang()
			urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
			return

		# No authentication to be performed
		if username is None:
			return

		# Log in
		login_form = {
				'current_form': 'loginForm',
				'next':         '/',
				'action_login': 'Log In',
				'username':     username,
				'password':     password,
				}
		request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
		try:
			self.report_login()
			login_results = urllib2.urlopen(request).read()
			# If the login form is still present, authentication failed.
			if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
				self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
				return
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
			return

		# Confirm age
		age_form = {
				'next_url':             '/',
				'action_confirm':       'Confirm',
				}
		request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form))
		try:
			self.report_age_confirmation()
			age_results = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
			return

	def _real_extract(self, url):
		"""Extract video metadata from a YouTube URL and hand each chosen
		format to the downloader via process_info(). Errors are reported
		through self._downloader.trouble() followed by an early return.
		"""
		# Extract video id from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
			return
		video_id = mobj.group(2)

		# Get video webpage
		self.report_video_webpage_download(video_id)
		request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&amp;has_verified=1' % video_id)
		try:
			video_webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
			return

		# Attempt to extract SWF player URL (the URL appears in the page
		# with JS-escaped slashes, so unescape backslash sequences).
		mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
		if mobj is not None:
			player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
		else:
			player_url = None

		# Get video info: try several 'el' parameter values until one
		# response contains a 'token' field.
		self.report_video_info_webpage_download(video_id)
		for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
			video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
					   % (video_id, el_type))
			request = urllib2.Request(video_info_url)
			try:
				video_info_webpage = urllib2.urlopen(request).read()
				video_info = parse_qs(video_info_webpage)
				if 'token' in video_info:
					break
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
				return
		if 'token' not in video_info:
			if 'reason' in video_info:
				self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8'))
			else:
				self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
			return

		# Start extracting information
		self.report_information_extraction(video_id)

		# uploader
		if 'author' not in video_info:
			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
			return
		video_uploader = urllib.unquote_plus(video_info['author'][0])

		# title
		if 'title' not in video_info:
			self._downloader.trouble(u'ERROR: unable to extract video title')
			return
		video_title = urllib.unquote_plus(video_info['title'][0])
		video_title = video_title.decode('utf-8')
		video_title = sanitize_title(video_title)

		# simplified title: collapse runs of non-alphanumerics to '_'
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
		simple_title = simple_title.strip(ur'_')

		# thumbnail image
		if 'thumbnail_url' not in video_info:
			self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
			video_thumbnail = ''
		else:	# don't panic if we can't find it
			video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])

		# upload date: scraped from the watch page, normalized to YYYYMMDD
		upload_date = u'NA'
		mobj = re.search(r'id="eow-date".*?>(.*?)</span>', video_webpage, re.DOTALL)
		if mobj is not None:
			upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
			format_expressions = ['%d %B %Y', '%B %d %Y']
			for expression in format_expressions:
				try:
					upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
				except:
					# NOTE(review): bare except; if no expression matches,
					# upload_date silently stays the raw scraped string.
					pass

		# description (only fetched when it is going to be printed)
		video_description = 'No description available.'
		if self._downloader.params.get('forcedescription', False):
			mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
			if mobj is not None:
				video_description = mobj.group(1)

		# token
		video_token = urllib.unquote_plus(video_info['token'][0])

		# Decide which formats to download
		req_format = self._downloader.params.get('format', None)

		if 'fmt_url_map' in video_info:
			# fmt_url_map is a comma-separated list of 'fmt|url' pairs.
			url_map = dict(tuple(pair.split('|')) for pair in video_info['fmt_url_map'][0].split(','))
			format_limit = self._downloader.params.get('format_limit', None)
			if format_limit is not None and format_limit in self._available_formats:
				# Cap quality at format_limit (list is ordered best-first).
				format_list = self._available_formats[self._available_formats.index(format_limit):]
			else:
				format_list = self._available_formats
			existing_formats = [x for x in format_list if x in url_map]
			if len(existing_formats) == 0:
				self._downloader.trouble(u'ERROR: no known formats available for video')
				return
			if req_format is None:
				video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
			elif req_format == '-1':
				video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
			else:
				# Specific format
				if req_format not in url_map:
					self._downloader.trouble(u'ERROR: requested format not available')
					return
				video_url_list = [(req_format, url_map[req_format])] # Specific format

		elif 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
			# RTMP stream: no format code, the URL goes to rtmpdump.
			self.report_rtmp_download()
			video_url_list = [(None, video_info['conn'][0])]

		else:
			self._downloader.trouble(u'ERROR: no fmt_url_map or conn information found in video info')
			return

		for format_param, video_real_url in video_url_list:
			# At this point we have a new video
			self._downloader.increment_downloads()

			# Extension
			video_extension = self._video_extensions.get(format_param, 'flv')

			# Find the video URL in fmt_url_map or conn paramters
			try:
				# Process video information
				self._downloader.process_info({
					'id':		video_id.decode('utf-8'),
					'url':		video_real_url.decode('utf-8'),
					'uploader':	video_uploader.decode('utf-8'),
					'upload_date':	upload_date,
					'title':	video_title,
					'stitle':	simple_title,
					'ext':		video_extension.decode('utf-8'),
					'format':	(format_param is None and u'NA' or format_param.decode('utf-8')),
					'thumbnail':	video_thumbnail.decode('utf-8'),
					'description':	video_description.decode('utf-8'),
					'player_url':	player_url,
				})
			except UnavailableVideoError, err:
				self._downloader.trouble(u'\nERROR: unable to download video')
1137
1138 class MetacafeIE(InfoExtractor):
1139         """Information Extractor for metacafe.com."""
1140
1141         _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
1142         _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
1143         _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
1144         _youtube_ie = None
1145
1146         def __init__(self, youtube_ie, downloader=None):
1147                 InfoExtractor.__init__(self, downloader)
1148                 self._youtube_ie = youtube_ie
1149
1150         @staticmethod
1151         def suitable(url):
1152                 return (re.match(MetacafeIE._VALID_URL, url) is not None)
1153
1154         def report_disclaimer(self):
1155                 """Report disclaimer retrieval."""
1156                 self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
1157
1158         def report_age_confirmation(self):
1159                 """Report attempt to confirm age."""
1160                 self._downloader.to_screen(u'[metacafe] Confirming age')
1161
1162         def report_download_webpage(self, video_id):
1163                 """Report webpage download."""
1164                 self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
1165
1166         def report_extraction(self, video_id):
1167                 """Report information extraction."""
1168                 self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
1169
1170         def _real_initialize(self):
1171                 # Retrieve disclaimer
1172                 request = urllib2.Request(self._DISCLAIMER)
1173                 try:
1174                         self.report_disclaimer()
1175                         disclaimer = urllib2.urlopen(request).read()
1176                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1177                         self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
1178                         return
1179
1180                 # Confirm age
1181                 disclaimer_form = {
1182                         'filters': '0',
1183                         'submit': "Continue - I'm over 18",
1184                         }
1185                 request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form))
1186                 try:
1187                         self.report_age_confirmation()
1188                         disclaimer = urllib2.urlopen(request).read()
1189                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1190                         self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
1191                         return
1192
1193         def _real_extract(self, url):
1194                 # Extract id and simplified title from URL
1195                 mobj = re.match(self._VALID_URL, url)
1196                 if mobj is None:
1197                         self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
1198                         return
1199
1200                 video_id = mobj.group(1)
1201
1202                 # Check if video comes from YouTube
1203                 mobj2 = re.match(r'^yt-(.*)$', video_id)
1204                 if mobj2 is not None:
1205                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
1206                         return
1207
1208                 # At this point we have a new video
1209                 self._downloader.increment_downloads()
1210
1211                 simple_title = mobj.group(2).decode('utf-8')
1212
1213                 # Retrieve video webpage to extract further information
1214                 request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
1215                 try:
1216                         self.report_download_webpage(video_id)
1217                         webpage = urllib2.urlopen(request).read()
1218                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1219                         self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % str(err))
1220                         return
1221
1222                 # Extract URL, uploader and title from webpage
1223                 self.report_extraction(video_id)
1224                 mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
1225                 if mobj is not None:
1226                         mediaURL = urllib.unquote(mobj.group(1))
1227                         video_extension = mediaURL[-3:]
1228
1229                         # Extract gdaKey if available
1230                         mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
1231                         if mobj is None:
1232                                 video_url = mediaURL
1233                         else:
1234                                 gdaKey = mobj.group(1)
1235                                 video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
1236                 else:
1237                         mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
1238                         if mobj is None:
1239                                 self._downloader.trouble(u'ERROR: unable to extract media URL')
1240                                 return
1241                         vardict = parse_qs(mobj.group(1))
1242                         if 'mediaData' not in vardict:
1243                                 self._downloader.trouble(u'ERROR: unable to extract media URL')
1244                                 return
1245                         mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
1246                         if mobj is None:
1247                                 self._downloader.trouble(u'ERROR: unable to extract media URL')
1248                                 return
1249                         mediaURL = mobj.group(1).replace('\\/', '/')
1250                         video_extension = mediaURL[-3:]
1251                         video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
1252
1253                 mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
1254                 if mobj is None:
1255                         self._downloader.trouble(u'ERROR: unable to extract title')
1256                         return
1257                 video_title = mobj.group(1).decode('utf-8')
1258                 video_title = sanitize_title(video_title)
1259
1260                 mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage)
1261                 if mobj is None:
1262                         self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
1263                         return
1264                 video_uploader = mobj.group(1)
1265
1266                 try:
1267                         # Process video information
1268                         self._downloader.process_info({
1269                                 'id':           video_id.decode('utf-8'),
1270                                 'url':          video_url.decode('utf-8'),
1271                                 'uploader':     video_uploader.decode('utf-8'),
1272                                 'upload_date':  u'NA',
1273                                 'title':        video_title,
1274                                 'stitle':       simple_title,
1275                                 'ext':          video_extension.decode('utf-8'),
1276                                 'format':       u'NA',
1277                                 'player_url':   None,
1278                         })
1279                 except UnavailableVideoError:
1280                         self._downloader.trouble(u'\nERROR: unable to download video')
1281
1282
1283 class DailymotionIE(InfoExtractor):
1284         """Information Extractor for Dailymotion"""
1285
1286         _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'
1287
1288         def __init__(self, downloader=None):
1289                 InfoExtractor.__init__(self, downloader)
1290
1291         @staticmethod
1292         def suitable(url):
1293                 return (re.match(DailymotionIE._VALID_URL, url) is not None)
1294
1295         def report_download_webpage(self, video_id):
1296                 """Report webpage download."""
1297                 self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)
1298
1299         def report_extraction(self, video_id):
1300                 """Report information extraction."""
1301                 self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
1302
1303         def _real_initialize(self):
1304                 return
1305
1306         def _real_extract(self, url):
1307                 # Extract id and simplified title from URL
1308                 mobj = re.match(self._VALID_URL, url)
1309                 if mobj is None:
1310                         self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
1311                         return
1312
1313                 # At this point we have a new video
1314                 self._downloader.increment_downloads()
1315                 video_id = mobj.group(1)
1316
1317                 simple_title = mobj.group(2).decode('utf-8')
1318                 video_extension = 'flv'
1319
1320                 # Retrieve video webpage to extract further information
1321                 request = urllib2.Request(url)
1322                 try:
1323                         self.report_download_webpage(video_id)
1324                         webpage = urllib2.urlopen(request).read()
1325                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1326                         self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % str(err))
1327                         return
1328
1329                 # Extract URL, uploader and title from webpage
1330                 self.report_extraction(video_id)
1331                 mobj = re.search(r'(?i)addVariable\(\"video\"\s*,\s*\"([^\"]*)\"\)', webpage)
1332                 if mobj is None:
1333                         self._downloader.trouble(u'ERROR: unable to extract media URL')
1334                         return
1335                 mediaURL = urllib.unquote(mobj.group(1))
1336
1337                 # if needed add http://www.dailymotion.com/ if relative URL
1338
1339                 video_url = mediaURL
1340
1341                 # '<meta\s+name="title"\s+content="Dailymotion\s*[:\-]\s*(.*?)"\s*\/\s*>'
1342                 mobj = re.search(r'(?im)<title>Dailymotion\s*[\-:]\s*(.+?)</title>', webpage)
1343                 if mobj is None:
1344                         self._downloader.trouble(u'ERROR: unable to extract title')
1345                         return
1346                 video_title = mobj.group(1).decode('utf-8')
1347                 video_title = sanitize_title(video_title)
1348
1349                 mobj = re.search(r'(?im)<Attribute name="owner">(.+?)</Attribute>', webpage)
1350                 if mobj is None:
1351                         self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
1352                         return
1353                 video_uploader = mobj.group(1)
1354
1355                 try:
1356                         # Process video information
1357                         self._downloader.process_info({
1358                                 'id':           video_id.decode('utf-8'),
1359                                 'url':          video_url.decode('utf-8'),
1360                                 'uploader':     video_uploader.decode('utf-8'),
1361                                 'upload_date':  u'NA',
1362                                 'title':        video_title,
1363                                 'stitle':       simple_title,
1364                                 'ext':          video_extension.decode('utf-8'),
1365                                 'format':       u'NA',
1366                                 'player_url':   None,
1367                         })
1368                 except UnavailableVideoError:
1369                         self._downloader.trouble(u'\nERROR: unable to download video')
1370
1371 class GoogleIE(InfoExtractor):
1372         """Information extractor for video.google.com."""
1373
1374         _VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'
1375
1376         def __init__(self, downloader=None):
1377                 InfoExtractor.__init__(self, downloader)
1378
1379         @staticmethod
1380         def suitable(url):
1381                 return (re.match(GoogleIE._VALID_URL, url) is not None)
1382
1383         def report_download_webpage(self, video_id):
1384                 """Report webpage download."""
1385                 self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)
1386
1387         def report_extraction(self, video_id):
1388                 """Report information extraction."""
1389                 self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)
1390
1391         def _real_initialize(self):
1392                 return
1393
1394         def _real_extract(self, url):
1395                 # Extract id from URL
1396                 mobj = re.match(self._VALID_URL, url)
1397                 if mobj is None:
1398                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1399                         return
1400
1401                 # At this point we have a new video
1402                 self._downloader.increment_downloads()
1403                 video_id = mobj.group(1)
1404
1405                 video_extension = 'mp4'
1406
1407                 # Retrieve video webpage to extract further information
1408                 request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
1409                 try:
1410                         self.report_download_webpage(video_id)
1411                         webpage = urllib2.urlopen(request).read()
1412                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1413                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1414                         return
1415
1416                 # Extract URL, uploader, and title from webpage
1417                 self.report_extraction(video_id)
1418                 mobj = re.search(r"download_url:'([^']+)'", webpage)
1419                 if mobj is None:
1420                         video_extension = 'flv'
1421                         mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
1422                 if mobj is None:
1423                         self._downloader.trouble(u'ERROR: unable to extract media URL')
1424                         return
1425                 mediaURL = urllib.unquote(mobj.group(1))
1426                 mediaURL = mediaURL.replace('\\x3d', '\x3d')
1427                 mediaURL = mediaURL.replace('\\x26', '\x26')
1428
1429                 video_url = mediaURL
1430
1431                 mobj = re.search(r'<title>(.*)</title>', webpage)
1432                 if mobj is None:
1433                         self._downloader.trouble(u'ERROR: unable to extract title')
1434                         return
1435                 video_title = mobj.group(1).decode('utf-8')
1436                 video_title = sanitize_title(video_title)
1437                 simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
1438
1439                 # Extract video description
1440                 mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
1441                 if mobj is None:
1442                         self._downloader.trouble(u'ERROR: unable to extract video description')
1443                         return
1444                 video_description = mobj.group(1).decode('utf-8')
1445                 if not video_description:
1446                         video_description = 'No description available.'
1447
1448                 # Extract video thumbnail
1449                 if self._downloader.params.get('forcethumbnail', False):
1450                         request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
1451                         try:
1452                                 webpage = urllib2.urlopen(request).read()
1453                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1454                                 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1455                                 return
1456                         mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
1457                         if mobj is None:
1458                                 self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
1459                                 return
1460                         video_thumbnail = mobj.group(1)
1461                 else:   # we need something to pass to process_info
1462                         video_thumbnail = ''
1463
1464
1465                 try:
1466                         # Process video information
1467                         self._downloader.process_info({
1468                                 'id':           video_id.decode('utf-8'),
1469                                 'url':          video_url.decode('utf-8'),
1470                                 'uploader':     u'NA',
1471                                 'upload_date':  u'NA',
1472                                 'title':        video_title,
1473                                 'stitle':       simple_title,
1474                                 'ext':          video_extension.decode('utf-8'),
1475                                 'format':       u'NA',
1476                                 'player_url':   None,
1477                         })
1478                 except UnavailableVideoError:
1479                         self._downloader.trouble(u'\nERROR: unable to download video')
1480
1481
1482 class PhotobucketIE(InfoExtractor):
1483         """Information extractor for photobucket.com."""
1484
1485         _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
1486
1487         def __init__(self, downloader=None):
1488                 InfoExtractor.__init__(self, downloader)
1489
1490         @staticmethod
1491         def suitable(url):
1492                 return (re.match(PhotobucketIE._VALID_URL, url) is not None)
1493
1494         def report_download_webpage(self, video_id):
1495                 """Report webpage download."""
1496                 self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
1497
1498         def report_extraction(self, video_id):
1499                 """Report information extraction."""
1500                 self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
1501
1502         def _real_initialize(self):
1503                 return
1504
1505         def _real_extract(self, url):
1506                 # Extract id from URL
1507                 mobj = re.match(self._VALID_URL, url)
1508                 if mobj is None:
1509                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1510                         return
1511
1512                 # At this point we have a new video
1513                 self._downloader.increment_downloads()
1514                 video_id = mobj.group(1)
1515
1516                 video_extension = 'flv'
1517
1518                 # Retrieve video webpage to extract further information
1519                 request = urllib2.Request(url)
1520                 try:
1521                         self.report_download_webpage(video_id)
1522                         webpage = urllib2.urlopen(request).read()
1523                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1524                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1525                         return
1526
1527                 # Extract URL, uploader, and title from webpage
1528                 self.report_extraction(video_id)
1529                 mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
1530                 if mobj is None:
1531                         self._downloader.trouble(u'ERROR: unable to extract media URL')
1532                         return
1533                 mediaURL = urllib.unquote(mobj.group(1))
1534
1535                 video_url = mediaURL
1536
1537                 mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
1538                 if mobj is None:
1539                         self._downloader.trouble(u'ERROR: unable to extract title')
1540                         return
1541                 video_title = mobj.group(1).decode('utf-8')
1542                 video_title = sanitize_title(video_title)
1543                 simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
1544
1545                 video_uploader = mobj.group(2).decode('utf-8')
1546
1547                 try:
1548                         # Process video information
1549                         self._downloader.process_info({
1550                                 'id':           video_id.decode('utf-8'),
1551                                 'url':          video_url.decode('utf-8'),
1552                                 'uploader':     video_uploader,
1553                                 'upload_date':  u'NA',
1554                                 'title':        video_title,
1555                                 'stitle':       simple_title,
1556                                 'ext':          video_extension.decode('utf-8'),
1557                                 'format':       u'NA',
1558                                 'player_url':   None,
1559                         })
1560                 except UnavailableVideoError:
1561                         self._downloader.trouble(u'\nERROR: unable to download video')
1562
1563
1564 class YahooIE(InfoExtractor):
1565         """Information extractor for video.yahoo.com."""
1566
1567         # _VALID_URL matches all Yahoo! Video URLs
1568         # _VPAGE_URL matches only the extractable '/watch/' URLs
1569         _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
1570         _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
1571
1572         def __init__(self, downloader=None):
1573                 InfoExtractor.__init__(self, downloader)
1574
1575         @staticmethod
1576         def suitable(url):
1577                 return (re.match(YahooIE._VALID_URL, url) is not None)
1578
1579         def report_download_webpage(self, video_id):
1580                 """Report webpage download."""
1581                 self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
1582
1583         def report_extraction(self, video_id):
1584                 """Report information extraction."""
1585                 self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
1586
1587         def _real_initialize(self):
1588                 return
1589
1590         def _real_extract(self, url, new_video=True):
1591                 # Extract ID from URL
1592                 mobj = re.match(self._VALID_URL, url)
1593                 if mobj is None:
1594                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1595                         return
1596
1597                 # At this point we have a new video
1598                 self._downloader.increment_downloads()
1599                 video_id = mobj.group(2)
1600                 video_extension = 'flv'
1601
1602                 # Rewrite valid but non-extractable URLs as
1603                 # extractable English language /watch/ URLs
1604                 if re.match(self._VPAGE_URL, url) is None:
1605                         request = urllib2.Request(url)
1606                         try:
1607                                 webpage = urllib2.urlopen(request).read()
1608                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1609                                 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1610                                 return
1611
1612                         mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
1613                         if mobj is None:
1614                                 self._downloader.trouble(u'ERROR: Unable to extract id field')
1615                                 return
1616                         yahoo_id = mobj.group(1)
1617
1618                         mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
1619                         if mobj is None:
1620                                 self._downloader.trouble(u'ERROR: Unable to extract vid field')
1621                                 return
1622                         yahoo_vid = mobj.group(1)
1623
1624                         url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
1625                         return self._real_extract(url, new_video=False)
1626
1627                 # Retrieve video webpage to extract further information
1628                 request = urllib2.Request(url)
1629                 try:
1630                         self.report_download_webpage(video_id)
1631                         webpage = urllib2.urlopen(request).read()
1632                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1633                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1634                         return
1635
1636                 # Extract uploader and title from webpage
1637                 self.report_extraction(video_id)
1638                 mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
1639                 if mobj is None:
1640                         self._downloader.trouble(u'ERROR: unable to extract video title')
1641                         return
1642                 video_title = mobj.group(1).decode('utf-8')
1643                 simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
1644
1645                 mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
1646                 if mobj is None:
1647                         self._downloader.trouble(u'ERROR: unable to extract video uploader')
1648                         return
1649                 video_uploader = mobj.group(1).decode('utf-8')
1650
1651                 # Extract video thumbnail
1652                 mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
1653                 if mobj is None:
1654                         self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
1655                         return
1656                 video_thumbnail = mobj.group(1).decode('utf-8')
1657
1658                 # Extract video description
1659                 mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
1660                 if mobj is None:
1661                         self._downloader.trouble(u'ERROR: unable to extract video description')
1662                         return
1663                 video_description = mobj.group(1).decode('utf-8')
1664                 if not video_description: video_description = 'No description available.'
1665
1666                 # Extract video height and width
1667                 mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
1668                 if mobj is None:
1669                         self._downloader.trouble(u'ERROR: unable to extract video height')
1670                         return
1671                 yv_video_height = mobj.group(1)
1672
1673                 mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
1674                 if mobj is None:
1675                         self._downloader.trouble(u'ERROR: unable to extract video width')
1676                         return
1677                 yv_video_width = mobj.group(1)
1678
1679                 # Retrieve video playlist to extract media URL
1680                 # I'm not completely sure what all these options are, but we
1681                 # seem to need most of them, otherwise the server sends a 401.
1682                 yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
1683                 yv_bitrate = '700'  # according to Wikipedia this is hard-coded
1684                 request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
1685                                           '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
1686                                           '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
1687                 try:
1688                         self.report_download_webpage(video_id)
1689                         webpage = urllib2.urlopen(request).read()
1690                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1691                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1692                         return
1693
1694                 # Extract media URL from playlist XML
1695                 mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
1696                 if mobj is None:
1697                         self._downloader.trouble(u'ERROR: Unable to extract media URL')
1698                         return
1699                 video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
1700                 video_url = re.sub(r'(?u)&(.+?);', htmlentity_transform, video_url)
1701
1702                 try:
1703                         # Process video information
1704                         self._downloader.process_info({
1705                                 'id':           video_id.decode('utf-8'),
1706                                 'url':          video_url,
1707                                 'uploader':     video_uploader,
1708                                 'upload_date':  u'NA',
1709                                 'title':        video_title,
1710                                 'stitle':       simple_title,
1711                                 'ext':          video_extension.decode('utf-8'),
1712                                 'thumbnail':    video_thumbnail.decode('utf-8'),
1713                                 'description':  video_description,
1714                                 'thumbnail':    video_thumbnail,
1715                                 'description':  video_description,
1716                                 'player_url':   None,
1717                         })
1718                 except UnavailableVideoError:
1719                         self._downloader.trouble(u'\nERROR: unable to download video')
1720
1721
1722 class VimeoIE(InfoExtractor):
1723         """Information extractor for vimeo.com."""
1724
1725         # _VALID_URL matches Vimeo URLs
1726         _VALID_URL = r'(?:http://)?vimeo\.com/([0-9]+)'
1727
1728         def __init__(self, downloader=None):
1729                 InfoExtractor.__init__(self, downloader)
1730
1731         @staticmethod
1732         def suitable(url):
1733                 return (re.match(VimeoIE._VALID_URL, url) is not None)
1734
1735         def report_download_webpage(self, video_id):
1736                 """Report webpage download."""
1737                 self._downloader.to_screen(u'[video.vimeo] %s: Downloading webpage' % video_id)
1738
1739         def report_extraction(self, video_id):
1740                 """Report information extraction."""
1741                 self._downloader.to_screen(u'[video.vimeo] %s: Extracting information' % video_id)
1742
1743         def _real_initialize(self):
1744                 return
1745
1746         def _real_extract(self, url, new_video=True):
1747                 # Extract ID from URL
1748                 mobj = re.match(self._VALID_URL, url)
1749                 if mobj is None:
1750                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1751                         return
1752
1753                 # At this point we have a new video
1754                 self._downloader.increment_downloads()
1755                 video_id = mobj.group(1)
1756                 video_extension = 'flv' # FIXME
1757
1758                 # Retrieve video webpage to extract further information
1759                 request = urllib2.Request("http://vimeo.com/moogaloop/load/clip:%s" % video_id, None, std_headers)
1760                 try:
1761                         self.report_download_webpage(video_id)
1762                         webpage = urllib2.urlopen(request).read()
1763                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1764                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1765                         return
1766
1767                 # Extract uploader and title from webpage
1768                 self.report_extraction(video_id)
1769                 mobj = re.search(r'<caption>(.*?)</caption>', webpage)
1770                 if mobj is None:
1771                         self._downloader.trouble(u'ERROR: unable to extract video title')
1772                         return
1773                 video_title = mobj.group(1).decode('utf-8')
1774                 simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
1775
1776                 mobj = re.search(r'<uploader_url>http://vimeo.com/(.*?)</uploader_url>', webpage)
1777                 if mobj is None:
1778                         self._downloader.trouble(u'ERROR: unable to extract video uploader')
1779                         return
1780                 video_uploader = mobj.group(1).decode('utf-8')
1781
1782                 # Extract video thumbnail
1783                 mobj = re.search(r'<thumbnail>(.*?)</thumbnail>', webpage)
1784                 if mobj is None:
1785                         self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
1786                         return
1787                 video_thumbnail = mobj.group(1).decode('utf-8')
1788
1789                 # # Extract video description
1790                 # mobj = re.search(r'<meta property="og:description" content="(.*)" />', webpage)
1791                 # if mobj is None:
1792                 #       self._downloader.trouble(u'ERROR: unable to extract video description')
1793                 #       return
1794                 # video_description = mobj.group(1).decode('utf-8')
1795                 # if not video_description: video_description = 'No description available.'
1796                 video_description = 'Foo.'
1797
1798                 # Extract request signature
1799                 mobj = re.search(r'<request_signature>(.*?)</request_signature>', webpage)
1800                 if mobj is None:
1801                         self._downloader.trouble(u'ERROR: unable to extract request signature')
1802                         return
1803                 sig = mobj.group(1).decode('utf-8')
1804
1805                 # Extract request signature expiration
1806                 mobj = re.search(r'<request_signature_expires>(.*?)</request_signature_expires>', webpage)
1807                 if mobj is None:
1808                         self._downloader.trouble(u'ERROR: unable to extract request signature expiration')
1809                         return
1810                 sig_exp = mobj.group(1).decode('utf-8')
1811
1812                 video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s" % (video_id, sig, sig_exp)
1813
1814                 try:
1815                         # Process video information
1816                         self._downloader.process_info({
1817                                 'id':           video_id.decode('utf-8'),
1818                                 'url':          video_url,
1819                                 'uploader':     video_uploader,
1820                                 'upload_date':  u'NA',
1821                                 'title':        video_title,
1822                                 'stitle':       simple_title,
1823                                 'ext':          video_extension.decode('utf-8'),
1824                                 'thumbnail':    video_thumbnail.decode('utf-8'),
1825                                 'description':  video_description,
1826                                 'thumbnail':    video_thumbnail,
1827                                 'description':  video_description,
1828                                 'player_url':   None,
1829                         })
1830                 except UnavailableVideoError:
1831                         self._downloader.trouble(u'ERROR: unable to download video')
1832
1833
1834 class GenericIE(InfoExtractor):
1835         """Generic last-resort information extractor."""
1836
1837         def __init__(self, downloader=None):
1838                 InfoExtractor.__init__(self, downloader)
1839
1840         @staticmethod
1841         def suitable(url):
1842                 return True
1843
1844         def report_download_webpage(self, video_id):
1845                 """Report webpage download."""
1846                 self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
1847                 self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
1848
1849         def report_extraction(self, video_id):
1850                 """Report information extraction."""
1851                 self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
1852
1853         def _real_initialize(self):
1854                 return
1855
1856         def _real_extract(self, url):
1857                 # At this point we have a new video
1858                 self._downloader.increment_downloads()
1859
1860                 video_id = url.split('/')[-1]
1861                 request = urllib2.Request(url)
1862                 try:
1863                         self.report_download_webpage(video_id)
1864                         webpage = urllib2.urlopen(request).read()
1865                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1866                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1867                         return
1868                 except ValueError, err:
1869                         # since this is the last-resort InfoExtractor, if
1870                         # this error is thrown, it'll be thrown here
1871                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1872                         return
1873
1874                 self.report_extraction(video_id)
1875                 # Start with something easy: JW Player in SWFObject
1876                 mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
1877                 if mobj is None:
1878                         # Broaden the search a little bit
1879                         mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
1880                 if mobj is None:
1881                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1882                         return
1883
1884                 # It's possible that one of the regexes
1885                 # matched, but returned an empty group:
1886                 if mobj.group(1) is None:
1887                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1888                         return
1889
1890                 video_url = urllib.unquote(mobj.group(1))
1891                 video_id  = os.path.basename(video_url)
1892
1893                 # here's a fun little line of code for you:
1894                 video_extension = os.path.splitext(video_id)[1][1:]
1895                 video_id        = os.path.splitext(video_id)[0]
1896
1897                 # it's tempting to parse this further, but you would
1898                 # have to take into account all the variations like
1899                 #   Video Title - Site Name
1900                 #   Site Name | Video Title
1901                 #   Video Title - Tagline | Site Name
1902                 # and so on and so forth; it's just not practical
1903                 mobj = re.search(r'<title>(.*)</title>', webpage)
1904                 if mobj is None:
1905                         self._downloader.trouble(u'ERROR: unable to extract title')
1906                         return
1907                 video_title = mobj.group(1).decode('utf-8')
1908                 video_title = sanitize_title(video_title)
1909                 simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
1910
1911                 # video uploader is domain name
1912                 mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
1913                 if mobj is None:
1914                         self._downloader.trouble(u'ERROR: unable to extract title')
1915                         return
1916                 video_uploader = mobj.group(1).decode('utf-8')
1917
1918                 try:
1919                         # Process video information
1920                         self._downloader.process_info({
1921                                 'id':           video_id.decode('utf-8'),
1922                                 'url':          video_url.decode('utf-8'),
1923                                 'uploader':     video_uploader,
1924                                 'upload_date':  u'NA',
1925                                 'title':        video_title,
1926                                 'stitle':       simple_title,
1927                                 'ext':          video_extension.decode('utf-8'),
1928                                 'format':       u'NA',
1929                                 'player_url':   None,
1930                         })
1931                 except UnavailableVideoError, err:
1932                         self._downloader.trouble(u'\nERROR: unable to download video')
1933
1934
1935 class YoutubeSearchIE(InfoExtractor):
1936         """Information Extractor for YouTube search queries."""
1937         _VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
1938         _TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
1939         _VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
1940         _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
1941         _youtube_ie = None
1942         _max_youtube_results = 1000
1943
1944         def __init__(self, youtube_ie, downloader=None):
1945                 InfoExtractor.__init__(self, downloader)
1946                 self._youtube_ie = youtube_ie
1947
1948         @staticmethod
1949         def suitable(url):
1950                 return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)
1951
1952         def report_download_page(self, query, pagenum):
1953                 """Report attempt to download playlist page with given number."""
1954                 query = query.decode(preferredencoding())
1955                 self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
1956
1957         def _real_initialize(self):
1958                 self._youtube_ie.initialize()
1959
1960         def _real_extract(self, query):
1961                 mobj = re.match(self._VALID_QUERY, query)
1962                 if mobj is None:
1963                         self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
1964                         return
1965
1966                 prefix, query = query.split(':')
1967                 prefix = prefix[8:]
1968                 query  = query.encode('utf-8')
1969                 if prefix == '':
1970                         self._download_n_results(query, 1)
1971                         return
1972                 elif prefix == 'all':
1973                         self._download_n_results(query, self._max_youtube_results)
1974                         return
1975                 else:
1976                         try:
1977                                 n = long(prefix)
1978                                 if n <= 0:
1979                                         self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
1980                                         return
1981                                 elif n > self._max_youtube_results:
1982                                         self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)'  % (self._max_youtube_results, n))
1983                                         n = self._max_youtube_results
1984                                 self._download_n_results(query, n)
1985                                 return
1986                         except ValueError: # parsing prefix as integer fails
1987                                 self._download_n_results(query, 1)
1988                                 return
1989
1990         def _download_n_results(self, query, n):
1991                 """Downloads a specified number of results for a query"""
1992
1993                 video_ids = []
1994                 already_seen = set()
1995                 pagenum = 1
1996
1997                 while True:
1998                         self.report_download_page(query, pagenum)
1999                         result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
2000                         request = urllib2.Request(result_url)
2001                         try:
2002                                 page = urllib2.urlopen(request).read()
2003                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
2004                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
2005                                 return
2006
2007                         # Extract video identifiers
2008                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
2009                                 video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
2010                                 if video_id not in already_seen:
2011                                         video_ids.append(video_id)
2012                                         already_seen.add(video_id)
2013                                         if len(video_ids) == n:
2014                                                 # Specified n videos reached
2015                                                 for id in video_ids:
2016                                                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
2017                                                 return
2018
2019                         if re.search(self._MORE_PAGES_INDICATOR, page) is None:
2020                                 for id in video_ids:
2021                                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
2022                                 return
2023
2024                         pagenum = pagenum + 1
2025
2026 class GoogleSearchIE(InfoExtractor):
2027         """Information Extractor for Google Video search queries."""
2028         _VALID_QUERY = r'gvsearch(\d+|all)?:[\s\S]+'
2029         _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
2030         _VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
2031         _MORE_PAGES_INDICATOR = r'<span>Next</span>'
2032         _google_ie = None
2033         _max_google_results = 1000
2034
2035         def __init__(self, google_ie, downloader=None):
2036                 InfoExtractor.__init__(self, downloader)
2037                 self._google_ie = google_ie
2038
2039         @staticmethod
2040         def suitable(url):
2041                 return (re.match(GoogleSearchIE._VALID_QUERY, url) is not None)
2042
2043         def report_download_page(self, query, pagenum):
2044                 """Report attempt to download playlist page with given number."""
2045                 query = query.decode(preferredencoding())
2046                 self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
2047
2048         def _real_initialize(self):
2049                 self._google_ie.initialize()
2050
2051         def _real_extract(self, query):
2052                 mobj = re.match(self._VALID_QUERY, query)
2053                 if mobj is None:
2054                         self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
2055                         return
2056
2057                 prefix, query = query.split(':')
2058                 prefix = prefix[8:]
2059                 query  = query.encode('utf-8')
2060                 if prefix == '':
2061                         self._download_n_results(query, 1)
2062                         return
2063                 elif prefix == 'all':
2064                         self._download_n_results(query, self._max_google_results)
2065                         return
2066                 else:
2067                         try:
2068                                 n = long(prefix)
2069                                 if n <= 0:
2070                                         self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
2071                                         return
2072                                 elif n > self._max_google_results:
2073                                         self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)'  % (self._max_google_results, n))
2074                                         n = self._max_google_results
2075                                 self._download_n_results(query, n)
2076                                 return
2077                         except ValueError: # parsing prefix as integer fails
2078                                 self._download_n_results(query, 1)
2079                                 return
2080
2081         def _download_n_results(self, query, n):
2082                 """Downloads a specified number of results for a query"""
2083
2084                 video_ids = []
2085                 already_seen = set()
2086                 pagenum = 1
2087
2088                 while True:
2089                         self.report_download_page(query, pagenum)
2090                         result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
2091                         request = urllib2.Request(result_url)
2092                         try:
2093                                 page = urllib2.urlopen(request).read()
2094                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
2095                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
2096                                 return
2097
2098                         # Extract video identifiers
2099                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
2100                                 video_id = mobj.group(1)
2101                                 if video_id not in already_seen:
2102                                         video_ids.append(video_id)
2103                                         already_seen.add(video_id)
2104                                         if len(video_ids) == n:
2105                                                 # Specified n videos reached
2106                                                 for id in video_ids:
2107                                                         self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
2108                                                 return
2109
2110                         if re.search(self._MORE_PAGES_INDICATOR, page) is None:
2111                                 for id in video_ids:
2112                                         self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
2113                                 return
2114
2115                         pagenum = pagenum + 1
2116
2117 class YahooSearchIE(InfoExtractor):
2118         """Information Extractor for Yahoo! Video search queries."""
2119         _VALID_QUERY = r'yvsearch(\d+|all)?:[\s\S]+'
2120         _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
2121         _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
2122         _MORE_PAGES_INDICATOR = r'\s*Next'
2123         _yahoo_ie = None
2124         _max_yahoo_results = 1000
2125
2126         def __init__(self, yahoo_ie, downloader=None):
2127                 InfoExtractor.__init__(self, downloader)
2128                 self._yahoo_ie = yahoo_ie
2129
2130         @staticmethod
2131         def suitable(url):
2132                 return (re.match(YahooSearchIE._VALID_QUERY, url) is not None)
2133
2134         def report_download_page(self, query, pagenum):
2135                 """Report attempt to download playlist page with given number."""
2136                 query = query.decode(preferredencoding())
2137                 self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
2138
2139         def _real_initialize(self):
2140                 self._yahoo_ie.initialize()
2141
2142         def _real_extract(self, query):
2143                 mobj = re.match(self._VALID_QUERY, query)
2144                 if mobj is None:
2145                         self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
2146                         return
2147
2148                 prefix, query = query.split(':')
2149                 prefix = prefix[8:]
2150                 query  = query.encode('utf-8')
2151                 if prefix == '':
2152                         self._download_n_results(query, 1)
2153                         return
2154                 elif prefix == 'all':
2155                         self._download_n_results(query, self._max_yahoo_results)
2156                         return
2157                 else:
2158                         try:
2159                                 n = long(prefix)
2160                                 if n <= 0:
2161                                         self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
2162                                         return
2163                                 elif n > self._max_yahoo_results:
2164                                         self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)'  % (self._max_yahoo_results, n))
2165                                         n = self._max_yahoo_results
2166                                 self._download_n_results(query, n)
2167                                 return
2168                         except ValueError: # parsing prefix as integer fails
2169                                 self._download_n_results(query, 1)
2170                                 return
2171
2172         def _download_n_results(self, query, n):
2173                 """Downloads a specified number of results for a query"""
2174
2175                 video_ids = []
2176                 already_seen = set()
2177                 pagenum = 1
2178
2179                 while True:
2180                         self.report_download_page(query, pagenum)
2181                         result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
2182                         request = urllib2.Request(result_url)
2183                         try:
2184                                 page = urllib2.urlopen(request).read()
2185                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
2186                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
2187                                 return
2188
2189                         # Extract video identifiers
2190                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
2191                                 video_id = mobj.group(1)
2192                                 if video_id not in already_seen:
2193                                         video_ids.append(video_id)
2194                                         already_seen.add(video_id)
2195                                         if len(video_ids) == n:
2196                                                 # Specified n videos reached
2197                                                 for id in video_ids:
2198                                                         self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
2199                                                 return
2200
2201                         if re.search(self._MORE_PAGES_INDICATOR, page) is None:
2202                                 for id in video_ids:
2203                                         self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
2204                                 return
2205
2206                         pagenum = pagenum + 1
2207
2208 class YoutubePlaylistIE(InfoExtractor):
2209         """Information Extractor for YouTube playlists."""
2210
2211         _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists|artist)\?.*?(p|a)=|user/.*?/user/|p/)([^&]+).*'
2212         _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
2213         _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
2214         _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
2215         _youtube_ie = None
2216
2217         def __init__(self, youtube_ie, downloader=None):
2218                 InfoExtractor.__init__(self, downloader)
2219                 self._youtube_ie = youtube_ie
2220
2221         @staticmethod
2222         def suitable(url):
2223                 return (re.match(YoutubePlaylistIE._VALID_URL, url) is not None)
2224
2225         def report_download_page(self, playlist_id, pagenum):
2226                 """Report attempt to download playlist page with given number."""
2227                 self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
2228
2229         def _real_initialize(self):
2230                 self._youtube_ie.initialize()
2231
2232         def _real_extract(self, url):
2233                 # Extract playlist id
2234                 mobj = re.match(self._VALID_URL, url)
2235                 if mobj is None:
2236                         self._downloader.trouble(u'ERROR: invalid url: %s' % url)
2237                         return
2238
2239                 # Download playlist pages
2240                 # prefix is 'p' as default for playlists but there are other types that need extra care
2241                 playlist_prefix = mobj.group(1)
2242                 if playlist_prefix == 'a':
2243                         playlist_access = 'artist'
2244                 else:
2245                         playlist_access = 'view_play_list'
2246                 playlist_id = mobj.group(2)
2247                 video_ids = []
2248                 pagenum = 1
2249
2250                 while True:
2251                         self.report_download_page(playlist_id, pagenum)
2252                         request = urllib2.Request(self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum))
2253                         try:
2254                                 page = urllib2.urlopen(request).read()
2255                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
2256                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
2257                                 return
2258
2259                         # Extract video identifiers
2260                         ids_in_page = []
2261                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
2262                                 if mobj.group(1) not in ids_in_page:
2263                                         ids_in_page.append(mobj.group(1))
2264                         video_ids.extend(ids_in_page)
2265
2266                         if re.search(self._MORE_PAGES_INDICATOR, page) is None:
2267                                 break
2268                         pagenum = pagenum + 1
2269
2270                 playliststart = self._downloader.params.get('playliststart', 1) - 1
2271                 playlistend = self._downloader.params.get('playlistend', -1)
2272                 video_ids = video_ids[playliststart:playlistend]
2273
2274                 for id in video_ids:
2275                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
2276                 return
2277
2278 class YoutubeUserIE(InfoExtractor):
2279         """Information Extractor for YouTube users."""
2280
2281         _VALID_URL = r'(?:(?:(?:http://)?(?:\w+\.)?youtube.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
2282         _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
2283         _GDATA_PAGE_SIZE = 50
2284         _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
2285         _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
2286         _youtube_ie = None
2287
2288         def __init__(self, youtube_ie, downloader=None):
2289                 InfoExtractor.__init__(self, downloader)
2290                 self._youtube_ie = youtube_ie
2291
2292         @staticmethod
2293         def suitable(url):
2294                 return (re.match(YoutubeUserIE._VALID_URL, url) is not None)
2295
2296         def report_download_page(self, username, start_index):
2297                 """Report attempt to download user page."""
2298                 self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
2299                                            (username, start_index, start_index + self._GDATA_PAGE_SIZE))
2300
2301         def _real_initialize(self):
2302                 self._youtube_ie.initialize()
2303
2304         def _real_extract(self, url):
2305                 # Extract username
2306                 mobj = re.match(self._VALID_URL, url)
2307                 if mobj is None:
2308                         self._downloader.trouble(u'ERROR: invalid url: %s' % url)
2309                         return
2310
2311                 username = mobj.group(1)
2312
2313                 # Download video ids using YouTube Data API. Result size per
2314                 # query is limited (currently to 50 videos) so we need to query
2315                 # page by page until there are no video ids - it means we got
2316                 # all of them.
2317
2318                 video_ids = []
2319                 pagenum = 0
2320
2321                 while True:
2322                         start_index = pagenum * self._GDATA_PAGE_SIZE + 1
2323                         self.report_download_page(username, start_index)
2324
2325                         request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
2326
2327                         try:
2328                                 page = urllib2.urlopen(request).read()
2329                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
2330                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
2331                                 return
2332
2333                         # Extract video identifiers
2334                         ids_in_page = []
2335
2336                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
2337                                 if mobj.group(1) not in ids_in_page:
2338                                         ids_in_page.append(mobj.group(1))
2339
2340                         video_ids.extend(ids_in_page)
2341
2342                         # A little optimization - if current page is not
2343                         # "full", ie. does not contain PAGE_SIZE video ids then
2344                         # we can assume that this page is the last one - there
2345                         # are no more ids on further pages - no need to query
2346                         # again.
2347
2348                         if len(ids_in_page) < self._GDATA_PAGE_SIZE:
2349                                 break
2350
2351                         pagenum += 1
2352
2353                 all_ids_count = len(video_ids)
2354                 playliststart = self._downloader.params.get('playliststart', 1) - 1
2355                 playlistend = self._downloader.params.get('playlistend', -1)
2356
2357                 if playlistend == -1:
2358                         video_ids = video_ids[playliststart:]
2359                 else:
2360                         video_ids = video_ids[playliststart:playlistend]
2361                         
2362                 self._downloader.to_screen("[youtube] user %s: Collected %d video ids (downloading %d of them)" %
2363                                            (username, all_ids_count, len(video_ids)))
2364
2365                 for video_id in video_ids:
2366                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)
2367
2368
2369 class DepositFilesIE(InfoExtractor):
2370         """Information extractor for depositfiles.com"""
2371
2372         _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles.com/(?:../(?#locale))?files/(.+)'
2373
2374         def __init__(self, downloader=None):
2375                 InfoExtractor.__init__(self, downloader)
2376
2377         @staticmethod
2378         def suitable(url):
2379                 return (re.match(DepositFilesIE._VALID_URL, url) is not None)
2380
2381         def report_download_webpage(self, file_id):
2382                 """Report webpage download."""
2383                 self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
2384
2385         def report_extraction(self, file_id):
2386                 """Report information extraction."""
2387                 self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
2388
2389         def _real_initialize(self):
2390                 return
2391
	def _real_extract(self, url):
		"""Extract the real download URL and title for a DepositFiles file.

		All results and failures are reported through self._downloader
		(process_info() on success, trouble() on error); the method
		itself always returns None.
		"""
		# At this point we have a new file
		self._downloader.increment_downloads()

		file_id = url.split('/')[-1]
		# Rebuild url in english locale
		url = 'http://depositfiles.com/en/files/' + file_id

		# Retrieve file webpage with 'Free download' button pressed
		free_download_indication = { 'gateway_result' : '1' }
		request = urllib2.Request(url, urllib.urlencode(free_download_indication))
		try:
			self.report_download_webpage(file_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err))
			return

		# Search for the real file URL
		mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
		if (mobj is None) or (mobj.group(1) is None):
			# Try to figure out reason of the error.
			# NOTE(review): the site appears to show restriction notices in a
			# <strong> tag starting with "Attention"; surface that text when found.
			mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
			if (mobj is not None) and (mobj.group(1) is not None):
				# Collapse the multi-line HTML message onto a single line.
				restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
				self._downloader.trouble(u'ERROR: %s' % restriction_message)
			else:
				self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
			return

		file_url = mobj.group(1)
		# File extension taken from the URL path, without the leading dot.
		file_extension = os.path.splitext(file_url)[1][1:]

		# Search for file title
		mobj = re.search(r'<b title="(.*?)">', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		file_title = mobj.group(1).decode('utf-8')

		try:
			# Process file information
			self._downloader.process_info({
				'id':		file_id.decode('utf-8'),
				'url':		file_url.decode('utf-8'),
				'uploader':	u'NA',
				'upload_date':	u'NA',
				'title':	file_title,
				'stitle':	file_title,
				'ext':		file_extension.decode('utf-8'),
				'format':	u'NA',
				'player_url':	None,
			})
		except UnavailableVideoError, err:
			self._downloader.trouble(u'ERROR: unable to download file')
2447
class PostProcessor(object):
	"""Base class for post-processing steps.

	A PostProcessor is attached to a downloader through the
	downloader's add_post_processor() method. Once a download
	finishes successfully, the downloader walks its chain of
	PostProcessors, calling run() on each one: the first call
	receives an initial argument, and every later call receives
	whatever the previous PostProcessor returned.

	The chain stops as soon as a PostProcessor returns None, or
	when the last one has run.

	PostProcessors and downloaders use the same "mutual
	registration" scheme that InfoExtractor objects use.
	"""

	# Downloader this PP is registered with (None until attached).
	_downloader = None

	def __init__(self, downloader=None):
		self._downloader = downloader

	def set_downloader(self, downloader):
		"""Attach the downloader this PP belongs to."""
		self._downloader = downloader

	def run(self, information):
		"""Execute this post-processing step.

		"information" is a dictionary shaped like the ones built by
		InfoExtractors, plus one extra key, "filepath", naming the
		file that was just downloaded.

		Returning None halts the post-processing chain; returning an
		information dictionary (possibly the received one with some
		fields changed) passes it along to the next PostProcessor.

		Implementations may also raise PostProcessingError, which the
		calling downloader takes into account.
		"""
		return information # the base class performs no processing
2493
### MAIN PROGRAM ###
if __name__ == '__main__':
	# Everything runs inside one try so the handlers at the bottom can
	# translate the known top-level exceptions into exit codes/messages.
	try:
		# Modules needed only when running the main program
		import getpass
		import optparse

		# Function to update the program file with the latest version from the repository.
		def update_self(downloader, filename):
			# Note: downloader only used for options
			if not os.access(filename, os.W_OK):
				sys.exit('ERROR: no write permissions on %s' % filename)

			downloader.to_screen('Updating to latest stable version...')
			try:
				# The LATEST_VERSION file holds the version string, which is
				# also used as the git ref to fetch the new script from.
				latest_url = 'http://github.com/rg3/youtube-dl/raw/master/LATEST_VERSION'
				latest_version = urllib.urlopen(latest_url).read().strip()
				prog_url = 'http://github.com/rg3/youtube-dl/raw/%s/youtube-dl' % latest_version
				newcontent = urllib.urlopen(prog_url).read()
			except (IOError, OSError), err:
				sys.exit('ERROR: unable to download latest version')
			try:
				stream = open(filename, 'w')
				stream.write(newcontent)
				stream.close()
			except (IOError, OSError), err:
				sys.exit('ERROR: unable to overwrite current version')
			downloader.to_screen('Updated to version %s' % latest_version)

		# Parse command line
		parser = optparse.OptionParser(
			usage='Usage: %prog [options] url...',
			version='2011.01.30',
			conflict_handler='resolve',
		)

		parser.add_option('-h', '--help',
				action='help', help='print this help text and exit')
		parser.add_option('-v', '--version',
				action='version', help='print program version and exit')
		parser.add_option('-U', '--update',
				action='store_true', dest='update_self', help='update this program to latest stable version')
		parser.add_option('-i', '--ignore-errors',
				action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
		parser.add_option('-r', '--rate-limit',
				dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
		parser.add_option('-R', '--retries',
				dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
		parser.add_option('--playlist-start',
				dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
		parser.add_option('--playlist-end',
				dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
		parser.add_option('--dump-user-agent',
				action='store_true', dest='dump_user_agent',
				help='display the current browser identification', default=False)

		authentication = optparse.OptionGroup(parser, 'Authentication Options')
		authentication.add_option('-u', '--username',
				dest='username', metavar='USERNAME', help='account username')
		authentication.add_option('-p', '--password',
				dest='password', metavar='PASSWORD', help='account password')
		authentication.add_option('-n', '--netrc',
				action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
		parser.add_option_group(authentication)

		video_format = optparse.OptionGroup(parser, 'Video Format Options')
		video_format.add_option('-f', '--format',
				action='store', dest='format', metavar='FORMAT', help='video format code')
		video_format.add_option('--all-formats',
				action='store_const', dest='format', help='download all available video formats', const='-1')
		video_format.add_option('--max-quality',
				action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
		parser.add_option_group(video_format)

		verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
		verbosity.add_option('-q', '--quiet',
				action='store_true', dest='quiet', help='activates quiet mode', default=False)
		verbosity.add_option('-s', '--simulate',
				action='store_true', dest='simulate', help='do not download video', default=False)
		verbosity.add_option('-g', '--get-url',
				action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
		verbosity.add_option('-e', '--get-title',
				action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
		verbosity.add_option('--get-thumbnail',
				action='store_true', dest='getthumbnail',
				help='simulate, quiet but print thumbnail URL', default=False)
		verbosity.add_option('--get-description',
				action='store_true', dest='getdescription',
				help='simulate, quiet but print video description', default=False)
		verbosity.add_option('--get-filename',
				action='store_true', dest='getfilename',
				help='simulate, quiet but print output filename', default=False)
		verbosity.add_option('--no-progress',
				action='store_true', dest='noprogress', help='do not print progress bar', default=False)
		verbosity.add_option('--console-title',
				action='store_true', dest='consoletitle',
				help='display progress in console titlebar', default=False)
		parser.add_option_group(verbosity)

		filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
		filesystem.add_option('-t', '--title',
				action='store_true', dest='usetitle', help='use title in file name', default=False)
		filesystem.add_option('-l', '--literal',
				action='store_true', dest='useliteral', help='use literal title in file name', default=False)
		filesystem.add_option('-A', '--auto-number',
				action='store_true', dest='autonumber',
				help='number downloaded files starting from 00000', default=False)
		filesystem.add_option('-o', '--output',
				dest='outtmpl', metavar='TEMPLATE', help='output filename template')
		filesystem.add_option('-a', '--batch-file',
				dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
		filesystem.add_option('-w', '--no-overwrites',
				action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
		filesystem.add_option('-c', '--continue',
				action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
		filesystem.add_option('--cookies',
				dest='cookiefile', metavar='FILE', help='file to dump cookie jar to')
		filesystem.add_option('--no-part',
				action='store_true', dest='nopart', help='do not use .part files', default=False)
		filesystem.add_option('--no-mtime',
				action='store_false', dest='updatetime',
				help='do not use the Last-modified header to set the file modification time', default=True)
		parser.add_option_group(filesystem)

		(opts, args) = parser.parse_args()

		# Open appropriate CookieJar
		if opts.cookiefile is None:
			jar = cookielib.CookieJar()
		else:
			try:
				jar = cookielib.MozillaCookieJar(opts.cookiefile)
				# Only load the file when it already exists and is readable;
				# a brand-new cookie file is simply created on save later.
				if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
					jar.load()
			except (IOError, OSError), err:
				sys.exit(u'ERROR: unable to open cookie file')

		# Dump user agent
		if opts.dump_user_agent:
			print std_headers['User-Agent']
			sys.exit(0)

		# General configuration
		cookie_processor = urllib2.HTTPCookieProcessor(jar)
		urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler(), cookie_processor, YoutubeDLHandler()))
		socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)

		# Batch file verification
		batchurls = []
		if opts.batchfile is not None:
			try:
				if opts.batchfile == '-':
					batchfd = sys.stdin
				else:
					batchfd = open(opts.batchfile, 'r')
				batchurls = batchfd.readlines()
				batchurls = [x.strip() for x in batchurls]
				# Drop blank lines and lines starting with '#', '/' or ';'
				# (treated as comments in the batch file).
				batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
			except IOError:
				sys.exit(u'ERROR: batch file could not be read')
		all_urls = batchurls + args

		# Conflicting, missing and erroneous options
		if opts.usenetrc and (opts.username is not None or opts.password is not None):
			parser.error(u'using .netrc conflicts with giving username/password')
		if opts.password is not None and opts.username is None:
			parser.error(u'account username missing')
		if opts.outtmpl is not None and (opts.useliteral or opts.usetitle or opts.autonumber):
			parser.error(u'using output template conflicts with using title, literal title or auto number')
		if opts.usetitle and opts.useliteral:
			parser.error(u'using title conflicts with using literal title')
		if opts.username is not None and opts.password is None:
			# Username given without password: prompt interactively.
			opts.password = getpass.getpass(u'Type account password and press return:')
		if opts.ratelimit is not None:
			numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
			if numeric_limit is None:
				parser.error(u'invalid rate limit specified')
			opts.ratelimit = numeric_limit
		if opts.retries is not None:
			try:
				opts.retries = long(opts.retries)
			except (TypeError, ValueError), err:
				parser.error(u'invalid retry count specified')
		try:
			opts.playliststart = long(opts.playliststart)
			if opts.playliststart <= 0:
				raise ValueError
		except (TypeError, ValueError), err:
			parser.error(u'invalid playlist start number specified')
		try:
			opts.playlistend = long(opts.playlistend)
			# -1 is the "until the end of the playlist" sentinel; any other
			# value must be positive and must not precede the start index.
			if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
				raise ValueError
		except (TypeError, ValueError), err:
			parser.error(u'invalid playlist end number specified')

		# Information extractors
		vimeo_ie = VimeoIE()
		youtube_ie = YoutubeIE()
		metacafe_ie = MetacafeIE(youtube_ie)
		dailymotion_ie = DailymotionIE()
		youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
		youtube_user_ie = YoutubeUserIE(youtube_ie)
		youtube_search_ie = YoutubeSearchIE(youtube_ie)
		google_ie = GoogleIE()
		google_search_ie = GoogleSearchIE(google_ie)
		photobucket_ie = PhotobucketIE()
		yahoo_ie = YahooIE()
		yahoo_search_ie = YahooSearchIE(yahoo_ie)
		deposit_files_ie = DepositFilesIE()
		generic_ie = GenericIE()

		# File downloader
		fd = FileDownloader({
			'usenetrc': opts.usenetrc,
			'username': opts.username,
			'password': opts.password,
			# Any "print X and exit" option implies both quiet and simulate.
			'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename),
			'forceurl': opts.geturl,
			'forcetitle': opts.gettitle,
			'forcethumbnail': opts.getthumbnail,
			'forcedescription': opts.getdescription,
			'forcefilename': opts.getfilename,
			'simulate': (opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename),
			'format': opts.format,
			'format_limit': opts.format_limit,
			# The or-chain picks the first applicable filename template:
			# an explicit -o template wins; otherwise one is derived from
			# the --all-formats / --title / --literal / --auto-number
			# combination, falling back to plain '%(id)s.%(ext)s'.
			'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
				or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
				or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
				or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
				or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
				or (opts.useliteral and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
				or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
				or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
				or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
				or u'%(id)s.%(ext)s'),
			'ignoreerrors': opts.ignoreerrors,
			'ratelimit': opts.ratelimit,
			'nooverwrites': opts.nooverwrites,
			'retries': opts.retries,
			'continuedl': opts.continue_dl,
			'noprogress': opts.noprogress,
			'playliststart': opts.playliststart,
			'playlistend': opts.playlistend,
			# When downloading to stdout ('-o -'), screen messages must go
			# to stderr so they do not corrupt the downloaded data.
			'logtostderr': opts.outtmpl == '-',
			'consoletitle': opts.consoletitle,
			'nopart': opts.nopart,
			'updatetime': opts.updatetime,
			})
		fd.add_info_extractor(vimeo_ie)
		fd.add_info_extractor(youtube_search_ie)
		fd.add_info_extractor(youtube_pl_ie)
		fd.add_info_extractor(youtube_user_ie)
		fd.add_info_extractor(metacafe_ie)
		fd.add_info_extractor(dailymotion_ie)
		fd.add_info_extractor(youtube_ie)
		fd.add_info_extractor(google_ie)
		fd.add_info_extractor(google_search_ie)
		fd.add_info_extractor(photobucket_ie)
		fd.add_info_extractor(yahoo_ie)
		fd.add_info_extractor(yahoo_search_ie)
		fd.add_info_extractor(deposit_files_ie)

		# This must come last since it's the
		# fallback if none of the others work
		fd.add_info_extractor(generic_ie)

		# Update version
		if opts.update_self:
			update_self(fd, sys.argv[0])

		# Maybe do nothing
		if len(all_urls) < 1:
			if not opts.update_self:
				parser.error(u'you must provide at least one URL')
			else:
				# -U with no URLs is a valid invocation: update and quit.
				sys.exit()
		retcode = fd.download(all_urls)

		# Dump cookie jar if requested
		if opts.cookiefile is not None:
			try:
				jar.save()
			except (IOError, OSError), err:
				sys.exit(u'ERROR: unable to save cookie jar')

		sys.exit(retcode)

	except DownloadError:
		sys.exit(1)
	except SameFileError:
		sys.exit(u'ERROR: fixed output name but more than one file to download')
	except KeyboardInterrupt:
		sys.exit(u'\nERROR: Interrupted by user')