merge upstream
diff --git a/youtube-dl b/youtube-dl
index 762bfc3ef55deb645181ef65535dec1e513c8a5a..67e1a0ffd75c7f2be68f9b92f63a987bf6c4ccf4 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -9,12 +9,8 @@
 # Author: Gergely Imreh
 # Author: Philipp Hagemeister <phihag@phihag.de>
 # License: Public domain code
-from __future__ import with_statement
-import contextlib
 import cookielib
-import ctypes
 import datetime
-import email.utils
 import gzip
 import htmlentitydefs
 import httplib
@@ -34,6 +30,13 @@ import urllib2
 import warnings
 import zlib
 
+if os.name == 'nt':
+       import ctypes
+
+try:
+       import email.utils
+except ImportError: # Python 2.4
+       import email.Utils
 try:
        import cStringIO as StringIO
 except ImportError:
@@ -47,11 +50,11 @@ except ImportError:
 
 try:
        import lxml.etree
-except ImportError: # Python < 2.6
+except ImportError:
        pass # Handled below
 
 std_headers = {
-       'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0b11) Gecko/20100101 Firefox/4.0b11',
+       'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
@@ -62,7 +65,7 @@ simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode
 
 try:
        import json
-except ImportError: # Python <2.5, use trivialjson (https://github.com/phihag/trivialjson):
+except ImportError: # Python <2.6, use trivialjson (https://github.com/phihag/trivialjson):
        import re
        class json(object):
                @staticmethod
@@ -707,8 +710,11 @@ class FileDownloader(object):
                        try:
                                descfn = filename + '.description'
                                self.report_writedescription(descfn)
-                               with contextlib.closing(open(descfn, 'wb')) as descfile:
+                               descfile = open(descfn, 'wb')
+                               try:
                                        descfile.write(info_dict['description'].encode('utf-8'))
+                               finally:
+                                       descfile.close()
                        except (OSError, IOError):
                                self.trouble(u'ERROR: Cannot write description file: %s' % str(descfn))
                                return
@@ -722,8 +728,11 @@ class FileDownloader(object):
                                self.trouble(u'ERROR: No JSON encoder found. Update to Python 2.6+, setup a json module, or leave out --write-info-json.')
                                return
                        try:
-                               with contextlib.closing(open(infofn, 'wb')) as infof:
+                               infof = open(infofn, 'wb')
+                               try:
                                        json.dump(info_dict, infof)
+                               finally:
+                                       infof.close()
                        except (OSError, IOError):
                                self.trouble(u'ERROR: Cannot write metadata to JSON file: %s' % str(infofn))
                                return
@@ -1019,7 +1028,7 @@ class InfoExtractor(object):
 class YoutubeIE(InfoExtractor):
        """Information extractor for youtube.com."""
 
-       _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
+       _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
        _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
        _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
        _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
@@ -1233,7 +1242,6 @@ class YoutubeIE(InfoExtractor):
                except NameError:
                        video_description = u'No description available.'
                        if self._downloader.params.get('forcedescription', False) or self._downloader.params.get('writedescription', False):
-                               warnings.warn(u'You are using an old Python version, install Python 2.6+ or lxml. Falling back to old video description extractor.')
                                mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
                                if mobj is not None:
                                        video_description = mobj.group(1).decode('utf-8')
@@ -1241,6 +1249,7 @@ class YoutubeIE(InfoExtractor):
                        html_parser = lxml.etree.HTMLParser(encoding='utf-8')
                        vwebpage_doc = lxml.etree.parse(StringIO.StringIO(video_webpage), html_parser)
                        video_description = u''.join(vwebpage_doc.xpath('id("eow-description")//text()'))
+                       # TODO use another parser
 
                # token
                video_token = urllib.unquote_plus(video_info['token'][0])
@@ -1248,8 +1257,15 @@ class YoutubeIE(InfoExtractor):
                # Decide which formats to download
                req_format = self._downloader.params.get('format', None)
 
-               if 'fmt_url_map' in video_info and len(video_info['fmt_url_map']) >= 1 and ',' in video_info['fmt_url_map'][0]:
-                       url_map = dict(tuple(pair.split('|')) for pair in video_info['fmt_url_map'][0].split(','))
+               if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
+                       self.report_rtmp_download()
+                       video_url_list = [(None, video_info['conn'][0])]
+               elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
+                       url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
+                       url_data = [parse_qs(uds) for uds in url_data_strs]
+                       url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
+                       url_map = dict((ud['itag'][0], ud['url'][0]) for ud in url_data)
+
                        format_limit = self._downloader.params.get('format_limit', None)
                        if format_limit is not None and format_limit in self._available_formats:
                                format_list = self._available_formats[self._available_formats.index(format_limit):]
@@ -1269,13 +1285,8 @@ class YoutubeIE(InfoExtractor):
                                        self._downloader.trouble(u'ERROR: requested format not available')
                                        return
                                video_url_list = [(req_format, url_map[req_format])] # Specific format
-
-               elif 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
-                       self.report_rtmp_download()
-                       video_url_list = [(None, video_info['conn'][0])]
-
                else:
-                       self._downloader.trouble(u'ERROR: no fmt_url_map or conn information found in video info')
+                       self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
                        return
 
                for format_param, video_real_url in video_url_list:
@@ -1285,7 +1296,6 @@ class YoutubeIE(InfoExtractor):
                        # Extension
                        video_extension = self._video_extensions.get(format_param, 'flv')
 
-                       # Find the video URL in fmt_url_map or conn paramters
                        try:
                                # Process video information
                                self._downloader.process_info({
@@ -2710,7 +2720,6 @@ class FacebookIE(InfoExtractor):
                        # Extension
                        video_extension = self._video_extensions.get(format_param, 'mp4')
 
-                       # Find the video URL in fmt_url_map or conn paramters
                        try:
                                # Process video information
                                self._downloader.process_info({
@@ -2732,7 +2741,7 @@ class FacebookIE(InfoExtractor):
 class BlipTVIE(InfoExtractor):
        """Information extractor for blip.tv"""
 
-       _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip.tv(/.+)$'
+       _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
        _URL_EXT = r'^.*\.([a-z0-9]+)$'
 
        @staticmethod
@@ -2754,7 +2763,11 @@ class BlipTVIE(InfoExtractor):
                        self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
                        return
 
-               json_url = url + ('&' if '?' in url else '?') + 'skin=json&version=2&no_wrap=1'
+               if '?' in url:
+                       cchar = '&'
+               else:
+                       cchar = '?'
+               json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
                request = urllib2.Request(json_url)
                self.report_extraction(mobj.group(1))
                try:
@@ -2764,7 +2777,10 @@ class BlipTVIE(InfoExtractor):
                        return
                try:
                        json_data = json.loads(json_code)
-                       data = json_data['Post'] if 'Post' in json_data else json_data
+                       if 'Post' in json_data:
+                               data = json_data['Post']
+                       else:
+                               data = json_data
 
                        upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
                        video_url = data['media']['url']
@@ -2958,7 +2974,7 @@ if __name__ == '__main__':
                # Parse command line
                parser = optparse.OptionParser(
                        usage='Usage: %prog [options] url...',
-                       version='2011.07.09-phihag',
+                       version='2011.08.04-phihag',
                        conflict_handler='resolve',
                )