Merge branch 'master' into vimeo
author	Rogério Brito <rbrito@ime.usp.br>
Fri, 4 Feb 2011 05:51:16 +0000 (03:51 -0200)
committer	Rogério Brito <rbrito@ime.usp.br>
Fri, 4 Feb 2011 05:51:16 +0000 (03:51 -0200)
1  2 
youtube-dl

diff --combined youtube-dl
index e7459062df1ce0f14b33ea37e66867ffe9bdac42,dd875a38eabf84b1e6e952abcc7bebf20956e590..b96156be7f327ff2960a0e6903918683b56f0ae6
@@@ -5,6 -5,7 +5,7 @@@
  # Author: Benjamin Johnson
  # Author: Vasyl' Vavrychuk
  # Author: Witold Baryluk
+ # Author: Paweł Paprota
  # License: Public domain code
  import cookielib
  import ctypes
@@@ -36,7 -37,7 +37,7 @@@ except ImportError
        from cgi import parse_qs
  
  std_headers = {
-       'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101028 Firefox/3.6.12',
+       'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0b10) Gecko/20100101 Firefox/4.0b10',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
@@@ -1718,118 -1719,6 +1719,118 @@@ class YahooIE(InfoExtractor)
                        self._downloader.trouble(u'\nERROR: unable to download video')
  
  
 +class VimeoIE(InfoExtractor):
 +      """Information extractor for vimeo.com."""
 +
 +      # _VALID_URL matches Vimeo URLs
 +      _VALID_URL = r'(?:http://)?vimeo\.com/([0-9]+)'
 +
 +      def __init__(self, downloader=None):
 +              InfoExtractor.__init__(self, downloader)
 +
 +      @staticmethod
 +      def suitable(url):
 +              return (re.match(VimeoIE._VALID_URL, url) is not None)
 +
 +      def report_download_webpage(self, video_id):
 +              """Report webpage download."""
 +              self._downloader.to_screen(u'[video.vimeo] %s: Downloading webpage' % video_id)
 +
 +      def report_extraction(self, video_id):
 +              """Report information extraction."""
 +              self._downloader.to_screen(u'[video.vimeo] %s: Extracting information' % video_id)
 +
 +      def _real_initialize(self):
 +              return
 +
 +      def _real_extract(self, url, new_video=True):
 +              # Extract ID from URL
 +              mobj = re.match(self._VALID_URL, url)
 +              if mobj is None:
 +                      self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
 +                      return
 +
 +              # At this point we have a new video
 +              self._downloader.increment_downloads()
 +              video_id = mobj.group(1)
 +              video_extension = 'flv' # FIXME
 +
 +              # Retrieve video webpage to extract further information
 +              request = urllib2.Request("http://vimeo.com/moogaloop/load/clip:%s" % video_id, None, std_headers)
 +              try:
 +                      self.report_download_webpage(video_id)
 +                      webpage = urllib2.urlopen(request).read()
 +              except (urllib2.URLError, httplib.HTTPException, socket.error), err:
 +                      self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
 +                      return
 +
 +              # Extract uploader and title from webpage
 +              self.report_extraction(video_id)
 +              mobj = re.search(r'<caption>(.*?)</caption>', webpage)
 +              if mobj is None:
 +                      self._downloader.trouble(u'ERROR: unable to extract video title')
 +                      return
 +              video_title = mobj.group(1).decode('utf-8')
 +              simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
 +
 +              mobj = re.search(r'<uploader_url>http://vimeo.com/(.*?)</uploader_url>', webpage)
 +              if mobj is None:
 +                      self._downloader.trouble(u'ERROR: unable to extract video uploader')
 +                      return
 +              video_uploader = mobj.group(1).decode('utf-8')
 +
 +              # Extract video thumbnail
 +              mobj = re.search(r'<thumbnail>(.*?)</thumbnail>', webpage)
 +              if mobj is None:
 +                      self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
 +                      return
 +              video_thumbnail = mobj.group(1).decode('utf-8')
 +
 +              # # Extract video description
 +              # mobj = re.search(r'<meta property="og:description" content="(.*)" />', webpage)
 +              # if mobj is None:
 +              #       self._downloader.trouble(u'ERROR: unable to extract video description')
 +              #       return
 +              # video_description = mobj.group(1).decode('utf-8')
 +              # if not video_description: video_description = 'No description available.'
 +              video_description = 'Foo.' # FIXME: placeholder; real extraction is commented out above
 +
 +              # Extract request signature
 +              mobj = re.search(r'<request_signature>(.*?)</request_signature>', webpage)
 +              if mobj is None:
 +                      self._downloader.trouble(u'ERROR: unable to extract request signature')
 +                      return
 +              sig = mobj.group(1).decode('utf-8')
 +
 +              # Extract request signature expiration
 +              mobj = re.search(r'<request_signature_expires>(.*?)</request_signature_expires>', webpage)
 +              if mobj is None:
 +                      self._downloader.trouble(u'ERROR: unable to extract request signature expiration')
 +                      return
 +              sig_exp = mobj.group(1).decode('utf-8')
 +
 +              video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s" % (video_id, sig, sig_exp)
 +
 +              try:
 +                      # Process video information
 +                      self._downloader.process_info({
 +                              'id':           video_id.decode('utf-8'),
 +                              'url':          video_url,
 +                              'uploader':     video_uploader,
 +                              'upload_date':  u'NA',
 +                              'title':        video_title,
 +                              'stitle':       simple_title,
 +                              'ext':          video_extension.decode('utf-8'),
 +                              'thumbnail':    video_thumbnail,
 +                              'description':  video_description,
 +                              'player_url':   None,
 +                      })
 +              except UnavailableVideoError:
 +                      self._downloader.trouble(u'ERROR: unable to download video')
 +
 +
  class GenericIE(InfoExtractor):
        """Generic last-resort information extractor."""
  
@@@ -2207,8 -2096,8 +2208,8 @@@ class YahooSearchIE(InfoExtractor)
  class YoutubePlaylistIE(InfoExtractor):
        """Information Extractor for YouTube playlists."""
  
-       _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists)\?.*?p=|user/.*?/user/|p/)([^&]+).*'
-       _TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s&gl=US&hl=en'
+       _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists|artist)\?.*?(p|a)=|user/.*?/user/|p/)([^&]+).*'
+       _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
        _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
        _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
        _youtube_ie = None
                        return
  
                # Download playlist pages
-               playlist_id = mobj.group(1)
+               # prefix is 'p' as default for playlists but there are other types that need extra care
+               playlist_prefix = mobj.group(1)
+               if playlist_prefix == 'a':
+                       playlist_access = 'artist'
+               else:
+                       playlist_access = 'view_play_list'
+               playlist_id = mobj.group(2)
                video_ids = []
                pagenum = 1
  
                while True:
                        self.report_download_page(playlist_id, pagenum)
-                       request = urllib2.Request(self._TEMPLATE_URL % (playlist_id, pagenum))
+                       request = urllib2.Request(self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum))
                        try:
                                page = urllib2.urlopen(request).read()
                        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
  class YoutubeUserIE(InfoExtractor):
        """Information Extractor for YouTube users."""
  
-       _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/user/(.*)'
+       _VALID_URL = r'(?:(?:(?:http://)?(?:\w+\.)?youtube.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
        _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
-       _VIDEO_INDICATOR = r'http://gdata.youtube.com/feeds/api/videos/(.*)' # XXX Fix this.
+       _GDATA_PAGE_SIZE = 50
+       _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
+       _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
        _youtube_ie = None
  
        def __init__(self, youtube_ie, downloader=None):
        def suitable(url):
                return (re.match(YoutubeUserIE._VALID_URL, url) is not None)
  
-       def report_download_page(self, username):
+       def report_download_page(self, username, start_index):
                """Report attempt to download user page."""
-               self._downloader.to_screen(u'[youtube] user %s: Downloading page ' % (username))
+               self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
+                                          (username, start_index, start_index + self._GDATA_PAGE_SIZE))
  
        def _real_initialize(self):
                self._youtube_ie.initialize()
                        self._downloader.trouble(u'ERROR: invalid url: %s' % url)
                        return
  
-               # Download user page
                username = mobj.group(1)
+               # Download video ids using YouTube Data API. Result size per
+               # query is limited (currently to 50 videos) so we need to query
+               # page by page until there are no video ids - it means we got
+               # all of them.
                video_ids = []
-               pagenum = 1
+               pagenum = 0
  
-               self.report_download_page(username)
-               request = urllib2.Request(self._TEMPLATE_URL % (username))
-               try:
-                       page = urllib2.urlopen(request).read()
-               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                       self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
-                       return
+               while True:
+                       start_index = pagenum * self._GDATA_PAGE_SIZE + 1
+                       self.report_download_page(username, start_index)
+                       request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
+                       try:
+                               page = urllib2.urlopen(request).read()
+                       except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                               self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                               return
+                       # Extract video identifiers
+                       ids_in_page = []
  
-               # Extract video identifiers
-               ids_in_page = []
+                       for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                               if mobj.group(1) not in ids_in_page:
+                                       ids_in_page.append(mobj.group(1))
  
-               for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-                       if mobj.group(1) not in ids_in_page:
-                               ids_in_page.append(mobj.group(1))
-               video_ids.extend(ids_in_page)
+                       video_ids.extend(ids_in_page)
+                       # A little optimization - if current page is not
+                       # "full", ie. does not contain PAGE_SIZE video ids then
+                       # we can assume that this page is the last one - there
+                       # are no more ids on further pages - no need to query
+                       # again.
+                       if len(ids_in_page) < self._GDATA_PAGE_SIZE:
+                               break
  
+                       pagenum += 1
+               all_ids_count = len(video_ids)
                playliststart = self._downloader.params.get('playliststart', 1) - 1
                playlistend = self._downloader.params.get('playlistend', -1)
-               video_ids = video_ids[playliststart:playlistend]
  
-               for id in video_ids:
-                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
-               return
+               if playlistend == -1:
+                       video_ids = video_ids[playliststart:]
+               else:
+                       video_ids = video_ids[playliststart:playlistend]
+                       
+               self._downloader.to_screen("[youtube] user %s: Collected %d video ids (downloading %d of them)" %
+                                          (username, all_ids_count, len(video_ids)))
+               for video_id in video_ids:
+                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)
  
  class DepositFilesIE(InfoExtractor):
        """Information extractor for depositfiles.com"""
@@@ -2484,7 -2411,7 +2523,7 @@@ if __name__ == '__main__'
                # Parse command line
                parser = optparse.OptionParser(
                        usage='Usage: %prog [options] url...',
-                       version='2010.12.09',
+                       version='2011.01.30',
                        conflict_handler='resolve',
                )
  
                        parser.error(u'invalid playlist end number specified')
  
                # Information extractors
 +              vimeo_ie = VimeoIE()
                youtube_ie = YoutubeIE()
                metacafe_ie = MetacafeIE(youtube_ie)
                dailymotion_ie = DailymotionIE()
                        'nopart': opts.nopart,
                        'updatetime': opts.updatetime,
                        })
 +              fd.add_info_extractor(vimeo_ie)
                fd.add_info_extractor(youtube_search_ie)
                fd.add_info_extractor(youtube_pl_ie)
                fd.add_info_extractor(youtube_user_ie)