Merge PR #422 from 'kevinamadeus/master'
authorFilippo Valsorda <filippo.valsorda@gmail.com>
Tue, 9 Oct 2012 08:48:49 +0000 (10:48 +0200)
committerFilippo Valsorda <filippo.valsorda@gmail.com>
Tue, 9 Oct 2012 08:48:49 +0000 (10:48 +0200)
Add InfoExtractor for Google Plus video
(with fixes)

youtube_dl/InfoExtractors.py
youtube_dl/__init__.py

index 9e5ea7c61870f19ae30cea7670867584226123f8,ddb9fbca1fc9f64739d4e4e2e09094911cc3457f..f97611cb9b4ab681eb21f760db4c71ea1b2da98a
@@@ -2988,189 -2956,126 +2988,313 @@@ class MTVIE(InfoExtractor)
  
                return [info]
  
 -      _VALID_URL = r'(?:https://)?plus\.google\.com/(\d+)/posts/(\w+)'
 +
 +class YoukuIE(InfoExtractor):
 +
 +      _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
 +      IE_NAME = u'Youku'
 +
 +      def __init__(self, downloader=None):
 +              InfoExtractor.__init__(self, downloader)
 +
 +      def report_download_webpage(self, file_id):
 +              """Report webpage download."""
 +              self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id)
 +
 +      def report_extraction(self, file_id):
 +              """Report information extraction."""
 +              self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id)
 +
 +      def _gen_sid(self):
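 +              """Build a pseudo-random session id from the current time (ms) and two random numbers."""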
 +              nowTime = int(time.time() * 1000)
 +              random1 = random.randint(1000,1998)
 +              random2 = random.randint(1000,9999)
 +
 +              return "%d%d%d" % (nowTime, random1, random2)
 +
 +      def _get_file_ID_mix_string(self, seed):
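 +              """Deterministically shuffle the character table, using 'seed' to drive a simple LCG."""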
 +              mixed = []
 +              source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
 +              seed = float(seed)
 +              for i in range(len(source)):
 +                      seed = (seed * 211 + 30031) % 65536
 +                      index = math.floor(seed / 65536 * len(source))
 +                      mixed.append(source[int(index)])
 +                      source.remove(source[int(index)])
 +              #return ''.join(mixed)
 +              return mixed
 +
 +      def _get_file_id(self, fileId, seed):
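 +              """Decode the obfuscated fileId: each '*'-separated number indexes into the shuffled character table."""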
 +              mixed = self._get_file_ID_mix_string(seed)
 +              ids = fileId.split('*')
 +              realId = []
 +              for ch in ids:
 +                      if ch:
 +                              realId.append(mixed[int(ch)])
 +              return ''.join(realId)
 +
 +      def _real_extract(self, url):
 +              mobj = re.match(self._VALID_URL, url)
 +              if mobj is None:
 +                      self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
 +                      return
 +              video_id = mobj.group('ID')
 +
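 +              # Youku's getPlayList endpoint returns the video metadata as JSON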
 +              info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
 +
 +              request = urllib2.Request(info_url, None, std_headers)
 +              try:
 +                      self.report_download_webpage(video_id)
 +                      jsondata = urllib2.urlopen(request).read()
 +              except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 +                      self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
 +                      return
 +
 +              self.report_extraction(video_id)
 +              try:
 +                      config = json.loads(jsondata)
 +
 +                      video_title = config['data'][0]['title']
 +                      seed = config['data'][0]['seed']
 +
 +                      format = self._downloader.params.get('format', None)
 +                      supported_format = config['data'][0]['streamfileids'].keys()
 +
 +                      if format is None or format == 'best':
 +                              if 'hd2' in supported_format:
 +                                      format = 'hd2'
 +                              else:
 +                                      format = 'flv'
 +                              ext = u'flv'
 +                      elif format == 'worst':
 +                              format = 'mp4'
 +                              ext = u'mp4'
 +                      else:
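 +                              # any other requested format falls back to flv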
 +                              format = 'flv'
 +                              ext = u'flv'
 +
 +
 +                      fileid = config['data'][0]['streamfileids'][format]
 +                      seg_number = len(config['data'][0]['segs'][format])
 +
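 +                      # collect the per-segment keys needed to build each segment's download URL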
 +                      keys = []
 +                      for i in xrange(seg_number):
 +                              keys.append(config['data'][0]['segs'][format][i]['k'])
 +
 +                      # TODO: check for errors in the response
 +                      # Youku videos can only be viewed from mainland China
 +              except:
 +                      self._downloader.trouble(u'ERROR: unable to extract info section')
 +                      return
 +
 +              files_info = []
 +              sid = self._gen_sid()
 +              fileid = self._get_file_id(fileid, seed)
 +
 +              # characters at (0-based) positions 8 and 9 of the fileid encode the segment number,
 +              # so fileid[8:10] is replaced with the segment index for each part
 +              for index, key in enumerate(keys):
 +
 +                      temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
 +                      download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
 +
 +                      info = {
 +                              'id': '%s_part%02d' % (video_id, index),
 +                              'url': download_url,
 +                              'uploader': None,
 +                              'title': video_title,
 +                              'ext': ext,
 +                              'format': u'NA'
 +                      }
 +                      files_info.append(info)
 +
 +              return files_info
 +
 +
 +class XNXXIE(InfoExtractor):
 +      """Information extractor for xnxx.com"""
 +
 +      _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
 +      IE_NAME = u'xnxx'
 +      VIDEO_URL_RE = r'flv_url=(.*?)&amp;'
 +      VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
 +      VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;'
 +
 +      def report_webpage(self, video_id):
 +              """Report webpage download"""
 +              self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
 +
 +      def report_extraction(self, video_id):
 +              """Report information extraction"""
 +              self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
 +
 +      def _real_extract(self, url):
 +              mobj = re.match(self._VALID_URL, url)
 +              if mobj is None:
 +                      self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
 +                      return
 +              video_id = mobj.group(1).decode('utf-8')
 +
 +              self.report_webpage(video_id)
 +
 +              # Get webpage content
 +              try:
 +                      webpage = urllib2.urlopen(url).read()
 +              except (urllib2.URLError, httplib.HTTPException, socket.error), err:
 +                      self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
 +                      return
 +
 +              result = re.search(self.VIDEO_URL_RE, webpage)
 +              if result is None:
 +                      self._downloader.trouble(u'ERROR: unable to extract video url')
 +                      return
 +              video_url = urllib.unquote(result.group(1).decode('utf-8'))
 +
 +              result = re.search(self.VIDEO_TITLE_RE, webpage)
 +              if result is None:
 +                      self._downloader.trouble(u'ERROR: unable to extract video title')
 +                      return
 +              video_title = result.group(1).decode('utf-8')
 +
 +              result = re.search(self.VIDEO_THUMB_RE, webpage)
 +              if result is None:
 +                      self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
 +                      return
 +              video_thumbnail = result.group(1).decode('utf-8')
 +
 +              info = {'id': video_id,
 +                              'url': video_url,
 +                              'uploader': None,
 +                              'upload_date': None,
 +                              'title': video_title,
 +                              'ext': 'flv',
 +                              'format': 'flv',
 +                              'thumbnail': video_thumbnail,
 +                              'description': None,
 +                              'player_url': None}
 +
 +              return [info]
++
++
+ class GooglePlusIE(InfoExtractor):
+       """Information extractor for plus.google.com."""
 -                      self.report_extract_entry(post_url)
++      _VALID_URL = r'(?:https://)?plus\.google\.com/(?:\w+/)*?(\d+)/posts/(\w+)'
+       IE_NAME = u'plus.google'
+       def __init__(self, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+       def report_extract_entry(self, url):
+               """Report entry download."""
+               self._downloader.to_screen(u'[plus.google] Downloading entry: %s' % url.decode('utf-8'))
+       def report_date(self, upload_date):
+               """Report the entry date."""
+               self._downloader.to_screen(u'[plus.google] Entry date: %s' % upload_date)
+       def report_uploader(self, uploader):
+               """Report the uploader."""
+               self._downloader.to_screen(u'[plus.google] Uploader: %s' % uploader.decode('utf-8'))
+       def report_title(self, video_title):
+               """Report the video title."""
+               self._downloader.to_screen(u'[plus.google] Title: %s' % video_title.decode('utf-8'))
+       def report_extract_vid_page(self, video_page):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[plus.google] Extracting video page: %s' % video_page.decode('utf-8'))
+       def _real_extract(self, url):
+               # Extract id from URL
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+                       return
+               post_url = mobj.group(0)
+               video_id = mobj.group(2)
+               video_extension = 'flv'
+               # Step 1, Retrieve post webpage to extract further information
++              self.report_extract_entry(post_url)
+               request = urllib2.Request(post_url)
+               try:
 -                      """Convert timestring to a format suitable for filename"""
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % str(err))
+                       return
+               # Extract update date
+               upload_date = u'NA'
+               pattern = 'title="Timestamp">(.*?)</a>'
+               mobj = re.search(pattern, webpage)
+               if mobj:
+                       upload_date = mobj.group(1)
 -              """Get the first line for title"""
++                      # Convert timestring to a format suitable for filename
+                       upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
+                       upload_date = upload_date.strftime('%Y%m%d')
+               self.report_date(upload_date)
+               # Extract uploader
+               uploader = u'NA'
+               pattern = r'rel\="author".*?>(.*?)</a>'
+               mobj = re.search(pattern, webpage)
+               if mobj:
+                       uploader = mobj.group(1)
+               self.report_uploader(uploader)
+               # Extract title
 -              pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\s<"]'
++              # Get the first line for title
+               video_title = u'NA'
 -              if mobj is None:
++              pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]'
+               mobj = re.search(pattern, webpage)
+               if mobj:
+                       video_title = mobj.group(1)
+               self.report_title(video_title)
+               # Step 2, Simulate clicking the image box to launch the video
+               pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
+               mobj = re.search(pattern, webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video page URL')
+                       return
+               video_page = mobj.group(1)
+               request = urllib2.Request(video_page)
+               try:
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+                       return
+               self.report_extract_vid_page(video_page)
+               # Extract the video links of all sizes from the video page
+               pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
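+               # each embedded entry is a (format code, video URL) pair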
+               mobj = re.findall(pattern, webpage)
 -              video_url = unicode(video_url, "unicode_escape").encode("utf8")
++              if len(mobj) == 0:
+                       self._downloader.trouble(u'ERROR: unable to extract video links')
+                       return
+               # Sort by resolution
+               links = sorted(mobj)
+               # Choose the last entry of the sort, i.e. the highest resolution
+               video_url = links[-1]
+               # Keep only the URL; the resolution code in the tuple is no longer needed
+               video_url = video_url[-1]
+               # Unescape \uXXXX sequences such as \u0026
 -                      'url':          video_url.decode('utf-8'),
++              video_url = unicode(video_url, "unicode_escape")
+               return [{
+                       'id':           video_id.decode('utf-8'),
 -
++                      'url':          video_url,
+                       'uploader':     uploader.decode('utf-8'),
+                       'upload_date':  upload_date.decode('utf-8'),
+                       'title':        video_title.decode('utf-8'),
+                       'ext':          video_extension.decode('utf-8'),
+                       'format':       u'NA',
+                       'player_url':   None,
+               }]
index b21416dafbd8495fca65612c64b4e6f6dfda1405,fc8101f82600229b9d2671e7a7a01f98fa6624fd..15a3ec4cf8563f7d44b7c429d24e07ef7c92abbd
@@@ -353,8 -351,7 +353,9 @@@ def gen_extractors()
                MixcloudIE(),
                StanfordOpenClassroomIE(),
                MTVIE(),
 +              YoukuIE(),
 +              XNXXIE(),
+               GooglePlusIE(),
  
                GenericIE()
        ]