Merge branch 'master' of https://github.com/rg3/youtube-dl into bilibili
author remitamine <remitamine@gmail.com>
Thu, 3 Dec 2015 19:05:11 +0000 (20:05 +0100)
committer remitamine <remitamine@gmail.com>
Thu, 3 Dec 2015 19:05:11 +0000 (20:05 +0100)
youtube_dl/extractor/bilibili.py

index 935fcc55c7844ad6fbb4e7e706824c7bf74a50d7,6c66a12368ea0a963d89ef5922c9d83f3019ddfc..1c3644587f6f49e73f9bd26fcb187d09c60c3e91
  from __future__ import unicode_literals
  
  import re
 -import itertools
  import json
- import xml.etree.ElementTree as ET
  
  from .common import InfoExtractor
+ from ..compat import (
+     compat_etree_fromstring,
+ )
  from ..utils import (
      int_or_none,
 -    unified_strdate,
 +    unescapeHTML,
      ExtractorError,
  )
  
  
  class BiliBiliIE(InfoExtractor):
 -    _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/'
 +    _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)(?:/index_(?P<page_num>\d+).html)?'
  
      _TESTS = [{
          'url': 'http://www.bilibili.tv/video/av1074402/',
          'md5': '2c301e4dab317596e837c3e7633e7d86',
          'info_dict': {
 -            'id': '1074402_part1',
 +            'id': '1554319',
              'ext': 'flv',
              'title': '【金坷垃】金泡沫',
 -            'duration': 308,
 +            'duration': 308313,
              'upload_date': '20140420',
              'thumbnail': 're:^https?://.+\.jpg',
 +            'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
 +            'timestamp': 1397983878,
 +            'uploader': '菊子桑',
          },
      }, {
          'url': 'http://www.bilibili.com/video/av1041170/',
          'info_dict': {
              'id': '1041170',
              'title': '【BD1080P】刀语【诸神&异域】',
 +            'description': '这是个神奇的故事~每个人不留弹幕不给走哦~切利哦!~',
 +            'uploader': '枫叶逝去',
 +            'timestamp': 1396501299,
          },
          'playlist_count': 9,
      }]
  
      def _real_extract(self, url):
 -        video_id = self._match_id(url)
 -        webpage = self._download_webpage(url, video_id)
 -
 -        if '(此视频不存在或被删除)' in webpage:
 -            raise ExtractorError(
 -                'The video does not exist or was deleted', expected=True)
 -
 -        if '>你没有权限浏览! 由于版权相关问题 我们不对您所在的地区提供服务<' in webpage:
 -            raise ExtractorError(
 -                'The video is not available in your region due to copyright reasons',
 -                expected=True)
 -
 -        video_code = self._search_regex(
 -            r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code')
 -
 -        title = self._html_search_meta(
 -            'media:title', video_code, 'title', fatal=True)
 -        duration_str = self._html_search_meta(
 -            'duration', video_code, 'duration')
 -        if duration_str is None:
 -            duration = None
 -        else:
 -            duration_mobj = re.match(
 -                r'^T(?:(?P<hours>[0-9]+)H)?(?P<minutes>[0-9]+)M(?P<seconds>[0-9]+)S$',
 -                duration_str)
 -            duration = (
 -                int_or_none(duration_mobj.group('hours'), default=0) * 3600 +
 -                int(duration_mobj.group('minutes')) * 60 +
 -                int(duration_mobj.group('seconds')))
 -        upload_date = unified_strdate(self._html_search_meta(
 -            'uploadDate', video_code, fatal=False))
 -        thumbnail = self._html_search_meta(
 -            'thumbnailUrl', video_code, 'thumbnail', fatal=False)
 -
 -        cid = self._search_regex(r'cid=(\d+)', webpage, 'cid')
 -
 -        entries = []
 -
 -        lq_page = self._download_webpage(
 -            'http://interface.bilibili.com/v_cdn_play?appkey=1&cid=%s' % cid,
 -            video_id,
 -            note='Downloading LQ video info'
 +        mobj = re.match(self._VALID_URL, url)
 +        video_id = mobj.group('id')
 +        page_num = mobj.group('page_num') or '1'
 +
 +        view_data = self._download_json(
 +            'http://api.bilibili.com/view?type=json&appkey=8e9fc618fbd41e28&id=%s&page=%s' % (video_id, page_num),
 +            video_id)
 +        if 'error' in view_data:
 +            raise ExtractorError('%s said: %s' % (self.IE_NAME, view_data['error']), expected=True)
 +
 +        cid = view_data['cid']
 +        title = unescapeHTML(view_data['title'])
 +
 +        page = self._download_webpage(
 +            'http://interface.bilibili.com/v_cdn_play?appkey=8e9fc618fbd41e28&cid=%s' % cid,
 +            cid,
 +            'Downloading page %s/%s' % (page_num, view_data['pages'])
          )
          try:
 -            err_info = json.loads(lq_page)
 +            err_info = json.loads(page)
              raise ExtractorError(
                  'BiliBili said: ' + err_info['error_text'], expected=True)
          except ValueError:
              pass
  
-         doc = ET.fromstring(page)
 -        lq_doc = compat_etree_fromstring(lq_page)
 -        lq_durls = lq_doc.findall('./durl')
++        doc = compat_etree_fromstring(page)
  
 -        hq_doc = self._download_xml(
 -            'http://interface.bilibili.com/playurl?appkey=1&cid=%s' % cid,
 -            video_id,
 -            note='Downloading HQ video info',
 -            fatal=False,
 -        )
 -        if hq_doc is not False:
 -            hq_durls = hq_doc.findall('./durl')
 -            assert len(lq_durls) == len(hq_durls)
 -        else:
 -            hq_durls = itertools.repeat(None)
 +        entries = []
  
 -        i = 1
 -        for lq_durl, hq_durl in zip(lq_durls, hq_durls):
 +        for durl in doc.findall('./durl'):
 +            size = durl.find('./filesize|./size')
              formats = [{
 -                'format_id': 'lq',
 -                'quality': 1,
 -                'url': lq_durl.find('./url').text,
 -                'filesize': int_or_none(
 -                    lq_durl.find('./size'), get_attr='text'),
 +                'url': durl.find('./url').text,
 +                'filesize': int_or_none(size.text) if size else None,
 +                'ext': 'flv',
              }]
 -            if hq_durl is not None:
 -                formats.append({
 -                    'format_id': 'hq',
 -                    'quality': 2,
 -                    'ext': 'flv',
 -                    'url': hq_durl.find('./url').text,
 -                    'filesize': int_or_none(
 -                        hq_durl.find('./size'), get_attr='text'),
 -                })
 -            self._sort_formats(formats)
 +            backup_urls = durl.find('./backup_url')
 +            if backup_urls is not None:
 +                for backup_url in backup_urls.findall('./url'):
 +                    formats.append({'url': backup_url.text})
 +            formats.reverse()
  
              entries.append({
 -                'id': '%s_part%d' % (video_id, i),
 +                'id': '%s_part%s' % (cid, durl.find('./order').text),
                  'title': title,
 +                'duration': int_or_none(durl.find('./length').text) // 1000,
                  'formats': formats,
 -                'duration': duration,
 -                'upload_date': upload_date,
 -                'thumbnail': thumbnail,
              })
  
 -            i += 1
 -
 -        return {
 -            '_type': 'multi_video',
 -            'entries': entries,
 -            'id': video_id,
 -            'title': title
 +        info = {
 +            'id': str(cid),
 +            'title': title,
 +            'description': view_data.get('description'),
 +            'thumbnail': view_data.get('pic'),
 +            'uploader': view_data.get('author'),
 +            'timestamp': int_or_none(view_data.get('created')),
 +            'view_count': view_data.get('play'),
 +            'duration': int_or_none(doc.find('./timelength').text),
          }
 +
 +        if len(entries) == 1:
 +            entries[0].update(info)
 +            return entries[0]
 +        else:
 +            info.update({
 +                '_type': 'multi_video',
 +                'id': video_id,
 +                'entries': entries,
 +            })
 +            return info
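
For orientation, here is a minimal standalone sketch of the request flow that the merged extractor ends up implementing: query the view API for metadata and the cid, then fetch the v_cdn_play XML and read one <durl> element per video part. The endpoints and appkey are copied from the diff above; the helper name fetch_bilibili_parts, the use of plain urllib, and the assumption that these APIs still respond in the same shape are illustrative only and not part of the commit.

import json
import xml.etree.ElementTree as ET
from urllib.request import urlopen

APPKEY = '8e9fc618fbd41e28'  # appkey used by the extractor in the diff above


def fetch_bilibili_parts(video_id, page_num=1):
    # Hypothetical helper: mirrors the extractor's two-step flow for illustration.
    # Step 1: metadata and cid from the view API (JSON).
    view_url = (
        'http://api.bilibili.com/view?type=json&appkey=%s&id=%s&page=%s'
        % (APPKEY, video_id, page_num))
    view_data = json.loads(urlopen(view_url).read().decode('utf-8'))
    if 'error' in view_data:
        raise RuntimeError('bilibili said: %s' % view_data['error'])
    cid = view_data['cid']

    # Step 2: stream URLs from the CDN play interface (XML).
    play_url = (
        'http://interface.bilibili.com/v_cdn_play?appkey=%s&cid=%s'
        % (APPKEY, cid))
    page = urlopen(play_url).read().decode('utf-8')
    try:
        # Error responses come back as JSON rather than XML.
        err_info = json.loads(page)
        raise RuntimeError('bilibili said: ' + err_info['error_text'])
    except ValueError:
        pass
    doc = ET.fromstring(page)

    # Step 3: each <durl> describes one part, with a primary URL and
    # optional backup URLs.
    parts = []
    for durl in doc.findall('./durl'):
        urls = [durl.find('./url').text]
        backup = durl.find('./backup_url')
        if backup is not None:
            urls += [u.text for u in backup.findall('./url')]
        parts.append({
            'order': durl.find('./order').text,
            'urls': urls,
        })
    return view_data.get('title'), parts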