Merge pull request #7320 from remitamine/adobetv
diff --git a/youtube_dl/extractor/bilibili.py b/youtube_dl/extractor/bilibili.py
index 85156ce49cca1b99f9e534f48c270f5e34621339..59beb11bce71bfc6ef9b036ad123dc44e872d0be 100644
--- a/youtube_dl/extractor/bilibili.py
+++ b/youtube_dl/extractor/bilibili.py
@@ -1,18 +1,20 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import json
-import xml.etree.ElementTree as ET
+import re
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
     int_or_none,
+    unescapeHTML,
     ExtractorError,
+    xpath_text,
 )
 
 
 class BiliBiliIE(InfoExtractor):
-    _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/'
+    _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)(?:/index_(?P<page_num>\d+)\.html)?'
 
     _TESTS = [{
         'url': 'http://www.bilibili.tv/video/av1074402/',
@@ -33,57 +35,67 @@ class BiliBiliIE(InfoExtractor):
         'info_dict': {
             'id': '1041170',
             'title': '【BD1080P】刀语【诸神&异域】',
+            'description': '这是个神奇的故事~每个人不留弹幕不给走哦~切利哦!~',
+            'uploader': '枫叶逝去',
+            'timestamp': 1396501299,
         },
-        'playlist_count': 12,
+        'playlist_count': 9,
     }]
 
-    def _extract_video_info(self, cid, view_data, page_num=1, num_pages=1):
-        title = view_data['title']
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        page_num = mobj.group('page_num') or '1'
+
+        view_data = self._download_json(
+            'http://api.bilibili.com/view?type=json&appkey=8e9fc618fbd41e28&id=%s&page=%s' % (video_id, page_num),
+            video_id)
+        if 'error' in view_data:
+            raise ExtractorError('%s said: %s' % (self.IE_NAME, view_data['error']), expected=True)
+
+        cid = view_data['cid']
+        title = unescapeHTML(view_data['title'])
 
-        page = self._download_webpage(
+        doc = self._download_xml(
             'http://interface.bilibili.com/v_cdn_play?appkey=8e9fc618fbd41e28&cid=%s' % cid,
             cid,
-            'Downloading page %d/%d' % (page_num, num_pages)
+            'Downloading page %s/%s' % (page_num, view_data['pages'])
         )
-        try:
-            err_info = json.loads(page)
-            raise ExtractorError(
-                'BiliBili said: ' + err_info['error_text'], expected=True)
-        except ValueError:
-            pass
 
-        doc = ET.fromstring(page)
-        durls = doc.findall('./durl')
+        if xpath_text(doc, './result') == 'error':
+            raise ExtractorError('%s said: %s' % (self.IE_NAME, xpath_text(doc, './message')), expected=True)
 
         entries = []
 
-        for durl in durls:
-            formats = []
-            backup_url = durl.find('./backup_url')
-            if backup_url is not None:
-                formats.append({'url': backup_url.find('./url').text})
-            size = durl.find('./filesize|./size')
-            formats.append({
+        for durl in doc.findall('./durl'):
+            size = xpath_text(durl, ['./filesize', './size'])
+            formats = [{
                 'url': durl.find('./url').text,
-                'filesize': int_or_none(size.text) if size else None,
+                'filesize': int_or_none(size),
                 'ext': 'flv',
-            })
+            }]
+            backup_urls = durl.find('./backup_url')
+            if backup_urls is not None:
+                for backup_url in backup_urls.findall('./url'):
+                    formats.append({'url': backup_url.text})
+            formats.reverse()
+
             entries.append({
-                'id': '%s_part%s' % (cid, durl.find('./order').text),
+                'id': '%s_part%s' % (cid, xpath_text(durl, './order')),
                 'title': title,
-                'duration': int_or_none(durl.find('./length').text) // 1000,
+                'duration': int_or_none(xpath_text(durl, './length'), 1000),
                 'formats': formats,
             })
 
         info = {
-            'id': cid,
+            'id': compat_str(cid),
             'title': title,
             'description': view_data.get('description'),
             'thumbnail': view_data.get('pic'),
             'uploader': view_data.get('author'),
             'timestamp': int_or_none(view_data.get('created')),
-            'view_count': view_data.get('play'),
-            'duration': int_or_none(doc.find('./timelength').text),
+            'view_count': int_or_none(view_data.get('play')),
+            'duration': int_or_none(xpath_text(doc, './timelength')),
         }
 
         if len(entries) == 1:
@@ -92,22 +104,7 @@ class BiliBiliIE(InfoExtractor):
         else:
             info.update({
                 '_type': 'multi_video',
+                'id': video_id,
                 'entries': entries,
             })
             return info
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        view_data = self._download_json('http://api.bilibili.com/view?type=json&appkey=8e9fc618fbd41e28&id=%s' % video_id, video_id)
-
-        num_pages = int_or_none(view_data['pages'])
-        if num_pages > 1:
-            play_list_title = view_data['title']
-            page_list = self._download_json('http://www.bilibili.com/widget/getPageList?aid=%s' % video_id, video_id, 'Downloading page list metadata')
-            entries = []
-            for page in page_list:
-                view_data['title'] = page['pagename']
-                entries.append(self._extract_video_info(str(page['cid']), view_data, page['page'], num_pages))
-            return self.playlist_result(entries, video_id, play_list_title, view_data.get('description'))
-        else:
-            return self._extract_video_info(str(view_data['cid']), view_data)