X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2Fextractor%2Fbilibili.py;h=2d174e6f9a81da7412cd58ac316c7b5924dcde78;hb=25291b979a7ff8dc7ab57729a724e7e372c65c4b;hp=85156ce49cca1b99f9e534f48c270f5e34621339;hpb=55af2b26e0f169bef2f10a7b5f6ec8e34c6dbb6d;p=youtube-dl

diff --git a/youtube_dl/extractor/bilibili.py b/youtube_dl/extractor/bilibili.py
index 85156ce49..2d174e6f9 100644
--- a/youtube_dl/extractor/bilibili.py
+++ b/youtube_dl/extractor/bilibili.py
@@ -1,113 +1,125 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import json
-import xml.etree.ElementTree as ET
+import hashlib
+import re
 
 from .common import InfoExtractor
+from ..compat import compat_parse_qs
 from ..utils import (
     int_or_none,
-    ExtractorError,
+    float_or_none,
+    unified_timestamp,
+    urlencode_postdata,
 )
 
 
 class BiliBiliIE(InfoExtractor):
-    _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/'
+    _VALID_URL = r'https?://(?:www\.|bangumi\.|)bilibili\.(?:tv|com)/(?:video/av|anime/v/)(?P<id>\d+)'
 
-    _TESTS = [{
+    _TEST = {
         'url': 'http://www.bilibili.tv/video/av1074402/',
-        'md5': '2c301e4dab317596e837c3e7633e7d86',
+        'md5': '9fa226fe2b8a9a4d5a69b4c6a183417e',
         'info_dict': {
-            'id': '1554319',
-            'ext': 'flv',
+            'id': '1074402',
+            'ext': 'mp4',
             'title': '【金坷垃】金泡沫',
-            'duration': 308313,
+            'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
+            'duration': 308.315,
+            'timestamp': 1398012660,
             'upload_date': '20140420',
             'thumbnail': 're:^https?://.+\.jpg',
-            'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
-            'timestamp': 1397983878,
             'uploader': '菊子桑',
+            'uploader_id': '156160',
         },
-    }, {
-        'url': 'http://www.bilibili.com/video/av1041170/',
-        'info_dict': {
-            'id': '1041170',
-            'title': '【BD1080P】刀语【诸神&异域】',
-        },
-        'playlist_count': 12,
-    }]
-
-    def _extract_video_info(self, cid, view_data, page_num=1, num_pages=1):
-        title = view_data['title']
-
-        page = self._download_webpage(
-            'http://interface.bilibili.com/v_cdn_play?appkey=8e9fc618fbd41e28&cid=%s' % cid,
-            cid,
-            'Downloading page %d/%d' % (page_num, num_pages)
-        )
-        try:
-            err_info = json.loads(page)
-            raise ExtractorError(
-                'BiliBili said: ' + err_info['error_text'], expected=True)
-        except ValueError:
-            pass
-
-        doc = ET.fromstring(page)
-        durls = doc.findall('./durl')
+    }
+
+    _APP_KEY = '6f90a59ac58a4123'
+    _BILIBILI_KEY = '0bfd84cc3940035173f35e6777508326'
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        if 'anime/v' not in url:
+            cid = compat_parse_qs(self._search_regex(
+                [r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
+                 r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
+                webpage, 'player parameters'))['cid'][0]
+        else:
+            js = self._download_json(
+                'http://bangumi.bilibili.com/web_api/get_source', video_id,
+                data=urlencode_postdata({'episode_id': video_id}),
+                headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'})
+            cid = js['result']['cid']
+
+        payload = 'appkey=%s&cid=%s&otype=json&quality=2&type=mp4' % (self._APP_KEY, cid)
+        sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()
+
+        video_info = self._download_json(
+            'http://interface.bilibili.com/playurl?%s&sign=%s' % (payload, sign),
+            video_id, note='Downloading video info page')
 
         entries = []
 
-        for durl in durls:
-            formats = []
-            backup_url = durl.find('./backup_url')
-            if backup_url is not None:
-                formats.append({'url': backup_url.find('./url').text})
-            size = durl.find('./filesize|./size')
-            formats.append({
-                'url': durl.find('./url').text,
-                'filesize': int_or_none(size.text) if size else None,
-                'ext': 'flv',
-            })
+        for idx, durl in enumerate(video_info['durl']):
+            formats = [{
+                'url': durl['url'],
+                'filesize': int_or_none(durl['size']),
+            }]
+            for backup_url in durl.get('backup_url', []):
+                formats.append({
+                    'url': backup_url,
+                    # backup URLs have lower priorities
+                    'preference': -2 if 'hd.mp4' in backup_url else -3,
+                })
+
+            self._sort_formats(formats)
+
             entries.append({
-                'id': '%s_part%s' % (cid, durl.find('./order').text),
-                'title': title,
-                'duration': int_or_none(durl.find('./length').text) // 1000,
+                'id': '%s_part%s' % (video_id, idx),
+                'duration': float_or_none(durl.get('length'), 1000),
                 'formats': formats,
             })
 
+        title = self._html_search_regex('<h1[^>]+title="([^"]+)">', webpage, 'title')
+        description = self._html_search_meta('description', webpage)
+        timestamp = unified_timestamp(self._html_search_regex(
+            r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time', fatal=False))
+        thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)
+
+        # TODO 'view_count' requires deobfuscating Javascript
         info = {
-            'id': cid,
+            'id': video_id,
             'title': title,
-            'description': view_data.get('description'),
-            'thumbnail': view_data.get('pic'),
-            'uploader': view_data.get('author'),
-            'timestamp': int_or_none(view_data.get('created')),
-            'view_count': view_data.get('play'),
-            'duration': int_or_none(doc.find('./timelength').text),
+            'description': description,
+            'timestamp': timestamp,
+            'thumbnail': thumbnail,
+            'duration': float_or_none(video_info.get('timelength'), scale=1000),
         }
 
+        uploader_mobj = re.search(
+            r'<a[^>]+href="https?://space\.bilibili\.com/(?P<id>\d+)"[^>]+title="(?P<name>[^"]+)"',
+            webpage)
+        if uploader_mobj:
+            info.update({
+                'uploader': uploader_mobj.group('name'),
+                'uploader_id': uploader_mobj.group('id'),
+            })
+
+        for entry in entries:
+            entry.update(info)
+
         if len(entries) == 1:
-            entries[0].update(info)
             return entries[0]
         else:
-            info.update({
+            for idx, entry in enumerate(entries):
+                entry['id'] = '%s_part%d' % (video_id, (idx + 1))
+
+            return {
                 '_type': 'multi_video',
+                'id': video_id,
+                'title': title,
+                'description': description,
                 'entries': entries,
-            })
-            return info
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        view_data = self._download_json('http://api.bilibili.com/view?type=json&appkey=8e9fc618fbd41e28&id=%s' % video_id, video_id)
-
-        num_pages = int_or_none(view_data['pages'])
-        if num_pages > 1:
-            play_list_title = view_data['title']
-            page_list = self._download_json('http://www.bilibili.com/widget/getPageList?aid=%s' % video_id, video_id, 'Downloading page list metadata')
-            entries = []
-            for page in page_list:
-                view_data['title'] = page['pagename']
-                entries.append(self._extract_video_info(str(page['cid']), view_data, page['page'], num_pages))
-            return self.playlist_result(entries, video_id, play_list_title, view_data.get('description'))
-        else:
-            return self._extract_video_info(str(view_data['cid']), view_data)
+            }
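
Note (not part of the patch above): the rewritten extractor signs its playurl request by MD5-hashing the query string concatenated with an app secret. Below is a minimal standalone sketch of that step, assuming only the endpoint and the two key constants copied from the diff; the cid value in the usage line is purely illustrative.

# Standalone sketch of the playurl signing used in the new extractor.
# Constants and endpoint are taken from the diff above; the example cid is made up.
import hashlib

APP_KEY = '6f90a59ac58a4123'
BILIBILI_KEY = '0bfd84cc3940035173f35e6777508326'


def signed_playurl(cid):
    # Build the query string, then sign it: sign = md5(payload + secret).
    payload = 'appkey=%s&cid=%s&otype=json&quality=2&type=mp4' % (APP_KEY, cid)
    sign = hashlib.md5((payload + BILIBILI_KEY).encode('utf-8')).hexdigest()
    return 'http://interface.bilibili.com/playurl?%s&sign=%s' % (payload, sign)


if __name__ == '__main__':
    print(signed_playurl('1074402'))  # illustrative cid only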