Merge branch 'master' of https://github.com/rg3/youtube-dl into bilibili
author remitamine <remitamine@gmail.com>
Thu, 3 Dec 2015 19:05:11 +0000 (20:05 +0100)
committer remitamine <remitamine@gmail.com>
Thu, 3 Dec 2015 19:05:11 +0000 (20:05 +0100)
youtube_dl/extractor/bilibili.py

index 935fcc55c7844ad6fbb4e7e706824c7bf74a50d7,6c66a12368ea0a963d89ef5922c9d83f3019ddfc..1c3644587f6f49e73f9bd26fcb187d09c60c3e91
@@@ -2,13 -2,16 +2,15 @@@
  from __future__ import unicode_literals
  
  import re
 -import itertools
  import json
- import xml.etree.ElementTree as ET
  
  from .common import InfoExtractor
+ from ..compat import (
+     compat_etree_fromstring,
+ )
  from ..utils import (
      int_or_none,
 -    unified_strdate,
 +    unescapeHTML,
      ExtractorError,
  )
  
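Aside from dropping the now-unused itertools import, the hunk above replaces the direct xml.etree.ElementTree usage with youtube-dl's compat_etree_fromstring, an ET.fromstring wrapper that also returns unicode element text on Python 2. A minimal illustration of the mismatch being papered over (a hypothetical demo, not code from this commit):

    from __future__ import print_function
    import xml.etree.ElementTree as ET

    page = u'<video><durl><url>http://example.com/1.flv</url></durl></video>'
    doc = ET.fromstring(page)
    url = doc.find('./durl/url').text
    # Python 3 prints <class 'str'> here; Python 2's stock ElementTree
    # returns a byte string for ASCII-only text nodes, which is the
    # inconsistency compat_etree_fromstring smooths over.
    print(type(url), url)
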
@@@ -68,48 -90,55 +70,48 @@@ class BiliBiliIE(InfoExtractor)
          except ValueError:
              pass
  
-         doc = ET.fromstring(page)
 -        lq_doc = compat_etree_fromstring(lq_page)
 -        lq_durls = lq_doc.findall('./durl')
++        doc = compat_etree_fromstring(page)
  
 -        hq_doc = self._download_xml(
 -            'http://interface.bilibili.com/playurl?appkey=1&cid=%s' % cid,
 -            video_id,
 -            note='Downloading HQ video info',
 -            fatal=False,
 -        )
 -        if hq_doc is not False:
 -            hq_durls = hq_doc.findall('./durl')
 -            assert len(lq_durls) == len(hq_durls)
 -        else:
 -            hq_durls = itertools.repeat(None)
 +        entries = []
  
 -        i = 1
 -        for lq_durl, hq_durl in zip(lq_durls, hq_durls):
 +        for durl in doc.findall('./durl'):
 +            size = durl.find('./filesize') if durl.find('./filesize') is not None else durl.find('./size')
              formats = [{
 -                'format_id': 'lq',
 -                'quality': 1,
 -                'url': lq_durl.find('./url').text,
 -                'filesize': int_or_none(
 -                    lq_durl.find('./size'), get_attr='text'),
 +                'url': durl.find('./url').text,
 +                'filesize': int_or_none(size.text) if size is not None else None,
 +                'ext': 'flv',
              }]
 -            if hq_durl is not None:
 -                formats.append({
 -                    'format_id': 'hq',
 -                    'quality': 2,
 -                    'ext': 'flv',
 -                    'url': hq_durl.find('./url').text,
 -                    'filesize': int_or_none(
 -                        hq_durl.find('./size'), get_attr='text'),
 -                })
 -            self._sort_formats(formats)
 +            backup_urls = durl.find('./backup_url')
 +            if backup_urls is not None:
 +                for backup_url in backup_urls.findall('./url'):
 +                    formats.append({'url': backup_url.text})
 +            formats.reverse()
  
              entries.append({
 -                'id': '%s_part%d' % (video_id, i),
 +                'id': '%s_part%s' % (cid, durl.find('./order').text),
                  'title': title,
 +                'duration': int_or_none(durl.find('./length').text, scale=1000),
                  'formats': formats,
 -                'duration': duration,
 -                'upload_date': upload_date,
 -                'thumbnail': thumbnail,
              })
  
 -            i += 1
 -
 -        return {
 -            '_type': 'multi_video',
 -            'entries': entries,
 -            'id': video_id,
 -            'title': title
 +        info = {
 +            'id': str(cid),
 +            'title': title,
 +            'description': view_data.get('description'),
 +            'thumbnail': view_data.get('pic'),
 +            'uploader': view_data.get('author'),
 +            'timestamp': int_or_none(view_data.get('created')),
 +            'view_count': view_data.get('play'),
 +            'duration': int_or_none(doc.find('./timelength').text, scale=1000),
          }
 +
 +        if len(entries) == 1:
 +            entries[0].update(info)
 +            return entries[0]
 +        else:
 +            info.update({
 +                '_type': 'multi_video',
 +                'id': video_id,
 +                'entries': entries,
 +            })
 +            return info
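
For reference, the reworked per-part loop in the second hunk can be exercised standalone. Below is a minimal sketch against a made-up playurl document (the element names match what the extractor reads; the sample values and the plain int() standing in for youtube-dl's int_or_none are illustrative only):

    import xml.etree.ElementTree as ET

    SAMPLE = b'''<video>
      <timelength>123456</timelength>
      <durl>
        <order>1</order>
        <length>123456</length>
        <size>4242</size>
        <url>http://example.com/part1.flv</url>
        <backup_url>
          <url>http://example.com/backup1.flv</url>
        </backup_url>
      </durl>
    </video>'''

    doc = ET.fromstring(SAMPLE)
    entries = []
    for durl in doc.findall('./durl'):
        size = durl.find('./size')
        formats = [{
            'url': durl.find('./url').text,
            # `is not None` matters here: an Element with no children is
            # falsy, so a bare `if size` would silently drop the filesize.
            'filesize': int(size.text) if size is not None else None,
            'ext': 'flv',
        }]
        backup_urls = durl.find('./backup_url')
        if backup_urls is not None:
            for backup_url in backup_urls.findall('./url'):
                formats.append({'url': backup_url.text})
        # Reversing puts the primary URL last; without _sort_formats,
        # youtube-dl treats the last format in the list as the best one.
        formats.reverse()
        entries.append({
            'id': 'cid_part%s' % durl.find('./order').text,
            'duration': int(durl.find('./length').text) // 1000,
            'formats': formats,
        })
    print(entries)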