Merge branch 'ping-viki-shows'
diff --git a/youtube_dl/extractor/qqmusic.py b/youtube_dl/extractor/qqmusic.py
index d0ea4a7698d57ef418c7d0eec56004f0c2c274e3..b540033e25a8c8e033f9d7f7b49d6b6ae46f755b 100644
--- a/youtube_dl/extractor/qqmusic.py
+++ b/youtube_dl/extractor/qqmusic.py
@@ -9,11 +9,13 @@ from .common import InfoExtractor
 from ..utils import (
     strip_jsonp,
     unescapeHTML,
+    js_to_json,
 )
 from ..compat import compat_urllib_request
 
 
 class QQMusicIE(InfoExtractor):
+    IE_NAME = 'qqmusic'
     _VALID_URL = r'http://y.qq.com/#type=song&mid=(?P<id>[0-9A-Za-z]+)'
     _TESTS = [{
         'url': 'http://y.qq.com/#type=song&mid=004295Et37taLD',
@@ -24,6 +26,7 @@ class QQMusicIE(InfoExtractor):
             'title': '可惜没如果',
             'upload_date': '20141227',
             'creator': '林俊杰',
+            'description': 'md5:d327722d0361576fde558f1ac68a7065',
         }
     }]
 
@@ -40,17 +43,25 @@ class QQMusicIE(InfoExtractor):
         detail_info_page = self._download_webpage(
             'http://s.plcloud.music.qq.com/fcgi-bin/fcg_yqq_song_detail_info.fcg?songmid=%s&play=0' % mid,
             mid, note='Download song detail info',
-            errnote='Unable to get song detail info')
+            errnote='Unable to get song detail info', encoding='gbk')
 
         song_name = self._html_search_regex(
             r"songname:\s*'([^']+)'", detail_info_page, 'song name')
 
         publish_time = self._html_search_regex(
             r'发行时间:(\d{4}-\d{2}-\d{2})', detail_info_page,
-            'publish time').replace('-', '')
+            'publish time', default=None)
+        if publish_time:
+            publish_time = publish_time.replace('-', '')
 
         singer = self._html_search_regex(
-            r"singer:\s*'([^']+)", detail_info_page, 'singer')
+            r"singer:\s*'([^']+)", detail_info_page, 'singer', default=None)
+
+        lrc_content = self._html_search_regex(
+            r'<div class="content" id="lrc_content"[^<>]*>([^<>]+)</div>',
+            detail_info_page, 'LRC lyrics', default=None)
+        if lrc_content:
+            lrc_content = lrc_content.replace('\\n', '\n')
 
         guid = self.m_r_get_ruin()
 
@@ -66,6 +77,7 @@ class QQMusicIE(InfoExtractor):
             'title': song_name,
             'upload_date': publish_time,
             'creator': singer,
+            'description': lrc_content,
         }
 
 
@@ -74,10 +86,6 @@ class QQPlaylistBaseIE(InfoExtractor):
     def qq_static_url(category, mid):
         return 'http://y.qq.com/y/static/%s/%s/%s/%s.html' % (category, mid[-2], mid[-1], mid)
 
-    @staticmethod
-    def qq_song_url(mid):
-        return 'http://y.qq.com/#type=song&mid=%s' % mid
-
     @classmethod
     def get_entries_from_page(cls, page):
         entries = []
@@ -85,12 +93,14 @@ class QQPlaylistBaseIE(InfoExtractor):
         for item in re.findall(r'class="data"[^<>]*>([^<>]+)</', page):
             song_mid = unescapeHTML(item).split('|')[-5]
             entries.append(cls.url_result(
-                cls.qq_song_url(song_mid), 'QQMusic', song_mid))
+                'http://y.qq.com/#type=song&mid=' + song_mid, 'QQMusic',
+                song_mid))
 
         return entries
 
 
 class QQMusicSingerIE(QQPlaylistBaseIE):
+    IE_NAME = 'qqmusic:singer'
     _VALID_URL = r'http://y.qq.com/#type=singer&mid=(?P<id>[0-9A-Za-z]+)'
     _TEST = {
         'url': 'http://y.qq.com/#type=singer&mid=001BLpXF2DyJe2',
@@ -134,6 +144,7 @@ class QQMusicSingerIE(QQPlaylistBaseIE):
 
 
 class QQMusicAlbumIE(QQPlaylistBaseIE):
+    IE_NAME = 'qqmusic:album'
     _VALID_URL = r'http://y.qq.com/#type=album&mid=(?P<id>[0-9A-Za-z]+)'
 
     _TEST = {
@@ -163,3 +174,67 @@ class QQMusicAlbumIE(QQPlaylistBaseIE):
             album_page, 'album details', default=None)
 
         return self.playlist_result(entries, mid, album_name, album_detail)
+
+
+class QQMusicToplistIE(QQPlaylistBaseIE):
+    IE_NAME = 'qqmusic:toplist'
+    _VALID_URL = r'http://y\.qq\.com/#type=toplist&p=(?P<id>(top|global)_[0-9]+)'
+
+    _TESTS = [{
+        'url': 'http://y.qq.com/#type=toplist&p=global_12',
+        'info_dict': {
+            'id': 'global_12',
+            'title': 'itunes榜',
+        },
+        'playlist_count': 10,
+    }, {
+        'url': 'http://y.qq.com/#type=toplist&p=top_6',
+        'info_dict': {
+            'id': 'top_6',
+            'title': 'QQ音乐巅峰榜·欧美',
+        },
+        'playlist_count': 100,
+    }, {
+        'url': 'http://y.qq.com/#type=toplist&p=global_5',
+        'info_dict': {
+            'id': 'global_5',
+            'title': '韩国mnet排行榜',
+        },
+        'playlist_count': 50,
+    }]
+
+    @staticmethod
+    def strip_qq_jsonp(code):
+        return js_to_json(re.sub(r'^MusicJsonCallback\((.*?)\)/\*.+?\*/$', r'\1', code))
+
+    def _real_extract(self, url):
+        list_id = self._match_id(url)
+
+        list_type, num_id = list_id.split("_")
+
+        list_page = self._download_webpage(
+            "http://y.qq.com/y/static/toplist/index/%s.html" % list_id,
+            list_id, 'Download toplist page')
+
+        entries = []
+        if list_type == 'top':
+            jsonp_url = "http://y.qq.com/y/static/toplist/json/top/%s/1.js" % num_id
+        else:
+            jsonp_url = "http://y.qq.com/y/static/toplist/json/global/%s/1_1.js" % num_id
+
+        toplist_json = self._download_json(
+            jsonp_url, list_id, note='Retrieve toplist json',
+            errnote='Unable to get toplist json', transform_source=self.strip_qq_jsonp)
+
+        for song in toplist_json['l']:
+            s = song['s']
+            song_mid = s.split("|")[20]
+            entries.append(self.url_result(
+                'http://y.qq.com/#type=song&mid=' + song_mid, 'QQMusic',
+                song_mid))
+
+        list_name = self._html_search_regex(
+            r'<h2 id="top_name">([^\']+)</h2>', list_page, 'top list name',
+            default=None)
+
+        return self.playlist_result(entries, list_id, list_name)
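
A minimal standalone sketch of how the new strip_qq_jsonp() helper and the 's'-field indexing in QQMusicToplistIE fit together. The sample payload is an assumption inferred from the regex and the [20] index, not a capture from y.qq.com, and the real extractor additionally runs the stripped text through js_to_json() before _download_json() parses it.

    import json
    import re


    def strip_qq_jsonp(code):
        # Same regex as in the patch: drop the MusicJsonCallback(...) wrapper
        # and the trailing /* ... */ comment, leaving a bare object literal.
        # The sample below is already valid JSON, so json.loads() suffices
        # here; the extractor itself pipes the result through js_to_json().
        return re.sub(r'^MusicJsonCallback\((.*?)\)/\*.+?\*/$', r'\1', code)


    # Hypothetical payload shaped like what the 1.js / 1_1.js endpoints appear
    # to return: a list 'l' of songs whose 's' field is a |-separated record
    # carrying the song mid at index 20.
    sample = ('MusicJsonCallback({"l": [{"s": "'
              + '|'.join(['x'] * 20 + ['004295Et37taLD', 'x'])
              + '"}]})/* comment */')

    for song in json.loads(strip_qq_jsonp(sample))['l']:
        song_mid = song['s'].split('|')[20]
        print('http://y.qq.com/#type=song&mid=' + song_mid)

With the patch applied, toplist pages should be selectable by URL, e.g. youtube-dl 'http://y.qq.com/#type=toplist&p=top_6', and the extractors appear as qqmusic, qqmusic:singer, qqmusic:album and qqmusic:toplist in --list-extractors.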