import re
from .common import InfoExtractor
-from ..utils import strip_jsonp
+from ..utils import (
+ strip_jsonp,
+ unescapeHTML,
+ js_to_json,
+)
from ..compat import compat_urllib_request
'title': '可惜没如果',
'upload_date': '20141227',
'creator': '林俊杰',
+ 'description': 'md5:4348ff1dd24036906baa7b6f973f8d30',
}
}]
detail_info_page = self._download_webpage(
'http://s.plcloud.music.qq.com/fcgi-bin/fcg_yqq_song_detail_info.fcg?songmid=%s&play=0' % mid,
mid, note='Download song detail info',
- errnote='Unable to get song detail info')
+ errnote='Unable to get song detail info', encoding='gbk')
song_name = self._html_search_regex(
r"songname:\s*'([^']+)'", detail_info_page, 'song name')
publish_time = self._html_search_regex(
r'发行时间:(\d{4}-\d{2}-\d{2})', detail_info_page,
- 'publish time').replace('-', '')
+ 'publish time', default=None)
+ if publish_time:
+ publish_time = publish_time.replace('-', '')
singer = self._html_search_regex(
- r"singer:\s*'([^']+)", detail_info_page, 'singer')
+ r"singer:\s*'([^']+)", detail_info_page, 'singer', default=None)
+
+ lrc_content = self._html_search_regex(
+ r'<div class="content" id="lrc_content"[^<>]*>([^<>]+)</div>',
+ detail_info_page, 'LRC lyrics', default=None)
guid = self.m_r_get_ruin()
'title': song_name,
'upload_date': publish_time,
'creator': singer,
+ 'description': lrc_content,
}
-class QQMusicSingerIE(InfoExtractor):
+class QQPlaylistBaseIE(InfoExtractor):
+    # Shared helpers for QQ Music playlist-style extractors
+    # (singer / album / toplist pages that list multiple songs).
+
+    @staticmethod
+    def qq_static_url(category, mid):
+        # Static pages are sharded into directories by the last two
+        # characters of the mid.
+        return 'http://y.qq.com/y/static/%s/%s/%s/%s.html' % (category, mid[-2], mid[-1], mid)
+
+    @classmethod
+    def get_entries_from_page(cls, page):
+        """Collect per-song url_result entries from a playlist-style page."""
+        entries = []
+
+        for item in re.findall(r'class="data"[^<>]*>([^<>]+)</', page):
+            # Each "data" payload is a |-separated record; the song mid is
+            # the 5th field from the end.
+            song_mid = unescapeHTML(item).split('|')[-5]
+            entries.append(cls.url_result(
+                'http://y.qq.com/#type=song&mid=' + song_mid, 'QQMusic',
+                song_mid))
+
+        return entries
+
+
+class QQMusicSingerIE(QQPlaylistBaseIE):
_VALID_URL = r'http://y.qq.com/#type=singer&mid=(?P<id>[0-9A-Za-z]+)'
_TEST = {
'url': 'http://y.qq.com/#type=singer&mid=001BLpXF2DyJe2',
mid = self._match_id(url)
singer_page = self._download_webpage(
- 'http://y.qq.com/y/static/singer/%s/%s/%s.html' % (mid[-2], mid[-1], mid),
- 'Download singer page')
-
- entries = []
+ self.qq_static_url('singer', mid), mid, 'Download singer page')
- for item in re.findall(r'<span class="data">([^<>]+)</span>', singer_page):
- song_mid = item.split('|')[-5]
- entries.append(self.url_result(
- 'http://y.qq.com/#type=song&mid=' + song_mid, 'QQMusic', song_mid))
+ entries = self.get_entries_from_page(singer_page)
singer_name = self._html_search_regex(
r"singername\s*:\s*'([^']+)'", singer_page, 'singer name',
req.add_header(
'Referer', 'http://s.plcloud.music.qq.com/xhr_proxy_utf8.html')
singer_desc_page = self._download_xml(
- req, 'Donwload singer description XML')
+            req, mid, 'Download singer description XML')
singer_desc = singer_desc_page.find('./data/info/desc').text
return self.playlist_result(entries, mid, singer_name, singer_desc)
+
+
+class QQMusicAlbumIE(QQPlaylistBaseIE):
+    # Extractor for QQ Music album pages (y.qq.com/#type=album).
+    _VALID_URL = r'http://y.qq.com/#type=album&mid=(?P<id>[0-9A-Za-z]+)'
+
+    _TEST = {
+        'url': 'http://y.qq.com/#type=album&mid=000gXCTb2AhRR1&play=0',
+        'info_dict': {
+            'id': '000gXCTb2AhRR1',
+            'title': '我们都是这样长大的',
+            'description': 'md5:d216c55a2d4b3537fe4415b8767d74d6',
+        },
+        'playlist_count': 4,
+    }
+
+    def _real_extract(self, url):
+        """Extract all songs of an album as a playlist."""
+        mid = self._match_id(url)
+
+        album_page = self._download_webpage(
+            self.qq_static_url('album', mid), mid, 'Download album page')
+
+        # Song entries are embedded directly in the static album page.
+        entries = self.get_entries_from_page(album_page)
+
+        # Metadata is optional: fall back to None rather than failing.
+        album_name = self._html_search_regex(
+            r"albumname\s*:\s*'([^']+)',", album_page, 'album name',
+            default=None)
+
+        album_detail = self._html_search_regex(
+            r'<div class="album_detail close_detail">\s*<p>((?:[^<>]+(?:<br />)?)+)</p>',
+            album_page, 'album details', default=None)
+
+        return self.playlist_result(entries, mid, album_name, album_detail)
+
+
+class QQMusicToplistIE(QQPlaylistBaseIE):
+    # Extractor for QQ Music top-chart lists (y.qq.com/#type=toplist).
+    _VALID_URL = r'http://y\.qq\.com/#type=toplist&p=(?P<id>(top|global)_[0-9]+)'
+
+    _TESTS = [{
+        'url': 'http://y.qq.com/#type=toplist&p=global_12',
+        'info_dict': {
+            'id': 'global_12',
+            'title': 'itunes榜',
+        },
+        'playlist_count': 10,
+    }, {
+        'url': 'http://y.qq.com/#type=toplist&p=top_6',
+        'info_dict': {
+            'id': 'top_6',
+            'title': 'QQ音乐巅峰榜·欧美',
+        },
+        'playlist_count': 100,
+    }, {
+        'url': 'http://y.qq.com/#type=toplist&p=global_5',
+        'info_dict': {
+            'id': 'global_5',
+            'title': '韩国mnet排行榜',
+        },
+        'playlist_count': 50,
+    }]
+
+    @staticmethod
+    def strip_qq_jsonp(code):
+        # The chart data arrives as MusicJsonCallback(...)/*comment*/ —
+        # unwrap the callback and normalize the payload to strict JSON.
+        return js_to_json(re.sub(r'^MusicJsonCallback\((.*?)\)/\*.+?\*/$', r'\1', code))
+
+    def _real_extract(self, url):
+        """Extract all songs of a top-chart list as a playlist."""
+        list_id = self._match_id(url)
+
+        # list_id looks like 'top_6' or 'global_12' (guaranteed by _VALID_URL).
+        list_type, num_id = list_id.split('_')
+
+        list_page = self._download_webpage(
+            'http://y.qq.com/y/static/toplist/index/%s.html' % list_id,
+            list_id, 'Download toplist page')
+
+        # 'top' and 'global' charts are served from different JSONP endpoints.
+        if list_type == 'top':
+            jsonp_url = 'http://y.qq.com/y/static/toplist/json/top/%s/1.js' % num_id
+        else:  # 'global' is the only other value _VALID_URL admits
+            jsonp_url = 'http://y.qq.com/y/static/toplist/json/global/%s/1_1.js' % num_id
+
+        # NOTE: renamed from 'list' to avoid shadowing the builtin.
+        toplist_json = self._download_json(
+            jsonp_url, list_id, note='Retrieve toplist json',
+            errnote='Unable to get toplist json',
+            transform_source=self.strip_qq_jsonp)
+
+        entries = []
+        for song in toplist_json['l']:
+            # Each entry's 's' field is a |-separated record; field 20 is the mid.
+            song_mid = song['s'].split('|')[20]
+            entries.append(self.url_result(
+                'http://y.qq.com/#type=song&mid=' + song_mid, 'QQMusic',
+                song_mid))
+
+        list_name = self._html_search_regex(
+            r'<h2 id="top_name">([^\']+)</h2>', list_page, 'top list name',
+            default=None)
+
+        return self.playlist_result(entries, list_id, list_name, None)
+
\ No newline at end of file