X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2Fextractor%2Fcrunchyroll.py;h=d7e2b841e10856cadf0526fe8ff6d4c280dc0dae;hb=75111274edeec5f7088730b7f9c5c623dae77f28;hp=414c46b0d5e042be0e29ececf069a6c331e30197;hpb=8230018c20595a22e636b834ebb522a6a85d0d8b;p=youtube-dl

diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index 414c46b0d..d7e2b841e 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -17,7 +17,6 @@ from ..utils import (
     bytes_to_intlist,
     intlist_to_bytes,
     unified_strdate,
-    clean_html,
     urlencode_postdata,
 )
 from ..aes import (
@@ -40,6 +39,7 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
             'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
             'uploader': 'Yomiuri Telecasting Corporation (YTV)',
             'upload_date': '20131013',
+            'url': 're:(?!.*&amp;)',
         },
         'params': {
             # rtmp
@@ -69,11 +69,9 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._download_webpage(login_request, None, False, 'Wrong login info')
 
-
     def _real_initialize(self):
         self._login()
 
-
     def _decrypt_subtitles(self, data, iv, id):
         data = bytes_to_intlist(data)
         iv = bytes_to_intlist(iv)
@@ -99,8 +97,10 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
             return shaHash + [0] * 12
 
         key = obfuscate_key(id)
+
         class Counter:
             __value = iv
+
             def next_value(self):
                 temp = self.__value
                 self.__value = inc(self.__value)
@@ -108,19 +108,17 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
         decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
         return zlib.decompress(decrypted_data)
 
-    def _convert_subtitles_to_srt(self, subtitles):
+    def _convert_subtitles_to_srt(self, sub_root):
         output = ''
-        for i, (start, end, text) in enumerate(re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles), 1):
-            start = start.replace('.', ',')
-            end = end.replace('.', ',')
-            text = clean_html(text)
-            text = text.replace('\\N', '\n')
-            if not text:
-                continue
+
+        for i, event in enumerate(sub_root.findall('./events/event'), 1):
+            start = event.attrib['start'].replace('.', ',')
+            end = event.attrib['end'].replace('.', ',')
+            text = event.attrib['text'].replace('\\N', '\n')
             output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
         return output
 
-    def _convert_subtitles_to_ass(self, subtitles):
+    def _convert_subtitles_to_ass(self, sub_root):
         output = ''
 
         def ass_bool(strvalue):
@@ -129,10 +127,6 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
                 assvalue = '-1'
             return assvalue
 
-        sub_root = xml.etree.ElementTree.fromstring(subtitles)
-        if not sub_root:
-            return output
-
         output = '[Script Info]\n'
         output += 'Title: %s\n' % sub_root.attrib["title"]
         output += 'ScriptType: v4.00+\n'
@@ -189,7 +183,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
 
         return output
 
-    def _real_extract(self,url):
+    def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('video_id')
 
@@ -232,18 +226,20 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         formats = []
         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
             stream_quality, stream_format = self._FORMAT_IDS[fmt]
-            video_format = fmt+'p'
+            video_format = fmt + 'p'
             streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
             # urlencode doesn't work!
-            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
-            streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for '+video_format)
-            video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url')
-            video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path')
+            streamdata = self._download_xml(
+                streamdata_req, video_id,
+                note='Downloading media info for %s' % video_format)
+            video_url = streamdata.find('.//host').text
+            video_play_path = streamdata.find('.//file').text
             formats.append({
                 'url': video_url,
-                'play_path':   video_play_path,
+                'play_path': video_play_path,
                 'ext': 'flv',
                 'format': video_format,
                 'format_id': video_format,
@@ -252,8 +248,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
-                                              video_id, note='Downloading subtitles for '+sub_name)
+            sub_page = self._download_webpage(
+                'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
+                video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
@@ -267,56 +264,60 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
             if not lang_code:
                 continue
+            sub_root = xml.etree.ElementTree.fromstring(subtitle)
             if sub_format == 'ass':
-                subtitles[lang_code] = self._convert_subtitles_to_ass(subtitle)
+                subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root)
             else:
-                subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
+                subtitles[lang_code] = self._convert_subtitles_to_srt(sub_root)
 
         if self._downloader.params.get('listsubtitles', False):
             self._list_available_subtitles(video_id, subtitles)
             return
 
         return {
-            'id':          video_id,
-            'title':       video_title,
+            'id': video_id,
+            'title': video_title,
             'description': video_description,
-            'thumbnail':   video_thumbnail,
-            'uploader':    video_uploader,
+            'thumbnail': video_thumbnail,
+            'uploader': video_uploader,
             'upload_date': video_upload_date,
-            'subtitles':   subtitles,
-            'formats':     formats,
+            'subtitles': subtitles,
+            'formats': formats,
         }
 
 
 class CrunchyrollShowPlaylistIE(InfoExtractor):
     IE_NAME = "crunchyroll:playlist"
-    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<show>[\w\-]+))/?$'
-    _TITLE_EXTR = r'<title>\s*(?P<title>[\w\s]+)</title>'
+    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'
 
     _TESTS = [{
-        'url' : 'http://www.crunchyroll.com/attack-on-titan',
-        'info_dict' : {
-            'title' : 'Attack on Titan'
+        'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
+        'info_dict': {
+            'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
+            'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
         },
-        'playlist_count' : 15
+        'playlist_count': 13,
     }]
 
-    def _extract_title_entries(self,id,webpage):
-        _EPISODE_ID_EXTR = r'id="showview_videos_media_(?P<vidid>\d+)".*?href="/{0}/(?P<showid>[\w\-]+-(?P=vidid))"'.format(id)
-        title = self._html_search_regex(self._TITLE_EXTR,webpage,"title",flags=re.UNICODE|re.MULTILINE)
-        episode_urls = [self.url_result('http://www.crunchyroll.com/{0}/{1}'.format(id, showmatch[1])) for
-                        showmatch in re.findall(_EPISODE_ID_EXTR, webpage,re.UNICODE|re.MULTILINE|re.DOTALL)]
-        return title, episode_urls
-
     def _real_extract(self, url):
-        url_match = re.match(self._VALID_URL,url)
-        show_id = url_match.group('show')
-        webpage = self._download_webpage(url,show_id)
-        (title,entries) = self._extract_title_entries(show_id,webpage)
+        show_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, show_id)
+        title = self._html_search_regex(
+            r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
+            webpage, 'title')
+        episode_paths = re.findall(
+            r'(?s)<li id="showview_videos_media_[0-9]+"[^>]+>.*?<a href="([^"]+)"',
+            webpage)
+        entries = [
+            self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll')
+            for ep in episode_paths
+        ]
+        entries.reverse()
+
         return {
             '_type': 'playlist',
             'id': show_id,
             'title': title,
             'entries': entries,
         }