X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2Fextractor%2Fsohu.py;h=daf6ad555be2a84dc89ec8dfcabcdfde78cf0194;hb=3047121c639428235191ff5f7afbda7ecda38779;hp=7644cc02d71e8dc09cbe8536983d33fba19c3dec;hpb=3f3308cd75fc068e4d67d00aa7d7892e02ab16e9;p=youtube-dl

diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index 7644cc02d..daf6ad555 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -6,11 +6,11 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_request
+    compat_urllib_parse,
 )
 from ..utils import (
-    sanitize_url_path_consecutive_slashes,
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -96,7 +96,7 @@ class SohuIE(InfoExtractor):
         else:
             base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
 
-        req = compat_urllib_request.Request(base_data_url + vid_id)
+        req = sanitized_Request(base_data_url + vid_id)
 
         cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
         if cn_verification_proxy:
@@ -143,23 +143,41 @@ class SohuIE(InfoExtractor):
             formats = []
             for format_id, format_data in formats_json.items():
                 allot = format_data['allot']
-                prot = format_data['prot']
 
                 data = format_data['data']
                 clips_url = data['clipsURL']
                 su = data['su']
 
-                part_str = self._download_webpage(
-                    'http://%s/?prot=%s&file=%s&new=%s' %
-                    (allot, prot, clips_url[i], su[i]),
-                    video_id,
-                    'Downloading %s video URL part %d of %d'
-                    % (format_id, i + 1, part_count))
+                video_url = 'newflv.sohu.ccgslb.net'
+                cdnId = None
+                retries = 0
 
-                part_info = part_str.split('|')
+                while 'newflv.sohu.ccgslb.net' in video_url:
+                    params = {
+                        'prot': 9,
+                        'file': clips_url[i],
+                        'new': su[i],
+                        'prod': 'flash',
+                    }
 
-                video_url = sanitize_url_path_consecutive_slashes(
-                    '%s%s?key=%s' % (part_info[0], su[i], part_info[3]))
+                    if cdnId is not None:
+                        params['idc'] = cdnId
+
+                    download_note = 'Downloading %s video URL part %d of %d' % (
+                        format_id, i + 1, part_count)
+
+                    if retries > 0:
+                        download_note += ' (retry #%d)' % retries
+                    part_info = self._parse_json(self._download_webpage(
+                        'http://%s/?%s' % (allot, compat_urllib_parse.urlencode(params)),
+                        video_id, download_note), video_id)
+
+                    video_url = part_info['url']
+                    cdnId = part_info.get('nid')
+
+                    retries += 1
+                    if retries > 5:
+                        raise ExtractorError('Failed to get video URL')
 
                 formats.append({
                     'url': video_url,
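
For context, the last hunk replaces the old pipe-delimited prot/file/new lookup with a JSON query to the dispatcher host in `allot`, repeated while the returned URL still points at the placeholder host newflv.sohu.ccgslb.net; on each retry the reported `nid` is fed back as `idc` so the dispatcher can pick a concrete CDN node. A minimal standalone sketch of that retry pattern follows, using only the Python 3 standard library rather than youtube-dl's compat_urllib_parse / sanitized_Request / _download_webpage helpers; the function name fetch_part_url, the PLACEHOLDER_HOST constant, and the max_retries cap are illustrative, not part of the committed code.

    import json
    from urllib.parse import urlencode
    from urllib.request import urlopen

    # Sentinel host the dispatcher returns when it has not yet assigned a CDN node.
    PLACEHOLDER_HOST = 'newflv.sohu.ccgslb.net'


    def fetch_part_url(allot, clip_url, su, max_retries=5):
        """Ask the dispatcher at `allot` for a playable URL for one clip part.

        The dispatcher answers with JSON like {"url": ..., "nid": ...}.  While
        the answer still points at the placeholder host, the request is retried
        with the reported node id passed back as `idc`.
        """
        video_url = PLACEHOLDER_HOST
        cdn_id = None
        retries = 0

        while PLACEHOLDER_HOST in video_url:
            params = {
                'prot': 9,          # protocol selector used by the Flash player
                'file': clip_url,
                'new': su,
                'prod': 'flash',
            }
            if cdn_id is not None:
                params['idc'] = cdn_id

            with urlopen('http://%s/?%s' % (allot, urlencode(params))) as resp:
                part_info = json.loads(resp.read().decode('utf-8'))

            video_url = part_info['url']
            cdn_id = part_info.get('nid')

            retries += 1
            if retries > max_retries:
                raise RuntimeError('Failed to get a usable video URL')

        return video_url

The cap on retries mirrors the extractor's behaviour of giving up with an error after a handful of attempts instead of looping forever on a dispatcher that never hands out a real CDN node.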