Unify coding cookie
diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index cd049b6f0dc5ecbaf26e36277702f91df55d96f5..30760ca06be4b3fc112f3fe0200c74b665d64855 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
-# encoding: utf-8
+# coding: utf-8
+from __future__ import unicode_literals
 
 import re
-import json
-import time
-import logging
-import urllib2
 
 from .common import InfoExtractor
-from ..utils import compat_urllib_request, clean_html
+from ..compat import (
+    compat_str,
+    compat_urllib_parse_urlencode,
+)
+from ..utils import ExtractorError
 
 
 class SohuIE(InfoExtractor):
-    _VALID_URL = r'https?://tv\.sohu\.com/\d+?/n(?P<id>\d+)\.shtml.*?'
-
-    _TEST = {
-        u'url': u'http://tv.sohu.com/20130724/n382479172.shtml#super',
-        u'file': u'382479172.flv',
-        u'md5': u'cc84eed6b6fbf0f2f9a8d3cb9da1939b',
-        u'info_dict': {
-            u'title': u'The Illest - Far East Movement Riff Raff',
-        },
-    }
+    _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'
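+    # The conditional group (?(mytv)|n) requires the 'n' prefix before the id
+    # only on plain tv.sohu.com URLs, e.g.
+    #   http://tv.sohu.com/20130724/n382479172.shtml      -> 382479172
+    #   http://my.tv.sohu.com/us/232799889/78693464.shtml -> 78693464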
 
+    # Sohu videos give different MD5 sums on Travis CI and my machine
+    _TESTS = [{
+        'note': 'This video is available only in Mainland China',
+        'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
+        'info_dict': {
+            'id': '382479172',
+            'ext': 'mp4',
+            'title': 'MV:Far East Movement《The Illest》',
+        },
+        'skip': 'Only available in China',
+    }, {
+        'url': 'http://tv.sohu.com/20150305/n409385080.shtml',
+        'info_dict': {
+            'id': '409385080',
+            'ext': 'mp4',
+            'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》',
+        }
+    }, {
+        'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml',
+        'info_dict': {
+            'id': '78693464',
+            'ext': 'mp4',
+            'title': '【爱范品】第31期:MWC见不到的奇葩手机',
+        }
+    }, {
+        'note': 'Multipart video',
+        'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml',
+        'info_dict': {
+            'id': '78910339',
+            'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
+        },
+        'playlist': [{
+            'info_dict': {
+                'id': '78910339_part1',
+                'ext': 'mp4',
+                'duration': 294,
+                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
+            }
+        }, {
+            'info_dict': {
+                'id': '78910339_part2',
+                'ext': 'mp4',
+                'duration': 300,
+                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
+            }
+        }, {
+            'info_dict': {
+                'id': '78910339_part3',
+                'ext': 'mp4',
+                'duration': 150,
+                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
+            }
+        }]
+    }, {
+        'note': 'Video with title containing dash',
+        'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml',
+        'info_dict': {
+            'id': '78932792',
+            'ext': 'mp4',
+            'title': 'youtube-dl testing video',
+        },
+        'params': {
+            'skip_download': True
+        }
+    }]
 
     def _real_extract(self, url):
+
+        def _fetch_data(vid_id, mytv=False):
+            if mytv:
+                base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
+            else:
+                base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
+
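+            # The Sohu metadata APIs are geo-fenced to Mainland China;
+            # geo_verification_headers() lets the request go through
+            # --geo-verification-proxy when one is configured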
+            return self._download_json(
+                base_data_url + vid_id, video_id,
+                'Downloading JSON data for %s' % vid_id,
+                headers=self.geo_verification_headers())
+
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
+        mytv = mobj.group('mytv') is not None
+
         webpage = self._download_webpage(url, video_id)
-        pattern = r'<title>(.+?)</title>'
-        compiled = re.compile(pattern, re.DOTALL)
-        title = self._search_regex(compiled, webpage, u'video title')
-        title = clean_html(title).split('-')[0].strip()
-        pattern = re.compile(r'var vid="(\d+)"')
-        result = re.search(pattern, webpage)
-        if not result:
-            logging.info('[Sohu] could not get vid')
-            return None
-        vid = result.group(1)
-        logging.info('vid: %s' % vid)
-        base_url_1 = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
-        url_1 = base_url_1 + vid
-        logging.info('json url: %s' % url_1)
-        webpage = self._download_webpage(url_1, vid)
-        json_1 = json.loads(webpage)
-        # get the highest definition video vid and json infomation.
-        vids = []
-        qualities = ('oriVid', 'superVid', 'highVid', 'norVid')
-        for vid_name in qualities:
-            vids.append(json_1['data'][vid_name])
-        clearest_vid = 0
-        for i, v in enumerate(vids):
-            if v != 0:
-                clearest_vid = v
-                logging.info('quality definition: %s' % qualities[i][:-3])
-                break
-        if not clearest_vid:
-            logging.warning('could not find valid clearest_vid')
-            return None
-        if vid != clearest_vid:
-            url_1 = '%s%d' % (base_url_1, clearest_vid)
-            logging.info('highest definition json url: %s' % url_1)
-            json_1 = json.loads(urllib2.urlopen(url_1).read())
-        allot = json_1['allot']
-        prot = json_1['prot']
-        clipsURL = json_1['data']['clipsURL']
-        su = json_1['data']['su']
-        num_of_parts = json_1['data']['totalBlocks']
-        logging.info('Total parts: %d' % num_of_parts)
-        base_url_3 = 'http://allot/?prot=prot&file=clipsURL[i]&new=su[i]'
-        files_info = []
-        for i in range(num_of_parts):
-            middle_url = 'http://%s/?prot=%s&file=%s&new=%s' % (allot, prot, clipsURL[i], su[i])
-            logging.info('middle url part %d: %s' % (i, middle_url))
-            middle_info = urllib2.urlopen(middle_url).read().split('|')
-            middle_part_1 = middle_info[0]
-            download_url = '%s%s?key=%s' % (middle_info[0], su[i], middle_info[3])
 
+        title = re.sub(r' - 搜狐视频$', '', self._og_search_title(webpage))
+
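+        # The watch page defines the real video id in JavaScript
+        # (e.g. var vid="382479172"); the JSON APIs below are keyed on this
+        # vid rather than on the numeric id taken from the URL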
+        vid = self._html_search_regex(
+            r'var vid ?= ?["\'](\d+)["\']',
+            webpage, 'video id')
+        vid_data = _fetch_data(vid, mytv)
+        if vid_data['play'] != 1:
+            if vid_data.get('status') == 12:
+                raise ExtractorError(
+                    'Sohu said: There\'s something wrong in the video.',
+                    expected=True)
+            else:
+                raise ExtractorError(
+                    'Sohu said: The video is only licensed to users in Mainland China.',
+                    expected=True)
+
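+        # Each quality level is exposed as its own vid (normal, high, super,
+        # original, plus what are presumably the 4K H.264/H.265 variants);
+        # fetch metadata for every available one, reusing the data already
+        # downloaded when the quality vid matches the page's own vid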
+        formats_json = {}
+        for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'):
+            vid_id = vid_data['data'].get('%sVid' % format_id)
+            if not vid_id:
+                continue
+            vid_id = compat_str(vid_id)
+            formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv)
+
+        part_count = vid_data['data']['totalBlocks']
+
+        playlist = []
+        for i in range(part_count):
+            formats = []
+            for format_id, format_data in formats_json.items():
+                allot = format_data['allot']
+
+                data = format_data['data']
+                clips_url = data['clipsURL']
+                su = data['su']
+
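+                # The URL endpoint first answers with the placeholder host
+                # newflv.sohu.ccgslb.net; keep asking, feeding the returned
+                # CDN node id ('nid') back as 'idc', until a real download
+                # URL is handed out, giving up after five attempts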
+                video_url = 'newflv.sohu.ccgslb.net'
+                cdnId = None
+                retries = 0
+
+                while 'newflv.sohu.ccgslb.net' in video_url:
+                    params = {
+                        'prot': 9,
+                        'file': clips_url[i],
+                        'new': su[i],
+                        'prod': 'flash',
+                        'rb': 1,
+                    }
+
+                    if cdnId is not None:
+                        params['idc'] = cdnId
+
+                    download_note = 'Downloading %s video URL part %d of %d' % (
+                        format_id, i + 1, part_count)
+
+                    if retries > 0:
+                        download_note += ' (retry #%d)' % retries
+                    part_info = self._parse_json(self._download_webpage(
+                        'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)),
+                        video_id, download_note), video_id)
+
+                    video_url = part_info['url']
+                    cdnId = part_info.get('nid')
+
+                    retries += 1
+                    if retries > 5:
+                        raise ExtractorError('Failed to get video URL')
+
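+                # clipsBytes lists per-part sizes; width/height/fps describe
+                # the whole quality level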
+                formats.append({
+                    'url': video_url,
+                    'format_id': format_id,
+                    'filesize': data['clipsBytes'][i],
+                    'width': data['width'],
+                    'height': data['height'],
+                    'fps': data['fps'],
+                })
+            self._sort_formats(formats)
+
+            playlist.append({
+                'id': '%s_part%d' % (video_id, i + 1),
+                'title': title,
+                'duration': vid_data['data']['clipsDuration'][i],
+                'formats': formats,
+            })
+
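+        # A single-part video is returned directly under the page id;
+        # multipart videos become a multi_video playlist of _part<N> entries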
+        if len(playlist) == 1:
+            info = playlist[0]
+            info['id'] = video_id
+        else:
             info = {
-                'id': '%s_part%02d' % (video_id, i + 1),
+                '_type': 'multi_video',
+                'entries': playlist,
+                'id': video_id,
                 'title': title,
-                'url': download_url,
-                'ext': 'mp4',
             }
-            files_info.append(info)
-            time.sleep(1)
-        if num_of_parts == 1:
-            info =  files_info[0]
-            info['id'] = video_id
-            return info
-        return files_info
+
+        return info