[vlive] Updated to new V App/VLive API.
author Erwin de Haan <git@erayan.eu>
Sat, 6 Feb 2016 22:37:55 +0000 (23:37 +0100)
committer Sergey M․ <dstftw@gmail.com>
Sat, 6 Feb 2016 23:27:17 +0000 (05:27 +0600)
More robust extraction of keys and ids from the website.
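
Sketch of the new flow (not part of the commit itself): the desktop video page now embeds a vlive.tv.video.ajax.request.handler.init(...) call whose third and fourth arguments are the long video id and the key used for the playinfo request. A minimal standalone illustration, assuming the page matches the regexes used in the diff below (the helper name fetch_playinfo and the error handling are illustrative):

import json
import re
import urllib.request

# The page embeds a call of the form:
#   vlive.tv.video.ajax.request.handler.init("<video_id>", "...", "<long_video_id>", "<key>", "...", "...")
# The third argument (long_video_id) and the fourth (key) feed the playinfo request.
INIT_RE = (r'vlive\.tv\.video\.ajax\.request\.handler\.init\('
           r'"(?P<video_id>[0-9]+)",\s?"[^"]*",\s?"(?P<long_video_id>[^"]+)",'
           r'\s?"(?P<key>[^"]+)",\s?"[^"]*",\s?"[^"]*"\)')


def fetch_playinfo(page_html):
    # page_html: HTML of http://www.vlive.tv/video/<id>
    m = re.search(INIT_RE, page_html)
    if m is None:
        raise ValueError('handler.init call not found on the video page')
    # doct=json asks for a JSON document, cpt=vtt for WebVTT captions (see the comment in the diff)
    url = ('http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json'
           '?videoId=%s&key=%s&ptc=http&doct=json&cpt=vtt'
           % (m.group('long_video_id'), m.group('key')))
    with urllib.request.urlopen(url) as resp:
        return json.loads(resp.read().decode('utf-8'))
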

youtube_dl/extractor/vlive.py

index 86c1cb5ef5d352608416a653bea5ef9f74c4287a..3e1f8ef07361dc055b942466868bdff56683fcf3 100644 (file)
@@ -9,17 +9,18 @@ from time import time
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
-    determine_ext
+    determine_ext,
+    int_or_none
 )
 from ..compat import compat_urllib_parse
 
 
 class VLiveIE(InfoExtractor):
     IE_NAME = 'vlive'
-    # www.vlive.tv/video/ links redirect to m.vlive.tv/video/ for mobile devices
+    # vlive.tv/video/ links redirect to www.vlive.tv/video/
     _VALID_URL = r'https?://(?:(www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
     _TEST = {
-        'url': 'http://m.vlive.tv/video/1326',
+        'url': 'http://www.vlive.tv/video/1326',
         'md5': 'cc7314812855ce56de70a06a27314983',
         'info_dict': {
             'id': '1326',
@@ -28,50 +29,45 @@ class VLiveIE(InfoExtractor):
             'creator': 'Girl\'s Day',
         },
     }
-    _SECRET = 'rFkwZet6pqk1vQt6SxxUkAHX7YL3lmqzUMrU4IDusTo4jEBdtOhNfT4BYYAdArwH'
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
         webpage = self._download_webpage(
-            'http://m.vlive.tv/video/%s' % video_id,
+            'http://www.vlive.tv/video/%s' % video_id,
             video_id, note='Download video page')
 
+        long_video_id = self._search_regex(
+            r'vlive\.tv\.video\.ajax\.request\.handler\.init\("[0-9]+",\s?"[^"]*",\s?"([^"]+)",\s?"[^"]+",\s?"[^"]*",\s?"[^"]*"\)', webpage, 'long_video_id')
+
+        key = self._search_regex(
+            r'vlive\.tv\.video\.ajax\.request\.handler\.init\("[0-9]+",\s?"[^"]*",\s?"[^"]+",\s?"([^"]+)",\s?"[^"]*",\s?"[^"]*"\)', webpage, 'key')
+
         title = self._og_search_title(webpage)
         thumbnail = self._og_search_thumbnail(webpage)
         creator = self._html_search_regex(
-            r'<span[^>]+class="name">([^<>]+)</span>', webpage, 'creator')
+            r'<div class="info_area">\s*<strong[^>]+class="name">([^<>]+)</strong>', webpage, 'creator', fatal=False)
 
-        url = 'http://global.apis.naver.com/globalV/globalV/vod/%s/playinfo?' % video_id
-        msgpad = '%.0f' % (time() * 1000)
-        md = b64encode(
-            hmac.new(self._SECRET.encode('ascii'),
-                     (url[:255] + msgpad).encode('ascii'), sha1).digest()
-        )
-        url += '&' + compat_urllib_parse.urlencode({'msgpad': msgpad, 'md': md})
+        # doct = document type (xml or json), cpt = caption type (vtt or ttml)
+        url = "http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?videoId=%s&key=%s&ptc=http&doct=json&cpt=vtt" % (long_video_id, key)
+        
         playinfo = self._download_json(url, video_id, 'Downloading video json')
 
-        if playinfo.get('message', '') != 'success':
-            raise ExtractorError(playinfo.get('message', 'JSON request unsuccessful'))
-
-        if not playinfo.get('result'):
-            raise ExtractorError('No videos found.')
-
         formats = []
-        for vid in playinfo['result'].get('videos', {}).get('list', []):
+        for vid in playinfo.get('videos', {}).get('list', []):
             formats.append({
                 'url': vid['source'],
                 'ext': 'mp4',
                 'abr': vid.get('bitrate', {}).get('audio'),
                 'vbr': vid.get('bitrate', {}).get('video'),
-                'format_id': vid['encodingOption']['name'],
-                'height': vid.get('height'),
-                'width': vid.get('width'),
+                'format_id': vid.get('encodingOption', {}).get('name'),
+                'height': int_or_none(vid.get('encodingOption', {}).get('height')),
+                'width': int_or_none(vid.get('encodingOption', {}).get('width')),
             })
         self._sort_formats(formats)
 
         subtitles = {}
-        for caption in playinfo['result'].get('captions', {}).get('list', []):
+        for caption in playinfo.get('captions', {}).get('list', []):
             subtitles[caption['language']] = [
                 {'ext': determine_ext(caption['source'], default_ext='vtt'),
                  'url': caption['source']}]
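
For reference, the format and caption loops in this diff assume a playinfo JSON shaped roughly as below; the field names come from the accesses in the new code, while the concrete values are hypothetical placeholders:

# Hypothetical example of the playinfo structure consumed above; values are placeholders.
playinfo = {
    'videos': {'list': [{
        'source': 'http://example.com/1326_720p.mp4',
        'bitrate': {'audio': 128, 'video': 2000},
        'encodingOption': {'name': '720P', 'width': 1280, 'height': 720},
    }]},
    'captions': {'list': [{
        'language': 'en',
        'source': 'http://example.com/1326_en.vtt',
    }]},
}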