Merge branch 'vlive-hls' of https://github.com/Kagami/youtube-dl into Kagami-vlive-hls
youtube_dl/extractor/vlive.py
# coding: utf-8
from __future__ import division, unicode_literals

import re
import time

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    dict_get,
    float_or_none,
    int_or_none,
)
from ..compat import compat_urllib_parse_urlencode


class VLiveIE(InfoExtractor):
    IE_NAME = 'vlive'
    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.vlive.tv/video/1326',
        'md5': 'cc7314812855ce56de70a06a27314983',
        'info_dict': {
            'id': '1326',
            'ext': 'mp4',
            'title': "[V] Girl's Day's Broadcast",
            'creator': "Girl's Day",
            'view_count': int,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'http://www.vlive.tv/video/%s' % video_id, video_id)

        # Send the local UTC offset relative to KST (UTC+9), in minutes,
        # as the 'timezoneOffset' cookie (i.e. UTC+x - UTC+9).
        tz = time.altzone if time.localtime().tm_isdst == 1 else time.timezone
        tz_offset = -tz // 60 - 9 * 60
        self._set_cookie('vlive.tv', 'timezoneOffset', '%d' % tz_offset)

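        # The /video/status endpoint reports whether the video is currently
        # live, available as a VOD replay, not yet on air, or canceled; the
        # dispatch below keys off that state.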
        status_params = self._download_json(
            'http://www.vlive.tv/video/status?videoSeq=%s' % video_id,
            video_id, 'Downloading JSON status',
            headers={'Referer': url})
        status = status_params.get('status')
        air_start = status_params.get('onAirStartAt', '')
        is_live = status_params.get('isLive')

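        # The player page embeds a vlive.tv.video.ajax.request.handler.init()
        # call whose arguments are quoted, comma-separated strings; splitting
        # on the quote-comma-quote boundaries, the second, third and fourth
        # pieces carry the (escaped) live playback params, the long video id
        # and the key used for VOD playback.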
        video_params = self._search_regex(
            r'vlive\.tv\.video\.ajax\.request\.handler\.init\((.+)\)',
            webpage, 'video params')
        live_params, long_video_id, key = re.split(
            r'"\s*,\s*"', video_params)[1:4]

        if status in ('LIVE_ON_AIR', 'BIG_EVENT_ON_AIR'):
            # The live params are JSON that is itself embedded as an escaped
            # string literal in the page, so it has to be decoded twice.
            live_params = self._parse_json('"%s"' % live_params, video_id)
            live_params = self._parse_json(live_params, video_id)
            return self._live(video_id, webpage, live_params)
        elif status in ('VOD_ON_AIR', 'BIG_EVENT_INTRO'):
            if long_video_id and key:
                return self._replay(video_id, webpage, long_video_id, key)
            elif is_live:
                status = 'LIVE_END'
            else:
                status = 'COMING_SOON'

        if status == 'LIVE_END':
            raise ExtractorError('Uploading for replay. Please wait...',
                                 expected=True)
        elif status == 'COMING_SOON':
            raise ExtractorError('Coming soon! %s' % air_start, expected=True)
        elif status == 'CANCELED':
            raise ExtractorError('We are sorry, '
                                 'but the live broadcast has been canceled.',
                                 expected=True)
        else:
            raise ExtractorError('Unknown status %s' % status)

    def _get_common_fields(self, webpage):
        title = self._og_search_title(webpage)
        creator = self._html_search_regex(
            r'<div[^>]+class="info_area"[^>]*>\s*<a\s+[^>]*>([^<]+)',
            webpage, 'creator', fatal=False)
        thumbnail = self._og_search_thumbnail(webpage)
        return {
            'title': title,
            'creator': creator,
            'thumbnail': thumbnail,
        }

    def _live(self, video_id, webpage, live_params):
        formats = []
        # Each entry in 'resolutions' points at an HLS playlist on the CDN.
        for vid in live_params.get('resolutions', []):
            formats.extend(self._extract_m3u8_formats(
                vid['cdnUrl'], video_id, 'mp4',
                m3u8_id=vid.get('name'),
                fatal=False, live=True))
        self._sort_formats(formats)

        return dict(self._get_common_fields(webpage),
            id=video_id,
            formats=formats,
            is_live=True,
        )

    def _replay(self, video_id, webpage, long_video_id, key):
        playinfo = self._download_json(
            'http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?%s'
            % compat_urllib_parse_urlencode({
                'videoId': long_video_id,
                'key': key,
                'ptc': 'http',
                'doct': 'json',  # document type (xml or json)
                'cpt': 'vtt',  # captions type (vtt or ttml)
            }), video_id)

        # Direct-URL formats from the VOD play info; entries without a
        # 'source' URL are skipped.
        formats = [{
            'url': vid['source'],
            'format_id': vid.get('encodingOption', {}).get('name'),
            'abr': float_or_none(vid.get('bitrate', {}).get('audio')),
            'vbr': float_or_none(vid.get('bitrate', {}).get('video')),
            'width': int_or_none(vid.get('encodingOption', {}).get('width')),
            'height': int_or_none(vid.get('encodingOption', {}).get('height')),
            'filesize': int_or_none(vid.get('size')),
        } for vid in playinfo.get('videos', {}).get('list', []) if vid.get('source')]
        self._sort_formats(formats)

        view_count = int_or_none(playinfo.get('meta', {}).get('count'))

        # Captions are returned as WebVTT (requested via cpt=vtt above).
        subtitles = {}
        for caption in playinfo.get('captions', {}).get('list', []):
            lang = dict_get(caption, ('language', 'locale', 'country', 'label'))
            if lang and caption.get('source'):
                subtitles[lang] = [{
                    'ext': 'vtt',
                    'url': caption['source']}]

        return dict(self._get_common_fields(webpage),
            id=video_id,
            formats=formats,
            view_count=view_count,
            subtitles=subtitles,
        )
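
# As a quick local smoke test (assuming a youtube-dl checkout with this file
# in place; the URL is the one from _TEST above):
#
#   python -m youtube_dl -v 'http://www.vlive.tv/video/1326'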