[dramafever] Add new extractor for dramafever.com
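A quick way to smoke-test the new extractor from a checkout (a minimal sketch, assuming the working tree is importable as youtube_dl and the test URL is reachable from a region where the video is available):

    import youtube_dl

    ydl = youtube_dl.YoutubeDL({'quiet': True})
    info = ydl.extract_info(
        'http://www.dramafever.com/drama/4512/1/Cooking_with_Shin/',
        download=False)
    print('%s: %s' % (info['id'], info['title']))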
youtube_dl/extractor/dramafever.py
# encoding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class DramaFeverIE(InfoExtractor):
    IE_NAME = 'dramafever'
    _VALID_URL = r'^https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+/[0-9]+)/'
    _TESTS = [{
        'url': 'http://www.dramafever.com/drama/4512/1/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512.1',
            'ext': 'flv',
            'title': 'Cooking with Shin 4512.1',
            'upload_date': '20140702',
            'description': 'Served at all special occasions and featured in the hit drama Heirs, Shin cooks Red Bean Rice.',
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url).replace('/', '.')

        consumer_secret = self._get_consumer_secret(video_id)

        ep_json = self._download_json(
            'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id,
            video_id, note='Downloading episode metadata',
            errnote='Video may not be available for your location')['channel']['item']

        title = ep_json['media-group']['media-title']
        description = ep_json['media-group']['media-description']
        thumbnail = ep_json['media-group']['media-thumbnail']['@attributes']['url']
        duration = int(ep_json['media-group']['media-content'][0]['@attributes']['duration'])

        # Derive upload_date (YYYYMMDD) from the leading date in pubDate
        mobj = re.match(r'([0-9]{4})-([0-9]{2})-([0-9]{2})', ep_json['pubDate'])
        upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3) if mobj is not None else None

        # Only the HDS (f4m) manifests in media-content are extracted as formats
        formats = []
        for vid_format in ep_json['media-group']['media-content']:
            src = vid_format['@attributes']['url']
            if '.f4m' in src:
                formats.extend(self._extract_f4m_formats(src, video_id))

        self._sort_formats(formats)
        video_subtitles = self.extract_subtitles(video_id, consumer_secret)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'formats': formats,
            'subtitles': video_subtitles,
        }

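    # Note: the consumer secret ('cs') below is scraped from a versioned
    # static JS asset; the hard-coded '126960d' path component may break if
    # the site bumps the asset revision, so a fallback value might be worth
    # adding here.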
    def _get_consumer_secret(self, video_id):
        df_js = self._download_webpage(
            'http://www.dramafever.com/static/126960d/v2/js/plugins/jquery.threadedcomments.js', video_id)
        return self._search_regex(r"'cs': '([0-9a-zA-Z]+)'", df_js, 'consumer secret')

    def _get_episodes(self, series_id, consumer_secret, episode_filter=None):
        _PAGE_SIZE = 60

        # num_pages is only known after the first response, so start with a
        # value that guarantees at least one iteration
        curr_page = 1
        max_pages = curr_page + 1
        results = []
        while max_pages >= curr_page:
            page_url = (
                'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_size=%d&page_number=%d'
                % (consumer_secret, series_id, _PAGE_SIZE, curr_page))
            series = self._download_json(
                page_url, series_id, note='Downloading series json page #%d' % curr_page)
            max_pages = series['num_pages']
            results.extend([ep for ep in series['value'] if episode_filter is None or episode_filter(ep)])
            curr_page += 1
        return results

    def _get_subtitles(self, video_id, consumer_secret):
        def match_episode(ep):
            return ep['guid'] == video_id

        # The subtitle URL comes from the series episode listing, keyed by
        # the episode guid
        info = self._get_episodes(
            video_id.split('.')[0], consumer_secret, episode_filter=match_episode)
        if len(info) == 1 and info[0].get('subfile'):
            return {'en': [{'url': info[0]['subfile'], 'ext': 'srt'}]}
        return None


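# The series pattern below requires a slug segment containing at least one
# letter or underscore, while episode URLs have a purely numeric second
# segment, so the two _VALID_URLs should not overlap even though
# DramaFeverSeriesIE inherits from DramaFeverIE.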
class DramaFeverSeriesIE(DramaFeverIE):
    IE_NAME = 'dramafever:series'
    _VALID_URL = r'^https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+)/\d*[a-zA-Z_][a-zA-Z0-9_]*/'
    _TESTS = [{
        'url': 'http://www.dramafever.com/drama/4512/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512',
            'title': 'Cooking with Shin',
            'description': 'Professional chef and cooking instructor Shin Kim takes some of the delicious dishes featured in your favorite dramas and shows you how to make them right at home.',
        },
        'playlist_count': 4,
    }, {
        'url': 'http://www.dramafever.com/drama/124/IRIS/',
        'info_dict': {
            'id': '124',
            'title': 'IRIS',
            'description': 'Lee Byung Hun and Kim Tae Hee star in this powerhouse drama and ratings megahit of action, intrigue and romance.',
        },
        'playlist_count': 20,
    }]

    def _real_extract(self, url):
        series_id = self._match_id(url)
        consumer_secret = self._get_consumer_secret(series_id)

        series_json = self._download_json(
            'http://www.dramafever.com/api/4/series/query/?cs=%s&series_id=%s' % (consumer_secret, series_id),
            series_id, note='Downloading series metadata')['series'][series_id]

        title = series_json['name']
        description = series_json['description_short']

        episodes = self._get_episodes(series_id, consumer_secret)
        entries = []
        for ep in episodes:
            entries.append(self.url_result(
                'http://www.dramafever.com%s' % ep['episode_url'], 'DramaFever', ep['guid']))
        return self.playlist_result(entries, series_id, title, description)
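Not shown in this patch: for the new classes to be picked up, they presumably also need to be registered in youtube_dl/extractor/__init__.py alongside the other extractor imports (a sketch, assuming the usual convention there):

    from .dramafever import (
        DramaFeverIE,
        DramaFeverSeriesIE,
    )

After that, the bundled tests above should be runnable through the standard harness, e.g. test/test_download.py with TestDownload.test_DramaFever and TestDownload.test_DramaFeverSeries.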