[dramafever] Streamline code
youtube_dl/extractor/dramafever.py
# encoding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class DramaFeverIE(InfoExtractor):
    IE_NAME = 'dramafever'
    _VALID_URL = r'^https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+/[0-9]+)/'
    _TESTS = [{
        'url': 'http://www.dramafever.com/drama/4512/1/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512.1',
            'ext': 'flv',
            'title': 'Cooking with Shin 4512.1',
            'upload_date': '20140702',
            'description': 'Served at all special occasions and featured in the hit drama Heirs, Shin cooks Red Bean Rice.',
        }
    }]

    def _real_extract(self, url):
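        # URL ids look like '4512/1'; the site's feed and APIs identify the
        # episode by the dotted GUID form '4512.1' instead.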
        video_id = self._match_id(url).replace('/', '.')

        consumer_secret = self._get_consumer_secret(video_id)

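        # Episode metadata (title, description, thumbnail, media files) comes
        # from an RSS-style JSON feed keyed by the GUID.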
        ep_json = self._download_json(
            'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id,
            video_id, note='Downloading episode metadata',
            errnote='Video may not be available for your location')['channel']['item']

        title = ep_json['media-group']['media-title']
        description = ep_json['media-group']['media-description']
        thumbnail = ep_json['media-group']['media-thumbnail']['@attributes']['url']
        duration = int(ep_json['media-group']['media-content'][0]['@attributes']['duration'])
        mobj = re.match(r'([0-9]{4})-([0-9]{2})-([0-9]{2})', ep_json['pubDate'])
        upload_date = ''.join(mobj.groups()) if mobj else None

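        # Only the HDS (.f4m) manifests in the feed are used; each one is
        # expanded into its individual formats.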
        formats = []
        for vid_format in ep_json['media-group']['media-content']:
            src = vid_format['@attributes']['url']
            if '.f4m' in src:
                formats.extend(self._extract_f4m_formats(src, video_id))

        self._sort_formats(formats)
        video_subtitles = self.extract_subtitles(video_id, consumer_secret)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'formats': formats,
            'subtitles': video_subtitles,
        }

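    # The API endpoints require a consumer secret ('cs') that the site embeds
    # in one of its static JavaScript files; scrape it from there.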
    def _get_consumer_secret(self, video_id):
        df_js = self._download_webpage(
            'http://www.dramafever.com/static/126960d/v2/js/plugins/jquery.threadedcomments.js', video_id)
        return self._search_regex(r"'cs': '([0-9a-zA-Z]+)'", df_js, 'consumer secret')

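    # The episode listing API is paginated; walk all pages and optionally
    # filter the episodes (used by the subtitle lookup and the series IE).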
    def _get_episodes(self, series_id, consumer_secret, episode_filter=None):
        _PAGE_SIZE = 60

        curr_page = 1
        max_pages = curr_page + 1
        results = []
        while max_pages >= curr_page:
            series = self._download_json(
                'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_size=%d&page_number=%d'
                % (consumer_secret, series_id, _PAGE_SIZE, curr_page),
                series_id, note='Downloading series json page #%d' % curr_page)
            max_pages = series['num_pages']
            results.extend(
                ep for ep in series['value']
                if episode_filter is None or episode_filter(ep))
            curr_page += 1
        return results

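    # Subtitles are not part of the episode feed; look the episode up in the
    # series listing and use its 'subfile' URL (English SRT only).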
    def _get_subtitles(self, video_id, consumer_secret):
        info = self._get_episodes(
            video_id.split('.')[0], consumer_secret,
            episode_filter=lambda x: x['guid'] == video_id)

        if len(info) == 1 and info[0]['subfile'] != '':
            return {'en': [{'url': info[0]['subfile'], 'ext': 'srt'}]}
        return None


class DramaFeverSeriesIE(DramaFeverIE):
    IE_NAME = 'dramafever:series'
    _VALID_URL = r'^https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+)/\d*[a-zA-Z_][a-zA-Z0-9_]*/'
    _TESTS = [{
        'url': 'http://www.dramafever.com/drama/4512/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512',
            'title': 'Cooking with Shin',
            'description': 'Professional chef and cooking instructor Shin Kim takes some of the delicious dishes featured in your favorite dramas and shows you how to make them right at home.',
        },
        'playlist_count': 4,
    }, {
        'url': 'http://www.dramafever.com/drama/124/IRIS/',
        'info_dict': {
            'id': '124',
            'title': 'IRIS',
            'description': 'Lee Byung Hun and Kim Tae Hee star in this powerhouse drama and ratings megahit of action, intrigue and romance.',
        },
        'playlist_count': 20,
    }]

    def _real_extract(self, url):
        series_id = self._match_id(url)
        consumer_secret = self._get_consumer_secret(series_id)

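        # Series metadata comes from the same /api/4/ endpoints and therefore
        # also needs the scraped consumer secret.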
        series_json = self._download_json(
            'http://www.dramafever.com/api/4/series/query/?cs=%s&series_id=%s'
            % (consumer_secret, series_id),
            series_id, note='Downloading series metadata')['series'][series_id]

        title = series_json['name']
        description = series_json['description_short']

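        # Build one playlist entry per episode, deferring extraction of each
        # video to DramaFeverIE via the episode GUID.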
        episodes = self._get_episodes(series_id, consumer_secret)
        entries = []
        for ep in episodes:
            entries.append(self.url_result(
                'http://www.dramafever.com%s' % ep['episode_url'],
                'DramaFever', ep['guid']))
        return self.playlist_result(entries, series_id, title, description)