Unify coding cookie
diff --git a/youtube_dl/extractor/dramafever.py b/youtube_dl/extractor/dramafever.py
index 8fac99cc54c5c30eac13eb0311a1b2b0527d963f..c115956121a242920ec8016e8c9f3558c34060c6 100644
-# encoding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
-import re
+import itertools
+
+from .amp import AMPIE
+from ..compat import (
+    compat_HTTPError,
+    compat_urlparse,
+)
+from ..utils import (
+    ExtractorError,
+    clean_html,
+    int_or_none,
+    sanitized_Request,
+    urlencode_postdata
+)
+
+
+class DramaFeverBaseIE(AMPIE):
+    _LOGIN_URL = 'https://www.dramafever.com/accounts/login/'
+    _NETRC_MACHINE = 'dramafever'
+
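+    # Hard-coded fallback, used if the consumer secret cannot be scraped from main.js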
+    _CONSUMER_SECRET = 'DA59dtVXYLxajktV'
+
+    _consumer_secret = None
+
+    def _get_consumer_secret(self):
+        mainjs = self._download_webpage(
+            'http://www.dramafever.com/static/51afe95/df2014/scripts/main.js',
+            None, 'Downloading main.js', fatal=False)
+        if not mainjs:
+            return self._CONSUMER_SECRET
+        return self._search_regex(
+            r"var\s+cs\s*=\s*'([^']+)'", mainjs,
+            'consumer secret', default=self._CONSUMER_SECRET)
+
+    def _real_initialize(self):
+        self._login()
+        self._consumer_secret = self._get_consumer_secret()
+
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+
+        login_form = {
+            'username': username,
+            'password': password,
+        }
+
+        request = sanitized_Request(
+            self._LOGIN_URL, urlencode_postdata(login_form))
+        response = self._download_webpage(
+            request, None, 'Logging in as %s' % username)
 
-from .common import InfoExtractor
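+        # A logged-in page contains a logout link; if neither pattern is present, the login failed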
+        if all(logout_pattern not in response
+               for logout_pattern in ['href="/accounts/logout/"', '>Log out<']):
+            error = self._html_search_regex(
+                r'(?s)class="hidden-xs prompt"[^>]*>(.+?)<',
+                response, 'error message', default=None)
+            if error:
+                raise ExtractorError('Unable to login: %s' % error, expected=True)
+            raise ExtractorError('Unable to log in')
 
 
-class DramaFeverIE(InfoExtractor):
+class DramaFeverIE(DramaFeverBaseIE):
     IE_NAME = 'dramafever'
-    _VALID_URL = r'^https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+/[0-9]+)/'
+    _VALID_URL = r'https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+/[0-9]+)(?:/|$)'
     _TESTS = [{
         'url': 'http://www.dramafever.com/drama/4512/1/Cooking_with_Shin/',
         'info_dict': {
             'id': '4512.1',
-            'ext': 'flv',
+            'ext': 'mp4',
             'title': 'Cooking with Shin 4512.1',
+            'description': 'md5:a8eec7942e1664a6896fcd5e1287bfd0',
+            'episode': 'Episode 1',
+            'episode_number': 1,
+            'thumbnail': r're:^https?://.*\.jpg',
+            'timestamp': 1404336058,
             'upload_date': '20140702',
-            'description': 'Served at all special occasions and featured in the hit drama Heirs, Shin cooks Red Bean Rice.',
-        }
+            'duration': 343,
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+    }, {
+        'url': 'http://www.dramafever.com/drama/4826/4/Mnet_Asian_Music_Awards_2015/?ap=1',
+        'info_dict': {
+            'id': '4826.4',
+            'ext': 'mp4',
+            'title': 'Mnet Asian Music Awards 2015 4826.4',
+            'description': 'md5:3ff2ee8fedaef86e076791c909cf2e91',
+            'episode': 'Mnet Asian Music Awards 2015 - Part 3',
+            'episode_number': 4,
+            'thumbnail': r're:^https?://.*\.jpg',
+            'timestamp': 1450213200,
+            'upload_date': '20151215',
+            'duration': 5602,
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
     }]
 
     def _real_extract(self, url):
-        video_id = self._match_id(url).replace("/", ".")
-
-        consumer_secret = self._get_consumer_secret(video_id)
-
-        ep_json = self._download_json(
-            "http://www.dramafever.com/amp/episode/feed.json?guid=%s" % video_id,
-            video_id, note='Downloading episode metadata',
-            errnote="Video may not be available for your location")["channel"]["item"]
-
-        title = ep_json["media-group"]["media-title"]
-        description = ep_json["media-group"]["media-description"]
-        thumbnail = ep_json["media-group"]["media-thumbnail"]["@attributes"]["url"]
-        duration = int(ep_json["media-group"]["media-content"][0]["@attributes"]["duration"])
-        mobj = re.match(r"([0-9]{4})-([0-9]{2})-([0-9]{2})", ep_json["pubDate"])
-        upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3) if mobj is not None else None
-
-        formats = []
-        for vid_format in ep_json["media-group"]["media-content"]:
-            src = vid_format["@attributes"]["url"]
-            if '.f4m' in src:
-                formats.extend(self._extract_f4m_formats(src, video_id))
-
-        self._sort_formats(formats)
-        video_subtitles = self.extract_subtitles(video_id, consumer_secret)
-
-        return {
-            'id': video_id,
-            'title': title,
-            'description': description,
-            'thumbnail': thumbnail,
-            'upload_date': upload_date,
-            'duration': duration,
-            'formats': formats,
-            'subtitles': video_subtitles,
-        }
-
-    def _get_consumer_secret(self, video_id):
-        df_js = self._download_webpage(
-            "http://www.dramafever.com/static/126960d/v2/js/plugins/jquery.threadedcomments.js", video_id)
-        return self._search_regex(r"'cs': '([0-9a-zA-Z]+)'", df_js, "cs")
-
-    def _get_episodes(self, series_id, consumer_secret, episode_filter=None):
-        _PAGE_SIZE = 60
-
-        curr_page = 1
-        max_pages = curr_page + 1
-        results = []
-        while max_pages >= curr_page:
-            page_url = "http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_size=%d&page_number=%d" % \
-                       (consumer_secret, series_id, _PAGE_SIZE, curr_page)
-            series = self._download_json(
-                page_url, series_id, note="Downloading series json page #%d" % curr_page)
-            max_pages = series['num_pages']
-            results.extend([ep for ep in series['value'] if episode_filter is None or episode_filter(ep)])
-            curr_page += 1
-        return results
-
-    def _get_subtitles(self, video_id, consumer_secret):
-
-        def match_episode(ep):
-            return ep['guid'] == video_id
-
-        res = None
-        info = self._get_episodes(
-            video_id.split(".")[0], consumer_secret, episode_filter=match_episode)
-        if len(info) == 1 and info[0]['subfile'] != '':
-            res = {'en': [{'url': info[0]['subfile'], 'ext': 'srt'}]}
-        return res
-
-
-class DramaFeverSeriesIE(DramaFeverIE):
+        video_id = self._match_id(url).replace('/', '.')
+
+        try:
+            info = self._extract_feed_info(
+                'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id)
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError):
+                raise ExtractorError(
+                    'Currently unavailable in your country.', expected=True)
+            raise
+
+        series_id, episode_number = video_id.split('.')
+        episode_info = self._download_json(
+            # We only need info for a single episode, so restrict the page size to one
+            # and use the episode number as the page number
+            'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_number=%s&page_size=1'
+            % (self._consumer_secret, series_id, episode_number),
+            video_id, 'Downloading episode info JSON', fatal=False)
+        if episode_info:
+            value = episode_info.get('value')
+            if isinstance(value, list):
+                for v in value:
+                    if v.get('type') == 'Episode':
+                        subfile = v.get('subfile') or v.get('new_subfile')
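+                        # 'http://www.dramafever.com/st/' appears to be a placeholder meaning no subtitles exist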
+                        if subfile and subfile != 'http://www.dramafever.com/st/':
+                            info.setdefault('subtitles', {}).setdefault('English', []).append({
+                                'ext': 'srt',
+                                'url': subfile,
+                            })
+                        episode_number = int_or_none(v.get('number'))
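+                        # Fall back to a generic 'Episode N' title if the entry has no title of its own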
+                        episode_fallback = 'Episode'
+                        if episode_number:
+                            episode_fallback += ' %d' % episode_number
+                        info['episode'] = v.get('title') or episode_fallback
+                        info['episode_number'] = episode_number
+                        break
+
+        return info
+
+
+class DramaFeverSeriesIE(DramaFeverBaseIE):
     IE_NAME = 'dramafever:series'
-    _VALID_URL = r'^https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+)/\d*[a-zA-Z_][a-zA-Z0-9_]*/'
+    _VALID_URL = r'https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+)(?:/(?:(?!\d+(?:/|$)).+)?)?$'
     _TESTS = [{
         'url': 'http://www.dramafever.com/drama/4512/Cooking_with_Shin/',
         'info_dict': {
             'id': '4512',
             'title': 'Cooking with Shin',
-            'description': 'Professional chef and cooking instructor Shin Kim takes some of the delicious dishes featured in your favorite dramas and shows you how to make them right at home.',
+            'description': 'md5:84a3f26e3cdc3fb7f500211b3593b5c1',
         },
         'playlist_count': 4,
     }, {
@@ -107,25 +162,38 @@ class DramaFeverSeriesIE(DramaFeverIE):
         'info_dict': {
             'id': '124',
             'title': 'IRIS',
-            'description': 'Lee Byung Hun and Kim Tae Hee star in this powerhouse drama and ratings megahit of action, intrigue and romance.',
+            'description': 'md5:b3a30e587cf20c59bd1c01ec0ee1b862',
         },
         'playlist_count': 20,
     }]
 
+    _PAGE_SIZE = 60  # max is 60 (see http://api.drama9.com/#get--api-4-episode-series-)
+
     def _real_extract(self, url):
         series_id = self._match_id(url)
-        consumer_secret = self._get_consumer_secret(series_id)
 
-        series_json = self._download_json(
-            "http://www.dramafever.com/api/4/series/query/?cs=%s&series_id=%s" % (consumer_secret, series_id),
-            series_id, note='Downloading series metadata')["series"][series_id]
+        series = self._download_json(
+            'http://www.dramafever.com/api/4/series/query/?cs=%s&series_id=%s'
+            % (self._consumer_secret, series_id),
+            series_id, 'Downloading series JSON')['series'][series_id]
 
-        title = series_json["name"]
-        description = series_json["description_short"]
+        title = clean_html(series['name'])
+        description = clean_html(series.get('description') or series.get('description_short'))
 
-        episodes = self._get_episodes(series_id, consumer_secret)
         entries = []
-        for ep in episodes:
-            entries.append(self.url_result(
-                'http://www.dramafever.com%s' % ep['episode_url'], 'DramaFever', ep['guid']))
+        for page_num in itertools.count(1):
+            episodes = self._download_json(
+                'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_size=%d&page_number=%d'
+                % (self._consumer_secret, series_id, self._PAGE_SIZE, page_num),
+                series_id, 'Downloading episodes JSON page #%d' % page_num)
+            for episode in episodes.get('value', []):
+                episode_url = episode.get('episode_url')
+                if not episode_url:
+                    continue
+                entries.append(self.url_result(
+                    compat_urlparse.urljoin(url, episode_url),
+                    'DramaFever', episode.get('guid')))
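+            # Stop once the page count reported by the API has been reached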
+            if page_num == episodes['num_pages']:
+                break
+
         return self.playlist_result(entries, series_id, title, description)