[animeondemand][wip] Add extractor (#8518)
[youtube-dl] / youtube_dl / extractor / animeondemand.py
1 from __future__ import unicode_literals
2
3 import re
4
5 from .common import InfoExtractor
6 from ..compat import compat_urlparse
7 from ..utils import (
8     determine_ext,
9     encode_dict,
10     ExtractorError,
11     sanitized_Request,
12     urlencode_postdata,
13 )
14
15
class AnimeOnDemandIE(InfoExtractor):
    """Extractor for anime-on-demand.de anime pages.

    Logs in when credentials are configured, activates the site's HTML5
    beta player if the page does not already expose playlists, and returns
    a playlist of all episodes (plus teaser clips, when present) for the
    given anime id.
    """
    _VALID_URL = r'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)'
    _LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
    _APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
    _TEST = {
        'url': 'https://www.anime-on-demand.de/anime/161',
        'info_dict': {
            'id': '161',
            'title': 'Grimgar, Ashes and Illusions (OmU)',
            'description': 'md5:6681ce3c07c7189d255ac6ab23812d31',
        },
        'playlist_mincount': 4,
    }

    def _login(self):
        """Log in with user-supplied credentials, if any were configured.

        Raises:
            ExtractorError: when the site rejects the login attempt.
        """
        (username, password) = self._get_login_info()
        if username is None:
            # No credentials configured; proceed anonymously.
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        # Carry over all hidden fields (CSRF token etc.) from the form.
        login_form = self._form_hidden_inputs('new_user', login_page)

        login_form.update({
            'user[login]': username,
            'user[password]': password,
        })

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
            'post url', default=self._LOGIN_URL, group='url')

        if not post_url.startswith('http'):
            # The form action may be relative; resolve it against the
            # login page URL.
            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

        request = sanitized_Request(
            post_url, urlencode_postdata(encode_dict(login_form)))
        request.add_header('Referer', self._LOGIN_URL)

        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
            # Neither logout marker is present, so the login failed;
            # surface the site-provided error message when available.
            error = self._search_regex(
                r'<p class="alert alert-danger">(.+?)</p>',
                response, 'error', default=None)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')

    def _real_initialize(self):
        # Login (when configured) must happen before any extraction.
        self._login()

    def _real_extract(self, url):
        """Extract all episodes (and teasers) of an anime as a playlist."""
        anime_id = self._match_id(url)

        webpage = self._download_webpage(url, anime_id)

        if 'data-playlist=' not in webpage:
            # Episode playlists are only exposed to HTML5-beta accounts;
            # opt in, then reload the page.
            self._download_webpage(
                self._APPLY_HTML5_URL, anime_id,
                'Activating HTML5 beta', 'Unable to apply HTML5 beta')
            webpage = self._download_webpage(url, anime_id)

        # Required by the AJAX playlist endpoint below.
        csrf_token = self._html_search_meta(
            'csrf-token', webpage, 'csrf token', fatal=True)

        anime_title = self._html_search_regex(
            r'(?s)<h1[^>]+itemprop="name"[^>]*>(.+?)</h1>',
            webpage, 'anime name')
        anime_description = self._html_search_regex(
            r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>',
            webpage, 'anime description', default=None)

        entries = []

        for episode_html in re.findall(r'(?s)<h3[^>]+class="episodebox-title".+?>Episodeninhalt<', webpage):
            m = re.search(
                r'class="episodebox-title"[^>]+title="Episode (?P<number>\d+) - (?P<title>.+?)"', episode_html)
            if not m:
                continue

            episode_number = int(m.group('number'))
            episode_title = m.group('title')
            video_id = 'episode-%d' % episode_number

            # Metadata shared by the episode entry and its teaser entry.
            common_info = {
                'id': video_id,
                'series': anime_title,
                'episode': episode_title,
                'episode_number': episode_number,
            }

            title = description = None
            formats = []

            playlist_url = self._search_regex(
                r'data-playlist=(["\'])(?P<url>.+?)\1',
                episode_html, 'data playlist', default=None, group='url')
            if playlist_url:
                # The playlist endpoint requires AJAX-style headers and the
                # page's CSRF token.
                request = sanitized_Request(
                    compat_urlparse.urljoin(url, playlist_url),
                    headers={
                        'X-Requested-With': 'XMLHttpRequest',
                        'X-CSRF-Token': csrf_token,
                        'Referer': url,
                        'Accept': 'application/json, text/javascript, */*; q=0.01',
                    })

                playlist = self._download_json(
                    request, video_id, 'Downloading playlist JSON', fatal=False)
                if playlist:
                    playlist = playlist['playlist'][0]
                    title = playlist['title']
                    description = playlist.get('description')
                    for source in playlist.get('sources', []):
                        file_ = source.get('file')
                        if file_ and determine_ext(file_) == 'm3u8':
                            # Accumulate formats across ALL m3u8 sources;
                            # plain assignment here would keep only the
                            # last source's formats.
                            formats.extend(self._extract_m3u8_formats(
                                file_, video_id, 'mp4',
                                entry_protocol='m3u8_native', m3u8_id='hls'))

            if formats:
                self._sort_formats(formats)
                f = common_info.copy()
                f.update({
                    'title': title,
                    'description': description,
                    'formats': formats,
                })
                entries.append(f)

            # Teasers are exposed as separate dialog links next to the
            # episode box.
            m = re.search(
                r'data-dialog-header=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P<href>.+?)\3[^>]*>Teaser<',
                episode_html)
            if m:
                f = common_info.copy()
                f.update({
                    'id': '%s-teaser' % f['id'],
                    'title': m.group('title'),
                    'url': compat_urlparse.urljoin(url, m.group('href')),
                })
                entries.append(f)

        return self.playlist_result(entries, anime_id, anime_title, anime_description)