[animeondemand] Expand episode title regex (Closes #8875)
[youtube-dl] / youtube_dl / extractor / animeondemand.py
1 from __future__ import unicode_literals
2
3 import re
4
5 from .common import InfoExtractor
6 from ..compat import compat_urlparse
7 from ..utils import (
8     determine_ext,
9     encode_dict,
10     ExtractorError,
11     sanitized_Request,
12     urlencode_postdata,
13 )
14
15
class AnimeOnDemandIE(InfoExtractor):
    """Extractor for anime series pages on anime-on-demand.de.

    Logs in (credentials come from --username/--password or the .netrc
    machine 'animeondemand'), activates the site's HTML5 beta player when
    necessary, then returns a playlist containing every episode (and any
    teaser clips) found on the anime page.
    """
    _VALID_URL = r'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)'
    _LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
    _APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
    _NETRC_MACHINE = 'animeondemand'
    _TEST = {
        'url': 'https://www.anime-on-demand.de/anime/161',
        'info_dict': {
            'id': '161',
            'title': 'Grimgar, Ashes and Illusions (OmU)',
            'description': 'md5:6681ce3c07c7189d255ac6ab23812d31',
        },
        'playlist_mincount': 4,
    }

    def _login(self):
        """Log in with the configured credentials; no-op when none are set.

        Raises:
            ExtractorError: when the site rejects the login attempt.
        """
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        # Carry over hidden inputs (CSRF token etc.) from the login form.
        login_form = self._form_hidden_inputs('new_user', login_page)

        login_form.update({
            'user[login]': username,
            'user[password]': password,
        })

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
            'post url', default=self._LOGIN_URL, group='url')

        # The form action may be a relative path; resolve it against the
        # login page URL.
        if not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

        request = sanitized_Request(
            post_url, urlencode_postdata(encode_dict(login_form)))
        request.add_header('Referer', self._LOGIN_URL)

        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        # A successful login leaves a logout link somewhere in the page;
        # if neither marker is present, surface the site's error message.
        if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
            error = self._search_regex(
                r'<p class="alert alert-danger">(.+?)</p>',
                response, 'error', default=None)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        anime_id = self._match_id(url)

        webpage = self._download_webpage(url, anime_id)

        # Streams are only embedded for accounts with the HTML5 beta player
        # enabled; activate it once and re-fetch the page.
        if 'data-playlist=' not in webpage:
            self._download_webpage(
                self._APPLY_HTML5_URL, anime_id,
                'Activating HTML5 beta', 'Unable to apply HTML5 beta')
            webpage = self._download_webpage(url, anime_id)

        csrf_token = self._html_search_meta(
            'csrf-token', webpage, 'csrf token', fatal=True)

        anime_title = self._html_search_regex(
            r'(?s)<h1[^>]+itemprop="name"[^>]*>(.+?)</h1>',
            webpage, 'anime name')
        anime_description = self._html_search_regex(
            r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>',
            webpage, 'anime description', default=None)

        entries = []

        for episode_html in re.findall(r'(?s)<h3[^>]+class="episodebox-title".+?>Episodeninhalt<', webpage):
            # Both regular episodes and movies ("Film") are listed; skip
            # boxes whose title does not match either form.
            m = re.search(
                r'class="episodebox-title"[^>]+title="(?:Episode|Film)\s*(?P<number>\d+)\s*-\s*(?P<title>.+?)"', episode_html)
            if not m:
                continue

            episode_number = int(m.group('number'))
            episode_title = m.group('title')
            video_id = 'episode-%d' % episode_number

            common_info = {
                'id': video_id,
                'series': anime_title,
                'episode': episode_title,
                'episode_number': episode_number,
            }

            formats = []

            playlist_url = self._search_regex(
                r'data-playlist=(["\'])(?P<url>.+?)\1',
                episode_html, 'data playlist', default=None, group='url')
            if playlist_url:
                # The playlist endpoint requires the AJAX headers and the
                # page's CSRF token, otherwise it responds with an error.
                request = sanitized_Request(
                    compat_urlparse.urljoin(url, playlist_url),
                    headers={
                        'X-Requested-With': 'XMLHttpRequest',
                        'X-CSRF-Token': csrf_token,
                        'Referer': url,
                        'Accept': 'application/json, text/javascript, */*; q=0.01',
                    })

                playlist = self._download_json(
                    request, video_id, 'Downloading playlist JSON', fatal=False)
                if playlist:
                    # The download above is best-effort (fatal=False), so be
                    # equally tolerant of unexpected JSON shapes instead of
                    # crashing the whole playlist on one bad episode.
                    playlist = (playlist.get('playlist') or [{}])[0]
                    title = playlist.get('title')
                    description = playlist.get('description')
                    for source in playlist.get('sources', []):
                        file_ = source.get('file')
                        if file_ and determine_ext(file_) == 'm3u8':
                            formats = self._extract_m3u8_formats(
                                file_, video_id, 'mp4',
                                entry_protocol='m3u8_native', m3u8_id='hls')

            if formats:
                # Extractors are expected to sort formats before returning.
                self._sort_formats(formats)
                f = common_info.copy()
                f.update({
                    'title': title,
                    'description': description,
                    'formats': formats,
                })
                entries.append(f)

            # Teasers are plain links next to the episode box; emit them as
            # separate entries with a derived id.
            m = re.search(
                r'data-dialog-header=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P<href>.+?)\3[^>]*>Teaser<',
                episode_html)
            if m:
                f = common_info.copy()
                f.update({
                    'id': '%s-teaser' % f['id'],
                    'title': m.group('title'),
                    'url': compat_urlparse.urljoin(url, m.group('href')),
                })
                entries.append(f)

        return self.playlist_result(entries, anime_id, anime_title, anime_description)