[pornhub] Improve extraction and extract all formats (closes #12166, closes #15891...
[youtube-dl] / youtube_dl / extractor / pornhub.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import functools
5 import itertools
6 import operator
7 import re
8
9 from .common import InfoExtractor
10 from ..compat import (
11     compat_HTTPError,
12     compat_str,
13 )
14 from ..utils import (
15     ExtractorError,
16     int_or_none,
17     js_to_json,
18     orderedSet,
19     remove_quotes,
20     str_to_int,
21 )
22
23
class PornHubIE(InfoExtractor):
    IE_DESC = 'PornHub and Thumbzilla'
    # Matches watch pages (view_video.php / video/show) and embeds on any
    # pornhub.com subdomain, plus thumbzilla.com video pages; the id group
    # captures the viewkey (digits or a ph-prefixed hex-ish token).
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[^/]+\.)?pornhub\.com/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
                            (?:www\.)?thumbzilla\.com/video/
                        )
                        (?P<id>[\da-z]+)
                    '''
    _TESTS = [{
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '1e19b41231a02eba417839222ac9d58e',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'uploader': 'Babes',
            'duration': 361,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
    }, {
        # non-ASCII title
        'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
        'info_dict': {
            'id': '1331683002',
            'ext': 'mp4',
            'title': '重庆婷婷女王足交',
            'uploader': 'Unknown',
            'duration': 1753,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
        'only_matching': True,
    }, {
        # removed at the request of cam4.com
        'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
        'only_matching': True,
    }, {
        # removed at the request of the copyright owner
        'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
        'only_matching': True,
    }, {
        # removed by uploader
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
        'only_matching': True,
    }, {
        # private video
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
        'only_matching': True,
    }, {
        'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Find all PornHub embed iframe URLs in an arbitrary third-party
        # webpage (used by other extractors to discover embedded players).
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/[\da-z]+)',
            webpage)

    def _extract_count(self, pattern, webpage, name):
        # Best-effort count extraction: returns an int, or None when the
        # pattern does not match or the matched text is not a number.
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Bypass the age confirmation interstitial.
        self._set_cookie('pornhub.com', 'age_verified', '1')

        def dl_webpage(platform):
            # The `platform` cookie selects the page variant served
            # ('pc' or 'tv'); the TV variant is used as a fallback source
            # of video URLs below.
            self._set_cookie('pornhub.com', 'platform', platform)
            return self._download_webpage(
                'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id,
                video_id, 'Downloading %s webpage' % platform)

        webpage = dl_webpage('pc')

        # Removed/private/blocked videos show an explanatory message in a
        # div with a "removed" or "userMessageSection" class; surface it
        # instead of failing with a cryptic regex error.
        error_msg = self._html_search_regex(
            r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
            webpage, 'error message', default=None, group='error')
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

        # video_title from flashvars contains whitespace instead of non-ASCII (see
        # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
        # on that anymore.
        title = self._html_search_meta(
            'twitter:title', webpage, default=None) or self._search_regex(
            (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
             r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
             r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
            webpage, 'title', group='title')

        # (url, height) pairs collected from several sources; the set keeps
        # the list deduplicated across those sources.
        video_urls = []
        video_urls_set = set()

        # Primary source: the flashvars JS object embedded in the PC page,
        # whose mediaDefinitions entries carry direct URLs plus a quality
        # (height) value.
        flashvars = self._parse_json(
            self._search_regex(
                r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
            video_id)
        if flashvars:
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
            media_definitions = flashvars.get('mediaDefinitions')
            if isinstance(media_definitions, list):
                for definition in media_definitions:
                    if not isinstance(definition, dict):
                        continue
                    video_url = definition.get('videoUrl')
                    if not video_url or not isinstance(video_url, compat_str):
                        continue
                    if video_url in video_urls_set:
                        continue
                    video_urls_set.add(video_url)
                    video_urls.append(
                        (video_url, int_or_none(definition.get('quality'))))
        else:
            thumbnail, duration = [None] * 2

        # Fallback: the TV page variant builds the video URL ("mediastring")
        # from a series of obfuscated JS var assignments; evaluate them with
        # a minimal interpreter supporting string literals, previously
        # assigned vars and '+' concatenation.
        if not video_urls:
            tv_webpage = dl_webpage('tv')

            assignments = self._search_regex(
                r'(var.+?mediastring.+?)</script>', tv_webpage,
                'encoded url').split(';')

            js_vars = {}

            def parse_js_value(inp):
                # Strip /* ... */ comments, then recursively resolve '+'
                # concatenations; leaf values are either known var names or
                # quoted string literals.
                inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
                if '+' in inp:
                    inps = inp.split('+')
                    return functools.reduce(
                        operator.concat, map(parse_js_value, inps))
                inp = inp.strip()
                if inp in js_vars:
                    return js_vars[inp]
                return remove_quotes(inp)

            for assn in assignments:
                assn = assn.strip()
                if not assn:
                    continue
                assn = re.sub(r'var\s+', '', assn)
                vname, value = assn.split('=', 1)
                js_vars[vname] = parse_js_value(value)

            video_url = js_vars['mediastring']
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)

        # Additional source: explicit download button links on the PC page.
        for mobj in re.finditer(
                r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
                webpage):
            video_url = mobj.group('url')
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)

        formats = []
        for video_url, height in video_urls:
            tbr = None
            # URLs typically encode quality as e.g. "720P_4000K"; recover
            # height (when not already known) and total bitrate from it.
            mobj = re.search(r'(?P<height>\d+)[pP]?_(?P<tbr>\d+)[kK]', video_url)
            if mobj:
                if not height:
                    height = int(mobj.group('height'))
                tbr = int(mobj.group('tbr'))
            formats.append({
                'url': video_url,
                'format_id': '%dp' % height if height else None,
                'height': height,
                'tbr': tbr,
            })
        self._sort_formats(formats)

        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a\b[^>]+\bhref=["\']/(?:user|channel)s/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')

        # Tags/categories live in a page_params JS object (not strict JSON,
        # hence js_to_json); both are comma-separated strings there.
        page_params = self._parse_json(self._search_regex(
            r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})',
            webpage, 'page parameters', group='data', default='{}'),
            video_id, transform_source=js_to_json, fatal=False)
        tags = categories = None
        if page_params:
            tags = page_params.get('tags', '').split(',')
            categories = page_params.get('categories', '').split(',')

        return {
            'id': video_id,
            'uploader': video_uploader,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
            'tags': tags,
            'categories': categories,
        }
260
261
class PornHubPlaylistBaseIE(InfoExtractor):
    """Shared logic for PornHub playlist-like pages (playlists and
    user/channel video listings): scrape video links out of a page and
    wrap them into a playlist result.
    """

    def _extract_entries(self, webpage):
        """Return url_result entries for every video linked in *webpage*,
        deduplicated and delegated to PornHubIE."""
        # Only process container div with main playlist content skipping
        # drop-down menu that uses similar pattern for videos (see
        # https://github.com/rg3/youtube-dl/issues/11594).
        container = self._search_regex(
            r'(?s)(<div[^>]+class=["\']container.+)', webpage,
            'container', default=webpage)

        return [
            self.url_result(
                'http://www.pornhub.com/%s' % video_url,
                PornHubIE.ie_key(), video_title=title)
            for video_url, title in orderedSet(re.findall(
                r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
                container))
        ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        entries = self._extract_entries(webpage)

        # Playlist metadata is embedded as a JS object. With fatal=False
        # _parse_json returns None when the captured text is not valid
        # JSON, so fall back to {} to keep the .get() calls below from
        # raising AttributeError.
        playlist = self._parse_json(
            self._search_regex(
                r'(?:playlistObject|PLAYLIST_VIEW)\s*=\s*({.+?});', webpage,
                'playlist', default='{}'),
            playlist_id, fatal=False) or {}
        title = playlist.get('title') or self._search_regex(
            r'>Videos\s+in\s+(.+?)\s+[Pp]laylist<', webpage, 'title', fatal=False)

        return self.playlist_result(
            entries, playlist_id, title, playlist.get('description'))
297
298
class PornHubPlaylistIE(PornHubPlaylistBaseIE):
    # Numeric /playlist/<id> pages on any pornhub.com subdomain; all
    # extraction logic is inherited from PornHubPlaylistBaseIE.
    _VALID_URL = r'https?://(?:[^/]+\.)?pornhub\.com/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/4667351',
        'info_dict': {
            'id': '4667351',
            'title': 'Nataly Hot',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'https://de.pornhub.com/playlist/4667351',
        'only_matching': True,
    }]
312
313
class PornHubUserVideosIE(PornHubPlaylistBaseIE):
    # User and channel video listings; paginated via the `page` query
    # parameter until the site answers 404 or a page yields no entries.
    _VALID_URL = r'https?://(?:[^/]+\.)?pornhub\.com/(?:user|channel)s/(?P<id>[^/]+)/videos'
    _TESTS = [{
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'info_dict': {
            'id': 'zoe_ph',
        },
        'playlist_mincount': 171,
    }, {
        'url': 'http://www.pornhub.com/users/rushandlia/videos',
        'only_matching': True,
    }, {
        # default sorting as Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos',
        'info_dict': {
            'id': 'povd',
        },
        'playlist_mincount': 293,
    }, {
        # Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=ra',
        'only_matching': True,
    }, {
        # Most Recent Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=da',
        'only_matching': True,
    }, {
        # Most Viewed Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=vi',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        user_id = self._match_id(url)

        entries = []
        page_num = 1
        while True:
            try:
                webpage = self._download_webpage(
                    url, user_id, 'Downloading page %d' % page_num,
                    query={'page': page_num})
            except ExtractorError as e:
                # A 404 past the last page marks the end of pagination;
                # any other error is propagated.
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                    break
                raise
            page_entries = self._extract_entries(webpage)
            if not page_entries:
                # An empty page also terminates pagination.
                break
            entries.extend(page_entries)
            page_num += 1

        return self.playlist_result(entries, user_id)