[pornhub] Apply scrape detection bypass for all extractors
[youtube-dl] / youtube_dl / extractor / pornhub.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import functools
5 import itertools
6 import operator
7 import re
8
9 from .common import InfoExtractor
10 from ..compat import (
11     compat_HTTPError,
12     compat_str,
13     compat_urllib_request,
14 )
15 from .openload import PhantomJSwrapper
16 from ..utils import (
17     ExtractorError,
18     int_or_none,
19     js_to_json,
20     orderedSet,
21     remove_quotes,
22     str_to_int,
23     url_or_none,
24 )
25
26
class PornHubBaseIE(InfoExtractor):
    def _download_webpage_handle(self, *args, **kwargs):
        """Download a webpage, transparently bypassing PornHub's JS-based
        scrape detection by running the challenge page through PhantomJS
        (which sets the required RNKEY cookie) and retrying once."""
        def fetch():
            return super(PornHubBaseIE, self)._download_webpage_handle(
                *args, **kwargs)

        webpage, urlh = fetch()

        # Markers that identify the anti-scraping challenge page rather
        # than the real content.
        detection_patterns = (
            r'<body\b[^>]+\bonload=["\']go\(\)',
            r'document\.cookie\s*=\s*["\']RNKEY=',
            r'document\.location\.reload\(true\)',
        )
        for pattern in detection_patterns:
            if re.search(pattern, webpage):
                break
        else:
            # No challenge encountered - return the page as-is.
            return webpage, urlh

        # Recover the plain URL from either a Request object or a string.
        url_or_request = args[0]
        if isinstance(url_or_request, compat_urllib_request.Request):
            url = url_or_request.get_full_url()
        else:
            url = url_or_request
        # Let PhantomJS execute the challenge JavaScript, then retry.
        phantom = PhantomJSwrapper(self, required_version='2.0')
        phantom.get(url, html=webpage)
        return fetch()
47
48
class PornHubIE(PornHubBaseIE):
    IE_DESC = 'PornHub and Thumbzilla'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
                            (?:www\.)?thumbzilla\.com/video/
                        )
                        (?P<id>[\da-z]+)
                    '''
    _TESTS = [{
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '1e19b41231a02eba417839222ac9d58e',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'uploader': 'Babes',
            'upload_date': '20130628',
            'duration': 361,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
    }, {
        # non-ASCII title
        'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
        'info_dict': {
            'id': '1331683002',
            'ext': 'mp4',
            'title': '重庆婷婷女王足交',
            'uploader': 'Unknown',
            'upload_date': '20150213',
            'duration': 1753,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # subtitles
        'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5af5fef7c2aa7',
        'info_dict': {
            'id': 'ph5af5fef7c2aa7',
            'ext': 'mp4',
            'title': 'BFFS - Cute Teen Girls Share Cock On the Floor',
            'uploader': 'BFFs',
            'duration': 622,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
            'subtitles': {
                'en': [{
                    "ext": 'srt'
                }]
            },
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
        'only_matching': True,
    }, {
        # removed at the request of cam4.com
        'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
        'only_matching': True,
    }, {
        # removed at the request of the copyright owner
        'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
        'only_matching': True,
    }, {
        # removed by uploader
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
        'only_matching': True,
    }, {
        # private video
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
        'only_matching': True,
    }, {
        'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.net/view_video.php?viewkey=203640933',
        'only_matching': True,
    }]

    # Called by the generic extractor to discover embedded PornHub iframes
    # inside arbitrary third-party pages.
    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.(?:com|net)/embed/[\da-z]+)',
            webpage)

    def _extract_count(self, pattern, webpage, name):
        # Non-fatal helper: returns None when the pattern is absent or the
        # matched text cannot be parsed as an integer.
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Thumbzilla URLs have no <host> group; fall back to pornhub.com.
        host = mobj.group('host') or 'pornhub.com'
        video_id = mobj.group('id')

        # Skip the age-confirmation interstitial.
        self._set_cookie(host, 'age_verified', '1')

        # The same video page renders differently per platform; the 'tv'
        # variant is used below as a fallback source of video URLs.
        def dl_webpage(platform):
            self._set_cookie(host, 'platform', platform)
            return self._download_webpage(
                'http://www.%s/view_video.php?viewkey=%s' % (host, video_id),
                video_id, 'Downloading %s webpage' % platform)

        webpage = dl_webpage('pc')

        # Detect removed/private/user-message pages and surface the site's
        # own explanation instead of a generic extraction failure.
        error_msg = self._html_search_regex(
            r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
            webpage, 'error message', default=None, group='error')
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

        # video_title from flashvars contains whitespace instead of non-ASCII (see
        # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
        # on that anymore.
        title = self._html_search_meta(
            'twitter:title', webpage, default=None) or self._search_regex(
            (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
             r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
             r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
            webpage, 'title', group='title')

        # (url, height-or-None) pairs; the set deduplicates across the
        # several sources probed below.
        video_urls = []
        video_urls_set = set()
        subtitles = {}

        # Primary source: the flashvars_<id> JSON blob embedded in the page.
        flashvars = self._parse_json(
            self._search_regex(
                r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
            video_id)
        if flashvars:
            subtitle_url = url_or_none(flashvars.get('closedCaptionsFile'))
            if subtitle_url:
                subtitles.setdefault('en', []).append({
                    'url': subtitle_url,
                    'ext': 'srt',
                })
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
            media_definitions = flashvars.get('mediaDefinitions')
            if isinstance(media_definitions, list):
                for definition in media_definitions:
                    if not isinstance(definition, dict):
                        continue
                    video_url = definition.get('videoUrl')
                    if not video_url or not isinstance(video_url, compat_str):
                        continue
                    if video_url in video_urls_set:
                        continue
                    video_urls_set.add(video_url)
                    video_urls.append(
                        (video_url, int_or_none(definition.get('quality'))))
        else:
            thumbnail, duration = [None] * 2

        # Fallback source: the 'tv' page obfuscates the media URL as a
        # series of JS string-concatenation assignments ending in a
        # 'mediastring' variable; evaluate them with a tiny interpreter.
        if not video_urls:
            tv_webpage = dl_webpage('tv')

            assignments = self._search_regex(
                r'(var.+?mediastring.+?)</script>', tv_webpage,
                'encoded url').split(';')

            js_vars = {}

            def parse_js_value(inp):
                # Strip /* ... */ comments, resolve '+'-concatenations
                # recursively, substitute previously-assigned variables,
                # and unquote string literals.
                inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
                if '+' in inp:
                    inps = inp.split('+')
                    return functools.reduce(
                        operator.concat, map(parse_js_value, inps))
                inp = inp.strip()
                if inp in js_vars:
                    return js_vars[inp]
                return remove_quotes(inp)

            for assn in assignments:
                assn = assn.strip()
                if not assn:
                    continue
                assn = re.sub(r'var\s+', '', assn)
                vname, value = assn.split('=', 1)
                js_vars[vname] = parse_js_value(value)

            video_url = js_vars['mediastring']
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)

        # Additional source: direct download-button links on the 'pc' page.
        for mobj in re.finditer(
                r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
                webpage):
            video_url = mobj.group('url')
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)

        upload_date = None
        formats = []
        for video_url, height in video_urls:
            # The media URL path embeds the upload date as YYYYMM/DD.
            if not upload_date:
                upload_date = self._search_regex(
                    r'/(\d{6}/\d{2})/', video_url, 'upload data', default=None)
                if upload_date:
                    upload_date = upload_date.replace('/', '')
            tbr = None
            # URLs of the form ..._720P_1000K_... carry height and bitrate.
            mobj = re.search(r'(?P<height>\d+)[pP]?_(?P<tbr>\d+)[kK]', video_url)
            if mobj:
                if not height:
                    height = int(mobj.group('height'))
                tbr = int(mobj.group('tbr'))
            formats.append({
                'url': video_url,
                'format_id': '%dp' % height if height else None,
                'height': height,
                'tbr': tbr,
            })
        self._sort_formats(formats)

        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')

        # Tags/categories come as comma-separated strings inside the
        # page_params.zoneDetails JS object (relaxed JSON).
        page_params = self._parse_json(self._search_regex(
            r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})',
            webpage, 'page parameters', group='data', default='{}'),
            video_id, transform_source=js_to_json, fatal=False)
        tags = categories = None
        if page_params:
            tags = page_params.get('tags', '').split(',')
            categories = page_params.get('categories', '').split(',')

        return {
            'id': video_id,
            'uploader': video_uploader,
            'upload_date': upload_date,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
            'tags': tags,
            'categories': categories,
            'subtitles': subtitles,
        }
332
333
class PornHubPlaylistBaseIE(PornHubBaseIE):
    """Shared extraction logic for PornHub playlist-style pages."""

    def _extract_entries(self, webpage, host):
        """Return url_result entries for every video linked from webpage.

        Only the main container div is scanned, skipping the drop-down
        menu that uses a similar pattern for videos (see
        https://github.com/rg3/youtube-dl/issues/11594).
        """
        container = self._search_regex(
            r'(?s)(<div[^>]+class=["\']container.+)', webpage,
            'container', default=webpage)

        return [
            self.url_result(
                'http://www.%s/%s' % (host, video_url),
                PornHubIE.ie_key(), video_title=title)
            for video_url, title in orderedSet(re.findall(
                r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
                container))
        ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host = mobj.group('host')
        playlist_id = mobj.group('id')

        webpage = self._download_webpage(url, playlist_id)

        entries = self._extract_entries(webpage, host)

        # _parse_json with fatal=False returns None (not {}) when the
        # matched playlist object is malformed JSON; guard with `or {}`
        # so the .get() calls below cannot raise AttributeError.
        playlist = self._parse_json(
            self._search_regex(
                r'(?:playlistObject|PLAYLIST_VIEW)\s*=\s*({.+?});', webpage,
                'playlist', default='{}'),
            playlist_id, fatal=False) or {}
        title = playlist.get('title') or self._search_regex(
            r'>Videos\s+in\s+(.+?)\s+[Pp]laylist<', webpage, 'title', fatal=False)

        return self.playlist_result(
            entries, playlist_id, title, playlist.get('description'))
371
372
class PornHubPlaylistIE(PornHubPlaylistBaseIE):
    # Numeric playlist pages (e.g. /playlist/4667351); all extraction
    # logic is inherited from PornHubPlaylistBaseIE.
    _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/4667351',
        'info_dict': {
            'id': '4667351',
            'title': 'Nataly Hot',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'https://de.pornhub.com/playlist/4667351',
        'only_matching': True,
    }]
386
387
class PornHubUserVideosIE(PornHubPlaylistBaseIE):
    # Paginated listings of a user's/channel's/model's/pornstar's videos.
    _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos'
    _TESTS = [{
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'info_dict': {
            'id': 'zoe_ph',
        },
        'playlist_mincount': 171,
    }, {
        'url': 'http://www.pornhub.com/users/rushandlia/videos',
        'only_matching': True,
    }, {
        # default sorting as Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos',
        'info_dict': {
            'id': 'povd',
        },
        'playlist_mincount': 293,
    }, {
        # Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=ra',
        'only_matching': True,
    }, {
        # Most Recent Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=da',
        'only_matching': True,
    }, {
        # Most Viewed Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=vi',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/model/jayndrea/videos/upload',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos/upload',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host = mobj.group('host')
        user_id = mobj.group('id')

        entries = []
        # Walk numbered pages until the site 404s or a page yields no
        # entries, whichever comes first.
        for page_num in itertools.count(1):
            try:
                webpage = self._download_webpage(
                    url, user_id, 'Downloading page %d' % page_num,
                    query={'page': page_num})
            except ExtractorError as e:
                # A 404 past the last page cleanly terminates pagination;
                # any other error is re-raised.
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                    break
                raise
            page_entries = self._extract_entries(webpage, host)
            if not page_entries:
                break
            entries.extend(page_entries)

        return self.playlist_result(entries, user_id)
448
449         return self.playlist_result(entries, user_id)