[pornhub] Add support for channels (closes #15613)
youtube_dl/extractor/pornhub.py
# coding: utf-8
from __future__ import unicode_literals

import functools
import itertools
import operator
import re

from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
    ExtractorError,
    int_or_none,
    js_to_json,
    orderedSet,
    remove_quotes,
    str_to_int,
)


class PornHubIE(InfoExtractor):
    IE_DESC = 'PornHub and Thumbzilla'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[a-z]+\.)?pornhub\.com/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
                            (?:www\.)?thumbzilla\.com/video/
                        )
                        (?P<id>[\da-z]+)
                    '''
    _TESTS = [{
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '1e19b41231a02eba417839222ac9d58e',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'uploader': 'Babes',
            'duration': 361,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
    }, {
        # non-ASCII title
        'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
        'info_dict': {
            'id': '1331683002',
            'ext': 'mp4',
            'title': '重庆婷婷女王足交',
            'uploader': 'cj397186295',
            'duration': 1753,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
        'only_matching': True,
    }, {
        # removed at the request of cam4.com
        'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
        'only_matching': True,
    }, {
        # removed at the request of the copyright owner
        'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
        'only_matching': True,
    }, {
        # removed by uploader
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
        'only_matching': True,
    }, {
        # private video
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
        'only_matching': True,
    }, {
        'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
        'only_matching': True,
    }]

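    # Called by the generic extractor to discover PornHub players embedded
    # in third-party pages.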
    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/[\da-z]+)',
            webpage)

    def _extract_count(self, pattern, webpage, name):
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))

    def _real_extract(self, url):
        video_id = self._match_id(url)

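        # The watch page honours two cookies: age_verified=1 bypasses the
        # age gate, and platform switches between the pc and tv page layouts.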
        def dl_webpage(platform):
            return self._download_webpage(
                'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id,
                video_id, headers={
                    'Cookie': 'age_verified=1; platform=%s' % platform,
                })

        webpage = dl_webpage('pc')

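        # Removed, private and copyright-claimed videos render a notice in a
        # "removed" or "userMessageSection" div; surface it as an expected
        # error instead of failing later with an obscure extraction error.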
        error_msg = self._html_search_regex(
            r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
            webpage, 'error message', default=None, group='error')
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

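        # The tv layout exposes the final media URL through a short block of
        # plain JavaScript variable assignments ending in "mediastring".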
        tv_webpage = dl_webpage('tv')

        assignments = self._search_regex(
            r'(var.+?mediastring.+?)</script>', tv_webpage,
            'encoded url').split(';')

        js_vars = {}

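        # Evaluate a tiny subset of JavaScript: string literals, references
        # to previously assigned variables and "+" concatenation, after
        # stripping /* ... */ comments.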
        def parse_js_value(inp):
            inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
            if '+' in inp:
                inps = inp.split('+')
                return functools.reduce(
                    operator.concat, map(parse_js_value, inps))
            inp = inp.strip()
            if inp in js_vars:
                return js_vars[inp]
            return remove_quotes(inp)

        for assn in assignments:
            assn = assn.strip()
            if not assn:
                continue
            assn = re.sub(r'var\s+', '', assn)
            vname, value = assn.split('=', 1)
            js_vars[vname] = parse_js_value(value)

        video_url = js_vars['mediastring']

        title = self._search_regex(
            r'<h1>([^>]+)</h1>', tv_webpage, 'title', default=None)

        # video_title from flashvars contains whitespace instead of non-ASCII
        # characters (see http://www.pornhub.com/view_video.php?viewkey=1331683002),
        # so it is not relied upon anymore.
        title = title or self._html_search_meta(
            'twitter:title', webpage, default=None) or self._search_regex(
            (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
             r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
             r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
            webpage, 'title', group='title')

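        # Thumbnail and duration come from the flashvars object embedded in
        # the pc page, when present.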
        flashvars = self._parse_json(
            self._search_regex(
                r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
            video_id)
        if flashvars:
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
        else:
            thumbnail = duration = None

        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a\b[^>]+\bhref=["\']/(?:user|channel)s/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')

        page_params = self._parse_json(self._search_regex(
            r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})',
            webpage, 'page parameters', group='data', default='{}'),
            video_id, transform_source=js_to_json, fatal=False)
        tags = categories = None
        if page_params:
            tags = page_params.get('tags', '').split(',')
            categories = page_params.get('categories', '').split(',')

        return {
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'age_limit': 18,
            'tags': tags,
            'categories': categories,
        }


class PornHubPlaylistBaseIE(InfoExtractor):
    def _extract_entries(self, webpage):
        # Only process the container div with the main playlist content,
        # skipping the drop-down menu that uses a similar pattern for videos
        # (see https://github.com/rg3/youtube-dl/issues/11594).
        container = self._search_regex(
            r'(?s)(<div[^>]+class=["\']container.+)', webpage,
            'container', default=webpage)

        return [
            self.url_result(
                'http://www.pornhub.com/%s' % video_url,
                PornHubIE.ie_key(), video_title=title)
            for video_url, title in orderedSet(re.findall(
                r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
                container))
        ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        entries = self._extract_entries(webpage)

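        # Playlist metadata is exposed as a JS object (playlistObject or
        # PLAYLIST_VIEW depending on the page variant); fall back to the
        # page heading if it cannot be parsed.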
        playlist = self._parse_json(
            self._search_regex(
                r'(?:playlistObject|PLAYLIST_VIEW)\s*=\s*({.+?});', webpage,
                'playlist', default='{}'),
            playlist_id, fatal=False) or {}
        title = playlist.get('title') or self._search_regex(
            r'>Videos\s+in\s+(.+?)\s+[Pp]laylist<', webpage, 'title', fatal=False)

        return self.playlist_result(
            entries, playlist_id, title, playlist.get('description'))


class PornHubPlaylistIE(PornHubPlaylistBaseIE):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/4667351',
        'info_dict': {
            'id': '4667351',
            'title': 'Nataly Hot',
        },
        'playlist_mincount': 2,
    }]


class PornHubUserVideosIE(PornHubPlaylistBaseIE):
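    # Channel video listings (/channels/<id>/videos) share the markup and
    # pagination of user listings, so both URL forms are handled here.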
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/(?:user|channel)s/(?P<id>[^/]+)/videos'
    _TESTS = [{
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'info_dict': {
            'id': 'zoe_ph',
        },
        'playlist_mincount': 171,
    }, {
        'url': 'http://www.pornhub.com/users/rushandlia/videos',
        'only_matching': True,
    }, {
        # default sorting as Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos',
        'info_dict': {
            'id': 'povd',
        },
        'playlist_mincount': 293,
    }, {
        # Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=ra',
        'only_matching': True,
    }, {
        # Most Recent Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=da',
        'only_matching': True,
    }, {
        # Most Viewed Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=vi',
        'only_matching': True,
    }]
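
    # A minimal usage sketch (assuming youtube-dl is installed as the
    # youtube_dl package); channel and user listings are expanded into
    # individual video entries:
    #
    #   import youtube_dl
    #   with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
    #       info = ydl.extract_info(
    #           'https://www.pornhub.com/channels/povd/videos', download=False)
    #       print(info['id'], len(info['entries']))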

    def _real_extract(self, url):
        user_id = self._match_id(url)

        entries = []
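        # Listing pages are paginated via the "page" query parameter; stop on
        # the first 404 or on a page that yields no entries.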
        for page_num in itertools.count(1):
            try:
                webpage = self._download_webpage(
                    url, user_id, 'Downloading page %d' % page_num,
                    query={'page': page_num})
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                    break
                raise
            page_entries = self._extract_entries(webpage)
            if not page_entries:
                break
            entries.extend(page_entries)

        return self.playlist_result(entries, user_id)