2 from __future__ import unicode_literals
9 from .common import InfoExtractor
10 from ..compat import (
13 compat_urllib_request,
15 from .openload import PhantomJSwrapper
# Extractor for single PornHub (and Thumbzilla) video pages.
# NOTE(review): this excerpt is elided — the embedded original line numbers
# (27, 28, 32, ...) are non-contiguous, so many statements are missing and
# indentation has been flattened. Confirm every detail against the upstream
# youtube-dl extractor/pornhub.py before relying on these notes.
27 class PornHubIE(InfoExtractor):
28 IE_DESC = 'PornHub and Thumbzilla'
# _VALID_URL fragment: matches pornhub.com/.net view_video/embed URLs and
# thumbzilla.com video URLs; captures the host so per-mirror cookies work.
32 (?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
33 (?:www\.)?thumbzilla\.com/video/
# _TESTS fragments: expected metadata for concrete video pages, plus a long
# tail of 'only_matching' URL-pattern smoke tests.
38 'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
39 'md5': '1e19b41231a02eba417839222ac9d58e',
43 'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
45 'upload_date': '20130628',
57 'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
62 'uploader': 'Unknown',
63 'upload_date': '20150213',
74 'skip_download': True,
78 'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5af5fef7c2aa7',
80 'id': 'ph5af5fef7c2aa7',
82 'title': 'BFFS - Cute Teen Girls Share Cock On the Floor',
99 'skip_download': True,
102 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
103 'only_matching': True,
105 # removed at the request of cam4.com
106 'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
107 'only_matching': True,
109 # removed at the request of the copyright owner
110 'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
111 'only_matching': True,
113 # removed by uploader
114 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
115 'only_matching': True,
118 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
119 'only_matching': True,
121 'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
122 'only_matching': True,
124 'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
125 'only_matching': True,
127 'url': 'https://www.pornhub.net/view_video.php?viewkey=203640933',
128 'only_matching': True,
# Download wrapper: detects the site's JS anti-bot interstitial (onload=go(),
# RNKEY cookie, location.reload) and, when found, solves it with PhantomJS
# before re-downloading the real page.
131 def _download_webpage_handle(self, *args, **kwargs):
132 def dl(*args, **kwargs):
133 return super(PornHubIE, self)._download_webpage_handle(*args, **kwargs)
135 webpage, urlh = dl(*args, **kwargs)
137 if any(re.search(p, webpage) for p in (
138 r'<body\b[^>]+\bonload=["\']go\(\)',
139 r'document\.cookie\s*=\s*["\']RNKEY=',
140 r'document\.location\.reload\(true\)')):
141 url_or_request = args[0]
# NOTE(review): the 'else' branch of this conditional expression is missing
# from the excerpt (presumably `else url_or_request`) — confirm upstream.
142 url = (url_or_request.get_full_url()
143 if isinstance(url_or_request, compat_urllib_request.Request)
145 phantom = PhantomJSwrapper(self, required_version='2.0')
146 phantom.get(url, html=webpage)
147 webpage, urlh = dl(*args, **kwargs)
# Find embedded PornHub iframe URLs inside arbitrary third-party pages.
# NOTE(review): decorator and return statement are elided in this excerpt.
152 def _extract_urls(webpage):
154 r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.(?:com|net)/embed/[\da-z]+)',
# Helper: scrape a numeric counter (views/likes/...) via regex; returns None
# on failure (fatal=False) and lets str_to_int strip thousands separators.
157 def _extract_count(self, pattern, webpage, name):
158 return str_to_int(self._search_regex(
159 pattern, webpage, '%s count' % name, fatal=False))
161 def _real_extract(self, url):
162 mobj = re.match(self._VALID_URL, url)
# Thumbzilla URLs have no host group, hence the pornhub.com fallback.
163 host = mobj.group('host') or 'pornhub.com'
164 video_id = mobj.group('id')
# Bypass the age gate before any page download.
166 self._set_cookie(host, 'age_verified', '1')
# The site serves different markup per 'platform' cookie; used below for
# both the 'pc' and 'tv' variants of the same video page.
168 def dl_webpage(platform):
169 self._set_cookie(host, 'platform', platform)
170 return self._download_webpage(
171 'http://www.%s/view_video.php?viewkey=%s' % (host, video_id),
172 video_id, 'Downloading %s webpage' % platform)
174 webpage = dl_webpage('pc')
# Surface site-side removal/error notices as an expected ExtractorError.
176 error_msg = self._html_search_regex(
177 r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
178 webpage, 'error message', default=None, group='error')
180 error_msg = re.sub(r'\s+', ' ', error_msg)
181 raise ExtractorError(
182 'PornHub said: %s' % error_msg,
183 expected=True, video_id=video_id)
185 # video_title from flashvars contains whitespace instead of non-ASCII (see
186 # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
188 title = self._html_search_meta(
189 'twitter:title', webpage, default=None) or self._search_regex(
190 (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
191 r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
192 r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
193 webpage, 'title', group='title')
# Dedupe set mirroring the video_urls list built below.
196 video_urls_set = set()
# Primary source of media info: the page's flashvars_<id> JSON blob.
199 flashvars = self._parse_json(
201 r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
# Optional English closed-captions file advertised in flashvars.
204 subtitle_url = url_or_none(flashvars.get('closedCaptionsFile'))
206 subtitles.setdefault('en', []).append({
210 thumbnail = flashvars.get('image_url')
211 duration = int_or_none(flashvars.get('video_duration'))
# mediaDefinitions: list of {videoUrl, quality, ...} dicts; every check here
# guards against the site returning malformed/partial JSON.
212 media_definitions = flashvars.get('mediaDefinitions')
213 if isinstance(media_definitions, list):
214 for definition in media_definitions:
215 if not isinstance(definition, dict):
217 video_url = definition.get('videoUrl')
218 if not video_url or not isinstance(video_url, compat_str):
220 if video_url in video_urls_set:
222 video_urls_set.add(video_url)
224 (video_url, int_or_none(definition.get('quality'))))
# No flashvars at all: fall back to the 'tv' page variant below.
226 thumbnail, duration = [None] * 2
229 tv_webpage = dl_webpage('tv')
# The TV page obfuscates the media URL as a chain of JS var assignments
# ending in 'mediastring'; these are parsed manually below.
231 assignments = self._search_regex(
232 r'(var.+?mediastring.+?)</script>', tv_webpage,
233 'encoded url').split(';')
# Recursively evaluate a restricted JS expression: strips /* */ comments,
# resolves '+'-concatenation, previously-seen vars, and quoted literals.
237 def parse_js_value(inp):
238 inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
240 inps = inp.split('+')
241 return functools.reduce(
242 operator.concat, map(parse_js_value, inps))
246 return remove_quotes(inp)
248 for assn in assignments:
252 assn = re.sub(r'var\s+', '', assn)
# maxsplit=1 keeps '=' characters inside the value intact.
253 vname, value = assn.split('=', 1)
254 js_vars[vname] = parse_js_value(value)
256 video_url = js_vars['mediastring']
257 if video_url not in video_urls_set:
258 video_urls.append((video_url, None))
259 video_urls_set.add(video_url)
# Additional qualities from explicit download buttons on the page.
261 for mobj in re.finditer(
262 r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
264 video_url = mobj.group('url')
265 if video_url not in video_urls_set:
266 video_urls.append((video_url, None))
267 video_urls_set.add(video_url)
# Build the formats list; height/tbr/upload_date are best-effort guesses
# recovered from path components of each media URL.
271 for video_url, height in video_urls:
273 upload_date = self._search_regex(
274 r'/(\d{6}/\d{2})/', video_url, 'upload data', default=None)
276 upload_date = upload_date.replace('/', '')
278 mobj = re.search(r'(?P<height>\d+)[pP]?_(?P<tbr>\d+)[kK]', video_url)
281 height = int(mobj.group('height'))
282 tbr = int(mobj.group('tbr'))
285 'format_id': '%dp' % height if height else None,
289 self._sort_formats(formats)
291 video_uploader = self._html_search_regex(
292 r'(?s)From: .+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
293 webpage, 'uploader', fatal=False)
# Scraped counters; each may be None if the markup changed (fatal=False).
295 view_count = self._extract_count(
296 r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
297 like_count = self._extract_count(
298 r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
299 dislike_count = self._extract_count(
300 r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
301 comment_count = self._extract_count(
302 r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
# tags/categories live in the ad zoneDetails page_params JS object.
304 page_params = self._parse_json(self._search_regex(
305 r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})',
306 webpage, 'page parameters', group='data', default='{}'),
307 video_id, transform_source=js_to_json, fatal=False)
308 tags = categories = None
310 tags = page_params.get('tags', '').split(',')
311 categories = page_params.get('categories', '').split(',')
# Final info dict returned to the caller (partial in this excerpt).
315 'uploader': video_uploader,
316 'upload_date': upload_date,
318 'thumbnail': thumbnail,
319 'duration': duration,
320 'view_count': view_count,
321 'like_count': like_count,
322 'dislike_count': dislike_count,
323 'comment_count': comment_count,
327 'categories': categories,
328 'subtitles': subtitles,
# Shared base for PornHub playlist-style pages (playlists, user/channel
# video listings). NOTE(review): excerpt is elided (non-contiguous original
# line numbers, flattened indentation) — verify against upstream youtube-dl
# extractor/pornhub.py.
332 class PornHubPlaylistBaseIE(InfoExtractor):
# Turn every view_video link in the page into a url_result entry delegated
# to PornHubIE, deduplicated with orderedSet.
333 def _extract_entries(self, webpage, host):
334 # Only process container div with main playlist content skipping
335 # drop-down menu that uses similar pattern for videos (see
336 # https://github.com/rg3/youtube-dl/issues/11594).
337 container = self._search_regex(
338 r'(?s)(<div[^>]+class=["\']container.+)', webpage,
339 'container', default=webpage)
# NOTE(review): the list-comprehension header around these lines is missing
# from the excerpt; the findall presumably runs over `container`.
343 'http://www.%s/%s' % (host, video_url),
344 PornHubIE.ie_key(), video_title=title)
345 for video_url, title in orderedSet(re.findall(
346 r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
# Generic single-page playlist extraction: entries from the page plus
# title/description recovered from the embedded playlist JSON when present.
350 def _real_extract(self, url):
351 mobj = re.match(self._VALID_URL, url)
352 host = mobj.group('host')
353 playlist_id = mobj.group('id')
355 webpage = self._download_webpage(url, playlist_id)
357 entries = self._extract_entries(webpage, host)
359 playlist = self._parse_json(
361 r'(?:playlistObject|PLAYLIST_VIEW)\s*=\s*({.+?});', webpage,
362 'playlist', default='{}'),
363 playlist_id, fatal=False)
# Fall back to scraping the '>Videos in <name> playlist<' heading.
364 title = playlist.get('title') or self._search_regex(
365 r'>Videos\s+in\s+(.+?)\s+[Pp]laylist<', webpage, 'title', fatal=False)
367 return self.playlist_result(
368 entries, playlist_id, title, playlist.get('description'))
# Concrete extractor for /playlist/<numeric id> pages; all behavior is
# inherited from PornHubPlaylistBaseIE — only URL pattern and tests here.
# NOTE(review): some _TESTS dict lines are elided in this excerpt.
371 class PornHubPlaylistIE(PornHubPlaylistBaseIE):
372 _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/playlist/(?P<id>\d+)'
374 'url': 'http://www.pornhub.com/playlist/4667351',
377 'title': 'Nataly Hot',
379 'playlist_mincount': 2,
# Localized mirrors (de., fr., ...) must also match.
381 'url': 'https://de.pornhub.com/playlist/4667351',
382 'only_matching': True,
# Extractor for a user/channel/model/pornstar's full video listing; pages
# through ?page=N until a 404 or an empty page. NOTE(review): excerpt is
# elided (non-contiguous original line numbers, flattened indentation) —
# the loop's break/raise statements are missing; verify upstream.
386 class PornHubUserVideosIE(PornHubPlaylistBaseIE):
387 _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos'
389 'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
393 'playlist_mincount': 171,
395 'url': 'http://www.pornhub.com/users/rushandlia/videos',
396 'only_matching': True,
398 # default sorting as Top Rated Videos
399 'url': 'https://www.pornhub.com/channels/povd/videos',
403 'playlist_mincount': 293,
# ?o= sort-order variants (ra/da/vi) must still match the pattern.
406 'url': 'https://www.pornhub.com/channels/povd/videos?o=ra',
407 'only_matching': True,
410 'url': 'https://www.pornhub.com/channels/povd/videos?o=da',
411 'only_matching': True,
414 'url': 'https://www.pornhub.com/channels/povd/videos?o=vi',
415 'only_matching': True,
417 'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
418 'only_matching': True,
420 'url': 'https://www.pornhub.com/model/jayndrea/videos/upload',
421 'only_matching': True,
423 'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos/upload',
424 'only_matching': True,
427 def _real_extract(self, url):
428 mobj = re.match(self._VALID_URL, url)
429 host = mobj.group('host')
430 user_id = mobj.group('id')
# Accumulate entries page by page; itertools.count gives 1-based page nums.
433 for page_num in itertools.count(1):
435 webpage = self._download_webpage(
436 url, user_id, 'Downloading page %d' % page_num,
437 query={'page': page_num})
438 except ExtractorError as e:
# HTTP 404 marks the end of pagination; presumably handled with a break
# (the statement itself is elided from this excerpt).
439 if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
442 page_entries = self._extract_entries(webpage, host)
445 entries.extend(page_entries)
447 return self.playlist_result(entries, user_id)