# youtube_dl/extractor/pornhub.py
from __future__ import unicode_literals

import os
import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_parse_urlparse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    str_to_int,
)
from ..aes import aes_decrypt_text


class PornHubIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=(?P<id>[0-9a-f]+)'
    _TEST = {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '882f488fa1f0026f023f33576004a2ed',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'uploader': 'Babes',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'age_limit': 18,
        }
    }

    def _extract_count(self, pattern, webpage, name):
        # Non-fatal helper: pull a human-formatted number (e.g. '1,234') out
        # of the page and convert it to an int, returning None when absent.
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))

    def _real_extract(self, url):
        video_id = self._match_id(url)

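        # The site gates videos behind an age check; sending the
        # age_verified cookie up front skips the interstitial page.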
        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

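        # Pages for unavailable videos carry a user-facing error box;
        # surface its text as the extraction error.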
        error_msg = self._html_search_regex(
            r'(?s)<div class="userMessageSection[^"]*".*?>(.*?)</div>',
            webpage, 'error message', default=None)
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

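        # Basic metadata comes straight from the page markup; the thumbnail
        # URL is percent-decoded after extraction.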
        video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)
        thumbnail = self._html_search_regex(
            r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
        if thumbnail:
            thumbnail = compat_urllib_parse.unquote(thumbnail)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')

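        # Direct video URLs are embedded as percent-encoded JS strings. When
        # the page flags them as encrypted, they are additionally AES-encrypted
        # with a key derived from the "video_title" field (32-byte key,
        # handled by the shared aes_decrypt_text helper).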
        video_urls = list(map(compat_urllib_parse.unquote, re.findall(
            r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
        if '"encrypted":true' in webpage:
            password = compat_urllib_parse.unquote_plus(self._html_search_regex(
                r'"video_title":"([^"]+)', webpage, 'password'))
            video_urls = [
                aes_decrypt_text(video_url, password, 32).decode('utf-8')
                for video_url in video_urls]

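        # Each URL path contains a quality segment (something like
        # '480P_600K_...'); turn it into a format id and, when it matches the
        # expected pattern, derive height and total bitrate from it.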
        formats = []
        for video_url in video_urls:
            path = compat_urllib_parse_urlparse(video_url).path
            extension = os.path.splitext(path)[1][1:]
            fmt = '-'.join(path.split('/')[5].split('_')[:2])

            m = re.match(r'^(?P<height>[0-9]+)P-(?P<tbr>[0-9]+)K$', fmt)
            if m is None:
                height = None
                tbr = None
            else:
                height = int(m.group('height'))
                tbr = int(m.group('tbr'))

            formats.append({
                'url': video_url,
                'ext': extension,
                'format': fmt,
                'format_id': fmt,
                'tbr': tbr,
                'height': height,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'uploader': video_uploader,
            'title': video_title,
            'thumbnail': thumbnail,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
        }


class PornHubPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/6201671',
        'info_dict': {
            'id': '6201671',
            'title': 'P0p4',
        },
        'playlist_mincount': 35,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

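        # Every video on the playlist page links to view_video.php; dedupe
        # the hrefs and hand each one to PornHubIE.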
        entries = [
            self.url_result('http://www.pornhub.com/%s' % video_url, 'PornHub')
            for video_url in set(re.findall(
                r'href="/?(view_video\.php\?viewkey=\d+[^"]*)"', webpage))
        ]

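        # Playlist title and description are taken from the inline
        # playlistObject JSON blob.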
        playlist = self._parse_json(
            self._search_regex(
                r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'),
            playlist_id)

        return self.playlist_result(
            entries, playlist_id, playlist.get('title'), playlist.get('description'))