[porncom] Fix extraction
youtube_dl/extractor/porncom.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    int_or_none,
    js_to_json,
    parse_filesize,
    str_to_int,
)


class PornComIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[a-zA-Z]+\.)?porn\.com/videos/(?:(?P<display_id>[^/]+)-)?(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.porn.com/videos/teen-grabs-a-dildo-and-fucks-her-pussy-live-on-1hottie-i-rec-2603339',
        'md5': '3f30ce76267533cd12ba999263156de7',
        'info_dict': {
            'id': '2603339',
            'display_id': 'teen-grabs-a-dildo-and-fucks-her-pussy-live-on-1hottie-i-rec',
            'ext': 'mp4',
            'title': 'Teen grabs a dildo and fucks her pussy live on 1hottie, I rec',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 551,
            'view_count': int,
            'age_limit': 18,
            'categories': list,
            'tags': list,
        },
    }, {
        'url': 'http://se.porn.com/videos/marsha-may-rides-seth-on-top-of-his-thick-cock-2658067',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id') or video_id

        webpage = self._download_webpage(url, display_id)

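        # The page embeds the player config as a JS object inside an
        # obfuscated script (note the literal 'v1ar' in the first pattern,
        # presumably a mangled 'var'), so match the object literal itself
        # rather than relying on a variable name.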
        config = self._parse_json(
            self._search_regex(
                (r'=\s*({.+?})\s*;\s*v1ar\b',
                 r'=\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*='),
                webpage, 'config', default='{}'),
            display_id, transform_source=js_to_json, fatal=False)

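        # Prefer the parsed player config; fall back to scraping the static
        # HTML when it is missing.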
        if config:
            title = config['title']
            formats = [{
                'url': stream['url'],
                'format_id': stream.get('id'),
                'height': int_or_none(self._search_regex(
                    r'^(\d+)[pP]', stream.get('id') or '', 'height', default=None))
            } for stream in config['streams'] if stream.get('url')]
            thumbnail = (compat_urlparse.urljoin(
                config['thumbCDN'], config['poster'])
                if config.get('thumbCDN') and config.get('poster') else None)
            duration = int_or_none(config.get('length'))
        else:
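            # No player config found: take the title from the page markup and
            # build formats from the download links, which expose the height
            # and an approximate file size.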
            title = self._search_regex(
                (r'<title>([^<]+)</title>', r'<h1[^>]*>([^<]+)</h1>'),
                webpage, 'title')
            formats = [{
                'url': compat_urlparse.urljoin(url, format_url),
                'format_id': '%sp' % height,
                'height': int(height),
                'filesize_approx': parse_filesize(filesize),
            } for format_url, height, filesize in re.findall(
                r'<a[^>]+href="(/download/[^"]+)">[^<]*?(\d+)p<span[^>]*>(\d+\s*[a-zA-Z]+)<',
                webpage)]
            thumbnail = None
            duration = None

        self._sort_formats(formats)

        view_count = str_to_int(self._search_regex(
            (r'Views:\s*</span>\s*<span>\s*([\d,.]+)',
             r'class=["\']views["\'][^>]*><p>([\d,.]+)'), webpage,
            'view count', fatal=False))

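        # Categories and tags are rendered as lists of <a> elements under a
        # 'Categories:'/'Tags:' label; collect the link texts.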
        def extract_list(kind):
            s = self._search_regex(
                (r'(?s)%s:\s*</span>\s*<span>(.+?)</span>' % kind.capitalize(),
                 r'(?s)<p[^>]*>%s:(.+?)</p>' % kind.capitalize()),
                webpage, kind, fatal=False)
            return re.findall(r'<a[^>]+>([^<]+)</a>', s or '')

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
            'age_limit': 18,
            'categories': extract_list('categories'),
            'tags': extract_list('tags'),
        }
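
A quick way to sanity-check the fix locally (a sketch, not part of the change; it assumes the first test URL above is still live and that youtube_dl is importable) is to run the extractor through YoutubeDL without downloading:

    import youtube_dl

    # download=False returns the extracted info dict only; nothing is fetched
    # beyond the webpage itself.
    with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
        info = ydl.extract_info(
            'http://www.porn.com/videos/teen-grabs-a-dildo-and-fucks-her-pussy-live-on-1hottie-i-rec-2603339',
            download=False)
        print(info['id'], info['title'], len(info['formats']))

The bundled test case should also be covered by the usual youtube-dl test invocation, python test/test_download.py TestDownload.test_PornCom.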