[motherless] Fix extraction.
[youtube-dl] / youtube_dl / extractor / motherless.py
1 from __future__ import unicode_literals
2
3 import datetime
4 import re
5
6 from .common import InfoExtractor
7 from ..compat import compat_urlparse
8 from ..utils import (
9     ExtractorError,
10     InAdvancePagedList,
11     orderedSet,
12     str_to_int,
13     unified_strdate,
14 )
15
16
class MotherlessIE(InfoExtractor):
    """Extract a single video from a motherless.com video page.

    Matches plain video pages (http://motherless.com/<ID>) as well as
    videos viewed inside a group (http://motherless.com/g/<group>/<ID>);
    video IDs are upper-case alphanumeric strings.
    """
    _VALID_URL = r'https?://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
    _TESTS = [{
        'url': 'http://motherless.com/AC3FFE1',
        'md5': '310f62e325a9fafe64f68c0bccb6e75f',
        'info_dict': {
            'id': 'AC3FFE1',
            'ext': 'mp4',
            'title': 'Fucked in the ass while playing PS3',
            'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
            'upload_date': '20100913',
            'uploader_id': 'famouslyfuckedup',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        }
    }, {
        'url': 'http://motherless.com/532291B',
        'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',
        'info_dict': {
            'id': '532291B',
            'ext': 'mp4',
            'title': 'Amazing girl playing the omegle game, PERFECT!',
            'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen',
                           'game', 'hairy'],
            'upload_date': '20140622',
            'uploader_id': 'Sulivana7x',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        },
        'skip': '404',
    }, {
        'url': 'http://motherless.com/g/cosplay/633979F',
        'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
        'info_dict': {
            'id': '633979F',
            'ext': 'mp4',
            'title': 'Turtlette',
            'categories': ['superheroine heroine  superher'],
            'upload_date': '20140827',
            'uploader_id': 'shade0230',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        }
    }, {
        # no keywords
        'url': 'http://motherless.com/8B4BBC1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Download the video page and return an info dict for one video."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The site can serve an HTML error page rather than an HTTP error,
        # so detect removal by looking for 404 markers in the page body.
        if any(p in webpage for p in (
                '<title>404 - MOTHERLESS.COM<',
                ">The page you're looking for cannot be found.<")):
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)

        # Friends-only videos require an authenticated friendship; report
        # this as an expected error instead of a generic extraction failure.
        if '>The content you are trying to view is for friends only.' in webpage:
            raise ExtractorError('Video %s is for friends only' % video_id, expected=True)

        # Two known title markups: a media-meta-title div and the older
        # view-upload-title element.
        title = self._html_search_regex(
            (r'(?s)<div[^>]+\bclass=["\']media-meta-title[^>]+>(.+?)</div>',
             r'id="view-upload-title">\s+([^<]+)<'), webpage, 'title')
        # Prefer the URL from the player setup / fileurl assignment; if
        # neither is present, fall back to the predictable CDN URL scheme.
        video_url = (self._html_search_regex(
            (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
             r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
            webpage, 'video URL', default=None, group='url')
            or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
        age_limit = self._rta_search(webpage)
        view_count = str_to_int(self._html_search_regex(
            (r'>(\d+)\s+Views<', r'<strong>Views</strong>\s+([^<]+)<'),
            webpage, 'view count', fatal=False))
        like_count = str_to_int(self._html_search_regex(
            (r'>(\d+)\s+Favorites<', r'<strong>Favorited</strong>\s+([^<]+)<'),
            webpage, 'like count', fatal=False))

        upload_date = self._html_search_regex(
            (r'class=["\']count[^>]+>(\d+\s+[a-zA-Z]{3}\s+\d{4})<',
             r'<strong>Uploaded</strong>\s+([^<]+)<'), webpage, 'upload date')
        # Recent uploads are rendered as a relative date ("N days Ago");
        # convert that to an absolute YYYYMMDD string. Otherwise the page
        # shows an absolute date (e.g. "22 Jun 2014") for unified_strdate.
        if 'Ago' in upload_date:
            days = int(re.search(r'([0-9]+)', upload_date).group(1))
            upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')
        else:
            upload_date = unified_strdate(upload_date)

        # Every rendered comment carries this CSS class, so counting its
        # occurrences approximates the comment count without extra requests.
        comment_count = webpage.count('class="media-comment-contents"')
        uploader_id = self._html_search_regex(
            r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
            webpage, 'uploader_id')

        # The keywords meta tag holds the video's categories, comma-separated;
        # it is absent for videos without keywords (see last test case).
        categories = self._html_search_meta('keywords', webpage, default=None)
        if categories:
            categories = [cat.strip() for cat in categories.split(',')]

        return {
            'id': video_id,
            'title': title,
            'upload_date': upload_date,
            'uploader_id': uploader_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'categories': categories,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'age_limit': age_limit,
            'url': video_url,
        }
125
126
class MotherlessGroupIE(InfoExtractor):
    """Extract every video of a motherless.com group as a paged playlist."""
    _VALID_URL = r'https?://(?:www\.)?motherless\.com/gv?/(?P<id>[a-z0-9_]+)'
    _TESTS = [{
        'url': 'http://motherless.com/g/movie_scenes',
        'info_dict': {
            'id': 'movie_scenes',
            'title': 'Movie Scenes',
            'description': 'Hot and sexy scenes from "regular" movies... '
                           'Beautiful actresses fully nude... A looot of '
                           'skin! :)Enjoy!',
        },
        'playlist_mincount': 662,
    }, {
        'url': 'http://motherless.com/gv/sex_must_be_funny',
        'info_dict': {
            'id': 'sex_must_be_funny',
            'title': 'Sex must be funny',
            'description': 'Sex can be funny. Wide smiles,laugh, games, fun of '
                           'any kind!'
        },
        'playlist_mincount': 9,
    }]

    @classmethod
    def suitable(cls, url):
        # /g/<group>/<ID> URLs also match this extractor's pattern; defer
        # those to MotherlessIE so a single video viewed inside a group is
        # not mistaken for a whole group.
        return (False if MotherlessIE.suitable(url)
                else super(MotherlessGroupIE, cls).suitable(url))

    def _extract_entries(self, webpage, base):
        """Collect url_result entries for all videos linked on one group page."""
        entries = []
        for mobj in re.finditer(
                r'href="(?P<href>/[^"]+)"[^>]*>(?:\s*<img[^>]+alt="[^-]+-\s(?P<title>[^"]+)")?',
                webpage):
            video_url = compat_urlparse.urljoin(base, mobj.group('href'))
            # Keep only links MotherlessIE would accept as single videos.
            if not MotherlessIE.suitable(video_url):
                continue
            video_id = MotherlessIE._match_id(video_url)
            title = mobj.group('title')
            entries.append(self.url_result(
                video_url, ie=MotherlessIE.ie_key(), video_id=video_id,
                video_title=title))
        # Fallback for the alternative markup that exposes video codenames
        # via data attributes instead of plain anchor links.
        if not entries:
            entries = [
                self.url_result(
                    compat_urlparse.urljoin(base, '/' + entry_id),
                    ie=MotherlessIE.ie_key(), video_id=entry_id)
                for entry_id in orderedSet(re.findall(
                    r'data-codename=["\']([A-Z0-9]+)', webpage))]
        return entries

    def _real_extract(self, url):
        group_id = self._match_id(url)
        # Normalize /g/<id> URLs to the /gv/<id> (group videos) listing.
        page_url = compat_urlparse.urljoin(url, '/gv/%s' % group_id)
        webpage = self._download_webpage(page_url, group_id)
        title = self._search_regex(
            r'<title>([\w\s]+\w)\s+-', webpage, 'title', fatal=False)
        description = self._html_search_meta(
            'description', webpage, fatal=False)
        # The last page number sits right before the NEXT pagination link.
        # Single-page groups have no NEXT link at all, so default to one
        # page instead of failing the whole extraction with a fatal regex
        # error.
        page_count = self._int(self._search_regex(
            r'(\d+)</(?:a|span)><(?:a|span)[^>]+>\s*NEXT',
            webpage, 'page_count', default='1'), 'page_count')
        PAGE_SIZE = 80

        def _get_page(idx):
            # The site's page query parameter is 1-based.
            webpage = self._download_webpage(
                page_url, group_id, query={'page': idx + 1},
                note='Downloading page %d/%d' % (idx + 1, page_count)
            )
            for entry in self._extract_entries(webpage, url):
                yield entry

        # Lazily fetch pages on demand; the total count is known up front.
        playlist = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)

        return {
            '_type': 'playlist',
            'id': group_id,
            'title': title,
            'description': description,
            'entries': playlist
        }