X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2Fextractor%2Fmotherless.py;h=b1615b4d8e4bce8b580942f717477e6ed57ee92e;hb=607d204551aa0def292383c2870fba2afca096da;hp=4adac691c09f49241897a63b26774e86c3a019bf;hpb=a133eb7764594b830cb975e3925972214e932704;p=youtube-dl
diff --git a/youtube_dl/extractor/motherless.py b/youtube_dl/extractor/motherless.py
index 4adac691c..b1615b4d8 100644
--- a/youtube_dl/extractor/motherless.py
+++ b/youtube_dl/extractor/motherless.py
@@ -26,7 +26,7 @@ class MotherlessIE(InfoExtractor):
'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
'upload_date': '20100913',
'uploader_id': 'famouslyfuckedup',
- 'thumbnail': r're:http://.*\.jpg',
+ 'thumbnail': r're:https?://.*\.jpg',
'age_limit': 18,
}
}, {
@@ -40,7 +40,7 @@ class MotherlessIE(InfoExtractor):
'game', 'hairy'],
'upload_date': '20140622',
'uploader_id': 'Sulivana7x',
- 'thumbnail': r're:http://.*\.jpg',
+ 'thumbnail': r're:https?://.*\.jpg',
'age_limit': 18,
},
'skip': '404',
@@ -54,7 +54,7 @@ class MotherlessIE(InfoExtractor):
'categories': ['superheroine heroine superher'],
'upload_date': '20140827',
'uploader_id': 'shade0230',
- 'thumbnail': r're:http://.*\.jpg',
+ 'thumbnail': r're:https?://.*\.jpg',
'age_limit': 18,
}
}, {
@@ -76,19 +76,24 @@ class MotherlessIE(InfoExtractor):
raise ExtractorError('Video %s is for friends only' % video_id, expected=True)
title = self._html_search_regex(
- r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
- video_url = self._html_search_regex(
- r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video URL')
+            (r'(?s)<div[^>]+\bclass=["\']media-meta-title[^>]+>(.+?)</div>',
+             r'id="view-upload-title">\s+([^<]+)<'), webpage, 'title')
+        video_url = (self._html_search_regex(
+            (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
+             r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
+            webpage, 'video URL', default=None, group='url')
+            or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
age_limit = self._rta_search(webpage)
         view_count = str_to_int(self._html_search_regex(
-            r'<strong>Views</strong>\s+([^<]+)<',
+            (r'>(\d+)\s+Views<', r'<strong>Views</strong>\s+([^<]+)<'),
             webpage, 'view count', fatal=False))
         like_count = str_to_int(self._html_search_regex(
-            r'<strong>Favorited</strong>\s+([^<]+)<',
+            (r'>(\d+)\s+Favorites<', r'<strong>Favorited</strong>\s+([^<]+)<'),
             webpage, 'like count', fatal=False))
         upload_date = self._html_search_regex(
-            r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
+            (r'class=["\']count[^>]+>(\d+\s+[a-zA-Z]{3}\s+\d{4})<',
+             r'<strong>Uploaded</strong>\s+([^<]+)<'), webpage, 'upload date')
if 'Ago' in upload_date:
days = int(re.search(r'([0-9]+)', upload_date).group(1))
upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')
@@ -120,7 +125,7 @@ class MotherlessIE(InfoExtractor):
class MotherlessGroupIE(InfoExtractor):
-    _VALID_URL = 'https?://(?:www\.)?motherless\.com/gv?/(?P<id>[a-z0-9_]+)'
+    _VALID_URL = r'https?://(?:www\.)?motherless\.com/gv?/(?P<id>[a-z0-9_]+)'
_TESTS = [{
'url': 'http://motherless.com/g/movie_scenes',
'info_dict': {
@@ -148,14 +153,27 @@ class MotherlessGroupIE(InfoExtractor):
else super(MotherlessGroupIE, cls).suitable(url))
def _extract_entries(self, webpage, base):
-        return [
-            self.url_result(
-                compat_urlparse.urljoin(base, video_path),
-                MotherlessIE.ie_key(), video_title=title)
-            for video_path, title in orderedSet(re.findall(
-                r'href="(/[^"]+)"[^>]+>\s+<img[^>]+alt="[^-]+-\s([^"]+)"',
-                webpage))
-        ]
+        entries = []
+        for mobj in re.finditer(
+                r'href="(?P<href>/[^"]+)"[^>]*>(?:\s*<img[^>]+alt="[^-]+-\s(?P<title>[^"]+)")?',
+                webpage):
+ video_url = compat_urlparse.urljoin(base, mobj.group('href'))
+ if not MotherlessIE.suitable(video_url):
+ continue
+ video_id = MotherlessIE._match_id(video_url)
+ title = mobj.group('title')
+ entries.append(self.url_result(
+ video_url, ie=MotherlessIE.ie_key(), video_id=video_id,
+ video_title=title))
+ # Alternative fallback
+ if not entries:
+ entries = [
+ self.url_result(
+ compat_urlparse.urljoin(base, '/' + entry_id),
+ ie=MotherlessIE.ie_key(), video_id=entry_id)
+ for entry_id in orderedSet(re.findall(
+ r'data-codename=["\']([A-Z0-9]+)', webpage))]
+ return entries
def _real_extract(self, url):
group_id = self._match_id(url)