X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;ds=sidebyside;f=youtube_dl%2Fextractor%2Fredtube.py;h=f70a75256c638f4a3ce9cda3b9577176e49f3cca;hb=c9bd503e7de28e9d4ba9ebee5e509a8abb90fc72;hp=7d9285ffb5d085c6c3bf12eb38b98e001104dae8;hpb=5021ca6c13e3d011dc24ecf38d326e3a59e726a1;p=youtube-dl

diff --git a/youtube_dl/extractor/redtube.py b/youtube_dl/extractor/redtube.py
index 7d9285ffb..f70a75256 100644
--- a/youtube_dl/extractor/redtube.py
+++ b/youtube_dl/extractor/redtube.py
@@ -1,6 +1,9 @@
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -28,6 +31,12 @@ class RedTubeIE(InfoExtractor):
         'only_matching': True,
     }]
 
+    @staticmethod
+    def _extract_urls(webpage):
+        return re.findall(
+            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
+            webpage)
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(
@@ -54,7 +63,23 @@ class RedTubeIE(InfoExtractor):
                         'format_id': format_id,
                         'height': int_or_none(format_id),
                     })
-        else:
+        medias = self._parse_json(
+            self._search_regex(
+                r'mediaDefinition\s*:\s*(\[.+?\])', webpage,
+                'media definitions', default='{}'),
+            video_id, fatal=False)
+        if medias and isinstance(medias, list):
+            for media in medias:
+                format_url = media.get('videoUrl')
+                if not format_url or not isinstance(format_url, compat_str):
+                    continue
+                format_id = media.get('quality')
+                formats.append({
+                    'url': format_url,
+                    'format_id': format_id,
+                    'height': int_or_none(format_id),
+                })
+        if not formats:
             video_url = self._html_search_regex(
                 r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
             formats.append({'url': video_url})
@@ -65,7 +90,7 @@ class RedTubeIE(InfoExtractor):
             r'<span[^>]+class="added-time"[^>]*>ADDED ([^<]+)<',
             webpage, 'upload date', fatal=False))
         duration = int_or_none(self._search_regex(
-            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
+            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
         view_count = str_to_int(self._search_regex(
             r'<span[^>]*>VIEWS</span></td>\s*<td>([\d,.]+)',
             webpage, 'view count', fatal=False))
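
The following is a minimal, self-contained sketch (not part of the commit) of what the new RedTubeIE._extract_urls() helper is expected to return; the sample HTML snippet and the embed URL in it are made up for illustration.

# Standalone sketch of the embed-URL discovery added above; the sample markup
# below is hypothetical and not taken from the site or the diff.
import re

# Same pattern the diff adds in RedTubeIE._extract_urls().
EMBED_RE = r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)'

sample_page = '''
<div class="player">
  <iframe src="//embed.redtube.com/?bgcolor=000000&id=1234567" frameborder="0"></iframe>
</div>
'''

# With a single capturing group, re.findall() returns just that group, so the
# caller (e.g. the generic extractor) gets a list of embed URLs to re-dispatch.
print(re.findall(EMBED_RE, sample_page))
# ['//embed.redtube.com/?bgcolor=000000&id=1234567']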