X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2FInfoExtractors.py;h=092bfef22ba7cdf7e4847ebc562bae0814037141;hb=60179645808cbc3cff3ba062312bfa360de48965;hp=d7295ae3fe0bafb87dd3a1ea88e431ffd31cd32f;hpb=e314ba675b6ce6683395d04e4621aae2b5aca0ec;p=youtube-dl

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index d7295ae3f..092bfef22 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -3,6 +3,7 @@
 from __future__ import absolute_import
 
+import base64
 import datetime
 import netrc
 import os
@@ -105,6 +106,20 @@ class InfoExtractor(object):
     def IE_NAME(self):
         return type(self).__name__[:-2]
 
+    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
+        if note is None:
+            note = u'Downloading video webpage'
+        self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
+        try:
+            urlh = compat_urllib_request.urlopen(url_or_request)
+            webpage_bytes = urlh.read()
+            return webpage_bytes.decode('utf-8', 'replace')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            if errnote is None:
+                errnote = u'Unable to download webpage'
+            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
+
+
 class YoutubeIE(InfoExtractor):
     """Information extractor for youtube.com."""
 
@@ -397,7 +412,7 @@ class YoutubeIE(InfoExtractor):
         # uploader_id
         video_uploader_id = None
-        mobj = re.search(r'', video_webpage)
+        mobj = re.search(r'', video_webpage)
         if mobj is not None:
             video_uploader_id = mobj.group(1)
         else:
@@ -660,10 +675,6 @@ class DailymotionIE(InfoExtractor):
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)
-
     def report_extraction(self, video_id):
         """Report information extraction."""
         self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
@@ -682,13 +693,7 @@ class DailymotionIE(InfoExtractor):
         # Retrieve video webpage to extract further information
         request = compat_urllib_request.Request(url)
         request.add_header('Cookie', 'family_filter=off')
-        try:
-            self.report_download_webpage(video_id)
-            webpage_bytes = compat_urllib_request.urlopen(request).read()
-            webpage = webpage_bytes.decode('utf-8')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
-            return
+        webpage = self._download_webpage(request, video_id)
 
         # Extract URL, uploader and title from webpage
         self.report_extraction(video_id)
@@ -1910,10 +1915,6 @@ class DepositFilesIE(InfoExtractor):
     """Information extractor for depositfiles.com"""
 
     _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
-    IE_NAME = u'DepositFiles'
-
-    def __init__(self, downloader=None):
-        InfoExtractor.__init__(self, downloader)
 
     def report_download_webpage(self, file_id):
         """Report webpage download."""
@@ -2276,10 +2277,6 @@ class MyVideoIE(InfoExtractor):
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
-
     def report_extraction(self, video_id):
         """Report information extraction."""
         self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
@@ -2293,13 +2290,8 @@ class MyVideoIE(InfoExtractor):
         video_id = mobj.group(1)
 
         # Get video webpage
-        request = compat_urllib_request.Request('http://www.myvideo.de/watch/%s' % video_id)
-        try:
-            self.report_download_webpage(video_id)
-            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
-            return
+        webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
+        webpage = self._download_webpage(webpage_url, video_id)
 
         self.report_extraction(video_id)
         mobj = re.search(r'',
@@ -2341,7 +2333,6 @@ class ComedyCentralIE(InfoExtractor):
                      (the-colbert-report-(videos|collections)/(?P[0-9]+)/[^/]*/(?P.*?))
                      |(watch/(?P[^/]*)/(?P.*)))))
                     $"""
-    IE_NAME = u'comedycentral'
 
     _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
 
@@ -2369,16 +2360,12 @@ class ComedyCentralIE(InfoExtractor):
     def report_extraction(self, episode_id):
         self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
 
-    def report_config_download(self, episode_id):
-        self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
+    def report_config_download(self, episode_id, media_id):
+        self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration for %s' % (episode_id, media_id))
 
     def report_index_download(self, episode_id):
         self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
 
-    def report_player_url(self, episode_id):
-        self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
-
-
     def _print_formats(self, formats):
         print('Available formats:')
         for x in formats:
@@ -2417,6 +2404,7 @@ class ComedyCentralIE(InfoExtractor):
         try:
             htmlHandle = compat_urllib_request.urlopen(req)
             html = htmlHandle.read()
+            webpage = html.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
             return
@@ -2431,29 +2419,20 @@ class ComedyCentralIE(InfoExtractor):
             return
         epTitle = mobj.group('episode')
 
-        mMovieParams = re.findall('(?:gsp.comedystor/.*)$', rtmp_video_url)
+            if not m:
+                raise ExtractorError(u'Cannot transform RTMP url')
+            base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
+            video_url = base + m.group('finalid')
 
-            if video_url.startswith(broken_cdn):
-                video_url = video_url.replace(broken_cdn, better_cdn)
-
-            effTitle = showId + u'-' + epTitle
+            effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1)
             info = {
                 'id': shortMediaId,
                 'url': video_url,
@@ -2529,9 +2506,7 @@ class ComedyCentralIE(InfoExtractor):
                 'format': format,
                 'thumbnail': None,
                 'description': officialTitle,
-                'player_url': None #playerUrl
             }
-
             results.append(info)
 
         return results
@@ -2611,7 +2586,6 @@ class EscapistIE(InfoExtractor):
 
         return [info]
 
-
 class CollegeHumorIE(InfoExtractor):
     """Information extractor for collegehumor.com"""
 
@@ -2690,10 +2664,6 @@ class XVideosIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
     IE_NAME = u'xvideos'
 
-    def report_webpage(self, video_id):
-        """Report information extraction."""
-        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
-
     def report_extraction(self, video_id):
         """Report information extraction."""
         self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
@@ -2705,15 +2675,7 @@ class XVideosIE(InfoExtractor):
             return
         video_id = mobj.group(1)
 
-        self.report_webpage(video_id)
-
-        request = compat_urllib_request.Request(r'http://www.xvideos.com/video' + video_id)
-        try:
-            webpage_bytes = compat_urllib_request.urlopen(request).read()
-            webpage = webpage_bytes.decode('utf-8', 'replace')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
-            return
+        webpage = self._download_webpage(url, video_id)
 
         self.report_extraction(video_id)
 
@@ -2812,7 +2774,7 @@ class SoundcloudIE(InfoExtractor):
             stream_json_bytes = compat_urllib_request.urlopen(request).read()
             stream_json = stream_json_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err))
             return
 
         streams = json.loads(stream_json)
@@ -2831,13 +2793,7 @@
 
 class InfoQIE(InfoExtractor):
     """Information extractor for infoq.com"""
-
     _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
-    IE_NAME = u'infoq'
-
-    def report_webpage(self, video_id):
-        """Report information extraction."""
-        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
 
     def report_extraction(self, video_id):
         """Report information extraction."""
@@ -2849,38 +2805,29 @@ class InfoQIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
 
-        self.report_webpage(url)
-
-        request = compat_urllib_request.Request(url)
-        try:
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
-            return
-
+        webpage = self._download_webpage(url, video_id=url)
         self.report_extraction(url)
 
-
         # Extract video URL
         mobj = re.search(r"jsclassref='([^']*)'", webpage)
         if mobj is None:
             self._downloader.trouble(u'ERROR: unable to extract video url')
             return
-        video_url = 'rtmpe://video.infoq.com/cfx/st/' + compat_urllib_parse.unquote(mobj.group(1).decode('base64'))
-
+        real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
+        video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
 
         # Extract title
         mobj = re.search(r'contentTitle = "(.*?)";', webpage)
         if mobj is None:
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
-        video_title = mobj.group(1).decode('utf-8')
+        video_title = mobj.group(1)
 
         # Extract description
         video_description = u'No description available.'
         mobj = re.search(r'', webpage)
         if mobj is not None:
-            video_description = mobj.group(1).decode('utf-8')
+            video_description = mobj.group(1)
 
         video_filename = video_url.split('/')[-1]
         video_id, extension = video_filename.split('.')
@@ -3136,10 +3083,6 @@ class MTVIE(InfoExtractor):
     _VALID_URL = r'^(?Phttps?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P[0-9]+)/[^/]+$'
     IE_NAME = u'mtv'
 
-    def report_webpage(self, video_id):
-        """Report information extraction."""
-        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
-
     def report_extraction(self, video_id):
         """Report information extraction."""
         self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
@@ -3152,14 +3095,8 @@ class MTVIE(InfoExtractor):
         if not mobj.group('proto'):
             url = 'http://' + url
         video_id = mobj.group('videoid')
-        self.report_webpage(video_id)
 
-        request = compat_urllib_request.Request(url)
-        try:
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
-            return
+        webpage = self._download_webpage(url, video_id)
 
         mobj = re.search(r'', webpage)
         if mobj is None:
@@ -3222,20 +3159,15 @@ class YoukuIE(InfoExtractor):
-
     _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P[A-Za-z0-9]+)\.html'
-    IE_NAME = u'Youku'
-
-    def __init__(self, downloader=None):
-        InfoExtractor.__init__(self, downloader)
 
     def report_download_webpage(self, file_id):
         """Report webpage download."""
-        self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id)
+        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, file_id))
 
     def report_extraction(self, file_id):
         """Report information extraction."""
-        self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id)
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
 
     def _gen_sid(self):
         nowTime = int(time.time() * 1000)
@@ -3340,7 +3272,7 @@ class YoukuIE(InfoExtractor):
 
 class XNXXIE(InfoExtractor):
     """Information extractor for xnxx.com"""
-    _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
+    _VALID_URL = r'^(?:https?://)?video\.xnxx\.com/video([0-9]+)/(.*)'
     IE_NAME = u'xnxx'
     VIDEO_URL_RE = r'flv_url=(.*?)&'
     VIDEO_TITLE_RE = r'(.*?)\s+-\s+XNXX.COM'
@@ -3529,9 +3461,6 @@ class NBAIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*)(\?.*)?$'
     IE_NAME = u'nba'
 
-    def report_extraction(self, video_id):
-        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -3542,14 +3471,7 @@ class NBAIE(InfoExtractor):
         if video_id.endswith('/index.html'):
             video_id = video_id[:-len('/index.html')]
 
-        self.report_extraction(video_id)
-        try:
-            urlh = compat_urllib_request.urlopen(url)
-            webpage_bytes = urlh.read()
-            webpage = webpage_bytes.decode('utf-8', 'ignore')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
-            return
+        webpage = self._download_webpage(url, video_id)
 
         video_url = u'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
 
         def _findProp(rexp, default=None):
@@ -3602,17 +3524,23 @@ class JustinTVIE(InfoExtractor):
             return
         response = json.loads(webpage)
+        if type(response) != list:
+            error_text = response.get('error', 'unknown error')
+            self._downloader.trouble(u'ERROR: Justin.tv API: %s' % error_text)
+            return
         info = []
         for clip in response:
             video_url = clip['video_file_url']
             if video_url:
                 video_extension = os.path.splitext(video_url)[1][1:]
-                video_date = re.sub('-', '', clip['created_on'][:10])
+                video_date = re.sub('-', '', clip['start_time'][:10])
+                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
                 info.append({
                     'id': clip['id'],
                     'url': video_url,
                     'title': clip['title'],
-                    'uploader': clip.get('user_id', clip.get('channel_id')),
+                    'uploader': clip.get('channel_name', video_uploader_id),
+                    'uploader_id': video_uploader_id,
                     'upload_date': video_date,
                     'ext': video_extension,
                 })
@@ -3631,7 +3559,7 @@ class JustinTVIE(InfoExtractor):
             paged = True
             api += '/channel/archives/%s.json'
         else:
-            api += '/clip/show/%s.json'
+            api += '/broadcast/by_archive/%s.json'
         api = api % (video_id,)
 
         self.report_extraction(video_id)
@@ -3652,10 +3580,6 @@ class JustinTVIE(InfoExtractor):
 
 class FunnyOrDieIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
-    IE_NAME = u'FunnyOrDie'
-
-    def report_extraction(self, video_id):
-        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -3664,14 +3588,7 @@ class FunnyOrDieIE(InfoExtractor):
             return
 
         video_id = mobj.group('id')
-        self.report_extraction(video_id)
-        try:
-            urlh = compat_urllib_request.urlopen(url)
-            webpage_bytes = urlh.read()
-            webpage = webpage_bytes.decode('utf-8', 'ignore')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
-            return
+        webpage = self._download_webpage(url, video_id)
 
         m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
         if not m:
@@ -3701,9 +3618,6 @@ class FunnyOrDieIE(InfoExtractor):
 
 class TweetReelIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?tweetreel\.com/[?](?P<id>[0-9a-z]+)$'
 
-    def report_extraction(self, video_id):
-        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -3711,14 +3625,7 @@ class TweetReelIE(InfoExtractor):
             return
 
         video_id = mobj.group('id')
-        self.report_extraction(video_id)
-        try:
-            urlh = compat_urllib_request.urlopen(url)
-            webpage_bytes = urlh.read()
-            webpage = webpage_bytes.decode('utf-8', 'ignore')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
-            return
+        webpage = self._download_webpage(url, video_id)
 
         m = re.search(r'<div id="left" status_id="([0-9]+)">', webpage)
         if not m:
@@ -3763,45 +3670,409 @@ class SteamIE(InfoExtractor):
                 (?P<gameID>\d+)/?
                 (?P<videoID>\d*)(?P<extra>\??)
                 #For urltype == video we sometimes get the videoID
                 """
-    IE_NAME = u'Steam'
-
+
     def suitable(self, url):
         """Receives a URL and returns True if suitable for this IE."""
         return re.match(self._VALID_URL, url, re.VERBOSE) is not None
-
-    def report_download_video_page(self, game_id):
-        self._downloader.to_screen(u'[%s] %s: Downloading video page' % (self.IE_NAME, game_id))
-
+
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url, re.VERBOSE)
         urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
         gameID = m.group('gameID')
         videourl = 'http://store.steampowered.com/video/%s/' % gameID
-        try:
-            self.report_download_video_page(gameID)
-            urlh = compat_urllib_request.urlopen(videourl)
-            webpage_bytes = urlh.read()
-            webpage = webpage_bytes.decode('utf-8', 'ignore')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
-            return
+        webpage = self._download_webpage(videourl, gameID)
         mweb = re.finditer(urlRE, webpage)
-        namesRE = r'<span class=\"title\">(?P<videoName>[\w:/\.\?=\+\s-]+)</span>'
-        titles = list(re.finditer(namesRE, webpage))
+        namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
+        titles = re.finditer(namesRE, webpage)
         videos = []
-        i = 0
-        for vid in mweb:
+        for vid,vtitle in zip(mweb,titles):
             video_id = vid.group('videoID')
-            title = titles[i].group('videoName')
-            video_url=vid.group('videoURL')
+            title = vtitle.group('videoName')
+            video_url = vid.group('videoURL')
             if not video_url:
                 self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
-            i += 1
             info = {
                 'id':video_id,
                 'url':video_url,
                 'ext': 'flv',
-                'title': title
+                'title': unescapeHTML(title)
                   }
             videos.append(info)
         return videos
+
+class UstreamIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.ustream\.tv/recorded/(?P<videoID>\d+)'
+    IE_NAME = u'ustream'
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
+        video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
+        webpage = self._download_webpage(url, video_id)
+        m = re.search(r'data-title="(?P<title>.+)"',webpage)
+        title = m.group('title')
+        m = re.search(r'<a class="state" data-content-type="channel" data-content-id="(?P<uploader>\d+)"',webpage)
+        uploader = m.group('uploader')
+        info = {
+                'id':video_id,
+                'url':video_url,
+                'ext': 'flv',
+                'title': title,
+                'uploader': uploader
+                  }
+        return [info]
+
+
+
+class YouPornIE(InfoExtractor):
+    """Information extractor for youporn.com."""
+
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    # def report_id(self, video_id):
+    #     """Report finding video ID"""
+    #     self._downloader.to_screen(u'[youporn] Video ID: %s' % video_id)
+
+    # def report_webpage(self, url):
+    #     """Report downloading page"""
+    #     self._downloader.to_screen(u'[youporn] Downloaded page: %s' % url)
+
+    # def report_title(self, video_title):
+    #     """Report dfinding title"""
+    #     self._downloader.to_screen(u'[youporn] Title: %s' % video_title)
+
+    # def report_uploader(self, uploader):
+    #     """Report dfinding title"""
+    #     self._downloader.to_screen(u'[youporn] Uploader: %s' % uploader)
+
+    # def report_upload_date(self, video_date):
+    #     """Report finding date"""
+    #     self._downloader.to_screen(u'[youporn] Date: %s' % video_date)
+
+    def _print_formats(self, formats):
+        """Print all available formats"""
+        print('Available formats:')
+        print(u'ext\t\tformat')
+        print(u'---------------------------------')
+        for format in formats:
+            print(u'%s\t\t%s' % (format['ext'], format['format']))
+
+    def _specific(self, req_format, formats):
+        for x in formats:
+            if(x["format"]==req_format):
+                return x
+        return None
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group('videoid')
+        #self.report_id(video_id)
+
+        webpage = self._download_webpage(url, video_id)
+        #self.report_webpage(url)
+
+        # Get the video title
+        VIDEO_TITLE_RE = r'videoTitleArea">(?P<title>.*)</h1>'
+        result = re.search(VIDEO_TITLE_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = result.group('title').strip()
+        #self.report_title(video_title)
+
+        # Get the video date
+        VIDEO_DATE_RE = r'Date:</b>(?P<date>.*)</li>'
+        result = re.search(VIDEO_DATE_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video date')
+            return
+        upload_date = result.group('date').strip()
+        #self.report_upload_date(upload_date)
+
+        # Get the video uploader
+        VIDEO_UPLOADER_RE = r'Submitted:</b>(?P<uploader>.*)</li>'
+        result = re.search(VIDEO_UPLOADER_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract uploader')
+            return
+        video_uploader = result.group('uploader').strip()
+        video_uploader = clean_html( video_uploader )
+        #self.report_uploader(video_uploader)
+
+        # Get all of the formats available
+        DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
+        result = re.search(DOWNLOAD_LIST_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract download list')
+            return
+        download_list_html = result.group('download_list').strip()
+
+        # Get all of the links from the page
+        LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
+        links = re.findall(LINK_RE, download_list_html)
+        if(len(links) == 0):
+            self._downloader.trouble(u'ERROR: no known formats available for video')
+            return
+
+        self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))
+
+        formats = []
+        for link in links:
+
+            # A link looks like this:
+            # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
+            # A path looks like this:
+            # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
+            video_url = unescapeHTML( link )
+            path = compat_urllib_parse_urlparse( video_url ).path
+            extension = os.path.splitext( path )[1][1:]
+            format = path.split('/')[4].split('_')[:2]
+            size = format[0]
+            bitrate = format[1]
+            format = "-".join( format )
+            title = u'%s-%s-%s' % (video_title, size, bitrate)
+
+            formats.append({
+                'id': video_id,
+                'url': video_url,
+                'uploader': video_uploader,
+                'upload_date': upload_date,
+                'title': title,
+                'ext': extension,
+                'format': format,
+                'thumbnail': None,
+                'description': None,
+                'player_url': None
+            })
+
+        if self._downloader.params.get('listformats', None):
+            self._print_formats(formats)
+            return
+
+        req_format = self._downloader.params.get('format', None)
+        #format_limit = self._downloader.params.get('format_limit', None)
+        self._downloader.to_screen(u'[youporn] Format: %s' % req_format)
+
+
+        if req_format is None or req_format == 'best':
+            return [formats[0]]
+        elif req_format == 'worst':
+            return [formats[-1]]
+        elif req_format in ('-1', 'all'):
+            return formats
+        else:
+            format = self._specific( req_format, formats )
+            if result is None:
+                self._downloader.trouble(u'ERROR: requested format not available')
+                return
+            return [format]
+
+
+
+class PornotubeIE(InfoExtractor):
+    """Information extractor for pornotube.com."""
+
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
+
+    # def __init__(self, downloader=None):
+    #     InfoExtractor.__init__(self, downloader)
+
+    # def report_extract_entry(self, url):
+    #     """Report downloading extry"""
+    #     self._downloader.to_screen(u'[pornotube] Downloading entry: %s' % url.decode('utf-8'))
+
+    # def report_date(self, upload_date):
+    #     """Report finding uploaded date"""
+    #     self._downloader.to_screen(u'[pornotube] Entry date: %s' % upload_date)
+
+    # def report_webpage(self, url):
+    #     """Report downloading page"""
+    #     self._downloader.to_screen(u'[pornotube] Downloaded page: %s' % url)
+
+    # def report_title(self, video_title):
+    #     """Report downloading extry"""
+    #     self._downloader.to_screen(u'[pornotube] Title: %s' % video_title.decode('utf-8'))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group('videoid')
+        video_title = mobj.group('title')
+        #self.report_title(video_title);
+
+        # Get webpage content
+        webpage = self._download_webpage(url, video_id)
+        #self.report_webpage(url)
+
+        # Get the video URL
+        VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
+        result = re.search(VIDEO_URL_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video url')
+            return
+        video_url = compat_urllib_parse.unquote(result.group('url'))
+        #self.report_extract_entry(video_url)
+
+        #Get the uploaded date
+        VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
+        result = re.search(VIDEO_UPLOADED_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        upload_date = result.group('date')
+        #self.report_date(upload_date);
+
+        info = {'id': video_id,
+                'url': video_url,
+                'uploader': None,
+                'upload_date': upload_date,
+                'title': video_title,
+                'ext': 'flv',
+                'format': 'flv',
+                'thumbnail': None,
+                'description': None,
+                'player_url': None}
+
+        return [info]
+
+
+
+class YouJizzIE(InfoExtractor):
+    """Information extractor for youjizz.com."""
+
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+).html$'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    # def report_extract_entry(self, url):
+    #     """Report downloading extry"""
+    #     self._downloader.to_screen(u'[youjizz] Downloading entry: %s' % url.decode('utf-8'))
+
+    # def report_webpage(self, url):
+    #     """Report downloading page"""
+    #     self._downloader.to_screen(u'[youjizz] Downloaded page: %s' % url)
+
+    # def report_title(self, video_title):
+    #     """Report downloading extry"""
+    #     self._downloader.to_screen(u'[youjizz] Title: %s' % video_title.decode('utf-8'))
+
+    # def report_embed_page(self, embed_page):
+    #     """Report downloading extry"""
+    #     self._downloader.to_screen(u'[youjizz] Embed Page: %s' % embed_page.decode('utf-8'))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group('videoid')
+
+        # Get webpage content
+        webpage = self._download_webpage(url, video_id)
+        #self.report_webpage(url)
+
+        # Get the video title
+        VIDEO_TITLE_RE = r'<title>(?P<title>.*)'
+        result = re.search(VIDEO_TITLE_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = result.group('title').strip()
+        #self.report_title(video_title)
+
+        # Get the embed page
+        EMBED_PAGE_RE = r'http://www.youjizz.com/videos/embed/(?P[0-9]+)'
+        result = re.search(EMBED_PAGE_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract embed page')
+            return
+
+        embed_page_url = result.group(0).strip()
+        video_id = result.group('videoid')
+        #self.report_embed_page(embed_page_url)
+
+        webpage = self._download_webpage(embed_page_url, video_id)
+
+        # Get the video URL
+        SOURCE_RE = r'so.addVariable\("file",encodeURIComponent\("(?P[^"]+)"\)\);'
+        result = re.search(SOURCE_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video url')
+            return
+        video_url = result.group('source')
+        #self.report_extract_entry(video_url)
+
+        info = {'id': video_id,
+                'url': video_url,
+                'uploader': None,
+                'upload_date': None,
+                'title': video_title,
+                'ext': 'flv',
+                'format': 'flv',
+                'thumbnail': None,
+                'description': None,
+                'player_url': embed_page_url}
+
+        return [info]
+
+
+def gen_extractors():
+    """ Return a list of an instance of every supported extractor.
+    The order does matter; the first extractor matched is the one handling the URL.
+    """
+    return [
+        YoutubePlaylistIE(),
+        YoutubeChannelIE(),
+        YoutubeUserIE(),
+        YoutubeSearchIE(),
+        YoutubeIE(),
+        MetacafeIE(),
+        DailymotionIE(),
+        GoogleSearchIE(),
+        PhotobucketIE(),
+        YahooIE(),
+        YahooSearchIE(),
+        DepositFilesIE(),
+        FacebookIE(),
+        BlipTVUserIE(),
+        BlipTVIE(),
+        VimeoIE(),
+        MyVideoIE(),
+        ComedyCentralIE(),
+        EscapistIE(),
+        CollegeHumorIE(),
+        XVideosIE(),
+        SoundcloudIE(),
+        InfoQIE(),
+        MixcloudIE(),
+        StanfordOpenClassroomIE(),
+        MTVIE(),
+        YoukuIE(),
+        XNXXIE(),
+        YouJizzIE(),
+        PornotubeIE(),
+        YouPornIE(),
+        GooglePlusIE(),
+        ArteTvIE(),
+        NBAIE(),
+        JustinTVIE(),
+        FunnyOrDieIE(),
+        TweetReelIE(),
+        SteamIE(),
+        UstreamIE(),
+        GenericIE()
+    ]
+
+
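
For context on the pattern this patch converges on: extractors now delegate page fetching to the shared InfoExtractor._download_webpage() helper added at the top of the file, instead of each carrying its own urlopen/try/except boilerplate and a per-site report_download_webpage method. A minimal sketch of a hypothetical extractor written against this revision follows; ExampleIE, example.com, and its regexes are illustrative only and are not part of the patch.

# Hypothetical example only -- ExampleIE and example.com are illustrative.
# Assumes the surrounding InfoExtractors.py module context of this revision
# (re is imported at module level, InfoExtractor is the base class above).
class ExampleIE(InfoExtractor):
    """Information extractor for example.com (illustrative sketch)."""

    _VALID_URL = r'^(?:https?://)?(?:www\.)?example\.com/video/(?P<id>[0-9]+)'
    IE_NAME = u'example'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group('id')

        # The shared helper prints '[example] <id>: Downloading video webpage'
        # and raises ExtractorError on network failure, so the per-extractor
        # download reporting and try/except blocks are no longer needed here.
        webpage = self._download_webpage(url, video_id)

        mobj = re.search(r'<title>(.*?)</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        return [{
            'id': video_id,
            'url': u'http://example.com/video/%s.flv' % video_id,  # illustrative URL
            'ext': 'flv',
            'title': mobj.group(1),
        }]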