import random
import math
import operator
+import hashlib
+import binascii
+import urllib
from .utils import *
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # The prefix is empty (one result), a positive integer, or 'all'.
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        """Parse the prefix out of the search query and dispatch to
        _get_n_results with the requested result count."""
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError(u'Invalid search query "%s"' % query)

        # BUG FIX: 'prefix' and 'query' were referenced without being
        # extracted from the match object, raising NameError.
        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query.

        Must be overridden by subclasses; the body visible here was
        spliced garbage from an unrelated hunk.
        """
        raise NotImplementedError(u'This method must be implemented by subclasses')
+ def _request_automatic_caption(self, video_id, webpage):
+ """We need the webpage for getting the captions url, pass it as an
+ argument to speed up the process."""
+ sub_lang = self._downloader.params.get('subtitleslang')
+ sub_format = self._downloader.params.get('subtitlesformat')
+ self.to_screen(u'%s: Looking for automatic captions' % video_id)
+ mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
+ err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
+ if mobj is None:
+ return [(err_msg, None, None)]
+ player_config = json.loads(mobj.group(1))
+ try:
+ args = player_config[u'args']
+ caption_url = args[u'ttsurl']
+ timestamp = args[u'timestamp']
+ params = compat_urllib_parse.urlencode({
+ 'lang': 'en',
+ 'tlang': sub_lang,
+ 'fmt': sub_format,
+ 'ts': timestamp,
+ 'kind': 'asr',
+ })
+ subtitles_url = caption_url + '&' + params
+ sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
+ return [(None, sub_lang, sub)]
+ except KeyError:
+ return [(err_msg, None, None)]
+
def _extract_subtitle(self, video_id):
"""
Return a list with a tuple:
if video_subtitles:
(sub_error, sub_lang, sub) = video_subtitles[0]
if sub_error:
- self._downloader.report_error(sub_error)
+ # We try with the automatic captions
+ video_subtitles = self._request_automatic_caption(video_id, video_webpage)
+ (sub_error_auto, sub_lang, sub) = video_subtitles[0]
+ if sub is not None:
+ pass
+ else:
+ # We report the original error
+ self._downloader.report_error(sub_error)
if self._downloader.params.get('allsubtitles', False):
video_subtitles = self._extract_all_subtitles(video_id)
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
- _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
+ _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
IE_NAME = u'vimeo'
def _real_extract(self, url, new_video=True):
video_id = mobj.group('id')
if not mobj.group('proto'):
url = 'https://' + url
- if mobj.group('direct_link'):
+ if mobj.group('direct_link') or mobj.group('pro'):
url = 'https://vimeo.com/' + video_id
# Retrieve video webpage to extract further information
# Extract uploader and uploader_id
video_uploader = config["video"]["owner"]["name"]
- video_uploader_id = config["video"]["owner"]["url"].split('/')[-1]
+ video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None
# Extract video thumbnail
video_thumbnail = config["video"]["thumbnail"]
opener.add_handler(handler())
response = opener.open(HeadRequest(url))
+ if response is None:
+ raise ExtractorError(u'Invalid URL protocol')
new_url = response.geturl()
if url == new_url:
class YoutubeSearchIE(SearchInfoExtractor):
"""Information Extractor for YouTube search queries."""
_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
- _max_results = 1000
+ _MAX_RESULTS = 1000
IE_NAME = u'youtube:search'
_SEARCH_KEY = 'ytsearch'
class GoogleSearchIE(SearchInfoExtractor):
"""Information Extractor for Google Video search queries."""
_MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
- _max_results = 1000
+ _MAX_RESULTS = 1000
IE_NAME = u'video.google:search'
_SEARCH_KEY = 'gvsearch'
class YahooSearchIE(SearchInfoExtractor):
"""Information Extractor for Yahoo! Video search queries."""
- _max_results = 1000
+ _MAX_RESULTS = 1000
IE_NAME = u'screen.yahoo:search'
_SEARCH_KEY = 'yvsearch'
class BlipTVIE(InfoExtractor):
"""Information extractor for blip.tv"""
- _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
+ _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
_URL_EXT = r'^.*\.([a-z0-9]+)$'
IE_NAME = u'blip.tv'
if mobj is None:
raise ExtractorError(u'Invalid URL: %s' % url)
+ # See https://github.com/rg3/youtube-dl/issues/857
+ api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
+ if api_mobj is not None:
+ url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
urlp = compat_urllib_parse_urlparse(url)
if urlp.path.startswith('/play/'):
request = compat_urllib_request.Request(url)
_VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
IE_NAME = u'myvideo'
+ # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
+ # Released into the Public Domain by Tristan Fischer on 2013-05-19
+ # https://github.com/rg3/youtube-dl/pull/842
+ def __rc4crypt(self,data, key):
+ x = 0
+ box = list(range(256))
+ for i in list(range(256)):
+ x = (x + box[i] + compat_ord(key[i % len(key)])) % 256
+ box[i], box[x] = box[x], box[i]
+ x = 0
+ y = 0
+ out = ''
+ for char in data:
+ x = (x + 1) % 256
+ y = (y + box[x]) % 256
+ box[x], box[y] = box[y], box[x]
+ out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
+ return out
+
+ def __md5(self,s):
+ return hashlib.md5(s).hexdigest().encode()
+
def _real_extract(self,url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
+ raise ExtractorError(u'invalid URL: %s' % url)
video_id = mobj.group(1)
+ GK = (
+ b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
+ b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
+ b'TnpsbA0KTVRkbU1tSTRNdz09'
+ )
+
# Get video webpage
webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
webpage = self._download_webpage(webpage_url, video_id)
+ mobj = re.search('source src=\'(.+?)[.]([^.]+)\'', webpage)
+ if mobj is not None:
+ self.report_extraction(video_id)
+ video_url = mobj.group(1) + '.flv'
+
+ mobj = re.search('<title>([^<]+)</title>', webpage)
+ if mobj is None:
+ raise ExtractorError(u'Unable to extract title')
+ video_title = mobj.group(1)
+
+ mobj = re.search('[.](.+?)$', video_url)
+ if mobj is None:
+ raise ExtractorError(u'Unable to extract extention')
+ video_ext = mobj.group(1)
+
+ return [{
+ 'id': video_id,
+ 'url': video_url,
+ 'uploader': None,
+ 'upload_date': None,
+ 'title': video_title,
+ 'ext': u'flv',
+ }]
+
+ # try encxml
+ mobj = re.search('var flashvars={(.+?)}', webpage)
+ if mobj is None:
+ raise ExtractorError(u'Unable to extract video')
+
+ params = {}
+ encxml = ''
+ sec = mobj.group(1)
+ for (a, b) in re.findall('(.+?):\'(.+?)\',?', sec):
+ if not a == '_encxml':
+ params[a] = b
+ else:
+ encxml = compat_urllib_parse.unquote(b)
+ if not params.get('domain'):
+ params['domain'] = 'www.myvideo.de'
+ xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params))
+ if 'flash_playertype=MTV' in xmldata_url:
+ self._downloader.report_warning(u'avoiding MTV player')
+ xmldata_url = (
+ 'http://www.myvideo.de/dynamic/get_player_video_xml.php'
+ '?flash_playertype=D&ID=%s&_countlimit=4&autorun=yes'
+ ) % video_id
+
+ # get enc data
+ enc_data = self._download_webpage(xmldata_url, video_id).split('=')[1]
+ enc_data_b = binascii.unhexlify(enc_data)
+ sk = self.__md5(
+ base64.b64decode(base64.b64decode(GK)) +
+ self.__md5(
+ str(video_id).encode('utf-8')
+ )
+ )
+ dec_data = self.__rc4crypt(enc_data_b, sk)
+
+ # extracting infos
self.report_extraction(video_id)
- mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\'',
- webpage)
+
+ mobj = re.search('connectionurl=\'(.*?)\'', dec_data)
if mobj is None:
- raise ExtractorError(u'Unable to extract media URL')
- video_url = mobj.group(1) + ('/%s.flv' % video_id)
+ raise ExtractorError(u'unable to extract rtmpurl')
+ video_rtmpurl = compat_urllib_parse.unquote(mobj.group(1))
+ if 'myvideo2flash' in video_rtmpurl:
+ self._downloader.report_warning(u'forcing RTMPT ...')
+ video_rtmpurl = video_rtmpurl.replace('rtmpe://', 'rtmpt://')
+
+ # extract non rtmp videos
+ if (video_rtmpurl is None) or (video_rtmpurl == ''):
+ mobj = re.search('path=\'(http.*?)\' source=\'(.*?)\'', dec_data)
+ if mobj is None:
+ raise ExtractorError(u'unable to extract url')
+ video_rtmpurl = compat_urllib_parse.unquote(mobj.group(1)) + compat_urllib_parse.unquote(mobj.group(2))
- mobj = re.search('<title>([^<]+)</title>', webpage)
+ mobj = re.search('source=\'(.*?)\'', dec_data)
if mobj is None:
- raise ExtractorError(u'Unable to extract title')
+ raise ExtractorError(u'unable to extract swfobj')
+ video_file = compat_urllib_parse.unquote(mobj.group(1))
+
+ if not video_file.endswith('f4m'):
+ ppath, prefix = video_file.split('.')
+ video_playpath = '%s:%s' % (prefix, ppath)
+ video_hls_playlist = ''
+ else:
+ video_playpath = ''
+ video_hls_playlist = (
+ video_filepath + video_file
+ ).replace('.f4m', '.m3u8')
+ mobj = re.search('swfobject.embedSWF\(\'(.+?)\'', webpage)
+ if mobj is None:
+ raise ExtractorError(u'unable to extract swfobj')
+ video_swfobj = compat_urllib_parse.unquote(mobj.group(1))
+
+ mobj = re.search("<h1(?: class='globalHd')?>(.*?)</h1>", webpage)
+ if mobj is None:
+ raise ExtractorError(u'unable to extract title')
video_title = mobj.group(1)
return [{
- 'id': video_id,
- 'url': video_url,
- 'uploader': None,
- 'upload_date': None,
- 'title': video_title,
- 'ext': u'flv',
+ 'id': video_id,
+ 'url': video_rtmpurl,
+ 'tc_url': video_rtmpurl,
+ 'uploader': None,
+ 'upload_date': None,
+ 'title': video_title,
+ 'ext': u'flv',
+ 'play_path': video_playpath,
+ 'video_file': video_file,
+ 'video_hls_playlist': video_hls_playlist,
+ 'player_url': video_swfobj,
}]
class ComedyCentralIE(InfoExtractor):
video_id = m.group('videoID')
video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
webpage = self._download_webpage(url, video_id)
- m = re.search(r'data-title="(?P<title>.+)"',webpage)
- title = m.group('title')
- m = re.search(r'<a class="state" data-content-type="channel" data-content-id="(?P<uploader>\d+)"',webpage)
- uploader = m.group('uploader')
+ self.report_extraction(video_id)
+ try:
+ m = re.search(r'data-title="(?P<title>.+)"',webpage)
+ title = m.group('title')
+ m = re.search(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
+ webpage, re.DOTALL)
+ uploader = unescapeHTML(m.group('uploader').strip())
+ m = re.search(r'<link rel="image_src" href="(?P<thumb>.*?)"', webpage)
+ thumb = m.group('thumb')
+ except AttributeError:
+ raise ExtractorError(u'Unable to extract info')
info = {
'id':video_id,
'url':video_url,
'ext': 'flv',
'title': title,
- 'uploader': uploader
+ 'uploader': uploader,
+ 'thumbnail': thumb,
}
- return [info]
+ return info
class WorldStarHipHopIE(InfoExtractor):
_VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
video_id = m.group('videoID')
webpage = self._download_webpage(url, video_id)
- m = re.search(r'<div class="spVideoTitle">(.*?)</div>', webpage)
+ m = re.search(r'<div class="module-title">(.*?)</div>', webpage)
if not m:
raise ExtractorError(u'Cannot find title')
video_title = unescapeHTML(m.group(1))
info["url"] = stream["video_url"]
return [info]
class ZDFIE(InfoExtractor):
    """Information extractor for the ZDF Mediathek (www.zdf.de)."""
    _VALID_URL = r'^http://www\.zdf\.de\/ZDFmediathek\/(.*beitrag\/video\/)(?P<video_id>[^/\?]+)(?:\?.*)?'
    _TITLE = r'<h1(?: class="beitragHeadline")?>(?P<title>.*)</h1>'
    _MEDIA_STREAM = r'<a href="(?P<video_url>.+(?P<media_type>.streaming).+/zdf/(?P<quality>[^\/]+)/[^"]*)".+class="play".+>'
    _MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"'
    _RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*.mp4)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('video_id')

        html = self._download_webpage(url, video_id)
        streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
        # BUG FIX: a list comprehension is never None; test for emptiness.
        if not streams:
            raise ExtractorError(u'No media url found.')

        # s['media_type'] == 'wstreaming' -> use 'Windows Media Player' and mms url
        # s['media_type'] == 'hstreaming' -> use 'Quicktime' and rtsp url
        # choose first/default media type and highest quality for now
        # BUG FIX: stream_ must be initialised, otherwise a NameError below
        # masks the intended 'No stream found.' error.
        stream_ = None
        for s in streams:  # find 300 - dsl1000mbit
            if s['quality'] == '300' and s['media_type'] == 'wstreaming':
                stream_ = s
                break
        for s in streams:  # find veryhigh - dsl2000mbit (preferred)
            if s['quality'] == 'veryhigh' and s['media_type'] == 'wstreaming':  # 'hstreaming' - rtsp is not working
                stream_ = s
                break
        if stream_ is None:
            raise ExtractorError(u'No stream found.')

        media_link = self._download_webpage(stream_['video_url'], video_id, 'Get stream URL')

        self.report_extraction(video_id)
        mobj = re.search(self._TITLE, html)
        if mobj is None:
            raise ExtractorError(u'Cannot extract title')
        title = unescapeHTML(mobj.group('title'))

        # The stream description links either an mms:// or an rtsp:// url.
        mobj = re.search(self._MMS_STREAM, media_link)
        if mobj is None:
            mobj = re.search(self._RTSP_STREAM, media_link)
            if mobj is None:
                raise ExtractorError(u'Cannot extract mms:// or rtsp:// URL')
        mms_url = mobj.group('video_url')

        mobj = re.search('(.*)[.](?P<ext>[^.]+)', mms_url)
        if mobj is None:
            raise ExtractorError(u'Cannot extract extention')
        ext = mobj.group('ext')

        return [{'id': video_id,
                 'url': mms_url,
                 'title': title,
                 'ext': ext
                 }]
+
class TumblrIE(InfoExtractor):
_VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)/(.*?)'
re_video = r'src=\\x22(?P<video_url>http://%s\.tumblr\.com/video_file/%s/(.*?))\\x22 type=\\x22video/(?P<ext>.*?)\\x22' % (blog, video_id)
video = re.search(re_video, webpage)
if video is None:
- self.to_screen("No video founded")
+ self.to_screen("No video found")
return []
video_url = video.group('video_url')
ext = video.group('ext')
class InaIE(InfoExtractor):
"""Information Extractor for Ina.fr"""
- _VALID_URL = r'(?:http://)?(?:www.)?ina\.fr/video/(?P<id>I[0-9]+)/.*'
+ _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I[0-9]+)/.*'
def _real_extract(self,url):
mobj = re.match(self._VALID_URL, url)
'title': video_title,
}]
class HowcastIE(InfoExtractor):
    """Information Extractor for Howcast.com"""
    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)'

    def _real_extract(self, url):
        """Extract url, title, description and thumbnail for a Howcast video."""
        video_id = re.match(self._VALID_URL, url).group('id')

        # Always fetch the canonical page for the id, whatever url matched.
        webpage = self._download_webpage('http://www.howcast.com/videos/' + video_id, video_id)

        self.report_extraction(video_id)

        m = re.search(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)"', webpage)
        if m is None:
            raise ExtractorError(u'Unable to extract video URL')
        video_url = m.group(1)

        m = re.search(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'', webpage)
        if m is None:
            raise ExtractorError(u'Unable to extract title')
        video_title = m.group(1) or m.group(2)

        # Description is optional; only warn when missing.
        m = re.search(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'', webpage)
        if m is not None:
            video_description = m.group(1) or m.group(2)
        else:
            self._downloader.report_warning(u'unable to extract description')
            video_description = None

        m = re.search(r'<meta content=\'(.+?)\' property=\'og:image\'', webpage)
        if m is None:
            raise ExtractorError(u'Unable to extract thumbnail')
        thumbnail = m.group(1)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'description': video_description,
            'thumbnail': thumbnail,
        }]
+
class VineIE(InfoExtractor):
    """Information Extractor for Vine.co"""
    _VALID_URL = r'(?:https?://)?(?:www\.)?vine\.co/v/(?P<id>\w+)'

    def _real_extract(self, url):
        """Extract stream url, title, thumbnail and uploader from a vine page."""
        match = re.match(self._VALID_URL, url)

        video_id = match.group('id')
        webpage = self._download_webpage('https://vine.co/v/' + video_id, video_id)

        self.report_extraction(video_id)

        stream_m = re.search(r'<meta property="twitter:player:stream" content="(.+?)"', webpage)
        if stream_m is None:
            raise ExtractorError(u'Unable to extract video URL')

        title_m = re.search(r'<meta property="og:title" content="(.+?)"', webpage)
        if title_m is None:
            raise ExtractorError(u'Unable to extract title')

        # Drop any query string from the thumbnail url.
        image_m = re.search(r'<meta property="og:image" content="(.+?)(\?.*?)?"', webpage)
        if image_m is None:
            raise ExtractorError(u'Unable to extract thumbnail')

        uploader_m = re.search(r'<div class="user">.*?<h2>(.+?)</h2>', webpage, re.DOTALL)
        if uploader_m is None:
            raise ExtractorError(u'Unable to extract uploader')

        return [{
            'id': video_id,
            'url': stream_m.group(1),
            'ext': 'mp4',
            'title': title_m.group(1),
            'thumbnail': image_m.group(1),
            'uploader': uploader_m.group(1),
        }]
+
class FlickrIE(InfoExtractor):
    """Information Extractor for Flickr videos"""
    _VALID_URL = r'(?:https?://)?(?:www\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'

    def _real_extract(self, url):
        """Resolve the RTMP stream for a Flickr video via its playlist APIs."""
        m = re.match(self._VALID_URL, url)

        video_id = m.group('id')
        video_uploader_id = m.group('uploader_id')
        webpage_url = 'http://www.flickr.com/photos/%s/%s' % (video_uploader_id, video_id)
        webpage = self._download_webpage(webpage_url, video_id)

        # The per-video "secret" is required by both playlist endpoints.
        m = re.search(r"photo_secret: '(\w+)'", webpage)
        if m is None:
            raise ExtractorError(u'Unable to extract video secret')
        secret = m.group(1)

        first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=%s&secret=%s&bitrate=700&target=_self' % (video_id, secret)
        first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')

        m = re.search(r'<Item id="id">(\d+-\d+)</Item>', first_xml)
        if m is None:
            raise ExtractorError(u'Unable to extract node_id')
        node_id = m.group(1)

        second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=%s&tech=flash&mode=playlist&bitrate=700&secret=%s&rd=video.yahoo.com&noad=1' % (node_id, secret)
        second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')

        self.report_extraction(video_id)

        m = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml)
        if m is None:
            raise ExtractorError(u'Unable to extract video url')
        video_url = m.group(1) + unescapeHTML(m.group(2))

        m = re.search(r'<meta property="og:title" content=(?:"([^"]+)"|\'([^\']+)\')', webpage)
        if m is None:
            raise ExtractorError(u'Unable to extract title')
        video_title = m.group(1) or m.group(2)

        # Description is optional; only warn when missing.
        m = re.search(r'<meta property="og:description" content=(?:"([^"]+)"|\'([^\']+)\')', webpage)
        if m is not None:
            video_description = m.group(1) or m.group(2)
        else:
            self._downloader.report_warning(u'unable to extract description')
            video_description = None

        m = re.search(r'<meta property="og:image" content=(?:"([^"]+)"|\'([^\']+)\')', webpage)
        if m is None:
            raise ExtractorError(u'Unable to extract thumbnail')
        thumbnail = m.group(1) or m.group(2)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'description': video_description,
            'thumbnail': thumbnail,
            'uploader_id': video_uploader_id,
        }]
+
class TeamcocoIE(InfoExtractor):
    """Information extractor for teamcoco.com videos."""
    _VALID_URL = r'http://teamcoco\.com/video/(?P<url_title>.*)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        url_title = mobj.group('url_title')
        webpage = self._download_webpage(url, url_title)

        # BUG FIX: raise a proper extraction error instead of crashing with
        # AttributeError when the page layout changes.
        mobj = re.search(r'<article class="video" data-id="(\d+?)"', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract video id')
        video_id = mobj.group(1)

        self.report_extraction(video_id)

        mobj = re.search(r'<meta property="og:title" content="(.+?)"', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract title')
        video_title = mobj.group(1)

        mobj = re.search(r'<meta property="og:image" content="(.+?)"', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract thumbnail')
        thumbnail = mobj.group(1)

        mobj = re.search(r'<meta property="og:description" content="(.*?)"', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract description')
        description = mobj.group(1)

        # The cvp XML holds the actual media urls; use the "high" variant.
        data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id
        data = self._download_webpage(data_url, video_id, 'Downloading data webpage')
        mobj = re.search(r'<file type="high".*?>(.*?)</file>', data)
        if mobj is None:
            raise ExtractorError(u'Unable to extract video url')
        video_url = mobj.group(1)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'thumbnail': thumbnail,
            'description': description,
        }]
+
class XHamsterIE(InfoExtractor):
    """Information Extractor for xHamster"""
    # BUG FIX: escape the dot in the optional www. prefix, consistent with
    # the other extractors in this file.
    _VALID_URL = r'(?:http://)?(?:www\.)?xhamster\.com/movies/(?P<id>[0-9]+)/.*\.html'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        # Fetch the canonical movie page, regardless of the matched slug.
        mrss_url = 'http://xhamster.com/movies/%s/.html' % video_id
        webpage = self._download_webpage(mrss_url, video_id)
        mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract media URL')
        # Either a direct (url-encoded) file url, or a server/key pair.
        if len(mobj.group('server')) == 0:
            video_url = compat_urllib_parse.unquote(mobj.group('file'))
        else:
            video_url = mobj.group('server') + '/key=' + mobj.group('file')
        video_extension = video_url.split('.')[-1]

        mobj = re.search(r'<title>(?P<title>.+?) - xHamster\.com</title>', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract title')
        video_title = unescapeHTML(mobj.group('title'))

        # Description is optional.
        mobj = re.search(r'<span>Description: </span>(?P<description>[^<]+)', webpage)
        if mobj is None:
            video_description = u''
        else:
            video_description = unescapeHTML(mobj.group('description'))

        mobj = re.search(r'hint=\'(?P<upload_date_Y>[0-9]{4})-(?P<upload_date_m>[0-9]{2})-(?P<upload_date_d>[0-9]{2}) [0-9]{2}:[0-9]{2}:[0-9]{2} [A-Z]{3,4}\'', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract upload date')
        video_upload_date = mobj.group('upload_date_Y') + mobj.group('upload_date_m') + mobj.group('upload_date_d')

        mobj = re.search(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^>]+)', webpage)
        if mobj is None:
            video_uploader_id = u'anonymous'
        else:
            video_uploader_id = mobj.group('uploader_id')

        mobj = re.search(r'\'image\':\'(?P<thumbnail>[^\']+)\'', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract thumbnail URL')
        video_thumbnail = mobj.group('thumbnail')

        return [{
            'id': video_id,
            'url': video_url,
            'ext': video_extension,
            'title': video_title,
            'description': video_description,
            'upload_date': video_upload_date,
            'uploader_id': video_uploader_id,
            'thumbnail': video_thumbnail
        }]
+
class HypemIE(InfoExtractor):
    """Information Extractor for hypem (the Hype Machine)."""
    _VALID_URL = r'(?:http://)?(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        track_id = mobj.group(1)

        # The 'ax'/'ts' query parameters make the site embed track data.
        data = {'ax': 1, 'ts': time.time()}
        data_encoded = compat_urllib_parse.urlencode(data)
        complete_url = url + "?" + data_encoded
        request = compat_urllib_request.Request(complete_url)
        response, urlh = self._download_webpage_handle(request, track_id, u'Downloading webpage with the url')
        # The session cookie is needed for the /serve/source request below.
        cookie = urlh.headers.get('Set-Cookie', '')

        self.report_extraction(track_id)
        mobj = re.search(r'<script type="application/json" id="displayList-data">(.*?)</script>', response, flags=re.MULTILINE | re.DOTALL)
        if mobj is None:
            # BUG FIX: typo 'extrack' -> 'extract'
            raise ExtractorError(u'Unable to extract tracks')
        html_tracks = mobj.group(1).strip()
        try:
            track_list = json.loads(html_tracks)
            track = track_list[u'tracks'][0]
        except (ValueError, KeyError, IndexError):
            # BUG FIX: a missing 'tracks' key or an empty track list used to
            # escape as KeyError/IndexError instead of a clean error.
            raise ExtractorError(u'Hypemachine contained invalid JSON.')

        key = track[u"key"]
        track_id = track[u"id"]
        artist = track[u"artist"]
        title = track[u"song"]

        serve_url = "http://hypem.com/serve/source/%s/%s" % (compat_str(track_id), compat_str(key))
        request = compat_urllib_request.Request(serve_url, "", {'Content-Type': 'application/json'})
        request.add_header('cookie', cookie)
        song_data_json = self._download_webpage(request, track_id, u'Downloading metadata')
        try:
            song_data = json.loads(song_data_json)
        except ValueError:
            raise ExtractorError(u'Hypemachine contained invalid JSON.')
        final_url = song_data[u"url"]

        return [{
            'id': track_id,
            'url': final_url,
            'ext': "mp3",
            'title': title,
            'artist': artist,
        }]
+
class Vbox7IE(InfoExtractor):
    """Information Extractor for Vbox7"""
    _VALID_URL = r'(?:http://)?(?:www\.)?vbox7\.com/play:([^/]+)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group(1)

        redirect_page, urlh = self._download_webpage_handle(url, video_id)
        # BUG FIX: guard the redirect regex; calling .group() on None raised
        # AttributeError instead of a meaningful extractor error.
        m = re.search(r'window\.location = \'(.*)\';', redirect_page)
        if m is None:
            raise ExtractorError(u'Unable to extract redirect url')
        redirect_url = urlh.geturl() + m.group(1)
        webpage = self._download_webpage(redirect_url, video_id, u'Downloading redirect page')

        # BUG FIX: same guard for the <title> lookup.
        m = re.search(r'<title>(.*)</title>', webpage)
        if m is None:
            raise ExtractorError(u'Unable to extract title')
        title = m.group(1).split('/')[0].strip()

        ext = "flv"
        info_url = "http://vbox7.com/play/magare.do"
        data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
        info_request = compat_urllib_request.Request(info_url, data)
        info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        info_response = self._download_webpage(info_request, video_id, u'Downloading info webpage')
        if info_response is None:
            raise ExtractorError(u'Unable to extract the media url')
        # Response has the form 'url=...&thumb=...'.
        (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&'))

        return [{
            'id': video_id,
            'url': final_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail_url,
        }]
+
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
YahooSearchIE(),
DepositFilesIE(),
FacebookIE(),
- BlipTVUserIE(),
BlipTVIE(),
+ BlipTVUserIE(),
VimeoIE(),
MyVideoIE(),
ComedyCentralIE(),
SpiegelIE(),
LiveLeakIE(),
ARDIE(),
+ ZDFIE(),
TumblrIE(),
BandcampIE(),
RedTubeIE(),
InaIE(),
+ HowcastIE(),
+ VineIE(),
+ FlickrIE(),
+ TeamcocoIE(),
+ XHamsterIE(),
+ HypemIE(),
+ Vbox7IE(),
GenericIE()
]