+ """Information extractor for MTV.com"""
+
+ _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$'
+ IE_NAME = u'mtv'
+
+ def report_extraction(self, video_id):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+ if not mobj.group('proto'):
+ url = 'http://' + url
+ video_id = mobj.group('videoid')
+
+ webpage = self._download_webpage(url, video_id)
+
+ mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract song name')
+ return
+ song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
+ mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract performer')
+ return
+ performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
+ video_title = performer + ' - ' + song_name
+
+ mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to mtvn_uri')
+ return
+ mtvn_uri = mobj.group(1)
+
+ mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract content id')
+ return
+ content_id = mobj.group(1)
+
+ videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri
+ self.report_extraction(video_id)
+ request = compat_urllib_request.Request(videogen_url)
+ try:
+ metadataXml = compat_urllib_request.urlopen(request).read()
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
+ return
+
+ mdoc = xml.etree.ElementTree.fromstring(metadataXml)
+ renditions = mdoc.findall('.//rendition')
+
+ # For now, always pick the highest quality.
+ rendition = renditions[-1]
+
+ try:
+ _,_,ext = rendition.attrib['type'].partition('/')
+ format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate']
+ video_url = rendition.find('./src').text
+ except KeyError:
+ self._downloader.trouble('Invalid rendition field.')
+ return
+
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'uploader': performer,
+ 'upload_date': None,
+ 'title': video_title,
+ 'ext': ext,
+ 'format': format,
+ }
+
+ return [info]
+
+
+class YoukuIE(InfoExtractor):
+ _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
+
+ def report_download_webpage(self, file_id):
+ """Report webpage download."""
+ self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, file_id))
+
+ def report_extraction(self, file_id):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+
+ def _gen_sid(self):
+ nowTime = int(time.time() * 1000)
+ random1 = random.randint(1000,1998)
+ random2 = random.randint(1000,9999)
+
+ return "%d%d%d" %(nowTime,random1,random2)
+
+ def _get_file_ID_mix_string(self, seed):
+ mixed = []
+ source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
+ seed = float(seed)
+ for i in range(len(source)):
+ seed = (seed * 211 + 30031 ) % 65536
+ index = math.floor(seed / 65536 * len(source) )
+ mixed.append(source[int(index)])
+ source.remove(source[int(index)])
+ #return ''.join(mixed)
+ return mixed
+
+ def _get_file_id(self, fileId, seed):
+ mixed = self._get_file_ID_mix_string(seed)
+ ids = fileId.split('*')
+ realId = []
+ for ch in ids:
+ if ch:
+ realId.append(mixed[int(ch)])
+ return ''.join(realId)
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+ video_id = mobj.group('ID')
+
+ info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
+
+ request = compat_urllib_request.Request(info_url, None, std_headers)
+ try:
+ self.report_download_webpage(video_id)
+ jsondata = compat_urllib_request.urlopen(request).read()
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ return
+
+ self.report_extraction(video_id)
+ try:
+ jsonstr = jsondata.decode('utf-8')
+ config = json.loads(jsonstr)
+
+ video_title = config['data'][0]['title']
+ seed = config['data'][0]['seed']
+
+ format = self._downloader.params.get('format', None)
+ supported_format = list(config['data'][0]['streamfileids'].keys())
+
+ if format is None or format == 'best':
+ if 'hd2' in supported_format:
+ format = 'hd2'
+ else:
+ format = 'flv'
+ ext = u'flv'
+ elif format == 'worst':
+ format = 'mp4'
+ ext = u'mp4'
+ else:
+ format = 'flv'
+ ext = u'flv'
+
+
+ fileid = config['data'][0]['streamfileids'][format]
+ keys = [s['k'] for s in config['data'][0]['segs'][format]]
+ except (UnicodeDecodeError, ValueError, KeyError):
+ self._downloader.trouble(u'ERROR: unable to extract info section')
+ return
+
+ files_info=[]
+ sid = self._gen_sid()
+ fileid = self._get_file_id(fileid, seed)
+
+ #column 8,9 of fileid represent the segment number
+ #fileid[7:9] should be changed
+ for index, key in enumerate(keys):
+
+ temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
+ download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
+
+ info = {
+ 'id': '%s_part%02d' % (video_id, index),
+ 'url': download_url,
+ 'uploader': None,
+ 'upload_date': None,
+ 'title': video_title,
+ 'ext': ext,
+ }
+ files_info.append(info)
+
+ return files_info
+
+
+class XNXXIE(InfoExtractor):
+ """Information extractor for xnxx.com"""
+
+ _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
+ IE_NAME = u'xnxx'
+ VIDEO_URL_RE = r'flv_url=(.*?)&'
+ VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
+ VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&'
+
+ def report_webpage(self, video_id):
+ """Report information extraction"""
+ self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+ def report_extraction(self, video_id):
+ """Report information extraction"""
+ self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+ video_id = mobj.group(1)
+
+ self.report_webpage(video_id)
+
+ # Get webpage content
+ try:
+ webpage_bytes = compat_urllib_request.urlopen(url).read()
+ webpage = webpage_bytes.decode('utf-8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
+ return
+
+ result = re.search(self.VIDEO_URL_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video url')
+ return
+ video_url = compat_urllib_parse.unquote(result.group(1))
+
+ result = re.search(self.VIDEO_TITLE_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video title')
+ return
+ video_title = result.group(1)
+
+ result = re.search(self.VIDEO_THUMB_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+ return
+ video_thumbnail = result.group(1)
+
+ return [{
+ 'id': video_id,
+ 'url': video_url,
+ 'uploader': None,
+ 'upload_date': None,
+ 'title': video_title,
+ 'ext': 'flv',
+ 'thumbnail': video_thumbnail,
+ 'description': None,
+ }]
+
+
+class GooglePlusIE(InfoExtractor):
+ """Information extractor for plus.google.com."""
+
+ _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
+ IE_NAME = u'plus.google'
+
+ def __init__(self, downloader=None):
+ InfoExtractor.__init__(self, downloader)
+
+ def report_extract_entry(self, url):
+ """Report downloading extry"""
+ self._downloader.to_screen(u'[plus.google] Downloading entry: %s' % url)
+
+ def report_date(self, upload_date):
+ """Report downloading extry"""
+ self._downloader.to_screen(u'[plus.google] Entry date: %s' % upload_date)
+
+ def report_uploader(self, uploader):
+ """Report downloading extry"""
+ self._downloader.to_screen(u'[plus.google] Uploader: %s' % uploader)
+
+ def report_title(self, video_title):
+ """Report downloading extry"""
+ self._downloader.to_screen(u'[plus.google] Title: %s' % video_title)
+
+ def report_extract_vid_page(self, video_page):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[plus.google] Extracting video page: %s' % video_page)
+
+ def _real_extract(self, url):
+ # Extract id from URL
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ return
+
+ post_url = mobj.group(0)
+ video_id = mobj.group(1)
+
+ video_extension = 'flv'
+
+ # Step 1, Retrieve post webpage to extract further information
+ self.report_extract_entry(post_url)
+ request = compat_urllib_request.Request(post_url)
+ try:
+ webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
+ return
+
+ # Extract update date
+ upload_date = None
+ pattern = 'title="Timestamp">(.*?)</a>'
+ mobj = re.search(pattern, webpage)
+ if mobj:
+ upload_date = mobj.group(1)
+ # Convert timestring to a format suitable for filename
+ upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
+ upload_date = upload_date.strftime('%Y%m%d')
+ self.report_date(upload_date)
+
+ # Extract uploader
+ uploader = None
+ pattern = r'rel\="author".*?>(.*?)</a>'
+ mobj = re.search(pattern, webpage)
+ if mobj:
+ uploader = mobj.group(1)
+ self.report_uploader(uploader)
+
+ # Extract title
+ # Get the first line for title
+ video_title = u'NA'
+ pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]'
+ mobj = re.search(pattern, webpage)
+ if mobj:
+ video_title = mobj.group(1)
+ self.report_title(video_title)
+
+ # Step 2, Stimulate clicking the image box to launch video
+ pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
+ mobj = re.search(pattern, webpage)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: unable to extract video page URL')
+
+ video_page = mobj.group(1)
+ request = compat_urllib_request.Request(video_page)
+ try:
+ webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ return
+ self.report_extract_vid_page(video_page)
+
+
+ # Extract video links on video page
+ """Extract video links of all sizes"""
+ pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
+ mobj = re.findall(pattern, webpage)
+ if len(mobj) == 0:
+ self._downloader.trouble(u'ERROR: unable to extract video links')
+
+ # Sort in resolution
+ links = sorted(mobj)
+
+ # Choose the lowest of the sort, i.e. highest resolution
+ video_url = links[-1]
+ # Only get the url. The resolution part in the tuple has no use anymore
+ video_url = video_url[-1]
+ # Treat escaped \u0026 style hex
+ try:
+ video_url = video_url.decode("unicode_escape")
+ except AttributeError: # Python 3
+ video_url = bytes(video_url, 'ascii').decode('unicode-escape')
+
+
+ return [{
+ 'id': video_id,
+ 'url': video_url,
+ 'uploader': uploader,
+ 'upload_date': upload_date,
+ 'title': video_title,
+ 'ext': video_extension,
+ }]
+
+class NBAIE(InfoExtractor):
+ _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*)(\?.*)?$'
+ IE_NAME = u'nba'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ video_id = mobj.group(1)
+ if video_id.endswith('/index.html'):
+ video_id = video_id[:-len('/index.html')]
+
+ webpage = self._download_webpage(url, video_id)
+
+ video_url = u'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
+ def _findProp(rexp, default=None):
+ m = re.search(rexp, webpage)
+ if m:
+ return unescapeHTML(m.group(1))
+ else:
+ return default
+
+ shortened_video_id = video_id.rpartition('/')[2]
+ title = _findProp(r'<meta property="og:title" content="(.*?)"', shortened_video_id).replace('NBA.com: ', '')
+ info = {
+ 'id': shortened_video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'uploader_date': _findProp(r'<b>Date:</b> (.*?)</div>'),
+ 'description': _findProp(r'<div class="description">(.*?)</h1>'),
+ }
+ return [info]
+
+class JustinTVIE(InfoExtractor):
+ """Information extractor for justin.tv and twitch.tv"""
+ # TODO: One broadcast may be split into multiple videos. The key
+ # 'broadcast_id' is the same for all parts, and 'broadcast_part'
+ # starts at 1 and increases. Can we treat all parts as one video?
+
+ _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
+ ([^/]+)(?:/b/([^/]+))?/?(?:\#.*)?$"""
+ _JUSTIN_PAGE_LIMIT = 100
+ IE_NAME = u'justin.tv'
+
+ def report_extraction(self, file_id):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+
+ def report_download_page(self, channel, offset):
+ """Report attempt to download a single page of videos."""
+ self._downloader.to_screen(u'[%s] %s: Downloading video information from %d to %d' %
+ (self.IE_NAME, channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
+
+ # Return count of items, list of *valid* items
+ def _parse_page(self, url):
+ try:
+ urlh = compat_urllib_request.urlopen(url)
+ webpage_bytes = urlh.read()
+ webpage = webpage_bytes.decode('utf-8', 'ignore')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
+ return
+
+ response = json.loads(webpage)
+ info = []
+ for clip in response:
+ video_url = clip['video_file_url']
+ if video_url:
+ video_extension = os.path.splitext(video_url)[1][1:]
+ video_date = re.sub('-', '', clip['created_on'][:10])
+ info.append({
+ 'id': clip['id'],
+ 'url': video_url,
+ 'title': clip['title'],
+ 'uploader': clip.get('user_id', clip.get('channel_id')),
+ 'upload_date': video_date,
+ 'ext': video_extension,
+ })
+ return (len(response), info)
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ api = 'http://api.justin.tv'
+ video_id = mobj.group(mobj.lastindex)
+ paged = False
+ if mobj.lastindex == 1:
+ paged = True
+ api += '/channel/archives/%s.json'
+ else:
+ api += '/clip/show/%s.json'
+ api = api % (video_id,)
+
+ self.report_extraction(video_id)
+
+ info = []
+ offset = 0
+ limit = self._JUSTIN_PAGE_LIMIT
+ while True:
+ if paged:
+ self.report_download_page(video_id, offset)
+ page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
+ page_count, page_info = self._parse_page(page_url)
+ info.extend(page_info)
+ if not paged or page_count != limit:
+ break
+ offset += limit
+ return info
+
+class FunnyOrDieIE(InfoExtractor):
+ _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ video_id = mobj.group('id')
+ webpage = self._download_webpage(url, video_id)
+
+ m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
+ if not m:
+ self._downloader.trouble(u'ERROR: unable to find video information')
+ video_url = unescapeHTML(m.group('url'))
+
+ m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage)
+ if not m:
+ self._downloader.trouble(u'Cannot find video title')
+ title = unescapeHTML(m.group('title'))
+
+ m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
+ if m:
+ desc = unescapeHTML(m.group('desc'))
+ else:
+ desc = None
+
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'description': desc,
+ }
+ return [info]
+
+class TweetReelIE(InfoExtractor):
+ _VALID_URL = r'^(?:https?://)?(?:www\.)?tweetreel\.com/[?](?P<id>[0-9a-z]+)$'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ video_id = mobj.group('id')
+ webpage = self._download_webpage(url, video_id)
+
+ m = re.search(r'<div id="left" status_id="([0-9]+)">', webpage)
+ if not m:
+ self._downloader.trouble(u'ERROR: Cannot find status ID')
+ status_id = m.group(1)
+
+ m = re.search(r'<div class="tweet_text">(.*?)</div>', webpage, flags=re.DOTALL)
+ if not m:
+ self._downloader.trouble(u'WARNING: Cannot find description')
+ desc = unescapeHTML(re.sub('<a.*?</a>', '', m.group(1))).strip()
+
+ m = re.search(r'<div class="tweet_info">.*?from <a target="_blank" href="https?://twitter.com/(?P<uploader_id>.+?)">(?P<uploader>.+?)</a>', webpage, flags=re.DOTALL)
+ if not m:
+ self._downloader.trouble(u'ERROR: Cannot find uploader')
+ uploader = unescapeHTML(m.group('uploader'))
+ uploader_id = unescapeHTML(m.group('uploader_id'))
+
+ m = re.search(r'<span unixtime="([0-9]+)"', webpage)
+ if not m:
+ self._downloader.trouble(u'ERROR: Cannot find upload date')
+ upload_date = datetime.datetime.fromtimestamp(int(m.group(1))).strftime('%Y%m%d')
+
+ title = desc
+ video_url = 'http://files.tweetreel.com/video/' + status_id + '.mov'
+
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mov',
+ 'title': title,
+ 'description': desc,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'internal_id': status_id,
+ 'upload_date': upload_date
+ }
+ return [info]
+
+class SteamIE(InfoExtractor):
+ _VALID_URL = r"""http://store.steampowered.com/
+ (?P<urltype>video|app)/ #If the page is only for videos or for a game
+ (?P<gameID>\d+)/?
+ (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
+ """
+
+ def suitable(self, url):
+ """Receives a URL and returns True if suitable for this IE."""
+ return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url, re.VERBOSE)
+ urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
+ gameID = m.group('gameID')
+ videourl = 'http://store.steampowered.com/video/%s/' % gameID
+ webpage = self._download_webpage(videourl, gameID)
+ mweb = re.finditer(urlRE, webpage)
+ namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
+ titles = re.finditer(namesRE, webpage)
+ videos = []
+ for vid,vtitle in zip(mweb,titles):
+ video_id = vid.group('videoID')
+ title = vtitle.group('videoName')
+ video_url = vid.group('videoURL')
+ if not video_url:
+ self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
+ info = {
+ 'id':video_id,
+ 'url':video_url,
+ 'ext': 'flv',
+ 'title': unescapeHTML(title)
+ }
+ videos.append(info)
+ return videos
+
+class UstreamIE(InfoExtractor):
+ _VALID_URL = r'http://www.ustream.tv/recorded/(?P<videoID>\d+)'
+ IE_NAME = u'ustream'
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
+ video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
+ webpage = self._download_webpage(url, video_id)
+ m = re.search(r'data-title="(?P<title>.+)"',webpage)
+ title = m.group('title')
+ m = re.search(r'<a class="state" data-content-type="channel" data-content-id="(?P<uploader>\d+)"',webpage)
+ uploader = m.group('uploader')
+ info = {
+ 'id':video_id,
+ 'url':video_url,
+ 'ext': 'flv',
+ 'title': title,
+ 'uploader': uploader
+ }
+ return [info]
+
+
+def gen_extractors():
+ """ Return a list of an instance of every supported extractor.
+ The order does matter; the first extractor matched is the one handling the URL.
+ """
+ return [
+ YoutubePlaylistIE(),
+ YoutubeChannelIE(),
+ YoutubeUserIE(),
+ YoutubeSearchIE(),
+ YoutubeIE(),
+ MetacafeIE(),
+ DailymotionIE(),
+ GoogleSearchIE(),
+ PhotobucketIE(),
+ YahooIE(),
+ YahooSearchIE(),
+ DepositFilesIE(),
+ FacebookIE(),
+ BlipTVUserIE(),
+ BlipTVIE(),
+ VimeoIE(),
+ MyVideoIE(),
+ ComedyCentralIE(),
+ EscapistIE(),
+ CollegeHumorIE(),
+ XVideosIE(),
+ SoundcloudIE(),
+ InfoQIE(),
+ MixcloudIE(),
+ StanfordOpenClassroomIE(),
+ MTVIE(),
+ YoukuIE(),
+ XNXXIE(),
+ GooglePlusIE(),
+ ArteTvIE(),
+ NBAIE(),
+ JustinTVIE(),
+ FunnyOrDieIE(),
+ TweetReelIE(),
+ SteamIE(),
+ UstreamIE(),
+ GenericIE()
+ ]
+
+