X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2Fextractor%2Fustream.py;h=68d03b99905cce848eb38fde8b6d8e643c548105;hb=ce08a86462984ff82368c1acd7285e9625855191;hp=d1d2af19b8870353e66cbf227727e54e5bafb0e7;hpb=5c38625259cf50dd7432377aac4daee3aede8f25;p=youtube-dl

diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py
index d1d2af19b..68d03b999 100644
--- a/youtube_dl/extractor/ustream.py
+++ b/youtube_dl/extractor/ustream.py
@@ -1,62 +1,57 @@
 from __future__ import unicode_literals
 
-import json
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urlparse,
-    get_meta_content,
 )
 
 
 class UstreamIE(InfoExtractor):
-    _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed)/(?P<videoID>\d+)'
+    _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)'
     IE_NAME = 'ustream'
 
-    _TESTS = [{
+    _TEST = {
         'url': 'http://www.ustream.tv/recorded/20274954',
-        'file': '20274954.flv',
         'md5': '088f151799e8f572f84eb62f17d73e5c',
         'info_dict': {
-            "uploader": "Young Americans for Liberty",
-            "title": "Young Americans for Liberty February 7, 2012 2:28 AM",
+            'id': '20274954',
+            'ext': 'flv',
+            'uploader': 'Young Americans for Liberty',
+            'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
         },
-    },
-    {
-        'url': 'http://www.ustream.tv/embed/17357891',
-        'file': 'NSA and Privacy Forum Debate featuring General Hayden and Barton Gellman-45734260.flv',
-        'md5': '27b99cdb639c9b12a79bca876a073417',
-        'info_dict': {
-            "uploader": "AU SPA: The NSA and Privacy",
-            "title": "NSA and Privacy Forum Debate featuring General Hayden and Barton Gellman"
-        },
     }
-    ]
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
+
+        # some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990)
+        if m.group('type') == 'embed/recorded':
+            video_id = m.group('videoID')
+            desktop_url = 'http://www.ustream.tv/recorded/' + video_id
+            return self.url_result(desktop_url, 'Ustream')
+
         if m.group('type') == 'embed':
             video_id = m.group('videoID')
             webpage = self._download_webpage(url, video_id)
-            desktop_video_id = self._html_search_regex(r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
+            desktop_video_id = self._html_search_regex(
+                r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
             desktop_url = 'http://www.ustream.tv/recorded/' + desktop_video_id
             return self.url_result(desktop_url, 'Ustream')
 
-        video_id = m.group('videoID')
-
         video_url = 'http://tcdn.ustream.tv/video/%s' % video_id
         webpage = self._download_webpage(url, video_id)
 
         self.report_extraction(video_id)
 
         video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
-            webpage, 'title')
+                                              webpage, 'title')
 
         uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
-            webpage, 'uploader', fatal=False, flags=re.DOTALL)
+                                           webpage, 'uploader', fatal=False, flags=re.DOTALL)
 
         thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
-            webpage, 'thumbnail', fatal=False)
+                                            webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
@@ -71,21 +66,36 @@ class UstreamIE(InfoExtractor):
 class UstreamChannelIE(InfoExtractor):
     _VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
     IE_NAME = 'ustream:channel'
+    _TEST = {
+        'url': 'http://www.ustream.tv/channel/channeljapan',
+        'info_dict': {
+            'id': '10874166',
+        },
+        'playlist_mincount': 17,
+    }
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
-        slug = m.group('slug')
-        webpage = self._download_webpage(url, slug)
-        channel_id = get_meta_content('ustream:channel_id', webpage)
+        display_id = m.group('slug')
+        webpage = self._download_webpage(url, display_id)
+        channel_id = self._html_search_meta('ustream:channel_id', webpage)
 
         BASE = 'http://www.ustream.tv'
         next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
         video_ids = []
         while next_url:
-            reply = json.loads(self._download_webpage(compat_urlparse.urljoin(BASE, next_url), channel_id))
+            reply = self._download_json(
+                compat_urlparse.urljoin(BASE, next_url), display_id,
+                note='Downloading video information (next: %d)' % (len(video_ids) + 1))
             video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
             next_url = reply['nextUrl']
 
-        urls = ['http://www.ustream.tv/recorded/' + vid for vid in video_ids]
-        url_entries = [self.url_result(eurl, 'Ustream') for eurl in urls]
-        return self.playlist_result(url_entries, channel_id)
+        entries = [
+            self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
+            for vid in video_ids]
+        return {
+            '_type': 'playlist',
+            'id': channel_id,
+            'display_id': display_id,
+            'entries': entries,
+        }