[ustream] Add an alternative approach to extract title (fixes #5128)
[youtube-dl] / youtube_dl / extractor / ustream.py
1 from __future__ import unicode_literals
2
3 import json
4 import re
5
6 from .common import InfoExtractor
7 from ..compat import (
8     compat_urlparse,
9 )
10 from ..utils import ExtractorError
11
12
class UstreamIE(InfoExtractor):
    _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)'
    IE_NAME = 'ustream'
    _TEST = {
        'url': 'http://www.ustream.tv/recorded/20274954',
        'md5': '088f151799e8f572f84eb62f17d73e5c',
        'info_dict': {
            'id': '20274954',
            'ext': 'flv',
            'uploader': 'Young Americans for Liberty',
            'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
        },
    }

    def _real_extract(self, url):
        """Extract a recorded Ustream video.

        Handles three URL shapes: ``embed/recorded`` and ``embed`` URLs are
        resolved to their canonical ``recorded`` URL and re-dispatched to this
        extractor; ``recorded`` URLs are extracted directly via the
        Viewer.getVideo JSON gateway plus webpage scraping for metadata.
        """
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        # some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990)
        if m.group('type') == 'embed/recorded':
            desktop_url = 'http://www.ustream.tv/recorded/' + video_id
            return self.url_result(desktop_url, 'Ustream')
        if m.group('type') == 'embed':
            webpage = self._download_webpage(url, video_id)
            # The embed page carries the real recorded-video id in a JS array.
            desktop_video_id = self._html_search_regex(
                r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
            desktop_url = 'http://www.ustream.tv/recorded/' + desktop_video_id
            return self.url_result(desktop_url, 'Ustream')

        params = self._download_json(
            'http://cdngw.ustream.tv/rgwjson/Viewer.getVideo/' + json.dumps({
                'brandId': 1,
                'videoId': int(video_id),
                'autoplay': False,
            }), video_id)

        if 'error' in params:
            raise ExtractorError(params['error']['message'], expected=True)

        video_url = params['flv']

        webpage = self._download_webpage(url, video_id)

        self.report_extraction(video_id)

        # Newer pages expose the title in a data-title attribute; fall back
        # to the JSON gateway metadata, then to a generic placeholder (#5128).
        video_title = self._html_search_regex(
            r'data-title="(?P<title>.+)"', webpage, 'title', default=None)

        if not video_title:
            video_title = params.get('moduleConfig', {}).get('meta', {}).get('title')

        if not video_title:
            video_title = 'Ustream video ' + video_id

        uploader = self._html_search_regex(
            r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
            webpage, 'uploader', fatal=False, flags=re.DOTALL)

        thumbnail = self._html_search_regex(
            r'<link rel="image_src" href="(?P<thumb>.*?)"',
            webpage, 'thumbnail', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
            'title': video_title,
            'uploader': uploader,
            'thumbnail': thumbnail,
        }
86
87
class UstreamChannelIE(InfoExtractor):
    _VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
    IE_NAME = 'ustream:channel'
    _TEST = {
        'url': 'http://www.ustream.tv/channel/channeljapan',
        'info_dict': {
            'id': '10874166',
        },
        'playlist_mincount': 17,
    }

    def _real_extract(self, url):
        """Return a playlist of every recorded video on a Ustream channel."""
        display_id = re.match(self._VALID_URL, url).group('slug')
        webpage = self._download_webpage(url, display_id)
        # The numeric channel id is published as an OpenGraph-style meta tag.
        channel_id = self._html_search_meta('ustream:channel_id', webpage)

        base_url = 'http://www.ustream.tv'
        page_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
        video_ids = []
        # The social-stream endpoint is paginated; keep following 'nextUrl'
        # until the server stops returning one.
        while page_url:
            page = self._download_json(
                compat_urlparse.urljoin(base_url, page_url), display_id,
                note='Downloading video information (next: %d)' % (len(video_ids) + 1))
            video_ids.extend(re.findall(r'data-content-id="(\d.*)"', page['data']))
            page_url = page['nextUrl']

        entries = [
            self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
            for vid in video_ids]
        return {
            '_type': 'playlist',
            'id': channel_id,
            'display_id': display_id,
            'entries': entries,
        }