# youtube_dl/extractor/revision3.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    parse_iso8601,
    unescapeHTML,
    qualities,
)


class Revision3EmbedIE(InfoExtractor):
    IE_NAME = 'revision3:embed'
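    # Matches both the internal 'revision3:<id>' scheme (optionally
    # 'revision3:<playlist_type>:<id>') used by Revision3IE below and the
    # player embed URLs on revision3.com, animalist.com and seekernetwork.com.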
    _VALID_URL = r'(?:revision3:(?:(?P<playlist_type>[^:]+):)?|https?://(?:(?:(?:www|embed)\.)?(?:revision3|animalist)|(?:(?:api|embed)\.)?seekernetwork)\.com/player/embed\?videoId=)(?P<playlist_id>\d+)'
    _TEST = {
        'url': 'http://api.seekernetwork.com/player/embed?videoId=67558',
        'md5': '83bcd157cab89ad7318dd7b8c9cf1306',
        'info_dict': {
            'id': '67558',
            'ext': 'mp4',
            'title': 'The Pros & Cons Of Zoos',
            'description': 'Zoos are often depicted as a terrible place for animals to live, but is there any truth to this?',
            'uploader_id': 'dnews',
            'uploader': 'DNews',
        }
    }
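    # Sent with every getPlaylist request; presumably the public key baked
    # into the Revision3/Seeker embed player (an assumption, not documented).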
    _API_KEY = 'ba9c741bce1b9d8e3defcc22193f3651b8867e62'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('playlist_id')
        playlist_type = mobj.group('playlist_type') or 'video_id'
        video_data = self._download_json(
            'http://revision3.com/api/getPlaylist.json', playlist_id, query={
                'api_key': self._API_KEY,
                'codecs': 'h264,vp8,theora',
                playlist_type: playlist_id,
            })['items'][0]

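        # 'media' maps a codec name (e.g. 'h264', 'vp8') to per-quality
        # entries; the special quality id 'hls' carries an HLS master
        # playlist URL rather than a single progressive download.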
        formats = []
        for vcodec, media in video_data['media'].items():
            for quality_id, quality in media.items():
                if quality_id == 'hls':
                    formats.extend(self._extract_m3u8_formats(
                        quality['url'], playlist_id, 'mp4',
                        'm3u8_native', m3u8_id='hls', fatal=False))
                else:
                    formats.append({
                        'url': quality['url'],
                        'format_id': '%s-%s' % (vcodec, quality_id),
                        'tbr': int_or_none(quality.get('bitrate')),
                        'vcodec': vcodec,
                    })
        self._sort_formats(formats)

        # Guard against the API returning "show": null, which would crash
        # the chained .get() lookups.
        show = video_data.get('show') or {}
        return {
            'id': playlist_id,
            'title': unescapeHTML(video_data['title']),
            'description': unescapeHTML(video_data.get('summary')),
            'uploader': show.get('name'),
            'uploader_id': show.get('slug'),
            'duration': int_or_none(video_data.get('duration')),
            'formats': formats,
        }


class Revision3IE(InfoExtractor):
    IE_NAME = 'revision'
    _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:revision3|animalist)\.com)/(?P<id>[^/]+(?:/[^/?#]+)?)'
    _TESTS = [{
        'url': 'http://www.revision3.com/technobuffalo/5-google-predictions-for-2016',
        'md5': 'd94a72d85d0a829766de4deb8daaf7df',
        'info_dict': {
            'id': '71089',
            'display_id': 'technobuffalo/5-google-predictions-for-2016',
            'ext': 'webm',
            'title': '5 Google Predictions for 2016',
            'description': 'Google had a great 2015, but it\'s already time to look ahead. Here are our five predictions for 2016.',
            'upload_date': '20151228',
            'timestamp': 1451325600,
            'duration': 187,
            'uploader': 'TechnoBuffalo',
            'uploader_id': 'technobuffalo',
        }
    }, {
        # Show
        'url': 'http://revision3.com/variant',
        'only_matching': True,
    }, {
        # Tag
        'url': 'http://revision3.com/vr',
        'only_matching': True,
    }]
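    # Placeholders: site domain, page path (optionally '<path>/<page_num>'
    # for later pages), and the domain again as a query argument.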
    _PAGE_DATA_TEMPLATE = 'http://www.%s/apiProxy/ddn/%s?domain=%s'

    def _real_extract(self, url):
        domain, display_id = re.match(self._VALID_URL, url).groups()
        site = domain.split('.')[0]
        page_info = self._download_json(
            self._PAGE_DATA_TEMPLATE % (domain, display_id, domain), display_id)

        page_data = page_info['data']
        page_type = page_data['type']
        if page_type in ('episode', 'embed'):
            show_data = page_data['show']['data']
            page_id = compat_str(page_data['id'])
            video_id = compat_str(page_data['video']['data']['id'])

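            # Rank thumbnail sizes from worst to best so that 'large'
            # receives the highest preference value.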
            preference = qualities(['mini', 'small', 'medium', 'large'])
            thumbnails = [{
                'url': image_url,
                'id': image_id,
                'preference': preference(image_id)
            } for image_id, image_url in page_data.get('images', {}).items()]

            info = {
                'id': page_id,
                'display_id': display_id,
                'title': unescapeHTML(page_data['name']),
                'description': unescapeHTML(page_data.get('summary')),
                'timestamp': parse_iso8601(page_data.get('publishTime'), ' '),
                'author': page_data.get('author'),
                'uploader': show_data.get('name'),
                'uploader_id': show_data.get('slug'),
                'thumbnails': thumbnails,
                'extractor_key': site,
            }

            # Both branches defer to another extractor via url_transparent:
            # embeds point at the upstream embed URL, plain episodes at the
            # internal 'revision3:' scheme handled by Revision3EmbedIE.
            info['_type'] = 'url_transparent'
            if page_type == 'embed':
                info['url'] = page_data['video']['data']['embed']
            else:
                info['url'] = 'revision3:%s' % video_id
            return info
        else:
            list_data = page_info[page_type]['data']
            episodes_data = page_info['episodes']['data']
            num_episodes = page_info['meta']['totalEpisodes']
            processed_episodes = 0
            entries = []
            page_num = 1
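            # Page through the listing: pages after the first are fetched
            # as '<display_id>/<page_num>' until every episode reported by
            # meta.totalEpisodes has been collected.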
            while True:
                entries.extend([{
                    '_type': 'url',
                    'url': 'http://%s%s' % (domain, episode['path']),
                    'id': compat_str(episode['id']),
                    'ie_key': 'Revision3',
                    'extractor_key': site,
                } for episode in episodes_data])
                processed_episodes += len(episodes_data)
                # Use >= (not ==) and bail out on an empty page so a short
                # or missing page cannot loop forever.
                if processed_episodes >= num_episodes or not episodes_data:
                    break
                page_num += 1
                episodes_data = self._download_json(self._PAGE_DATA_TEMPLATE % (
                    domain, display_id + '/' + compat_str(page_num), domain),
                    display_id)['episodes']['data']

            return self.playlist_result(
                entries, compat_str(list_data['id']),
                list_data.get('name'), list_data.get('summary'))
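
# A minimal usage sketch (hypothetical, not part of this module): exercise
# the extractors through the public YoutubeDL API with one of the test URLs
# above.
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'http://www.revision3.com/technobuffalo/5-google-predictions-for-2016')
#         print(info['title'])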