[WDR] fixed parsing of playlists
[youtube-dl] / youtube_dl / extractor / revision3.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..compat import compat_str
8 from ..utils import (
9     int_or_none,
10     parse_iso8601,
11     unescapeHTML,
12     qualities,
13 )
14
15
class Revision3IE(InfoExtractor):
    """Extractor for Revision3-network sites (revision3.com, testtube.com,
    animalist.com).

    A URL may resolve (via the site's ``apiProxy/ddn`` endpoint) to:
      * an ``episode`` page  -> a single video with downloadable formats,
      * an ``embed`` page    -> delegated to another extractor via
                                ``url_transparent``,
      * a show or tag page   -> a playlist of episode URLs, paginated.
    """
    _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:revision3|testtube|animalist)\.com)/(?P<id>[^/]+(?:/[^/?#]+)?)'
    _TESTS = [{
        'url': 'http://www.revision3.com/technobuffalo/5-google-predictions-for-2016',
        'md5': 'd94a72d85d0a829766de4deb8daaf7df',
        'info_dict': {
            'id': '71089',
            'display_id': 'technobuffalo/5-google-predictions-for-2016',
            'ext': 'webm',
            'title': '5 Google Predictions for 2016',
            'description': 'Google had a great 2015, but it\'s already time to look ahead. Here are our five predictions for 2016.',
            'upload_date': '20151228',
            'timestamp': 1451325600,
            'duration': 187,
            'uploader': 'TechnoBuffalo',
            'uploader_id': 'technobuffalo',
        }
    }, {
        # Show
        'url': 'http://testtube.com/brainstuff',
        'info_dict': {
            'id': '251',
            'title': 'BrainStuff',
            'description': 'Whether the topic is popcorn or particle physics, you can count on the HowStuffWorks team to explore-and explain-the everyday science in the world around us on BrainStuff.',
        },
        'playlist_mincount': 93,
    }, {
        'url': 'https://testtube.com/dnews/5-weird-ways-plants-can-eat-animals?utm_source=FB&utm_medium=DNews&utm_campaign=DNewsSocial',
        'info_dict': {
            'id': '58227',
            'display_id': 'dnews/5-weird-ways-plants-can-eat-animals',
            'duration': 275,
            'ext': 'webm',
            'title': '5 Weird Ways Plants Can Eat Animals',
            'description': 'Why have some plants evolved to eat meat?',
            'upload_date': '20150120',
            'timestamp': 1421763300,
            'uploader': 'DNews',
            'uploader_id': 'dnews',
        },
    }, {
        'url': 'http://testtube.com/tt-editors-picks/the-israel-palestine-conflict-explained-in-ten-min',
        'info_dict': {
            'id': '71618',
            'ext': 'mp4',
            'display_id': 'tt-editors-picks/the-israel-palestine-conflict-explained-in-ten-min',
            'title': 'The Israel-Palestine Conflict Explained in Ten Minutes',
            'description': 'If you\'d like to learn about the struggle between Israelis and Palestinians, this video is a great place to start',
            'uploader': 'Editors\' Picks',
            'uploader_id': 'tt-editors-picks',
            'timestamp': 1453309200,
            'upload_date': '20160120',
        },
        'add_ie': ['Youtube'],
    }, {
        # Tag
        'url': 'http://testtube.com/tech-news',
        'info_dict': {
            'id': '21018',
            'title': 'tech news',
        },
        'playlist_mincount': 9,
    }]
    # %-args: (domain, display_id[/page_num], domain)
    _PAGE_DATA_TEMPLATE = 'http://www.%s/apiProxy/ddn/%s?domain=%s'
    _API_KEY = 'ba9c741bce1b9d8e3defcc22193f3651b8867e62'

    def _real_extract(self, url):
        domain, display_id = re.match(self._VALID_URL, url).groups()
        site = domain.split('.')[0]
        page_info = self._download_json(
            self._PAGE_DATA_TEMPLATE % (domain, display_id, domain), display_id)

        page_data = page_info['data']
        page_type = page_data['type']
        if page_type in ('episode', 'embed'):
            show_data = page_data['show']['data']
            page_id = compat_str(page_data['id'])
            video_id = compat_str(page_data['video']['data']['id'])

            # Rank thumbnails by named size so the largest wins.
            preference = qualities(['mini', 'small', 'medium', 'large'])
            thumbnails = [{
                'url': image_url,
                'id': image_id,
                'preference': preference(image_id)
            } for image_id, image_url in page_data.get('images', {}).items()]

            info = {
                'id': page_id,
                'display_id': display_id,
                'title': unescapeHTML(page_data['name']),
                'description': unescapeHTML(page_data.get('summary')),
                # publishTime uses a space between date and time, not 'T'
                'timestamp': parse_iso8601(page_data.get('publishTime'), ' '),
                'author': page_data.get('author'),
                'uploader': show_data.get('name'),
                'uploader_id': show_data.get('slug'),
                'thumbnails': thumbnails,
                'extractor_key': site,
            }

            if page_type == 'embed':
                # The actual video is hosted elsewhere (e.g. YouTube);
                # hand off while keeping the metadata gathered above.
                info.update({
                    '_type': 'url_transparent',
                    'url': page_data['video']['data']['embed'],
                })
                return info

            video_data = self._download_json(
                'http://revision3.com/api/getPlaylist.json?api_key=%s&codecs=h264,vp8,theora&video_id=%s' % (self._API_KEY, video_id),
                video_id)['items'][0]

            formats = []
            # media is keyed by video codec, then by quality id (or 'hls')
            for vcodec, media in video_data['media'].items():
                for quality_id, quality in media.items():
                    if quality_id == 'hls':
                        formats.extend(self._extract_m3u8_formats(
                            quality['url'], video_id, 'mp4',
                            'm3u8_native', m3u8_id='hls', fatal=False))
                    else:
                        formats.append({
                            'url': quality['url'],
                            'format_id': '%s-%s' % (vcodec, quality_id),
                            'tbr': int_or_none(quality.get('bitrate')),
                            'vcodec': vcodec,
                        })
            self._sort_formats(formats)

            # Playlist metadata is more complete than the page metadata,
            # so let it override title/description/uploader fields.
            info.update({
                'title': unescapeHTML(video_data['title']),
                'description': unescapeHTML(video_data.get('summary')),
                'uploader': video_data.get('show', {}).get('name'),
                'uploader_id': video_data.get('show', {}).get('slug'),
                'duration': int_or_none(video_data.get('duration')),
                'formats': formats,
            })
            return info
        else:
            # Show or tag page: collect every episode across all pages.
            list_data = page_info[page_type]['data']
            episodes_data = page_info['episodes']['data']
            num_episodes = page_info['meta']['totalEpisodes']
            processed_episodes = 0
            entries = []
            page_num = 1
            while True:
                entries.extend([{
                    '_type': 'url',
                    'url': 'http://%s%s' % (domain, episode['path']),
                    'id': compat_str(episode['id']),
                    'ie_key': 'Revision3',
                    'extractor_key': site,
                } for episode in episodes_data])
                processed_episodes += len(episodes_data)
                # >= (not ==): if the reported totalEpisodes disagrees with
                # the actual page contents we must still terminate.
                if processed_episodes >= num_episodes:
                    break
                page_num += 1
                episodes_data = self._download_json(self._PAGE_DATA_TEMPLATE % (
                    domain, display_id + '/' + compat_str(page_num), domain),
                    display_id)['episodes']['data']
                # An empty page means the API has no more episodes, even if
                # the running count has not reached num_episodes yet; break
                # instead of looping forever on an inconsistent response.
                if not episodes_data:
                    break

            return self.playlist_result(
                entries, compat_str(list_data['id']),
                list_data.get('name'), list_data.get('summary'))