Merge pull request #8611 from remitamine/ffmpegfd
[youtube-dl] / youtube_dl / extractor / zdf.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import functools
5 import re
6
7 from .common import InfoExtractor
8 from ..utils import (
9     int_or_none,
10     unified_strdate,
11     OnDemandPagedList,
12     xpath_text,
13     determine_ext,
14     qualities,
15     float_or_none,
16     ExtractorError,
17 )
18
19
class ZDFIE(InfoExtractor):
    """Extractor for single videos from the (legacy) ZDFmediathek.

    Metadata and formats are fetched from the ``beitragsDetails`` XML web
    service rather than scraped from HTML pages.
    """
    _VALID_URL = r'(?:zdf:|zdf:video:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/(.*beitrag/(?:video/)?))(?P<id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'

    _TESTS = [{
        'url': 'http://www.zdf.de/ZDFmediathek/beitrag/video/2037704/ZDFspezial---Ende-des-Machtpokers--?bc=sts;stt',
        'info_dict': {
            'id': '2037704',
            'ext': 'webm',
            'title': 'ZDFspezial - Ende des Machtpokers',
            'description': 'Union und SPD haben sich auf einen Koalitionsvertrag geeinigt. Aber was bedeutet das für die Bürger? Sehen Sie hierzu das ZDFspezial "Ende des Machtpokers - Große Koalition für Deutschland".',
            'duration': 1022,
            'uploader': 'spezial',
            'uploader_id': '225948',
            'upload_date': '20131127',
        },
        'skip': 'Videos on ZDF.de are depublicised in short order',
    }]

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        """Parse ZDF's SMIL variant into a list of RTMP format dicts.

        ZDF SMIL documents keep the connection details (protocols, host,
        app) in <paramGroup> elements that each <video> node references via
        its paramGroup attribute, so the generic SMIL parser cannot be used.
        """
        param_groups = {}
        for param_group in smil.findall(self._xpath_ns('./head/paramGroup', namespace)):
            # The group id is stored in the xml:id attribute (XML namespace).
            group_id = param_group.attrib.get(self._xpath_ns('id', 'http://www.w3.org/XML/1998/namespace'))
            params = {}
            for param in param_group:
                params[param.get('name')] = param.get('value')
            param_groups[group_id] = params

        formats = []
        for video in smil.findall(self._xpath_ns('.//video', namespace)):
            src = video.get('src')
            if not src:
                continue
            bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
            group_id = video.get('paramGroup')
            param_group = param_groups[group_id]
            for proto in param_group['protocols'].split(','):
                formats.append({
                    'url': '%s://%s' % (proto, param_group['host']),
                    'app': param_group['app'],
                    'play_path': src,
                    'ext': 'flv',
                    # bitrate may be missing; avoid '%d' % None raising.
                    'format_id': '%s-%d' % (proto, bitrate) if bitrate else proto,
                    'tbr': bitrate,
                })
        self._sort_formats(formats)
        return formats

    def extract_from_xml_url(self, video_id, xml_url):
        """Download the beitragsDetails XML and build the info dict.

        Raises ExtractorError (expected=True) when the service reports a
        non-ok status code, e.g. for depublicised videos.
        """
        doc = self._download_xml(
            xml_url, video_id,
            note='Downloading video info',
            errnote='Failed to download video info')

        status_code = doc.find('./status/statuscode')
        if status_code is not None and status_code.text != 'ok':
            code = status_code.text
            if code == 'notVisibleAnymore':
                message = 'Video %s is not available' % video_id
            else:
                message = '%s returned error: %s' % (self.IE_NAME, code)
            raise ExtractorError(message, expected=True)

        title = doc.find('.//information/title').text
        description = xpath_text(doc, './/information/detail', 'description')
        duration = int_or_none(xpath_text(doc, './/details/lengthSec', 'duration'))
        uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader')
        uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id')
        upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date'))

        def xml_to_thumbnails(fnode):
            # Thumbnail nodes carry the URL as text and, optionally, the
            # dimensions in a 'WIDTHxHEIGHT' key attribute.
            thumbnails = []
            for node in fnode:
                thumbnail_url = node.text
                if not thumbnail_url:
                    continue
                thumbnail = {
                    'url': thumbnail_url,
                }
                if 'key' in node.attrib:
                    m = re.match('^([0-9]+)x([0-9]+)$', node.attrib['key'])
                    if m:
                        thumbnail['width'] = int(m.group(1))
                        thumbnail['height'] = int(m.group(2))
                thumbnails.append(thumbnail)
            return thumbnails

        thumbnails = xml_to_thumbnails(doc.findall('.//teaserimages/teaserimage'))

        format_nodes = doc.findall('.//formitaeten/formitaet')
        quality = qualities(['veryhigh', 'high', 'med', 'low'])

        def get_quality(elem):
            return quality(xpath_text(elem, 'quality'))
        format_nodes.sort(key=get_quality)
        format_ids = []
        formats = []
        for fnode in format_nodes:
            # Guard against formitaet nodes without a url child or with an
            # empty url (the original .find('url').text would crash here).
            video_url = xpath_text(fnode, 'url')
            if not video_url or 'http://www.metafilegenerator' in video_url:
                continue
            format_id = fnode.attrib['basetype']
            # Deliberately NOT named 'quality': that would shadow the
            # qualities() ranking callable bound above.
            format_quality = xpath_text(fnode, './quality', 'quality')
            # basetype looks like 'vcodec_acodec_container_proto_index_indexproto'
            format_m = re.match(r'''(?x)
                (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
                (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
            ''', format_id)

            ext = determine_ext(video_url, None) or (
                format_m.group('container') if format_m else None)
            if ext not in ('smil', 'f4m', 'm3u8'):
                if format_quality:
                    format_id = format_id + '-' + format_quality
            if format_id in format_ids:
                continue

            if ext == 'meta':
                continue
            elif ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    video_url, video_id, fatal=False))
            elif ext == 'm3u8':
                # the certificates are misconfigured (see
                # https://github.com/rg3/youtube-dl/issues/8665)
                if video_url.startswith('https://'):
                    continue
                formats.extend(self._extract_m3u8_formats(
                    video_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    video_url, video_id, f4m_id=format_id, fatal=False))
            else:
                # Direct download: codec/protocol info comes from the
                # basetype pattern; skip entries we cannot interpret.
                if format_m is None:
                    continue
                proto = format_m.group('proto').lower()

                abr = int_or_none(xpath_text(fnode, './audioBitrate', 'abr'), 1000)
                vbr = int_or_none(xpath_text(fnode, './videoBitrate', 'vbr'), 1000)

                width = int_or_none(xpath_text(fnode, './width', 'width'))
                height = int_or_none(xpath_text(fnode, './height', 'height'))

                filesize = int_or_none(xpath_text(fnode, './filesize', 'filesize'))

                formats.append({
                    'format_id': format_id,
                    'url': video_url,
                    'ext': ext,
                    'acodec': format_m.group('acodec'),
                    'vcodec': format_m.group('vcodec'),
                    'abr': abr,
                    'vbr': vbr,
                    'width': width,
                    'height': height,
                    'filesize': filesize,
                    'protocol': proto,
                })
            format_ids.append(format_id)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'duration': duration,
            'thumbnails': thumbnails,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'upload_date': upload_date,
            'formats': formats,
        }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        xml_url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
        return self.extract_from_xml_url(video_id, xml_url)
199
200
class ZDFChannelIE(InfoExtractor):
    """Playlist extractor for ZDFmediathek channel overview pages.

    Pages are fetched lazily via OnDemandPagedList, _PAGE_SIZE teasers at a
    time, and each video/topic teaser is yielded as a 'zdf:...' URL entry
    that ZDFIE resolves.
    """
    _VALID_URL = r'(?:zdf:topic:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/.*kanaluebersicht/(?:[^/]+/)?)(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.zdf.de/ZDFmediathek#/kanaluebersicht/1586442/sendung/Titanic',
        'info_dict': {
            'id': '1586442',
        },
        'playlist_count': 3,
    }, {
        'url': 'http://www.zdf.de/ZDFmediathek/kanaluebersicht/aktuellste/332',
        'only_matching': True,
    }, {
        'url': 'http://www.zdf.de/ZDFmediathek/kanaluebersicht/meist-gesehen/332',
        'only_matching': True,
    }, {
        'url': 'http://www.zdf.de/ZDFmediathek/kanaluebersicht/_/1798716?bc=nrt;nrm?flash=off',
        'only_matching': True,
    }]
    _PAGE_SIZE = 50

    def _fetch_page(self, channel_id, page):
        """Yield url-type entry dicts for one page of the channel listing.

        Uses xpath_text for optional nodes so a missing <title>/<detail>/
        <type>/<assetId> element does not raise AttributeError.
        """
        offset = page * self._PAGE_SIZE
        xml_url = (
            'http://www.zdf.de/ZDFmediathek/xmlservice/web/aktuellste?ak=web&offset=%d&maxLength=%d&id=%s'
            % (offset, self._PAGE_SIZE, channel_id))
        doc = self._download_xml(
            xml_url, channel_id,
            note='Downloading channel info',
            errnote='Failed to download channel info')

        title = xpath_text(doc, './/information/title', 'title')
        description = xpath_text(doc, './/information/detail', 'description')
        for asset in doc.findall('.//teasers/teaser'):
            a_type = xpath_text(asset, './type', 'type')
            a_id = xpath_text(asset, './details/assetId', 'asset id')
            # Only video and topic teasers can be resolved by ZDFIE.
            if a_type not in ('video', 'topic') or not a_id:
                continue
            yield {
                '_type': 'url',
                'playlist_title': title,
                'playlist_description': description,
                'url': 'zdf:%s:%s' % (a_type, a_id),
            }

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        entries = OnDemandPagedList(
            functools.partial(self._fetch_page, channel_id), self._PAGE_SIZE)

        return {
            '_type': 'playlist',
            'id': channel_id,
            'entries': entries,
        }