Merge branch 'remitamine-baidu'
[youtube-dl] / youtube_dl / extractor / bbc.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..utils import (
8     ExtractorError,
9     float_or_none,
10     int_or_none,
11     parse_duration,
12     parse_iso8601,
13     remove_end,
14     unescapeHTML,
15 )
16 from ..compat import (
17     compat_etree_fromstring,
18     compat_HTTPError,
19 )
20
21
class BBCCoUkIE(InfoExtractor):
    """Extractor for BBC iPlayer programmes, clips and radio player pages."""
    IE_NAME = 'bbc.co.uk'
    IE_DESC = 'BBC iPlayer'
    # Programme/clip ids are 8 chars starting with 'p' or 'b' (e.g. b039g8p7).
    _ID_REGEX = r'[pb][\da-z]{7}'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?bbc\.co\.uk/
                        (?:
                            programmes/(?!articles/)|
                            iplayer(?:/[^/]+)?/(?:episode/|playlist/)|
                            music/clips[/#]|
                            radio/player/
                        )
                        (?P<id>%s)
                    ''' % _ID_REGEX

    # Media selector endpoints, tried in order by _download_media_selector.
    _MEDIASELECTOR_URLS = [
        # Provides HQ HLS streams with even better quality than the pc mediaset but fails
        # with geolocation in some cases when it's even not geo restricted at all (e.g.
        # http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable.
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
    ]

    # XML namespaces: media selector responses vs. legacy EMP playlists.
    _MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection'
    _EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'

    _NAMESPACES = (
        _MEDIASELECTION_NS,
        _EMP_PLAYLIST_NS,
    )

    _TESTS = [
        {
            'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
            'info_dict': {
                'id': 'b039d07m',
                'ext': 'flv',
                'title': 'Leonard Cohen, Kaleidoscope - BBC Radio 4',
                'description': 'The Canadian poet and songwriter reflects on his musical career.',
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Man in Black: Series 3: The Printed Name',
                'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
                'duration': 1800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Episode is no longer available on BBC iPlayer Radio',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Voice UK: Series 3: Blind Auditions 5',
                'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
                'duration': 5100,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
            'info_dict': {
                'id': 'b03k3pb7',
                'ext': 'flv',
                'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
                'description': '2. Invasion',
                'duration': 3600,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        }, {
            'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
            'info_dict': {
                'id': 'b04v209v',
                'ext': 'flv',
                'title': 'Pete Tong, The Essential New Tune Special',
                'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
                'duration': 10800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Episode is no longer available on BBC iPlayer Radio',
        }, {
            'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
            'note': 'Audio',
            'info_dict': {
                'id': 'p02frcch',
                'ext': 'flv',
                'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
                'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
                'duration': 3507,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
            'note': 'Video',
            'info_dict': {
                'id': 'p025c103',
                'ext': 'flv',
                'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
                'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
                'duration': 226,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
            'info_dict': {
                'id': 'p02n76xf',
                'ext': 'flv',
                'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
                'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
                'duration': 3540,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'geolocation',
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
            'info_dict': {
                'id': 'b05zmgw1',
                'ext': 'flv',
                'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
                'title': 'Royal Academy Summer Exhibition',
                'duration': 3540,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'geolocation',
        }, {
            # iptv-all mediaset fails with geolocation however there is no geo restriction
            # for this programme at all
            'url': 'http://www.bbc.co.uk/programmes/b06bp7lf',
            'info_dict': {
                'id': 'b06bp7kf',
                'ext': 'flv',
                'title': "Annie Mac's Friday Night, B.Traits sits in for Annie",
                'description': 'B.Traits sits in for Annie Mac with a Mini-Mix from Disclosure.',
                'duration': 10800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
            'only_matching': True,
        }, {
            'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
            'only_matching': True,
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
            'only_matching': True,
        }, {
            'url': 'http://www.bbc.co.uk/radio/player/p03cchwf',
            'only_matching': True,
        }
    ]
211
    class MediaSelectionError(Exception):
        # Raised when the media selector returns an <error> document; `id`
        # carries the error code (e.g. 'notukerror', 'geolocation',
        # 'selectionunavailable').
        def __init__(self, id):
            self.id = id
215
216     def _extract_asx_playlist(self, connection, programme_id):
217         asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
218         return [ref.get('href') for ref in asx.findall('./Entry/ref')]
219
220     def _extract_connection(self, connection, programme_id):
221         formats = []
222         kind = connection.get('kind')
223         protocol = connection.get('protocol')
224         supplier = connection.get('supplier')
225         if protocol == 'http':
226             href = connection.get('href')
227             transfer_format = connection.get('transferFormat')
228             # ASX playlist
229             if supplier == 'asx':
230                 for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
231                     formats.append({
232                         'url': ref,
233                         'format_id': 'ref%s_%s' % (i, supplier),
234                     })
235             # Skip DASH until supported
236             elif transfer_format == 'dash':
237                 pass
238             elif transfer_format == 'hls':
239                 formats.extend(self._extract_m3u8_formats(
240                     href, programme_id, ext='mp4', entry_protocol='m3u8_native',
241                     m3u8_id=supplier, fatal=False))
242             # Direct link
243             else:
244                 formats.append({
245                     'url': href,
246                     'format_id': supplier or kind or protocol,
247                 })
248         elif protocol == 'rtmp':
249             application = connection.get('application', 'ondemand')
250             auth_string = connection.get('authString')
251             identifier = connection.get('identifier')
252             server = connection.get('server')
253             formats.append({
254                 'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
255                 'play_path': identifier,
256                 'app': '%s?%s' % (application, auth_string),
257                 'page_url': 'http://www.bbc.co.uk',
258                 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
259                 'rtmp_live': False,
260                 'ext': 'flv',
261                 'format_id': supplier,
262             })
263         return formats
264
265     def _extract_items(self, playlist):
266         return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS)
267
268     def _findall_ns(self, element, xpath):
269         elements = []
270         for ns in self._NAMESPACES:
271             elements.extend(element.findall(xpath % ns))
272         return elements
273
274     def _extract_medias(self, media_selection):
275         error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS)
276         if error is None:
277             media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS)
278         if error is not None:
279             raise BBCCoUkIE.MediaSelectionError(error.get('id'))
280         return self._findall_ns(media_selection, './{%s}media')
281
    def _extract_connections(self, media):
        # Return every <connection> child of a <media> element, in any of the
        # known namespaces.
        return self._findall_ns(media, './{%s}connection')
284
285     def _extract_video(self, media, programme_id):
286         formats = []
287         vbr = int_or_none(media.get('bitrate'))
288         vcodec = media.get('encoding')
289         service = media.get('service')
290         width = int_or_none(media.get('width'))
291         height = int_or_none(media.get('height'))
292         file_size = int_or_none(media.get('media_file_size'))
293         for connection in self._extract_connections(media):
294             conn_formats = self._extract_connection(connection, programme_id)
295             for format in conn_formats:
296                 format.update({
297                     'width': width,
298                     'height': height,
299                     'vbr': vbr,
300                     'vcodec': vcodec,
301                     'filesize': file_size,
302                 })
303                 if service:
304                     format['format_id'] = '%s_%s' % (service, format['format_id'])
305             formats.extend(conn_formats)
306         return formats
307
308     def _extract_audio(self, media, programme_id):
309         formats = []
310         abr = int_or_none(media.get('bitrate'))
311         acodec = media.get('encoding')
312         service = media.get('service')
313         for connection in self._extract_connections(media):
314             conn_formats = self._extract_connection(connection, programme_id)
315             for format in conn_formats:
316                 format.update({
317                     'format_id': '%s_%s' % (service, format['format_id']),
318                     'abr': abr,
319                     'acodec': acodec,
320                 })
321             formats.extend(conn_formats)
322         return formats
323
324     def _get_subtitles(self, media, programme_id):
325         subtitles = {}
326         for connection in self._extract_connections(media):
327             captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
328             lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
329             subtitles[lang] = [
330                 {
331                     'url': connection.get('href'),
332                     'ext': 'ttml',
333                 },
334             ]
335         return subtitles
336
    def _raise_extractor_error(self, media_selection_error):
        # Surface a media selector error id (e.g. 'geolocation') as a clean,
        # expected ExtractorError instead of a traceback.
        raise ExtractorError(
            '%s returned error: %s' % (self.IE_NAME, media_selection_error.id),
            expected=True)
341
342     def _download_media_selector(self, programme_id):
343         last_exception = None
344         for mediaselector_url in self._MEDIASELECTOR_URLS:
345             try:
346                 return self._download_media_selector_url(
347                     mediaselector_url % programme_id, programme_id)
348             except BBCCoUkIE.MediaSelectionError as e:
349                 if e.id in ('notukerror', 'geolocation', 'selectionunavailable'):
350                     last_exception = e
351                     continue
352                 self._raise_extractor_error(e)
353         self._raise_extractor_error(last_exception)
354
355     def _download_media_selector_url(self, url, programme_id=None):
356         try:
357             media_selection = self._download_xml(
358                 url, programme_id, 'Downloading media selection XML')
359         except ExtractorError as ee:
360             if isinstance(ee.cause, compat_HTTPError) and ee.cause.code in (403, 404):
361                 media_selection = compat_etree_fromstring(ee.cause.read().decode('utf-8'))
362             else:
363                 raise
364         return self._process_media_selector(media_selection, programme_id)
365
366     def _process_media_selector(self, media_selection, programme_id):
367         formats = []
368         subtitles = None
369
370         for media in self._extract_medias(media_selection):
371             kind = media.get('kind')
372             if kind == 'audio':
373                 formats.extend(self._extract_audio(media, programme_id))
374             elif kind == 'video':
375                 formats.extend(self._extract_video(media, programme_id))
376             elif kind == 'captions':
377                 subtitles = self.extract_subtitles(media, programme_id)
378         return formats, subtitles
379
    def _download_playlist(self, playlist_id):
        """Fetch programme metadata via playlist.json, falling back to the
        legacy XML playlist on HTTP 404.

        Returns (programme_id, title, description, duration, formats, subtitles).
        """
        try:
            playlist = self._download_json(
                'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
                playlist_id, 'Downloading playlist JSON')

            version = playlist.get('defaultAvailableVersion')
            if version:
                smp_config = version['smpConfig']
                title = smp_config['title']
                description = smp_config['summary']
                for item in smp_config['items']:
                    kind = item['kind']
                    if kind != 'programme' and kind != 'radioProgramme':
                        continue
                    programme_id = item.get('vpid')
                    duration = int_or_none(item.get('duration'))
                    formats, subtitles = self._download_media_selector(programme_id)
                # NOTE(review): only the last matching item's media survives the
                # loop above, and if no item matches, programme_id/duration/
                # formats/subtitles are unbound here (UnboundLocalError) —
                # presumably playlists always contain one programme item; confirm.
                return programme_id, title, description, duration, formats, subtitles
        except ExtractorError as ee:
            # Only a 404 (no playlist.json for this programme) triggers the
            # legacy fallback below; anything else propagates.
            if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
                raise

        # fallback to legacy playlist
        return self._process_legacy_playlist(playlist_id)
405
406     def _process_legacy_playlist_url(self, url, display_id):
407         playlist = self._download_legacy_playlist_url(url, display_id)
408         return self._extract_from_legacy_playlist(playlist, display_id)
409
410     def _process_legacy_playlist(self, playlist_id):
411         return self._process_legacy_playlist_url(
412             'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id)
413
414     def _download_legacy_playlist_url(self, url, playlist_id=None):
415         return self._download_xml(
416             url, playlist_id, 'Downloading legacy playlist XML')
417
418     def _extract_from_legacy_playlist(self, playlist, playlist_id):
419         no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS)
420         if no_items is not None:
421             reason = no_items.get('reason')
422             if reason == 'preAvailability':
423                 msg = 'Episode %s is not yet available' % playlist_id
424             elif reason == 'postAvailability':
425                 msg = 'Episode %s is no longer available' % playlist_id
426             elif reason == 'noMedia':
427                 msg = 'Episode %s is not currently available' % playlist_id
428             else:
429                 msg = 'Episode %s is not available: %s' % (playlist_id, reason)
430             raise ExtractorError(msg, expected=True)
431
432         for item in self._extract_items(playlist):
433             kind = item.get('kind')
434             if kind != 'programme' and kind != 'radioProgramme':
435                 continue
436             title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text
437             description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS)
438             description = description_el.text if description_el is not None else None
439
440             def get_programme_id(item):
441                 def get_from_attributes(item):
442                     for p in('identifier', 'group'):
443                         value = item.get(p)
444                         if value and re.match(r'^[pb][\da-z]{7}$', value):
445                             return value
446                 get_from_attributes(item)
447                 mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS)
448                 if mediator is not None:
449                     return get_from_attributes(mediator)
450
451             programme_id = get_programme_id(item)
452             duration = int_or_none(item.get('duration'))
453
454             if programme_id:
455                 formats, subtitles = self._download_media_selector(programme_id)
456             else:
457                 formats, subtitles = self._process_media_selector(item, playlist_id)
458                 programme_id = playlist_id
459
460         return programme_id, title, description, duration, formats, subtitles
461
    def _real_extract(self, url):
        """Extract a single programme/clip from a bbc.co.uk page."""
        group_id = self._match_id(url)

        webpage = self._download_webpage(url, group_id, 'Downloading video page')

        programme_id = None
        duration = None

        # Embedded iPlayer config passed to mediator.bind(); carries the vpid.
        tviplayer = self._search_regex(
            r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
            webpage, 'player', default=None)

        if tviplayer:
            player = self._parse_json(tviplayer, group_id).get('player', {})
            duration = int_or_none(player.get('duration'))
            programme_id = player.get('vpid')

        if not programme_id:
            # Fallback: bare "vpid" JSON property anywhere in the page.
            programme_id = self._search_regex(
                r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None)

        if programme_id:
            formats, subtitles = self._download_media_selector(programme_id)
            title = self._og_search_title(webpage, default=None) or self._html_search_regex(
                r'<h2[^>]+id="parent-title"[^>]*>(.+?)</h2>', webpage, 'title')
            description = self._search_regex(
                r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
                webpage, 'description', default=None)
            if not description:
                description = self._html_search_meta('description', webpage)
        else:
            # No vpid on the page: resolve everything via playlist JSON/XML.
            programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)

        self._sort_formats(formats)

        return {
            'id': programme_id,
            'title': title,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
506
507
class BBCIE(BBCCoUkIE):
    """Generic extractor for bbc.com / bbc.co.uk article pages embedding media."""
    IE_NAME = 'bbc'
    IE_DESC = 'BBC'
    _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'

    # Overrides the parent's endpoints; tried in order.
    _MEDIASELECTOR_URLS = [
        # Provides HQ HLS streams but fails with geolocation in some cases when it's
        # even not geo restricted at all
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
        # Provides more formats, namely direct mp4 links, but fails on some videos with
        # notukerror for non UK (?) users (e.g.
        # http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
        'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
        # Provides fewer formats, but works everywhere for everybody (hopefully)
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
    ]

    _TESTS = [{
        # article with multiple videos embedded with data-playable containing vpids
        'url': 'http://www.bbc.com/news/world-europe-32668511',
        'info_dict': {
            'id': 'world-europe-32668511',
            'title': 'Russia stages massive WW2 parade despite Western boycott',
            'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
        },
        'playlist_count': 2,
    }, {
        # article with multiple videos embedded with data-playable (more videos)
        'url': 'http://www.bbc.com/news/business-28299555',
        'info_dict': {
            'id': 'business-28299555',
            'title': 'Farnborough Airshow: Video highlights',
            'description': 'BBC reports and video highlights at the Farnborough Airshow.',
        },
        'playlist_count': 9,
        'skip': 'Save time',
    }, {
        # article with multiple videos embedded with `new SMP()`
        # broken
        'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
        'info_dict': {
            'id': '3662a707-0af9-3149-963f-47bea720b460',
            'title': 'BBC Blogs - Adam Curtis - BUGGER',
        },
        'playlist_count': 18,
    }, {
        # single video embedded with data-playable containing vpid
        'url': 'http://www.bbc.com/news/world-europe-32041533',
        'info_dict': {
            'id': 'p02mprgb',
            'ext': 'mp4',
            'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
            'description': 'md5:2868290467291b37feda7863f7a83f54',
            'duration': 47,
            'timestamp': 1427219242,
            'upload_date': '20150324',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # article with single video embedded with data-playable containing XML playlist
        # with direct video links as progressiveDownloadUrl (for now these are extracted)
        # and playlist with f4m and m3u8 as streamingUrl
        'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
        'info_dict': {
            'id': '150615_telabyad_kentin_cogu',
            'ext': 'mp4',
            'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
            'timestamp': 1434397334,
            'upload_date': '20150615',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # single video embedded with data-playable containing XML playlists (regional section)
        'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
        'info_dict': {
            'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
            'ext': 'mp4',
            'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
            'timestamp': 1434713142,
            'upload_date': '20150619',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # single video from video playlist embedded with vxp-playlist-data JSON
        'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
        'info_dict': {
            'id': 'p02w6qjc',
            'ext': 'mp4',
            'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
            'duration': 56,
            'description': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # single video story with digitalData
        'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
        'info_dict': {
            'id': 'p02q6gc4',
            'ext': 'flv',
            'title': 'Sri Lanka’s spicy secret',
            'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
            'timestamp': 1437674293,
            'upload_date': '20150723',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # single video story without digitalData
        'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
        'info_dict': {
            'id': 'p018zqqg',
            'ext': 'mp4',
            'title': 'Hyundai Santa Fe Sport: Rock star',
            'description': 'md5:b042a26142c4154a6e472933cf20793d',
            'timestamp': 1415867444,
            'upload_date': '20141113',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # single video with playlist.sxml URL in playlist param
        'url': 'http://www.bbc.com/sport/0/football/33653409',
        'info_dict': {
            'id': 'p02xycnp',
            'ext': 'mp4',
            'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
            'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.',
            'duration': 140,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # article with multiple videos embedded with playlist.sxml in playlist param
        'url': 'http://www.bbc.com/sport/0/football/34475836',
        'info_dict': {
            'id': '34475836',
            'title': 'What Liverpool can expect from Klopp',
        },
        'playlist_count': 3,
    }, {
        # single video with playlist URL from weather section
        'url': 'http://www.bbc.com/weather/features/33601775',
        'only_matching': True,
    }, {
        # custom redirection to www.bbc.com
        'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
        'only_matching': True,
    }]
671
672     @classmethod
673     def suitable(cls, url):
674         return False if BBCCoUkIE.suitable(url) or BBCCoUkArticleIE.suitable(url) else super(BBCIE, cls).suitable(url)
675
676     def _extract_from_media_meta(self, media_meta, video_id):
677         # Direct links to media in media metadata (e.g.
678         # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
679         # TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml
680         source_files = media_meta.get('sourceFiles')
681         if source_files:
682             return [{
683                 'url': f['url'],
684                 'format_id': format_id,
685                 'ext': f.get('encoding'),
686                 'tbr': float_or_none(f.get('bitrate'), 1000),
687                 'filesize': int_or_none(f.get('filesize')),
688             } for format_id, f in source_files.items() if f.get('url')], []
689
690         programme_id = media_meta.get('externalId')
691         if programme_id:
692             return self._download_media_selector(programme_id)
693
694         # Process playlist.sxml as legacy playlist
695         href = media_meta.get('href')
696         if href:
697             playlist = self._download_legacy_playlist_url(href)
698             _, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id)
699             return formats, subtitles
700
701         return [], []
702
703     def _extract_from_playlist_sxml(self, url, playlist_id, timestamp):
704         programme_id, title, description, duration, formats, subtitles = \
705             self._process_legacy_playlist_url(url, playlist_id)
706         self._sort_formats(formats)
707         return {
708             'id': programme_id,
709             'title': title,
710             'description': description,
711             'duration': duration,
712             'timestamp': timestamp,
713             'formats': formats,
714             'subtitles': subtitles,
715         }
716
    def _real_extract(self, url):
        """Extract video(s) from a generic bbc.com/bbc.co.uk page.

        Tries a cascade of embedding schemes, returning as soon as one
        yields results:
          1. playlist.sxml URLs in <param name="playlist"> / data-media-id
          2. data-playable JSON blobs (vpid items or otherSettings playlists)
          3. a single vpid embedded in the page (vpid/externalIdentifier/videoId)
          4. SMP/setPlaylist embed URLs delegated to BBCCoUk
          5. data-media-meta / mediaAssetPage / vxp-playlist-data JSON
        """
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        timestamp = None
        playlist_title = None
        playlist_description = None

        # Prefer page metadata from embedded JSON-LD when present.
        ld = self._parse_json(
            self._search_regex(
                r'(?s)<script type="application/ld\+json">(.+?)</script>',
                webpage, 'ld json', default='{}'),
            playlist_id, fatal=False)
        if ld:
            timestamp = parse_iso8601(ld.get('datePublished'))
            playlist_title = ld.get('headline')
            playlist_description = ld.get('articleBody')

        # Fall back to meta tags / inline JSON for the publication date.
        if not timestamp:
            timestamp = parse_iso8601(self._search_regex(
                [r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
                 r'itemprop="datePublished"[^>]+datetime="([^"]+)"',
                 r'"datePublished":\s*"([^"]+)'],
                webpage, 'date', default=None))

        entries = []

        # article with multiple videos embedded with playlist.sxml (e.g.
        # http://www.bbc.com/sport/0/football/34475836)
        playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage)
        playlists.extend(re.findall(r'data-media-id="([^"]+/playlist\.sxml)"', webpage))
        if playlists:
            entries = [
                self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp)
                for playlist_url in playlists]

        # news article with multiple videos embedded with data-playable
        data_playables = re.findall(r'data-playable=(["\'])({.+?})\1', webpage)
        if data_playables:
            for _, data_playable_json in data_playables:
                data_playable = self._parse_json(
                    unescapeHTML(data_playable_json), playlist_id, fatal=False)
                if not data_playable:
                    continue
                settings = data_playable.get('settings', {})
                if settings:
                    # data-playable with video vpid in settings.playlistObject.items (e.g.
                    # http://www.bbc.com/news/world-us-canada-34473351)
                    playlist_object = settings.get('playlistObject', {})
                    if playlist_object:
                        items = playlist_object.get('items')
                        if items and isinstance(items, list):
                            title = playlist_object['title']
                            description = playlist_object.get('summary')
                            # only the first item's vpid/duration is used here;
                            # presumably items beyond [0] are alternates — TODO confirm
                            duration = int_or_none(items[0].get('duration'))
                            programme_id = items[0].get('vpid')
                            formats, subtitles = self._download_media_selector(programme_id)
                            self._sort_formats(formats)
                            entries.append({
                                'id': programme_id,
                                'title': title,
                                'description': description,
                                'timestamp': timestamp,
                                'duration': duration,
                                'formats': formats,
                                'subtitles': subtitles,
                            })
                    else:
                        # data-playable without vpid but with a playlist.sxml URLs
                        # in otherSettings.playlist (e.g.
                        # http://www.bbc.com/turkce/multimedya/2015/10/151010_vid_ankara_patlama_ani)
                        playlist = data_playable.get('otherSettings', {}).get('playlist', {})
                        if playlist:
                            entries.append(self._extract_from_playlist_sxml(
                                playlist.get('progressiveDownloadUrl'), playlist_id, timestamp))

        if entries:
            playlist_title = playlist_title or remove_end(self._og_search_title(webpage), ' - BBC News')
            playlist_description = playlist_description or self._og_search_description(webpage, default=None)
            return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)

        # single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
        programme_id = self._search_regex(
            [r'data-video-player-vpid="(%s)"' % self._ID_REGEX,
             r'<param[^>]+name="externalIdentifier"[^>]+value="(%s)"' % self._ID_REGEX,
             r'videoId\s*:\s*["\'](%s)["\']' % self._ID_REGEX],
            webpage, 'vpid', default=None)

        if programme_id:
            formats, subtitles = self._download_media_selector(programme_id)
            self._sort_formats(formats)
            # digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star)
            digital_data = self._parse_json(
                self._search_regex(
                    r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'),
                programme_id, fatal=False)
            page_info = digital_data.get('page', {}).get('pageInfo', {})
            title = page_info.get('pageName') or self._og_search_title(webpage)
            description = page_info.get('description') or self._og_search_description(webpage)
            timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp
            return {
                'id': programme_id,
                'title': title,
                'description': description,
                'timestamp': timestamp,
                'formats': formats,
                'subtitles': subtitles,
            }

        # Strip the trailing " - BBC <service>" suffix from the page title.
        playlist_title = self._html_search_regex(
            r'<title>(.*?)(?:\s*-\s*BBC [^ ]+)?</title>', webpage, 'playlist title')
        playlist_description = self._og_search_description(webpage, default=None)

        # Find every JSON blob in the page matching `pattern`, dropping any
        # that fail to parse.
        def extract_all(pattern):
            return list(filter(None, map(
                lambda s: self._parse_json(s, playlist_id, fatal=False),
                re.findall(pattern, webpage))))

        # Multiple video article (e.g.
        # http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
        EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' % self._ID_REGEX
        entries = []
        for match in extract_all(r'new\s+SMP\(({.+?})\)'):
            embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
            if embed_url and re.match(EMBED_URL, embed_url):
                entries.append(embed_url)
        entries.extend(re.findall(
            r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage))
        if entries:
            # Delegate each embed URL to the BBCCoUk extractor.
            return self.playlist_result(
                [self.url_result(entry, 'BBCCoUk') for entry in entries],
                playlist_id, playlist_title, playlist_description)

        # Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511)
        medias = extract_all(r"data-media-meta='({[^']+})'")

        if not medias:
            # Single video article (e.g. http://www.bbc.com/news/video_and_audio/international)
            media_asset = self._search_regex(
                r'mediaAssetPage\.init\(\s*({.+?}), "/',
                webpage, 'media asset', default=None)
            if media_asset:
                media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False)
                medias = []
                for video in media_asset_page.get('videos', {}).values():
                    medias.extend(video.values())

        if not medias:
            # Multiple video playlist with single `now playing` entry (e.g.
            # http://www.bbc.com/news/video_and_audio/must_see/33767813)
            vxp_playlist = self._parse_json(
                self._search_regex(
                    r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>',
                    webpage, 'playlist data'),
                playlist_id)
            playlist_medias = []
            for item in vxp_playlist:
                media = item.get('media')
                if not media:
                    continue
                playlist_medias.append(media)
                # Download single video if found media with asset id matching the video id from URL
                if item.get('advert', {}).get('assetId') == playlist_id:
                    medias = [media]
                    break
            # Fallback to the whole playlist
            if not medias:
                medias = playlist_medias

        entries = []
        for num, media_meta in enumerate(medias, start=1):
            formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
            if not formats:
                # Entries without downloadable formats are skipped entirely.
                continue
            self._sort_formats(formats)

            video_id = media_meta.get('externalId')
            if not video_id:
                # Synthesize an id; suffix with the index only for multi-video pages.
                video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num)

            title = media_meta.get('caption')
            if not title:
                title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num)

            duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration'))

            # Collect thumbnail candidates from the nested images dict plus
            # the optional top-level 'image' entry.
            images = []
            for image in media_meta.get('images', {}).values():
                images.extend(image.values())
            if 'image' in media_meta:
                images.append(media_meta['image'])

            thumbnails = [{
                'url': image.get('href'),
                'width': int_or_none(image.get('width')),
                'height': int_or_none(image.get('height')),
            } for image in images]

            entries.append({
                'id': video_id,
                'title': title,
                'thumbnails': thumbnails,
                'duration': duration,
                'timestamp': timestamp,
                'formats': formats,
                'subtitles': subtitles,
            })

        return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
927
928
class BBCCoUkArticleIE(InfoExtractor):
    """Extractor for BBC programme article pages, which embed multiple clips.

    Yields a playlist of the programme clips referenced by the article.
    """
    # Accept https as well as http and an optional www. prefix; the previous
    # pattern only matched literal http://www. URLs, so https links (to which
    # bbc.co.uk redirects) were never handled by this extractor.
    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)'
    IE_NAME = 'bbc.co.uk:article'
    IE_DESC = 'BBC articles'

    _TEST = {
        'url': 'http://www.bbc.co.uk/programmes/articles/3jNQLTMrPlYGTBn0WV6M2MS/not-your-typical-role-model-ada-lovelace-the-19th-century-programmer',
        'info_dict': {
            'id': '3jNQLTMrPlYGTBn0WV6M2MS',
            'title': 'Calculating Ada: The Countess of Computing - Not your typical role model: Ada Lovelace the 19th century programmer - BBC Four',
            'description': 'Hannah Fry reveals some of her surprising discoveries about Ada Lovelace during filming.',
        },
        'playlist_count': 4,
        'add_ie': ['BBCCoUk'],
    }

    def _real_extract(self, url):
        """Return a playlist of all clips embedded in the article page."""
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage).strip()

        # Each embedded clip is marked up with typeof="Clip"; its resource
        # attribute carries the programme URL, delegated via url_result.
        entries = [self.url_result(programme_url) for programme_url in re.findall(
            r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)]

        return self.playlist_result(entries, playlist_id, title, description)