Merge branch 'pr-bbcnews' of https://github.com/atomicdryad/youtube-dl into atomicdry...
[youtube-dl] / youtube_dl / extractor / bbc.py
index 310db9d1db7157e3164ed0df52e2641160f6bc93..86327d8ed641a14d5d6fa3233f8c9f1eceeda6a3 100644 (file)
@@ -214,12 +214,12 @@ class BBCCoUkIE(InfoExtractor):
 
     def _extract_video(self, media, programme_id):
         formats = []
-        vbr = int(media.get('bitrate'))
+        vbr = int_or_none(media.get('bitrate'))
         vcodec = media.get('encoding')
         service = media.get('service')
-        width = int(media.get('width'))
-        height = int(media.get('height'))
-        file_size = int(media.get('media_file_size'))
+        width = int_or_none(media.get('width'))
+        height = int_or_none(media.get('height'))
+        file_size = int_or_none(media.get('media_file_size'))
         for connection in self._extract_connections(media):
             conn_formats = self._extract_connection(connection, programme_id)
             for format in conn_formats:
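
Note: attributes such as bitrate, width, height and media_file_size are optional in the media selector response, and int(None) raises a TypeError, hence the switch to int_or_none() from youtube_dl.utils throughout this file (the import is assumed to already be present at the top of bbc.py; it is not shown in this diff). A minimal sketch of the behaviour relied on here (the real utils helper additionally supports scaling, a default and a get_attr argument):

    def int_or_none(v, default=None):
        # Tolerate a missing value instead of raising; otherwise coerce to int.
        return default if v is None else int(v)
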
@@ -236,7 +236,7 @@ class BBCCoUkIE(InfoExtractor):
 
     def _extract_audio(self, media, programme_id):
         formats = []
-        abr = int(media.get('bitrate'))
+        abr = int_or_none(media.get('bitrate'))
         acodec = media.get('encoding')
         service = media.get('service')
         for connection in self._extract_connections(media):
@@ -255,26 +255,11 @@ class BBCCoUkIE(InfoExtractor):
         for connection in self._extract_connections(media):
             captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
             lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
-            ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/2006/10/ttaf1}'))
-            srt = ''
-
-            def _extract_text(p):
-                if p.text is not None:
-                    stripped_text = p.text.strip()
-                    if stripped_text:
-                        return stripped_text
-                return ' '.join(span.text.strip() for span in p.findall('{http://www.w3.org/2006/10/ttaf1}span'))
-            for pos, p in enumerate(ps):
-                srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), p.get('begin'), p.get('end'), _extract_text(p))
             subtitles[lang] = [
                 {
                     'url': connection.get('href'),
                     'ext': 'ttml',
                 },
-                {
-                    'data': srt,
-                    'ext': 'srt',
-                },
             ]
         return subtitles
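
Note: with the hand-rolled TTML-to-SRT conversion removed, _extract_subtitles() now returns only a reference to the raw TTML document for each language, roughly of this shape (the URL is illustrative, not taken from a real response):

    subtitles = {
        'en': [{
            # href of the captions connection, served as TTML
            'url': 'http://www.bbc.co.uk/iplayer/subtitles/example_captions.xml',
            'ext': 'ttml',
        }],
    }
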
 
@@ -319,7 +304,7 @@ class BBCCoUkIE(InfoExtractor):
                     if kind != 'programme' and kind != 'radioProgramme':
                         continue
                     programme_id = item.get('vpid')
-                    duration = int(item.get('duration'))
+                    duration = int_or_none(item.get('duration'))
                     formats, subtitles = self._download_media_selector(programme_id)
                 return programme_id, title, description, duration, formats, subtitles
         except ExtractorError as ee:
@@ -351,7 +336,7 @@ class BBCCoUkIE(InfoExtractor):
             title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
             description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
             programme_id = item.get('identifier')
-            duration = int(item.get('duration'))
+            duration = int_or_none(item.get('duration'))
             formats, subtitles = self._download_media_selector(programme_id)
 
         return programme_id, title, description, duration, formats, subtitles
@@ -412,14 +397,14 @@ class BBCNewsIE(BBCCoUkIE):
             'title': 'Russia stages massive WW2 parade despite Western boycott',
         },
         'playlist_count': 2,
-    },{
+    }, {
         'url': 'http://www.bbc.com/news/business-28299555',
         'info_dict': {
             'id': 'business-28299555',
             'title': 'Farnborough Airshow: Video highlights',
         },
         'playlist_count': 9,
-    },{
+    }, {
         'url': 'http://www.bbc.com/news/world-europe-32041533',
         'note': 'Video',
         'info_dict': {
@@ -428,30 +413,38 @@ class BBCNewsIE(BBCCoUkIE):
             'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
             'description': 'Germanwings plane crash site in aerial video - Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
             'duration': 47,
+            'upload_date': '20150324',
+            'uploader': 'BBC News',
         },
         'params': {
             'skip_download': True,
         }
-    },{
+    }, {
         'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
         'note': 'Video',
         'info_dict': {
             'id': 'NA',
             'ext': 'mp4',
-            'title': 'YPG - Tel Abyad..n tamam. kontrol.m.zde',
+            'title': 'YPG: Tel Abyad\'\u0131n tamam\u0131 kontrol\xfcm\xfczde',
+            'description': 'YPG: Tel Abyad\'\u0131n tamam\u0131 kontrol\xfcm\xfczde',
             'duration': 47,
+            'upload_date': '20150615',
+            'uploader': 'BBC News',
         },
         'params': {
             'skip_download': True,
         }
-    },{
+    }, {
         'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
         'note': 'Video',
         'info_dict': {
             'id': '39275083',
             'ext': 'mp4',
-            'title': 'Honduras militariza sus hospitales por nuevo esc.ndalo de corrupci.n',
+            'title': 'Honduras militariza sus hospitales por nuevo esc\xe1ndalo de corrupci\xf3n',
+            'description': 'Honduras militariza sus hospitales por nuevo esc\xe1ndalo de corrupci\xf3n',
             'duration': 87,
+            'upload_date': '20150619',
+            'uploader': 'BBC News',
         },
         'params': {
             'skip_download': True,
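
Note: the \u and \x escapes in the updated test expectations decode to the real Turkish and Spanish titles; a quick sanity check that can be run in a Python 3 shell (the right-hand strings are simply the decoded forms):

    assert 'YPG: Tel Abyad\'\u0131n tamam\u0131 kontrol\xfcm\xfczde' == "YPG: Tel Abyad'ın tamamı kontrolümüzde"
    assert 'Honduras militariza sus hospitales por nuevo esc\xe1ndalo de corrupci\xf3n' == 'Honduras militariza sus hospitales por nuevo escándalo de corrupción'
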
@@ -466,81 +459,89 @@ class BBCNewsIE(BBCCoUkIE):
 
         pubdate = self._html_search_regex(r'"datePublished":\s*"(\d+-\d+-\d+)', webpage, 'date', default=None)
         if pubdate:
-           pubdate = pubdate.replace('-','')
+            pubdate = pubdate.replace('-', '')
 
         ret = []
         jsent = []
 
         # works with bbc.com/news/something-something-123456 articles
         jsent = map(
-           lambda m: self._parse_json(m,list_id),
-           re.findall(r"data-media-meta='({[^']+})'", webpage)
+            lambda m: self._parse_json(m, list_id),
+            re.findall(r"data-media-meta='({[^']+})'", webpage)
         )
+        jsent = list(jsent)  # map() is a lazy iterator on Python 3; the len() checks and jsent.append() below need a list
 
         if len(jsent) == 0:
-           # http://www.bbc.com/news/video_and_audio/international
-           # and single-video articles
-           masset = self._html_search_regex(r'mediaAssetPage\.init\(\s*({.+?}), "/', webpage, 'mediaassets', default=None)
-           if masset:
-              jmasset = self._parse_json(masset,list_id)
-              for key, val in jmasset.get('videos',{}).items():
-                  for skey, sval in val.items():
-                      sval['id'] = skey
-                      jsent.append(sval)
+            # http://www.bbc.com/news/video_and_audio/international
+            # and single-video articles
+            masset = self._html_search_regex(r'mediaAssetPage\.init\(\s*({.+?}), "/', webpage, 'mediaassets', default=None)
+            if masset:
+                jmasset = self._parse_json(masset, list_id)
+                for key, val in jmasset.get('videos', {}).items():
+                    for skey, sval in val.items():
+                        sval['id'] = skey
+                        jsent.append(sval)
 
         if len(jsent) == 0:
-           # stubbornly generic extractor for {json with "image":{allvideoshavethis},etc}
-           # in http://www.bbc.com/news/video_and_audio/international
-           # prone to breaking if entries have sourceFiles list
-           jsent = map(
-               lambda m: self._parse_json(m,list_id),
-               re.findall(r"({[^{}]+image\":{[^}]+}[^}]+})", webpage)
-           )          
+            # Last resort: generically match any JSON object that has an "image" key,
+            # as the entries on http://www.bbc.com/news/video_and_audio/international do.
+            # Prone to breaking if entries have a sourceFiles list.
+            jsent = list(map(
+                lambda m: self._parse_json(m, list_id),
+                re.findall(r"({[^{}]+image\":{[^}]+}[^}]+})", webpage)
+            ))
 
         if len(jsent) == 0:
-           raise ExtractorError('No video found', expected=True)
+            raise ExtractorError('No video found', expected=True)
 
         for jent in jsent:
             programme_id = jent.get('externalId')
             xml_url = jent.get('href')
 
-            title = jent.get('caption',list_title)
+            title = jent.get('caption', '')
+            if title == '':
+                title = list_title
 
             duration = parse_duration(jent.get('duration'))
-            description = list_title + ' - ' + jent.get('caption','')
+            description = list_title
+            if jent.get('caption', '') != '':
+                description += ' - ' + jent.get('caption')
             thumbnail = None
-            if jent.has_key('image'):
-               thumbnail=jent['image'].get('href')
+            if jent.get('image') is not None:
+                thumbnail = jent['image'].get('href')
 
             formats = []
             subtitles = []
 
             if programme_id:
-               formats, subtitles = self._download_media_selector(programme_id)
-            elif jent.has_key('sourceFiles'):
-               # mediaselector not used at
-               # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu
-               for key, val in jent['sourceFiles'].items():
-                  formats.append( {
-                     'ext': val.get('encoding'),
-                     'url': val.get('url'),
-                     'filesize': int(val.get('filesize')),
-                     'format_id': key
-                  )
+                formats, subtitles = self._download_media_selector(programme_id)
+            elif jent.get('sourceFiles') is not None:
+                # mediaselector not used at
+                # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu
+                for key, val in jent['sourceFiles'].items():
+                    formats.append({
+                        'ext': val.get('encoding'),
+                        'url': val.get('url'),
+                        'filesize': int_or_none(val.get('filesize')),
+                        'format_id': key
+                    })
             elif xml_url:
-               # Cheap fallback
-               # http://playlists.bbc.co.uk/news/(list_id)[ABC..]/playlist.sxml
-               xml = self._download_webpage(xml_url, programme_id, 'Downloading playlist.sxml for externalId (fallback)')
-               programme_id = self._search_regex(r'<mediator [^>]*identifier="(.+?)"', xml, 'playlist.sxml (externalId fallback)')
-               formats, subtitles = self._download_media_selector(programme_id)
+                # Cheap fallback
+                # http://playlists.bbc.co.uk/news/(list_id)[ABC..]/playlist.sxml
+                xml = self._download_webpage(xml_url, programme_id, 'Downloading playlist.sxml for externalId (fallback)')
+                programme_id = self._search_regex(r'<mediator [^>]*identifier="(.+?)"', xml, 'playlist.sxml (externalId fallback)')
+                formats, subtitles = self._download_media_selector(programme_id)
 
             if len(formats) == 0:
-               raise ExtractorError('unsupported json media entry.\n    '+str(jent)+'\n')
-               
+                raise ExtractorError('unsupported json media entry.\n    ' + str(jent) + '\n')
+
             self._sort_formats(formats)
 
-            ret.append( {
-                'id': jent.get('programme_id',jent.get('id')),
+            video_id = jent.get('id') if programme_id is None else programme_id
+            if video_id is None:
+                video_id = 'NA'
+
+            ret.append({
+                'id': video_id,
                 'uploader': 'BBC News',
                 'upload_date': pubdate,
                 'title': title,
@@ -549,8 +550,8 @@ class BBCNewsIE(BBCCoUkIE):
                 'duration': duration,
                 'formats': formats,
                 'subtitles': subtitles,
-            } )
+            })
 
         if len(ret) > 0:
-           return self.playlist_result(ret, list_id, list_title)
+            return self.playlist_result(ret, list_id, list_title)
         raise ExtractorError('No video found', expected=True)
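
Note: a minimal sketch of exercising the rewritten BBCNewsIE through the public YoutubeDL API (not part of this commit; the URL is one of the _TESTS entries above, and download=False keeps the run metadata-only):

    from youtube_dl import YoutubeDL

    with YoutubeDL() as ydl:
        info = ydl.extract_info('http://www.bbc.com/news/world-europe-32041533', download=False)
        # BBCNewsIE returns a playlist result; each entry carries the
        # uploader/upload_date fields populated above.
        for entry in info.get('entries', [info]):
            print('%s %s %s' % (entry.get('id'), entry.get('upload_date'), entry.get('title')))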