[turner,nba,cnn,adultswim] add base extractor to parse cvp feeds
authorRemita Amine <remitamine@gmail.com>
Sun, 28 Aug 2016 15:43:15 +0000 (16:43 +0100)
committerRemita Amine <remitamine@gmail.com>
Sun, 28 Aug 2016 15:51:09 +0000 (16:51 +0100)
youtube_dl/extractor/adultswim.py
youtube_dl/extractor/cnn.py
youtube_dl/extractor/nba.py
youtube_dl/extractor/turner.py [new file with mode: 0644]

index 96599048f0ffe4936f6e1ecbf5208b9a60e7a83a..ef3cc2a6199ae8bb765adafe1f75c9bf9719aed1 100644 (file)
@@ -3,16 +3,11 @@ from __future__ import unicode_literals
 
 import re
 
-from .common import InfoExtractor
-from ..utils import (
-    determine_ext,
-    ExtractorError,
-    float_or_none,
-    xpath_text,
-)
+from .turner import TurnerBaseIE
+from ..utils import ExtractorError
 
 
-class AdultSwimIE(InfoExtractor):
+class AdultSwimIE(TurnerBaseIE):
     _VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?P<is_playlist>playlists/)?(?P<show_path>[^/]+)/(?P<episode_path>[^/?#]+)/?'
 
     _TESTS = [{
@@ -96,7 +91,8 @@ class AdultSwimIE(InfoExtractor):
         'params': {
             # m3u8 download
             'skip_download': True,
-        }
+        },
+        'expected_warnings': ['Unable to download f4m manifest'],
     }]
 
     @staticmethod
@@ -176,57 +172,23 @@ class AdultSwimIE(InfoExtractor):
 
         entries = []
         for part_num, segment_id in enumerate(segment_ids):
-            segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=desktop' % segment_id
-
+            segment_info = self._extract_cvp_info(
+                'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=desktop' % segment_id,
+                segment_id, {
+                    'secure': {
+                        'media_src': 'http://androidhls-secure.cdn.turner.com/adultswim/big',
+                        'tokenizer_src': 'http://www.adultswim.com/astv/mvpd/processors/services/token_ipadAdobe.do',
+                    },
+                })
             segment_title = '%s - %s' % (show_title, episode_title)
             if len(segment_ids) > 1:
                 segment_title += ' Part %d' % (part_num + 1)
-
-            idoc = self._download_xml(
-                segment_url, segment_title,
-                'Downloading segment information', 'Unable to download segment information')
-
-            segment_duration = float_or_none(
-                xpath_text(idoc, './/trt', 'segment duration').strip())
-
-            formats = []
-            file_els = idoc.findall('.//files/file') or idoc.findall('./files/file')
-
-            unique_urls = []
-            unique_file_els = []
-            for file_el in file_els:
-                media_url = file_el.text
-                if not media_url or determine_ext(media_url) == 'f4m':
-                    continue
-                if file_el.text not in unique_urls:
-                    unique_urls.append(file_el.text)
-                    unique_file_els.append(file_el)
-
-            for file_el in unique_file_els:
-                bitrate = file_el.attrib.get('bitrate')
-                ftype = file_el.attrib.get('type')
-                media_url = file_el.text
-                if determine_ext(media_url) == 'm3u8':
-                    formats.extend(self._extract_m3u8_formats(
-                        media_url, segment_title, 'mp4', preference=0,
-                        m3u8_id='hls', fatal=False))
-                else:
-                    formats.append({
-                        'format_id': '%s_%s' % (bitrate, ftype),
-                        'url': file_el.text.strip(),
-                        # The bitrate may not be a number (for example: 'iphone')
-                        'tbr': int(bitrate) if bitrate.isdigit() else None,
-                    })
-
-            self._sort_formats(formats)
-
-            entries.append({
+            segment_info.update({
                 'id': segment_id,
                 'title': segment_title,
-                'formats': formats,
-                'duration': segment_duration,
-                'description': episode_description
+                'description': episode_description,
             })
+            entries.append(segment_info)
 
         return {
             '_type': 'playlist',
index 220bb55e8f4ea417481f3f207c4112d8c9e13e17..1bf87f6ea72ef7f8b6642a9017a59710cb6b1b97 100644 (file)
@@ -3,14 +3,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    int_or_none,
-    parse_duration,
-    url_basename,
-)
+from .turner import TurnerBaseIE
+from ..utils import url_basename
 
 
-class CNNIE(InfoExtractor):
+class CNNIE(TurnerBaseIE):
     _VALID_URL = r'''(?x)https?://(?:(?P<sub_domain>edition|www|money)\.)?cnn\.com/(?:video/(?:data/.+?|\?)/)?videos?/
         (?P<path>.+?/(?P<title>[^/]+?)(?:\.(?:[a-z\-]+)|(?=&)))'''
 
@@ -18,43 +15,50 @@ class CNNIE(InfoExtractor):
         'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
         'md5': '3e6121ea48df7e2259fe73a0628605c4',
         'info_dict': {
-            'id': 'sports/2013/06/09/nadal-1-on-1.cnn',
+            'id': 'nadal-1-on-1',
             'ext': 'mp4',
             'title': 'Nadal wins 8th French Open title',
             'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
             'duration': 135,
             'upload_date': '20130609',
         },
+        'expected_warnings': ['Failed to download m3u8 information'],
     }, {
         'url': 'http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29',
         'md5': 'b5cc60c60a3477d185af8f19a2a26f4e',
         'info_dict': {
-            'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
+            'id': 'sot-student-gives-epic-speech',
             'ext': 'mp4',
             'title': "Student's epic speech stuns new freshmen",
             'description': "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
             'upload_date': '20130821',
-        }
+        },
+        'expected_warnings': ['Failed to download m3u8 information'],
     }, {
         'url': 'http://www.cnn.com/video/data/2.0/video/living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln.html',
         'md5': 'f14d02ebd264df951feb2400e2c25a1b',
         'info_dict': {
-            'id': 'living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln',
+            'id': 'growing-america-nashville-salemtown-board-episode-1',
             'ext': 'mp4',
             'title': 'Nashville Ep. 1: Hand crafted skateboards',
             'description': 'md5:e7223a503315c9f150acac52e76de086',
             'upload_date': '20141222',
-        }
+        },
+        'expected_warnings': ['Failed to download m3u8 information'],
     }, {
         'url': 'http://money.cnn.com/video/news/2016/08/19/netflix-stunning-stats.cnnmoney/index.html',
         'md5': '52a515dc1b0f001cd82e4ceda32be9d1',
         'info_dict': {
-            'id': '/video/news/2016/08/19/netflix-stunning-stats.cnnmoney',
+            'id': 'netflix-stunning-stats',
             'ext': 'mp4',
             'title': '5 stunning stats about Netflix',
             'description': 'Did you know that Netflix has more than 80 million members? Here are five facts about the online video distributor that you probably didn\'t know.',
             'upload_date': '20160819',
-        }
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
     }, {
         'url': 'http://cnn.com/video/?/video/politics/2015/03/27/pkg-arizona-senator-church-attendance-mandatory.ktvk',
         'only_matching': True,
@@ -84,67 +88,12 @@ class CNNIE(InfoExtractor):
         if sub_domain not in ('money', 'edition'):
             sub_domain = 'edition'
         config = self._CONFIG[sub_domain]
-        info_url = config['data_src'] % path
-        info = self._download_xml(info_url, page_title)
-
-        formats = []
-        rex = re.compile(r'''(?x)
-            (?P<width>[0-9]+)x(?P<height>[0-9]+)
-            (?:_(?P<bitrate>[0-9]+)k)?
-        ''')
-        for f in info.findall('files/file'):
-            video_url = config['media_src'] + f.text.strip()
-            fdct = {
-                'format_id': f.attrib['bitrate'],
-                'url': video_url,
-            }
-
-            mf = rex.match(f.attrib['bitrate'])
-            if mf:
-                fdct['width'] = int(mf.group('width'))
-                fdct['height'] = int(mf.group('height'))
-                fdct['tbr'] = int_or_none(mf.group('bitrate'))
-            else:
-                mf = rex.search(f.text)
-                if mf:
-                    fdct['width'] = int(mf.group('width'))
-                    fdct['height'] = int(mf.group('height'))
-                    fdct['tbr'] = int_or_none(mf.group('bitrate'))
-                else:
-                    mi = re.match(r'ios_(audio|[0-9]+)$', f.attrib['bitrate'])
-                    if mi:
-                        if mi.group(1) == 'audio':
-                            fdct['vcodec'] = 'none'
-                            fdct['ext'] = 'm4a'
-                        else:
-                            fdct['tbr'] = int(mi.group(1))
-
-            formats.append(fdct)
-
-        self._sort_formats(formats)
-
-        thumbnails = [{
-            'height': int(t.attrib['height']),
-            'width': int(t.attrib['width']),
-            'url': t.text,
-        } for t in info.findall('images/image')]
-
-        metas_el = info.find('metas')
-        upload_date = (
-            metas_el.attrib.get('version') if metas_el is not None else None)
-
-        duration_el = info.find('length')
-        duration = parse_duration(duration_el.text)
-
-        return {
-            'id': info.attrib['id'],
-            'title': info.find('headline').text,
-            'formats': formats,
-            'thumbnails': thumbnails,
-            'description': info.find('description').text,
-            'duration': duration,
-            'upload_date': upload_date,
-        }
+        return self._extract_cvp_info(
+            config['data_src'] % path, page_title, {
+                'default': {
+                    'media_src': config['media_src'],
+                }
+            })
 
 
 class CNNBlogsIE(InfoExtractor):
index d896b0d04810655c1d7c993819b88e7b32029832..aabd5b6703985fbd8d23c9f4655443a6027371ae 100644 (file)
@@ -1,25 +1,20 @@
 from __future__ import unicode_literals
 
 import functools
-import os.path
 import re
 
-from .common import InfoExtractor
+from .turner import TurnerBaseIE
 from ..compat import (
     compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
-    int_or_none,
     OnDemandPagedList,
-    parse_duration,
     remove_start,
-    xpath_text,
-    xpath_attr,
 )
 
 
-class NBAIE(InfoExtractor):
+class NBAIE(TurnerBaseIE):
     _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?P<path>(?:[^/]+/)+(?P<id>[^?]*?))/?(?:/index\.html)?(?:\?.*)?$'
     _TESTS = [{
         'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
@@ -59,7 +54,7 @@ class NBAIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Practice: Doc Rivers - 2/16/16',
             'description': 'Head Coach Doc Rivers addresses the media following practice.',
-            'upload_date': '20160217',
+            'upload_date': '20160216',
             'timestamp': 1455672000,
         },
         'params': {
@@ -80,7 +75,7 @@ class NBAIE(InfoExtractor):
     }, {
         'url': 'http://www.nba.com/timberwolves/wiggins-shootaround#',
         'info_dict': {
-            'id': 'Wigginsmp4',
+            'id': 'Wigginsmp4-3462601',
             'ext': 'mp4',
             'title': 'Shootaround Access - Dec. 12 | Andrew Wiggins',
             'description': 'Wolves rookie Andrew Wiggins addresses the media after Friday\'s shootaround.',
@@ -145,53 +140,12 @@ class NBAIE(InfoExtractor):
             if path.startswith('video/teams'):
                 path = 'video/channels/proxy/' + path[6:]
 
-        video_info = self._download_xml('http://www.nba.com/%s.xml' % path, video_id)
-        video_id = os.path.splitext(xpath_text(video_info, 'slug'))[0]
-        title = xpath_text(video_info, 'headline')
-        description = xpath_text(video_info, 'description')
-        duration = parse_duration(xpath_text(video_info, 'length'))
-        timestamp = int_or_none(xpath_attr(video_info, 'dateCreated', 'uts'))
-
-        thumbnails = []
-        for image in video_info.find('images'):
-            thumbnails.append({
-                'id': image.attrib.get('cut'),
-                'url': image.text,
-                'width': int_or_none(image.attrib.get('width')),
-                'height': int_or_none(image.attrib.get('height')),
+        return self._extract_cvp_info(
+            'http://www.nba.com/%s.xml' % path, video_id, {
+                'default': {
+                    'media_src': 'http://nba.cdn.turner.com/nba/big',
+                },
+                'm3u8': {
+                    'media_src': 'http://nbavod-f.akamaihd.net',
+                },
             })
-
-        formats = []
-        for video_file in video_info.findall('.//file'):
-            video_url = video_file.text
-            if video_url.startswith('/'):
-                continue
-            if video_url.endswith('.m3u8'):
-                formats.extend(self._extract_m3u8_formats(video_url, video_id, ext='mp4', m3u8_id='hls', fatal=False))
-            elif video_url.endswith('.f4m'):
-                formats.extend(self._extract_f4m_formats(video_url + '?hdcore=3.4.1.1', video_id, f4m_id='hds', fatal=False))
-            else:
-                key = video_file.attrib.get('bitrate')
-                format_info = {
-                    'format_id': key,
-                    'url': video_url,
-                }
-                mobj = re.search(r'(\d+)x(\d+)(?:_(\d+))?', key)
-                if mobj:
-                    format_info.update({
-                        'width': int(mobj.group(1)),
-                        'height': int(mobj.group(2)),
-                        'tbr': int_or_none(mobj.group(3)),
-                    })
-                formats.append(format_info)
-        self._sort_formats(formats)
-
-        return {
-            'id': video_id,
-            'title': title,
-            'description': description,
-            'duration': duration,
-            'timestamp': timestamp,
-            'thumbnails': thumbnails,
-            'formats': formats,
-        }
diff --git a/youtube_dl/extractor/turner.py b/youtube_dl/extractor/turner.py
new file mode 100644 (file)
index 0000000..0d4271f
--- /dev/null
@@ -0,0 +1,163 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    xpath_text,
+    int_or_none,
+    determine_ext,
+    parse_duration,
+    xpath_attr,
+    update_url_query,
+)
+
+
+class TurnerBaseIE(InfoExtractor):
+    def _extract_cvp_info(self, data_src, video_id, path_data={}):
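+        # path_data maps a feed path type ('default', 'secure' or a file
+        # extension such as 'm3u8') to a dict with a 'media_src' base URL
+        # and, for tokenized feeds, a 'tokenizer_src' auth endpoint.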
+        video_data = self._download_xml(data_src, video_id)
+        video_id = video_data.attrib['id'].split('/')[-1].split('.')[0]
+        title = xpath_text(video_data, 'headline', fatal=True)
+        # rtmp_src = xpath_text(video_data, 'akamai/src')
+        # if rtmp_src:
+        #     splited_rtmp_src = rtmp_src.split(',')
+        #     if len(splited_rtmp_src) == 2:
+        #         rtmp_src = splited_rtmp_src[1]
+        # aifp = xpath_text(video_data, 'akamai/aifp', default='')
+
+        tokens = {}
+        urls = []
+        formats = []
+        rex = re.compile(r'''(?x)
+            (?P<width>[0-9]+)x(?P<height>[0-9]+)
+            (?:_(?P<bitrate>[0-9]+))?
+        ''')
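+        # Each <files>/<file> node holds either an absolute URL or a relative
+        # path resolved against the matching media_src base from path_data;
+        # '/secure/' paths additionally get an Akamai hdnea= token appended.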
+        for video_file in video_data.findall('files/file'):
+            video_url = video_file.text.strip()
+            if not video_url:
+                continue
+            ext = determine_ext(video_url)
+            if video_url.startswith('/mp4:protected/'):
+                continue
+                # TODO Correct extraction for these files
+                # protected_path_data = path_data.get('protected')
+                # if not protected_path_data or not rtmp_src:
+                #     continue
+                # protected_path = self._search_regex(
+                #     r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
+                # auth = self._download_webpage(
+                #     protected_path_data['tokenizer_src'], query={
+                #         'path': protected_path,
+                #         'videoId': video_id,
+                #         'aifp': aifp,
+                #     })
+                # token = xpath_text(auth, 'token')
+                # if not token:
+                #     continue
+                # video_url = rtmp_src + video_url + '?' + token
+            elif video_url.startswith('/secure/'):
+                secure_path_data = path_data.get('secure')
+                if not secure_path_data:
+                    continue
+                video_url = secure_path_data['media_src'] + video_url
+                secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
+                token = tokens.get(secure_path)
+                if not token:
+                    auth = self._download_xml(
+                        secure_path_data['tokenizer_src'], video_id, query={
+                            'path': secure_path,
+                            'videoId': video_id,
+                        })
+                    token = xpath_text(auth, 'token')
+                    if not token:
+                        continue
+                    tokens[secure_path] = token
+                video_url = video_url + '?hdnea=' + token
+            elif not re.match('https?://', video_url):
+                base_path_data = path_data.get(ext, path_data.get('default', {}))
+                media_src = base_path_data.get('media_src')
+                if not media_src:
+                    continue
+                video_url = media_src + video_url
+            if video_url in urls:
+                continue
+            urls.append(video_url)
+            format_id = video_file.attrib['bitrate']
+            if ext == 'smil':
+                formats.extend(self._extract_smil_formats(video_url, video_id, fatal=False))
+            elif ext == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    video_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
+            elif ext == 'f4m':
+                formats.extend(self._extract_f4m_formats(
+                    update_url_query(video_url, {'hdcore': '3.7.0'}),
+                    video_id, f4m_id=format_id, fatal=False))
+            else:
+                f = {
+                    'format_id': format_id,
+                    'url': video_url,
+                    'ext': ext,
+                }
+                mobj = rex.search(format_id + video_url)
+                if mobj:
+                    f.update({
+                        'width': int(mobj.group('width')),
+                        'height': int(mobj.group('height')),
+                        'tbr': int_or_none(mobj.group('bitrate')),
+                    })
+                elif format_id.isdigit():
+                    f['tbr'] = int(format_id)
+                else:
+                    mobj = re.match(r'ios_(audio|[0-9]+)$', format_id)
+                    if mobj:
+                        if mobj.group(1) == 'audio':
+                            f.update({
+                                'vcodec': 'none',
+                                'ext': 'm4a',
+                            })
+                        else:
+                            f['tbr'] = int(mobj.group(1))
+                formats.append(f)
+        self._sort_formats(formats)
+
+        subtitles = {}
+        for source in video_data.findall('closedCaptions/source'):
+            for track in source.findall('track'):
+                source_url = source.get('url')
+                if not source_url:
+                    continue
+                subtitles.setdefault(source.get('lang') or source.get('label') or 'en', []).append({
+                    'url': source_url,
+                    'ext': {
+                        'scc': 'scc',
+                        'webvtt': 'vtt',
+                        'smptett': 'tt',
+                    }.get(source.get('format'))
+                })
+
+        thumbnails = [{
+            'id': image.get('cut'),
+            'url': image.text,
+            'width': int_or_none(image.get('width')),
+            'height': int_or_none(image.get('height')),
+        } for image in video_data.findall('images/image')]
+
+        timestamp = None
+        if 'cnn.com' not in data_src:
+            timestamp = int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'subtitles': subtitles,
+            'thumbnails': thumbnails,
+            'description': xpath_text(video_data, 'description'),
+            'duration': parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')),
+            'timestamp': timestamp,
+            'upload_date': xpath_attr(video_data, 'metas', 'version'),
+            'series': xpath_text(video_data, 'showTitle'),
+            'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
+            'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
+        }
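
For context, a minimal sketch of how a site extractor is expected to use the new base class, modeled on the nba.py call site above. The class name and _VALID_URL below are hypothetical and for illustration only; the feed URL template and media_src values are the ones visible in this diff.

    from .turner import TurnerBaseIE

    class ExampleTurnerSiteIE(TurnerBaseIE):  # hypothetical subclass, illustration only
        _VALID_URL = r'https?://(?:www\.)?nba\.com/video/(?P<id>[^/?#]+)'  # illustrative pattern

        def _real_extract(self, url):
            video_id = self._match_id(url)
            # _extract_cvp_info downloads the CVP XML feed, resolves relative
            # file paths against the given media_src bases and returns a full
            # info dict (formats, subtitles, thumbnails, metadata).
            return self._extract_cvp_info(
                'http://www.nba.com/%s.xml' % video_id, video_id, {
                    'default': {
                        'media_src': 'http://nba.cdn.turner.com/nba/big',
                    },
                    'm3u8': {
                        'media_src': 'http://nbavod-f.akamaihd.net',
                    },
                })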