Merge branch 'vgtv' of https://github.com/mrkolby/youtube-dl into mrkolby-vgtv
diff --git a/youtube_dl/extractor/livestream.py b/youtube_dl/extractor/livestream.py
index 444771bd38ec1c8056493e1bbceeef0ff7799772..5161474171b2a6a53389477275f54480a02d1240 100644
--- a/youtube_dl/extractor/livestream.py
+++ b/youtube_dl/extractor/livestream.py
@@ -19,7 +19,7 @@ from ..utils import (
 class LivestreamIE(InfoExtractor):
     IE_NAME = 'livestream'
     _VALID_URL = r'http://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$'
-    _TEST = {
+    _TESTS = [{
         'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
         'md5': '53274c76ba7754fb0e8d072716f2292b',
         'info_dict': {
@@ -31,7 +31,56 @@ class LivestreamIE(InfoExtractor):
             'view_count': int,
             'thumbnail': 're:^http://.*\.jpg$'
         }
-    }
+    }, {
+        'url': 'http://new.livestream.com/tedx/cityenglish',
+        'info_dict': {
+            'title': 'TEDCity2.0 (English)',
+        },
+        'playlist_mincount': 4,
+    }]
+
+    def _parse_smil(self, video_id, smil_url):
+        formats = []
+        _SWITCH_XPATH = (
+            './/{http://www.w3.org/2001/SMIL20/Language}body/'
+            '{http://www.w3.org/2001/SMIL20/Language}switch')
+        smil_doc = self._download_xml(
+            smil_url, video_id,
+            note='Downloading SMIL information',
+            errnote='Unable to download SMIL information',
+            fatal=False)
+        if smil_doc is False:  # Download failed
+            return formats
+        title_node = find_xpath_attr(
+            smil_doc, './/{http://www.w3.org/2001/SMIL20/Language}meta',
+            'name', 'title')
+        if title_node is None:
+            self.report_warning('Cannot find SMIL id')
+            switch_node = smil_doc.find(_SWITCH_XPATH)
+        else:
+            title_id = title_node.attrib['content']
+            switch_node = find_xpath_attr(
+                smil_doc, _SWITCH_XPATH, 'id', title_id)
+        if switch_node is None:
+            raise ExtractorError('Cannot find switch node')
+        video_nodes = switch_node.findall(
+            '{http://www.w3.org/2001/SMIL20/Language}video')
+
+        for vn in video_nodes:
+            tbr = int_or_none(vn.attrib.get('system-bitrate'))
+            furl = (
+                'http://livestream-f.akamaihd.net/%s?v=3.0.3&fp=WIN%%2014,0,0,145' %
+                (vn.attrib['src']))
+            if 'clipBegin' in vn.attrib:
+                furl += '&ssek=' + vn.attrib['clipBegin']
+            formats.append({
+                'url': furl,
+                'format_id': 'smil_%d' % tbr,
+                'ext': 'flv',
+                'tbr': tbr,
+                'preference': -1000,
+            })
+        return formats
 
     def _extract_video_info(self, video_data):
         video_id = compat_str(video_data['id'])
@@ -49,39 +98,7 @@ class LivestreamIE(InfoExtractor):
 
         smil_url = video_data.get('smil_url')
         if smil_url:
-            _SWITCH_XPATH = (
-                './/{http://www.w3.org/2001/SMIL20/Language}body/'
-                '{http://www.w3.org/2001/SMIL20/Language}switch')
-            smil_doc = self._download_xml(
-                smil_url, video_id, note='Downloading SMIL information')
-
-            title_node = find_xpath_attr(
-                smil_doc, './/{http://www.w3.org/2001/SMIL20/Language}meta',
-                'name', 'title')
-            if title_node is None:
-                self.report_warning('Cannot find SMIL id')
-                switch_node = smil_doc.find(_SWITCH_XPATH)
-            else:
-                title_id = title_node.attrib['content']
-                switch_node = find_xpath_attr(
-                    smil_doc, _SWITCH_XPATH, 'id', title_id)
-            if switch_node is None:
-                raise ExtractorError('Cannot find switch node')
-            video_nodes = switch_node.findall(
-                '{http://www.w3.org/2001/SMIL20/Language}video')
-
-            for vn in video_nodes:
-                tbr = int_or_none(vn.attrib.get('system-bitrate'))
-                furl = (
-                    'http://livestream-f.akamaihd.net/%s?v=3.0.3&fp=WIN%%2014,0,0,145&seek=%s' %
-                    (vn.attrib['src'], vn.attrib['clipBegin']))
-                formats.append({
-                    'url': furl,
-                    'format_id': 'smil_%d' % tbr,
-                    'ext': 'flv',
-                    'tbr': tbr,
-                    'preference': -1000,
-                })
+            formats.extend(self._parse_smil(video_id, smil_url))
         self._sort_formats(formats)
 
         return {
@@ -100,23 +117,37 @@ class LivestreamIE(InfoExtractor):
         event_name = mobj.group('event_name')
         webpage = self._download_webpage(url, video_id or event_name)
 
+        og_video = self._og_search_video_url(
+            webpage, 'player url', fatal=False, default=None)
+        if og_video is not None:
+            query_str = compat_urllib_parse_urlparse(og_video).query
+            query = compat_urlparse.parse_qs(query_str)
+            if 'play_url' in query:
+                api_url = query['play_url'][0].replace('.smil', '')
+                info = json.loads(self._download_webpage(
+                    api_url, video_id, 'Downloading video info'))
+                return self._extract_video_info(info)
+
+        config_json = self._search_regex(
+            r'window.config = ({.*?});', webpage, 'window config')
+        info = json.loads(config_json)['event']
+
+        def is_relevant(vdata, vid):
+            result = vdata['type'] == 'video'
+            if video_id is not None:
+                result = result and compat_str(vdata['data']['id']) == vid
+            return result
+
+        videos = [self._extract_video_info(video_data['data'])
+                  for video_data in info['feed']['data']
+                  if is_relevant(video_data, video_id)]
         if video_id is None:
             # This is an event page:
-            config_json = self._search_regex(
-                r'window.config = ({.*?});', webpage, 'window config')
-            info = json.loads(config_json)['event']
-            videos = [self._extract_video_info(video_data['data'])
-                for video_data in info['feed']['data']
-                if video_data['type'] == 'video']
             return self.playlist_result(videos, info['id'], info['full_name'])
         else:
-            og_video = self._og_search_video_url(webpage, 'player url')
-            query_str = compat_urllib_parse_urlparse(og_video).query
-            query = compat_urlparse.parse_qs(query_str)
-            api_url = query['play_url'][0].replace('.smil', '')
-            info = json.loads(self._download_webpage(
-                api_url, video_id, 'Downloading video info'))
-            return self._extract_video_info(info)
+            if not videos:
+                raise ExtractorError('Cannot find video %s' % video_id)
+            return videos[0]
 
 
 # The original version of Livestream uses a different system
@@ -126,7 +157,7 @@ class LivestreamOriginalIE(InfoExtractor):
         (?P<user>[^/]+)/(?P<type>video|folder)
         (?:\?.*?Id=|/)(?P<id>.*?)(&|$)
         '''
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
         'info_dict': {
             'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
@@ -137,7 +168,13 @@ class LivestreamOriginalIE(InfoExtractor):
             # rtmp
             'skip_download': True,
         },
-    }
+    }, {
+        'url': 'https://www.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
+        'info_dict': {
+            'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3',
+        },
+        'playlist_mincount': 4,
+    }]
 
     def _extract_video(self, user, video_id):
         api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id)
@@ -160,15 +197,19 @@ class LivestreamOriginalIE(InfoExtractor):
 
     def _extract_folder(self, url, folder_id):
         webpage = self._download_webpage(url, folder_id)
-        urls = orderedSet(re.findall(r'<a href="(https?://livestre\.am/.*?)"', webpage))
+        paths = orderedSet(re.findall(
+            r'''(?x)(?:
+                <li\s+class="folder">\s*<a\s+href="|
+                <a\s+href="(?=https?://livestre\.am/)
+            )([^"]+)"''', webpage))
 
         return {
             '_type': 'playlist',
             'id': folder_id,
             'entries': [{
                 '_type': 'url',
-                'url': video_url,
-            } for video_url in urls],
+                'url': compat_urlparse.urljoin(url, p),
+            } for p in paths],
         }
 
     def _real_extract(self, url):
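
For context on the new _parse_smil helper added above: a minimal, standalone sketch (not part of the patch) of the namespace-qualified ElementTree lookups it relies on. The sample SMIL document, switch id, src paths and bitrates below are invented for illustration; only the SMIL namespace URL and the body/switch/video layout come from the extractor code.

    import xml.etree.ElementTree as ET

    SMIL_NS = '{http://www.w3.org/2001/SMIL20/Language}'

    # Invented sample document; real Livestream SMIL files carry Akamai
    # stream paths and real system-bitrate values.
    sample = '''\
    <smil xmlns="http://www.w3.org/2001/SMIL20/Language">
      <body>
        <switch id="sample-switch">
          <video src="events/clip_high.mp4" system-bitrate="1500000" clipBegin="10"/>
          <video src="events/clip_low.mp4" system-bitrate="500000" clipBegin="10"/>
        </switch>
      </body>
    </smil>'''

    doc = ET.fromstring(sample)
    # Same kind of namespace-qualified lookup _parse_smil does with
    # smil_doc.find(_SWITCH_XPATH).
    switch = doc.find('.//%sbody/%sswitch' % (SMIL_NS, SMIL_NS))
    for video in switch.findall(SMIL_NS + 'video'):
        # Each <video> child becomes one format entry, keyed by its bitrate.
        print(video.attrib['src'], int(video.attrib['system-bitrate']))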