Merge remote-tracking branch 'Dineshs91/f4m-2.0'
diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py
index 7a154e94abfb8c40093705111a25f38e3ce9016e..8da76ae45aa3ec012f51d0a3d832f56bd0efe77f 100644
--- a/youtube_dl/extractor/npo.py
+++ b/youtube_dl/extractor/npo.py
@@ -7,6 +7,9 @@ from ..utils import (
     unified_strdate,
     parse_duration,
     qualities,
+    strip_jsonp,
+    url_basename,
+    fix_xml_ampersands,
 )
 
 
@@ -49,18 +52,34 @@ class NPOIE(InfoExtractor):
                 'upload_date': '20130225',
                 'duration': 3000,
             },
-        }
+        },
+        {
+            'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
+            'info_dict': {
+                'id': 'WO_VPRO_043706',
+                'ext': 'wmv',
+                'title': 'De nieuwe mens - Deel 1',
+                'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
+                'duration': 4680,
+            },
+            'params': {
+                # mplayer mms download
+                'skip_download': True,
+            }
+        },
     ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
+        return self._get_info(video_id)
 
+    def _get_info(self, video_id):
         metadata = self._download_json(
             'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
             video_id,
             # We have to remove the javascript callback
-            transform_source=lambda j: re.sub(r'parseMetadata\((.*?)\);\n//.*$', r'\1', j)
+            transform_source=strip_jsonp,
         )
         token_page = self._download_webpage(
             'http://ida.omroep.nl/npoplayer/i.js',
@@ -70,31 +89,58 @@ class NPOIE(InfoExtractor):
         token = self._search_regex(r'npoplayer\.token = "(.+?)"', token_page, 'token')
 
         formats = []
-        quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
-        for format_id in metadata['pubopties']:
-            format_info = self._download_json(
-                'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s' % (video_id, format_id, token),
-                video_id, 'Downloading %s JSON' % format_id)
-            if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
-                continue
-            streams = format_info.get('streams')
-            if streams:
-                video_info = self._download_json(
-                    streams[0] + '&type=json',
-                    video_id, 'Downloading %s stream JSON' % format_id)
-            else:
-                video_info = format_info
-            video_url = video_info.get('url')
-            if not video_url:
-                continue
-            if format_id == 'adaptive':
-                formats.extend(self._extract_m3u8_formats(video_url, video_id))
-            else:
+
+        pubopties = metadata.get('pubopties')
+        if pubopties:
+            quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
+            for format_id in pubopties:
+                format_info = self._download_json(
+                    'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
+                    % (video_id, format_id, token),
+                    video_id, 'Downloading %s JSON' % format_id)
+                if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
+                    continue
+                streams = format_info.get('streams')
+                if streams:
+                    video_info = self._download_json(
+                        streams[0] + '&type=json',
+                        video_id, 'Downloading %s stream JSON' % format_id)
+                else:
+                    video_info = format_info
+                video_url = video_info.get('url')
+                if not video_url:
+                    continue
+                if format_id == 'adaptive':
+                    formats.extend(self._extract_m3u8_formats(video_url, video_id))
+                else:
+                    formats.append({
+                        'url': video_url,
+                        'format_id': format_id,
+                        'quality': quality(format_id),
+                    })
+
+        streams = metadata.get('streams')
+        if streams:
+            for i, stream in enumerate(streams):
+                stream_url = stream.get('url')
+                if not stream_url:
+                    continue
+                asx = self._download_xml(
+                    stream_url, video_id,
+                    'Downloading stream %d ASX playlist' % i,
+                    transform_source=fix_xml_ampersands)
+                ref = asx.find('./ENTRY/Ref')
+                if ref is None:
+                    continue
+                video_url = ref.get('href')
+                if not video_url:
+                    continue
                 formats.append({
                     'url': video_url,
-                    'format_id': format_id,
-                    'quality': quality(format_id),
+                    'ext': stream.get('formaat', 'asf'),
+                    'quality': stream.get('kwaliteit'),
                 })
+
         self._sort_formats(formats)
 
         return {
@@ -106,3 +152,30 @@ class NPOIE(InfoExtractor):
             'duration': parse_duration(metadata.get('tijdsduur')),
             'formats': formats,
         }
+
+
+class TegenlichtVproIE(NPOIE):
+    IE_NAME = 'tegenlicht.vpro.nl'
+    _VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?'
+
+    _TESTS = [
+        {
+            'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
+            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
+            'info_dict': {
+                'id': 'VPWON_1169289',
+                'ext': 'm4v',
+                'title': 'Tegenlicht',
+                'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
+                'upload_date': '20130225',
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        name = url_basename(url)
+        webpage = self._download_webpage(url, name)
+        urn = self._html_search_meta('mediaurn', webpage)
+        info_page = self._download_json(
+            'http://rs.vpro.nl/v2/api/media/%s.json' % urn, name)
+        return self._get_info(info_page['mid'])
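
For reference, a minimal sketch (not part of the patch) of how the three helpers newly imported from youtube_dl.utils behave; the sample inputs and the printed outputs are illustrative assumptions, not data taken from the NPO/VPRO services themselves.

    from youtube_dl.utils import strip_jsonp, url_basename, fix_xml_ampersands

    # strip_jsonp() drops a JSONP callback wrapper and returns the bare JSON,
    # replacing the hand-rolled parseMetadata regex removed from _get_info above.
    print(strip_jsonp('parseMetadata({"prid": "WO_VPRO_043706"});'))
    # {"prid": "WO_VPRO_043706"}

    # url_basename() returns the last path component of a URL; TegenlichtVproIE
    # uses it as a provisional id while downloading the episode page.
    print(url_basename('http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html'))
    # de-toekomst-komt-uit-afrika.html

    # fix_xml_ampersands() escapes bare '&' characters so that the ASX playlists
    # referenced in metadata['streams'] parse as well-formed XML.
    print(fix_xml_ampersands('<ENTRY><Ref href="mms://example.com/a.wmv?a=1&b=2"/></ENTRY>'))
    # <ENTRY><Ref href="mms://example.com/a.wmv?a=1&amp;b=2"/></ENTRY>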