Refactor fragments interface and dash segments downloader
diff --git a/youtube_dl/extractor/archiveorg.py b/youtube_dl/extractor/archiveorg.py
index a8394bfb0d35487d4a0aa038de05805b074b64ca..486dff82d00a44a13384e3b7d8ff1b0189da8451 100644
--- a/youtube_dl/extractor/archiveorg.py
+++ b/youtube_dl/extractor/archiveorg.py
@@ -1,68 +1,65 @@
-import json
-import re
+from __future__ import unicode_literals
 
-from .common import InfoExtractor
+from .jwplatform import JWPlatformBaseIE
 from ..utils import (
-    determine_ext,
     unified_strdate,
+    clean_html,
 )
 
 
-class ArchiveOrgIE(InfoExtractor):
+class ArchiveOrgIE(JWPlatformBaseIE):
     IE_NAME = 'archive.org'
     IE_DESC = 'archive.org videos'
-    _VALID_URL = r'(?:https?://)?(?:www\.)?archive\.org/details/(?P<id>[^?/]+)(?:[?].*)?$'
-    _TEST = {
-        u"url": u"http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect",
-        u'file': u'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv',
-        u'md5': u'8af1d4cf447933ed3c7f4871162602db',
-        u'info_dict': {
-            u"title": u"1968 Demo - FJCC Conference Presentation Reel #1",
-            u"description": u"Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>",
-            u"upload_date": u"19681210",
-            u"uploader": u"SRI International"
+    _VALID_URL = r'https?://(?:www\.)?archive\.org/(?:details|embed)/(?P<id>[^/?#]+)(?:[?].*)?$'
+    _TESTS = [{
+        'url': 'http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
+        'md5': '8af1d4cf447933ed3c7f4871162602db',
+        'info_dict': {
+            'id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect',
+            'ext': 'ogg',
+            'title': '1968 Demo - FJCC Conference Presentation Reel #1',
+            'description': 'md5:da45c349df039f1cc8075268eb1b5c25',
+            'upload_date': '19681210',
+            'uploader': 'SRI International'
         }
-    }
-
+    }, {
+        'url': 'https://archive.org/details/Cops1922',
+        'md5': 'bc73c8ab3838b5a8fc6c6651fa7b58ba',
+        'info_dict': {
+            'id': 'Cops1922',
+            'ext': 'mp4',
+            'title': 'Buster Keaton\'s "Cops" (1922)',
+            'description': 'md5:b4544662605877edd99df22f9620d858',
+        }
+    }, {
+        'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        json_url = url + (u'?' if u'?' in url else '&') + u'output=json'
-        json_data = self._download_webpage(json_url, video_id)
-        data = json.loads(json_data)
-
-        title = data['metadata']['title'][0]
-        description = data['metadata']['description'][0]
-        uploader = data['metadata']['creator'][0]
-        upload_date = unified_strdate(data['metadata']['date'][0])
-
-        formats = [{
-                'format': fdata['format'],
-                'url': 'http://' + data['server'] + data['dir'] + fn,
-                'file_size': int(fdata['size']),
-            }
-            for fn,fdata in data['files'].items()
-            if 'Video' in fdata['format']]
-        formats.sort(key=lambda fdata: fdata['file_size'])
-        for f in formats:
-            f['ext'] = determine_ext(f['url'])
-
-        info = {
-            '_type': 'video',
-            'id': video_id,
-            'title': title,
-            'formats': formats,
-            'description': description,
-            'uploader': uploader,
-            'upload_date': upload_date,
-        }
-        thumbnail = data.get('misc', {}).get('image')
-        if thumbnail:
-            info['thumbnail'] = thumbnail
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(
+            'http://archive.org/embed/' + video_id, video_id)
+        jwplayer_playlist = self._parse_json(self._search_regex(
+            r"(?s)Play\('[^']+'\s*,\s*(\[.+\])\s*,\s*{.*?}\);",
+            webpage, 'jwplayer playlist'), video_id)
+        info = self._parse_jwplayer_data(
+            {'playlist': jwplayer_playlist}, video_id, base_url=url)
 
-        # TODO: Remove when #980 has been merged
-        info.update(formats[-1])
+        def get_optional(metadata, field):
+            return metadata.get(field, [None])[0]
 
+        metadata = self._download_json(
+            'http://archive.org/details/' + video_id, video_id, query={
+                'output': 'json',
+            })['metadata']
+        info.update({
+            'title': get_optional(metadata, 'title') or info.get('title'),
+            'description': clean_html(get_optional(metadata, 'description')),
+        })
+        if info.get('_type') != 'playlist':
+            info.update({
+                'uploader': get_optional(metadata, 'creator'),
+                'upload_date': unified_strdate(get_optional(metadata, 'date')),
+            })
         return info
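
For reference, a minimal sketch (not part of the patch) of exercising the refactored extractor end-to-end through the public youtube_dl.YoutubeDL API; the URL is taken from the _TESTS entries above, and the printed fields are the ones populated in ArchiveOrgIE._real_extract():

    from __future__ import unicode_literals

    import youtube_dl

    # Only run metadata extraction, no media download.
    ydl_opts = {
        'skip_download': True,
        'quiet': True,
    }

    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        info = ydl.extract_info(
            'https://archive.org/details/Cops1922', download=False)
        # Title/description come from the archive.org JSON metadata,
        # formats from the jwplayer playlist on the embed page.
        print(info.get('id'), info.get('ext'), info.get('title'))
        print(info.get('uploader'), info.get('upload_date'))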