X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2Fextractor%2Flibraryofcongress.py;h=40295a30b51f733b637c651cc8a434ede14f517a;hb=ec85ded83cbfa652ba94cb080aab52d8b270212a;hp=d311f994624d349d6c3e089753c9f01898bd6ec2;hpb=7f3c3dfa52769d1f44c1f1031449118c564a92bf;p=youtube-dl

diff --git a/youtube_dl/extractor/libraryofcongress.py b/youtube_dl/extractor/libraryofcongress.py
index d311f9946..40295a30b 100644
--- a/youtube_dl/extractor/libraryofcongress.py
+++ b/youtube_dl/extractor/libraryofcongress.py
@@ -1,31 +1,63 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
     float_or_none,
     int_or_none,
+    parse_filesize,
 )
 
 
 class LibraryOfCongressIE(InfoExtractor):
     IE_NAME = 'loc'
     IE_DESC = 'Library of Congress'
-    _VALID_URL = r'https?://(?:www\.)?loc\.gov/item/(?P<id>[0-9]+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?loc\.gov/(?:item/|today/cyberlc/feature_wdesc\.php\?.*\brec=)(?P<id>[0-9]+)'
+    _TESTS = [{
+        # embedded via <div class="media-player"
@@ -34,18 +66,20 @@ class LibraryOfCongressIE(InfoExtractor):
         media_id = self._search_regex(
             (r'id=(["\'])media-player-(?P<id>.+?)\1',
              r'<video[^>]+id=(["\'])uuid-(?P<id>.+?)\1',
-             r'<video[^>]+data-uuid=(["\'])(?P<id>.+?)\1'),
+             r'<video[^>]+data-uuid=(["\'])(?P<id>.+?)\1',
+             r'mediaObjectId\s*:\s*(["\'])(?P<id>.+?)\1'),
             webpage, 'media id', group='id')
 
-        data = self._parse_json(
-            self._download_webpage(
-                'https://media.loc.gov/services/v1/media?id=%s&context=json' % media_id,
-                video_id),
+        data = self._download_json(
+            'https://media.loc.gov/services/v1/media?id=%s&context=json' % media_id,
             video_id)['mediaObject']
 
         derivative = data['derivatives'][0]
         media_url = derivative['derivativeUrl']
 
+        title = derivative.get('shortName') or data.get('shortName') or self._og_search_title(
+            webpage)
+
         # Following algorithm was extracted from setAVSource js function
         # found in webpage
         media_url = media_url.replace('rtmp', 'https')
@@ -61,6 +95,7 @@ class LibraryOfCongressIE(InfoExtractor):
                 'format_id': 'hls',
                 'ext': 'mp4',
                 'protocol': 'm3u8_native',
+                'quality': 1,
             }]
         elif 'vod/mp3:' in media_url:
             formats = [{
@@ -68,17 +103,41 @@ class LibraryOfCongressIE(InfoExtractor):
                 'vcodec': 'none',
             }]
 
+        download_urls = set()
+        for m in re.finditer(
+                r'<option[^>]+value=(["\'])(?P<url>.+?)\1[^>]+data-file-download=[^>]+>\s*(?P<id>.+?)(?:(?:&nbsp;|\s+)\((?P<size>.+?)\))?\s*<', webpage):
+            format_id = m.group('id').lower()
+            if format_id == 'gif':
+                continue
+            download_url = m.group('url')
+            if download_url in download_urls:
+                continue
+            download_urls.add(download_url)
+            formats.append({
+                'url': download_url,
+                'format_id': format_id,
+                'filesize_approx': parse_filesize(m.group('size')),
+            })
+
         self._sort_formats(formats)
 
-        title = derivative.get('shortName') or data.get('shortName') or self._og_search_title(webpage)
         duration = float_or_none(data.get('duration'))
         view_count = int_or_none(data.get('viewCount'))
 
+        subtitles = {}
+        cc_url = data.get('ccUrl')
+        if cc_url:
+            subtitles.setdefault('en', []).append({
+                'url': cc_url,
+                'ext': 'ttml',
+            })
+
         return {
             'id': video_id,
             'title': title,
-            'thumbnail': self._og_search_thumbnail(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage, default=None),
             'duration': duration,
             'view_count': view_count,
             'formats': formats,
+            'subtitles': subtitles,
         }
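The download-link regex added in the last hunk is the densest part of the change. Below is a minimal standalone sketch of that same scraping loop, run against an invented HTML fragment; the markup and file names are illustrative only, not taken from a real loc.gov page. It assumes only Python's re module and youtube_dl.utils.parse_filesize, both of which the patch itself uses.

# Standalone sketch of the <option ... data-file-download> scraping loop
# added above. The HTML fragment is invented for illustration; real loc.gov
# markup may differ.
import re

from youtube_dl.utils import parse_filesize

SAMPLE_HTML = '''
<select>
  <option value="//loc.gov/media/example/video.mp4"
          data-file-download="true">MPEG4&nbsp;(16 MB)</option>
  <option value="//loc.gov/media/example/preview.gif"
          data-file-download="true">GIF&nbsp;(1 MB)</option>
</select>
'''

formats = []
download_urls = set()
for m in re.finditer(
        r'<option[^>]+value=(["\'])(?P<url>.+?)\1[^>]+data-file-download=[^>]+>\s*(?P<id>.+?)(?:(?:&nbsp;|\s+)\((?P<size>.+?)\))?\s*<',
        SAMPLE_HTML):
    format_id = m.group('id').lower()
    if format_id == 'gif':
        # the extractor skips animated GIF previews
        continue
    download_url = m.group('url')
    if download_url in download_urls:
        # de-duplicate repeated <option> entries
        continue
    download_urls.add(download_url)
    formats.append({
        'url': download_url,
        'format_id': format_id,
        'filesize_approx': parse_filesize(m.group('size')),
    })

print(formats)
# -> one entry for the MP4 link ('format_id': 'mpeg4'); the GIF entry is
# skipped and the '(16 MB)' suffix is converted to a byte count by
# parse_filesize.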