X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2Fextractor%2Flibraryofcongress.py;h=03f2051444d63bb046b6b5943cc5400085c6d8d3;hb=HEAD;hp=0c34dbce32d6eaa4f08ecbea8c97d9fbd2f4802e;hpb=9c3c447eb389726d98189d972a2d772ef729132d;p=youtube-dl

diff --git a/youtube_dl/extractor/libraryofcongress.py b/youtube_dl/extractor/libraryofcongress.py
index 0c34dbce3..03f205144 100644
--- a/youtube_dl/extractor/libraryofcongress.py
+++ b/youtube_dl/extractor/libraryofcongress.py
@@ -1,26 +1,66 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
-from ..utils import determine_ext
+from ..utils import (
+    determine_ext,
+    float_or_none,
+    int_or_none,
+    parse_filesize,
+)
 
 
 class LibraryOfCongressIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?loc\.gov/item/(?P<id>[0-9]+)'
+    IE_NAME = 'loc'
+    IE_DESC = 'Library of Congress'
+    _VALID_URL = r'https?://(?:www\.)?loc\.gov/(?:item/|today/cyberlc/feature_wdesc\.php\?.*\brec=)(?P<id>[0-9a-z_.]+)'
     _TESTS = [{
-        'url': 'http://loc.gov/item/90716351/',
+        # embedded via <div class="media-player"
+        media_id = self._search_regex(
+            (r'id=(["\'])media-player-(?P<id>.+?)\1',
+             r'<video[^>]+id=(["\'])uuid-(?P<id>.+?)\1',
+             r'<video[^>]+data-uuid=(["\'])(?P<id>.+?)\1',
+             r'mediaObjectId\s*:\s*(["\'])(?P<id>.+?)\1',
+             r'data-tab="share-media-(?P<id>[0-9A-F]{32})"'),
+            webpage, 'media id', group='id')
+
+        data = self._download_json(
+            'https://media.loc.gov/services/v1/media?id=%s&context=json' % media_id,
+            media_id)['mediaObject']
+
+        derivative = data['derivatives'][0]
+        media_url = derivative['derivativeUrl']
 
-        data = self._parse_json(self._download_webpage(
-            'https://media.loc.gov/services/v1/media?id=%s' % json_id,
-            video_id), video_id)
-        data = data['mediaObject']
+        title = derivative.get('shortName') or data.get('shortName') or self._og_search_title(
+            webpage)
 
-        media_url = data['derivatives'][0]['derivativeUrl']
+        # Following algorithm was extracted from setAVSource js function
+        # found in webpage
         media_url = media_url.replace('rtmp', 'https')
 
-        is_video = data['mediaType'].lower() == 'v'
-        if not determine_ext(media_url) in ('mp4', 'mp3'):
+        is_video = data.get('mediaType', 'v').lower() == 'v'
+        ext = determine_ext(media_url)
+        if ext not in ('mp4', 'mp3'):
             media_url += '.mp4' if is_video else '.mp3'
 
-        if media_url.index('vod/mp4:') > -1:
-            media_url = media_url.replace('vod/mp4:', 'hls-vod/media/') + '.m3u8'
-        elif url.index('vod/mp3:') > -1:
-            media_url = media_url.replace('vod/mp3:', '')
-
         formats = []
-        if determine_ext(media_url) == 'm3u8':
-            formats = self._extract_m3u8_formats(media_url, video_id, ext='mp4')
-        elif determine_ext(media_url) is 'mp3':
+        if '/vod/mp4:' in media_url:
             formats.append({
-                'url': media_url,
-                'ext': 'mp3',
+                'url': media_url.replace('/vod/mp4:', '/hls-vod/media/') + '.m3u8',
+                'format_id': 'hls',
+                'ext': 'mp4',
+                'protocol': 'm3u8_native',
+                'quality': 1,
+            })
+        http_format = {
+            'url': re.sub(r'(://[^/]+/)(?:[^/]+/)*(?:mp4|mp3):', r'\1', media_url),
+            'format_id': 'http',
+            'quality': 1,
+        }
+        if not is_video:
+            http_format['vcodec'] = 'none'
+        formats.append(http_format)
+
+        download_urls = set()
+        for m in re.finditer(
+                r'<option[^>]+value=(["\'])(?P<url>.+?)\1[^>]+data-file-download=[^>]+>\s*(?P<id>.+?)(?:(?:&nbsp;|\s+)\((?P<size>.+?)\))?\s*<', webpage):
+            format_id = m.group('id').lower()
+            if format_id in ('gif', 'jpeg'):
+                continue
+            download_url = m.group('url')
+            if download_url in download_urls:
+                continue
+            download_urls.add(download_url)
+            formats.append({
+                'url': download_url,
+                'format_id': format_id,
+                'filesize_approx': parse_filesize(m.group('size')),
+            })
+
+        self._sort_formats(formats)
+
+        duration = float_or_none(data.get('duration'))
+        view_count = int_or_none(data.get('viewCount'))
+
+        subtitles = {}
+        cc_url = data.get('ccUrl')
+        if cc_url:
+            subtitles.setdefault('en', []).append({
+                'url': cc_url,
+                'ext': 'ttml',
             })
 
         return {
             'id': video_id,
-            'thumbnail': self._og_search_thumbnail(webpage),
-            'title': self._og_search_title(webpage),
+            'title': title,
+            'thumbnail': self._og_search_thumbnail(webpage, default=None),
+            'duration': duration,
+            'view_count': view_count,
             'formats': formats,
+            'subtitles': subtitles,
         }
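
For reference, the URL rewriting that the added code performs (the in-code comment credits the setAVSource JS function on the page) can be shown as a minimal standalone sketch. The helper name and the sample derivative URL are hypothetical; only the transformation rules are taken from the patch above.

import re


def candidate_urls(derivative_url, is_video=True):
    # Rough standalone sketch of the rewrite done in _real_extract above.
    # The media.loc.gov API hands back an rtmp-style derivative URL.
    media_url = derivative_url.replace('rtmp', 'https')
    if not media_url.endswith(('.mp4', '.mp3')):
        media_url += '.mp4' if is_video else '.mp3'

    urls = []
    if '/vod/mp4:' in media_url:
        # HLS candidate: the vod/mp4: mount becomes an hls-vod media path
        urls.append(media_url.replace('/vod/mp4:', '/hls-vod/media/') + '.m3u8')
    # Progressive candidate: drop everything between the host and the
    # mp4:/mp3: mount prefix, as the http format in the patch does
    urls.append(re.sub(r'(://[^/]+/)(?:[^/]+/)*(?:mp4|mp3):', r'\1', media_url))
    return urls


# Hypothetical derivative URL, for illustration only:
print(candidate_urls('rtmp://media.loc.gov/vod/mp4:media/avfiles/example.mp4'))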