from __future__ import unicode_literals
import base64
+import functools
import json
import re
import itertools
from .common import InfoExtractor
from ..compat import (
+ compat_kwargs,
compat_HTTPError,
compat_str,
compat_urlparse,
determine_ext,
ExtractorError,
js_to_json,
- InAdvancePagedList,
int_or_none,
merge_dicts,
NO_DEFAULT,
+ OnDemandPagedList,
parse_filesize,
- qualities,
RegexNotFoundError,
sanitized_Request,
smuggle_url,
webpage, 'vuid', group='vuid')
return xsrft, vuid
+ def _extract_vimeo_config(self, webpage, video_id, *args, **kwargs):
+ vimeo_config = self._search_regex(
+ r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));',
+ webpage, 'vimeo config', *args, **compat_kwargs(kwargs))
+ if vimeo_config:
+ return self._parse_json(vimeo_config, video_id)
+
    def _set_vimeo_cookie(self, name, value):
        # Convenience wrapper: every cookie this extractor sets is scoped to
        # the main vimeo.com domain.
        self._set_cookie('vimeo.com', name, value)
def _parse_config(self, config, video_id):
video_data = config['video']
video_title = video_data['title']
- is_live = try_get(video_data, lambda x: x['live_event']['status']) == 'started'
+ live_event = video_data.get('live_event') or {}
+ is_live = live_event.get('status') == 'started'
formats = []
config_files = video_data.get('files') or config['request'].get('files', {})
'tbr': int_or_none(f.get('bitrate')),
})
+ # TODO: fix handling of 308 status code returned for live archive manifest requests
for files_type in ('hls', 'dash'):
for cdn_name, cdn_data in config_files.get(files_type, {}).get('cdns', {}).items():
manifest_url = cdn_data.get('url')
f['preference'] = -40
formats.extend(mpd_formats)
+ live_archive = live_event.get('archive') or {}
+ live_archive_source_url = live_archive.get('source_url')
+ if live_archive_source_url and live_archive.get('status') == 'done':
+ formats.append({
+ 'format_id': 'live-archive-source',
+ 'url': live_archive_source_url,
+ 'preference': 1,
+ })
+
subtitles = {}
text_tracks = config['request'].get('text_tracks')
if text_tracks:
video_uploader_url = owner.get('url')
return {
+ 'id': video_id,
'title': self._live_title(video_title) if is_live else video_title,
'uploader': owner.get('name'),
'uploader_id': video_uploader_url.split('/')[-1] if video_uploader_url else None,
\.
)?
vimeo(?P<pro>pro)?\.com/
- (?!(?:channels|album)/[^/?#]+/?(?:$|[?#])|[^/]+/review/|ondemand/)
+ (?!(?:channels|album|showcase)/[^/?#]+/?(?:$|[?#])|[^/]+/review/|ondemand/)
(?:.*?/)?
(?:
(?:
# and latter we extract those that are Vimeo specific.
self.report_extraction(video_id)
- vimeo_config = self._search_regex(
- r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));', webpage,
- 'vimeo config', default=None)
+ vimeo_config = self._extract_vimeo_config(webpage, video_id, default=None)
if vimeo_config:
- seed_status = self._parse_json(vimeo_config, video_id).get('seed_status', {})
+ seed_status = vimeo_config.get('seed_status', {})
if seed_status.get('state') == 'failed':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, seed_status['title']),
channel_url = 'https://vimeo.com/channels/%s' % channel_id if channel_id else None
info_dict = {
- 'id': video_id,
'formats': formats,
'timestamp': unified_timestamp(timestamp),
'description': video_description,
return '%s/videos/page:%d/' % (base_url, pagenum)
def _extract_list_title(self, webpage):
- return self._TITLE or self._html_search_regex(self._TITLE_RE, webpage, 'list title')
+ return self._TITLE or self._html_search_regex(
+ self._TITLE_RE, webpage, 'list title', fatal=False)
def _login_list_password(self, page_url, list_id, webpage):
login_form = self._search_regex(
class VimeoAlbumIE(VimeoChannelIE):
IE_NAME = 'vimeo:album'
- _VALID_URL = r'https://vimeo\.com/album/(?P<id>\d+)(?:$|[?#]|/(?!video))'
+ _VALID_URL = r'https://vimeo\.com/(?:album|showcase)/(?P<id>\d+)(?:$|[?#]|/(?!video))'
_TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
_TESTS = [{
'url': 'https://vimeo.com/album/2632481',
'params': {
'videopassword': 'youtube-dl',
}
- }, {
- 'url': 'https://vimeo.com/album/2632481/sort:plays/format:thumbnail',
- 'only_matching': True,
- }, {
- # TODO: respect page number
- 'url': 'https://vimeo.com/album/2632481/page:2/sort:plays/format:thumbnail',
- 'only_matching': True,
}]
-
- def _page_url(self, base_url, pagenum):
- return '%s/page:%d/' % (base_url, pagenum)
+ _PAGE_SIZE = 100
+
+ def _fetch_page(self, album_id, authorizaion, hashed_pass, page):
+ api_page = page + 1
+ query = {
+ 'fields': 'link,uri',
+ 'page': api_page,
+ 'per_page': self._PAGE_SIZE,
+ }
+ if hashed_pass:
+ query['_hashed_pass'] = hashed_pass
+ videos = self._download_json(
+ 'https://api.vimeo.com/albums/%s/videos' % album_id,
+ album_id, 'Downloading page %d' % api_page, query=query, headers={
+ 'Authorization': 'jwt ' + authorizaion,
+ })['data']
+ for video in videos:
+ link = video.get('link')
+ if not link:
+ continue
+ uri = video.get('uri')
+ video_id = self._search_regex(r'/videos/(\d+)', uri, 'video_id', default=None) if uri else None
+ yield self.url_result(link, VimeoIE.ie_key(), video_id)
def _real_extract(self, url):
album_id = self._match_id(url)
- return self._extract_videos(album_id, 'https://vimeo.com/album/%s' % album_id)
+ webpage = self._download_webpage(url, album_id)
+ webpage = self._login_list_password(url, album_id, webpage)
+ api_config = self._extract_vimeo_config(webpage, album_id)['api']
+ entries = OnDemandPagedList(functools.partial(
+ self._fetch_page, album_id, api_config['jwt'],
+ api_config.get('hashed_pass')), self._PAGE_SIZE)
+ return self.playlist_result(entries, album_id, self._html_search_regex(
+ r'<title>\s*(.+?)(?:\s+on Vimeo)?</title>', webpage, 'title', fatal=False))
class VimeoGroupsIE(VimeoAlbumIE):
}]
    def _extract_list_title(self, webpage):
        # Group pages expose their title via OpenGraph meta tags; non-fatal
        # so a missing tag yields a None title instead of aborting.
        return self._og_search_title(webpage, fatal=False)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if source_format:
info_dict['formats'].append(source_format)
self._vimeo_sort_formats(info_dict['formats'])
- info_dict['id'] = video_id
return info_dict
return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
-class VimeoLikesIE(InfoExtractor):
+class VimeoLikesIE(VimeoChannelIE):
_VALID_URL = r'https://(?:www\.)?vimeo\.com/(?P<id>[^/]+)/likes/?(?:$|[?#]|sort:)'
IE_NAME = 'vimeo:likes'
IE_DESC = 'Vimeo user likes'
'url': 'https://vimeo.com/user755559/likes/',
'playlist_mincount': 293,
'info_dict': {
- 'id': 'user755559_likes',
- 'description': 'See all the videos urza likes',
- 'title': 'Videos urza likes',
+ 'id': 'user755559',
+ 'title': 'urza’s Likes',
},
}, {
'url': 'https://vimeo.com/stormlapse/likes',
'only_matching': True,
}]
+ def _page_url(self, base_url, pagenum):
+ return '%s/page:%d/' % (base_url, pagenum)
+
def _real_extract(self, url):
user_id = self._match_id(url)
- webpage = self._download_webpage(url, user_id)
- page_count = self._int(
- self._search_regex(
- r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
- .*?</a></li>\s*<li\s+class="pagination_next">
- ''', webpage, 'page count', default=1),
- 'page count', fatal=True)
- PAGE_SIZE = 12
- title = self._html_search_regex(
- r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
- description = self._html_search_meta('description', webpage)
-
- def _get_page(idx):
- page_url = 'https://vimeo.com/%s/likes/page:%d/sort:date' % (
- user_id, idx + 1)
- webpage = self._download_webpage(
- page_url, user_id,
- note='Downloading page %d/%d' % (idx + 1, page_count))
- video_list = self._search_regex(
- r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
- webpage, 'video content')
- paths = re.findall(
- r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
- for path in paths:
- yield {
- '_type': 'url',
- 'url': compat_urlparse.urljoin(page_url, path),
- }
-
- pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
-
- return {
- '_type': 'playlist',
- 'id': '%s_likes' % user_id,
- 'title': title,
- 'description': description,
- 'entries': pl,
- }
+ return self._extract_videos(user_id, 'https://vimeo.com/%s/likes' % user_id)
-class VHXEmbedIE(InfoExtractor):
+class VHXEmbedIE(VimeoBaseInfoExtractor):
IE_NAME = 'vhx:embed'
_VALID_URL = r'https?://embed\.vhx\.tv/videos/(?P<id>\d+)'
- def _call_api(self, video_id, access_token, path='', query=None):
- return self._download_json(
- 'https://api.vhx.tv/videos/' + video_id + path, video_id, headers={
- 'Authorization': 'Bearer ' + access_token,
- }, query=query)
-
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- credentials = self._parse_json(self._search_regex(
- r'(?s)credentials\s*:\s*({.+?}),', webpage,
- 'config'), video_id, js_to_json)
- access_token = credentials['access_token']
-
- query = {}
- for k, v in credentials.items():
- if k in ('authorization', 'authUserToken', 'ticket') and v and v != 'undefined':
- if k == 'authUserToken':
- query['auth_user_token'] = v
- else:
- query[k] = v
- files = self._call_api(video_id, access_token, '/files', query)
-
- formats = []
- for f in files:
- href = try_get(f, lambda x: x['_links']['source']['href'])
- if not href:
- continue
- method = f.get('method')
- if method == 'hls':
- formats.extend(self._extract_m3u8_formats(
- href, video_id, 'mp4', 'm3u8_native',
- m3u8_id='hls', fatal=False))
- elif method == 'dash':
- formats.extend(self._extract_mpd_formats(
- href, video_id, mpd_id='dash', fatal=False))
- else:
- fmt = {
- 'filesize': int_or_none(try_get(f, lambda x: x['size']['bytes'])),
- 'format_id': 'http',
- 'preference': 1,
- 'url': href,
- 'vcodec': f.get('codec'),
- }
- quality = f.get('quality')
- if quality:
- fmt.update({
- 'format_id': 'http-' + quality,
- 'height': int_or_none(self._search_regex(r'(\d+)p', quality, 'height', default=None)),
- })
- formats.append(fmt)
- self._sort_formats(formats)
-
- video_data = self._call_api(video_id, access_token)
- title = video_data.get('title') or video_data['name']
-
- subtitles = {}
- for subtitle in try_get(video_data, lambda x: x['tracks']['subtitles'], list) or []:
- lang = subtitle.get('srclang') or subtitle.get('label')
- for _link in subtitle.get('_links', {}).values():
- href = _link.get('href')
- if not href:
- continue
- subtitles.setdefault(lang, []).append({
- 'url': href,
- })
-
- q = qualities(['small', 'medium', 'large', 'source'])
- thumbnails = []
- for thumbnail_id, thumbnail_url in video_data.get('thumbnail', {}).items():
- thumbnails.append({
- 'id': thumbnail_id,
- 'url': thumbnail_url,
- 'preference': q(thumbnail_id),
- })
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': video_data.get('description'),
- 'duration': int_or_none(try_get(video_data, lambda x: x['duration']['seconds'])),
- 'formats': formats,
- 'subtitles': subtitles,
- 'thumbnails': thumbnails,
- 'timestamp': unified_timestamp(video_data.get('created_at')),
- 'view_count': int_or_none(video_data.get('plays_count')),
- }
+ config_url = self._parse_json(self._search_regex(
+ r'window\.OTTData\s*=\s*({.+})', webpage,
+ 'ott data'), video_id, js_to_json)['config_url']
+ config = self._download_json(config_url, video_id)
+ info = self._parse_config(config, video_id)
+ self._vimeo_sort_formats(info['formats'])
+ return info