import re
from .common import InfoExtractor
+from ..compat import (
+ compat_parse_qs,
+ compat_urllib_parse_urlparse,
+)
from ..utils import (
determine_ext,
dict_get,
return info_dict
-class SVTPlayIE(SVTBaseIE):
+class SVTPlayBaseIE(SVTBaseIE):
+ # Shared base for svtplay.se extractors. _SVTPLAY_RE matches the page's
+ # embedded JSON state assignment root['__svtplay'] = {...}; — tolerating
+ # either quote style (backreference \1), any number of leading
+ # underscores (_*svtplay), and flexible whitespace. The JSON payload is
+ # captured in the named group 'json' (non-greedy, terminated by ';\n').
+ _SVTPLAY_RE = r'root\s*\[\s*(["\'])_*svtplay\1\s*\]\s*=\s*(?P<json>{.+?})\s*;\s*\n'
+
+
+class SVTPlayIE(SVTPlayBaseIE):
IE_DESC = 'SVT Play and Öppet arkiv'
_VALID_URL = r'https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp)/(?P<id>[0-9]+)'
_TESTS = [{
data = self._parse_json(
self._search_regex(
- r'root\["__svtplay"\]\s*=\s*([^;]+);',
- webpage, 'embedded data', default='{}'),
+ self._SVTPLAY_RE, webpage, 'embedded data', default='{}',
+ group='json'),
video_id, fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
return info_dict
-class SVTPlaylistIE(InfoExtractor):
- IE_DESC = 'SVT Play serie'
+class SVTSeriesIE(SVTPlayBaseIE):
_VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)'
- IE_NAME = 'svtplay:serie'
_TESTS = [{
'url': 'https://www.svtplay.se/rederiet',
'info_dict': {
'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
},
'playlist_mincount': 318,
+ }, {
+ 'url': 'https://www.svtplay.se/rederiet?tab=sasong2',
+ 'info_dict': {
+ 'id': 'rederiet-sasong2',
+ 'title': 'Rederiet - Säsong 2',
+ 'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
+ },
+ 'playlist_count': 12,
}]
@classmethod
def suitable(cls, url):
+ # A series URL pattern ([^/?&#]+) is broad enough to also match single
+ # video/clip URLs; yield to the more specific SVTIE / SVTPlayIE
+ # extractors so only genuine series pages fall through to this one.
- return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTPlaylistIE, cls).suitable(url)
+ return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url)
def _real_extract(self, url):
+ # Extract a playlist of all episodes of a series, optionally limited to
+ # one season via the '?tab=<season-slug>' query parameter.
- video_id = self._match_id(url)
+ series_id = self._match_id(url)
+ # Optional season selector: '?tab=sasongN' narrows extraction to that
+ # season and is folded into the playlist id below.
+ qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+ season_slug = qs.get('tab', [None])[0]
- page = self._download_webpage(
- url, video_id,
- note='Downloading serie page',
- errnote='unable to fetch serie page')
- root_json = self._search_regex(
- r'root\[\'__svtplay\'\]\s*=(.+);\n',
- page, 'root')
- root = self._parse_json(root_json, video_id)
+ if season_slug:
+ series_id += '-%s' % season_slug
- metadata = root.get('metaData', {})
- related_videos_accordion = root['relatedVideoContent']['relatedVideosAccordion']
+ webpage = self._download_webpage(
+ url, series_id, 'Downloading series page')
+
+ # Pull the embedded JSON state blob via the shared base-class regex
+ # (named group 'json'); fatal here, unlike the single-video extractor.
+ root = self._parse_json(
+ self._search_regex(
+ self._SVTPLAY_RE, webpage, 'content', group='json'),
+ series_id)
+
+ # Human-readable season name, filled in only when a season was selected
+ # and found in the accordion below.
+ season_name = None
entries = []
- for season in related_videos_accordion:
+ for season in root['relatedVideoContent']['relatedVideosAccordion']:
+ if not isinstance(season, dict):
+ continue
+ # When a season was requested, skip every other season and remember
+ # the matching season's display name for the playlist title.
+ if season_slug:
+ if season.get('slug') != season_slug:
+ continue
+ season_name = season.get('name')
videos = season.get('videos')
if not isinstance(videos, list):
continue
-
for video in videos:
content_url = video.get('contentUrl')
- if not isinstance(content_url, compat_str):
+ if not content_url or not isinstance(content_url, compat_str):
continue
entries.append(
self.url_result(
+ # NOTE(review): url_result appears to be missing its positional
+ # url (and ie=) arguments in this excerpt — likely trimmed diff
+ # context; confirm against the full file.
video_title=video.get('title')
))
+ # metaData may be absent or of unexpected type; degrade to {} so the
+ # .get() calls below stay safe.
+ metadata = root.get('metaData')
+ if not isinstance(metadata, dict):
+ metadata = {}
+
+ # Playlist title: '<series> - <season name>' when both are known, else
+ # fall back to the raw season slug, else the bare series title (None
+ # stays None if neither exists).
+ title = metadata.get('title')
+ season_name = season_name or season_slug
+
+ if title and season_name:
+ title = '%s - %s' % (title, season_name)
+ elif season_slug:
+ title = season_slug
+
return self.playlist_result(
- entries, video_id, metadata.get('title'), metadata.get('description'))
+ entries, series_id, title, metadata.get('description'))