def _real_extract(self, url):
    """Extract metadata and formats for a ProSiebenSat.1 clip.

    Scrapes the clip id from the HTML page, then walks the
    vas.sim-technik.de ``vas/live/v2`` API in three steps
    (videos -> sources -> sources/url), signing each request with a
    SHA1-based ``client_id``, and finally builds the formats list.
    """
    mobj = re.match(self._VALID_URL, url)
    video_id = mobj.group('id')

    page = self._download_webpage(url, video_id, 'Downloading page')

    def extract(patterns, name, page, fatal=False):
        # Try each regex in order and return the first cleaned match;
        # optionally raise when the field is required.
        for pattern in patterns:
            mobj = re.search(pattern, page)
            if mobj:
                return clean_html(mobj.group(1))
        if fatal:
            raise RegexNotFoundError(u'Unable to extract %s' % name)
        return None

    clip_id = extract(self._CLIPID_REGEXES, 'clip id', page, fatal=True)

    # Static credentials used by the site's own player client.
    access_token = 'testclient'
    client_name = 'kolibri-1.2.5'
    client_location = url

    videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse.urlencode({
        'access_token': access_token,
        'client_location': client_location,
        'client_name': client_name,
        'ids': clip_id,
    })

    videos = self._download_json(videos_api_url, clip_id, 'Downloading videos JSON')

    duration = float(videos[0]['duration'])
    source_ids = [source['id'] for source in videos[0]['sources']]
    source_ids_str = ','.join(map(str, source_ids))

    # Shared salt used in the client_id signature; its first two
    # characters also prefix the resulting hash.
    g = '01!8d8F_)r9]4s[qeuXfP%'

    client_id = g[:2] + sha1(''.join([clip_id, g, access_token, client_location, g, client_name])
                             .encode('utf-8')).hexdigest()

    sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse.urlencode({
        'access_token': access_token,
        'client_id': client_id,
        'client_location': client_location,
        'client_name': client_name,
    }))

    sources = self._download_json(sources_api_url, clip_id, 'Downloading sources JSON')
    server_id = sources['server_id']

    # The url request is signed over a different field order and also
    # covers the server id and the requested source ids.
    client_id = g[:2] + sha1(''.join([g, clip_id, access_token, server_id,
                                      client_location, source_ids_str, g, client_name])
                             .encode('utf-8')).hexdigest()

    url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse.urlencode({
        'access_token': access_token,
        'client_id': client_id,
        'client_location': client_location,
        'client_name': client_name,
        'server_id': server_id,
        'source_ids': source_ids_str,
    }))

    urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')

    title = extract(self._TITLE_REGEXES, 'title', page, fatal=True)
    description = extract(self._DESCRIPTION_REGEXES, 'description', page)
    thumbnail = self._og_search_thumbnail(page)

    upload_date = extract(self._UPLOAD_DATE_REGEXES, 'upload date', page)
    if upload_date:
        upload_date = unified_strdate(upload_date)

    formats = []

    # The API returns either a list of sources or a dict keyed by some
    # identifier; normalize to an iterable of source dicts.
    urls_sources = urls['sources']
    if isinstance(urls_sources, dict):
        urls_sources = urls_sources.values()

    def fix_bitrate(bitrate):
        # If the reported bitrate looks like bps (an exact multiple of
        # 1000), convert it to kbps; otherwise pass it through.
        return bitrate / 1000 if bitrate % 1000 == 0 else bitrate

    for source in urls_sources:
        if source['protocol'] == 'rtmp':
            # Split an rtmp URL into base url, app and playpath as the
            # downloader expects; skip entries that don't match.
            mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', source['url'])
            if not mobj:
                continue
            formats.append({
                'url': mobj.group('url'),
                'app': mobj.group('app'),
                'play_path': mobj.group('playpath'),
                'player_url': 'http://livepassdl.conviva.com/hf/ver/2.79.0.17083/LivePassModuleMain.swf',
                'page_url': 'http://www.prosieben.de',
                'vbr': fix_bitrate(source['bitrate']),
                'ext': 'mp4',
                'format_id': '%s_%s' % (source['cdn'], source['bitrate']),
            })
        else:
            formats.append({
                'url': source['url'],
                # BUG FIX: was fix_bitrate(sources['bitrate']) — indexing
                # the sources-API response dict instead of the current
                # loop item, which would fail for every non-RTMP source.
                'vbr': fix_bitrate(source['bitrate']),
            })

    self._sort_formats(formats)