[laola1tv] Fixes for changed site layout.
author    Jens Wille <jens.wille@gmail.com>    Tue, 9 Feb 2016 15:25:30 +0000 (16:25 +0100)
committer Sergey M․ <dstftw@gmail.com>    Sun, 14 Feb 2016 17:01:49 +0000 (23:01 +0600)
* Fixed the _VALID_URL pattern (with tests; see the run sketch below).
* Fixed iframe URL extraction.
* Fixed token URL extraction.
* Fixed variable extraction.
* Fixed uploader spelling.
* Added upload_date to result dictionary.

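The reworked _TESTS can be run with youtube-dl's standard per-extractor download tests. A rough sketch of the invocation (the test names follow the usual TestDownload.test_<IE name> convention; treat the exact names as an assumption):

    python test/test_download.py TestDownload.test_Laola1Tv
    python test/test_download.py TestDownload.test_Laola1Tv_1
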
youtube_dl/extractor/laola1tv.py

index b459559b0349bcae6c8d658c2fb120c2cb81d37e..c1f248b6bf2d953acf4b869238365e2019b21ae0 100644
--- a/youtube_dl/extractor/laola1tv.py
+++ b/youtube_dl/extractor/laola1tv.py
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-import random
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_urlparse
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
+    unified_strdate,
+    urlencode_postdata,
+    xpath_element,
     xpath_text,
 )
 
 
 class Laola1TvIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<id>[0-9]+)\.html'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<slug>[\w-]+)'
+    _TESTS = [{
         'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie/227883.html',
         'info_dict': {
+            'categories': ['Eishockey'],
+            'ext': 'flv',
             'id': '227883',
-            'ext': 'mp4',
+            'is_live': False,
             'title': 'Straubing Tigers - Kölner Haie',
+            'upload_date': '20140912',
+        },
+        'params': {
+            'skip_download': True,
+        }
+    }, {
+        'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie',
+        'info_dict': {
             'categories': ['Eishockey'],
+            'ext': 'flv',
+            'id': '464602',
             'is_live': False,
+            'title': 'Straubing Tigers - Kölner Haie',
+            'upload_date': '20160129',
         },
         'params': {
             'skip_download': True,
         }
-    }
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
         lang = mobj.group('lang')
         portal = mobj.group('portal')
 
-        webpage = self._download_webpage(url, video_id)
+        webpage = self._download_webpage(url, mobj.group('slug'))
         iframe_url = self._search_regex(
-            r'<iframe[^>]*?class="main_tv_player"[^>]*?src="([^"]+)"',
+            r'<iframe[^>]*?id="videoplayer"[^>]*?src="([^"]+)"',
             webpage, 'iframe URL')
 
-        iframe = self._download_webpage(
-            iframe_url, video_id, note='Downloading iframe')
-        flashvars_m = re.findall(
-            r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe)
-        flashvars = dict((m[0], m[1]) for m in flashvars_m)
+        video_id = self._search_regex(
+            r'videoid=(\d+)', iframe_url, 'video ID')
+
+        iframe = self._download_webpage(compat_urlparse.urljoin(
+            url, iframe_url), video_id, note='Downloading iframe')
 
         partner_id = self._search_regex(
-            r'partnerid\s*:\s*"([^"]+)"', iframe, 'partner id')
+            r'partnerid\s*:\s*"([^"]+)"', iframe, 'partner ID')
 
         xml_url = ('http://www.laola1.tv/server/hd_video.php?' +
                    'play=%s&partner=%s&portal=%s&v5ident=&lang=%s' % (
                        video_id, partner_id, portal, lang))
         hd_doc = self._download_xml(xml_url, video_id)
 
-        title = xpath_text(hd_doc, './/video/title', fatal=True)
-        flash_url = xpath_text(hd_doc, './/video/url', fatal=True)
-        uploader = xpath_text(hd_doc, './/video/meta_organistation')
-        is_live = xpath_text(hd_doc, './/video/islive') == 'true'
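+        # shorthand for reading the text of a child element of the <video> node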
+        _v = lambda x, **k: xpath_text(hd_doc, './/video/' + x, **k)
+        title = _v('title', fatal=True)
 
-        categories = xpath_text(hd_doc, './/video/meta_sports')
+        categories = _v('meta_sports')
         if categories:
             categories = categories.split(',')
 
-        ident = random.randint(10000000, 99999999)
-        token_url = '%s&ident=%s&klub=0&unikey=0&timestamp=%s&auth=%s' % (
-            flash_url, ident, flashvars['timestamp'], flashvars['auth'])
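+        # derive upload_date (YYYYMMDD) from the feed's separate date and start-time fields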
+        time_date = _v('time_date')
+        time_start = _v('time_start')
+        upload_date = None
+        if time_date and time_start:
+            upload_date = unified_strdate(time_date + ' ' + time_start)
 
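+        # the token URL is now obtained by POSTing the comma-separated
+        # req_liga_abos values as form data to the stream-access API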
+        json_url = ('https://club.laola1.tv/sp/laola1/api/v3/user/session' +
+                    '/premium/player/stream-access?videoId=%s&target=2' +
+                    '&label=laola1tv&area=%s') % (video_id, _v('area'))
+        req = sanitized_Request(json_url, urlencode_postdata(
+            dict((i, v) for i, v in enumerate(_v('req_liga_abos').split(',')))))
+
+        token_url = self._download_json(req, video_id)['data']['stream-access'][0]
         token_doc = self._download_xml(
             token_url, video_id, note='Downloading token')
-        token_attrib = token_doc.find('.//token').attrib
-        if token_attrib.get('auth') in ('blocked', 'restricted'):
+
+        token_attrib = xpath_element(token_doc, './/token').attrib
+        token_auth = token_attrib['auth']
+
+        if token_auth in ('blocked', 'restricted'):
             raise ExtractorError(
-                'Token error: %s' % token_attrib.get('comment'), expected=True)
+                'Token error: %s' % token_attrib['comment'], expected=True)
 
-        video_url = '%s?hdnea=%s&hdcore=3.2.0' % (
-            token_attrib['url'], token_attrib['auth'])
+        video_url = '%s?hdnea=%s&hdcore=3.2.0' % (token_attrib['url'], token_auth)
 
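+        # expose the HDS manifest as individual formats instead of a single flash URL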
         return {
+            'categories': categories,
+            'formats': self._extract_f4m_formats(
+                video_url, video_id, f4m_id='hds'),
             'id': video_id,
-            'is_live': is_live,
+            'is_live': _v('islive') == 'true',
             'title': title,
-            'url': video_url,
-            'uploader': uploader,
-            'categories': categories,
-            'ext': 'mp4',
+            'upload_date': upload_date,
+            'uploader': _v('meta_organisation'),
         }
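
A minimal sketch of exercising the reworked extractor through youtube-dl's public YoutubeDL API, metadata only; the URL and expected values come from the first test above, everything else is standard youtube-dl usage:

    import youtube_dl

    with youtube_dl.YoutubeDL() as ydl:
        # download=False returns the info dict without fetching the HDS streams
        info = ydl.extract_info(
            'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie/227883.html',
            download=False)
        print(info['title'])        # 'Straubing Tigers - Kölner Haie'
        print(info['upload_date'])  # '20140912'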