[teamcoco] Fix extraction
author    Naglis Jonaitis <njonaitis@gmail.com>
          Sat, 21 Feb 2015 20:19:39 +0000 (22:19 +0200)
committer Naglis Jonaitis <njonaitis@gmail.com>
          Sat, 21 Feb 2015 20:19:39 +0000 (22:19 +0200)
Also, use a single style of quotes

youtube_dl/extractor/teamcoco.py

index a73da1c9c0d92657bd90f302b03e9fa8404c2dcf..5793dbc1085a86fdf573a432805be129dc62de94 100644
@@ -1,8 +1,10 @@
 from __future__ import unicode_literals
 
+import base64
 import re
 
 from .common import InfoExtractor
+from ..utils import qualities
 
 
 class TeamcocoIE(InfoExtractor):
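
For reference, the qualities helper imported above (from youtube_dl/utils.py) builds a closure that maps a format id to its index in a preference-ordered list, returning -1 for unknown ids — the same behaviour as the hand-rolled try/except this commit removes. A minimal sketch, followed by how the extractor's list plays out:

    def qualities(quality_ids):
        # Return a function mapping a format id to its position in the
        # preference-ordered list; unknown ids get -1 so they rank below
        # every listed quality.
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    get_quality = qualities(['500k', '480p', '1000k', '720p', '1080p'])
    get_quality('500k')   # 0 (least preferred)
    get_quality('1080p')  # 4 (most preferred)
    get_quality('240p')   # -1 (unlisted)
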
@@ -24,8 +26,8 @@ class TeamcocoIE(InfoExtractor):
             'info_dict': {
                 'id': '19705',
                 'ext': 'mp4',
-                "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.",
-                "title": "Louis C.K. Interview Pt. 1 11/3/11",
+                'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.',
+                'title': 'Louis C.K. Interview Pt. 1 11/3/11',
                 'age_limit': 0,
             }
         }
@@ -42,42 +44,39 @@ class TeamcocoIE(InfoExtractor):
         display_id = mobj.group('display_id')
         webpage = self._download_webpage(url, display_id)
 
-        video_id = mobj.group("video_id")
+        video_id = mobj.group('video_id')
         if not video_id:
             video_id = self._html_search_regex(
                 self._VIDEO_ID_REGEXES, webpage, 'video id')
 
-        data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id
-        data = self._download_xml(
-            data_url, display_id, 'Downloading data webpage')
+        embed_url = 'http://teamcoco.com/embed/v/%s' % video_id
+        embed = self._download_webpage(
+            embed_url, video_id, 'Downloading embed page')
+
+        encoded_data = self._search_regex(
+            r'"preload"\s*:\s*"([^"]+)"', embed, 'encoded data')
+        data = self._parse_json(
+            base64.b64decode(encoded_data.encode('ascii')).decode('utf-8'), video_id)
 
-        qualities = ['500k', '480p', '1000k', '720p', '1080p']
         formats = []
-        for filed in data.findall('files/file'):
-            if filed.attrib.get('playmode') == 'all':
-                # it just duplicates one of the entries
-                break
-            file_url = filed.text
-            m_format = re.search(r'(\d+(k|p))\.mp4', file_url)
+        get_quality = qualities(['500k', '480p', '1000k', '720p', '1080p'])
+        for filed in data['files']:
+            m_format = re.search(r'(\d+(k|p))\.mp4', filed['url'])
             if m_format is not None:
                 format_id = m_format.group(1)
             else:
-                format_id = filed.attrib['bitrate']
+                format_id = filed['bitrate']
             tbr = (
-                int(filed.attrib['bitrate'])
-                if filed.attrib['bitrate'].isdigit()
+                int(filed['bitrate'])
+                if filed['bitrate'].isdigit()
                 else None)
 
-            try:
-                quality = qualities.index(format_id)
-            except ValueError:
-                quality = -1
             formats.append({
-                'url': file_url,
+                'url': filed['url'],
                 'ext': 'mp4',
                 'tbr': tbr,
                 'format_id': format_id,
-                'quality': quality,
+                'quality': get_quality(format_id),
             })
 
         self._sort_formats(formats)
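
The heart of the fix: the old CVP XML endpoint (http://teamcoco.com/cvp/2.0/%s.xml) is replaced by the embed page, which ships its player data as a base64-encoded JSON blob under a "preload" key. A self-contained sketch of that decode step, using a hypothetical decode_preload helper (the extractor itself inlines this via _search_regex and _parse_json):

    import base64
    import json
    import re

    def decode_preload(embed_html):
        # Pull the base64 blob out of the embed page's "preload" field ...
        encoded = re.search(r'"preload"\s*:\s*"([^"]+)"', embed_html).group(1)
        # ... then base64-decode and JSON-parse it into the data dict that
        # the format loop above consumes.
        return json.loads(
            base64.b64decode(encoded.encode('ascii')).decode('utf-8'))
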
@@ -86,8 +85,8 @@ class TeamcocoIE(InfoExtractor):
             'id': video_id,
             'display_id': display_id,
             'formats': formats,
-            'title': self._og_search_title(webpage),
-            'thumbnail': self._og_search_thumbnail(webpage),
-            'description': self._og_search_description(webpage),
+            'title': data['title'],
+            'thumbnail': data.get('thumb', {}).get('href'),
+            'description': data.get('teaser'),
             'age_limit': self._family_friendly_search(webpage),
         }
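
Judging only by the fields the new code reads, the decoded preload data looks roughly like this (the shape follows the accessors in the diff; the title comes from the test case above, while URLs and the bitrate are invented for illustration):

    data = {
        'title': 'Louis C.K. Interview Pt. 1 11/3/11',
        'teaser': 'Louis C.K. got starstruck by ...',
        'thumb': {'href': 'http://example.com/thumb.jpg'},
        'files': [
            {'url': 'http://example.com/video-480p.mp4', 'bitrate': '800'},
        ],
    }

    # With the regex above, '480p' is pulled out of the file URL as the
    # format_id, and get_quality('480p') ranks it 1 in the preference list;
    # a file whose URL carries no quality token falls back to its 'bitrate'.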