[ustream] Simplify channel extraction
diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py
index f69b27d4405d719e9c8d87807d2c1392753150eb..74c82587f802acc82e3025235a4bfe92725e8111 100644
--- a/youtube_dl/extractor/ustream.py
+++ b/youtube_dl/extractor/ustream.py
@@ -4,7 +4,7 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
-    compat_html_parser,
+    get_meta_content,
 )
 
 
@@ -49,40 +49,6 @@ class UstreamIE(InfoExtractor):
                }
         return info
 
-# More robust than regular expressions
-
-class ChannelParser(compat_html_parser.HTMLParser):
-    """
-    <meta name="ustream:channel_id" content="1234">
-    """
-    channel_id = None
-
-    def handle_starttag(self, tag, attrs):
-        if tag != 'meta':
-            return
-        values = dict(attrs)
-        if values.get('name') != 'ustream:channel_id':
-            return
-        value = values.get('content', '')
-        if value.isdigit():
-            self.channel_id = value
-
-class SocialstreamParser(compat_html_parser.HTMLParser):
-    """
-    <li class="content123 video" data-content-id="123" data-length="1452"
-        data-href="/recorded/123" data-og-url="/recorded/123">
-    """
-    def __init__(self):
-        compat_html_parser.HTMLParser.__init__(self)
-        self.content_ids = []
-
-    def handle_starttag(self, tag, attrs):
-        if tag != 'li':
-            return
-        for (attr, value) in attrs:
-            if attr == 'data-content-id' and value.isdigit():
-                self.content_ids.append(value)
-
 class UstreamChannelIE(InfoExtractor):
     _VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
     IE_NAME = u'ustream:channel'
@@ -90,21 +56,16 @@ class UstreamChannelIE(InfoExtractor):
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
         slug = m.group('slug')
+        webpage = self._download_webpage(url, slug)
+        channel_id = get_meta_content('ustream:channel_id', webpage)
 
-        p = ChannelParser()
-        p.feed(self._download_webpage(url, slug))
-        p.close()
-        channel_id = p.channel_id
-
-        p = SocialstreamParser()
         BASE = 'http://www.ustream.tv'
         next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
+        video_ids = []
         while next_url:
             reply = json.loads(self._download_webpage(compat_urlparse.urljoin(BASE, next_url), channel_id))
-            p.feed(reply['data'])
+            video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
             next_url = reply['nextUrl']
-        p.close()
-        video_ids = p.content_ids
 
         urls = ['http://www.ustream.tv/recorded/' + vid for vid in video_ids]
         url_entries = [self.url_result(eurl, 'Ustream') for eurl in urls]
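
For reference, here is a minimal standalone sketch of the flow the patch switches to: read the channel id from the page's meta tag, then page through the socialstream JSON endpoint and pull the ids out of each HTML fragment with a plain regex. It uses the Python 3 standard library rather than youtube-dl's compat helpers and get_meta_content, and the meta-tag regex and the stricter \d+ id pattern are illustrative assumptions, not the extractor's exact code:

# Hypothetical standalone sketch; not part of youtube-dl.
import json
import re
import urllib.parse
import urllib.request


def extract_channel_video_ids(channel_url):
    """Return the recorded-video ids listed on a Ustream channel page."""
    webpage = urllib.request.urlopen(channel_url).read().decode('utf-8')

    # The channel id is exposed as <meta name="ustream:channel_id" content="1234">;
    # a simple regex replaces the old HTMLParser subclass (assumes this attribute order).
    m = re.search(r'<meta\s+name="ustream:channel_id"\s+content="(\d+)"', webpage)
    if m is None:
        raise ValueError('channel id not found')
    channel_id = m.group(1)

    base = 'http://www.ustream.tv'
    next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
    video_ids = []
    # Each JSON reply carries an HTML fragment in 'data' and a pagination
    # pointer in 'nextUrl'; paging stops when 'nextUrl' is empty.
    while next_url:
        page_url = urllib.parse.urljoin(base, next_url)
        reply = json.loads(urllib.request.urlopen(page_url).read().decode('utf-8'))
        video_ids.extend(re.findall(r'data-content-id="(\d+)"', reply['data']))
        next_url = reply['nextUrl']
    return video_ids

The design point of the commit is that both the channel id and the per-video ids live in small, predictable HTML snippets, so a regex (or the get_meta_content helper) is enough and the two stateful HTMLParser subclasses can be dropped.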