Add support for Howcast.com - closes #835
[youtube-dl] / youtube_dl / InfoExtractors.py
index e4b6c01ab21aa4b12879c87ba4815e506cc4ee6f..938d2d805d2e3ad8ca881677a8a2c18709ce3201 100755 (executable)
@@ -188,6 +188,45 @@ class InfoExtractor(object):
             video_info['title'] = playlist_title
         return video_info
 
+class SearchInfoExtractor(InfoExtractor):
+    """
+    Base class for extractors of paged search queries.
+    They accept URLs in the format _SEARCH_KEY(|all|[1-9][0-9]*):{query}
+    Instances should define _SEARCH_KEY and _MAX_RESULTS.
+    """
+
+    @classmethod
+    def _make_valid_url(cls):
+        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
+
+    @classmethod
+    def suitable(cls, url):
+        return re.match(cls._make_valid_url(), url) is not None
+
+    def _real_extract(self, query):
+        mobj = re.match(self._make_valid_url(), query)
+        if mobj is None:
+            raise ExtractorError(u'Invalid search query "%s"' % query)
+
+        prefix = mobj.group('prefix')
+        query = mobj.group('query')
+        if prefix == '':
+            return self._get_n_results(query, 1)
+        elif prefix == 'all':
+            return self._get_n_results(query, self._MAX_RESULTS)
+        else:
+            n = int(prefix)
+            if n <= 0:
+                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
+            elif n > self._MAX_RESULTS:
+                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
+                n = self._MAX_RESULTS
+            return self._get_n_results(query, n)
+
+    def _get_n_results(self, query, n):
+        """Get a specified number of results for a query"""
+        raise NotImplementedError("This method must be implemented by subclasses")
+
 
 class YoutubeIE(InfoExtractor):
     """Information extractor for youtube.com."""
@@ -1265,6 +1304,8 @@ class GenericIE(InfoExtractor):
             opener.add_handler(handler())
 
         response = opener.open(HeadRequest(url))
+        if response is None:
+            raise ExtractorError(u'Invalid URL protocol')
         new_url = response.geturl()
 
         if url == new_url:
@@ -1336,42 +1377,18 @@ class GenericIE(InfoExtractor):
         }]
 
 
-class YoutubeSearchIE(InfoExtractor):
+class YoutubeSearchIE(SearchInfoExtractor):
     """Information Extractor for YouTube search queries."""
-    _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
     _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
-    _max_youtube_results = 1000
+    _MAX_RESULTS = 1000
     IE_NAME = u'youtube:search'
+    _SEARCH_KEY = 'ytsearch'
 
     def report_download_page(self, query, pagenum):
         """Report attempt to download search page with given number."""
         query = query.decode(preferredencoding())
         self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
 
-    def _real_extract(self, query):
-        mobj = re.match(self._VALID_URL, query)
-        if mobj is None:
-            raise ExtractorError(u'Invalid search query "%s"' % query)
-
-        prefix, query = query.split(':')
-        prefix = prefix[8:]
-        query = query.encode('utf-8')
-        if prefix == '':
-            return self._get_n_results(query, 1)
-        elif prefix == 'all':
-            self._get_n_results(query, self._max_youtube_results)
-        else:
-            try:
-                n = int(prefix)
-                if n <= 0:
-                    raise ExtractorError(u'Invalid download number %s for query "%s"' % (n, query))
-                elif n > self._max_youtube_results:
-                    self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
-                    n = self._max_youtube_results
-                return self._get_n_results(query, n)
-            except ValueError: # parsing prefix as integer fails
-                return self._get_n_results(query, 1)
-
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
 
@@ -1401,33 +1418,15 @@ class YoutubeSearchIE(InfoExtractor):
         if len(video_ids) > n:
             video_ids = video_ids[:n]
         videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
-        return videos
+        return self.playlist_result(videos, query)
 
 
-class GoogleSearchIE(InfoExtractor):
+class GoogleSearchIE(SearchInfoExtractor):
     """Information Extractor for Google Video search queries."""
-    _VALID_URL = r'gvsearch(?P<prefix>|\d+|all):(?P<query>[\s\S]+)'
     _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
-    _max_google_results = 1000
+    _MAX_RESULTS = 1000
     IE_NAME = u'video.google:search'
-
-    def _real_extract(self, query):
-        mobj = re.match(self._VALID_URL, query)
-
-        prefix = mobj.group('prefix')
-        query = mobj.group('query')
-        if prefix == '':
-            return self._get_n_results(query, 1)
-        elif prefix == 'all':
-            return self._get_n_results(query, self._max_google_results)
-        else:
-            n = int(prefix)
-            if n <= 0:
-                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
-            elif n > self._max_google_results:
-                self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
-                n = self._max_google_results
-            return self._get_n_results(query, n)
+    _SEARCH_KEY = 'gvsearch'
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
@@ -1440,7 +1439,6 @@ class GoogleSearchIE(InfoExtractor):
 
         for pagenum in itertools.count(1):
             result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
-            print(result_url)
             webpage = self._download_webpage(result_url, u'gvsearch:' + query,
                                              note='Downloading result page ' + str(pagenum))
 
@@ -1454,37 +1452,12 @@ class GoogleSearchIE(InfoExtractor):
             if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
                 return res
 
-class YahooSearchIE(InfoExtractor):
+class YahooSearchIE(SearchInfoExtractor):
     """Information Extractor for Yahoo! Video search queries."""
 
-    _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
-
-    _max_yahoo_results = 1000
+    _MAX_RESULTS = 1000
     IE_NAME = u'screen.yahoo:search'
-
-    def _real_extract(self, query):
-        mobj = re.match(self._VALID_URL, query)
-        if mobj is None:
-            raise ExtractorError(u'Invalid search query "%s"' % query)
-
-        prefix, query = query.split(':')
-        prefix = prefix[8:]
-        query = query.encode('utf-8')
-        if prefix == '':
-            return self._get_n_results(query, 1)
-        elif prefix == 'all':
-            return self._get_n_results(query, self._max_yahoo_results)
-        else:
-            try:
-                n = int(prefix)
-                if n <= 0:
-                    raise ExtractorError(u'Invalid download number %s for query "%s"' % (n, query))
-                elif n > self._max_yahoo_results:
-                    self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
-                    n = self._max_yahoo_results
-                return self._get_n_results(query, n)
-            except ValueError: # parsing prefix as integer fails
-                return self._get_n_results(query, 1)
+    _SEARCH_KEY = 'yvsearch'
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
@@ -3328,18 +3301,26 @@ class UstreamIE(InfoExtractor):
         video_id = m.group('videoID')
         video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
         webpage = self._download_webpage(url, video_id)
-        m = re.search(r'data-title="(?P<title>.+)"',webpage)
-        title = m.group('title')
-        m = re.search(r'<a class="state" data-content-type="channel" data-content-id="(?P<uploader>\d+)"',webpage)
-        uploader = m.group('uploader')
+        self.report_extraction(video_id)
+        try:
+            m = re.search(r'data-title="(?P<title>.+)"', webpage)
+            title = m.group('title')
+            m = re.search(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
+                          webpage, re.DOTALL)
+            uploader = unescapeHTML(m.group('uploader').strip())
+            m = re.search(r'<link rel="image_src" href="(?P<thumb>.*?)"', webpage)
+            thumb = m.group('thumb')
+        except AttributeError:
+            raise ExtractorError(u'Unable to extract info')
         info = {
                 'id':video_id,
                 'url':video_url,
                 'ext': 'flv',
                 'title': title,
-                'uploader': uploader
+                'uploader': uploader,
+                'thumbnail': thumb,
                   }
-        return [info]
+        return info
 
 class WorldStarHipHopIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
@@ -4060,7 +4041,7 @@ class RedTubeIE(InfoExtractor):
         
 class InaIE(InfoExtractor):
     """Information Extractor for Ina.fr"""
-    _VALID_URL = r'(?:http://)?(?:www.)?ina\.fr/video/(?P<id>I[0-9]+)/.*'
+    _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I[0-9]+)/.*'
 
     def _real_extract(self,url):
         mobj = re.match(self._VALID_URL, url)
@@ -4087,6 +4068,42 @@ class InaIE(InfoExtractor):
             'title':    video_title,
         }]
 
+class HowcastIE(InfoExtractor):
+    """Information Extractor for Ina.fr"""
+    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>[\d]+)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        webpage_url = 'http://www.howcast.com/videos/' + video_id
+        webpage = self._download_webpage(webpage_url, video_id)
+
+        mobj = re.search(r'\'file\': "(http://mobile-media\.howcast\.com/\d+\.mp4)"', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract video URL')
+        video_url = mobj.group(1)
+
+        mobj = re.search(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract title')
+        video_title = mobj.group(1) or mobj.group(2)
+
+        mobj = re.search(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'', webpage)
+        if mobj is None:
+            self._downloader.report_warning(u'unable to extract description')
+            video_description = None
+        else:
+            video_description = mobj.group(1) or mobj.group(2)
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'ext':      'mp4',
+            'title':    video_title,
+            'description': video_description,
+        }]
+
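A quick self-contained check (outside the patch) of the three extraction regexes above; the sample fragment is fabricated to match the patterns HowcastIE targets, not copied from a real Howcast page:

import re

sample = '''
'file': "http://mobile-media.howcast.com/12345.mp4"
<meta content="How to Tie a Tie" property='og:title'>
<meta content="A short example description." name='description'>
'''
# Each search should print the captured video URL, title and description respectively.
print(re.search(r'\'file\': "(http://mobile-media\.howcast\.com/\d+\.mp4)"', sample).group(1))
print(re.search(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'', sample).group(1))
print(re.search(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'', sample).group(1))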
 def gen_extractors():
     """ Return a list of an instance of every supported extractor.
     The order does matter; the first extractor matched is the one handling the URL.
@@ -4144,6 +4161,7 @@ def gen_extractors():
         BandcampIE(),
         RedTubeIE(),
         InaIE(),
+        HowcastIE(),
         GenericIE()
     ]
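For context, a rough sketch of how the ordering above is consumed; pick_extractor is an illustrative helper, not the actual FileDownloader dispatch code:

def pick_extractor(url):
    # Simplified dispatch: the first extractor whose suitable() accepts the URL wins.
    for ie in gen_extractors():
        if ie.suitable(url):
            return ie
    return None

Because HowcastIE() is listed before the catch-all GenericIE(), a URL such as http://www.howcast.com/videos/12345 is claimed by HowcastIE, while unrecognized URLs fall through to GenericIE.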