Add support for Howcast.com - closes #835
youtube_dl/InfoExtractors.py
index eacb5eb279c47966be4e2fab64d260c599cc833d..938d2d805d2e3ad8ca881677a8a2c18709ce3201 100755
@@ -188,6 +188,45 @@ class InfoExtractor(object):
             video_info['title'] = playlist_title
         return video_info
 
+class SearchInfoExtractor(InfoExtractor):
+    """
+    Base class for paged search query extractors.
+    They accept URLs in the format _SEARCH_KEY(|all|[1-9][0-9]*):{query}
+    Subclasses should define _SEARCH_KEY and _MAX_RESULTS.
+    """
+
+    @classmethod
+    def _make_valid_url(cls):
+        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
+
+    @classmethod
+    def suitable(cls, url):
+        return re.match(cls._make_valid_url(), url) is not None
+
+    def _real_extract(self, query):
+        mobj = re.match(self._make_valid_url(), query)
+        if mobj is None:
+            raise ExtractorError(u'Invalid search query "%s"' % query)
+
+        prefix = mobj.group('prefix')
+        query = mobj.group('query')
+        if prefix == '':
+            return self._get_n_results(query, 1)
+        elif prefix == 'all':
+            return self._get_n_results(query, self._MAX_RESULTS)
+        else:
+            n = int(prefix)
+            if n <= 0:
+                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
+            elif n > self._MAX_RESULTS:
+                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
+                n = self._MAX_RESULTS
+            return self._get_n_results(query, n)
+
+    def _get_n_results(self, query, n):
+        """Get a specified number of results for a query"""
+        raise NotImplementedError("This method must be implemented by subclasses")
+
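A minimal sketch (illustration only, not part of this patch) of how a subclass is expected to use this base class; the extractor name and site below are made up:

    class ExampleSearchIE(SearchInfoExtractor):
        _SEARCH_KEY = 'exsearch'   # hypothetical search key
        _MAX_RESULTS = 50
        IE_NAME = u'example:search'

        def _get_n_results(self, query, n):
            """Get a specified number of results for a query"""
            # A real extractor would page through the site's results here.
            entries = [self.url_result('http://example.com/watch?v=%d' % i)
                       for i in range(n)]
            return self.playlist_result(entries, query)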
 
 class YoutubeIE(InfoExtractor):
     """Information extractor for youtube.com."""
@@ -472,14 +511,12 @@ class YoutubeIE(InfoExtractor):
             self.report_age_confirmation()
             age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
 
     def _extract_id(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
         video_id = mobj.group(2)
         return video_id
 
@@ -497,8 +534,7 @@ class YoutubeIE(InfoExtractor):
         try:
             video_webpage_bytes = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))
 
         video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
 
@@ -522,23 +558,20 @@ class YoutubeIE(InfoExtractor):
                 break
         if 'token' not in video_info:
             if 'reason' in video_info:
-                self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0])
+                raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0])
             else:
-                self._downloader.report_error(u'"token" parameter not in video info for unknown reason')
-            return
+                raise ExtractorError(u'"token" parameter not in video info for unknown reason')
 
         # Check for "rental" videos
         if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
-            self._downloader.report_error(u'"rental" videos not supported')
-            return
+            raise ExtractorError(u'"rental" videos not supported')
 
         # Start extracting information
         self.report_information_extraction(video_id)
 
         # uploader
         if 'author' not in video_info:
-            self._downloader.report_error(u'unable to extract uploader name')
-            return
+            raise ExtractorError(u'Unable to extract uploader name')
         video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
 
         # uploader_id
@@ -551,8 +584,7 @@ class YoutubeIE(InfoExtractor):
 
         # title
         if 'title' not in video_info:
-            self._downloader.report_error(u'unable to extract video title')
-            return
+            raise ExtractorError(u'Unable to extract video title')
         video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
 
         # thumbnail image
@@ -617,10 +649,13 @@ class YoutubeIE(InfoExtractor):
             self.report_rtmp_download()
             video_url_list = [(None, video_info['conn'][0])]
         elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
-            url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
-            url_data = [compat_parse_qs(uds) for uds in url_data_strs]
-            url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
-            url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
+            url_map = {}
+            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
+                url_data = compat_parse_qs(url_data_str)
+                if 'itag' in url_data and 'url' in url_data:
+                    url = url_data['url'][0] + '&signature=' + url_data['sig'][0]
+                    if 'ratebypass' not in url:
+                        url += '&ratebypass=yes'
+                    url_map[url_data['itag'][0]] = url
 
             format_limit = self._downloader.params.get('format_limit', None)
             available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
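For context (illustrative values only), each comma-separated entry of url_encoded_fmt_stream_map is itself a query string, so the loop above sees something like:

    url_data = compat_parse_qs('itag=22&url=http%3A%2F%2Fexample.invalid%2Fvideo&sig=ABCDEF')
    # -> {'itag': ['22'], 'url': ['http://example.invalid/video'], 'sig': ['ABCDEF']}

and rebuilds the final download URL by appending '&signature=<sig>' (plus '&ratebypass=yes' when it is missing).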
@@ -699,8 +734,7 @@ class MetacafeIE(InfoExtractor):
             self.report_disclaimer()
             disclaimer = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'unable to retrieve disclaimer: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to retrieve disclaimer: %s' % compat_str(err))
 
         # Confirm age
         disclaimer_form = {
@@ -712,15 +746,13 @@ class MetacafeIE(InfoExtractor):
             self.report_age_confirmation()
             disclaimer = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
 
     def _real_extract(self, url):
         # Extract id and simplified title from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_id = mobj.group(1)
 
@@ -749,30 +781,25 @@ class MetacafeIE(InfoExtractor):
         else:
             mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
             if mobj is None:
-                self._downloader.report_error(u'unable to extract media URL')
-                return
+                raise ExtractorError(u'Unable to extract media URL')
             vardict = compat_parse_qs(mobj.group(1))
             if 'mediaData' not in vardict:
-                self._downloader.report_error(u'unable to extract media URL')
-                return
+                raise ExtractorError(u'Unable to extract media URL')
             mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
             if mobj is None:
-                self._downloader.report_error(u'unable to extract media URL')
-                return
+                raise ExtractorError(u'Unable to extract media URL')
             mediaURL = mobj.group('mediaURL').replace('\\/', '/')
             video_extension = mediaURL[-3:]
             video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
 
         mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract title')
-            return
+            raise ExtractorError(u'Unable to extract title')
         video_title = mobj.group(1).decode('utf-8')
 
         mobj = re.search(r'submitter=(.*?);', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract uploader nickname')
-            return
+            raise ExtractorError(u'Unable to extract uploader nickname')
         video_uploader = mobj.group(1)
 
         return [{
@@ -794,8 +821,7 @@ class DailymotionIE(InfoExtractor):
         # Extract id and simplified title from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_id = mobj.group(1).split('_')[0].split('?')[0]
 
@@ -810,8 +836,7 @@ class DailymotionIE(InfoExtractor):
         self.report_extraction(video_id)
         mobj = re.search(r'\s*var flashvars = (.*)', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract media URL')
-            return
+            raise ExtractorError(u'Unable to extract media URL')
         flashvars = compat_urllib_parse.unquote(mobj.group(1))
 
         for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
@@ -820,13 +845,11 @@ class DailymotionIE(InfoExtractor):
                 self.to_screen(u'Using %s' % key)
                 break
         else:
-            self._downloader.report_error(u'unable to extract video URL')
-            return
+            raise ExtractorError(u'Unable to extract video URL')
 
         mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract video URL')
-            return
+            raise ExtractorError(u'Unable to extract video URL')
 
         video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
 
@@ -834,8 +857,7 @@ class DailymotionIE(InfoExtractor):
 
         mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract title')
-            return
+            raise ExtractorError(u'Unable to extract title')
         video_title = unescapeHTML(mobj.group('title'))
 
         video_uploader = None
@@ -868,43 +890,52 @@ class DailymotionIE(InfoExtractor):
 class PhotobucketIE(InfoExtractor):
     """Information extractor for photobucket.com."""
 
-    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+    # TODO: the original _VALID_URL was:
+    # r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+    # Check whether it's necessary to keep the old extraction process
+    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))'
     IE_NAME = u'photobucket'
 
     def _real_extract(self, url):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'Invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
-        video_id = mobj.group(1)
+        video_id = mobj.group('id')
 
-        video_extension = 'flv'
+        video_extension = mobj.group('ext')
 
         # Retrieve video webpage to extract further information
-        request = compat_urllib_request.Request(url)
-        try:
-            self.report_download_webpage(video_id)
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
-            return
+        webpage = self._download_webpage(url, video_id)
 
         # Extract URL, uploader, and title from webpage
         self.report_extraction(video_id)
+        # We first try looking at the JavaScript code:
+        mobj = re.search(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (?P<json>.*?)\);', webpage)
+        if mobj is not None:
+            info = json.loads(mobj.group('json'))
+            return [{
+                'id':       video_id,
+                'url':      info[u'downloadUrl'],
+                'uploader': info[u'username'],
+                'upload_date':  datetime.date.fromtimestamp(info[u'creationDate']).strftime('%Y%m%d'),
+                'title':    info[u'title'],
+                'ext':      video_extension,
+                'thumbnail': info[u'thumbUrl'],
+            }]
+
+        # We try looking in other parts of the webpage
         mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract media URL')
-            return
+            raise ExtractorError(u'Unable to extract media URL')
         mediaURL = compat_urllib_parse.unquote(mobj.group(1))
 
         video_url = mediaURL
 
         mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract title')
-            return
+            raise ExtractorError(u'Unable to extract title')
         video_title = mobj.group(1).decode('utf-8')
 
         video_uploader = mobj.group(2).decode('utf-8')
@@ -920,136 +951,72 @@ class PhotobucketIE(InfoExtractor):
 
 
 class YahooIE(InfoExtractor):
-    """Information extractor for video.yahoo.com."""
-
-    _WORKING = False
-    # _VALID_URL matches all Yahoo! Video URLs
-    # _VPAGE_URL matches only the extractable '/watch/' URLs
-    _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
-    _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
-    IE_NAME = u'video.yahoo'
+    """Information extractor for screen.yahoo.com."""
+    _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P<id>\d*?)\.html'
 
-    def _real_extract(self, url, new_video=True):
-        # Extract ID from URL
+    def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'Invalid URL: %s' % url)
-            return
-
-        video_id = mobj.group(2)
-        video_extension = 'flv'
-
-        # Rewrite valid but non-extractable URLs as
-        # extractable English language /watch/ URLs
-        if re.match(self._VPAGE_URL, url) is None:
-            request = compat_urllib_request.Request(url)
-            try:
-                webpage = compat_urllib_request.urlopen(request).read()
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
-                return
-
-            mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
-            if mobj is None:
-                self._downloader.report_error(u'Unable to extract id field')
-                return
-            yahoo_id = mobj.group(1)
-
-            mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
-            if mobj is None:
-                self._downloader.report_error(u'Unable to extract vid field')
-                return
-            yahoo_vid = mobj.group(1)
-
-            url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
-            return self._real_extract(url, new_video=False)
-
-        # Retrieve video webpage to extract further information
-        request = compat_urllib_request.Request(url)
-        try:
-            self.report_download_webpage(video_id)
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
-            return
-
-        # Extract uploader and title from webpage
-        self.report_extraction(video_id)
-        mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
-        if mobj is None:
-            self._downloader.report_error(u'unable to extract video title')
-            return
-        video_title = mobj.group(1).decode('utf-8')
-
-        mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
-        if mobj is None:
-            self._downloader.report_error(u'unable to extract video uploader')
-            return
-        video_uploader = mobj.group(1).decode('utf-8')
-
-        # Extract video thumbnail
-        mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
-        if mobj is None:
-            self._downloader.report_error(u'unable to extract video thumbnail')
-            return
-        video_thumbnail = mobj.group(1).decode('utf-8')
-
-        # Extract video description
-        mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
-        if mobj is None:
-            self._downloader.report_error(u'unable to extract video description')
-            return
-        video_description = mobj.group(1).decode('utf-8')
-        if not video_description:
-            video_description = 'No description available.'
-
-        # Extract video height and width
-        mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
-        if mobj is None:
-            self._downloader.report_error(u'unable to extract video height')
-            return
-        yv_video_height = mobj.group(1)
-
-        mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
-        if mobj is None:
-            self._downloader.report_error(u'unable to extract video width')
-            return
-        yv_video_width = mobj.group(1)
-
-        # Retrieve video playlist to extract media URL
-        # I'm not completely sure what all these options are, but we
-        # seem to need most of them, otherwise the server sends a 401.
-        yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
-        yv_bitrate = '700'  # according to Wikipedia this is hard-coded
-        request = compat_urllib_request.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
-                '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
-                '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
-        try:
-            self.report_download_webpage(video_id)
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
-            return
-
-        # Extract media URL from playlist XML
-        mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
-        if mobj is None:
-            self._downloader.report_error(u'Unable to extract media URL')
-            return
-        video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
-        video_url = unescapeHTML(video_url)
-
-        return [{
-            'id':       video_id.decode('utf-8'),
-            'url':      video_url,
-            'uploader': video_uploader,
-            'upload_date':  None,
-            'title':    video_title,
-            'ext':      video_extension.decode('utf-8'),
-            'thumbnail':    video_thumbnail.decode('utf-8'),
-            'description':  video_description,
-        }]
-
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+        m_id = re.search(r'YUI\.namespace\("Media"\)\.CONTENT_ID = "(?P<new_id>.+?)";', webpage)
+
+        if m_id is None:
+            # TODO: Check which url parameters are required
+            info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
+            webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage')
+            info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
+                        <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
+                        <media:pubStart><!\[CDATA\[(?P<date>.*?)\ .*\]\]></media:pubStart>.*
+                        <media:content\ medium="image"\ url="(?P<thumb>.*?)"\ name="LARGETHUMB"
+                        '''
+            self.report_extraction(video_id)
+            m_info = re.search(info_re, webpage, re.VERBOSE|re.DOTALL)
+            if m_info is None:
+                raise ExtractorError(u'Unable to extract video info')
+            video_title = m_info.group('title')
+            video_description = m_info.group('description')
+            video_thumb = m_info.group('thumb')
+            video_date = m_info.group('date')
+            video_date = datetime.datetime.strptime(video_date, '%m/%d/%Y').strftime('%Y%m%d')
+
+            # TODO: Find a way to get mp4 videos
+            rest_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;element=stream;outputformat=mrss;id=%s;lmsoverride=1;bw=375;dynamicstream=1;cb=83521105;tech=flv,mp4;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
+            webpage = self._download_webpage(rest_url, video_id, u'Downloading video url webpage')
+            m_rest = re.search(r'<media:content url="(?P<url>.*?)" path="(?P<path>.*?)"', webpage)
+            if m_rest is None:
+                raise ExtractorError(u'Unable to extract video url')
+            video_url = m_rest.group('url')
+            video_path = m_rest.group('path')
+
+        else: # A different method is needed when a separate content id is defined
+            long_id = m_id.group('new_id')
+            info_url = 'http://video.query.yahoo.com/v1/public/yql?q=SELECT%20*%20FROM%20yahoo.media.video.streams%20WHERE%20id%3D%22' + long_id + '%22%20AND%20format%3D%22mp4%2Cflv%22%20AND%20protocol%3D%22rtmp%2Chttp%22%20AND%20plrs%3D%2286Gj0vCaSzV_Iuf6hNylf2%22%20AND%20acctid%3D%22389%22%20AND%20plidl%3D%22%22%20AND%20pspid%3D%22792700001%22%20AND%20offnetwork%3D%22false%22%20AND%20site%3D%22ivy%22%20AND%20lang%3D%22en-US%22%20AND%20region%3D%22US%22%20AND%20override%3D%22none%22%3B&env=prod&format=json&callback=YUI.Env.JSONP.yui_3_8_1_1_1368368376830_335'
+            webpage = self._download_webpage(info_url, video_id, u'Downloading info json')
+            json_str = re.search(r'YUI.Env.JSONP.yui.*?\((.*?)\);', webpage).group(1)
+            info = json.loads(json_str)
+            res = info[u'query'][u'results'][u'mediaObj'][0]
+            stream = res[u'streams'][0]
+            video_path = stream[u'path']
+            video_url = stream[u'host']
+            meta = res[u'meta']
+            video_title = meta[u'title']
+            video_description = meta[u'description']
+            video_thumb = meta[u'thumbnail']
+            video_date = None # not available in this response
+
+        info_dict = {
+            'id': video_id,
+            'url': video_url,
+            'play_path': video_path,
+            'title': video_title,
+            'description': video_description,
+            'thumbnail': video_thumb,
+            'upload_date': video_date,
+            'ext': 'flv',
+        }
+        return info_dict
 
 class VimeoIE(InfoExtractor):
     """Information extractor for vimeo.com."""
@@ -1062,8 +1029,7 @@ class VimeoIE(InfoExtractor):
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'Invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_id = mobj.group('id')
         if not mobj.group('proto'):
@@ -1086,10 +1052,9 @@ class VimeoIE(InfoExtractor):
             config = json.loads(config)
         except:
             if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
-                self._downloader.report_error(u'The author has restricted the access to this video, try with the "--referer" option')
+                raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
             else:
-                self._downloader.report_error(u'unable to extract info section')
-            return
+                raise ExtractorError(u'Unable to extract info section')
 
         # Extract title
         video_title = config["video"]["title"]
@@ -1138,8 +1103,7 @@ class VimeoIE(InfoExtractor):
                 self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
                 break
         else:
-            self._downloader.report_error(u'no known codec found')
-            return
+            raise ExtractorError(u'No known codec found')
 
         video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                     %(video_id, sig, timestamp, video_quality, video_codec.upper())
@@ -1171,11 +1135,9 @@ class ArteTvIE(InfoExtractor):
             self.report_download_webpage(url)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
         except ValueError as err:
-            self._downloader.report_error(u'Invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
         return webpage
 
     def grep_webpage(self, url, regex, regexFlags, matchTuples):
@@ -1184,13 +1146,11 @@ class ArteTvIE(InfoExtractor):
         info = {}
 
         if mobj is None:
-            self._downloader.report_error(u'Invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         for (i, key, err) in matchTuples:
             if mobj.group(i) is None:
-                self._downloader.report_error(err)
-                return
+                raise ExtractorError(err)
             else:
                 info[key] = mobj.group(i)
 
@@ -1262,7 +1222,7 @@ class ArteTvIE(InfoExtractor):
             'id':           info.get('id'),
             'url':          compat_urllib_parse.unquote(info.get('url')),
             'uploader':     u'arte.tv',
-            'upload_date':  info.get('date'),
+            'upload_date':  unified_strdate(info.get('date')),
             'title':        info.get('title').decode('utf-8'),
             'ext':          u'mp4',
             'format':       u'NA',
@@ -1344,6 +1304,8 @@ class GenericIE(InfoExtractor):
             opener.add_handler(handler())
 
         response = opener.open(HeadRequest(url))
+        if response is None:
+            raise ExtractorError(u'Invalid URL protocol')
         new_url = response.geturl()
 
         if url == new_url:
@@ -1362,8 +1324,7 @@ class GenericIE(InfoExtractor):
         except ValueError as err:
             # since this is the last-resort InfoExtractor, if
             # this error is thrown, it'll be thrown here
-            self._downloader.report_error(u'Invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         self.report_extraction(video_id)
         # Start with something easy: JW Player in SWFObject
@@ -1375,14 +1336,12 @@ class GenericIE(InfoExtractor):
             # Broaden the search a little bit: JWPlayer JS loader
             mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
         if mobj is None:
-            self._downloader.report_error(u'Invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         # It's possible that one of the regexes
         # matched, but returned an empty group:
         if mobj.group(1) is None:
-            self._downloader.report_error(u'Invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_url = compat_urllib_parse.unquote(mobj.group(1))
         video_id = os.path.basename(video_url)
@@ -1399,15 +1358,13 @@ class GenericIE(InfoExtractor):
         # and so on and so forth; it's just not practical
         mobj = re.search(r'<title>(.*)</title>', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract title')
-            return
+            raise ExtractorError(u'Unable to extract title')
         video_title = mobj.group(1)
 
         # video uploader is domain name
         mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract title')
-            return
+            raise ExtractorError(u'Unable to extract title')
         video_uploader = mobj.group(1)
 
         return [{
@@ -1420,44 +1377,18 @@ class GenericIE(InfoExtractor):
         }]
 
 
-class YoutubeSearchIE(InfoExtractor):
+class YoutubeSearchIE(SearchInfoExtractor):
     """Information Extractor for YouTube search queries."""
-    _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
     _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
-    _max_youtube_results = 1000
+    _MAX_RESULTS = 1000
     IE_NAME = u'youtube:search'
+    _SEARCH_KEY = 'ytsearch'
 
     def report_download_page(self, query, pagenum):
         """Report attempt to download search page with given number."""
         query = query.decode(preferredencoding())
         self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
 
-    def _real_extract(self, query):
-        mobj = re.match(self._VALID_URL, query)
-        if mobj is None:
-            self._downloader.report_error(u'invalid search query "%s"' % query)
-            return
-
-        prefix, query = query.split(':')
-        prefix = prefix[8:]
-        query = query.encode('utf-8')
-        if prefix == '':
-            return self._get_n_results(query, 1)
-        elif prefix == 'all':
-            self._get_n_results(query, self._max_youtube_results)
-        else:
-            try:
-                n = int(prefix)
-                if n <= 0:
-                    self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
-                    return
-                elif n > self._max_youtube_results:
-                    self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
-                    n = self._max_youtube_results
-                return self._get_n_results(query, n)
-            except ValueError: # parsing prefix as integer fails
-                return self._get_n_results(query, 1)
-
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
 
@@ -1472,13 +1403,11 @@ class YoutubeSearchIE(InfoExtractor):
             try:
                 data = compat_urllib_request.urlopen(request).read().decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.report_error(u'unable to download API page: %s' % compat_str(err))
-                return
+                raise ExtractorError(u'Unable to download API page: %s' % compat_str(err))
             api_response = json.loads(data)['data']
 
             if not 'items' in api_response:
-                self._downloader.report_error(u'[youtube] No video results')
-                return
+                raise ExtractorError(u'[youtube] No video results')
 
             new_ids = list(video['id'] for video in api_response['items'])
             video_ids += new_ids
@@ -1489,169 +1418,73 @@ class YoutubeSearchIE(InfoExtractor):
         if len(video_ids) > n:
             video_ids = video_ids[:n]
         videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
-        return videos
+        return self.playlist_result(videos, query)
 
 
-class GoogleSearchIE(InfoExtractor):
+class GoogleSearchIE(SearchInfoExtractor):
     """Information Extractor for Google Video search queries."""
-    _VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
-    _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
-    _VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)'
-    _MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"'
-    _max_google_results = 1000
+    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
+    _MAX_RESULTS = 1000
     IE_NAME = u'video.google:search'
+    _SEARCH_KEY = 'gvsearch'
 
-    def report_download_page(self, query, pagenum):
-        """Report attempt to download playlist page with given number."""
-        query = query.decode(preferredencoding())
-        self.to_screen(u'query "%s": Downloading page %s' % (query, pagenum))
-
-    def _real_extract(self, query):
-        mobj = re.match(self._VALID_URL, query)
-        if mobj is None:
-            self._downloader.report_error(u'invalid search query "%s"' % query)
-            return
-
-        prefix, query = query.split(':')
-        prefix = prefix[8:]
-        query = query.encode('utf-8')
-        if prefix == '':
-            self._download_n_results(query, 1)
-            return
-        elif prefix == 'all':
-            self._download_n_results(query, self._max_google_results)
-            return
-        else:
-            try:
-                n = int(prefix)
-                if n <= 0:
-                    self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
-                    return
-                elif n > self._max_google_results:
-                    self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
-                    n = self._max_google_results
-                self._download_n_results(query, n)
-                return
-            except ValueError: # parsing prefix as integer fails
-                self._download_n_results(query, 1)
-                return
-
-    def _download_n_results(self, query, n):
-        """Downloads a specified number of results for a query"""
-
-        video_ids = []
-        pagenum = 0
+    def _get_n_results(self, query, n):
+        """Get a specified number of results for a query"""
 
-        while True:
-            self.report_download_page(query, pagenum)
-            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum*10)
-            request = compat_urllib_request.Request(result_url)
-            try:
-                page = compat_urllib_request.urlopen(request).read()
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
-                return
+        res = {
+            '_type': 'playlist',
+            'id': query,
+            'entries': []
+        }
 
-            # Extract video identifiers
-            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-                video_id = mobj.group(1)
-                if video_id not in video_ids:
-                    video_ids.append(video_id)
-                    if len(video_ids) == n:
-                        # Specified n videos reached
-                        for id in video_ids:
-                            self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
-                        return
-
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
-                for id in video_ids:
-                    self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
-                return
+        for pagenum in itertools.count(1):
+            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
+            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
+                                             note='Downloading result page ' + str(pagenum))
 
-            pagenum = pagenum + 1
+            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
+                e = {
+                    '_type': 'url',
+                    'url': mobj.group(1)
+                }
+                res['entries'].append(e)
 
+            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
+                return res
 
-class YahooSearchIE(InfoExtractor):
+class YahooSearchIE(SearchInfoExtractor):
     """Information Extractor for Yahoo! Video search queries."""
 
-    _WORKING = False
-    _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
-    _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
-    _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
-    _MORE_PAGES_INDICATOR = r'\s*Next'
-    _max_yahoo_results = 1000
-    IE_NAME = u'video.yahoo:search'
-
-    def report_download_page(self, query, pagenum):
-        """Report attempt to download playlist page with given number."""
-        query = query.decode(preferredencoding())
-        self.to_screen(u'query "%s": Downloading page %s' % (query, pagenum))
-
-    def _real_extract(self, query):
-        mobj = re.match(self._VALID_URL, query)
-        if mobj is None:
-            self._downloader.report_error(u'invalid search query "%s"' % query)
-            return
-
-        prefix, query = query.split(':')
-        prefix = prefix[8:]
-        query = query.encode('utf-8')
-        if prefix == '':
-            self._download_n_results(query, 1)
-            return
-        elif prefix == 'all':
-            self._download_n_results(query, self._max_yahoo_results)
-            return
-        else:
-            try:
-                n = int(prefix)
-                if n <= 0:
-                    self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
-                    return
-                elif n > self._max_yahoo_results:
-                    self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
-                    n = self._max_yahoo_results
-                self._download_n_results(query, n)
-                return
-            except ValueError: # parsing prefix as integer fails
-                self._download_n_results(query, 1)
-                return
+    _MAX_RESULTS = 1000
+    IE_NAME = u'screen.yahoo:search'
+    _SEARCH_KEY = 'yvsearch'
 
-    def _download_n_results(self, query, n):
-        """Downloads a specified number of results for a query"""
-
-        video_ids = []
-        already_seen = set()
-        pagenum = 1
-
-        while True:
-            self.report_download_page(query, pagenum)
-            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum)
-            request = compat_urllib_request.Request(result_url)
-            try:
-                page = compat_urllib_request.urlopen(request).read()
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
-                return
+    def _get_n_results(self, query, n):
+        """Get a specified number of results for a query"""
 
-            # Extract video identifiers
-            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-                video_id = mobj.group(1)
-                if video_id not in already_seen:
-                    video_ids.append(video_id)
-                    already_seen.add(video_id)
-                    if len(video_ids) == n:
-                        # Specified n videos reached
-                        for id in video_ids:
-                            self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
-                        return
-
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
-                for id in video_ids:
-                    self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
-                return
+        res = {
+            '_type': 'playlist',
+            'id': query,
+            'entries': []
+        }
+        for pagenum in itertools.count(0):
+            result_url = u'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
+            webpage = self._download_webpage(result_url, query,
+                                             note='Downloading results page '+str(pagenum+1))
+            info = json.loads(webpage)
+            m = info[u'm']
+            results = info[u'results']
+
+            for (i, r) in enumerate(results):
+                if (pagenum * 30) + i >= n:
+                    break
+                mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
+                e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
+                res['entries'].append(e)
+            if (pagenum * 30 + i >= n) or (m[u'last'] >= (m[u'total'] - 1)):
+                break
 
-            pagenum = pagenum + 1
+        return res
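For reference, and purely as example query strings, all three search extractors now accept the shared prefix syntax defined by SearchInfoExtractor:

    # an empty prefix means one result, a number means that many results,
    # and 'all' means up to _MAX_RESULTS
    assert YoutubeSearchIE.suitable('ytsearch5:funny cats')
    assert GoogleSearchIE.suitable('gvsearch:python talk')
    assert YahooSearchIE.suitable('yvsearchall:keyboard cat')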
 
 
 class YoutubePlaylistIE(InfoExtractor):
@@ -1684,8 +1517,7 @@ class YoutubePlaylistIE(InfoExtractor):
         # Extract playlist id
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            self._downloader.report_error(u'invalid url: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         # Download playlist videos from API
         playlist_id = mobj.group(1) or mobj.group(2)
@@ -1699,12 +1531,10 @@ class YoutubePlaylistIE(InfoExtractor):
             try:
                 response = json.loads(page)
             except ValueError as err:
-                self._downloader.report_error(u'Invalid JSON in API response: ' + compat_str(err))
-                return
+                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
 
             if 'feed' not in response:
-                self._downloader.report_error(u'Got a malformed response from YouTube API')
-                return
+                raise ExtractorError(u'Got a malformed response from YouTube API')
             playlist_title = response['feed']['title']['$t']
             if 'entry' not in response['feed']:
                 # Number of videos is a multiple of self._MAX_RESULTS
@@ -1744,8 +1574,7 @@ class YoutubeChannelIE(InfoExtractor):
         # Extract channel id
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid url: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         # Download channel page
         channel_id = mobj.group(1)
@@ -1798,8 +1627,7 @@ class YoutubeUserIE(InfoExtractor):
         # Extract username
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid url: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         username = mobj.group(1)
 
@@ -1854,8 +1682,7 @@ class BlipTVUserIE(InfoExtractor):
         # Extract username
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid url: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         username = mobj.group(1)
 
@@ -1921,8 +1748,7 @@ class DepositFilesIE(InfoExtractor):
             self.report_download_webpage(file_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'Unable to retrieve file webpage: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to retrieve file webpage: %s' % compat_str(err))
 
         # Search for the real file URL
         mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
@@ -1931,10 +1757,9 @@ class DepositFilesIE(InfoExtractor):
             mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
             if (mobj is not None) and (mobj.group(1) is not None):
                 restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
-                self._downloader.report_error(u'%s' % restriction_message)
+                raise ExtractorError(u'%s' % restriction_message)
             else:
-                self._downloader.report_error(u'unable to extract download URL from: %s' % url)
-            return
+                raise ExtractorError(u'Unable to extract download URL from: %s' % url)
 
         file_url = mobj.group(1)
         file_extension = os.path.splitext(file_url)[1][1:]
@@ -1942,8 +1767,7 @@ class DepositFilesIE(InfoExtractor):
         # Search for file title
         mobj = re.search(r'<b title="(.*?)">', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract title')
-            return
+            raise ExtractorError(u'Unable to extract title')
         file_title = mobj.group(1).decode('utf-8')
 
         return [{
@@ -2015,8 +1839,7 @@ class FacebookIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
         video_id = mobj.group('ID')
 
         url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
@@ -2069,8 +1892,7 @@ class BlipTVIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         urlp = compat_urllib_parse_urlparse(url)
         if urlp.path.startswith('/play/'):
@@ -2116,8 +1938,7 @@ class BlipTVIE(InfoExtractor):
                 json_code_bytes = urlh.read()
                 json_code = json_code_bytes.decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.report_error(u'unable to read video info webpage: %s' % compat_str(err))
-                return
+                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))
 
             try:
                 json_data = json.loads(json_code)
@@ -2147,8 +1968,7 @@ class BlipTVIE(InfoExtractor):
                     'user_agent': 'iTunes/10.6.1',
                 }
             except (ValueError,KeyError) as err:
-                self._downloader.report_error(u'unable to parse video information: %s' % repr(err))
-                return
+                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))
 
         return [info]
 
@@ -2162,8 +1982,7 @@ class MyVideoIE(InfoExtractor):
     def _real_extract(self,url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._download.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_id = mobj.group(1)
 
@@ -2175,14 +1994,12 @@ class MyVideoIE(InfoExtractor):
         mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\'',
                  webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract media URL')
-            return
+            raise ExtractorError(u'Unable to extract media URL')
         video_url = mobj.group(1) + ('/%s.flv' % video_id)
 
         mobj = re.search('<title>([^<]+)</title>', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract title')
-            return
+            raise ExtractorError(u'Unable to extract title')
 
         video_title = mobj.group(1)
 
@@ -2245,8 +2062,7 @@ class ComedyCentralIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         if mobj.group('shortname'):
             if mobj.group('shortname') in ('tds', 'thedailyshow'):
@@ -2373,8 +2189,7 @@ class EscapistIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
         showName = mobj.group('showname')
         videoId = mobj.group('episode')
 
@@ -2400,8 +2215,7 @@ class EscapistIE(InfoExtractor):
         try:
             config = json.loads(configJSON)
         except (ValueError,) as err:
-            self._downloader.report_error(u'Invalid JSON in configuration file: ' + compat_str(err))
-            return
+            raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err))
 
         playlist = config['playlist']
         videoUrl = playlist[1]['url']
@@ -2434,8 +2248,7 @@ class CollegeHumorIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
         video_id = mobj.group('videoid')
 
         info = {
@@ -2449,8 +2262,7 @@ class CollegeHumorIE(InfoExtractor):
         try:
             metaXml = compat_urllib_request.urlopen(xmlUrl).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
 
         mdoc = xml.etree.ElementTree.fromstring(metaXml)
         try:
@@ -2460,16 +2272,14 @@ class CollegeHumorIE(InfoExtractor):
             info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
             manifest_url = videoNode.findall('./file')[0].text
         except IndexError:
-            self._downloader.report_error(u'Invalid metadata XML file')
-            return
+            raise ExtractorError(u'Invalid metadata XML file')
 
         manifest_url += '?hdcore=2.10.3'
         self.report_manifest(video_id)
         try:
             manifestXml = compat_urllib_request.urlopen(manifest_url).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
 
         adoc = xml.etree.ElementTree.fromstring(manifestXml)
         try:
@@ -2477,8 +2287,7 @@ class CollegeHumorIE(InfoExtractor):
             node_id = media_node.attrib['url']
             video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
         except IndexError as err:
-            self._downloader.report_error(u'Invalid manifest file')
-            return
+            raise ExtractorError(u'Invalid manifest file')
 
         url_pr = compat_urllib_parse_urlparse(manifest_url)
         url = url_pr.scheme + '://' + url_pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1'
@@ -2497,8 +2306,7 @@ class XVideosIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
         video_id = mobj.group(1)
 
         webpage = self._download_webpage(url, video_id)
@@ -2509,24 +2317,21 @@ class XVideosIE(InfoExtractor):
         # Extract video URL
         mobj = re.search(r'flv_url=(.+?)&', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract video url')
-            return
+            raise ExtractorError(u'Unable to extract video url')
         video_url = compat_urllib_parse.unquote(mobj.group(1))
 
 
         # Extract title
         mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract video title')
-            return
+            raise ExtractorError(u'Unable to extract video title')
         video_title = mobj.group(1)
 
 
         # Extract video thumbnail
         mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract video thumbnail')
-            return
+            raise ExtractorError(u'Unable to extract video thumbnail')
         video_thumbnail = mobj.group(0)
 
         info = {
@@ -2562,8 +2367,7 @@ class SoundcloudIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         # extract uploader (which is in the url)
         uploader = mobj.group(1)
@@ -2620,8 +2424,7 @@ class SoundcloudSetIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         # extract uploader (which is in the url)
         uploader = mobj.group(1)
@@ -2673,8 +2476,7 @@ class InfoQIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         webpage = self._download_webpage(url, video_id=url)
         self.report_extraction(url)
@@ -2682,16 +2484,14 @@ class InfoQIE(InfoExtractor):
         # Extract video URL
         mobj = re.search(r"jsclassref ?= ?'([^']*)'", webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract video url')
-            return
+            raise ExtractorError(u'Unable to extract video url')
         real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
         video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
 
         # Extract title
         mobj = re.search(r'contentTitle = "(.*?)";', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract video title')
-            return
+            raise ExtractorError(u'Unable to extract video title')
         video_title = mobj.group(1)
 
         # Extract description
@@ -2766,8 +2566,7 @@ class MixcloudIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
         # extract uploader & filename from url
         uploader = mobj.group(1).decode('utf-8')
         file_id = uploader + "-" + mobj.group(2).decode('utf-8')
@@ -2780,8 +2579,7 @@ class MixcloudIE(InfoExtractor):
             self.report_download_json(file_url)
             jsonData = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'Unable to retrieve file: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to retrieve file: %s' % compat_str(err))
 
         # parse JSON
         json_data = json.loads(jsonData)
@@ -2804,8 +2602,7 @@ class MixcloudIE(InfoExtractor):
                     break # got it!
         else:
             if req_format not in formats:
-                self._downloader.report_error(u'format is not available')
-                return
+                raise ExtractorError(u'Format is not available')
 
             url_list = self.get_urls(formats, req_format)
             file_url = self.check_urls(url_list)
@@ -2850,15 +2647,13 @@ class StanfordOpenClassroomIE(InfoExtractor):
             try:
                 metaXml = compat_urllib_request.urlopen(xmlUrl).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
-                return
+                raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
             mdoc = xml.etree.ElementTree.fromstring(metaXml)
             try:
                 info['title'] = mdoc.findall('./title')[0].text
                 info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
             except IndexError:
-                self._downloader.report_error(u'Invalid metadata XML file')
-                return
+                raise ExtractorError(u'Invalid metadata XML file')
             info['ext'] = info['url'].rpartition('.')[2]
             return [info]
         elif mobj.group('course'): # A course page
@@ -2909,8 +2704,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
             try:
                 rootpage = compat_urllib_request.urlopen(rootURL).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.report_error(u'unable to download course info page: ' + compat_str(err))
-                return
+                raise ExtractorError(u'Unable to download course info page: ' + compat_str(err))
 
             info['title'] = info['id']
 
@@ -2937,8 +2731,7 @@ class MTVIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
         if not mobj.group('proto'):
             url = 'http://' + url
         video_id = mobj.group('videoid')
@@ -2947,26 +2740,22 @@ class MTVIE(InfoExtractor):
 
         mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract song name')
-            return
+            raise ExtractorError(u'Unable to extract song name')
         song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
         mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract performer')
-            return
+            raise ExtractorError(u'Unable to extract performer')
         performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
         video_title = performer + ' - ' + song_name
 
         mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to mtvn_uri')
-            return
+            raise ExtractorError(u'Unable to extract mtvn_uri')
         mtvn_uri = mobj.group(1)
 
         mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract content id')
-            return
+            raise ExtractorError(u'Unable to extract content id')
         content_id = mobj.group(1)
 
         videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri
@@ -2975,8 +2764,7 @@ class MTVIE(InfoExtractor):
         try:
             metadataXml = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_error(u'unable to download video metadata: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to download video metadata: %s' % compat_str(err))
 
         mdoc = xml.etree.ElementTree.fromstring(metadataXml)
         renditions = mdoc.findall('.//rendition')
@@ -2989,8 +2777,7 @@ class MTVIE(InfoExtractor):
             format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate']
             video_url = rendition.find('./src').text
         except KeyError:
-            self._downloader.report_error('Invalid rendition field.')
-            return
+            raise ExtractorError(u'Invalid rendition field.')
 
         info = {
             'id': video_id,
@@ -3039,8 +2826,7 @@ class YoukuIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
         video_id = mobj.group('ID')
 
         info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
@@ -3074,8 +2860,7 @@ class YoukuIE(InfoExtractor):
             fileid = config['data'][0]['streamfileids'][format]
             keys = [s['k'] for s in config['data'][0]['segs'][format]]
         except (UnicodeDecodeError, ValueError, KeyError):
-            self._downloader.report_error(u'unable to extract info section')
-            return
+            raise ExtractorError(u'Unable to extract info section')
 
         files_info=[]
         sid = self._gen_sid()
@@ -3113,8 +2898,7 @@ class XNXXIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
         video_id = mobj.group(1)
 
         # Get webpage content
@@ -3122,20 +2906,17 @@ class XNXXIE(InfoExtractor):
 
         result = re.search(self.VIDEO_URL_RE, webpage)
         if result is None:
-            self._downloader.report_error(u'unable to extract video url')
-            return
+            raise ExtractorError(u'Unable to extract video url')
         video_url = compat_urllib_parse.unquote(result.group(1))
 
         result = re.search(self.VIDEO_TITLE_RE, webpage)
         if result is None:
-            self._downloader.report_error(u'unable to extract video title')
-            return
+            raise ExtractorError(u'Unable to extract video title')
         video_title = result.group(1)
 
         result = re.search(self.VIDEO_THUMB_RE, webpage)
         if result is None:
-            self._downloader.report_error(u'unable to extract video thumbnail')
-            return
+            raise ExtractorError(u'Unable to extract video thumbnail')
         video_thumbnail = result.group(1)
 
         return [{
@@ -3180,8 +2961,7 @@ class GooglePlusIE(InfoExtractor):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'Invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         post_url = mobj.group(0)
         video_id = mobj.group(1)
@@ -3224,7 +3004,7 @@ class GooglePlusIE(InfoExtractor):
         pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
         mobj = re.search(pattern, webpage)
         if mobj is None:
-            self._downloader.report_error(u'unable to extract video page URL')
+            raise ExtractorError(u'Unable to extract video page URL')
 
         video_page = mobj.group(1)
         webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
@@ -3236,7 +3016,7 @@ class GooglePlusIE(InfoExtractor):
         pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
         mobj = re.findall(pattern, webpage)
         if len(mobj) == 0:
-            self._downloader.report_error(u'unable to extract video links')
+            raise ExtractorError(u'Unable to extract video links')
 
         # Sort in resolution
         links = sorted(mobj)
@@ -3268,8 +3048,7 @@ class NBAIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_id = mobj.group(1)
         if video_id.endswith('/index.html'):
@@ -3368,10 +3147,6 @@ class JustinTVIE(InfoExtractor):
             if not m:
                 raise ExtractorError(u'Cannot find archive of a chapter')
             archive_id = m.group(1)
-            m = re.search(r"<h2 class='js-title'>([^<]*)</h2>", webpage)
-            if not m:
-                raise ExtractorError(u'Cannot find chapter title')
-            video_title = m.group(1)
 
             api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
             chapter_info_xml = self._download_webpage(api, chapter_id,
@@ -3387,16 +3162,31 @@ class JustinTVIE(InfoExtractor):
             video_url = a.find('./video_file_url').text
             video_ext = video_url.rpartition('.')[2] or u'flv'
 
+            chapter_api_url = u'https://api.twitch.tv/kraken/videos/c' + chapter_id
+            chapter_info_json = self._download_webpage(chapter_api_url, u'c' + chapter_id,
+                                   note='Downloading chapter metadata',
+                                   errnote='Download of chapter metadata failed')
+            chapter_info = json.loads(chapter_info_json)
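+            # The chapter metadata JSON is expected to carry the fields used below:
+            # title, preview, description and channel (with display_name/name).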
+
+            bracket_start = int(doc.find('.//bracket_start').text)
+            bracket_end = int(doc.find('.//bracket_end').text)
+
             # TODO determine start (and probably fix up file)
             #  youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
-            #video_url += u'?start=' + a.find('./start_timestamp').text
-            self._downloader.report_warning(u'Chapter detected, but we do not know how to calculate start position. Downloading the whole file ... (See https://github.com/rg3/youtube-dl/issues/810 )')
+            #video_url += u'?start=' + TODO:start_timestamp
+            # bracket_start is 13290, but we want 51670615
+            self._downloader.report_warning(u'Chapter detected, but we can only download the whole file for now. '
+                                            u'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
 
             info = {
                 'id': u'c' + chapter_id,
                 'url': video_url,
                 'ext': video_ext,
-                'title': video_title,
+                'title': chapter_info['title'],
+                'thumbnail': chapter_info['preview'],
+                'description': chapter_info['description'],
+                'uploader': chapter_info['channel']['display_name'],
+                'uploader_id': chapter_info['channel']['name'],
             }
             return [info]
         else:
@@ -3432,14 +3222,14 @@ class FunnyOrDieIE(InfoExtractor):
 
         m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
         if not m:
-            self._downloader.report_error(u'unable to find video information')
+            raise ExtractorError(u'Unable to find video information')
         video_url = unescapeHTML(m.group('url'))
 
         m = re.search(r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>", webpage, flags=re.DOTALL)
         if not m:
             m = re.search(r'<title>(?P<title>[^<]+?)</title>', webpage)
             if not m:
-                self._downloader.report_error(u'Cannot find video title')
+                raise ExtractorError(u'Cannot find video title')
         title = clean_html(m.group('title'))
 
         m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
@@ -3491,7 +3281,7 @@ class SteamIE(InfoExtractor):
             video_url = vid.group('videoURL')
             video_thumb = thumb.group('thumbnail')
             if not video_url:
-                self._downloader.report_error(u'Cannot find video url for %s' % video_id)
+                raise ExtractorError(u'Cannot find video url for %s' % video_id)
             info = {
                 'id':video_id,
                 'url':video_url,
@@ -3511,18 +3301,26 @@ class UstreamIE(InfoExtractor):
         video_id = m.group('videoID')
         video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
         webpage = self._download_webpage(url, video_id)
-        m = re.search(r'data-title="(?P<title>.+)"',webpage)
-        title = m.group('title')
-        m = re.search(r'<a class="state" data-content-type="channel" data-content-id="(?P<uploader>\d+)"',webpage)
-        uploader = m.group('uploader')
+        self.report_extraction(video_id)
+        try:
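+            # A failed match leaves m as None, so m.group() raises AttributeError,
+            # which is caught below and reported as a single extraction error.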
+            m = re.search(r'data-title="(?P<title>.+)"',webpage)
+            title = m.group('title')
+            m = re.search(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
+                          webpage, re.DOTALL)
+            uploader = unescapeHTML(m.group('uploader').strip())
+            m = re.search(r'<link rel="image_src" href="(?P<thumb>.*?)"', webpage)
+            thumb = m.group('thumb')
+        except AttributeError:
+            raise ExtractorError(u'Unable to extract info')
         info = {
                 'id':video_id,
                 'url':video_url,
                 'ext': 'flv',
                 'title': title,
-                'uploader': uploader
+                'uploader': uploader,
+                'thumbnail': thumb,
                   }
-        return [info]
+        return info
 
 class WorldStarHipHopIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
@@ -3630,8 +3428,7 @@ class YouPornIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_id = mobj.group('videoid')
 
@@ -3722,8 +3519,7 @@ class YouPornIE(InfoExtractor):
         else:
             format = self._specific( req_format, formats )
             if result is None:
-                self._downloader.report_error(u'requested format not available')
-                return
+                raise ExtractorError(u'Requested format not available')
             return [format]
 
 
@@ -3735,8 +3531,7 @@ class PornotubeIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_id = mobj.group('videoid')
         video_title = mobj.group('title')
@@ -3748,16 +3543,14 @@ class PornotubeIE(InfoExtractor):
         VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
         result = re.search(VIDEO_URL_RE, webpage)
         if result is None:
-            self._downloader.report_error(u'unable to extract video url')
-            return
+            raise ExtractorError(u'Unable to extract video url')
         video_url = compat_urllib_parse.unquote(result.group('url'))
 
         #Get the uploaded date
         VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
         result = re.search(VIDEO_UPLOADED_RE, webpage)
         if result is None:
-            self._downloader.report_error(u'unable to extract video title')
-            return
+            raise ExtractorError(u'Unable to extract video upload date')
         upload_date = unified_strdate(result.group('date'))
 
         info = {'id': video_id,
@@ -3777,8 +3570,7 @@ class YouJizzIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_id = mobj.group('videoid')
 
@@ -3984,14 +3776,12 @@ class MySpassIE(InfoExtractor):
         # extract values from metadata
         url_flv_el = metadata.find('url_flv')
         if url_flv_el is None:
-            self._downloader.report_error(u'unable to extract download url')
-            return
+            raise ExtractorError(u'Unable to extract download url')
         video_url = url_flv_el.text
         extension = os.path.splitext(video_url)[1][1:]
         title_el = metadata.find('title')
         if title_el is None:
-            self._downloader.report_error(u'unable to extract title')
-            return
+            raise ExtractorError(u'Unable to extract title')
         title = title_el.text
         format_id_el = metadata.find('format_id')
         if format_id_el is None:
@@ -4060,8 +3850,7 @@ class LiveLeakIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.report_error(u'invalid URL: %s' % url)
-            return
+            raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_id = mobj.group('video_id')
 
@@ -4069,13 +3858,12 @@ class LiveLeakIE(InfoExtractor):
 
         m = re.search(r'file: "(.*?)",', webpage)
         if not m:
-            self._downloader.report_error(u'unable to find video url')
-            return
+            raise ExtractorError(u'Unable to find video url')
         video_url = m.group(1)
 
         m = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', webpage)
         if not m:
-            self._downloader.report_error(u'Cannot find video title')
+            raise ExtractorError(u'Cannot find video title')
         title = unescapeHTML(m.group('title')).replace('LiveLeak.com -', '').strip()
 
         m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
@@ -4122,8 +3910,7 @@ class ARDIE(InfoExtractor):
         streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
         if not streams:
             assert '"fsk"' in html
-            self._downloader.report_error(u'this video is only available after 8:00 pm')
-            return
+            raise ExtractorError(u'This video is only available after 8:00 pm')
 
         # choose default media type and highest quality for now
         stream = max([s for s in streams if int(s["media_type"]) == 0],
@@ -4185,8 +3972,8 @@ class BandcampIE(InfoExtractor):
         # We get the link to the free download page
         m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
         if m_download is None:
-            self._downloader.report_error('No free songs founded')
-            return
+            raise ExtractorError(u'No free songs found')
+
         download_link = m_download.group(1)
         id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$', 
                        webpage, re.MULTILINE|re.DOTALL).group('id')
@@ -4251,7 +4038,71 @@ class RedTubeIE(InfoExtractor):
             'ext':      video_extension,
             'title':    video_title,
         }]
+        
+class InaIE(InfoExtractor):
+    """Information Extractor for Ina.fr"""
+    _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I[0-9]+)/.*'
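+    # e.g. http://www.ina.fr/video/I00000000/some-title.html (illustrative URL; video ids start with 'I')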
+
+    def _real_extract(self,url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        mrss_url='http://player.ina.fr/notices/%s.mrss' % video_id
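+        # Ina publishes per-video metadata as an MRSS feed; the MP4 URL and title are taken from it below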
+        video_extension = 'mp4'
+        webpage = self._download_webpage(mrss_url, video_id)
+
+        mobj = re.search(r'<media:player url="(?P<mp4url>http://mp4.ina.fr/[^"]+\.mp4)', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract media URL')
+        video_url = mobj.group(1)
+
+        mobj = re.search(r'<title><!\[CDATA\[(?P<titre>.*?)]]></title>', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract title')
+        video_title = mobj.group(1)
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'ext':      video_extension,
+            'title':    video_title,
+        }]
+
+class HowcastIE(InfoExtractor):
+    """Information Extractor for Ina.fr"""
+    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>[\d]+)'
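+    # e.g. http://www.howcast.com/videos/1234567 (illustrative URL; the numeric id is captured)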
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        webpage_url = 'http://www.howcast.com/videos/' + video_id
+        webpage = self._download_webpage(webpage_url, video_id)
+
+        mobj = re.search(r'\'file\': "(http://mobile-media\.howcast\.com/\d+\.mp4)"', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract video URL')
+        video_url = mobj.group(1)
 
+        mobj = re.search(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract title')
+        video_title = mobj.group(1) or mobj.group(2)
+
+        mobj = re.search(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'', webpage)
+        if mobj is None:
+            self._downloader.report_warning(u'unable to extract description')
+            video_description = None
+        else:
+            video_description = mobj.group(1) or mobj.group(2)
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'ext':      'mp4',
+            'title':    video_title,
+            'description': video_description,
+        }]
 
 def gen_extractors():
     """ Return a list of an instance of every supported extractor.
@@ -4309,6 +4160,8 @@ def gen_extractors():
         TumblrIE(),
         BandcampIE(),
         RedTubeIE(),
+        InaIE(),
+        HowcastIE(),
         GenericIE()
     ]