Merge branch 'extract_info_rewrite'
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 8be2f160c1092cbfd0593431dfaaade6268a4827..ae36558d75839f68facb72300efb5b4c22bcd809 100755
@@ -48,7 +48,7 @@ class InfoExtractor(object):
     uploader_id:    Nickname or id of the video uploader.
     location:       Physical location of the video.
     player_url:     SWF Player URL (used for rtmpdump).
-    subtitles:      The .srt file contents.
+    subtitles:      The subtitle file contents.
     urlhandle:      [internal] The urlHandle to be used to download the file,
                     like returned by urllib.request.urlopen
 
@@ -115,7 +115,8 @@ class InfoExtractor(object):
         """ Returns the response handle """
         if note is None:
             note = u'Downloading video webpage'
-        self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
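+        # note=False disables the status message entirely (note=None still prints the default one)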
+        if note is not False:
+            self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
         try:
             return compat_urllib_request.urlopen(url_or_request)
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -126,8 +127,44 @@ class InfoExtractor(object):
     def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
         """ Returns the data of the page as a string """
         urlh = self._request_webpage(url_or_request, video_id, note, errnote)
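+        # Pick the page encoding from the Content-Type header (e.g.
+        # "text/html; charset=iso-8859-1"); fall back to UTF-8 when absent.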
+        content_type = urlh.headers.get('Content-Type', '')
+        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
+        if m:
+            encoding = m.group(1)
+        else:
+            encoding = 'utf-8'
         webpage_bytes = urlh.read()
-        return webpage_bytes.decode('utf-8', 'replace')
+        if self._downloader.params.get('dump_intermediate_pages', False):
+            try:
+                url = url_or_request.get_full_url()
+            except AttributeError:
+                url = url_or_request
+            self._downloader.to_screen(u'Dumping request to ' + url)
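+            # base64-encode the raw bytes so the dump is terminal-safe regardless of the page encoding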
+            dump = base64.b64encode(webpage_bytes).decode('ascii')
+            self._downloader.to_screen(dump)
+        return webpage_bytes.decode(encoding, 'replace')
+
+    # Methods for following #608
+    # They set the correct value of the '_type' key
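+    #
+    # A hypothetical extractor could return these results from _real_extract, e.g.:
+    #     entries = [self.url_result(u) for u in some_video_urls]
+    #     return [self.playlist_result(entries, playlist_id)]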
+    def video_result(self, video_info):
+        """Returns a video"""
+        video_info['_type'] = 'video'
+        return video_info
+    def url_result(self, url, ie=None):
+        """Returns a url that points to a page that should be processed"""
+        # TODO: ie should be the class used for getting the info
+        video_info = {'_type': 'url',
+                      'url': url}
+        return video_info
+    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
+        """Returns a playlist"""
+        video_info = {'_type': 'playlist',
+                      'entries': entries}
+        if playlist_id:
+            video_info['id'] = playlist_id
+        if playlist_title:
+            video_info['title'] = playlist_title
+        return video_info
 
 
 class YoutubeIE(InfoExtractor):
@@ -218,7 +255,16 @@ class YoutubeIE(InfoExtractor):
 
     def report_video_subtitles_download(self, video_id):
         """Report attempt to download video info webpage."""
-        self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id)
+        self._downloader.to_screen(u'[youtube] %s: Checking available subtitles' % video_id)
+
+    def report_video_subtitles_request(self, video_id, sub_lang, format):
+        """Report attempt to download video info webpage."""
+        self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
+
+    def report_video_subtitles_available(self, video_id, sub_lang_list):
+        """Report available subtitles."""
+        sub_lang = ",".join(list(sub_lang_list.keys()))
+        self._downloader.to_screen(u'[youtube] %s: Available subtitles for video: %s' % (video_id, sub_lang))
 
     def report_information_extraction(self, video_id):
         """Report attempt to extract video information."""
@@ -232,55 +278,75 @@ class YoutubeIE(InfoExtractor):
         """Indicate the download will use the RTMP protocol."""
         self._downloader.to_screen(u'[youtube] RTMP download detected')
 
-    def _closed_captions_xml_to_srt(self, xml_string):
-        srt = ''
-        texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
-        # TODO parse xml instead of regex
-        for n, (start, dur_tag, dur, caption) in enumerate(texts):
-            if not dur: dur = '4'
-            start = float(start)
-            end = start + float(dur)
-            start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
-            end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
-            caption = unescapeHTML(caption)
-            caption = unescapeHTML(caption) # double cycle, intentional
-            srt += str(n+1) + '\n'
-            srt += start + ' --> ' + end + '\n'
-            srt += caption + '\n\n'
-        return srt
-
-    def _extract_subtitles(self, video_id):
+    def _get_available_subtitles(self, video_id):
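+        """
+        Return a dict mapping language codes to subtitle track names, or an
+        (error_message, None) tuple if the list could not be retrieved.
+        """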
         self.report_video_subtitles_download(video_id)
         request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
         try:
-            srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
-        srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
-        srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
-        if not srt_lang_list:
-            return (u'WARNING: video has no closed captions', None)
-        if self._downloader.params.get('subtitleslang', False):
-            srt_lang = self._downloader.params.get('subtitleslang')
-        elif 'en' in srt_lang_list:
-            srt_lang = 'en'
-        else:
-            srt_lang = list(srt_lang_list.keys())[0]
-        if not srt_lang in srt_lang_list:
-            return (u'WARNING: no closed captions found in the specified language', None)
+            return (u'unable to download video subtitles: %s' % compat_str(err), None)
+        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
+        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
+        if not sub_lang_list:
+            return (u'video doesn\'t have subtitles', None)
+        return sub_lang_list
+
+    def _list_available_subtitles(self, video_id):
+        sub_lang_list = self._get_available_subtitles(video_id)
+        if isinstance(sub_lang_list, tuple):  # an error occurred while fetching the list
+            self._downloader.report_error(sub_lang_list[0])
+            return
+        self.report_video_subtitles_available(video_id, sub_lang_list)
+
+    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
+        """
+        Return tuple:
+        (error_message, sub_lang, sub)
+        """
+        self.report_video_subtitles_request(video_id, sub_lang, format)
         params = compat_urllib_parse.urlencode({
-            'lang': srt_lang,
-            'name': srt_lang_list[srt_lang].encode('utf-8'),
+            'lang': sub_lang,
+            'name': sub_name,
             'v': video_id,
+            'fmt': format,
         })
         url = 'http://www.youtube.com/api/timedtext?' + params
         try:
-            srt_xml = compat_urllib_request.urlopen(url).read().decode('utf-8')
+            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
-        if not srt_xml:
-            return (u'WARNING: Did not fetch video subtitles', None)
-        return (None, self._closed_captions_xml_to_srt(srt_xml))
+            return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
+        if not sub:
+            return (u'Did not fetch video subtitles', None, None)
+        return (None, sub_lang, sub)
+
+    def _extract_subtitle(self, video_id):
+        """
+        Return a list containing a single tuple:
+        [(error_message, sub_lang, sub)]
+        """
+        sub_lang_list = self._get_available_subtitles(video_id)
+        sub_format = self._downloader.params.get('subtitlesformat')
+        if isinstance(sub_lang_list, tuple):  # an error occurred, the available subtitles could not be fetched
+            return [(sub_lang_list[0], None, None)]
+        if self._downloader.params.get('subtitleslang', False):
+            sub_lang = self._downloader.params.get('subtitleslang')
+        elif 'en' in sub_lang_list:
+            sub_lang = 'en'
+        else:
+            sub_lang = list(sub_lang_list.keys())[0]
+        if sub_lang not in sub_lang_list:
+            return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]
+
+        subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
+        return [subtitle]
+
+    def _extract_all_subtitles(self, video_id):
+        sub_lang_list = self._get_available_subtitles(video_id)
+        sub_format = self._downloader.params.get('subtitlesformat')
+        if isinstance(sub_lang_list, tuple):  # an error occurred, the available subtitles could not be fetched
+            return [(sub_lang_list[0], None, None)]
+        subtitles = []
+        for sub_lang in sub_lang_list:
+            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
+            subtitles.append(subtitle)
+        return subtitles
 
     def _print_formats(self, formats):
         print('Available formats:')
@@ -388,13 +454,13 @@ class YoutubeIE(InfoExtractor):
             self.report_age_confirmation()
             age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
             return
 
     def _extract_id(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group(2)
         return video_id
@@ -413,7 +479,7 @@ class YoutubeIE(InfoExtractor):
         try:
             video_webpage_bytes = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
             return
 
         video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
@@ -428,28 +494,24 @@ class YoutubeIE(InfoExtractor):
         # Get video info
         self.report_video_info_webpage_download(video_id)
         for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
-            video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+            video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                     % (video_id, el_type))
-            request = compat_urllib_request.Request(video_info_url)
-            try:
-                video_info_webpage_bytes = compat_urllib_request.urlopen(request).read()
-                video_info_webpage = video_info_webpage_bytes.decode('utf-8', 'ignore')
-                video_info = compat_parse_qs(video_info_webpage)
-                if 'token' in video_info:
-                    break
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
-                return
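+            # note=False keeps the retry loop over the el_type values quiet on screen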
+            video_info_webpage = self._download_webpage(video_info_url, video_id,
+                                    note=False,
+                                    errnote='unable to download video info webpage')
+            video_info = compat_parse_qs(video_info_webpage)
+            if 'token' in video_info:
+                break
         if 'token' not in video_info:
             if 'reason' in video_info:
-                self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0])
+                self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0])
             else:
-                self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
+                self._downloader.report_error(u'"token" parameter not in video info for unknown reason')
             return
 
         # Check for "rental" videos
         if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
-            self._downloader.trouble(u'ERROR: "rental" videos not supported')
+            self._downloader.report_error(u'"rental" videos not supported')
             return
 
         # Start extracting information
@@ -457,7 +519,7 @@ class YoutubeIE(InfoExtractor):
 
         # uploader
         if 'author' not in video_info:
-            self._downloader.trouble(u'ERROR: unable to extract uploader name')
+            self._downloader.report_error(u'unable to extract uploader name')
             return
         video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
 
@@ -467,17 +529,17 @@ class YoutubeIE(InfoExtractor):
         if mobj is not None:
             video_uploader_id = mobj.group(1)
         else:
-            self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+            self._downloader.report_warning(u'unable to extract uploader nickname')
 
         # title
         if 'title' not in video_info:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
 
         # thumbnail image
         if 'thumbnail_url' not in video_info:
-            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+            self._downloader.report_warning(u'unable to extract video thumbnail')
             video_thumbnail = ''
         else:   # don't panic if we can't find it
             video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
@@ -501,15 +563,29 @@ class YoutubeIE(InfoExtractor):
         else:
             video_description = ''
 
-        # closed captions
+        # subtitles
         video_subtitles = None
+
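+        # writesubtitles fetches the requested (or default) language, allsubtitles
+        # fetches every available language, and listsubtitles only prints the list.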
         if self._downloader.params.get('writesubtitles', False):
-            (srt_error, video_subtitles) = self._extract_subtitles(video_id)
-            if srt_error:
-                self._downloader.trouble(srt_error)
+            video_subtitles = self._extract_subtitle(video_id)
+            if video_subtitles:
+                (sub_error, sub_lang, sub) = video_subtitles[0]
+                if sub_error:
+                    self._downloader.report_error(sub_error)
+
+        if self._downloader.params.get('allsubtitles', False):
+            video_subtitles = self._extract_all_subtitles(video_id)
+            for video_subtitle in video_subtitles:
+                (sub_error, sub_lang, sub) = video_subtitle
+                if sub_error:
+                    self._downloader.report_error(sub_error)
+
+        if self._downloader.params.get('listsubtitles', False):
+            self._list_available_subtitles(video_id)
+            return
 
         if 'length_seconds' not in video_info:
-            self._downloader.trouble(u'WARNING: unable to extract video duration')
+            self._downloader.report_warning(u'unable to extract video duration')
             video_duration = ''
         else:
             video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
@@ -537,7 +613,7 @@ class YoutubeIE(InfoExtractor):
                 format_list = available_formats
             existing_formats = [x for x in format_list if x in url_map]
             if len(existing_formats) == 0:
-                self._downloader.trouble(u'ERROR: no known formats available for video')
+                self._downloader.report_error(u'no known formats available for video')
                 return
             if self._downloader.params.get('listformats', None):
                 self._print_formats(existing_formats)
@@ -558,10 +634,10 @@ class YoutubeIE(InfoExtractor):
                         video_url_list = [(rf, url_map[rf])]
                         break
                 if video_url_list is None:
-                    self._downloader.trouble(u'ERROR: requested format not available')
+                    self._downloader.report_error(u'requested format not available')
                     return
         else:
-            self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
+            self._downloader.report_error(u'no conn or url_encoded_fmt_stream_map information found in video info')
             return
 
         results = []
@@ -624,7 +700,7 @@ class MetacafeIE(InfoExtractor):
             self.report_disclaimer()
             disclaimer = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to retrieve disclaimer: %s' % compat_str(err))
             return
 
         # Confirm age
@@ -637,14 +713,14 @@ class MetacafeIE(InfoExtractor):
             self.report_age_confirmation()
             disclaimer = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
             return
 
     def _real_extract(self, url):
         # Extract id and simplified title from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group(1)
@@ -652,8 +728,7 @@ class MetacafeIE(InfoExtractor):
         # Check if video comes from YouTube
         mobj2 = re.match(r'^yt-(.*)$', video_id)
         if mobj2 is not None:
-            self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
-            return
+            return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1))]
 
         # Retrieve video webpage to extract further information
         request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
@@ -661,7 +736,7 @@ class MetacafeIE(InfoExtractor):
             self.report_download_webpage(video_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'unable retrieve video webpage: %s' % compat_str(err))
             return
 
         # Extract URL, uploader and title from webpage
@@ -681,15 +756,15 @@ class MetacafeIE(InfoExtractor):
         else:
             mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
             if mobj is None:
-                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                self._downloader.report_error(u'unable to extract media URL')
                 return
             vardict = compat_parse_qs(mobj.group(1))
             if 'mediaData' not in vardict:
-                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                self._downloader.report_error(u'unable to extract media URL')
                 return
             mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
             if mobj is None:
-                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                self._downloader.report_error(u'unable to extract media URL')
                 return
             mediaURL = mobj.group(1).replace('\\/', '/')
             video_extension = mediaURL[-3:]
@@ -697,13 +772,13 @@ class MetacafeIE(InfoExtractor):
 
         mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         video_title = mobj.group(1).decode('utf-8')
 
         mobj = re.search(r'submitter=(.*?);', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+            self._downloader.report_error(u'unable to extract uploader nickname')
             return
         video_uploader = mobj.group(1)
 
@@ -735,7 +810,7 @@ class DailymotionIE(InfoExtractor):
         # Extract id and simplified title from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group(1).split('_')[0].split('?')[0]
@@ -751,7 +826,7 @@ class DailymotionIE(InfoExtractor):
         self.report_extraction(video_id)
         mobj = re.search(r'\s*var flashvars = (.*)', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            self._downloader.report_error(u'unable to extract media URL')
             return
         flashvars = compat_urllib_parse.unquote(mobj.group(1))
 
@@ -761,12 +836,12 @@ class DailymotionIE(InfoExtractor):
                 self._downloader.to_screen(u'[dailymotion] Using %s' % key)
                 break
         else:
-            self._downloader.trouble(u'ERROR: unable to extract video URL')
+            self._downloader.report_error(u'unable to extract video URL')
             return
 
         mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video URL')
+            self._downloader.report_error(u'unable to extract video URL')
             return
 
         video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
@@ -775,7 +850,7 @@ class DailymotionIE(InfoExtractor):
 
         mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         video_title = unescapeHTML(mobj.group('title'))
 
@@ -785,7 +860,7 @@ class DailymotionIE(InfoExtractor):
             # lookin for official user
             mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
             if mobj_official is None:
-                self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+                self._downloader.report_warning(u'unable to extract uploader nickname')
             else:
                 video_uploader = mobj_official.group(1)
         else:
@@ -827,7 +902,7 @@ class PhotobucketIE(InfoExtractor):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         video_id = mobj.group(1)
@@ -840,14 +915,14 @@ class PhotobucketIE(InfoExtractor):
             self.report_download_webpage(video_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
 
         # Extract URL, uploader, and title from webpage
         self.report_extraction(video_id)
         mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            self._downloader.report_error(u'unable to extract media URL')
             return
         mediaURL = compat_urllib_parse.unquote(mobj.group(1))
 
@@ -855,7 +930,7 @@ class PhotobucketIE(InfoExtractor):
 
         mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         video_title = mobj.group(1).decode('utf-8')
 
@@ -896,7 +971,7 @@ class YahooIE(InfoExtractor):
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         video_id = mobj.group(2)
@@ -909,18 +984,18 @@ class YahooIE(InfoExtractor):
             try:
                 webpage = compat_urllib_request.urlopen(request).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
                 return
 
             mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
             if mobj is None:
-                self._downloader.trouble(u'ERROR: Unable to extract id field')
+                self._downloader.report_error(u'Unable to extract id field')
                 return
             yahoo_id = mobj.group(1)
 
             mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
             if mobj is None:
-                self._downloader.trouble(u'ERROR: Unable to extract vid field')
+                self._downloader.report_error(u'Unable to extract vid field')
                 return
             yahoo_vid = mobj.group(1)
 
@@ -933,34 +1008,34 @@ class YahooIE(InfoExtractor):
             self.report_download_webpage(video_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
 
         # Extract uploader and title from webpage
         self.report_extraction(video_id)
         mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         video_title = mobj.group(1).decode('utf-8')
 
         mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video uploader')
+            self._downloader.report_error(u'unable to extract video uploader')
             return
         video_uploader = mobj.group(1).decode('utf-8')
 
         # Extract video thumbnail
         mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            self._downloader.report_error(u'unable to extract video thumbnail')
             return
         video_thumbnail = mobj.group(1).decode('utf-8')
 
         # Extract video description
         mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video description')
+            self._downloader.report_error(u'unable to extract video description')
             return
         video_description = mobj.group(1).decode('utf-8')
         if not video_description:
@@ -969,13 +1044,13 @@ class YahooIE(InfoExtractor):
         # Extract video height and width
         mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video height')
+            self._downloader.report_error(u'unable to extract video height')
             return
         yv_video_height = mobj.group(1)
 
         mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video width')
+            self._downloader.report_error(u'unable to extract video width')
             return
         yv_video_width = mobj.group(1)
 
@@ -991,13 +1066,13 @@ class YahooIE(InfoExtractor):
             self.report_download_webpage(video_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
 
         # Extract media URL from playlist XML
         mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Unable to extract media URL')
+            self._downloader.report_error(u'Unable to extract media URL')
             return
         video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
         video_url = unescapeHTML(video_url)
@@ -1036,7 +1111,7 @@ class VimeoIE(InfoExtractor):
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         video_id = mobj.group('id')
@@ -1052,7 +1127,7 @@ class VimeoIE(InfoExtractor):
             webpage_bytes = compat_urllib_request.urlopen(request).read()
             webpage = webpage_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
 
         # Now we begin extracting as much information as we can from what we
@@ -1065,7 +1140,7 @@ class VimeoIE(InfoExtractor):
             config = webpage.split(' = {config:')[1].split(',assets:')[0]
             config = json.loads(config)
         except:
-            self._downloader.trouble(u'ERROR: unable to extract info section')
+            self._downloader.report_error(u'unable to extract info section')
             return
 
         # Extract title
@@ -1081,7 +1156,7 @@ class VimeoIE(InfoExtractor):
         # Extract video description
         video_description = get_element_by_attribute("itemprop", "description", webpage)
         if video_description: video_description = clean_html(video_description)
-        else: video_description = ''
+        else: video_description = u''
 
         # Extract upload date
         video_upload_date = None
@@ -1115,7 +1190,7 @@ class VimeoIE(InfoExtractor):
                 self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
                 break
         else:
-            self._downloader.trouble(u'ERROR: no known codec found')
+            self._downloader.report_error(u'no known codec found')
             return
 
         video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
@@ -1159,10 +1234,10 @@ class ArteTvIE(InfoExtractor):
             self.report_download_webpage(url)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
         except ValueError as err:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
         return webpage
 
@@ -1172,7 +1247,7 @@ class ArteTvIE(InfoExtractor):
         info = {}
 
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         for (i, key, err) in matchTuples:
@@ -1281,7 +1356,8 @@ class GenericIE(InfoExtractor):
 
     def report_download_webpage(self, video_id):
         """Report webpage download."""
-        self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
+        if not self._downloader.params.get('test', False):
+            self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
         self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
 
     def report_extraction(self, video_id):
@@ -1293,7 +1369,7 @@ class GenericIE(InfoExtractor):
         self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
 
     def _test_redirect(self, url):
-        """Check if it is a redirect, like url shorteners, in case restart chain."""
+        """Check if it is a redirect, like url shorteners, in case return the new url."""
         class HeadRequest(compat_urllib_request.Request):
             def get_method(self):
                 return "HEAD"
@@ -1344,24 +1420,19 @@ class GenericIE(InfoExtractor):
             return False
 
         self.report_following_redirect(new_url)
-        self._downloader.download([new_url])
-        return True
+        return new_url
 
     def _real_extract(self, url):
-        if self._test_redirect(url): return
+        new_url = self._test_redirect(url)
+        if new_url: return [self.url_result(new_url)]
 
         video_id = url.split('/')[-1]
-        request = compat_urllib_request.Request(url)
         try:
-            self.report_download_webpage(video_id)
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
-            return
+            webpage = self._download_webpage(url, video_id)
         except ValueError as err:
             # since this is the last-resort InfoExtractor, if
             # this error is thrown, it'll be thrown here
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         self.report_extraction(video_id)
@@ -1374,13 +1445,13 @@ class GenericIE(InfoExtractor):
             # Broaden the search a little bit: JWPlayer JS loader
             mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         # It's possible that one of the regexes
         # matched, but returned an empty group:
         if mobj.group(1) is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         video_url = compat_urllib_parse.unquote(mobj.group(1))
@@ -1398,14 +1469,14 @@ class GenericIE(InfoExtractor):
         # and so on and so forth; it's just not practical
         mobj = re.search(r'<title>(.*)</title>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         video_title = mobj.group(1)
 
         # video uploader is domain name
         mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         video_uploader = mobj.group(1)
 
@@ -1437,7 +1508,7 @@ class YoutubeSearchIE(InfoExtractor):
     def _real_extract(self, query):
         mobj = re.match(self._VALID_URL, query)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            self._downloader.report_error(u'invalid search query "%s"' % query)
             return
 
         prefix, query = query.split(':')
@@ -1453,7 +1524,7 @@ class YoutubeSearchIE(InfoExtractor):
             try:
                 n = int(prefix)
                 if n <= 0:
-                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
                     return
                 elif n > self._max_youtube_results:
                     self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
@@ -1478,7 +1549,7 @@ class YoutubeSearchIE(InfoExtractor):
             try:
                 data = compat_urllib_request.urlopen(request).read().decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download API page: %s' % compat_str(err))
                 return
             api_response = json.loads(data)['data']
 
@@ -1519,7 +1590,7 @@ class GoogleSearchIE(InfoExtractor):
     def _real_extract(self, query):
         mobj = re.match(self._VALID_URL, query)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            self._downloader.report_error(u'invalid search query "%s"' % query)
             return
 
         prefix, query = query.split(':')
@@ -1535,7 +1606,7 @@ class GoogleSearchIE(InfoExtractor):
             try:
                 n = int(prefix)
                 if n <= 0:
-                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
                     return
                 elif n > self._max_google_results:
                     self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
@@ -1559,7 +1630,7 @@ class GoogleSearchIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(request).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             # Extract video identifiers
@@ -1603,7 +1674,7 @@ class YahooSearchIE(InfoExtractor):
     def _real_extract(self, query):
         mobj = re.match(self._VALID_URL, query)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            self._downloader.report_error(u'invalid search query "%s"' % query)
             return
 
         prefix, query = query.split(':')
@@ -1619,7 +1690,7 @@ class YahooSearchIE(InfoExtractor):
             try:
                 n = int(prefix)
                 if n <= 0:
-                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
                     return
                 elif n > self._max_yahoo_results:
                     self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
@@ -1644,7 +1715,7 @@ class YahooSearchIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(request).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             # Extract video identifiers
@@ -1677,9 +1748,7 @@ class YoutubePlaylistIE(InfoExtractor):
                         (?:
                            (?:course|view_play_list|my_playlists|artist|playlist|watch)
                            \? (?:.*?&)*? (?:p|a|list)=
-                        |  user/.*?/user/
                         |  p/
-                        |  user/.*?#[pg]/c/
                         )
                         ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
                         .*
@@ -1706,7 +1775,7 @@ class YoutubePlaylistIE(InfoExtractor):
         # Extract playlist id
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            self._downloader.report_error(u'invalid url: %s' % url)
             return
 
         # Download playlist videos from API
@@ -1721,18 +1790,22 @@ class YoutubePlaylistIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(url).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             try:
                 response = json.loads(page)
             except ValueError as err:
-                self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err))
+                self._downloader.report_error(u'Invalid JSON in API response: ' + compat_str(err))
                 return
 
-            if not 'feed' in response or not 'entry' in response['feed']:
-                self._downloader.trouble(u'ERROR: Got a malformed response from YouTube API')
+            if 'feed' not in response:
+                self._downloader.report_error(u'Got a malformed response from YouTube API')
                 return
+            if 'entry' not in response['feed']:
+                # Number of videos is a multiple of self._MAX_RESULTS
+                break
+
             videos += [ (entry['yt$position']['$t'], entry['content']['src'])
                         for entry in response['feed']['entry']
                         if 'content' in entry ]
@@ -1742,23 +1815,9 @@ class YoutubePlaylistIE(InfoExtractor):
             page_num += 1
 
         videos = [v[1] for v in sorted(videos)]
-        total = len(videos)
 
-        playliststart = self._downloader.params.get('playliststart', 1) - 1
-        playlistend = self._downloader.params.get('playlistend', -1)
-        if playlistend == -1:
-            videos = videos[playliststart:]
-        else:
-            videos = videos[playliststart:playlistend]
-
-        if len(videos) == total:
-            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
-        else:
-            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(videos)))
-
-        for video in videos:
-            self._downloader.download([video])
-        return
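+        # playliststart/playlistend slicing is presumably applied by the caller now that a playlist result is returned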
+        url_results = [self.url_result(url) for url in videos]
+        return [self.playlist_result(url_results, playlist_id)]
 
 
 class YoutubeChannelIE(InfoExtractor):
@@ -1777,7 +1836,7 @@ class YoutubeChannelIE(InfoExtractor):
         # Extract channel id
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            self._downloader.report_error(u'invalid url: %s' % url)
             return
 
         # Download channel pages
@@ -1792,7 +1851,7 @@ class YoutubeChannelIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(request).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             # Extract video identifiers
@@ -1808,9 +1867,9 @@ class YoutubeChannelIE(InfoExtractor):
 
         self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
 
-        for id in video_ids:
-            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
-        return
+        urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
+        url_entries = [self.url_result(url) for url in urls]
+        return [self.playlist_result(url_entries, channel_id)]
 
 
 class YoutubeUserIE(InfoExtractor):
@@ -1835,7 +1894,7 @@ class YoutubeUserIE(InfoExtractor):
         # Extract username
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            self._downloader.report_error(u'invalid url: %s' % url)
             return
 
         username = mobj.group(1)
@@ -1857,7 +1916,7 @@ class YoutubeUserIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(request).read().decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             # Extract video identifiers
@@ -1880,20 +1939,9 @@ class YoutubeUserIE(InfoExtractor):
 
             pagenum += 1
 
-        all_ids_count = len(video_ids)
-        playliststart = self._downloader.params.get('playliststart', 1) - 1
-        playlistend = self._downloader.params.get('playlistend', -1)
-
-        if playlistend == -1:
-            video_ids = video_ids[playliststart:]
-        else:
-            video_ids = video_ids[playliststart:playlistend]
-
-        self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" %
-                (username, all_ids_count, len(video_ids)))
-
-        for video_id in video_ids:
-            self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
+        urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
+        url_results = [self.url_result(url) for url in urls]
+        return [self.playlist_result(url_results, playlist_title=username)]
 
 
 class BlipTVUserIE(InfoExtractor):
@@ -1915,7 +1963,7 @@ class BlipTVUserIE(InfoExtractor):
         # Extract username
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            self._downloader.report_error(u'invalid url: %s' % url)
             return
 
         username = mobj.group(1)
@@ -1929,7 +1977,7 @@ class BlipTVUserIE(InfoExtractor):
             mobj = re.search(r'data-users-id="([^"]+)"', page)
             page_base = page_base % mobj.group(1)
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
             return
 
 
@@ -1948,7 +1996,7 @@ class BlipTVUserIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(request).read().decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % str(err))
                 return
 
             # Extract video identifiers
@@ -1971,20 +2019,12 @@ class BlipTVUserIE(InfoExtractor):
 
             pagenum += 1
 
-        all_ids_count = len(video_ids)
-        playliststart = self._downloader.params.get('playliststart', 1) - 1
-        playlistend = self._downloader.params.get('playlistend', -1)
-
-        if playlistend == -1:
-            video_ids = video_ids[playliststart:]
-        else:
-            video_ids = video_ids[playliststart:playlistend]
-
         self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
                 (self.IE_NAME, username, all_ids_count, len(video_ids)))
 
-        for video_id in video_ids:
-            self._downloader.download([u'http://blip.tv/'+video_id])
+        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
+        url_entries = [self.url_result(url) for url in urls]
+        return [self.playlist_result(url_entries, playlist_title=username)]
 
 
 class DepositFilesIE(InfoExtractor):
@@ -2012,7 +2052,7 @@ class DepositFilesIE(InfoExtractor):
             self.report_download_webpage(file_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve file webpage: %s' % compat_str(err))
             return
 
         # Search for the real file URL
@@ -2022,9 +2062,9 @@ class DepositFilesIE(InfoExtractor):
             mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
             if (mobj is not None) and (mobj.group(1) is not None):
                 restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
-                self._downloader.trouble(u'ERROR: %s' % restriction_message)
+                self._downloader.report_error(u'%s' % restriction_message)
             else:
-                self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
+                self._downloader.report_error(u'unable to extract download URL from: %s' % url)
             return
 
         file_url = mobj.group(1)
@@ -2033,7 +2073,7 @@ class DepositFilesIE(InfoExtractor):
         # Search for file title
         mobj = re.search(r'<b title="(.*?)">', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         file_title = mobj.group(1).decode('utf-8')
 
@@ -2106,14 +2146,14 @@ class FacebookIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group('ID')
 
         url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
         webpage = self._download_webpage(url, video_id)
 
-        BEFORE = '[["allowFullScreen","true"],["allowScriptAccess","always"],["salign","tl"],["scale","noscale"],["wmode","opaque"]].forEach(function(param) {swf.addParam(param[0], param[1]);});\n'
+        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
         AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
         m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
         if not m:
@@ -2121,12 +2161,14 @@ class FacebookIE(InfoExtractor):
         data = dict(json.loads(m.group(1)))
         params_raw = compat_urllib_parse.unquote(data['params'])
         params = json.loads(params_raw)
-        video_url = params['hd_src']
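+        # The video sources now live inside the first element of params['video_data']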
+        video_data = params['video_data'][0]
+        video_url = video_data.get('hd_src')
         if not video_url:
-            video_url = params['sd_src']
+            video_url = video_data['sd_src']
         if not video_url:
             raise ExtractorError(u'Cannot find video URL')
-        video_duration = int(params['video_duration'])
+        video_duration = int(video_data['video_duration'])
+        thumbnail = video_data['thumbnail_src']
 
         m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
         if not m:
@@ -2139,7 +2181,7 @@ class FacebookIE(InfoExtractor):
             'url': video_url,
             'ext': 'mp4',
             'duration': video_duration,
-            'thumbnail': params['thumbnail_src'],
+            'thumbnail': thumbnail,
         }
         return [info]
 
@@ -2162,7 +2204,7 @@ class BlipTVIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         urlp = compat_urllib_parse_urlparse(url)
@@ -2209,7 +2251,7 @@ class BlipTVIE(InfoExtractor):
                 json_code_bytes = urlh.read()
                 json_code = json_code_bytes.decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to read video info webpage: %s' % compat_str(err))
                 return
 
             try:
@@ -2240,7 +2282,7 @@ class BlipTVIE(InfoExtractor):
                     'user_agent': 'iTunes/10.6.1',
                 }
             except (ValueError,KeyError) as err:
-                self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
+                self._downloader.report_error(u'unable to parse video information: %s' % repr(err))
                 return
 
         return [info]
@@ -2262,7 +2304,7 @@ class MyVideoIE(InfoExtractor):
     def _real_extract(self,url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._download.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group(1)
@@ -2272,16 +2314,16 @@ class MyVideoIE(InfoExtractor):
         webpage = self._download_webpage(webpage_url, video_id)
 
         self.report_extraction(video_id)
-        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
+        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\'',
                  webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            self._downloader.report_error(u'unable to extract media URL')
             return
         video_url = mobj.group(1) + ('/%s.flv' % video_id)
 
         mobj = re.search('<title>([^<]+)</title>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
 
         video_title = mobj.group(1)
@@ -2354,7 +2396,7 @@ class ComedyCentralIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         if mobj.group('shortname'):
@@ -2385,16 +2427,16 @@ class ComedyCentralIE(InfoExtractor):
             html = htmlHandle.read()
             webpage = html.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
             return
         if dlNewest:
             url = htmlHandle.geturl()
             mobj = re.match(self._VALID_URL, url, re.VERBOSE)
             if mobj is None:
-                self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
+                self._downloader.report_error(u'Invalid redirected URL: ' + url)
                 return
             if mobj.group('episode') == '':
-                self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
+                self._downloader.report_error(u'Redirected URL is still not specific: ' + url)
                 return
             epTitle = mobj.group('episode')
 
@@ -2407,7 +2449,7 @@ class ComedyCentralIE(InfoExtractor):
 
             altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
             if len(altMovieParams) == 0:
-                self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
+                self._downloader.report_error(u'unable to find Flash URL in webpage ' + url)
                 return
             else:
                 mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
@@ -2418,7 +2460,7 @@ class ComedyCentralIE(InfoExtractor):
         try:
             indexXml = compat_urllib_request.urlopen(indexUrl).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
+            self._downloader.report_error(u'unable to download episode index: ' + compat_str(err))
             return
 
         results = []
@@ -2439,7 +2481,7 @@ class ComedyCentralIE(InfoExtractor):
             try:
                 configXml = compat_urllib_request.urlopen(configReq).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             cdoc = xml.etree.ElementTree.fromstring(configXml)
@@ -2449,7 +2491,7 @@ class ComedyCentralIE(InfoExtractor):
                 turls.append(finfo)
 
             if len(turls) == 0:
-                self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
+                self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
                 continue
 
             if self._downloader.params.get('listformats', None):
@@ -2506,7 +2548,7 @@ class EscapistIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         showName = mobj.group('showname')
         videoId = mobj.group('episode')
@@ -2518,7 +2560,7 @@ class EscapistIE(InfoExtractor):
             m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
             webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
+            self._downloader.report_error(u'unable to download webpage: ' + compat_str(err))
             return
 
         descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
@@ -2536,7 +2578,7 @@ class EscapistIE(InfoExtractor):
             m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type'])
             configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
+            self._downloader.report_error(u'unable to download configuration: ' + compat_str(err))
             return
 
         # Technically, it's JavaScript, not JSON
@@ -2545,7 +2587,7 @@ class EscapistIE(InfoExtractor):
         try:
             config = json.loads(configJSON)
         except (ValueError,) as err:
-            self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
+            self._downloader.report_error(u'Invalid JSON in configuration file: ' + compat_str(err))
             return
 
         playlist = config['playlist']
@@ -2557,7 +2599,7 @@ class EscapistIE(InfoExtractor):
             'uploader': showName,
             'upload_date': None,
             'title': showName,
-            'ext': 'flv',
+            'ext': 'mp4',
             'thumbnail': imgUrl,
             'description': description,
             'player_url': playerUrl,
@@ -2583,7 +2625,7 @@ class CollegeHumorIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group('videoid')
 
@@ -2598,7 +2640,7 @@ class CollegeHumorIE(InfoExtractor):
         try:
             metaXml = compat_urllib_request.urlopen(xmlUrl).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
             return
 
         mdoc = xml.etree.ElementTree.fromstring(metaXml)
@@ -2609,7 +2651,7 @@ class CollegeHumorIE(InfoExtractor):
             info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
             manifest_url = videoNode.findall('./file')[0].text
         except IndexError:
-            self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+            self._downloader.report_error(u'Invalid metadata XML file')
             return
 
         manifest_url += '?hdcore=2.10.3'
@@ -2617,7 +2659,7 @@ class CollegeHumorIE(InfoExtractor):
         try:
             manifestXml = compat_urllib_request.urlopen(manifest_url).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
             return
 
         adoc = xml.etree.ElementTree.fromstring(manifestXml)
@@ -2626,7 +2668,7 @@ class CollegeHumorIE(InfoExtractor):
             node_id = media_node.attrib['url']
             video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
         except IndexError as err:
-            self._downloader.trouble(u'\nERROR: Invalid manifest file')
+            self._downloader.report_error(u'Invalid manifest file')
             return
 
         url_pr = compat_urllib_parse_urlparse(manifest_url)
@@ -2650,7 +2692,7 @@ class XVideosIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group(1)
 
@@ -2662,7 +2704,7 @@ class XVideosIE(InfoExtractor):
         # Extract video URL
         mobj = re.search(r'flv_url=(.+?)&', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video url')
+            self._downloader.report_error(u'unable to extract video url')
             return
         video_url = compat_urllib_parse.unquote(mobj.group(1))
 
@@ -2670,7 +2712,7 @@ class XVideosIE(InfoExtractor):
         # Extract title
         mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         video_title = mobj.group(1)
 
@@ -2678,7 +2720,7 @@ class XVideosIE(InfoExtractor):
         # Extract video thumbnail
         mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            self._downloader.report_error(u'unable to extract video thumbnail')
             return
         video_thumbnail = mobj.group(0)
 
@@ -2722,7 +2764,7 @@ class SoundcloudIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         # extract uploader (which is in the url)
@@ -2740,7 +2782,7 @@ class SoundcloudIE(InfoExtractor):
             info_json_bytes = compat_urllib_request.urlopen(request).read()
             info_json = info_json_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
             return
 
         info = json.loads(info_json)
@@ -2753,7 +2795,7 @@ class SoundcloudIE(InfoExtractor):
             stream_json_bytes = compat_urllib_request.urlopen(request).read()
             stream_json = stream_json_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download stream definitions: %s' % compat_str(err))
             return
 
         streams = json.loads(stream_json)
@@ -2769,6 +2811,87 @@ class SoundcloudIE(InfoExtractor):
             'description': info['description'],
         }]
 
+class SoundcloudSetIE(InfoExtractor):
+    """Information extractor for soundcloud.com sets
+       To access the media, the uid of the song and a stream token
+       must be extracted from the page source and the script must make
+       a request to media.soundcloud.com/crossdomain.xml. Then
+       the media can be grabbed by requesting from an url composed
+       of the stream token and uid
+     """
+
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)'
+    IE_NAME = u'soundcloud:set'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_resolve(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id))
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.report_error(u'invalid URL: %s' % url)
+            return
+
+        # extract uploader (which is in the url)
+        uploader = mobj.group(1)
+        # extract simple title (uploader + slug of song title)
+        slug_title =  mobj.group(2)
+        simple_title = uploader + u'-' + slug_title
+
+        self.report_resolve('%s/sets/%s' % (uploader, slug_title))
+
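+        # Resolve the set's permalink into its JSON metadata (including the track list) via the Soundcloud API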
+        url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
+        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+        request = compat_urllib_request.Request(resolv_url)
+        try:
+            info_json_bytes = compat_urllib_request.urlopen(request).read()
+            info_json = info_json_bytes.decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
+            return
+
+        videos = []
+        info = json.loads(info_json)
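+        # The resolve endpoint reports failures through an 'errors' list in the JSON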
+        if 'errors' in info:
+            for err in info['errors']:
+                self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err['error_message']))
+            return
+
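+        # Fetch the stream definitions for each track; the 128 kbps MP3 URL is used for the download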
+        for track in info['tracks']:
+            video_id = track['id']
+            self.report_extraction('%s/sets/%s' % (uploader, slug_title))
+
+            streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+            request = compat_urllib_request.Request(streams_url)
+            try:
+                stream_json_bytes = compat_urllib_request.urlopen(request).read()
+                stream_json = stream_json_bytes.decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.report_error(u'unable to download stream definitions: %s' % compat_str(err))
+                return
+
+            streams = json.loads(stream_json)
+            mediaURL = streams['http_mp3_128_url']
+
+            videos.append({
+                'id':       video_id,
+                'url':      mediaURL,
+                'uploader': track['user']['username'],
+                'upload_date':  track['created_at'],
+                'title':    track['title'],
+                'ext':      u'mp3',
+                'description': track['description'],
+            })
+        return videos
+
 
 class InfoQIE(InfoExtractor):
     """Information extractor for infoq.com"""
@@ -2781,7 +2904,7 @@ class InfoQIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         webpage = self._download_webpage(url, video_id=url)
@@ -2790,7 +2913,7 @@ class InfoQIE(InfoExtractor):
         # Extract video URL
         mobj = re.search(r"jsclassref='([^']*)'", webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video url')
+            self._downloader.report_error(u'unable to extract video url')
             return
         real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
         video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
@@ -2798,7 +2921,7 @@ class InfoQIE(InfoExtractor):
         # Extract title
         mobj = re.search(r'contentTitle = "(.*?)";', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         video_title = mobj.group(1)
 
@@ -2881,7 +3004,7 @@ class MixcloudIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         # extract uploader & filename from url
         uploader = mobj.group(1).decode('utf-8')
@@ -2895,7 +3018,7 @@ class MixcloudIE(InfoExtractor):
             self.report_download_json(file_url)
             jsonData = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve file: %s' % compat_str(err))
             return
 
         # parse JSON
@@ -2919,7 +3042,7 @@ class MixcloudIE(InfoExtractor):
                     break # got it!
         else:
             if req_format not in formats:
-                self._downloader.trouble(u'ERROR: format is not available')
+                self._downloader.report_error(u'format is not available')
                 return
 
             url_list = self.get_urls(formats, req_format)
@@ -2973,14 +3096,14 @@ class StanfordOpenClassroomIE(InfoExtractor):
             try:
                 metaXml = compat_urllib_request.urlopen(xmlUrl).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
                 return
             mdoc = xml.etree.ElementTree.fromstring(metaXml)
             try:
                 info['title'] = mdoc.findall('./title')[0].text
                 info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
             except IndexError:
-                self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+                self._downloader.report_error(u'Invalid metadata XML file')
                 return
             info['ext'] = info['url'].rpartition('.')[2]
             return [info]
@@ -3032,7 +3155,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
             try:
                 rootpage = compat_urllib_request.urlopen(rootURL).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
+                self._downloader.report_error(u'unable to download course info page: ' + compat_str(err))
                 return
 
             info['title'] = info['id']
@@ -3064,7 +3187,7 @@ class MTVIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         if not mobj.group('proto'):
             url = 'http://' + url
@@ -3074,25 +3197,25 @@ class MTVIE(InfoExtractor):
 
         mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract song name')
+            self._downloader.report_error(u'unable to extract song name')
             return
         song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
         mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract performer')
+            self._downloader.report_error(u'unable to extract performer')
             return
         performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
         video_title = performer + ' - ' + song_name
 
         mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to mtvn_uri')
+            self._downloader.report_error(u'unable to extract mtvn_uri')
             return
         mtvn_uri = mobj.group(1)
 
         mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract content id')
+            self._downloader.report_error(u'unable to extract content id')
             return
         content_id = mobj.group(1)
 
@@ -3102,7 +3225,7 @@ class MTVIE(InfoExtractor):
         try:
             metadataXml = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video metadata: %s' % compat_str(err))
             return
 
         mdoc = xml.etree.ElementTree.fromstring(metadataXml)
@@ -3174,7 +3297,7 @@ class YoukuIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group('ID')
 
@@ -3185,7 +3308,7 @@ class YoukuIE(InfoExtractor):
             self.report_download_webpage(video_id)
             jsondata = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
 
         self.report_extraction(video_id)
@@ -3216,7 +3339,7 @@ class YoukuIE(InfoExtractor):
             fileid = config['data'][0]['streamfileids'][format]
             keys = [s['k'] for s in config['data'][0]['segs'][format]]
         except (UnicodeDecodeError, ValueError, KeyError):
-            self._downloader.trouble(u'ERROR: unable to extract info section')
+            self._downloader.report_error(u'unable to extract info section')
             return
 
         files_info=[]
@@ -3263,7 +3386,7 @@ class XNXXIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group(1)
 
@@ -3274,24 +3397,24 @@ class XNXXIE(InfoExtractor):
             webpage_bytes = compat_urllib_request.urlopen(url).read()
             webpage = webpage_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
+            self._downloader.report_error(u'unable to download video webpage: %s' % err)
             return
 
         result = re.search(self.VIDEO_URL_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video url')
+            self._downloader.report_error(u'unable to extract video url')
             return
         video_url = compat_urllib_parse.unquote(result.group(1))
 
         result = re.search(self.VIDEO_TITLE_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         video_title = result.group(1)
 
         result = re.search(self.VIDEO_THUMB_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            self._downloader.report_error(u'unable to extract video thumbnail')
             return
         video_thumbnail = result.group(1)
 
@@ -3340,7 +3463,7 @@ class GooglePlusIE(InfoExtractor):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         post_url = mobj.group(0)
@@ -3354,7 +3477,7 @@ class GooglePlusIE(InfoExtractor):
         try:
             webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve entry webpage: %s' % compat_str(err))
             return
 
         # Extract update date
@@ -3389,14 +3512,14 @@ class GooglePlusIE(InfoExtractor):
         pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
         mobj = re.search(pattern, webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video page URL')
+            self._downloader.report_error(u'unable to extract video page URL')
 
         video_page = mobj.group(1)
         request = compat_urllib_request.Request(video_page)
         try:
             webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
         self.report_extract_vid_page(video_page)
 
@@ -3406,7 +3529,7 @@ class GooglePlusIE(InfoExtractor):
         pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
         mobj = re.findall(pattern, webpage)
         if len(mobj) == 0:
-            self._downloader.trouble(u'ERROR: unable to extract video links')
+            self._downloader.report_error(u'unable to extract video links')
 
         # Sort in resolution
         links = sorted(mobj)
@@ -3438,7 +3561,7 @@ class NBAIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group(1)
@@ -3494,13 +3617,13 @@ class JustinTVIE(InfoExtractor):
             webpage_bytes = urlh.read()
             webpage = webpage_bytes.decode('utf-8', 'ignore')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video info JSON: %s' % compat_str(err))
             return
 
         response = json.loads(webpage)
         if type(response) != list:
             error_text = response.get('error', 'unknown error')
-            self._downloader.trouble(u'ERROR: Justin.tv API: %s' % error_text)
+            self._downloader.report_error(u'Justin.tv API: %s' % error_text)
             return
         info = []
         for clip in response:
@@ -3525,7 +3648,7 @@ class JustinTVIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         api = 'http://api.justin.tv'
@@ -3560,7 +3683,7 @@ class FunnyOrDieIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group('id')
@@ -3568,13 +3691,15 @@ class FunnyOrDieIE(InfoExtractor):
 
         m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
         if not m:
-            self._downloader.trouble(u'ERROR: unable to find video information')
+            self._downloader.report_error(u'unable to find video information')
         video_url = unescapeHTML(m.group('url'))
 
-        m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage)
+        m = re.search(r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>", webpage, flags=re.DOTALL)
         if not m:
-            self._downloader.trouble(u'Cannot find video title')
-        title = unescapeHTML(m.group('title'))
+            m = re.search(r'<title>(?P<title>[^<]+?)</title>', webpage)
+            if not m:
+                self._downloader.report_error(u'Cannot find video title')
+        title = clean_html(m.group('title'))
 
         m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
         if m:
@@ -3621,7 +3746,7 @@ class SteamIE(InfoExtractor):
             video_url = vid.group('videoURL')
             video_thumb = thumb.group('thumbnail')
             if not video_url:
-                self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
+                self._downloader.report_error(u'Cannot find video url for %s' % video_id)
             info = {
                 'id':video_id,
                 'url':video_url,
@@ -3655,44 +3780,40 @@ class UstreamIE(InfoExtractor):
         return [info]
 
 class WorldStarHipHopIE(InfoExtractor):
-    _VALID_URL = r"""(http://(?:www|m).worldstar(?:candy|hiphop)\.com.*)"""
+    _VALID_URL = r'http://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
     IE_NAME = u'WorldStarHipHop'
 
     def _real_extract(self, url):
-        results = []
-
         _src_url = r"""(http://hw-videos.*(?:mp4|flv))"""
 
-        webpage_src = compat_urllib_request.urlopen(str(url)).read()
+        webpage_src = compat_urllib_request.urlopen(url).read()
+        webpage_src = webpage_src.decode('utf-8')
 
         mobj = re.search(_src_url, webpage_src)
 
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('id')
+
         if mobj is not None:
             video_url = mobj.group()
             if 'mp4' in video_url:
-                ext = '.mp4'
+                ext = 'mp4'
             else:
-                ext = '.flv'
+                ext = 'flv'
         else:
-            video_url = None
-            ext = None
+            self._downloader.report_error(u'Cannot find video url for %s' % video_id)
+            return
 
         _title = r"""<title>(.*)</title>"""
 
         mobj = re.search(_title, webpage_src)
-        
+
         if mobj is not None:
             title = mobj.group(1)
-            title = title.replace("&#039;", "")
-            title = title.replace("&#39;", "")
-            title = title.replace('Video: ', '')
-            title = title.replace('&quot;', '"')
-            title = title.replace('&amp;', 'n')
         else:
-            title = None
+            title = 'World Star Hip Hop - %s' % time.ctime()
 
         _thumbnail = r"""rel="image_src" href="(.*)" />"""
-
         mobj = re.search(_thumbnail, webpage_src)
 
         # Getting thumbnail and if not thumbnail sets correct title for WSHH candy video.
@@ -3705,13 +3826,13 @@ class WorldStarHipHopIE(InfoExtractor):
                 title = mobj.group(1)
             thumbnail = None
 
-        results.append({
-            'url' : video_url,
-            'title' : title,
-            'thumbnail' : thumbnail,
-            'ext' : ext
-            })
-
+        results = [{
+                    'id': video_id,
+                    'url' : video_url,
+                    'title' : title,
+                    'thumbnail' : thumbnail,
+                    'ext' : ext,
+                    }]
         return results
 
 class RBMARadioIE(InfoExtractor):
@@ -3771,7 +3892,7 @@ class YouPornIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group('videoid')
@@ -3863,7 +3984,7 @@ class YouPornIE(InfoExtractor):
         else:
             format = self._specific( req_format, formats )
             if result is None:
-                self._downloader.trouble(u'ERROR: requested format not available')
+                self._downloader.report_error(u'requested format not available')
                 return
             return [format]
 
@@ -3876,7 +3997,7 @@ class PornotubeIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group('videoid')
@@ -3889,7 +4010,7 @@ class PornotubeIE(InfoExtractor):
         VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
         result = re.search(VIDEO_URL_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video url')
+            self._downloader.report_error(u'unable to extract video url')
             return
         video_url = compat_urllib_parse.unquote(result.group('url'))
 
@@ -3897,7 +4018,7 @@ class PornotubeIE(InfoExtractor):
         VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
         result = re.search(VIDEO_UPLOADED_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         upload_date = result.group('date')
 
@@ -3918,7 +4039,7 @@ class YouJizzIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group('videoid')
@@ -4011,13 +4132,13 @@ class KeekIE(InfoExtractor):
         video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
         thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
         webpage = self._download_webpage(url, video_id)
-        m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
+        m = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', webpage)
         title = unescapeHTML(m.group('title'))
-        m = re.search(r'<div class="bio-names-and-report">[\s\n]+<h4>(?P<uploader>\w+)</h4>', webpage)
-        uploader = unescapeHTML(m.group('uploader'))
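+        # The uploader name now sits inside the user-name-and-bio block; clean_html strips the markup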
+        m = re.search(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>', webpage)
+        uploader = clean_html(m.group('uploader'))
         info = {
-                'id':video_id,
-                'url':video_url,
+                'id': video_id,
+                'url': video_url,
                 'ext': 'mp4',
                 'title': title,
                 'thumbnail': thumbnail,
@@ -4078,7 +4199,7 @@ class TEDIE(InfoExtractor):
         videoName=m.group('name')
         webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
         # If the url includes the language we get the title translated
-        title_RE=r'<h1><span id="altHeadline" >(?P<title>.*)</span></h1>'
+        title_RE=r'<span id="altHeadline" >(?P<title>.*)</span>'
         title=re.search(title_RE, webpage).group('title')
         info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
                         "id":(?P<videoID>[\d]+).*?
@@ -4119,13 +4240,13 @@ class MySpassIE(InfoExtractor):
         # extract values from metadata
         url_flv_el = metadata.find('url_flv')
         if url_flv_el is None:
-            self._downloader.trouble(u'ERROR: unable to extract download url')
+            self._downloader.report_error(u'unable to extract download url')
             return
         video_url = url_flv_el.text
         extension = os.path.splitext(video_url)[1][1:]
         title_el = metadata.find('title')
         if title_el is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         title = title_el.text
         format_id_el = metadata.find('format_id')
@@ -4154,6 +4275,129 @@ class MySpassIE(InfoExtractor):
         }
         return [info]
 
+class SpiegelIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
+
+        webpage = self._download_webpage(url, video_id)
+        m = re.search(r'<div class="spVideoTitle">(.*?)</div>', webpage)
+        if not m:
+            raise ExtractorError(u'Cannot find title')
+        video_title = unescapeHTML(m.group(1))
+
+        xml_url = u'http://video2.spiegel.de/flash/' + video_id + u'.xml'
+        xml_code = self._download_webpage(xml_url, video_id,
+                    note=u'Downloading XML', errnote=u'Failed to download XML')
+
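+        # The last variant listed in the XML provides the filename and duration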
+        idoc = xml.etree.ElementTree.fromstring(xml_code)
+        last_type = idoc[-1]
+        filename = last_type.findall('./filename')[0].text
+        duration = float(last_type.findall('./duration')[0].text)
+
+        video_url = 'http://video2.spiegel.de/flash/' + filename
+        video_ext = filename.rpartition('.')[2]
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'ext': video_ext,
+            'title': video_title,
+            'duration': duration,
+        }
+        return [info]
+
+class LiveLeakIE(InfoExtractor):
+
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<video_id>[\w_]+)(?:.*)'
+    IE_NAME = u'liveleak'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.report_error(u'invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group('video_id')
+
+        webpage = self._download_webpage(url, video_id)
+
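+        # The direct video URL is embedded in the page source as: file: "..."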
+        m = re.search(r'file: "(.*?)",', webpage)
+        if not m:
+            self._downloader.report_error(u'unable to find video url')
+            return
+        video_url = m.group(1)
+
+        m = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', webpage)
+        if not m:
+            self._downloader.report_error(u'Cannot find video title')
+        title = unescapeHTML(m.group('title')).replace('LiveLeak.com -', '').strip()
+
+        m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
+        if m:
+            desc = unescapeHTML(m.group('desc'))
+        else:
+            desc = None
+
+        m = re.search(r'By:.*?(\w+)</a>', webpage)
+        if m:
+            uploader = clean_html(m.group(1))
+        else:
+            uploader = None
+
+        info = {
+            'id':  video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': desc,
+            'uploader': uploader
+        }
+
+        return [info]
+
+class ARDIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
+    _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
+    _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
+
+    def _real_extract(self, url):
+        # determine video id from url
+        m = re.match(self._VALID_URL, url)
+
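+        # Prefer the numeric documentId from the query string when it is present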
+        numid = re.search(r'documentId=([0-9]+)', url)
+        if numid:
+            video_id = numid.group(1)
+        else:
+            video_id = m.group('video_id')
+
+        # determine title and media streams from webpage
+        html = self._download_webpage(url, video_id)
+        title = re.search(self._TITLE, html).group('title')
+        streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
+        if not streams:
+            assert '"fsk"' in html
+            self._downloader.report_error(u'this video is only available after 8:00 pm')
+            return
+
+        # choose default media type and highest quality for now
+        stream = max([s for s in streams if int(s["media_type"]) == 0],
+                     key=lambda s: int(s["quality"]))
+
+        # there are two possibilities: an RTMP stream or an HTTP download
+        info = {'id': video_id, 'title': title, 'ext': 'mp4'}
+        if stream['rtmp_url']:
+            self._downloader.to_screen(u'[%s] RTMP download detected' % self.IE_NAME)
+            assert stream['video_url'].startswith('mp4:')
+            info["url"] = stream["rtmp_url"]
+            info["play_path"] = stream['video_url']
+        else:
+            assert stream["video_url"].endswith('.mp4')
+            info["url"] = stream["video_url"]
+        return [info]
+
+
 def gen_extractors():
     """ Return a list of an instance of every supported extractor.
     The order does matter; the first extractor matched is the one handling the URL.
@@ -4180,6 +4424,7 @@ def gen_extractors():
         EscapistIE(),
         CollegeHumorIE(),
         XVideosIE(),
+        SoundcloudSetIE(),
         SoundcloudIE(),
         InfoQIE(),
         MixcloudIE(),
@@ -4203,7 +4448,8 @@ def gen_extractors():
         KeekIE(),
         TEDIE(),
         MySpassIE(),
+        SpiegelIE(),
+        LiveLeakIE(),
+        ARDIE(),
         GenericIE()
     ]
-
-