Merge branch 'master' into extract_info_rewrite
authorJaime Marquínez Ferrándiz <jaime.marquinez.ferrandiz@gmail.com>
Fri, 5 Apr 2013 10:39:51 +0000 (12:39 +0200)
committerJaime Marquínez Ferrándiz <jaime.marquinez.ferrandiz@gmail.com>
Fri, 5 Apr 2013 10:39:51 +0000 (12:39 +0200)
1  2 
README.md
youtube_dl/FileDownloader.py
youtube_dl/InfoExtractors.py

diff --combined README.md
index 1f3422ef89e559cf573d2cd6b0ccbf0b434b82b3,338b6133f89a345041cbcd2ee621865784121cb3..c8d28db3cd8e699974746fa2ed08f5e5ba438e0b
+++ b/README.md
@@@ -18,7 -18,7 +18,7 @@@ which means you can modify it, redistri
      --version                print program version and exit
      -U, --update             update this program to latest version
      -i, --ignore-errors      continue on download errors
-     -r, --rate-limit LIMIT   download rate limit (e.g. 50k or 44.6m)
+     -r, --rate-limit LIMIT   maximum download rate (e.g. 50k or 44.6m)
      -R, --retries RETRIES    number of retries (default is 10)
      --buffer-size SIZE       size of download buffer (e.g. 1024 or 16k) (default
                               is 1024)
                               requested
      --max-quality FORMAT     highest quality format to download
      -F, --list-formats       list all available formats (currently youtube only)
-     --write-srt              write video closed captions to a .srt file
+     --write-sub              write subtitle file (currently youtube only)
+     --only-sub               downloads only the subtitles (no video)
+     --all-subs               downloads all the available subtitles of the video
                               (currently youtube only)
-     --srt-lang LANG          language of the closed captions to download
-                              (optional) use IETF language tags like 'en'
+     --list-subs              lists all available subtitles for the video
+                              (currently youtube only)
+     --sub-format LANG        subtitle format [srt/sbv] (default=srt) (currently
+                              youtube only)
+     --sub-lang LANG          language of the subtitles to download (optional)
+                              use IETF language tags like 'en'
  
  ## Authentication Options:
      -u, --username USERNAME  account username
@@@ -138,7 -144,6 +144,7 @@@ The `-o` option allows users to indicat
   - `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
   - `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
   - `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
 + - `playlist`: The name or the id of the playlist that contains the video.
  
  The current default template is `%(id)s.%(ext)s`, but that will be switchted to `%(title)s-%(id)s.%(ext)s` (which can be requested with `-t` at the moment).
  
index d2b9be9ef88cb93e3e54fb0d3650880e35c36b43,7c5a52be15fa449396fb931bb5911f108ec38493..2237d355df893621a735f72f55cba38f9f7366ac
@@@ -231,11 -231,21 +231,21 @@@ class FileDownloader(object)
              self.to_stderr(message)
          if self.params.get('verbose'):
              if tb is None:
-                 tb_data = traceback.format_list(traceback.extract_stack())
-                 tb = u''.join(tb_data)
+                 if sys.exc_info()[0]:  # if .trouble has been called from an except block
+                     tb = u''
+                     if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
+                         tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
+                     tb += compat_str(traceback.format_exc())
+                 else:
+                     tb_data = traceback.format_list(traceback.extract_stack())
+                     tb = u''.join(tb_data)
              self.to_stderr(tb)
          if not self.params.get('ignoreerrors', False):
-             raise DownloadError(message)
+             if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
+                 exc_info = sys.exc_info()[1].exc_info
+             else:
+                 exc_info = sys.exc_info()
+             raise DownloadError(message, exc_info)
          self._download_retcode = 1
  
      def report_warning(self, message):
          warning_message=u'%s %s' % (_msg_header,message)
          self.to_stderr(warning_message)
  
+     def report_error(self, message, tb=None):
+         '''
+         Do the same as trouble, but prefixes the message with 'ERROR:', colored
+         in red if stderr is a tty file.
+         '''
+         if sys.stderr.isatty():
+             _msg_header = u'\033[0;31mERROR:\033[0m'
+         else:
+             _msg_header = u'ERROR:'
+         error_message = u'%s %s' % (_msg_header, message)
+         self.trouble(error_message, tb)
      def slow_down(self, start_time, byte_counter):
          """Sleep if the download speed is over the rate limit."""
          rate_limit = self.params.get('ratelimit', None)
                  return
              os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
          except (IOError, OSError) as err:
-             self.trouble(u'ERROR: unable to rename file')
+             self.report_error(u'unable to rename file')
  
      def try_utime(self, filename, last_modified_hdr):
          """Try to set the last-modified time of the given file."""
              if re.search(rejecttitle, title, re.IGNORECASE):
                  return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
          return None
 +        
 +    def extract_info(self, url):
 +        '''
 +        Returns a list with a dictionary for each video we find.
 +         '''
 +        suitable_found = False
 +        for ie in self._ies:
 +            # Go to next InfoExtractor if not suitable
 +            if not ie.suitable(url):
 +                continue
 +
 +            # Warn if the _WORKING attribute is False
 +            if not ie.working():
 +                self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
 +                               u'and will probably not work. If you want to go on, use the -i option.')
 +
 +            # Suitable InfoExtractor found
 +            suitable_found = True
 +
 +            # Extract information from URL and process it
 +            try:
 +                ie_results = ie.extract(url)
 +                results = self.process_ie_results(ie_results, ie)
 +                return results
 +            except ExtractorError as de: # An error we somewhat expected
 +                self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
 +                break
 +            except Exception as e:
 +                if self.params.get('ignoreerrors', False):
 +                    self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
 +                    break
 +                else:
 +                    raise
 +        if not suitable_found:
 +                self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
 +    def extract_info_iterable(self, urls):
 +        '''
 +            Return the videos founded for the urls
 +        '''
 +        results = []
 +        for url in urls:
 +            results.extend(self.extract_info(url))
 +        return results
 +        
 +    def process_ie_results(self, ie_results, ie):
 +        """
 +        Take the results of the ie and return a list of videos.
 +        For url elements it will seartch the suitable ie and get the videos
 +        For playlist elements it will process each of the elements of the 'entries' key
 +        """
 +        results = [] 
 +        for result in ie_results or []:
 +            result_type = result.get('_type', 'video') #If not given we suppose it's a video, support the dafault old system
 +            if result_type == 'video':
 +                if not 'extractor' in result:
 +                    #The extractor has already been set somewhere else
 +                    result['extractor'] = ie.IE_NAME
 +                results.append(result)
 +            elif result_type == 'url':
 +                #We get the videos pointed by the url
 +                results.extend(self.extract_info(result['url']))
 +            elif result_type == 'playlist':
 +                #We process each entry in the playlist
 +                entries_result = self.process_ie_results(result['entries'], ie)
 +                result['entries'] = entries_result
 +                results.extend([result])
 +        return results
  
      def process_info(self, info_dict):
          """Process a single dictionary returned by an InfoExtractor."""
  
 +        if info_dict.get('_type','video') == 'playlist':
 +            playlist = info_dict.get('title', None) or info_dict.get('id', None)
 +            self.to_screen(u'[download] Downloading playlist: %s'  % playlist)
 +            for video in info_dict['entries']:
 +                video['playlist'] = playlist
 +                self.process_info(video)
 +            return
 +        
          # Keep for backwards compatibility
          info_dict['stitle'] = info_dict['title']
  
              if dn != '' and not os.path.exists(dn): # dn is already encoded
                  os.makedirs(dn)
          except (OSError, IOError) as err:
-             self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
+             self.report_error(u'unable to create directory ' + compat_str(err))
              return
  
          if self.params.get('writedescription', False):
                  with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                      descfile.write(info_dict['description'])
              except (OSError, IOError):
-                 self.trouble(u'ERROR: Cannot write description file ' + descfn)
+                 self.report_error(u'Cannot write description file ' + descfn)
                  return
  
          if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
              subtitle = info_dict['subtitles'][0]
              (sub_error, sub_lang, sub) = subtitle
              sub_format = self.params.get('subtitlesformat')
-             try:
-                 sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
-                 self.report_writesubtitles(sub_filename)
-                 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                     subfile.write(sub)
-             except (OSError, IOError):
-                 self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
-                 return
+             if sub_error:
+                 self.report_warning("Some error while getting the subtitles")
+             else:
+                 try:
+                     sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+                     self.report_writesubtitles(sub_filename)
+                     with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+                         subfile.write(sub)
+                 except (OSError, IOError):
+                     self.report_error(u'Cannot write subtitles file ' + descfn)
+                     return
              if self.params.get('onlysubtitles', False):
                  return 
  
              sub_format = self.params.get('subtitlesformat')
              for subtitle in subtitles:
                  (sub_error, sub_lang, sub) = subtitle
-                 try:
-                     sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
-                     self.report_writesubtitles(sub_filename)
-                     with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                             subfile.write(sub)
-                 except (OSError, IOError):
-                     self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
-                     return
+                 if sub_error:
+                     self.report_warning("Some error while getting the subtitles")
+                 else:
+                     try:
+                         sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+                         self.report_writesubtitles(sub_filename)
+                         with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+                                 subfile.write(sub)
+                     except (OSError, IOError):
+                         self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
+                         return
              if self.params.get('onlysubtitles', False):
                  return 
  
                  json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
                  write_json_file(json_info_dict, encodeFilename(infofn))
              except (OSError, IOError):
-                 self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
+                 self.report_error(u'Cannot write metadata to JSON file ' + infofn)
                  return
  
          if not self.params.get('skip_download', False):
                  except (OSError, IOError) as err:
                      raise UnavailableVideoError()
                  except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                     self.trouble(u'ERROR: unable to download video data: %s' % str(err))
+                     self.report_error(u'unable to download video data: %s' % str(err))
                      return
                  except (ContentTooShortError, ) as err:
-                     self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+                     self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                      return
  
              if success:
                  try:
                      self.post_process(filename, info_dict)
                  except (PostProcessingError) as err:
-                     self.trouble(u'ERROR: postprocessing: %s' % str(err))
+                     self.report_error(u'postprocessing: %s' % str(err))
                      return
  
      def download(self, url_list):
              raise SameFileError(self.params['outtmpl'])
  
          for url in url_list:
 -            suitable_found = False
 -            for ie in self._ies:
 -                # Go to next InfoExtractor if not suitable
 -                if not ie.suitable(url):
 -                    continue
 -
 -                # Warn if the _WORKING attribute is False
 -                if not ie.working():
 -                    self.report_warning(u'the program functionality for this site has been marked as broken, '
 -                                        u'and will probably not work. If you want to go on, use the -i option.')
 +            videos = self.extract_info(url)
  
 -                # Suitable InfoExtractor found
 -                suitable_found = True
 -
 -                # Extract information from URL and process it
 +            for video in videos or []:
                  try:
 -                    videos = ie.extract(url)
 -                except ExtractorError as de: # An error we somewhat expected
 -                    self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
 -                    break
 +                    self.increment_downloads()
 +                    self.process_info(video)
 +                except UnavailableVideoError:
 +                    self.trouble(u'\nERROR: unable to download video')
+                 except MaxDownloadsReached:
+                     self.to_screen(u'[info] Maximum number of downloaded files reached.')
+                     raise
 -                except Exception as e:
 -                    if self.params.get('ignoreerrors', False):
 -                        self.report_error(u'' + compat_str(e), tb=compat_str(traceback.format_exc()))
 -                        break
 -                    else:
 -                        raise
 -
 -                if len(videos or []) > 1 and self.fixed_template():
 -                    raise SameFileError(self.params['outtmpl'])
 -
 -                for video in videos or []:
 -                    video['extractor'] = ie.IE_NAME
 -                    try:
 -                        self.increment_downloads()
 -                        self.process_info(video)
 -                    except UnavailableVideoError:
 -                        self.to_stderr(u"\n")
 -                        self.report_error(u'unable to download video')
 -
 -                # Suitable InfoExtractor had been found; go to next URL
 -                break
 -
 -            if not suitable_found:
 -                self.report_error(u'no suitable InfoExtractor: %s' % url)
  
          return self._download_retcode
  
          try:
              subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
          except (OSError, IOError):
-             self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
+             self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
              return False
  
          # Download using rtmpdump. rtmpdump returns exit code 2 when
              })
              return True
          else:
-             self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
+             self.to_stderr(u"\n")
+             self.report_error(u'rtmpdump exited with code %d' % retval)
              return False
  
      def _do_download(self, filename, info_dict):
                  self.report_retry(count, retries)
  
          if count > retries:
-             self.trouble(u'ERROR: giving up after %s retries' % retries)
+             self.report_error(u'giving up after %s retries' % retries)
              return False
  
          data_len = data.info().get('Content-length', None)
                      filename = self.undo_temp_name(tmpfilename)
                      self.report_destination(filename)
                  except (OSError, IOError) as err:
-                     self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
+                     self.report_error(u'unable to open for writing: %s' % str(err))
                      return False
              try:
                  stream.write(data_block)
              except (IOError, OSError) as err:
-                 self.trouble(u'\nERROR: unable to write data: %s' % str(err))
+                 self.to_stderr(u"\n")
+                 self.report_error(u'unable to write data: %s' % str(err))
                  return False
              if not self.params.get('noresizebuffer', False):
                  block_size = self.best_block_size(after - before, len(data_block))
              self.slow_down(start, byte_counter - resume_len)
  
          if stream is None:
-             self.trouble(u'\nERROR: Did not get any data blocks')
+             self.to_stderr(u"\n")
+             self.report_error(u'Did not get any data blocks')
              return False
          stream.close()
          self.report_finish()
index 6053d14ecb89b8d60737297b2d7b2c51afc55ba5,aa8074a9e7d7c5fd690a6a31f5f8f5354d1ffcb5..81eaddc7260333e70a01be3ea354edfc7513f339
@@@ -134,28 -134,6 +134,28 @@@ class InfoExtractor(object)
              encoding = 'utf-8'
          webpage_bytes = urlh.read()
          return webpage_bytes.decode(encoding, 'replace')
 +        
 +    #Methods for following #608
 +    #They set the correct value of the '_type' key
 +    def video_result(self, video_info):
 +        """Returns a video"""
 +        video_info['_type'] = 'video'
 +        return video_info
 +    def url_result(self, url, ie=None):
 +        """Returns a url that points to a page that should be processed"""
 +        #TODO: ie should be the class used for getting the info
 +        video_info = {'_type': 'url',
 +                      'url': url}
 +        return video_info
 +    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
 +        """Returns a playlist"""
 +        video_info = {'_type': 'playlist',
 +                      'entries': entries}
 +        if playlist_id:
 +            video_info['id'] = playlist_id
 +        if playlist_title:
 +            video_info['title'] = playlist_title
 +        return video_info
  
  
  class YoutubeIE(InfoExtractor):
          try:
              sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+             return (u'unable to download video subtitles: %s' % compat_str(err), None)
          sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
          sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
          if not sub_lang_list:
-             return (u'WARNING: video doesn\'t have subtitles', None)
+             return (u'video doesn\'t have subtitles', None)
          return sub_lang_list
  
      def _list_available_subtitles(self, video_id):
          self.report_video_subtitles_available(video_id, sub_lang_list)
  
      def _request_subtitle(self, sub_lang, sub_name, video_id, format):
+         """
+         Return tuple:
+         (error_message, sub_lang, sub)
+         """
          self.report_video_subtitles_request(video_id, sub_lang, format)
          params = compat_urllib_parse.urlencode({
              'lang': sub_lang,
          try:
              sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+             return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
          if not sub:
-             return (u'WARNING: Did not fetch video subtitles', None)
+             return (u'Did not fetch video subtitles', None, None)
          return (None, sub_lang, sub)
  
      def _extract_subtitle(self, video_id):
+         """
+         Return a list with a tuple:
+         [(error_message, sub_lang, sub)]
+         """
          sub_lang_list = self._get_available_subtitles(video_id)
          sub_format = self._downloader.params.get('subtitlesformat')
+         if  isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
+             return [(sub_lang_list[0], None, None)]
          if self._downloader.params.get('subtitleslang', False):
              sub_lang = self._downloader.params.get('subtitleslang')
          elif 'en' in sub_lang_list:
          else:
              sub_lang = list(sub_lang_list.keys())[0]
          if not sub_lang in sub_lang_list:
-             return (u'WARNING: no closed captions found in the specified language "%s"' % sub_lang, None)
+             return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]
  
          subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
          return [subtitle]
      def _extract_all_subtitles(self, video_id):
          sub_lang_list = self._get_available_subtitles(video_id)
          sub_format = self._downloader.params.get('subtitlesformat')
+         if  isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
+             return [(sub_lang_list[0], None, None)]
          subtitles = []
          for sub_lang in sub_lang_list:
              subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
              self.report_age_confirmation()
              age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
              return
  
      def _extract_id(self, url):
          mobj = re.match(self._VALID_URL, url, re.VERBOSE)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
          video_id = mobj.group(2)
          return video_id
          try:
              video_webpage_bytes = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
              return
  
          video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
                  if 'token' in video_info:
                      break
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
+                 self._downloader.report_error(u'unable to download video info webpage: %s' % compat_str(err))
                  return
          if 'token' not in video_info:
              if 'reason' in video_info:
-                 self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0])
+                 self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0])
              else:
-                 self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
+                 self._downloader.report_error(u'"token" parameter not in video info for unknown reason')
              return
  
          # Check for "rental" videos
          if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
-             self._downloader.trouble(u'ERROR: "rental" videos not supported')
+             self._downloader.report_error(u'"rental" videos not supported')
              return
  
          # Start extracting information
  
          # uploader
          if 'author' not in video_info:
-             self._downloader.trouble(u'ERROR: unable to extract uploader name')
+             self._downloader.report_error(u'unable to extract uploader name')
              return
          video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
  
          if mobj is not None:
              video_uploader_id = mobj.group(1)
          else:
-             self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+             self._downloader.report_warning(u'unable to extract uploader nickname')
  
          # title
          if 'title' not in video_info:
-             self._downloader.trouble(u'ERROR: unable to extract video title')
+             self._downloader.report_error(u'unable to extract video title')
              return
          video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
  
          # thumbnail image
          if 'thumbnail_url' not in video_info:
-             self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+             self._downloader.report_warning(u'unable to extract video thumbnail')
              video_thumbnail = ''
          else:   # don't panic if we can't find it
              video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
              if video_subtitles:
                  (sub_error, sub_lang, sub) = video_subtitles[0]
                  if sub_error:
-                     self._downloader.trouble(sub_error)
+                     self._downloader.report_error(sub_error)
  
          if self._downloader.params.get('allsubtitles', False):
              video_subtitles = self._extract_all_subtitles(video_id)
              for video_subtitle in video_subtitles:
                  (sub_error, sub_lang, sub) = video_subtitle
                  if sub_error:
-                     self._downloader.trouble(sub_error)
+                     self._downloader.report_error(sub_error)
  
          if self._downloader.params.get('listsubtitles', False):
              sub_lang_list = self._list_available_subtitles(video_id)
              return
  
          if 'length_seconds' not in video_info:
-             self._downloader.trouble(u'WARNING: unable to extract video duration')
+             self._downloader.report_warning(u'unable to extract video duration')
              video_duration = ''
          else:
              video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
                  format_list = available_formats
              existing_formats = [x for x in format_list if x in url_map]
              if len(existing_formats) == 0:
-                 self._downloader.trouble(u'ERROR: no known formats available for video')
+                 self._downloader.report_error(u'no known formats available for video')
                  return
              if self._downloader.params.get('listformats', None):
                  self._print_formats(existing_formats)
                          video_url_list = [(rf, url_map[rf])]
                          break
                  if video_url_list is None:
-                     self._downloader.trouble(u'ERROR: requested format not available')
+                     self._downloader.report_error(u'requested format not available')
                      return
          else:
-             self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
+             self._downloader.report_error(u'no conn or url_encoded_fmt_stream_map information found in video info')
              return
  
          results = []
@@@ -683,7 -673,7 +695,7 @@@ class MetacafeIE(InfoExtractor)
              self.report_disclaimer()
              disclaimer = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to retrieve disclaimer: %s' % compat_str(err))
              return
  
          # Confirm age
              self.report_age_confirmation()
              disclaimer = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
              return
  
      def _real_extract(self, url):
          # Extract id and simplified title from URL
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          video_id = mobj.group(1)
              self.report_download_webpage(video_id)
              webpage = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to retrieve video webpage: %s' % compat_str(err))
              return
  
          # Extract URL, uploader and title from webpage
          else:
              mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
              if mobj is None:
-                 self._downloader.trouble(u'ERROR: unable to extract media URL')
+                 self._downloader.report_error(u'unable to extract media URL')
                  return
              vardict = compat_parse_qs(mobj.group(1))
              if 'mediaData' not in vardict:
-                 self._downloader.trouble(u'ERROR: unable to extract media URL')
+                 self._downloader.report_error(u'unable to extract media URL')
                  return
              mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
              if mobj is None:
-                 self._downloader.trouble(u'ERROR: unable to extract media URL')
+                 self._downloader.report_error(u'unable to extract media URL')
                  return
              mediaURL = mobj.group(1).replace('\\/', '/')
              video_extension = mediaURL[-3:]
  
          mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract title')
+             self._downloader.report_error(u'unable to extract title')
              return
          video_title = mobj.group(1).decode('utf-8')
  
          mobj = re.search(r'submitter=(.*?);', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+             self._downloader.report_error(u'unable to extract uploader nickname')
              return
          video_uploader = mobj.group(1)
  
@@@ -794,7 -784,7 +806,7 @@@ class DailymotionIE(InfoExtractor)
          # Extract id and simplified title from URL
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          video_id = mobj.group(1).split('_')[0].split('?')[0]
          self.report_extraction(video_id)
          mobj = re.search(r'\s*var flashvars = (.*)', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract media URL')
+             self._downloader.report_error(u'unable to extract media URL')
              return
          flashvars = compat_urllib_parse.unquote(mobj.group(1))
  
                  self._downloader.to_screen(u'[dailymotion] Using %s' % key)
                  break
          else:
-             self._downloader.trouble(u'ERROR: unable to extract video URL')
+             self._downloader.report_error(u'unable to extract video URL')
              return
  
          mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video URL')
+             self._downloader.report_error(u'unable to extract video URL')
              return
  
          video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
  
          mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract title')
+             self._downloader.report_error(u'unable to extract title')
              return
          video_title = unescapeHTML(mobj.group('title'))
  
              # lookin for official user
              mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
              if mobj_official is None:
-                 self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+                 self._downloader.report_warning(u'unable to extract uploader nickname')
              else:
                  video_uploader = mobj_official.group(1)
          else:
@@@ -886,7 -876,7 +898,7 @@@ class PhotobucketIE(InfoExtractor)
          # Extract id from URL
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+             self._downloader.report_error(u'Invalid URL: %s' % url)
              return
  
          video_id = mobj.group(1)
              self.report_download_webpage(video_id)
              webpage = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
              return
  
          # Extract URL, uploader, and title from webpage
          self.report_extraction(video_id)
          mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract media URL')
+             self._downloader.report_error(u'unable to extract media URL')
              return
          mediaURL = compat_urllib_parse.unquote(mobj.group(1))
  
  
          mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract title')
+             self._downloader.report_error(u'unable to extract title')
              return
          video_title = mobj.group(1).decode('utf-8')
  
@@@ -955,7 -945,7 +967,7 @@@ class YahooIE(InfoExtractor)
          # Extract ID from URL
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+             self._downloader.report_error(u'Invalid URL: %s' % url)
              return
  
          video_id = mobj.group(2)
              try:
                  webpage = compat_urllib_request.urlopen(request).read()
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+                 self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
                  return
  
              mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
              if mobj is None:
-                 self._downloader.trouble(u'ERROR: Unable to extract id field')
+                 self._downloader.report_error(u'Unable to extract id field')
                  return
              yahoo_id = mobj.group(1)
  
              mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
              if mobj is None:
-                 self._downloader.trouble(u'ERROR: Unable to extract vid field')
+                 self._downloader.report_error(u'Unable to extract vid field')
                  return
              yahoo_vid = mobj.group(1)
  
              self.report_download_webpage(video_id)
              webpage = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
              return
  
          # Extract uploader and title from webpage
          self.report_extraction(video_id)
          mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video title')
+             self._downloader.report_error(u'unable to extract video title')
              return
          video_title = mobj.group(1).decode('utf-8')
  
          mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video uploader')
+             self._downloader.report_error(u'unable to extract video uploader')
              return
          video_uploader = mobj.group(1).decode('utf-8')
  
          # Extract video thumbnail
          mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+             self._downloader.report_error(u'unable to extract video thumbnail')
              return
          video_thumbnail = mobj.group(1).decode('utf-8')
  
          # Extract video description
          mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video description')
+             self._downloader.report_error(u'unable to extract video description')
              return
          video_description = mobj.group(1).decode('utf-8')
          if not video_description:
          # Extract video height and width
          mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video height')
+             self._downloader.report_error(u'unable to extract video height')
              return
          yv_video_height = mobj.group(1)
  
          mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video width')
+             self._downloader.report_error(u'unable to extract video width')
              return
          yv_video_width = mobj.group(1)
  
              self.report_download_webpage(video_id)
              webpage = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
              return
  
          # Extract media URL from playlist XML
          mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: Unable to extract media URL')
+             self._downloader.report_error(u'Unable to extract media URL')
              return
          video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
          video_url = unescapeHTML(video_url)
@@@ -1095,7 -1085,7 +1107,7 @@@ class VimeoIE(InfoExtractor)
          # Extract ID from URL
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+             self._downloader.report_error(u'Invalid URL: %s' % url)
              return
  
          video_id = mobj.group('id')
              webpage_bytes = compat_urllib_request.urlopen(request).read()
              webpage = webpage_bytes.decode('utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
              return
  
          # Now we begin extracting as much information as we can from what we
              config = webpage.split(' = {config:')[1].split(',assets:')[0]
              config = json.loads(config)
          except:
-             self._downloader.trouble(u'ERROR: unable to extract info section')
+             self._downloader.report_error(u'unable to extract info section')
              return
  
          # Extract title
                  self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
                  break
          else:
-             self._downloader.trouble(u'ERROR: no known codec found')
+             self._downloader.report_error(u'no known codec found')
              return
  
          video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
@@@ -1218,10 -1208,10 +1230,10 @@@ class ArteTvIE(InfoExtractor)
              self.report_download_webpage(url)
              webpage = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
              return
          except ValueError as err:
-             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+             self._downloader.report_error(u'Invalid URL: %s' % url)
              return
          return webpage
  
          info = {}
  
          if mobj is None:
-             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+             self._downloader.report_error(u'Invalid URL: %s' % url)
              return
  
          for (i, key, err) in matchTuples:
@@@ -1353,7 -1343,7 +1365,7 @@@ class GenericIE(InfoExtractor)
          self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
  
      def _test_redirect(self, url):
 -        """Check if it is a redirect, like url shorteners, in case restart chain."""
 +        """Check if it is a redirect, like url shorteners, in case return the new url."""
          class HeadRequest(compat_urllib_request.Request):
              def get_method(self):
                  return "HEAD"
              return False
  
          self.report_following_redirect(new_url)
 -        self._downloader.download([new_url])
 -        return True
 +        return new_url
  
      def _real_extract(self, url):
 -        if self._test_redirect(url): return
 +        new_url = self._test_redirect(url)
 +        if new_url: return [self.url_result(new_url)]
  
          video_id = url.split('/')[-1]
          try:
          except ValueError as err:
              # since this is the last-resort InfoExtractor, if
              # this error is thrown, it'll be thrown here
-             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+             self._downloader.report_error(u'Invalid URL: %s' % url)
              return
  
          self.report_extraction(video_id)
              # Broaden the search a little bit: JWPlayer JS loader
              mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+             self._downloader.report_error(u'Invalid URL: %s' % url)
              return
  
          # It's possible that one of the regexes
          # matched, but returned an empty group:
          if mobj.group(1) is None:
-             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+             self._downloader.report_error(u'Invalid URL: %s' % url)
              return
  
          video_url = compat_urllib_parse.unquote(mobj.group(1))
          # and so on and so forth; it's just not practical
          mobj = re.search(r'<title>(.*)</title>', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract title')
+             self._downloader.report_error(u'unable to extract title')
              return
          video_title = mobj.group(1)
  
          # video uploader is domain name
          mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract title')
+             self._downloader.report_error(u'unable to extract title')
              return
          video_uploader = mobj.group(1)
  
@@@ -1492,7 -1482,7 +1504,7 @@@ class YoutubeSearchIE(InfoExtractor)
      def _real_extract(self, query):
          mobj = re.match(self._VALID_URL, query)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+             self._downloader.report_error(u'invalid search query "%s"' % query)
              return
  
          prefix, query = query.split(':')
              try:
                  n = int(prefix)
                  if n <= 0:
-                     self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                     self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
                      return
                  elif n > self._max_youtube_results:
                      self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
              try:
                  data = compat_urllib_request.urlopen(request).read().decode('utf-8')
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
+                 self._downloader.report_error(u'unable to download API page: %s' % compat_str(err))
                  return
              api_response = json.loads(data)['data']
  
@@@ -1574,7 -1564,7 +1586,7 @@@ class GoogleSearchIE(InfoExtractor)
      def _real_extract(self, query):
          mobj = re.match(self._VALID_URL, query)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+             self._downloader.report_error(u'invalid search query "%s"' % query)
              return
  
          prefix, query = query.split(':')
              try:
                  n = int(prefix)
                  if n <= 0:
-                     self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                     self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
                      return
                  elif n > self._max_google_results:
                      self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
              try:
                  page = compat_urllib_request.urlopen(request).read()
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                 self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                  return
  
              # Extract video identifiers
@@@ -1658,7 -1648,7 +1670,7 @@@ class YahooSearchIE(InfoExtractor)
      def _real_extract(self, query):
          mobj = re.match(self._VALID_URL, query)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+             self._downloader.report_error(u'invalid search query "%s"' % query)
              return
  
          prefix, query = query.split(':')
              try:
                  n = int(prefix)
                  if n <= 0:
-                     self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                     self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
                      return
                  elif n > self._max_yahoo_results:
                      self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
              try:
                  page = compat_urllib_request.urlopen(request).read()
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                 self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                  return
  
              # Extract video identifiers
@@@ -1732,9 -1722,7 +1744,7 @@@ class YoutubePlaylistIE(InfoExtractor)
                          (?:
                             (?:course|view_play_list|my_playlists|artist|playlist|watch)
                             \? (?:.*?&)*? (?:p|a|list)=
-                         |  user/.*?/user/
                          |  p/
-                         |  user/.*?#[pg]/c/
                          )
                          ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
                          .*
          # Extract playlist id
          mobj = re.match(self._VALID_URL, url, re.VERBOSE)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+             self._downloader.report_error(u'invalid url: %s' % url)
              return
  
          # Download playlist videos from API
              try:
                  page = compat_urllib_request.urlopen(url).read().decode('utf8')
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                 self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                  return
  
              try:
                  response = json.loads(page)
              except ValueError as err:
-                 self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err))
+                 self._downloader.report_error(u'Invalid JSON in API response: ' + compat_str(err))
                  return
  
              if not 'feed' in response or not 'entry' in response['feed']:
-                 self._downloader.trouble(u'ERROR: Got a malformed response from YouTube API')
+                 self._downloader.report_error(u'Got a malformed response from YouTube API')
                  return
              videos += [ (entry['yt$position']['$t'], entry['content']['src'])
                          for entry in response['feed']['entry']
          else:
              self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(videos)))
  
 -        for video in videos:
 -            self._downloader.download([video])
 -        return
 +        url_results = [self.url_result(url) for url in videos]
 +        return [self.playlist_result(url_results, playlist_id)]
  
  
  class YoutubeChannelIE(InfoExtractor):
          # Extract channel id
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+             self._downloader.report_error(u'invalid url: %s' % url)
              return
  
          # Download channel pages
              try:
                  page = compat_urllib_request.urlopen(request).read().decode('utf8')
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                 self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                  return
  
              # Extract video identifiers
  
          self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
  
 -        for id in video_ids:
 -            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
 -        return
 +        urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
 +        url_entries = [self.url_result(url) for url in urls]
 +        return [self.playlist_result(url_entries, channel_id)]
  
  
  class YoutubeUserIE(InfoExtractor):
          # Extract username
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+             self._downloader.report_error(u'invalid url: %s' % url)
              return
  
          username = mobj.group(1)
              try:
                  page = compat_urllib_request.urlopen(request).read().decode('utf-8')
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                 self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                  return
  
              # Extract video identifiers
          self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" %
                  (username, all_ids_count, len(video_ids)))
  
 -        for video_id in video_ids:
 -            self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
 +        urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
 +        url_results = [self.url_result(url) for url in urls]
 +        return [self.playlist_result(url_results, playlist_title = username)]
  
  
  class BlipTVUserIE(InfoExtractor):
          # Extract username
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+             self._downloader.report_error(u'invalid url: %s' % url)
              return
  
          username = mobj.group(1)
              mobj = re.search(r'data-users-id="([^"]+)"', page)
              page_base = page_base % mobj.group(1)
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
              return
  
  
              try:
                  page = compat_urllib_request.urlopen(request).read().decode('utf-8')
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                 self._downloader.report_error(u'unable to download webpage: %s' % str(err))
                  return
  
              # Extract video identifiers
          self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
                  (self.IE_NAME, username, all_ids_count, len(video_ids)))
  
 -        for video_id in video_ids:
 -            self._downloader.download([u'http://blip.tv/'+video_id])
 +        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
 +        url_entries = [self.url_result(url) for url in urls]
 +        return [self.playlist_result(url_entries, playlist_title = username)]
  
  
  class DepositFilesIE(InfoExtractor):
              self.report_download_webpage(file_id)
              webpage = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'Unable to retrieve file webpage: %s' % compat_str(err))
              return
  
          # Search for the real file URL
              mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
              if (mobj is not None) and (mobj.group(1) is not None):
                  restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
-                 self._downloader.trouble(u'ERROR: %s' % restriction_message)
+                 self._downloader.report_error(u'%s' % restriction_message)
              else:
-                 self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
+                 self._downloader.report_error(u'unable to extract download URL from: %s' % url)
              return
  
          file_url = mobj.group(1)
          # Search for file title
          mobj = re.search(r'<b title="(.*?)">', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract title')
+             self._downloader.report_error(u'unable to extract title')
              return
          file_title = mobj.group(1).decode('utf-8')
  
@@@ -2162,7 -2149,7 +2172,7 @@@ class FacebookIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
          video_id = mobj.group('ID')
  
@@@ -2218,7 -2205,7 +2228,7 @@@ class BlipTVIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          urlp = compat_urllib_parse_urlparse(url)
                  json_code_bytes = urlh.read()
                  json_code = json_code_bytes.decode('utf-8')
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
+                 self._downloader.report_error(u'unable to read video info webpage: %s' % compat_str(err))
                  return
  
              try:
                      'user_agent': 'iTunes/10.6.1',
                  }
              except (ValueError,KeyError) as err:
-                 self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
+                 self._downloader.report_error(u'unable to parse video information: %s' % repr(err))
                  return
  
          return [info]
@@@ -2318,7 -2305,7 +2328,7 @@@ class MyVideoIE(InfoExtractor)
      def _real_extract(self,url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._download.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          video_id = mobj.group(1)
          webpage = self._download_webpage(webpage_url, video_id)
  
          self.report_extraction(video_id)
-         mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
+         mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\'',
                   webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract media URL')
+             self._downloader.report_error(u'unable to extract media URL')
              return
          video_url = mobj.group(1) + ('/%s.flv' % video_id)
  
          mobj = re.search('<title>([^<]+)</title>', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract title')
+             self._downloader.report_error(u'unable to extract title')
              return
  
          video_title = mobj.group(1)
@@@ -2410,7 -2397,7 +2420,7 @@@ class ComedyCentralIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url, re.VERBOSE)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          if mobj.group('shortname'):
              html = htmlHandle.read()
              webpage = html.decode('utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
              return
          if dlNewest:
              url = htmlHandle.geturl()
              mobj = re.match(self._VALID_URL, url, re.VERBOSE)
              if mobj is None:
-                 self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
+                 self._downloader.report_error(u'Invalid redirected URL: ' + url)
                  return
              if mobj.group('episode') == '':
-                 self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
+                 self._downloader.report_error(u'Redirected URL is still not specific: ' + url)
                  return
              epTitle = mobj.group('episode')
  
  
              altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
              if len(altMovieParams) == 0:
-                 self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
+                 self._downloader.report_error(u'unable to find Flash URL in webpage ' + url)
                  return
              else:
                  mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
          try:
              indexXml = compat_urllib_request.urlopen(indexUrl).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
+             self._downloader.report_error(u'unable to download episode index: ' + compat_str(err))
              return
  
          results = []
              try:
                  configXml = compat_urllib_request.urlopen(configReq).read()
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                 self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                  return
  
              cdoc = xml.etree.ElementTree.fromstring(configXml)
                  turls.append(finfo)
  
              if len(turls) == 0:
-                 self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
+                 self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
                  continue
  
              if self._downloader.params.get('listformats', None):
@@@ -2562,7 -2549,7 +2572,7 @@@ class EscapistIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
          showName = mobj.group('showname')
          videoId = mobj.group('episode')
              m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
              webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
+             self._downloader.report_error(u'unable to download webpage: ' + compat_str(err))
              return
  
          descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
              m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type'])
              configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
+             self._downloader.report_error(u'unable to download configuration: ' + compat_str(err))
              return
  
          # Technically, it's JavaScript, not JSON
          try:
              config = json.loads(configJSON)
          except (ValueError,) as err:
-             self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
+             self._downloader.report_error(u'Invalid JSON in configuration file: ' + compat_str(err))
              return
  
          playlist = config['playlist']
@@@ -2639,7 -2626,7 +2649,7 @@@ class CollegeHumorIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
          video_id = mobj.group('videoid')
  
          try:
              metaXml = compat_urllib_request.urlopen(xmlUrl).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
              return
  
          mdoc = xml.etree.ElementTree.fromstring(metaXml)
              info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
              manifest_url = videoNode.findall('./file')[0].text
          except IndexError:
-             self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+             self._downloader.report_error(u'Invalid metadata XML file')
              return
  
          manifest_url += '?hdcore=2.10.3'
          try:
              manifestXml = compat_urllib_request.urlopen(manifest_url).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
              return
  
          adoc = xml.etree.ElementTree.fromstring(manifestXml)
              node_id = media_node.attrib['url']
              video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
          except IndexError as err:
-             self._downloader.trouble(u'\nERROR: Invalid manifest file')
+             self._downloader.report_error(u'Invalid manifest file')
              return
  
          url_pr = compat_urllib_parse_urlparse(manifest_url)
@@@ -2706,7 -2693,7 +2716,7 @@@ class XVideosIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
          video_id = mobj.group(1)
  
          # Extract video URL
          mobj = re.search(r'flv_url=(.+?)&', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video url')
+             self._downloader.report_error(u'unable to extract video url')
              return
          video_url = compat_urllib_parse.unquote(mobj.group(1))
  
          # Extract title
          mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video title')
+             self._downloader.report_error(u'unable to extract video title')
              return
          video_title = mobj.group(1)
  
          # Extract video thumbnail
          mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+             self._downloader.report_error(u'unable to extract video thumbnail')
              return
          video_thumbnail = mobj.group(0)
  
@@@ -2778,7 -2765,7 +2788,7 @@@ class SoundcloudIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          # extract uploader (which is in the url)
              info_json_bytes = compat_urllib_request.urlopen(request).read()
              info_json = info_json_bytes.decode('utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
              return
  
          info = json.loads(info_json)
              stream_json_bytes = compat_urllib_request.urlopen(request).read()
              stream_json = stream_json_bytes.decode('utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to download stream definitions: %s' % compat_str(err))
              return
  
          streams = json.loads(stream_json)
              'description': info['description'],
          }]
  
+ class SoundcloudSetIE(InfoExtractor):
+     """Information extractor for soundcloud.com sets
+        To access the media, the uid of the song and a stream token
+        must be extracted from the page source and the script must make
+        a request to media.soundcloud.com/crossdomain.xml. Then
+        the media can be grabbed by requesting from an url composed
+        of the stream token and uid
+      """
+     _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)'
+     IE_NAME = u'soundcloud'
+     def __init__(self, downloader=None):
+         InfoExtractor.__init__(self, downloader)
+     def report_resolve(self, video_id):
+         """Report information extraction."""
+         self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id))
+     def report_extraction(self, video_id):
+         """Report information extraction."""
+         self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id))
+     def _real_extract(self, url):
+         mobj = re.match(self._VALID_URL, url)
+         if mobj is None:
+             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             return
+         # extract uploader (which is in the url)
+         uploader = mobj.group(1)
+         # extract simple title (uploader + slug of song title)
+         slug_title =  mobj.group(2)
+         simple_title = uploader + u'-' + slug_title
+         self.report_resolve('%s/sets/%s' % (uploader, slug_title))
+         url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
+         resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+         request = compat_urllib_request.Request(resolv_url)
+         try:
+             info_json_bytes = compat_urllib_request.urlopen(request).read()
+             info_json = info_json_bytes.decode('utf-8')
+         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+             self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+             return
+         videos = []
+         info = json.loads(info_json)
+         if 'errors' in info:
+             for err in info['errors']:
+                 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err['error_message']))
+             return
+         for track in info['tracks']:
+             video_id = track['id']
+             self.report_extraction('%s/sets/%s' % (uploader, slug_title))
+             streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+             request = compat_urllib_request.Request(streams_url)
+             try:
+                 stream_json_bytes = compat_urllib_request.urlopen(request).read()
+                 stream_json = stream_json_bytes.decode('utf-8')
+             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                 self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err))
+                 return
+             streams = json.loads(stream_json)
+             mediaURL = streams['http_mp3_128_url']
+             videos.append({
+                 'id':       video_id,
+                 'url':      mediaURL,
+                 'uploader': track['user']['username'],
+                 'upload_date':  track['created_at'],
+                 'title':    track['title'],
+                 'ext':      u'mp3',
+                 'description': track['description'],
+             })
+         return videos
  
  class InfoQIE(InfoExtractor):
      """Information extractor for infoq.com"""
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          webpage = self._download_webpage(url, video_id=url)
          # Extract video URL
          mobj = re.search(r"jsclassref='([^']*)'", webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video url')
+             self._downloader.report_error(u'unable to extract video url')
              return
          real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
          video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
          # Extract title
          mobj = re.search(r'contentTitle = "(.*?)";', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video title')
+             self._downloader.report_error(u'unable to extract video title')
              return
          video_title = mobj.group(1)
  
@@@ -2937,7 -3005,7 +3028,7 @@@ class MixcloudIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
          # extract uploader & filename from url
          uploader = mobj.group(1).decode('utf-8')
              self.report_download_json(file_url)
              jsonData = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
+             self._downloader.report_error(u'Unable to retrieve file: %s' % compat_str(err))
              return
  
          # parse JSON
                      break # got it!
          else:
              if req_format not in formats:
-                 self._downloader.trouble(u'ERROR: format is not available')
+                 self._downloader.report_error(u'format is not available')
                  return
  
              url_list = self.get_urls(formats, req_format)
@@@ -3029,14 -3097,14 +3120,14 @@@ class StanfordOpenClassroomIE(InfoExtra
              try:
                  metaXml = compat_urllib_request.urlopen(xmlUrl).read()
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+                 self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
                  return
              mdoc = xml.etree.ElementTree.fromstring(metaXml)
              try:
                  info['title'] = mdoc.findall('./title')[0].text
                  info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
              except IndexError:
-                 self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+                 self._downloader.report_error(u'Invalid metadata XML file')
                  return
              info['ext'] = info['url'].rpartition('.')[2]
              return [info]
              try:
                  rootpage = compat_urllib_request.urlopen(rootURL).read()
              except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                 self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
+                 self._downloader.report_error(u'unable to download course info page: ' + compat_str(err))
                  return
  
              info['title'] = info['id']
@@@ -3120,7 -3188,7 +3211,7 @@@ class MTVIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
          if not mobj.group('proto'):
              url = 'http://' + url
  
          mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract song name')
+             self._downloader.report_error(u'unable to extract song name')
              return
          song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
          mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract performer')
+             self._downloader.report_error(u'unable to extract performer')
              return
          performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
          video_title = performer + ' - ' + song_name
  
          mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to mtvn_uri')
+             self._downloader.report_error(u'unable to mtvn_uri')
              return
          mtvn_uri = mobj.group(1)
  
          mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract content id')
+             self._downloader.report_error(u'unable to extract content id')
              return
          content_id = mobj.group(1)
  
          try:
              metadataXml = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to download video metadata: %s' % compat_str(err))
              return
  
          mdoc = xml.etree.ElementTree.fromstring(metadataXml)
@@@ -3230,7 -3298,7 +3321,7 @@@ class YoukuIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
          video_id = mobj.group('ID')
  
              self.report_download_webpage(video_id)
              jsondata = compat_urllib_request.urlopen(request).read()
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
              return
  
          self.report_extraction(video_id)
              fileid = config['data'][0]['streamfileids'][format]
              keys = [s['k'] for s in config['data'][0]['segs'][format]]
          except (UnicodeDecodeError, ValueError, KeyError):
-             self._downloader.trouble(u'ERROR: unable to extract info section')
+             self._downloader.report_error(u'unable to extract info section')
              return
  
          files_info=[]
@@@ -3319,7 -3387,7 +3410,7 @@@ class XNXXIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
          video_id = mobj.group(1)
  
              webpage_bytes = compat_urllib_request.urlopen(url).read()
              webpage = webpage_bytes.decode('utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
+             self._downloader.report_error(u'unable to download video webpage: %s' % err)
              return
  
          result = re.search(self.VIDEO_URL_RE, webpage)
          if result is None:
-             self._downloader.trouble(u'ERROR: unable to extract video url')
+             self._downloader.report_error(u'unable to extract video url')
              return
          video_url = compat_urllib_parse.unquote(result.group(1))
  
          result = re.search(self.VIDEO_TITLE_RE, webpage)
          if result is None:
-             self._downloader.trouble(u'ERROR: unable to extract video title')
+             self._downloader.report_error(u'unable to extract video title')
              return
          video_title = result.group(1)
  
          result = re.search(self.VIDEO_THUMB_RE, webpage)
          if result is None:
-             self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+             self._downloader.report_error(u'unable to extract video thumbnail')
              return
          video_thumbnail = result.group(1)
  
@@@ -3396,7 -3464,7 +3487,7 @@@ class GooglePlusIE(InfoExtractor)
          # Extract id from URL
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+             self._downloader.report_error(u'Invalid URL: %s' % url)
              return
  
          post_url = mobj.group(0)
          try:
              webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'Unable to retrieve entry webpage: %s' % compat_str(err))
              return
  
          # Extract update date
          pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
          mobj = re.search(pattern, webpage)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: unable to extract video page URL')
+             self._downloader.report_error(u'unable to extract video page URL')
  
          video_page = mobj.group(1)
          request = compat_urllib_request.Request(video_page)
          try:
              webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+             self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
              return
          self.report_extract_vid_page(video_page)
  
          pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
          mobj = re.findall(pattern, webpage)
          if len(mobj) == 0:
-             self._downloader.trouble(u'ERROR: unable to extract video links')
+             self._downloader.report_error(u'unable to extract video links')
  
          # Sort in resolution
          links = sorted(mobj)
@@@ -3494,7 -3562,7 +3585,7 @@@ class NBAIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          video_id = mobj.group(1)
@@@ -3550,13 -3618,13 +3641,13 @@@ class JustinTVIE(InfoExtractor)
              webpage_bytes = urlh.read()
              webpage = webpage_bytes.decode('utf-8', 'ignore')
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-             self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
+             self._downloader.report_error(u'unable to download video info JSON: %s' % compat_str(err))
              return
  
          response = json.loads(webpage)
          if type(response) != list:
              error_text = response.get('error', 'unknown error')
-             self._downloader.trouble(u'ERROR: Justin.tv API: %s' % error_text)
+             self._downloader.report_error(u'Justin.tv API: %s' % error_text)
              return
          info = []
          for clip in response:
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          api = 'http://api.justin.tv'
@@@ -3616,7 -3684,7 +3707,7 @@@ class FunnyOrDieIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          video_id = mobj.group('id')
  
          m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
          if not m:
-             self._downloader.trouble(u'ERROR: unable to find video information')
+             self._downloader.report_error(u'unable to find video information')
          video_url = unescapeHTML(m.group('url'))
  
-         m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage)
+         m = re.search(r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>", webpage, flags=re.DOTALL)
          if not m:
              self._downloader.trouble(u'Cannot find video title')
-         title = unescapeHTML(m.group('title'))
+         title = clean_html(m.group('title'))
  
          m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
          if m:
@@@ -3677,7 -3745,7 +3768,7 @@@ class SteamIE(InfoExtractor)
              video_url = vid.group('videoURL')
              video_thumb = thumb.group('thumbnail')
              if not video_url:
-                 self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
+                 self._downloader.report_error(u'Cannot find video url for %s' % video_id)
              info = {
                  'id':video_id,
                  'url':video_url,
@@@ -3710,6 -3778,62 +3801,62 @@@ class UstreamIE(InfoExtractor)
                    }
          return [info]
  
+ class WorldStarHipHopIE(InfoExtractor):
+     _VALID_URL = r'http://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
+     IE_NAME = u'WorldStarHipHop'
+     def _real_extract(self, url):
+         _src_url = r"""(http://hw-videos.*(?:mp4|flv))"""
+         webpage_src = compat_urllib_request.urlopen(url).read()
+         webpage_src = webpage_src.decode('utf-8')
+         mobj = re.search(_src_url, webpage_src)
+         m = re.match(self._VALID_URL, url)
+         video_id = m.group('id')
+         if mobj is not None:
+             video_url = mobj.group()
+             if 'mp4' in video_url:
+                 ext = 'mp4'
+             else:
+                 ext = 'flv'
+         else:
+             self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
+             return
+         _title = r"""<title>(.*)</title>"""
+         mobj = re.search(_title, webpage_src)
+         if mobj is not None:
+             title = mobj.group(1)
+         else:
+             title = 'World Start Hip Hop - %s' % time.ctime()
+         _thumbnail = r"""rel="image_src" href="(.*)" />"""
+         mobj = re.search(_thumbnail, webpage_src)
+         # Getting thumbnail and if not thumbnail sets correct title for WSHH candy video.
+         if mobj is not None:
+             thumbnail = mobj.group(1)
+         else:
+             _title = r"""candytitles.*>(.*)</span>"""
+             mobj = re.search(_title, webpage_src)
+             if mobj is not None:
+                 title = mobj.group(1)
+             thumbnail = None
+         results = [{
+                     'id': video_id,
+                     'url' : video_url,
+                     'title' : title,
+                     'thumbnail' : thumbnail,
+                     'ext' : ext,
+                     }]
+         return results
  class RBMARadioIE(InfoExtractor):
      _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
  
@@@ -3767,7 -3891,7 +3914,7 @@@ class YouPornIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          video_id = mobj.group('videoid')
          else:
              format = self._specific( req_format, formats )
              if result is None:
-                 self._downloader.trouble(u'ERROR: requested format not available')
+                 self._downloader.report_error(u'requested format not available')
                  return
              return [format]
  
@@@ -3872,7 -3996,7 +4019,7 @@@ class PornotubeIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          video_id = mobj.group('videoid')
          VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
          result = re.search(VIDEO_URL_RE, webpage)
          if result is None:
-             self._downloader.trouble(u'ERROR: unable to extract video url')
+             self._downloader.report_error(u'unable to extract video url')
              return
          video_url = compat_urllib_parse.unquote(result.group('url'))
  
          VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
          result = re.search(VIDEO_UPLOADED_RE, webpage)
          if result is None:
-             self._downloader.trouble(u'ERROR: unable to extract video title')
+             self._downloader.report_error(u'unable to extract video title')
              return
          upload_date = result.group('date')
  
@@@ -3914,7 -4038,7 +4061,7 @@@ class YouJizzIE(InfoExtractor)
      def _real_extract(self, url):
          mobj = re.match(self._VALID_URL, url)
          if mobj is None:
-             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+             self._downloader.report_error(u'invalid URL: %s' % url)
              return
  
          video_id = mobj.group('videoid')
@@@ -4074,7 -4198,7 +4221,7 @@@ class TEDIE(InfoExtractor)
          videoName=m.group('name')
          webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
          # If the url includes the language we get the title translated
-         title_RE=r'<h1><span id="altHeadline" >(?P<title>.*)</span></h1>'
+         title_RE=r'<span id="altHeadline" >(?P<title>.*)</span>'
          title=re.search(title_RE, webpage).group('title')
          info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
                          "id":(?P<videoID>[\d]+).*?
@@@ -4115,13 -4239,13 +4262,13 @@@ class MySpassIE(InfoExtractor)
          # extract values from metadata
          url_flv_el = metadata.find('url_flv')
          if url_flv_el is None:
-             self._downloader.trouble(u'ERROR: unable to extract download url')
+             self._downloader.report_error(u'unable to extract download url')
              return
          video_url = url_flv_el.text
          extension = os.path.splitext(video_url)[1][1:]
          title_el = metadata.find('title')
          if title_el is None:
-             self._downloader.trouble(u'ERROR: unable to extract title')
+             self._downloader.report_error(u'unable to extract title')
              return
          title = title_el.text
          format_id_el = metadata.find('format_id')
          return [info]
  
  class SpiegelIE(InfoExtractor):
-     _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?$'
+     _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
  
      def _real_extract(self, url):
          m = re.match(self._VALID_URL, url)
          }
          return [info]
  
class LiveLeakIE(InfoExtractor):
    """Information extractor for liveleak.com."""
    _VALID_URL = r'^(?:http?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<video_id>[\w_]+)(?:.*)'
    IE_NAME = u'liveleak'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            # report_error, consistent with the other extractors in this file.
            self._downloader.report_error(u'invalid URL: %s' % url)
            return
        video_id = mobj.group('video_id')

        webpage = self._download_webpage(url, video_id)

        # The player config holds the direct media URL.
        m = re.search(r'file: "(.*?)",', webpage)
        if not m:
            self._downloader.report_error(u'unable to find video url')
            return
        video_url = m.group(1)

        m = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', webpage)
        if not m:
            # Bug fix: the original called trouble() here but fell through
            # without returning, then crashed with AttributeError on
            # m.group('title'). Report the error and abort instead.
            self._downloader.report_error(u'Cannot find video title')
            return
        title = unescapeHTML(m.group('title')).replace('LiveLeak.com -', '').strip()

        # Description and uploader are optional; missing values become None.
        m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
        desc = unescapeHTML(m.group('desc')) if m else None

        m = re.search(r'By:.*?(\w+)</a>', webpage)
        uploader = clean_html(m.group(1)) if m else None

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'description': desc,
            'uploader': uploader,
        }]
  
  def gen_extractors():
      """ Return a list of an instance of every supported extractor.
          EscapistIE(),
          CollegeHumorIE(),
          XVideosIE(),
+         SoundcloudSetIE(),
          SoundcloudIE(),
          InfoQIE(),
          MixcloudIE(),
          GooglePlusIE(),
          ArteTvIE(),
          NBAIE(),
+         WorldStarHipHopIE(),
          JustinTVIE(),
          FunnyOrDieIE(),
          SteamIE(),
          TEDIE(),
          MySpassIE(),
          SpiegelIE(),
+         LiveLeakIE(),
          GenericIE()
      ]