Merge branch 'master' into extract_info_rewrite
author Jaime Marquínez Ferrándiz <jaime.marquinez.ferrandiz@gmail.com>
Fri, 5 Apr 2013 10:39:51 +0000 (12:39 +0200)
committer Jaime Marquínez Ferrándiz <jaime.marquinez.ferrandiz@gmail.com>
Fri, 5 Apr 2013 10:39:51 +0000 (12:39 +0200)
.gitignore
README.md
devscripts/gh-pages/update-feed.py [new file with mode: 0755]
devscripts/release.sh
test/test_download.py
test/tests.json
youtube-dl
youtube_dl/FileDownloader.py
youtube_dl/InfoExtractors.py
youtube_dl/utils.py
youtube_dl/version.py

index 77469b8a7993a9b529769452f109e8805e8b1e50..ca4e8f35321bea65b42197d82b63769a35944168 100644 (file)
--- a/.gitignore
+++ b/.gitignore
@@ -17,4 +17,4 @@ youtube-dl.tar.gz
 .coverage
 cover/
 updates_key.pem
-*.egg-info
+*.egg-info
\ No newline at end of file
index 1f3422ef89e559cf573d2cd6b0ccbf0b434b82b3..c8d28db3cd8e699974746fa2ed08f5e5ba438e0b 100644 (file)
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@ which means you can modify it, redistribute it or use it however you like.
     --version                print program version and exit
     -U, --update             update this program to latest version
     -i, --ignore-errors      continue on download errors
-    -r, --rate-limit LIMIT   download rate limit (e.g. 50k or 44.6m)
+    -r, --rate-limit LIMIT   maximum download rate (e.g. 50k or 44.6m)
     -R, --retries RETRIES    number of retries (default is 10)
     --buffer-size SIZE       size of download buffer (e.g. 1024 or 16k) (default
                              is 1024)
@@ -97,10 +97,16 @@ which means you can modify it, redistribute it or use it however you like.
                              requested
     --max-quality FORMAT     highest quality format to download
     -F, --list-formats       list all available formats (currently youtube only)
-    --write-srt              write video closed captions to a .srt file
+    --write-sub              write subtitle file (currently youtube only)
+    --only-sub               downloads only the subtitles (no video)
+    --all-subs               downloads all the available subtitles of the video
                              (currently youtube only)
-    --srt-lang LANG          language of the closed captions to download
-                             (optional) use IETF language tags like 'en'
+    --list-subs              lists all available subtitles for the video
+                             (currently youtube only)
+    --sub-format LANG        subtitle format [srt/sbv] (default=srt) (currently
+                             youtube only)
+    --sub-lang LANG          language of the subtitles to download (optional)
+                             use IETF language tags like 'en'
 
 ## Authentication Options:
     -u, --username USERNAME  account username
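
The new --write-sub, --only-sub, --all-subs, --list-subs, --sub-format and --sub-lang options correspond to the writesubtitles, onlysubtitles, allsubtitles, listsubtitles, subtitlesformat and subtitleslang parameters that the FileDownloader and YoutubeIE hunks further down read via params.get(). A minimal sketch of driving the same behaviour from Python, assuming that mapping; the actual option parsing lives in youtube_dl/__init__.py, which is not part of this diff, and the URL and output template below are placeholders:

    from youtube_dl.FileDownloader import FileDownloader
    from youtube_dl.InfoExtractors import YoutubeIE

    params = {
        'outtmpl': u'%(id)s.%(ext)s',   # placeholder output template
        'writesubtitles': True,         # --write-sub
        'onlysubtitles': False,         # --only-sub
        'allsubtitles': False,          # --all-subs
        'listsubtitles': False,         # --list-subs
        'subtitlesformat': 'srt',       # --sub-format [srt/sbv]
        'subtitleslang': 'en',          # --sub-lang (IETF language tag)
    }
    fd = FileDownloader(params)
    fd.add_info_extractor(YoutubeIE())
    fd.download([u'http://www.youtube.com/watch?v=EXAMPLE'])  # placeholder URL
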
diff --git a/devscripts/gh-pages/update-feed.py b/devscripts/gh-pages/update-feed.py
new file mode 100755 (executable)
index 0000000..e299429
--- /dev/null
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+
+import datetime
+
+import textwrap
+
+import json
+
+atom_template=textwrap.dedent("""\
+                                                               <?xml version='1.0' encoding='utf-8'?>
+                                                               <atom:feed xmlns:atom="http://www.w3.org/2005/Atom">
+                                                                       <atom:title>youtube-dl releases</atom:title>
+                                                                       <atom:id>youtube-dl-updates-feed</atom:id>
+                                                                       <atom:updated>@TIMESTAMP@</atom:updated>
+                                                                       @ENTRIES@
+                                                               </atom:feed>""")
+
+entry_template=textwrap.dedent("""
+                                                               <atom:entry>
+                                                                       <atom:id>youtube-dl-@VERSION@</atom:id>
+                                                                       <atom:title>New version @VERSION@</atom:title>
+                                                                       <atom:link href="http://rg3.github.com/youtube-dl" />
+                                                                       <atom:content type="xhtml">
+                                                                               <div xmlns="http://www.w3.org/1999/xhtml">
+                                                                                       Downloads available at <a href="http://youtube-dl.org/downloads/@VERSION@/">http://youtube-dl.org/downloads/@VERSION@/</a>
+                                                                               </div>
+                                                                       </atom:content>
+                                                                       <atom:author>
+                                                                               <atom:name>The youtube-dl maintainers</atom:name>
+                                                                       </atom:author>
+                                                                       <atom:updated>@TIMESTAMP@</atom:updated>
+                                                               </atom:entry>
+                                                               """)
+
+now = datetime.datetime.now()
+now_iso = now.isoformat()
+
+atom_template = atom_template.replace('@TIMESTAMP@',now_iso)
+
+entries=[]
+
+versions_info = json.load(open('update/versions.json'))
+versions = list(versions_info['versions'].keys())
+versions.sort()
+
+for v in versions:
+       entry = entry_template.replace('@TIMESTAMP@',v.replace('.','-'))
+       entry = entry.replace('@VERSION@',v)
+       entries.append(entry)
+
+entries_str = textwrap.indent(''.join(entries), '\t')
+atom_template = atom_template.replace('@ENTRIES@', entries_str)
+
+with open('update/releases.atom','w',encoding='utf-8') as atom_file:
+       atom_file.write(atom_template)
+
+
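
update-feed.py is a plain string-templating script: it reads the version strings from update/versions.json (only the keys of the 'versions' object are used), fills @VERSION@ into one entry per release, and writes the result to update/releases.atom. Note that each entry's <atom:updated> field is filled with the version string with dots replaced by dashes rather than a real timestamp, while the feed-level timestamp is the current time. A minimal sketch of the input it expects, with placeholder version strings and empty per-version values, since those values are never read here:

    import json, os

    os.makedirs('update', exist_ok=True)
    with open('update/versions.json', 'w', encoding='utf-8') as f:
        # Only the keys matter to update-feed.py; the values are placeholders.
        json.dump({'versions': {'2013.03.29': {}, '2013.04.03': {}}}, f)

    # Running devscripts/gh-pages/update-feed.py from the gh-pages checkout
    # should then produce update/releases.atom with one <atom:entry> per key.
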
index ee650f2219277439e7cc2d2fc9732a4e0b4306e0..b2a91f817922853ee502624fbd621f535cfa935e 100755 (executable)
--- a/devscripts/release.sh
+++ b/devscripts/release.sh
@@ -69,6 +69,7 @@ ROOT=$(pwd)
     ORIGIN_URL=$(git config --get remote.origin.url)
     cd build/gh-pages
     "$ROOT/devscripts/gh-pages/add-version.py" $version
+    "$ROOT/devscripts/gh-pages/update-feed.py"
     "$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem"
     "$ROOT/devscripts/gh-pages/generate-download.py"
     "$ROOT/devscripts/gh-pages/update-copyright.py"
index f1bccf58c5a2eaab0fbb8d60bb1185007eae0ee8..59a6e1498fd120bc6566f7256fcba4c20d313c7a 100644 (file)
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -20,6 +20,8 @@ from youtube_dl.utils import *
 DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json')
 PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
 
+RETRIES = 3
+
 # General configuration (from __init__, not very elegant...)
 jar = compat_cookiejar.CookieJar()
 cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
@@ -79,9 +81,8 @@ def generator(test_case):
         params.update(test_case.get('params', {}))
 
         fd = FileDownloader(params)
-        fd.add_info_extractor(ie())
-        for ien in test_case.get('add_ie', []):
-            fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')())
+        for ie in youtube_dl.InfoExtractors.gen_extractors():
+            fd.add_info_extractor(ie)
         finished_hook_called = set()
         def _hook(status):
             if status['status'] == 'finished':
@@ -94,7 +95,19 @@ def generator(test_case):
             _try_rm(tc['file'] + '.part')
             _try_rm(tc['file'] + '.info.json')
         try:
-            fd.download([test_case['url']])
+            for retry in range(1, RETRIES + 1):
+                try:
+                    fd.download([test_case['url']])
+                except (DownloadError, ExtractorError) as err:
+                    if retry == RETRIES: raise
+
+                    # Check if the exception is not a network related one
+                    if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
+                        raise
+
+                    print('Retrying: {0} failed tries\n\n##########\n\n'.format(retry))
+                else:
+                    break
 
             for tc in test_cases:
                 if not test_case.get('params', {}).get('skip_download', False):
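
The test runner now wraps each download in a retry loop: DownloadError and ExtractorError are retried up to RETRIES (3) times, but only when the underlying cause is network-related (compat_urllib_error.URLError, socket.timeout or UnavailableVideoError); anything else is re-raised on the first attempt. A condensed sketch of that policy, assuming err.exc_info is the attribute added by the DownloadError(message, exc_info) change in FileDownloader.py below; download_once stands in for fd.download([test_case['url']]):

    import socket
    from youtube_dl.utils import (compat_urllib_error, DownloadError,
                                  ExtractorError, UnavailableVideoError)

    RETRIES = 3
    NETWORK_ERRORS = (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError)

    def download_with_retries(download_once):
        for attempt in range(1, RETRIES + 1):
            try:
                download_once()
            except (DownloadError, ExtractorError) as err:
                # Give up on the last attempt or on non-network failures.
                if attempt == RETRIES or err.exc_info[0] not in NETWORK_ERRORS:
                    raise
                print('Retrying: {0} failed tries'.format(attempt))
            else:
                break
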
index fd9d333327b2de3fc8f0b5b3dabb0336e84e1b39..0c94c65bdee2d77cd3a2ffadb0b403444ba1d8e2 100644 (file)
--- a/test/tests.json
+++ b/test/tests.json
     "info_dict": {
         "title": "Vulkanausbruch in Ecuador: Der \"Feuerschlund\" ist wieder aktiv"
     }
+  },
+  {
+    "name": "LiveLeak",
+    "md5":  "0813c2430bea7a46bf13acf3406992f4",
+    "url":  "http://www.liveleak.com/view?i=757_1364311680",
+    "file":  "757_1364311680.mp4",
+    "info_dict": {
+        "title": "Most unlucky car accident",
+        "description": "extremely bad day for this guy..!",
+        "uploader": "ljfriel2"
+    }
+  },
+  {
+    "name": "WorldStarHipHop",
+    "url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
+    "file": "wshh6a7q1ny0G34ZwuIO.mp4",
+    "md5": "9d04de741161603bf7071bbf4e883186",
+    "info_dict": {
+        "title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick! "
+    }
   }
 ]
index ef9f332410cdea77f0bc2f47432a1a0a29c66786..e6f05c17327ed58f8db66e6dc7d2a38380355d61 100755 (executable)
Binary files a/youtube-dl and b/youtube-dl differ
index d2b9be9ef88cb93e3e54fb0d3650880e35c36b43..2237d355df893621a735f72f55cba38f9f7366ac 100644 (file)
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -231,11 +231,21 @@ class FileDownloader(object):
             self.to_stderr(message)
         if self.params.get('verbose'):
             if tb is None:
-                tb_data = traceback.format_list(traceback.extract_stack())
-                tb = u''.join(tb_data)
+                if sys.exc_info()[0]:  # if .trouble has been called from an except block
+                    tb = u''
+                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
+                        tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
+                    tb += compat_str(traceback.format_exc())
+                else:
+                    tb_data = traceback.format_list(traceback.extract_stack())
+                    tb = u''.join(tb_data)
             self.to_stderr(tb)
         if not self.params.get('ignoreerrors', False):
-            raise DownloadError(message)
+            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
+                exc_info = sys.exc_info()[1].exc_info
+            else:
+                exc_info = sys.exc_info()
+            raise DownloadError(message, exc_info)
         self._download_retcode = 1
 
     def report_warning(self, message):
@@ -250,6 +260,18 @@ class FileDownloader(object):
         warning_message=u'%s %s' % (_msg_header,message)
         self.to_stderr(warning_message)
 
+    def report_error(self, message, tb=None):
+        '''
+        Do the same as trouble, but prefixes the message with 'ERROR:', colored
+        in red if stderr is a tty file.
+        '''
+        if sys.stderr.isatty():
+            _msg_header = u'\033[0;31mERROR:\033[0m'
+        else:
+            _msg_header = u'ERROR:'
+        error_message = u'%s %s' % (_msg_header, message)
+        self.trouble(error_message, tb)
+
     def slow_down(self, start_time, byte_counter):
         """Sleep if the download speed is over the rate limit."""
         rate_limit = self.params.get('ratelimit', None)
@@ -281,7 +303,7 @@ class FileDownloader(object):
                 return
             os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
         except (IOError, OSError) as err:
-            self.trouble(u'ERROR: unable to rename file')
+            self.report_error(u'unable to rename file')
 
     def try_utime(self, filename, last_modified_hdr):
         """Try to set the last-modified time of the given file."""
@@ -519,7 +541,7 @@ class FileDownloader(object):
             if dn != '' and not os.path.exists(dn): # dn is already encoded
                 os.makedirs(dn)
         except (OSError, IOError) as err:
-            self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
+            self.report_error(u'unable to create directory ' + compat_str(err))
             return
 
         if self.params.get('writedescription', False):
@@ -529,7 +551,7 @@ class FileDownloader(object):
                 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                     descfile.write(info_dict['description'])
             except (OSError, IOError):
-                self.trouble(u'ERROR: Cannot write description file ' + descfn)
+                self.report_error(u'Cannot write description file ' + descfn)
                 return
 
         if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
@@ -538,14 +560,17 @@ class FileDownloader(object):
             subtitle = info_dict['subtitles'][0]
             (sub_error, sub_lang, sub) = subtitle
             sub_format = self.params.get('subtitlesformat')
-            try:
-                sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
-                self.report_writesubtitles(sub_filename)
-                with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                    subfile.write(sub)
-            except (OSError, IOError):
-                self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
-                return
+            if sub_error:
+                self.report_warning("Some error while getting the subtitles")
+            else:
+                try:
+                    sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+                    self.report_writesubtitles(sub_filename)
+                    with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+                        subfile.write(sub)
+                except (OSError, IOError):
+                    self.report_error(u'Cannot write subtitles file ' + descfn)
+                    return
             if self.params.get('onlysubtitles', False):
                 return 
 
@@ -554,14 +579,17 @@ class FileDownloader(object):
             sub_format = self.params.get('subtitlesformat')
             for subtitle in subtitles:
                 (sub_error, sub_lang, sub) = subtitle
-                try:
-                    sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
-                    self.report_writesubtitles(sub_filename)
-                    with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                            subfile.write(sub)
-                except (OSError, IOError):
-                    self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
-                    return
+                if sub_error:
+                    self.report_warning("Some error while getting the subtitles")
+                else:
+                    try:
+                        sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+                        self.report_writesubtitles(sub_filename)
+                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+                                subfile.write(sub)
+                    except (OSError, IOError):
+                        self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
+                        return
             if self.params.get('onlysubtitles', False):
                 return 
 
@@ -572,7 +600,7 @@ class FileDownloader(object):
                 json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
                 write_json_file(json_info_dict, encodeFilename(infofn))
             except (OSError, IOError):
-                self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
+                self.report_error(u'Cannot write metadata to JSON file ' + infofn)
                 return
 
         if not self.params.get('skip_download', False):
@@ -584,17 +612,17 @@ class FileDownloader(object):
                 except (OSError, IOError) as err:
                     raise UnavailableVideoError()
                 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    self.trouble(u'ERROR: unable to download video data: %s' % str(err))
+                    self.report_error(u'unable to download video data: %s' % str(err))
                     return
                 except (ContentTooShortError, ) as err:
-                    self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+                    self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                     return
 
             if success:
                 try:
                     self.post_process(filename, info_dict)
                 except (PostProcessingError) as err:
-                    self.trouble(u'ERROR: postprocessing: %s' % str(err))
+                    self.report_error(u'postprocessing: %s' % str(err))
                     return
 
     def download(self, url_list):
@@ -611,6 +639,9 @@ class FileDownloader(object):
                     self.process_info(video)
                 except UnavailableVideoError:
                     self.trouble(u'\nERROR: unable to download video')
+                except MaxDownloadsReached:
+                    self.to_screen(u'[info] Maximum number of downloaded files reached.')
+                    raise
 
         return self._download_retcode
 
@@ -645,7 +676,7 @@ class FileDownloader(object):
         try:
             subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
         except (OSError, IOError):
-            self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
+            self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
             return False
 
         # Download using rtmpdump. rtmpdump returns exit code 2 when
@@ -690,7 +721,8 @@ class FileDownloader(object):
             })
             return True
         else:
-            self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
+            self.to_stderr(u"\n")
+            self.report_error(u'rtmpdump exited with code %d' % retval)
             return False
 
     def _do_download(self, filename, info_dict):
@@ -790,7 +822,7 @@ class FileDownloader(object):
                 self.report_retry(count, retries)
 
         if count > retries:
-            self.trouble(u'ERROR: giving up after %s retries' % retries)
+            self.report_error(u'giving up after %s retries' % retries)
             return False
 
         data_len = data.info().get('Content-length', None)
@@ -826,12 +858,13 @@ class FileDownloader(object):
                     filename = self.undo_temp_name(tmpfilename)
                     self.report_destination(filename)
                 except (OSError, IOError) as err:
-                    self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
+                    self.report_error(u'unable to open for writing: %s' % str(err))
                     return False
             try:
                 stream.write(data_block)
             except (IOError, OSError) as err:
-                self.trouble(u'\nERROR: unable to write data: %s' % str(err))
+                self.to_stderr(u"\n")
+                self.report_error(u'unable to write data: %s' % str(err))
                 return False
             if not self.params.get('noresizebuffer', False):
                 block_size = self.best_block_size(after - before, len(data_block))
@@ -857,7 +890,8 @@ class FileDownloader(object):
             self.slow_down(start, byte_counter - resume_len)
 
         if stream is None:
-            self.trouble(u'\nERROR: Did not get any data blocks')
+            self.to_stderr(u"\n")
+            self.report_error(u'Did not get any data blocks')
             return False
         stream.close()
         self.report_finish()
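
report_error (and the existing report_warning it mirrors) centralises the formatting that used to be inlined in every trouble(u'ERROR: ...') call: a prefix wrapped in ANSI colour escapes only when stderr is a tty, followed by the message, with errors then routed through trouble() so the exc_info handling above still applies. A reduced sketch of that prefixing pattern; the red code 31 is taken from report_error above, while the warning colour is an assumption because only the tail of report_warning is visible in this hunk:

    import sys

    def prefixed(label, colour, message):
        # Colour the prefix only when stderr is a tty that can render ANSI escapes.
        if sys.stderr.isatty():
            header = u'\033[0;%dm%s:\033[0m' % (colour, label)
        else:
            header = u'%s:' % label
        return u'%s %s' % (header, message)

    sys.stderr.write(prefixed('ERROR', 31, 'unable to rename file') + u'\n')
    sys.stderr.write(prefixed('WARNING', 33, 'unable to extract video thumbnail') + u'\n')  # 33 (yellow) assumed
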
index 6053d14ecb89b8d60737297b2d7b2c51afc55ba5..81eaddc7260333e70a01be3ea354edfc7513f339 100755 (executable)
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -275,11 +275,11 @@ class YoutubeIE(InfoExtractor):
         try:
             sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+            return (u'unable to download video subtitles: %s' % compat_str(err), None)
         sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
         sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
         if not sub_lang_list:
-            return (u'WARNING: video doesn\'t have subtitles', None)
+            return (u'video doesn\'t have subtitles', None)
         return sub_lang_list
 
     def _list_available_subtitles(self, video_id):
@@ -287,6 +287,10 @@ class YoutubeIE(InfoExtractor):
         self.report_video_subtitles_available(video_id, sub_lang_list)
 
     def _request_subtitle(self, sub_lang, sub_name, video_id, format):
+        """
+        Return tuple:
+        (error_message, sub_lang, sub)
+        """
         self.report_video_subtitles_request(video_id, sub_lang, format)
         params = compat_urllib_parse.urlencode({
             'lang': sub_lang,
@@ -298,14 +302,20 @@ class YoutubeIE(InfoExtractor):
         try:
             sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+            return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
         if not sub:
-            return (u'WARNING: Did not fetch video subtitles', None)
+            return (u'Did not fetch video subtitles', None, None)
         return (None, sub_lang, sub)
 
     def _extract_subtitle(self, video_id):
+        """
+        Return a list with a tuple:
+        [(error_message, sub_lang, sub)]
+        """
         sub_lang_list = self._get_available_subtitles(video_id)
         sub_format = self._downloader.params.get('subtitlesformat')
+        if  isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
+            return [(sub_lang_list[0], None, None)]
         if self._downloader.params.get('subtitleslang', False):
             sub_lang = self._downloader.params.get('subtitleslang')
         elif 'en' in sub_lang_list:
@@ -313,7 +323,7 @@ class YoutubeIE(InfoExtractor):
         else:
             sub_lang = list(sub_lang_list.keys())[0]
         if not sub_lang in sub_lang_list:
-            return (u'WARNING: no closed captions found in the specified language "%s"' % sub_lang, None)
+            return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]
 
         subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
         return [subtitle]
@@ -321,6 +331,8 @@ class YoutubeIE(InfoExtractor):
     def _extract_all_subtitles(self, video_id):
         sub_lang_list = self._get_available_subtitles(video_id)
         sub_format = self._downloader.params.get('subtitlesformat')
+        if  isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
+            return [(sub_lang_list[0], None, None)]
         subtitles = []
         for sub_lang in sub_lang_list:
             subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
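
Every subtitle helper in YoutubeIE now follows the same convention: return (error_message, sub_lang, sub) with error_message set to None on success, and have the list-returning helpers wrap a failure to even list the available subtitles as [(error_message, None, None)]. A small sketch of how a caller consumes that shape, with report_error and write_sub as hypothetical stand-ins for the FileDownloader methods that do the real reporting and file writing:

    def handle_subtitles(subtitles, report_error, write_sub):
        # subtitles: list of (error_message, sub_lang, sub) tuples, as returned
        # by _extract_subtitle / _extract_all_subtitles.
        for (sub_error, sub_lang, sub) in subtitles:
            if sub_error:
                report_error(sub_error)   # error text no longer carries a WARNING:/ERROR: prefix
            else:
                write_sub(sub_lang, sub)  # e.g. written to <filename>.<lang>.<format>
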
@@ -433,13 +445,13 @@ class YoutubeIE(InfoExtractor):
             self.report_age_confirmation()
             age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
             return
 
     def _extract_id(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group(2)
         return video_id
@@ -458,7 +470,7 @@ class YoutubeIE(InfoExtractor):
         try:
             video_webpage_bytes = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
             return
 
         video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
@@ -483,18 +495,18 @@ class YoutubeIE(InfoExtractor):
                 if 'token' in video_info:
                     break
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download video info webpage: %s' % compat_str(err))
                 return
         if 'token' not in video_info:
             if 'reason' in video_info:
-                self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0])
+                self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0])
             else:
-                self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
+                self._downloader.report_error(u'"token" parameter not in video info for unknown reason')
             return
 
         # Check for "rental" videos
         if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
-            self._downloader.trouble(u'ERROR: "rental" videos not supported')
+            self._downloader.report_error(u'"rental" videos not supported')
             return
 
         # Start extracting information
@@ -502,7 +514,7 @@ class YoutubeIE(InfoExtractor):
 
         # uploader
         if 'author' not in video_info:
-            self._downloader.trouble(u'ERROR: unable to extract uploader name')
+            self._downloader.report_error(u'unable to extract uploader name')
             return
         video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
 
@@ -512,17 +524,17 @@ class YoutubeIE(InfoExtractor):
         if mobj is not None:
             video_uploader_id = mobj.group(1)
         else:
-            self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+            self._downloader.report_warning(u'unable to extract uploader nickname')
 
         # title
         if 'title' not in video_info:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
 
         # thumbnail image
         if 'thumbnail_url' not in video_info:
-            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+            self._downloader.report_warning(u'unable to extract video thumbnail')
             video_thumbnail = ''
         else:   # don't panic if we can't find it
             video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
@@ -554,21 +566,21 @@ class YoutubeIE(InfoExtractor):
             if video_subtitles:
                 (sub_error, sub_lang, sub) = video_subtitles[0]
                 if sub_error:
-                    self._downloader.trouble(sub_error)
+                    self._downloader.report_error(sub_error)
 
         if self._downloader.params.get('allsubtitles', False):
             video_subtitles = self._extract_all_subtitles(video_id)
             for video_subtitle in video_subtitles:
                 (sub_error, sub_lang, sub) = video_subtitle
                 if sub_error:
-                    self._downloader.trouble(sub_error)
+                    self._downloader.report_error(sub_error)
 
         if self._downloader.params.get('listsubtitles', False):
             sub_lang_list = self._list_available_subtitles(video_id)
             return
 
         if 'length_seconds' not in video_info:
-            self._downloader.trouble(u'WARNING: unable to extract video duration')
+            self._downloader.report_warning(u'unable to extract video duration')
             video_duration = ''
         else:
             video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
@@ -596,7 +608,7 @@ class YoutubeIE(InfoExtractor):
                 format_list = available_formats
             existing_formats = [x for x in format_list if x in url_map]
             if len(existing_formats) == 0:
-                self._downloader.trouble(u'ERROR: no known formats available for video')
+                self._downloader.report_error(u'no known formats available for video')
                 return
             if self._downloader.params.get('listformats', None):
                 self._print_formats(existing_formats)
@@ -617,10 +629,10 @@ class YoutubeIE(InfoExtractor):
                         video_url_list = [(rf, url_map[rf])]
                         break
                 if video_url_list is None:
-                    self._downloader.trouble(u'ERROR: requested format not available')
+                    self._downloader.report_error(u'requested format not available')
                     return
         else:
-            self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
+            self._downloader.report_error(u'no conn or url_encoded_fmt_stream_map information found in video info')
             return
 
         results = []
@@ -683,7 +695,7 @@ class MetacafeIE(InfoExtractor):
             self.report_disclaimer()
             disclaimer = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to retrieve disclaimer: %s' % compat_str(err))
             return
 
         # Confirm age
@@ -696,14 +708,14 @@ class MetacafeIE(InfoExtractor):
             self.report_age_confirmation()
             disclaimer = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
             return
 
     def _real_extract(self, url):
         # Extract id and simplified title from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group(1)
@@ -720,7 +732,7 @@ class MetacafeIE(InfoExtractor):
             self.report_download_webpage(video_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'unable retrieve video webpage: %s' % compat_str(err))
             return
 
         # Extract URL, uploader and title from webpage
@@ -740,15 +752,15 @@ class MetacafeIE(InfoExtractor):
         else:
             mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
             if mobj is None:
-                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                self._downloader.report_error(u'unable to extract media URL')
                 return
             vardict = compat_parse_qs(mobj.group(1))
             if 'mediaData' not in vardict:
-                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                self._downloader.report_error(u'unable to extract media URL')
                 return
             mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
             if mobj is None:
-                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                self._downloader.report_error(u'unable to extract media URL')
                 return
             mediaURL = mobj.group(1).replace('\\/', '/')
             video_extension = mediaURL[-3:]
@@ -756,13 +768,13 @@ class MetacafeIE(InfoExtractor):
 
         mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         video_title = mobj.group(1).decode('utf-8')
 
         mobj = re.search(r'submitter=(.*?);', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+            self._downloader.report_error(u'unable to extract uploader nickname')
             return
         video_uploader = mobj.group(1)
 
@@ -794,7 +806,7 @@ class DailymotionIE(InfoExtractor):
         # Extract id and simplified title from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group(1).split('_')[0].split('?')[0]
@@ -810,7 +822,7 @@ class DailymotionIE(InfoExtractor):
         self.report_extraction(video_id)
         mobj = re.search(r'\s*var flashvars = (.*)', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            self._downloader.report_error(u'unable to extract media URL')
             return
         flashvars = compat_urllib_parse.unquote(mobj.group(1))
 
@@ -820,12 +832,12 @@ class DailymotionIE(InfoExtractor):
                 self._downloader.to_screen(u'[dailymotion] Using %s' % key)
                 break
         else:
-            self._downloader.trouble(u'ERROR: unable to extract video URL')
+            self._downloader.report_error(u'unable to extract video URL')
             return
 
         mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video URL')
+            self._downloader.report_error(u'unable to extract video URL')
             return
 
         video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
@@ -834,7 +846,7 @@ class DailymotionIE(InfoExtractor):
 
         mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         video_title = unescapeHTML(mobj.group('title'))
 
@@ -844,7 +856,7 @@ class DailymotionIE(InfoExtractor):
             # lookin for official user
             mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
             if mobj_official is None:
-                self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+                self._downloader.report_warning(u'unable to extract uploader nickname')
             else:
                 video_uploader = mobj_official.group(1)
         else:
@@ -886,7 +898,7 @@ class PhotobucketIE(InfoExtractor):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         video_id = mobj.group(1)
@@ -899,14 +911,14 @@ class PhotobucketIE(InfoExtractor):
             self.report_download_webpage(video_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
 
         # Extract URL, uploader, and title from webpage
         self.report_extraction(video_id)
         mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            self._downloader.report_error(u'unable to extract media URL')
             return
         mediaURL = compat_urllib_parse.unquote(mobj.group(1))
 
@@ -914,7 +926,7 @@ class PhotobucketIE(InfoExtractor):
 
         mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         video_title = mobj.group(1).decode('utf-8')
 
@@ -955,7 +967,7 @@ class YahooIE(InfoExtractor):
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         video_id = mobj.group(2)
@@ -968,18 +980,18 @@ class YahooIE(InfoExtractor):
             try:
                 webpage = compat_urllib_request.urlopen(request).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
                 return
 
             mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
             if mobj is None:
-                self._downloader.trouble(u'ERROR: Unable to extract id field')
+                self._downloader.report_error(u'Unable to extract id field')
                 return
             yahoo_id = mobj.group(1)
 
             mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
             if mobj is None:
-                self._downloader.trouble(u'ERROR: Unable to extract vid field')
+                self._downloader.report_error(u'Unable to extract vid field')
                 return
             yahoo_vid = mobj.group(1)
 
@@ -992,34 +1004,34 @@ class YahooIE(InfoExtractor):
             self.report_download_webpage(video_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
 
         # Extract uploader and title from webpage
         self.report_extraction(video_id)
         mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         video_title = mobj.group(1).decode('utf-8')
 
         mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video uploader')
+            self._downloader.report_error(u'unable to extract video uploader')
             return
         video_uploader = mobj.group(1).decode('utf-8')
 
         # Extract video thumbnail
         mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            self._downloader.report_error(u'unable to extract video thumbnail')
             return
         video_thumbnail = mobj.group(1).decode('utf-8')
 
         # Extract video description
         mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video description')
+            self._downloader.report_error(u'unable to extract video description')
             return
         video_description = mobj.group(1).decode('utf-8')
         if not video_description:
@@ -1028,13 +1040,13 @@ class YahooIE(InfoExtractor):
         # Extract video height and width
         mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video height')
+            self._downloader.report_error(u'unable to extract video height')
             return
         yv_video_height = mobj.group(1)
 
         mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video width')
+            self._downloader.report_error(u'unable to extract video width')
             return
         yv_video_width = mobj.group(1)
 
@@ -1050,13 +1062,13 @@ class YahooIE(InfoExtractor):
             self.report_download_webpage(video_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
 
         # Extract media URL from playlist XML
         mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Unable to extract media URL')
+            self._downloader.report_error(u'Unable to extract media URL')
             return
         video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
         video_url = unescapeHTML(video_url)
@@ -1095,7 +1107,7 @@ class VimeoIE(InfoExtractor):
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         video_id = mobj.group('id')
@@ -1111,7 +1123,7 @@ class VimeoIE(InfoExtractor):
             webpage_bytes = compat_urllib_request.urlopen(request).read()
             webpage = webpage_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
 
         # Now we begin extracting as much information as we can from what we
@@ -1124,7 +1136,7 @@ class VimeoIE(InfoExtractor):
             config = webpage.split(' = {config:')[1].split(',assets:')[0]
             config = json.loads(config)
         except:
-            self._downloader.trouble(u'ERROR: unable to extract info section')
+            self._downloader.report_error(u'unable to extract info section')
             return
 
         # Extract title
@@ -1174,7 +1186,7 @@ class VimeoIE(InfoExtractor):
                 self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
                 break
         else:
-            self._downloader.trouble(u'ERROR: no known codec found')
+            self._downloader.report_error(u'no known codec found')
             return
 
         video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
@@ -1218,10 +1230,10 @@ class ArteTvIE(InfoExtractor):
             self.report_download_webpage(url)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
         except ValueError as err:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
         return webpage
 
@@ -1231,7 +1243,7 @@ class ArteTvIE(InfoExtractor):
         info = {}
 
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         for (i, key, err) in matchTuples:
@@ -1416,7 +1428,7 @@ class GenericIE(InfoExtractor):
         except ValueError as err:
             # since this is the last-resort InfoExtractor, if
             # this error is thrown, it'll be thrown here
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         self.report_extraction(video_id)
@@ -1429,13 +1441,13 @@ class GenericIE(InfoExtractor):
             # Broaden the search a little bit: JWPlayer JS loader
             mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         # It's possible that one of the regexes
         # matched, but returned an empty group:
         if mobj.group(1) is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         video_url = compat_urllib_parse.unquote(mobj.group(1))
@@ -1453,14 +1465,14 @@ class GenericIE(InfoExtractor):
         # and so on and so forth; it's just not practical
         mobj = re.search(r'<title>(.*)</title>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         video_title = mobj.group(1)
 
         # video uploader is domain name
         mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         video_uploader = mobj.group(1)
 
@@ -1492,7 +1504,7 @@ class YoutubeSearchIE(InfoExtractor):
     def _real_extract(self, query):
         mobj = re.match(self._VALID_URL, query)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            self._downloader.report_error(u'invalid search query "%s"' % query)
             return
 
         prefix, query = query.split(':')
@@ -1508,7 +1520,7 @@ class YoutubeSearchIE(InfoExtractor):
             try:
                 n = int(prefix)
                 if n <= 0:
-                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
                     return
                 elif n > self._max_youtube_results:
                     self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
@@ -1533,7 +1545,7 @@ class YoutubeSearchIE(InfoExtractor):
             try:
                 data = compat_urllib_request.urlopen(request).read().decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download API page: %s' % compat_str(err))
                 return
             api_response = json.loads(data)['data']
 
@@ -1574,7 +1586,7 @@ class GoogleSearchIE(InfoExtractor):
     def _real_extract(self, query):
         mobj = re.match(self._VALID_URL, query)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            self._downloader.report_error(u'invalid search query "%s"' % query)
             return
 
         prefix, query = query.split(':')
@@ -1590,7 +1602,7 @@ class GoogleSearchIE(InfoExtractor):
             try:
                 n = int(prefix)
                 if n <= 0:
-                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
                     return
                 elif n > self._max_google_results:
                     self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
@@ -1614,7 +1626,7 @@ class GoogleSearchIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(request).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             # Extract video identifiers
@@ -1658,7 +1670,7 @@ class YahooSearchIE(InfoExtractor):
     def _real_extract(self, query):
         mobj = re.match(self._VALID_URL, query)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            self._downloader.report_error(u'invalid search query "%s"' % query)
             return
 
         prefix, query = query.split(':')
@@ -1674,7 +1686,7 @@ class YahooSearchIE(InfoExtractor):
             try:
                 n = int(prefix)
                 if n <= 0:
-                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
                     return
                 elif n > self._max_yahoo_results:
                     self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
@@ -1699,7 +1711,7 @@ class YahooSearchIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(request).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             # Extract video identifiers
@@ -1732,9 +1744,7 @@ class YoutubePlaylistIE(InfoExtractor):
                         (?:
                            (?:course|view_play_list|my_playlists|artist|playlist|watch)
                            \? (?:.*?&)*? (?:p|a|list)=
-                        |  user/.*?/user/
                         |  p/
-                        |  user/.*?#[pg]/c/
                         )
                         ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
                         .*
@@ -1761,7 +1771,7 @@ class YoutubePlaylistIE(InfoExtractor):
         # Extract playlist id
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            self._downloader.report_error(u'invalid url: %s' % url)
             return
 
         # Download playlist videos from API
@@ -1776,17 +1786,17 @@ class YoutubePlaylistIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(url).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             try:
                 response = json.loads(page)
             except ValueError as err:
-                self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err))
+                self._downloader.report_error(u'Invalid JSON in API response: ' + compat_str(err))
                 return
 
             if not 'feed' in response or not 'entry' in response['feed']:
-                self._downloader.trouble(u'ERROR: Got a malformed response from YouTube API')
+                self._downloader.report_error(u'Got a malformed response from YouTube API')
                 return
             videos += [ (entry['yt$position']['$t'], entry['content']['src'])
                         for entry in response['feed']['entry']
@@ -1831,7 +1841,7 @@ class YoutubeChannelIE(InfoExtractor):
         # Extract channel id
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            self._downloader.report_error(u'invalid url: %s' % url)
             return
 
         # Download channel pages
@@ -1846,7 +1856,7 @@ class YoutubeChannelIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(request).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             # Extract video identifiers
@@ -1889,7 +1899,7 @@ class YoutubeUserIE(InfoExtractor):
         # Extract username
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            self._downloader.report_error(u'invalid url: %s' % url)
             return
 
         username = mobj.group(1)
@@ -1911,7 +1921,7 @@ class YoutubeUserIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(request).read().decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             # Extract video identifiers
@@ -1970,7 +1980,7 @@ class BlipTVUserIE(InfoExtractor):
         # Extract username
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            self._downloader.report_error(u'invalid url: %s' % url)
             return
 
         username = mobj.group(1)
@@ -1984,7 +1994,7 @@ class BlipTVUserIE(InfoExtractor):
             mobj = re.search(r'data-users-id="([^"]+)"', page)
             page_base = page_base % mobj.group(1)
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
             return
 
 
@@ -2003,7 +2013,7 @@ class BlipTVUserIE(InfoExtractor):
             try:
                 page = compat_urllib_request.urlopen(request).read().decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % str(err))
                 return
 
             # Extract video identifiers
@@ -2068,7 +2078,7 @@ class DepositFilesIE(InfoExtractor):
             self.report_download_webpage(file_id)
             webpage = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve file webpage: %s' % compat_str(err))
             return
 
         # Search for the real file URL
@@ -2078,9 +2088,9 @@ class DepositFilesIE(InfoExtractor):
             mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
             if (mobj is not None) and (mobj.group(1) is not None):
                 restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
-                self._downloader.trouble(u'ERROR: %s' % restriction_message)
+                self._downloader.report_error(u'%s' % restriction_message)
             else:
-                self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
+                self._downloader.report_error(u'unable to extract download URL from: %s' % url)
             return
 
         file_url = mobj.group(1)
@@ -2089,7 +2099,7 @@ class DepositFilesIE(InfoExtractor):
         # Search for file title
         mobj = re.search(r'<b title="(.*?)">', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         file_title = mobj.group(1).decode('utf-8')
 
@@ -2162,7 +2172,7 @@ class FacebookIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group('ID')
 
@@ -2218,7 +2228,7 @@ class BlipTVIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         urlp = compat_urllib_parse_urlparse(url)
@@ -2265,7 +2275,7 @@ class BlipTVIE(InfoExtractor):
                 json_code_bytes = urlh.read()
                 json_code = json_code_bytes.decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to read video info webpage: %s' % compat_str(err))
                 return
 
             try:
@@ -2296,7 +2306,7 @@ class BlipTVIE(InfoExtractor):
                     'user_agent': 'iTunes/10.6.1',
                 }
             except (ValueError,KeyError) as err:
-                self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
+                self._downloader.report_error(u'unable to parse video information: %s' % repr(err))
                 return
 
         return [info]
@@ -2318,7 +2328,7 @@ class MyVideoIE(InfoExtractor):
     def _real_extract(self,url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._download.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group(1)
@@ -2328,16 +2338,16 @@ class MyVideoIE(InfoExtractor):
         webpage = self._download_webpage(webpage_url, video_id)
 
         self.report_extraction(video_id)
-        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
+        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\'',
                  webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            self._downloader.report_error(u'unable to extract media URL')
             return
         video_url = mobj.group(1) + ('/%s.flv' % video_id)
 
         mobj = re.search('<title>([^<]+)</title>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
 
         video_title = mobj.group(1)
@@ -2410,7 +2420,7 @@ class ComedyCentralIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         if mobj.group('shortname'):
@@ -2441,16 +2451,16 @@ class ComedyCentralIE(InfoExtractor):
             html = htmlHandle.read()
             webpage = html.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
             return
         if dlNewest:
             url = htmlHandle.geturl()
             mobj = re.match(self._VALID_URL, url, re.VERBOSE)
             if mobj is None:
-                self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
+                self._downloader.report_error(u'Invalid redirected URL: ' + url)
                 return
             if mobj.group('episode') == '':
-                self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
+                self._downloader.report_error(u'Redirected URL is still not specific: ' + url)
                 return
             epTitle = mobj.group('episode')
 
@@ -2463,7 +2473,7 @@ class ComedyCentralIE(InfoExtractor):
 
             altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
             if len(altMovieParams) == 0:
-                self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
+                self._downloader.report_error(u'unable to find Flash URL in webpage ' + url)
                 return
             else:
                 mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
@@ -2474,7 +2484,7 @@ class ComedyCentralIE(InfoExtractor):
         try:
             indexXml = compat_urllib_request.urlopen(indexUrl).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
+            self._downloader.report_error(u'unable to download episode index: ' + compat_str(err))
             return
 
         results = []
@@ -2495,7 +2505,7 @@ class ComedyCentralIE(InfoExtractor):
             try:
                 configXml = compat_urllib_request.urlopen(configReq).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
                 return
 
             cdoc = xml.etree.ElementTree.fromstring(configXml)
@@ -2505,7 +2515,7 @@ class ComedyCentralIE(InfoExtractor):
                 turls.append(finfo)
 
             if len(turls) == 0:
-                self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
+                self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
                 continue
 
             if self._downloader.params.get('listformats', None):
@@ -2562,7 +2572,7 @@ class EscapistIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         showName = mobj.group('showname')
         videoId = mobj.group('episode')
@@ -2574,7 +2584,7 @@ class EscapistIE(InfoExtractor):
             m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
             webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
+            self._downloader.report_error(u'unable to download webpage: ' + compat_str(err))
             return
 
         descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
@@ -2592,7 +2602,7 @@ class EscapistIE(InfoExtractor):
             m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type'])
             configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
+            self._downloader.report_error(u'unable to download configuration: ' + compat_str(err))
             return
 
         # Technically, it's JavaScript, not JSON
@@ -2601,7 +2611,7 @@ class EscapistIE(InfoExtractor):
         try:
             config = json.loads(configJSON)
         except (ValueError,) as err:
-            self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
+            self._downloader.report_error(u'Invalid JSON in configuration file: ' + compat_str(err))
             return
 
         playlist = config['playlist']
@@ -2639,7 +2649,7 @@ class CollegeHumorIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group('videoid')
 
@@ -2654,7 +2664,7 @@ class CollegeHumorIE(InfoExtractor):
         try:
             metaXml = compat_urllib_request.urlopen(xmlUrl).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
             return
 
         mdoc = xml.etree.ElementTree.fromstring(metaXml)
@@ -2665,7 +2675,7 @@ class CollegeHumorIE(InfoExtractor):
             info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
             manifest_url = videoNode.findall('./file')[0].text
         except IndexError:
-            self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+            self._downloader.report_error(u'Invalid metadata XML file')
             return
 
         manifest_url += '?hdcore=2.10.3'
@@ -2673,7 +2683,7 @@ class CollegeHumorIE(InfoExtractor):
         try:
             manifestXml = compat_urllib_request.urlopen(manifest_url).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
             return
 
         adoc = xml.etree.ElementTree.fromstring(manifestXml)
@@ -2682,7 +2692,7 @@ class CollegeHumorIE(InfoExtractor):
             node_id = media_node.attrib['url']
             video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
         except IndexError as err:
-            self._downloader.trouble(u'\nERROR: Invalid manifest file')
+            self._downloader.report_error(u'Invalid manifest file')
             return
 
         url_pr = compat_urllib_parse_urlparse(manifest_url)
@@ -2706,7 +2716,7 @@ class XVideosIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group(1)
 
@@ -2718,7 +2728,7 @@ class XVideosIE(InfoExtractor):
         # Extract video URL
         mobj = re.search(r'flv_url=(.+?)&', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video url')
+            self._downloader.report_error(u'unable to extract video url')
             return
         video_url = compat_urllib_parse.unquote(mobj.group(1))
 
@@ -2726,7 +2736,7 @@ class XVideosIE(InfoExtractor):
         # Extract title
         mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         video_title = mobj.group(1)
 
@@ -2734,7 +2744,7 @@ class XVideosIE(InfoExtractor):
         # Extract video thumbnail
         mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            self._downloader.report_error(u'unable to extract video thumbnail')
             return
         video_thumbnail = mobj.group(0)
 
@@ -2778,7 +2788,7 @@ class SoundcloudIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         # extract uploader (which is in the url)
@@ -2796,7 +2806,7 @@ class SoundcloudIE(InfoExtractor):
             info_json_bytes = compat_urllib_request.urlopen(request).read()
             info_json = info_json_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
             return
 
         info = json.loads(info_json)
@@ -2809,7 +2819,7 @@ class SoundcloudIE(InfoExtractor):
             stream_json_bytes = compat_urllib_request.urlopen(request).read()
             stream_json = stream_json_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download stream definitions: %s' % compat_str(err))
             return
 
         streams = json.loads(stream_json)
@@ -2825,6 +2835,87 @@ class SoundcloudIE(InfoExtractor):
             'description': info['description'],
         }]
 
+class SoundcloudSetIE(InfoExtractor):
+    """Information extractor for soundcloud.com sets
+       To access the media, the uid of the song and a stream token
+       must be extracted from the page source and the script must make
+       a request to media.soundcloud.com/crossdomain.xml. Then
+       the media can be grabbed by requesting a URL composed
+       of the stream token and uid.
+     """
+
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)'
+    IE_NAME = u'soundcloud:set'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_resolve(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id))
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.report_error(u'invalid URL: %s' % url)
+            return
+
+        # extract uploader (which is in the url)
+        uploader = mobj.group(1)
+        # extract simple title (uploader + slug of song title)
+        slug_title = mobj.group(2)
+        simple_title = uploader + u'-' + slug_title
+
+        self.report_resolve('%s/sets/%s' % (uploader, slug_title))
+
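+        # Resolve the set URL through the public API to get a JSON description
+        # of the set, including its tracks (the client_id is presumably the
+        # public key used by the SoundCloud web player).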
+        url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
+        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+        request = compat_urllib_request.Request(resolv_url)
+        try:
+            info_json_bytes = compat_urllib_request.urlopen(request).read()
+            info_json = info_json_bytes.decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
+            return
+
+        videos = []
+        info = json.loads(info_json)
+        if 'errors' in info:
+            for err in info['errors']:
+                self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err['error_message']))
+            return
+
+        for track in info['tracks']:
+            video_id = track['id']
+            self.report_extraction('%s/sets/%s' % (uploader, slug_title))
+
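+            # Each track's stream definitions are fetched separately; the
+            # 128 kbps MP3 stream is used as the download URL below.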
+            streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+            request = compat_urllib_request.Request(streams_url)
+            try:
+                stream_json_bytes = compat_urllib_request.urlopen(request).read()
+                stream_json = stream_json_bytes.decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.report_error(u'unable to download stream definitions: %s' % compat_str(err))
+                return
+
+            streams = json.loads(stream_json)
+            mediaURL = streams['http_mp3_128_url']
+
+            videos.append({
+                'id':       video_id,
+                'url':      mediaURL,
+                'uploader': track['user']['username'],
+                'upload_date':  track['created_at'],
+                'title':    track['title'],
+                'ext':      u'mp3',
+                'description': track['description'],
+            })
+        return videos
+
 
 class InfoQIE(InfoExtractor):
     """Information extractor for infoq.com"""
@@ -2837,7 +2928,7 @@ class InfoQIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         webpage = self._download_webpage(url, video_id=url)
@@ -2846,7 +2937,7 @@ class InfoQIE(InfoExtractor):
         # Extract video URL
         mobj = re.search(r"jsclassref='([^']*)'", webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video url')
+            self._downloader.report_error(u'unable to extract video url')
             return
         real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
         video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
@@ -2854,7 +2945,7 @@ class InfoQIE(InfoExtractor):
         # Extract title
         mobj = re.search(r'contentTitle = "(.*?)";', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         video_title = mobj.group(1)
 
@@ -2937,7 +3028,7 @@ class MixcloudIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         # extract uploader & filename from url
         uploader = mobj.group(1).decode('utf-8')
@@ -2951,7 +3042,7 @@ class MixcloudIE(InfoExtractor):
             self.report_download_json(file_url)
             jsonData = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve file: %s' % compat_str(err))
             return
 
         # parse JSON
@@ -2975,7 +3066,7 @@ class MixcloudIE(InfoExtractor):
                     break # got it!
         else:
             if req_format not in formats:
-                self._downloader.trouble(u'ERROR: format is not available')
+                self._downloader.report_error(u'format is not available')
                 return
 
             url_list = self.get_urls(formats, req_format)
@@ -3029,14 +3120,14 @@ class StanfordOpenClassroomIE(InfoExtractor):
             try:
                 metaXml = compat_urllib_request.urlopen(xmlUrl).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+                self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
                 return
             mdoc = xml.etree.ElementTree.fromstring(metaXml)
             try:
                 info['title'] = mdoc.findall('./title')[0].text
                 info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
             except IndexError:
-                self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+                self._downloader.report_error(u'Invalid metadata XML file')
                 return
             info['ext'] = info['url'].rpartition('.')[2]
             return [info]
@@ -3088,7 +3179,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
             try:
                 rootpage = compat_urllib_request.urlopen(rootURL).read()
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
+                self._downloader.report_error(u'unable to download course info page: ' + compat_str(err))
                 return
 
             info['title'] = info['id']
@@ -3120,7 +3211,7 @@ class MTVIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         if not mobj.group('proto'):
             url = 'http://' + url
@@ -3130,25 +3221,25 @@ class MTVIE(InfoExtractor):
 
         mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract song name')
+            self._downloader.report_error(u'unable to extract song name')
             return
         song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
         mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract performer')
+            self._downloader.report_error(u'unable to extract performer')
             return
         performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
         video_title = performer + ' - ' + song_name
 
         mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to mtvn_uri')
+            self._downloader.report_error(u'unable to mtvn_uri')
             return
         mtvn_uri = mobj.group(1)
 
         mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract content id')
+            self._downloader.report_error(u'unable to extract content id')
             return
         content_id = mobj.group(1)
 
@@ -3158,7 +3249,7 @@ class MTVIE(InfoExtractor):
         try:
             metadataXml = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video metadata: %s' % compat_str(err))
             return
 
         mdoc = xml.etree.ElementTree.fromstring(metadataXml)
@@ -3230,7 +3321,7 @@ class YoukuIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group('ID')
 
@@ -3241,7 +3332,7 @@ class YoukuIE(InfoExtractor):
             self.report_download_webpage(video_id)
             jsondata = compat_urllib_request.urlopen(request).read()
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
 
         self.report_extraction(video_id)
@@ -3272,7 +3363,7 @@ class YoukuIE(InfoExtractor):
             fileid = config['data'][0]['streamfileids'][format]
             keys = [s['k'] for s in config['data'][0]['segs'][format]]
         except (UnicodeDecodeError, ValueError, KeyError):
-            self._downloader.trouble(u'ERROR: unable to extract info section')
+            self._downloader.report_error(u'unable to extract info section')
             return
 
         files_info=[]
@@ -3319,7 +3410,7 @@ class XNXXIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
         video_id = mobj.group(1)
 
@@ -3330,24 +3421,24 @@ class XNXXIE(InfoExtractor):
             webpage_bytes = compat_urllib_request.urlopen(url).read()
             webpage = webpage_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
+            self._downloader.report_error(u'unable to download video webpage: %s' % err)
             return
 
         result = re.search(self.VIDEO_URL_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video url')
+            self._downloader.report_error(u'unable to extract video url')
             return
         video_url = compat_urllib_parse.unquote(result.group(1))
 
         result = re.search(self.VIDEO_TITLE_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         video_title = result.group(1)
 
         result = re.search(self.VIDEO_THUMB_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            self._downloader.report_error(u'unable to extract video thumbnail')
             return
         video_thumbnail = result.group(1)
 
@@ -3396,7 +3487,7 @@ class GooglePlusIE(InfoExtractor):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            self._downloader.report_error(u'Invalid URL: %s' % url)
             return
 
         post_url = mobj.group(0)
@@ -3410,7 +3501,7 @@ class GooglePlusIE(InfoExtractor):
         try:
             webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve entry webpage: %s' % compat_str(err))
             return
 
         # Extract update date
@@ -3445,14 +3536,14 @@ class GooglePlusIE(InfoExtractor):
         pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
         mobj = re.search(pattern, webpage)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: unable to extract video page URL')
+            self._downloader.report_error(u'unable to extract video page URL')
 
         video_page = mobj.group(1)
         request = compat_urllib_request.Request(video_page)
         try:
             webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
             return
         self.report_extract_vid_page(video_page)
 
@@ -3462,7 +3553,7 @@ class GooglePlusIE(InfoExtractor):
         pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
         mobj = re.findall(pattern, webpage)
         if len(mobj) == 0:
-            self._downloader.trouble(u'ERROR: unable to extract video links')
+            self._downloader.report_error(u'unable to extract video links')
 
         # Sort in resolution
         links = sorted(mobj)
@@ -3494,7 +3585,7 @@ class NBAIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group(1)
@@ -3550,13 +3641,13 @@ class JustinTVIE(InfoExtractor):
             webpage_bytes = urlh.read()
             webpage = webpage_bytes.decode('utf-8', 'ignore')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
+            self._downloader.report_error(u'unable to download video info JSON: %s' % compat_str(err))
             return
 
         response = json.loads(webpage)
         if type(response) != list:
             error_text = response.get('error', 'unknown error')
-            self._downloader.trouble(u'ERROR: Justin.tv API: %s' % error_text)
+            self._downloader.report_error(u'Justin.tv API: %s' % error_text)
             return
         info = []
         for clip in response:
@@ -3581,7 +3672,7 @@ class JustinTVIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         api = 'http://api.justin.tv'
@@ -3616,7 +3707,7 @@ class FunnyOrDieIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group('id')
@@ -3624,13 +3715,13 @@ class FunnyOrDieIE(InfoExtractor):
 
         m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
         if not m:
-            self._downloader.trouble(u'ERROR: unable to find video information')
+            self._downloader.report_error(u'unable to find video information')
         video_url = unescapeHTML(m.group('url'))
 
-        m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage)
+        m = re.search(r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>", webpage, flags=re.DOTALL)
         if not m:
             self._downloader.trouble(u'Cannot find video title')
-        title = unescapeHTML(m.group('title'))
+        title = clean_html(m.group('title'))
 
         m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
         if m:
@@ -3677,7 +3768,7 @@ class SteamIE(InfoExtractor):
             video_url = vid.group('videoURL')
             video_thumb = thumb.group('thumbnail')
             if not video_url:
-                self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
+                self._downloader.report_error(u'Cannot find video url for %s' % video_id)
             info = {
                 'id':video_id,
                 'url':video_url,
@@ -3710,6 +3801,62 @@ class UstreamIE(InfoExtractor):
                   }
         return [info]
 
+class WorldStarHipHopIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
+    IE_NAME = u'WorldStarHipHop'
+
+    def _real_extract(self, url):
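+        # The page source embeds a direct link to the media file (mp4 or flv)
+        # on the hw-videos CDN; grab it with a simple regex.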
+        _src_url = r"""(http://hw-videos.*(?:mp4|flv))"""
+
+        webpage_src = compat_urllib_request.urlopen(url).read()
+        webpage_src = webpage_src.decode('utf-8')
+
+        mobj = re.search(_src_url, webpage_src)
+
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('id')
+
+        if mobj is not None:
+            video_url = mobj.group()
+            if 'mp4' in video_url:
+                ext = 'mp4'
+            else:
+                ext = 'flv'
+        else:
+            self._downloader.report_error(u'Cannot find video url for %s' % video_id)
+            return
+
+        _title = r"""<title>(.*)</title>"""
+
+        mobj = re.search(_title, webpage_src)
+
+        if mobj is not None:
+            title = mobj.group(1)
+        else:
+            title = 'World Star Hip Hop - %s' % time.ctime()
+
+        _thumbnail = r"""rel="image_src" href="(.*)" />"""
+        mobj = re.search(_thumbnail, webpage_src)
+
+        # Get the thumbnail; if it is missing this is a WSHH candy page, so extract the correct title instead.
+        if mobj is not None:
+            thumbnail = mobj.group(1)
+        else:
+            _title = r"""candytitles.*>(.*)</span>"""
+            mobj = re.search(_title, webpage_src)
+            if mobj is not None:
+                title = mobj.group(1)
+            thumbnail = None
+
+        results = [{
+                    'id': video_id,
+                    'url' : video_url,
+                    'title' : title,
+                    'thumbnail' : thumbnail,
+                    'ext' : ext,
+                    }]
+        return results
+
 class RBMARadioIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
 
@@ -3767,7 +3914,7 @@ class YouPornIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group('videoid')
@@ -3859,7 +4006,7 @@ class YouPornIE(InfoExtractor):
         else:
             format = self._specific( req_format, formats )
             if result is None:
-                self._downloader.trouble(u'ERROR: requested format not available')
+                self._downloader.report_error(u'requested format not available')
                 return
             return [format]
 
@@ -3872,7 +4019,7 @@ class PornotubeIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group('videoid')
@@ -3885,7 +4032,7 @@ class PornotubeIE(InfoExtractor):
         VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
         result = re.search(VIDEO_URL_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video url')
+            self._downloader.report_error(u'unable to extract video url')
             return
         video_url = compat_urllib_parse.unquote(result.group('url'))
 
@@ -3893,7 +4040,7 @@ class PornotubeIE(InfoExtractor):
         VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
         result = re.search(VIDEO_UPLOADED_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
+            self._downloader.report_error(u'unable to extract video title')
             return
         upload_date = result.group('date')
 
@@ -3914,7 +4061,7 @@ class YouJizzIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            self._downloader.report_error(u'invalid URL: %s' % url)
             return
 
         video_id = mobj.group('videoid')
@@ -4074,7 +4221,7 @@ class TEDIE(InfoExtractor):
         videoName=m.group('name')
         webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
         # If the url includes the language we get the title translated
-        title_RE=r'<h1><span id="altHeadline" >(?P<title>.*)</span></h1>'
+        title_RE=r'<span id="altHeadline" >(?P<title>.*)</span>'
         title=re.search(title_RE, webpage).group('title')
         info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
                         "id":(?P<videoID>[\d]+).*?
@@ -4115,13 +4262,13 @@ class MySpassIE(InfoExtractor):
         # extract values from metadata
         url_flv_el = metadata.find('url_flv')
         if url_flv_el is None:
-            self._downloader.trouble(u'ERROR: unable to extract download url')
+            self._downloader.report_error(u'unable to extract download url')
             return
         video_url = url_flv_el.text
         extension = os.path.splitext(video_url)[1][1:]
         title_el = metadata.find('title')
         if title_el is None:
-            self._downloader.trouble(u'ERROR: unable to extract title')
+            self._downloader.report_error(u'unable to extract title')
             return
         title = title_el.text
         format_id_el = metadata.find('format_id')
@@ -4151,7 +4298,7 @@ class MySpassIE(InfoExtractor):
         return [info]
 
 class SpiegelIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?$'
+    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
@@ -4183,6 +4330,55 @@ class SpiegelIE(InfoExtractor):
         }
         return [info]
 
+class LiveLeakIE(InfoExtractor):
+
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<video_id>[\w_]+)(?:.*)'
+    IE_NAME = u'liveleak'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.report_error(u'invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group('video_id')
+
+        webpage = self._download_webpage(url, video_id)
+
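+        # The embedded player configuration carries the direct video URL in a
+        # file: "..." entry.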
+        m = re.search(r'file: "(.*?)",', webpage)
+        if not m:
+            self._downloader.report_error(u'unable to find video url')
+            return
+        video_url = m.group(1)
+
+        m = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', webpage)
+        if not m:
+            self._downloader.report_error(u'Cannot find video title')
+        title = unescapeHTML(m.group('title')).replace('LiveLeak.com -', '').strip()
+
+        m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
+        if m:
+            desc = unescapeHTML(m.group('desc'))
+        else:
+            desc = None
+
+        m = re.search(r'By:.*?(\w+)</a>', webpage)
+        if m:
+            uploader = clean_html(m.group(1))
+        else:
+            uploader = None
+
+        info = {
+            'id':  video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': desc,
+            'uploader': uploader
+        }
+
+        return [info]
+
 
 def gen_extractors():
     """ Return a list of an instance of every supported extractor.
@@ -4210,6 +4406,7 @@ def gen_extractors():
         EscapistIE(),
         CollegeHumorIE(),
         XVideosIE(),
+        SoundcloudSetIE(),
         SoundcloudIE(),
         InfoQIE(),
         MixcloudIE(),
@@ -4223,6 +4420,7 @@ def gen_extractors():
         GooglePlusIE(),
         ArteTvIE(),
         NBAIE(),
+        WorldStarHipHopIE(),
         JustinTVIE(),
         FunnyOrDieIE(),
         SteamIE(),
@@ -4233,7 +4431,6 @@ def gen_extractors():
         TEDIE(),
         MySpassIE(),
         SpiegelIE(),
+        LiveLeakIE(),
         GenericIE()
     ]
-
-
index 95bd948438830ca7f98ad82decb722de69319bdf..017f06c42e9a019e18e25480c5e5d8d3aaaef335 100644 (file)
@@ -311,7 +311,7 @@ def clean_html(html):
     html = re.sub('<.*?>', '', html)
     # Replace html entities
     html = unescapeHTML(html)
-    return html
+    return html.strip()
 
 
 def sanitize_open(filename, open_mode):
@@ -329,7 +329,7 @@ def sanitize_open(filename, open_mode):
             if sys.platform == 'win32':
                 import msvcrt
                 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
-            return (sys.stdout, filename)
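+            # On Python 3 write the downloaded bytes to the underlying binary
+            # buffer; on Python 2 sys.stdout has no 'buffer' attribute.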
+            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
         stream = open(encodeFilename(filename), open_mode)
         return (stream, filename)
     except (IOError, OSError) as err:
@@ -435,6 +435,7 @@ class ExtractorError(Exception):
         """ tb, if given, is the original traceback (so that it can be printed out). """
         super(ExtractorError, self).__init__(msg)
         self.traceback = tb
+        self.exc_info = sys.exc_info()  # preserve original exception
 
     def format_traceback(self):
         if self.traceback is None:
@@ -449,7 +450,10 @@ class DownloadError(Exception):
     configured to continue on errors. They will contain the appropriate
     error message.
     """
-    pass
+    def __init__(self, msg, exc_info=None):
+        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
+        super(DownloadError, self).__init__(msg)
+        self.exc_info = exc_info
 
 
 class SameFileError(Exception):
index ce8f6ca233684014a93da3ad8262b40ec6a1e1ac..c433e2eaa293c43a2bce6898a6dade24044422e6 100644 (file)
@@ -1,2 +1,2 @@
 
-__version__ = '2013.02.25'
+__version__ = '2013.04.03'