don't corrupt stdout (-o -) in verbose mode
author    chocolateboy <chocolate@cpan.org>
Wed, 30 May 2012 10:50:13 +0000 (11:50 +0100)
committer chocolateboy <chocolate@cpan.org>
Wed, 30 May 2012 10:50:13 +0000 (11:50 +0100)
youtube-dl
youtube_dl/InfoExtractors.py
youtube_dl/__init__.py
youtube_dl/utils.py
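The principle behind the change: with -o - the downloaded media bytes are written to stdout, so any diagnostic text printed to stdout (the verbose proxy map, extractor exception messages, the HTML parser's error position) gets interleaved with the media data and corrupts it. All such output has to go to stderr instead. A minimal sketch of the idea, not youtube-dl's actual code (debug and stream_to_stdout below are hypothetical helpers):

    import sys

    def debug(msg):
        # Diagnostics go to stderr so they never mix with the media stream.
        sys.stderr.write('[debug] %s\n' % msg)

    def stream_to_stdout(chunks):
        # Media data only: anything else written to stdout would end up
        # inside the file a caller pipes out of -o -.
        for chunk in chunks:
            sys.stdout.write(chunk)
        sys.stdout.flush()

    debug('Proxy map: %s' % {})                  # safe under -o -
    stream_to_stdout(['not ', 'really ', 'video data\n'])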

diff --git a/youtube-dl b/youtube-dl
index 80175ed6eb869f022cf9e2981db0ff04d06c215e..f65e97fd077d46b2c6535f32a65b32dd50639a74 100755
Binary files a/youtube-dl and b/youtube-dl differ
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 0fc39163ee2a74131ed84442c68f388a72fad0f1..40f96ad76590bc732c4d5c422d1d839fe6eb9986 100644
@@ -181,7 +181,7 @@ class YoutubeIE(InfoExtractor):
                        start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
                        end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
                        caption = unescapeHTML(caption)
-                       caption = unescapeHTML(caption) # double cycle, inentional
+                       caption = unescapeHTML(caption) # double cycle, intentional
                        srt += str(n) + '\n'
                        srt += start + ' --> ' + end + '\n'
                        srt += caption + '\n\n'
@@ -2450,7 +2450,7 @@ class SoundcloudIE(InfoExtractor):
                        try:
                                upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
                        except Exception, e:
-                               print str(e)
+                               self._downloader.to_stderr(str(e))
 
                # for soundcloud, a request to a cross domain is required for cookies
                request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 73590ecd6f0863c4787f62880df5203e8caf17d0..827b58264fc9af28bd971c305381673e575b1b1a 100644
@@ -396,9 +396,6 @@ def _real_main():
        urllib2.install_opener(opener)
        socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
 
-       if opts.verbose:
-               print(u'[debug] Proxy map: ' + str(proxy_handler.proxies))
-
        extractors = gen_extractors()
 
        if opts.list_extractors:
@@ -496,6 +493,10 @@ def _real_main():
                'prefer_free_formats': opts.prefer_free_formats,
                'verbose': opts.verbose,
                })
+
+       if opts.verbose:
+               fd.to_screen(u'[debug] Proxy map: ' + str(proxy_handler.proxies))
+
        for extractor in extractors:
                fd.add_info_extractor(extractor)
 
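The verbose proxy-map line also moves below the FileDownloader construction so it can be emitted through the downloader's own output helpers instead of a bare print(). A rough sketch of that routing, assuming the downloader decides where screen messages go (MiniDownloader below is a hypothetical stand-in, not youtube-dl's FileDownloader):

    import sys

    class MiniDownloader(object):
        """Hypothetical downloader that owns its output streams."""
        def __init__(self, params):
            self.params = params
            # When the output template is '-', stdout carries media data,
            # so screen messages are diverted to stderr.
            self._screen = sys.stderr if params.get('outtmpl') == '-' else sys.stdout

        def to_screen(self, message):
            self._screen.write(message + '\n')

        def to_stderr(self, message):
            sys.stderr.write(message + '\n')

    fd = MiniDownloader({'outtmpl': '-', 'verbose': True})
    if fd.params.get('verbose'):
        fd.to_screen(u'[debug] Proxy map: ' + str({}))   # lands on stderr under -o -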
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index ae30da53e3b73441c2e39b1c8d8bd139054871fa..2853ba50f228bb1f87900aa78a256b1637fcee2b 100644
@@ -83,7 +83,7 @@ class IDParser(HTMLParser.HTMLParser):
                HTMLParser.HTMLParser.__init__(self)
 
        def error(self, message):
-               print self.getpos()
+               print >> sys.stderr, self.getpos()
                if self.error_count > 10 or self.started:
                        raise HTMLParser.HTMLParseError(message, self.getpos())
                self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line