Fix all PEP8 issues except E501
author    Philipp Hagemeister <phihag@phihag.de>
Sun, 23 Nov 2014 21:21:46 +0000 (22:21 +0100)
committer Philipp Hagemeister <phihag@phihag.de>
Sun, 23 Nov 2014 21:21:46 +0000 (22:21 +0100)
17 files changed:
devscripts/gh-pages/update-sites.py
youtube_dl/__init__.py
youtube_dl/aes.py
youtube_dl/extractor/bambuser.py
youtube_dl/extractor/channel9.py
youtube_dl/extractor/crunchyroll.py
youtube_dl/extractor/facebook.py
youtube_dl/extractor/fourtube.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/ign.py
youtube_dl/extractor/ivi.py
youtube_dl/extractor/myvideo.py
youtube_dl/extractor/soundcloud.py
youtube_dl/extractor/tlc.py
youtube_dl/extractor/tudou.py
youtube_dl/options.py
youtube_dl/utils.py

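Note: the commit does not name the linter used, but a check matching the commit message can be reproduced locally with any PEP 8 checker that supports ignoring specific error codes, for example (assuming flake8 is installed; tool choice is an assumption, not part of the commit):

    flake8 --ignore=E501 devscripts/ youtube_dl/

E501 (line too long) is excluded, matching "except E501" in the commit message.
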
index 0d526784d2c9bb7eae459d2da9f7a620502500a5..4a6bb5e356c7bec65f02219088239319f5685518 100755 (executable)
@@ -22,7 +22,7 @@ def main():
             continue
         elif ie_desc is not None:
             ie_html += ': {}'.format(ie.IE_DESC)
-        if ie.working() == False:
+        if not ie.working():
             ie_html += ' (Currently broken)'
         ie_htmls.append('<li>{}</li>'.format(ie_html))
 
index 427b6ad27c4dd11c768424fbaf0ceae158acabd9..77b3384a05fa45d6d4cada65decdb5588ade9100 100644 (file)
@@ -189,7 +189,7 @@ def _real_main(argv=None):
 
     # --all-sub automatically sets --write-sub if --write-auto-sub is not given
     # this was the old behaviour if only --all-sub was given.
-    if opts.allsubtitles and (opts.writeautomaticsub == False):
+    if opts.allsubtitles and not opts.writeautomaticsub:
         opts.writesubtitles = True
 
     if sys.version_info < (3,):
index 662d49011be5a64a5b981aeec22881f113c98532..2eeb96a5a066665dd239997ad8bda90b1e5e60b4 100644 (file)
@@ -87,7 +87,7 @@ def key_expansion(data):
             temp = sub_bytes(temp)
             data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
-        for _ in range(3 if key_size_bytes == 32  else 2 if key_size_bytes == 24 else 0):
+        for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
             data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
     data = data[:expanded_key_size_bytes]
index 928ea61a31152788cee71e7623f83f920715890c..ea2ba0fe23fa8eec1773f9c33789270853522f0e 100644 (file)
@@ -73,10 +73,11 @@ class BambuserChannelIE(InfoExtractor):
         urls = []
         last_id = ''
         for i in itertools.count(1):
-            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
-                       '&sort=created&access_mode=0%2C1%2C2&limit={count}'
-                       '&method=broadcast&format=json&vid_older_than={last}'
-                ).format(user=user, count=self._STEP, last=last_id)
+            req_url = (
+                'http://bambuser.com/xhr-api/index.php?username={user}'
+                '&sort=created&access_mode=0%2C1%2C2&limit={count}'
+                '&method=broadcast&format=json&vid_older_than={last}'
+            ).format(user=user, count=self._STEP, last=last_id)
             req = compat_urllib_request.Request(req_url)
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
index 82a48870acbdf7223250b977e982b6961030ffb3..2a05813f8d600540f66015a75ce4df647096d3fc 100644 (file)
@@ -188,16 +188,17 @@ class Channel9IE(InfoExtractor):
         view_count = self._extract_view_count(html)
         comment_count = self._extract_comment_count(html)
 
-        common = {'_type': 'video',
-                  'id': content_path,
-                  'description': description,
-                  'thumbnail': thumbnail,
-                  'duration': duration,
-                  'avg_rating': avg_rating,
-                  'rating_count': rating_count,
-                  'view_count': view_count,
-                  'comment_count': comment_count,
-                }
+        common = {
+            '_type': 'video',
+            'id': content_path,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'avg_rating': avg_rating,
+            'rating_count': rating_count,
+            'view_count': view_count,
+            'comment_count': comment_count,
+        }
 
         result = []
 
index c3c4d114ae2c6dc08ead32081b0a6170b001def4..d7e2b841e10856cadf0526fe8ff6d4c280dc0dae 100644 (file)
@@ -248,8 +248,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,\
-                                              video_id, note='Downloading subtitles for ' + sub_name)
+            sub_page = self._download_webpage(
+                'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
+                video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
index c989879ba745bc44a1924c01f4c7946a0a865bfe..2139f68aa3cb16facdc45b5fd9e014621e1c6674 100644 (file)
@@ -77,7 +77,7 @@ class FacebookIE(InfoExtractor):
             'legacy_return': '1',
             'timezone': '-60',
             'trynum': '1',
-            }
+        }
         request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:
index 701241bb06b86a5ca5f038ce870f14140baf0b13..b22ce2acb5d1beae29d1b298b4ef63c8e7639e5f 100644 (file)
@@ -65,9 +65,9 @@ class FourTubeIE(InfoExtractor):
 
         token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources))
         headers = {
-                b'Content-Type': b'application/x-www-form-urlencoded',
-                b'Origin': b'http://www.4tube.com',
-                }
+            b'Content-Type': b'application/x-www-form-urlencoded',
+            b'Origin': b'http://www.4tube.com',
+        }
         token_req = compat_urllib_request.Request(token_url, b'{}', headers)
         tokens = self._download_json(token_req, video_id)
 
@@ -76,7 +76,7 @@ class FourTubeIE(InfoExtractor):
             'format_id': format + 'p',
             'resolution': format + 'p',
             'quality': int(format),
-            } for format in sources]
+        } for format in sources]
 
         self._sort_formats(formats)
 
index d224aa8e18471787bc00a515836b39fd558bac1f..128715caf091e07b28d8b6641c8faef0917a07fe 100644 (file)
@@ -537,9 +537,9 @@ class GenericIE(InfoExtractor):
 
             if default_search in ('error', 'fixup_error'):
                 raise ExtractorError(
-                    ('%r is not a valid URL. '
-                     'Set --default-search "ytsearch" (or run  youtube-dl "ytsearch:%s" ) to search YouTube'
-                    % (url, url), expected=True)
+                    '%r is not a valid URL. '
+                    'Set --default-search "ytsearch" (or run  youtube-dl "ytsearch:%s" ) to search YouTube'
+                    % (url, url), expected=True)
             else:
                 if ':' not in default_search:
                     default_search += ':'
index 3555f98a591b3751c1a78f34b7f8f1371ba5b97e..3db668cd0297ea0ff3c0168c2b3f5db1491a0db4 100644 (file)
@@ -63,8 +63,10 @@ class IGNIE(InfoExtractor):
                 'id': '078fdd005f6d3c02f63d795faa1b984f',
                 'ext': 'mp4',
                 'title': 'Rewind Theater - Wild Trailer Gamescom 2014',
-                'description': 'Giant skeletons, bloody hunts, and captivating'
-                    ' natural beauty take our breath away.',
+                'description': (
+                    'Giant skeletons, bloody hunts, and captivating'
+                    ' natural beauty take our breath away.'
+                ),
             },
         },
     ]
index 06af734179059c6128299c0d7ef2b1021da6996e..f0fba1adb7dba9c4c09717132731d98ff0e5ffd3 100644 (file)
@@ -43,7 +43,7 @@ class IviIE(InfoExtractor):
                 'thumbnail': 'http://thumbs.ivi.ru/f15.vcp.digitalaccess.ru/contents/8/4/0068dc0677041f3336b7c2baad8fc0.jpg',
             },
             'skip': 'Only works from Russia',
-         }
+        }
     ]
 
     # Sorted by quality
index 9195e53bd399f484f21ab1d3be8be7b966b04088..5e754fcffb6403cbd359b9b358ad3879ef279f53 100644 (file)
@@ -57,9 +57,9 @@ class MyVideoIE(InfoExtractor):
         video_id = mobj.group('id')
 
         GK = (
-          b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
-          b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
-          b'TnpsbA0KTVRkbU1tSTRNdz09'
+            b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
+            b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
+            b'TnpsbA0KTVRkbU1tSTRNdz09'
         )
 
         # Get video webpage
index 410477d7459a4dd39eb2f6dc2c96d9fce34b0b8e..3c1d058db9cf546bb94a299d9f1badd79fab12d3 100644 (file)
@@ -371,7 +371,7 @@ class SoundcloudPlaylistIE(SoundcloudIE):
 
         entries = [
             self._extract_info_dict(t, quiet=True, secret_token=token)
-                for t in data['tracks']]
+            for t in data['tracks']]
 
         return {
             '_type': 'playlist',
index d848ee1863252dbc155652c997210476c42479e4..66d159e99f6b15c01d53017b24b0a70b57470bd3 100644 (file)
@@ -36,9 +36,10 @@ class TlcDeIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Breaking Amish: Die Welt da draußen',
             'uploader': 'Discovery Networks - Germany',
-            'description': 'Vier Amische und eine Mennonitin wagen in New York'
+            'description': (
+                'Vier Amische und eine Mennonitin wagen in New York'
                 '  den Sprung in ein komplett anderes Leben. Begleitet sie auf'
-                ' ihrem spannenden Weg.',
+                ' ihrem spannenden Weg.'),
         },
     }
 
index b6e4a432b43c61d0267196c9ed4c58c10afabd6a..be51f4f877ff7be71012098c89c520bd2826d30d 100644 (file)
@@ -36,7 +36,7 @@ class TudouIE(InfoExtractor):
         'skip': 'Only works from China'
     }]
 
-    def _url_for_id(self, id, quality = None):
+    def _url_for_id(self, id, quality=None):
         info_url = "http://v2.tudou.com/f?id=" + str(id)
         if quality:
             info_url += '&hd' + quality
index 38177c1d7b1d25e28cc7400f3917c808a54f7cd3..da02fb6bb73bcc43a81f93dba32216c9ebf4cdb9 100644 (file)
@@ -262,7 +262,8 @@ def parseOpts(overrideArguments=None):
     video_format.add_option(
         '-f', '--format',
         action='store', dest='format', metavar='FORMAT', default=None,
-        help='video format code, specify the order of preference using'
+        help=(
+            'video format code, specify the order of preference using'
             ' slashes: -f 22/17/18 .  -f mp4 , -f m4a and  -f flv  are also'
             ' supported. You can also use the special names "best",'
             ' "bestvideo", "bestaudio", "worst", "worstvideo" and'
@@ -271,7 +272,7 @@ def parseOpts(overrideArguments=None):
             ' -f  136/137/mp4/bestvideo,140/m4a/bestaudio.'
             ' You can merge the video and audio of two formats into a single'
             ' file using -f <video-format>+<audio-format> (requires ffmpeg or'
-            ' avconv), for example -f bestvideo+bestaudio.')
+            ' avconv), for example -f bestvideo+bestaudio.'))
     video_format.add_option(
         '--all-formats',
         action='store_const', dest='format', const='all',
index 9ad0952d5a0bd279fc23bb7585fefb72f341021b..dd1023eefac6678d29d47917622320225c807b04 100644 (file)
@@ -240,9 +240,9 @@ def sanitize_open(filename, open_mode):
 
         # In case of error, try to remove win32 forbidden chars
         alt_filename = os.path.join(
-                        re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part)
-                        for path_part in os.path.split(filename)
-                       )
+            re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part)
+            for path_part in os.path.split(filename)
+        )
         if alt_filename == filename:
             raise
         else: