PEP8: applied even more rules
author     Jouke Waleson <jouke.waleson@mendix.com>
           Sun, 23 Nov 2014 20:39:15 +0000 (21:39 +0100)
committer  Jouke Waleson <jouke.waleson@mendix.com>
           Sun, 23 Nov 2014 20:39:15 +0000 (21:39 +0100)
77 files changed:
devscripts/buildserver.py
devscripts/fish-completion.py
setup.py
test/helper.py
youtube_dl/YoutubeDL.py
youtube_dl/compat.py
youtube_dl/downloader/f4m.py
youtube_dl/extractor/appletrailers.py
youtube_dl/extractor/bambuser.py
youtube_dl/extractor/bbccouk.py
youtube_dl/extractor/cnn.py
youtube_dl/extractor/collegehumor.py
youtube_dl/extractor/common.py
youtube_dl/extractor/dailymotion.py
youtube_dl/extractor/defense.py
youtube_dl/extractor/discovery.py
youtube_dl/extractor/dropbox.py
youtube_dl/extractor/ehow.py
youtube_dl/extractor/facebook.py
youtube_dl/extractor/firsttv.py
youtube_dl/extractor/fivemin.py
youtube_dl/extractor/fktv.py
youtube_dl/extractor/flickr.py
youtube_dl/extractor/fourtube.py
youtube_dl/extractor/francetv.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/howcast.py
youtube_dl/extractor/ign.py
youtube_dl/extractor/instagram.py
youtube_dl/extractor/internetvideoarchive.py
youtube_dl/extractor/jukebox.py
youtube_dl/extractor/kickstarter.py
youtube_dl/extractor/lynda.py
youtube_dl/extractor/m6.py
youtube_dl/extractor/metacafe.py
youtube_dl/extractor/metacritic.py
youtube_dl/extractor/mtv.py
youtube_dl/extractor/myspace.py
youtube_dl/extractor/myvideo.py
youtube_dl/extractor/naver.py
youtube_dl/extractor/nfb.py
youtube_dl/extractor/nhl.py
youtube_dl/extractor/niconico.py
youtube_dl/extractor/ninegag.py
youtube_dl/extractor/normalboots.py
youtube_dl/extractor/ooyala.py
youtube_dl/extractor/photobucket.py
youtube_dl/extractor/rbmaradio.py
youtube_dl/extractor/sbs.py
youtube_dl/extractor/screencast.py
youtube_dl/extractor/sina.py
youtube_dl/extractor/slutload.py
youtube_dl/extractor/smotri.py
youtube_dl/extractor/soundcloud.py
youtube_dl/extractor/stanfordoc.py
youtube_dl/extractor/teamcoco.py
youtube_dl/extractor/ted.py
youtube_dl/extractor/tf1.py
youtube_dl/extractor/theplatform.py
youtube_dl/extractor/tinypic.py
youtube_dl/extractor/traileraddict.py
youtube_dl/extractor/tumblr.py
youtube_dl/extractor/udemy.py
youtube_dl/extractor/ustream.py
youtube_dl/extractor/vbox7.py
youtube_dl/extractor/veehd.py
youtube_dl/extractor/vesti.py
youtube_dl/extractor/videofyme.py
youtube_dl/extractor/vimeo.py
youtube_dl/extractor/vk.py
youtube_dl/extractor/weibo.py
youtube_dl/extractor/xhamster.py
youtube_dl/extractor/xnxx.py
youtube_dl/extractor/yahoo.py
youtube_dl/extractor/youku.py
youtube_dl/extractor/youporn.py
youtube_dl/extractor/youtube.py

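Nearly every hunk below applies the same small set of PEP8 fixes: continuation lines are re-aligned with the opening delimiter of the call or literal they belong to (E127/E128), inline comments gain a space after the '#' (E262), and multi-line test-case literals are reindented. A minimal sketch of the alignment change, using a hypothetical fetch_page helper rather than code from this commit:

    # Before: continuation line not aligned with the opening parenthesis (E128)
    #     page = fetch_page(url, video_id,
    #         'Downloading embed page')

    def fetch_page(url, video_id, note):
        """Stand-in for a download helper; returns a placeholder string."""
        return '<html>%s %s: %s</html>' % (url, video_id, note)

    # After: the continuation argument lines up under the first argument
    page = fetch_page('http://example.com/embed/1', '1',
                      'Downloading embed page')
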
index 42ee2b5cb6545c0a6c9fdc83988cbef8fba202ed..7c2f49f8bb63bbe2b47efca151129a7e6b49674d 100644 (file)
@@ -142,7 +142,7 @@ def win_service_set_status(handle, status_code):
 
 def win_service_main(service_name, real_main, argc, argv_raw):
     try:
-        #args = [argv_raw[i].value for i in range(argc)]
+        # args = [argv_raw[i].value for i in range(argc)]
         stop_event = threading.Event()
         handler = HandlerEx(functools.partial(stop_event, win_service_handler))
         h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
index 2185d55229862be88d249f72071f8095f3f4d6b1..f2445984f55099794bb3eea953d25003885b0d07 100755 (executable)
@@ -30,7 +30,7 @@ def build_completion(opt_parser):
     for group in opt_parser.option_groups:
         for option in group.option_list:
             long_option = option.get_opt_string().strip('-')
-            help_msg = shell_quote([option.help])
+            shell_quote([option.help])
             complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
             if option._short_opts:
                 complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
index cf6b92b0f7e61b504dfdc16b6b04568fd073982b..5a0d084cce667d25e5ca35fe8d492a92f50ac5ab 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,6 @@
 from __future__ import print_function
 
 import os.path
-import pkg_resources
 import warnings
 import sys
 
index 91822935fafa41ca3ebdaed6a8719c395e7c5f2c..2e320e2cf23fbe9bf169933c0a7172a7a9a617ae 100644 (file)
@@ -116,14 +116,14 @@ def expect_info_dict(self, expected_dict, got_dict):
         elif isinstance(expected, type):
             got = got_dict.get(info_field)
             self.assertTrue(isinstance(got, expected),
-                'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
+                            'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
         else:
             if isinstance(expected, compat_str) and expected.startswith('md5:'):
                 got = 'md5:' + md5(got_dict.get(info_field))
             else:
                 got = got_dict.get(info_field)
             self.assertEqual(expected, got,
-                'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+                             'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
 
     # Check for the presence of mandatory fields
     if got_dict.get('_type') != 'playlist':
@@ -135,8 +135,8 @@ def expect_info_dict(self, expected_dict, got_dict):
 
     # Are checkable fields missing from the test case definition?
     test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
-        for key, value in got_dict.items()
-        if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
+                          for key, value in got_dict.items()
+                          if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
     missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
     if missing_keys:
         def _repr(v):
index d9e93f5d2c8125aeb4faf15fbc2df81637111bae..21c7c298a830dd8b4a1b4651419506aaa8c8a8bc 100755 (executable)
@@ -314,7 +314,7 @@ class YoutubeDL(object):
         self._output_process.stdin.write((message + '\n').encode('utf-8'))
         self._output_process.stdin.flush()
         res = ''.join(self._output_channel.readline().decode('utf-8')
-                       for _ in range(line_count))
+                      for _ in range(line_count))
         return res[:-len('\n')]
 
     def to_screen(self, message, skip_eol=False):
@@ -701,13 +701,15 @@ class YoutubeDL(object):
                 'It needs to be updated.' % ie_result.get('extractor'))
 
             def _fixup(r):
-                self.add_extra_info(r,
+                self.add_extra_info(
+                    r,
                     {
                         'extractor': ie_result['extractor'],
                         'webpage_url': ie_result['webpage_url'],
                         'webpage_url_basename': url_basename(ie_result['webpage_url']),
                         'extractor_key': ie_result['extractor_key'],
-                    })
+                    }
+                )
                 return r
             ie_result['entries'] = [
                 self.process_ie_result(_fixup(r), download, extra_info)
@@ -857,14 +859,14 @@ class YoutubeDL(object):
                         # Two formats have been requested like '137+139'
                         format_1, format_2 = rf.split('+')
                         formats_info = (self.select_format(format_1, formats),
-                            self.select_format(format_2, formats))
+                                        self.select_format(format_2, formats))
                         if all(formats_info):
                             # The first format must contain the video and the
                             # second the audio
                             if formats_info[0].get('vcodec') == 'none':
                                 self.report_error('The first format must '
-                                    'contain the video, try using '
-                                    '"-f %s+%s"' % (format_2, format_1))
+                                                  'contain the video, try using '
+                                                  '"-f %s+%s"' % (format_2, format_1))
                                 return
                             selected_format = {
                                 'requested_formats': formats_info,
@@ -1042,10 +1044,10 @@ class YoutubeDL(object):
                         with open(thumb_filename, 'wb') as thumbf:
                             shutil.copyfileobj(uf, thumbf)
                         self.to_screen('[%s] %s: Writing thumbnail to: %s' %
-                            (info_dict['extractor'], info_dict['id'], thumb_filename))
+                                       (info_dict['extractor'], info_dict['id'], thumb_filename))
                     except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                         self.report_warning('Unable to download thumbnail "%s": %s' %
-                            (info_dict['thumbnail'], compat_str(err)))
+                                            (info_dict['thumbnail'], compat_str(err)))
 
         if not self.params.get('skip_download', False):
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
@@ -1066,8 +1068,8 @@ class YoutubeDL(object):
                         if not merger._executable:
                             postprocessors = []
                             self.report_warning('You have requested multiple '
-                                'formats but ffmpeg or avconv are not installed.'
-                                ' The formats won\'t be merged')
+                                                'formats but ffmpeg or avconv are not installed.'
+                                                ' The formats won\'t be merged')
                         else:
                             postprocessors = [merger]
                         for f in info_dict['requested_formats']:
index 9087b4f85764ba9356b1c54a5b0463a5bdc0cea1..2983501aeb44feecf1bd335d5782f455a52f4edb 100644 (file)
@@ -116,7 +116,7 @@ except ImportError:  # Python 2
     # Python 2's version is apparently totally broken
 
     def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
-                encoding='utf-8', errors='replace'):
+                   encoding='utf-8', errors='replace'):
         qs, _coerce_result = qs, unicode
         pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
         r = []
@@ -145,10 +145,10 @@ except ImportError:  # Python 2
         return r
 
     def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
-                encoding='utf-8', errors='replace'):
+                        encoding='utf-8', errors='replace'):
         parsed_result = {}
         pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
-                        encoding=encoding, errors=errors)
+                           encoding=encoding, errors=errors)
         for name, value in pairs:
             if name in parsed_result:
                 parsed_result[name].append(value)
index c752e8e249e124c77f6e9ab1d820a9ab1e307d3e..7cd22c504e463ad2551692728bd3933e8bcf20ab 100644 (file)
@@ -225,13 +225,15 @@ class F4mFD(FileDownloader):
         self.to_screen('[download] Downloading f4m manifest')
         manifest = self.ydl.urlopen(man_url).read()
         self.report_destination(filename)
-        http_dl = HttpQuietDownloader(self.ydl,
+        http_dl = HttpQuietDownloader(
+            self.ydl,
             {
                 'continuedl': True,
                 'quiet': True,
                 'noprogress': True,
                 'test': self.params.get('test', False),
-            })
+            }
+        )
 
         doc = etree.fromstring(manifest)
         formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
@@ -277,7 +279,7 @@ class F4mFD(FileDownloader):
         def frag_progress_hook(status):
             frag_total_bytes = status.get('total_bytes', 0)
             estimated_size = (state['downloaded_bytes'] +
-                (total_frags - state['frag_counter']) * frag_total_bytes)
+                              (total_frags - state['frag_counter']) * frag_total_bytes)
             if status['status'] == 'finished':
                 state['downloaded_bytes'] += frag_total_bytes
                 state['frag_counter'] += 1
@@ -287,13 +289,13 @@ class F4mFD(FileDownloader):
                 frag_downloaded_bytes = status['downloaded_bytes']
                 byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
                 frag_progress = self.calc_percent(frag_downloaded_bytes,
-                    frag_total_bytes)
+                                                  frag_total_bytes)
                 progress = self.calc_percent(state['frag_counter'], total_frags)
                 progress += frag_progress / float(total_frags)
 
             eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
             self.report_progress(progress, format_bytes(estimated_size),
-                status.get('speed'), eta)
+                                 status.get('speed'), eta)
         http_dl.add_progress_hook(frag_progress_hook)
 
         frags_filenames = []
index 567a76cf088428d90a58b7a7e9a9da6566daaf47..87580147dcb4176b163417053ac5a2489cf556c3 100644 (file)
@@ -88,7 +88,7 @@ class AppleTrailersIE(InfoExtractor):
         for li in doc.findall('./div/ul/li'):
             on_click = li.find('.//a').attrib['onClick']
             trailer_info_json = self._search_regex(self._JSON_RE,
-                on_click, 'trailer info')
+                                                   on_click, 'trailer info')
             trailer_info = json.loads(trailer_info_json)
             title = trailer_info['title']
             video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
index eab99faaaeb1ca744b07d827144556956e95d150..928ea61a31152788cee71e7623f83f920715890c 100644 (file)
@@ -38,7 +38,7 @@ class BambuserIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
-            '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
+                    '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
         info_json = self._download_webpage(info_url, video_id)
         info = json.loads(info_json)['result']
 
@@ -74,8 +74,8 @@ class BambuserChannelIE(InfoExtractor):
         last_id = ''
         for i in itertools.count(1):
             req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
-                '&sort=created&access_mode=0%2C1%2C2&limit={count}'
-                '&method=broadcast&format=json&vid_older_than={last}'
+                       '&sort=created&access_mode=0%2C1%2C2&limit={count}'
+                       '&method=broadcast&format=json&vid_older_than={last}'
                 ).format(user=user, count=self._STEP, last=last_id)
             req = compat_urllib_request.Request(req_url)
             # Without setting this header, we wouldn't get any result
index fa15bf19cf006109a59dffe6d736cd5163f2b1d3..6a507e113e6236775a944e988dbb6e6aedc5fa0b 100644 (file)
@@ -165,10 +165,10 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
         webpage = self._download_webpage(url, group_id, 'Downloading video page')
         if re.search(r'id="emp-error" class="notinuk">', webpage):
             raise ExtractorError('Currently BBC iPlayer TV programmes are available to play in the UK only',
-                expected=True)
+                                 expected=True)
 
         playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id,
-            'Downloading playlist XML')
+                                      'Downloading playlist XML')
 
         no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
         if no_items is not None:
index 3826ce7e1ee29bde4d640cb62bf2c127a83eb7bc..81142ee419d45b9df9f75bdc152ab87e1317650f 100644 (file)
@@ -25,8 +25,7 @@ class CNNIE(InfoExtractor):
             'duration': 135,
             'upload_date': '20130609',
         },
-    },
-    {
+    }, {
         "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
         "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
         "info_dict": {
index 6f866e7fcee7f401362b24d69db2285e22cfa6a4..002b240378299bf18000b2ee92c76ae062115126 100644 (file)
@@ -10,47 +10,46 @@ from ..utils import int_or_none
 class CollegeHumorIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
 
-    _TESTS = [{
-        'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
-        'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
-        'info_dict': {
-            'id': '6902724',
-            'ext': 'mp4',
-            'title': 'Comic-Con Cosplay Catastrophe',
-            'description': "Fans get creative this year at San Diego.  Too creative.  And yes, that's really Joss Whedon.",
-            'age_limit': 13,
-            'duration': 187,
+    _TESTS = [
+        {
+            'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
+            'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
+            'info_dict': {
+                'id': '6902724',
+                'ext': 'mp4',
+                'title': 'Comic-Con Cosplay Catastrophe',
+                'description': "Fans get creative this year at San Diego.  Too creative.  And yes, that's really Joss Whedon.",
+                'age_limit': 13,
+                'duration': 187,
+            },
+        }, {
+            'url': 'http://www.collegehumor.com/video/3505939/font-conference',
+            'md5': '72fa701d8ef38664a4dbb9e2ab721816',
+            'info_dict': {
+                'id': '3505939',
+                'ext': 'mp4',
+                'title': 'Font Conference',
+                'description': "This video wasn't long enough, so we made it double-spaced.",
+                'age_limit': 10,
+                'duration': 179,
+            },
+        }, {
+            # embedded youtube video
+            'url': 'http://www.collegehumor.com/embed/6950306',
+            'info_dict': {
+                'id': 'Z-bao9fg6Yc',
+                'ext': 'mp4',
+                'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
+                'uploader': 'Mark Dice',
+                'uploader_id': 'MarkDice',
+                'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
+                'upload_date': '20140127',
+            },
+            'params': {
+                'skip_download': True,
+            },
+            'add_ie': ['Youtube'],
         },
-    },
-    {
-        'url': 'http://www.collegehumor.com/video/3505939/font-conference',
-        'md5': '72fa701d8ef38664a4dbb9e2ab721816',
-        'info_dict': {
-            'id': '3505939',
-            'ext': 'mp4',
-            'title': 'Font Conference',
-            'description': "This video wasn't long enough, so we made it double-spaced.",
-            'age_limit': 10,
-            'duration': 179,
-        },
-    },
-    # embedded youtube video
-    {
-        'url': 'http://www.collegehumor.com/embed/6950306',
-        'info_dict': {
-            'id': 'Z-bao9fg6Yc',
-            'ext': 'mp4',
-            'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
-            'uploader': 'Mark Dice',
-            'uploader_id': 'MarkDice',
-            'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
-            'upload_date': '20140127',
-        },
-        'params': {
-            'skip_download': True,
-        },
-        'add_ie': ['Youtube'],
-    },
     ]
 
     def _real_extract(self, url):
index f0489ede4759d4ed4e1db3448acf05208e56e2e8..3c2d46dd5c8ee780a04cd0f3fedb05e33707b71c 100644 (file)
@@ -478,7 +478,7 @@ class InfoExtractor(object):
             raise RegexNotFoundError('Unable to extract %s' % _name)
         else:
             self._downloader.report_warning('unable to extract %s; '
-                'please report this issue on http://yt-dl.org/bug' % _name)
+                                            'please report this issue on http://yt-dl.org/bug' % _name)
             return None
 
     def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
@@ -612,7 +612,7 @@ class InfoExtractor(object):
 
     def _twitter_search_player(self, html):
         return self._html_search_meta('twitter:player', html,
-            'twitter card player')
+                                      'twitter card player')
 
     def _sort_formats(self, formats):
         if not formats:
index 22cdcdfa531b7a2e946237be2df924bf23a63009..936c13cd60b0ec44f376818a7e17cb9ccf4d1384 100644 (file)
@@ -114,7 +114,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
         embed_page = self._download_webpage(embed_url, video_id,
                                             'Downloading embed page')
         info = self._search_regex(r'var info = ({.*?}),$', embed_page,
-            'video info', flags=re.MULTILINE)
+                                  'video info', flags=re.MULTILINE)
         info = json.loads(info)
         if info.get('error') is not None:
             msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
@@ -208,7 +208,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
             if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                 break
         return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
-                   for video_id in orderedSet(video_ids)]
+                for video_id in orderedSet(video_ids)]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
index 3ffed3d447e1a5ee735e450f81d3e2ceff62d26d..5e50c63d9aca7d2642239ccf32a5cedd91b05174 100644 (file)
@@ -9,7 +9,7 @@ from .common import InfoExtractor
 class DefenseGouvFrIE(InfoExtractor):
     IE_NAME = 'defense.gouv.fr'
     _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/'
-        r'ligthboxvideo/base-de-medias/webtv/(.*)')
+                  r'ligthboxvideo/base-de-medias/webtv/(.*)')
 
     _TEST = {
         'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
@@ -28,9 +28,9 @@ class DefenseGouvFrIE(InfoExtractor):
             webpage, 'ID')
 
         json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
-            + video_id)
+                    + video_id)
         info = self._download_webpage(json_url, title,
-                                                  'Downloading JSON config')
+                                      'Downloading JSON config')
         video_url = json.loads(info)['renditions'][0]['url']
 
         return {'id': video_id,
index 554df673506a88cada08b9db8300cd15d301087d..52c2d7ddf99873779b7f3223b0acfe4563e2b5d9 100644 (file)
@@ -16,9 +16,9 @@ class DiscoveryIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'MythBusters: Mission Impossible Outtakes',
             'description': ('Watch Jamie Hyneman and Adam Savage practice being'
-                ' each other -- to the point of confusing Jamie\'s dog -- and '
-                'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
-                ' back.'),
+                            ' each other -- to the point of confusing Jamie\'s dog -- and '
+                            'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
+                            ' back.'),
             'duration': 156,
         },
     }
@@ -29,7 +29,7 @@ class DiscoveryIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         video_list_json = self._search_regex(r'var videoListJSON = ({.*?});',
-            webpage, 'video list', flags=re.DOTALL)
+                                             webpage, 'video list', flags=re.DOTALL)
         video_list = json.loads(video_list_json)
         info = video_list['clips'][0]
         formats = []
index aefca848a25a7c4c10a0a821bf3454efbe6c1117..14b6c00b0bd1c4d3a306b5513477e2bb6c4cd52d 100644 (file)
@@ -11,18 +11,18 @@ from ..utils import url_basename
 
 class DropboxIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P<id>[a-zA-Z0-9]{15})/.*'
-    _TESTS = [{
-        'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
-        'info_dict': {
-            'id': 'nelirfsxnmcfbfh',
-            'ext': 'mp4',
-            'title': 'youtube-dl test video \'ä"BaW_jenozKc'
-        }
-    },
-    {
-        'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
-        'only_matching': True,
-    },
+    _TESTS = [
+        {
+            'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
+            'info_dict': {
+                'id': 'nelirfsxnmcfbfh',
+                'ext': 'mp4',
+                'title': 'youtube-dl test video \'ä"BaW_jenozKc'
+            }
+        }, {
+            'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
+            'only_matching': True,
+        },
     ]
 
     def _real_extract(self, url):
index f8f49a013503cc853c2bf79e345b360af3db7fee..b766e17f26a9e79d654d4b160fa8f98f5f21503f 100644 (file)
@@ -28,7 +28,7 @@ class EHowIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
         video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)',
-            webpage, 'video URL')
+                                       webpage, 'video URL')
         final_url = compat_urllib_parse.unquote(video_url)
         uploader = self._html_search_meta('uploader', webpage)
         title = self._og_search_title(webpage).replace(' | eHow', '')
index 10480356324d2b1d3558e570b02646990321bb27..c989879ba745bc44a1924c01f4c7946a0a865bfe 100644 (file)
@@ -60,8 +60,8 @@ class FacebookIE(InfoExtractor):
         login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
         login_page_req.add_header('Cookie', 'locale=en_US')
         login_page = self._download_webpage(login_page_req, None,
-            note='Downloading login page',
-            errnote='Unable to download login page')
+                                            note='Downloading login page',
+                                            errnote='Unable to download login page')
         lsd = self._search_regex(
             r'<input type="hidden" name="lsd" value="([^"]*)"',
             login_page, 'lsd')
@@ -82,7 +82,7 @@ class FacebookIE(InfoExtractor):
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:
             login_results = self._download_webpage(request, None,
-                note='Logging in', errnote='unable to fetch login page')
+                                                   note='Logging in', errnote='unable to fetch login page')
             if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                 self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
                 return
@@ -96,7 +96,7 @@ class FacebookIE(InfoExtractor):
             check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
             check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             check_response = self._download_webpage(check_req, None,
-                note='Confirming login')
+                                                    note='Confirming login')
             if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                 self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
index 3410daa98860f1e923cfc4e8c3db20436bbb9a50..08ceee4ed7d5e8b96b81e7d8b9b823a5ea18e120 100644 (file)
@@ -44,9 +44,9 @@ class FirstTVIE(InfoExtractor):
         duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)
 
         like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]',
-            webpage, 'like count', fatal=False)
+                                             webpage, 'like count', fatal=False)
         dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]',
-            webpage, 'dislike count', fatal=False)
+                                                webpage, 'dislike count', fatal=False)
 
         return {
             'id': video_id,
index 3a50bab5c9bd04c9d176a88be080033368fe46c7..f9c127ce67bd7edefd22e7e7953ecce57e888d15 100644 (file)
@@ -50,7 +50,7 @@ class FiveMinIE(InfoExtractor):
         video_id = mobj.group('id')
         embed_url = 'https://embed.5min.com/playerseed/?playList=%s' % video_id
         embed_page = self._download_webpage(embed_url, video_id,
-            'Downloading embed page')
+                                            'Downloading embed page')
         sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid')
         query = compat_urllib_parse.urlencode({
             'func': 'GetResults',
index 21b89142c118de83e4d5be6856ac9a3b3f3232c8..d09d1c13a70cffb725329f69368f37359d7f7a08 100644 (file)
@@ -32,9 +32,9 @@ class FKTVIE(InfoExtractor):
         server = random.randint(2, 4)
         video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode
         start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
-            episode)
+                                               episode)
         playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
-            'playlist', flags=re.DOTALL)
+                                      'playlist', flags=re.DOTALL)
         files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
         # TODO: return a single multipart video
         videos = []
index 5b0bc9d219dab5a997c165068732d88308910882..0c858b6544b919b1b569b4c4102447631298046e 100644 (file)
@@ -37,7 +37,7 @@ class FlickrIE(InfoExtractor):
         first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')
 
         node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
-            first_xml, 'node_id')
+                                          first_xml, 'node_id')
 
         second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
         second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')
index 24d4e97545ae9734ab8354f0bc51df00749e56a9..701241bb06b86a5ca5f038ce870f14140baf0b13 100644 (file)
@@ -55,7 +55,7 @@ class FourTubeIE(InfoExtractor):
         description = self._html_search_meta('description', webpage, 'description')
         if description:
             upload_date = self._search_regex(r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date',
-                fatal=False)
+                                             fatal=False)
             if upload_date:
                 upload_date = unified_strdate(upload_date)
             view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False)
index 35d7d15e1b6726dd0301e52e1e671c73a76f8d77..d7e9aef90db5b7c8f8027b0c072420d5ec346f66 100644 (file)
@@ -234,7 +234,7 @@ class GenerationQuoiIE(InfoExtractor):
         info_json = self._download_webpage(info_url, name)
         info = json.loads(info_json)
         return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
-            ie='Dailymotion')
+                               ie='Dailymotion')
 
 
 class CultureboxIE(FranceTVBaseInfoExtractor):
index 109dd20dbd62cc947a6f1d764f315ebef432a332..d224aa8e18471787bc00a515836b39fd558bac1f 100644 (file)
@@ -784,7 +784,7 @@ class GenericIE(InfoExtractor):
 
         # Look for Ooyala videos
         mobj = (re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
-             re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage))
+                re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage))
         if mobj is not None:
             return OoyalaIE._build_url_result(mobj.group('ec'))
 
index 4ddf06409ea370c6ab43e1cf124c598b9a032984..3f7d6666c0810e545c0f285dcf70689806c4dac7 100644 (file)
@@ -27,10 +27,10 @@ class HowcastIE(InfoExtractor):
         self.report_extraction(video_id)
 
         video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
-            webpage, 'video URL')
+                                       webpage, 'video URL')
 
         video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
-            webpage, 'description', fatal=False)
+                                                    webpage, 'description', fatal=False)
 
         return {
             'id': video_id,
index c80185b535b8d9853adf886fb8b5582284d12a03..3555f98a591b3751c1a78f34b7f8f1371ba5b97e 100644 (file)
@@ -99,7 +99,7 @@ class IGNIE(InfoExtractor):
         video_id = self._find_video_id(webpage)
         result = self._get_video_info(video_id)
         description = self._html_search_regex(self._DESCRIPTION_RE,
-            webpage, 'video description', flags=re.DOTALL)
+                                              webpage, 'video description', flags=re.DOTALL)
         result['description'] = description
         return result
 
index 5109f26ce860edc0675eaba6350e0ab820e7fe27..b020e2621a5cc3c8d7ef6a1bc2cb6aaea989f779 100644 (file)
@@ -27,9 +27,9 @@ class InstagramIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
         uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
-            webpage, 'uploader id', fatal=False)
+                                         webpage, 'uploader id', fatal=False)
         desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
-            fatal=False)
+                                  fatal=False)
 
         return {
             'id': video_id,
index c137f4a5d96eaf9da7685bdb33858e8f42d7b8fa..1e47991874ecf6afa75181f99b6bf98a8dd60916 100644 (file)
@@ -45,22 +45,26 @@ class InternetVideoArchiveIE(InfoExtractor):
         url = self._build_url(query)
 
         flashconfiguration = self._download_xml(url, video_id,
-            'Downloading flash configuration')
+                                                'Downloading flash configuration')
         file_url = flashconfiguration.find('file').text
         file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
         # Replace some of the parameters in the query to get the best quality
         # and http links (no m3u8 manifests)
         file_url = re.sub(r'(?<=\?)(.+)$',
-            lambda m: self._clean_query(m.group()),
-            file_url)
+                          lambda m: self._clean_query(m.group()),
+                          file_url)
         info = self._download_xml(file_url, video_id,
-            'Downloading video info')
+                                  'Downloading video info')
         item = info.find('channel/item')
 
         def _bp(p):
-            return xpath_with_ns(p,
-                {'media': 'http://search.yahoo.com/mrss/',
-                'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'})
+            return xpath_with_ns(
+                p,
+                {
+                    'media': 'http://search.yahoo.com/mrss/',
+                    'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats',
+                }
+            )
         formats = []
         for content in item.findall(_bp('media:group/media:content')):
             attr = content.attrib
index 5aa32bf092d8bfae17fd302fe399acf5d5264164..da8068efcba914a14788bc7b39872b8b5cd01c7c 100644 (file)
@@ -36,7 +36,7 @@ class JukeboxIE(InfoExtractor):
 
         try:
             video_url = self._search_regex(r'"config":{"file":"(?P<video_url>http:[^"]+\?mdtk=[0-9]+)"',
-                iframe_html, 'video url')
+                                           iframe_html, 'video url')
             video_url = unescapeHTML(video_url).replace('\/', '/')
         except RegexNotFoundError:
             youtube_url = self._search_regex(
@@ -47,9 +47,9 @@ class JukeboxIE(InfoExtractor):
             return self.url_result(youtube_url, ie='Youtube')
 
         title = self._html_search_regex(r'<h1 class="inline">([^<]+)</h1>',
-            html, 'title')
+                                        html, 'title')
         artist = self._html_search_regex(r'<span id="infos_article_artist">([^<]+)</span>',
-            html, 'artist')
+                                         html, 'artist')
 
         return {
             'id': video_id,
index 827091e601c9c896c206a25a545ad39d61d0f658..7d4b57056509383fdc082a68c1650f38dc258763 100644 (file)
@@ -13,8 +13,10 @@ class KickStarterIE(InfoExtractor):
             'id': '1404461844',
             'ext': 'mp4',
             'title': 'Intersection: The Story of Josh Grant by Kyle Cowling',
-            'description': 'A unique motocross documentary that examines the '
-                'life and mind of one of sports most elite athletes: Josh Grant.',
+            'description': (
+                'A unique motocross documentary that examines the '
+                'life and mind of one of sports most elite athletes: Josh Grant.'
+            ),
         },
     }, {
         'note': 'Embedded video (not using the native kickstarter video service)',
index 97ca4337b4637f53419e7a15b48a4065210a8f0c..2160d6cb08ae5b71584ffdc0d76982e2a9bdf0c0 100644 (file)
@@ -45,7 +45,7 @@ class LyndaIE(SubtitlesInfoExtractor):
         video_id = mobj.group(1)
 
         page = self._download_webpage('http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id, video_id,
-            'Downloading video JSON')
+                                      'Downloading video JSON')
         video_json = json.loads(page)
 
         if 'Status' in video_json:
index 3d806323a2e1464324917b8f06d59158bae41c88..7e025831b51d611f00e248bda637b4ae8f35efb6 100644 (file)
@@ -27,7 +27,7 @@ class M6IE(InfoExtractor):
         video_id = mobj.group('id')
 
         rss = self._download_xml('http://ws.m6.fr/v1/video/info/m6/bonus/%s' % video_id, video_id,
-            'Downloading video RSS')
+                                 'Downloading video RSS')
 
         title = rss.find('./channel/item/title').text
         description = rss.find('./channel/item/description').text
index f68add6c0e63fad57135445dd8d93e072501566b..858c1c0c31f4c08c3068a62983781129288dc3b8 100644 (file)
@@ -219,8 +219,8 @@ class MetacafeIE(InfoExtractor):
         description = self._og_search_description(webpage)
         thumbnail = self._og_search_thumbnail(webpage)
         video_uploader = self._html_search_regex(
-                r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
-                webpage, 'uploader nickname', fatal=False)
+            r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
+            webpage, 'uploader nickname', fatal=False)
         duration = int_or_none(
             self._html_search_meta('video:duration', webpage))
 
index 07f072924a6dadb2838230fd29a6a830ff99bb64..e30320569805aedaa6694ae54f9086909593f7a4 100644 (file)
@@ -28,7 +28,7 @@ class MetacriticIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         # The xml is not well formatted, there are raw '&'
         info = self._download_xml('http://www.metacritic.com/video_data?video=' + video_id,
-            video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)
+                                  video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)
 
         clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id)
         formats = []
@@ -44,7 +44,7 @@ class MetacriticIE(InfoExtractor):
         self._sort_formats(formats)
 
         description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>',
-            webpage, 'description', flags=re.DOTALL)
+                                              webpage, 'description', flags=re.DOTALL)
 
         return {
             'id': video_id,
index b6755ff019f5f67be6fb08a977ad77855cbf220a..506d2d5a071d9ab27c0c7ecb868af3ebcf75b24f 100644 (file)
@@ -53,7 +53,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         # Otherwise we get a webpage that would execute some javascript
         req.add_header('Youtubedl-user-agent', 'curl/7')
         webpage = self._download_webpage(req, mtvn_id,
-            'Downloading mobile page')
+                                         'Downloading mobile page')
         metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))
         req = HEADRequest(metrics_url)
         response = self._request_webpage(req, mtvn_id, 'Resolving url')
@@ -66,10 +66,10 @@ class MTVServicesInfoExtractor(InfoExtractor):
         if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None:
             if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
                 self.to_screen('The normal version is not available from your '
-                    'country, trying with the mobile version')
+                               'country, trying with the mobile version')
                 return self._extract_mobile_video_formats(mtvn_id)
             raise ExtractorError('This video is not available from your country.',
-                expected=True)
+                                 expected=True)
 
         formats = []
         for rendition in mdoc.findall('.//rendition'):
@@ -98,7 +98,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
             mediagen_url += '&acceptMethods=fms'
 
         mediagen_doc = self._download_xml(mediagen_url, video_id,
-            'Downloading video urls')
+                                          'Downloading video urls')
 
         description_node = itemdoc.find('description')
         if description_node is not None:
@@ -126,7 +126,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         # This a short id that's used in the webpage urls
         mtvn_id = None
         mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category',
-                'scheme', 'urn:mtvn:id')
+                                       'scheme', 'urn:mtvn:id')
         if mtvn_id_node is not None:
             mtvn_id = mtvn_id_node.text
 
@@ -188,7 +188,7 @@ class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
         video_id = self._id_from_uri(uri)
         site_id = uri.replace(video_id, '')
         config_url = ('http://media.mtvnservices.com/pmt/e1/players/{0}/'
-            'context4/context5/config.xml'.format(site_id))
+                      'context4/context5/config.xml'.format(site_id))
         config_doc = self._download_xml(config_url, video_id)
         feed_node = config_doc.find('.//feed')
         feed_url = feed_node.text.strip().split('?')[0]
index c16939f5437cf55b6d3e51e9d51ab45d1d12392f..553ded56d5198ccef32879dd33a84b8fc2c13f15 100644 (file)
@@ -53,7 +53,7 @@ class MySpaceIE(InfoExtractor):
             # songs don't store any useful info in the 'context' variable
             def search_data(name):
                 return self._search_regex(r'data-%s="(.*?)"' % name, webpage,
-                    name)
+                                          name)
             streamUrl = search_data('stream-url')
             info = {
                 'id': video_id,
@@ -63,7 +63,7 @@ class MySpaceIE(InfoExtractor):
             }
         else:
             context = json.loads(self._search_regex(r'context = ({.*?});', webpage,
-                u'context'))
+                                                    u'context'))
             video = context['video']
             streamUrl = video['streamUrl']
             info = {
index 956cf8b8699798ffaca57af9bf97b3b678db66ab..9195e53bd399f484f21ab1d3be8be7b966b04088 100644 (file)
@@ -72,7 +72,7 @@ class MyVideoIE(InfoExtractor):
             video_url = mobj.group(1) + '.flv'
 
             video_title = self._html_search_regex('<title>([^<]+)</title>',
-                webpage, 'title')
+                                                  webpage, 'title')
 
             return {
                 'id': video_id,
@@ -162,7 +162,7 @@ class MyVideoIE(InfoExtractor):
         video_swfobj = compat_urllib_parse.unquote(video_swfobj)
 
         video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>",
-            webpage, 'title')
+                                              webpage, 'title')
 
         return {
             'id': video_id,
index 5ce35dbf5aeca02b19b05f7da7b5cc27a2ffe18a..fbe34defd868694d44f4371825322a41b39019a3 100644 (file)
@@ -30,7 +30,7 @@ class NaverIE(InfoExtractor):
         video_id = mobj.group(1)
         webpage = self._download_webpage(url, video_id)
         m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
-            webpage)
+                         webpage)
         if m_id is None:
             m_error = re.search(
                 r'(?s)<div class="nation_error">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
index 09dcc8c84d9737d3e887c40ecc976cedbfa6ed20..7ce1d481d0a73218598bf24fee964ca8ec65956d 100644 (file)
@@ -38,12 +38,12 @@ class NFBIE(InfoExtractor):
         page = self._download_webpage('https://www.nfb.ca/film/%s' % video_id, video_id, 'Downloading film page')
 
         uploader_id = self._html_search_regex(r'<a class="director-link" href="/explore-all-directors/([^/]+)/"',
-            page, 'director id', fatal=False)
+                                              page, 'director id', fatal=False)
         uploader = self._html_search_regex(r'<em class="director-name" itemprop="name">([^<]+)</em>',
-            page, 'director name', fatal=False)
+                                           page, 'director name', fatal=False)
 
         request = compat_urllib_request.Request('https://www.nfb.ca/film/%s/player_config' % video_id,
-            compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
+                                                compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')
 
index 31813eb684519e9c42e40ca86c766972b1357e28..719eb51a468d72ef6939d5a4bf2f49570c24b15d 100644 (file)
@@ -125,7 +125,7 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
             self._downloader.report_warning(u'Got an empty reponse, trying '
                                             'adding the "newvideos" parameter')
             response = self._download_webpage(request_url + '&newvideos=true',
-                playlist_title)
+                                              playlist_title)
             response = self._fix_json(response)
         videos = json.loads(response)
 
index 3b5784e8f5cb5ae8014c4f51d2d451d989b87860..1d9c1a096403e7e7b4f3835f31dbee31812594c9 100644 (file)
@@ -111,7 +111,7 @@ class NiconicoIE(InfoExtractor):
 
         if 'deleted=' in flv_info_webpage:
             raise ExtractorError('The video has been deleted.',
-                expected=True)
+                                 expected=True)
         video_real_url = compat_urlparse.parse_qs(flv_info_webpage)['url'][0]
 
         # Start extracting information
@@ -170,13 +170,13 @@ class NiconicoPlaylistIE(InfoExtractor):
         webpage = self._download_webpage(url, list_id)
 
         entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
-            webpage, 'entries')
+                                          webpage, 'entries')
         entries = json.loads(entries_json)
         entries = [{
             '_type': 'url',
             'ie_key': NiconicoIE.ie_key(),
             'url': ('http://www.nicovideo.jp/watch/%s' %
-                entry['item_data']['video_id']),
+                    entry['item_data']['video_id']),
         } for entry in entries]
 
         return {
index 33daa0dec327dea3f691f5dccab5b6b312d79e4f..16a02ad7939082627ffb9edd41d7bf6e62fd1f6d 100644 (file)
@@ -27,8 +27,7 @@ class NineGagIE(InfoExtractor):
             "thumbnail": "re:^https?://",
         },
         'add_ie': ['Youtube']
-    },
-    {
+    }, {
         'url': 'http://9gag.tv/p/KklwM/alternate-banned-opening-scene-of-gravity?ref=fsidebar',
         'info_dict': {
             'id': 'KklwM',
index 25e71a56e196d9cf7f9d2423c47293b01e46cd24..3d35b11ac81286a359a06620d0e08e8fae18043b 100644 (file)
@@ -31,9 +31,9 @@ class NormalbootsIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
         video_uploader = self._html_search_regex(r'Posted\sby\s<a\shref="[A-Za-z0-9/]*">(?P<uploader>[A-Za-z]*)\s</a>',
-            webpage, 'uploader')
+                                                 webpage, 'uploader')
         raw_upload_date = self._html_search_regex('<span style="text-transform:uppercase; font-size:inherit;">[A-Za-z]+, (?P<date>.*)</span>',
-            webpage, 'date')
+                                                  webpage, 'date')
         video_upload_date = unified_strdate(raw_upload_date)
 
         player_url = self._html_search_regex(r'<iframe\swidth="[0-9]+"\sheight="[0-9]+"\ssrc="(?P<url>[\S]+)"', webpage, 'url')
index 30d80fa6b07c76f3b4a5ac791c8f67c3aab3bfdb..f17a528583bf7296906431bdc1a0e3a3cbd6c71d 100644 (file)
@@ -43,7 +43,7 @@ class OoyalaIE(InfoExtractor):
     @classmethod
     def _build_url_result(cls, embed_code):
         return cls.url_result(cls._url_for_embed_code(embed_code),
-            ie=cls.ie_key())
+                              ie=cls.ie_key())
 
     def _extract_result(self, info, more_info):
         return {
index 8aa69c46eb75e9ccfe6fab5b7bff2c9a5778009e..b4389e0b6feaf0726a4805bec674b77cd38e295b 100644 (file)
@@ -31,7 +31,7 @@ class PhotobucketIE(InfoExtractor):
         # Extract URL, uploader, and title from webpage
         self.report_extraction(video_id)
         info_json = self._search_regex(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (.*?)\);',
-            webpage, 'info json')
+                                       webpage, 'info json')
         info = json.loads(info_json)
         url = compat_urllib_parse.unquote(self._html_search_regex(r'file=(.+\.mp4)', info['linkcodes']['html'], 'url'))
         return {
index 2c53ed2e1147a50248a4294c838ceef67688a356..0f8f3ebde0999e8599eaa86516dd2b52524c9b40 100644 (file)
@@ -33,7 +33,7 @@ class RBMARadioIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         json_data = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$',
-            webpage, 'json data', flags=re.MULTILINE)
+                                       webpage, 'json data', flags=re.MULTILINE)
 
         try:
             data = json.loads(json_data)
index 409f8540a0b2e2ef9db1ab3c5746d7779a2e5db3..b8775c2f99f4a105ae35f1b04a919e64c987df0f 100644 (file)
@@ -27,8 +27,7 @@ class SBSIE(InfoExtractor):
             'thumbnail': 're:http://.*\.jpg',
         },
         'add_ies': ['generic'],
-    },
-    {
+    }, {
         'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed',
         'only_matching': True,
     }]
index 306869e6af109cf6971a72df14f47fe99a7b6885..c145f6fc72f1b9eed8a5089dce48dfdd5f106a79 100644 (file)
@@ -96,7 +96,7 @@ class ScreencastIE(InfoExtractor):
         if title is None:
             title = self._html_search_regex(
                 [r'<b>Title:</b> ([^<]*)</div>',
-                r'class="tabSeperator">></span><span class="tabText">(.*?)<'],
+                 r'class="tabSeperator">></span><span class="tabText">(.*?)<'],
                 webpage, 'title')
         thumbnail = self._og_search_thumbnail(webpage)
         description = self._og_search_description(webpage, default=None)
index 2909ef18b51a5ac6dadc4eec33bd05522443da8c..5eadbb7eaea263b8a37307fbdcc01c3e54c5eaa2 100644 (file)
@@ -46,7 +46,7 @@ class SinaIE(InfoExtractor):
     def _extract_video(self, video_id):
         data = compat_urllib_parse.urlencode({'vid': video_id})
         url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data,
-            video_id, 'Downloading video url')
+                                     video_id, 'Downloading video url')
         image_page = self._download_webpage(
             'http://interface.video.sina.com.cn/interface/common/getVideoImage.php?%s' % data,
             video_id, 'Downloading thumbnail info')
index e6e7d086503a04a3fda862f601e109003169b9d7..3df71304dafc9c9e353923f6769c88e1fcf8c5ff 100644 (file)
@@ -26,7 +26,7 @@ class SlutloadIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         video_title = self._html_search_regex(r'<h1><strong>([^<]+)</strong>',
-            webpage, 'title').strip()
+                                              webpage, 'title').strip()
 
         video_url = self._html_search_regex(
             r'(?s)<div id="vidPlayer"\s+data-url="([^"]+)"',
index b6a71305f959a3d8861e5aa8e76214654b94e8e9..0751efc6111c96ca1c089c66183429f9bde6147c 100644 (file)
@@ -282,7 +282,7 @@ class SmotriBroadcastIE(InfoExtractor):
             (username, password) = self._get_login_info()
             if username is None:
                 raise ExtractorError('Erotic broadcasts allowed only for registered users, '
-                    'use --username and --password options to provide account credentials.', expected=True)
+                                     'use --username and --password options to provide account credentials.', expected=True)
 
             login_form = {
                 'login-hint53': '1',
index f92f7fa243fcda2d61cda6aaa099a9db917897cd..410477d7459a4dd39eb2f6dc2c96d9fce34b0b8e 100644 (file)
@@ -159,7 +159,7 @@ class SoundcloudIE(InfoExtractor):
 
         # We have to retrieve the url
         streams_url = ('http://api.soundcloud.com/i1/tracks/{0}/streams?'
-            'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
+                       'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
         format_dict = self._download_json(
             streams_url,
             track_id, 'Downloading track url')
index 5feb4ff83f9c4d2def9196666bde4fcc2a3a9101..4a3d8bb8f267b588c59e2f16b208955a70d362d9 100644 (file)
@@ -82,7 +82,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
 
             rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
             rootpage = self._download_webpage(rootURL, info['id'],
-                errnote='Unable to download course info page')
+                                              errnote='Unable to download course info page')
 
             links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
             info['entries'] = [self.url_result(
index d5dda34f657825fa78e2b5598482be0d71c34ce4..5fa67eb8d4441d62c1591289551171cdbcbcf45b 100644 (file)
@@ -8,24 +8,23 @@ from .common import InfoExtractor
 class TeamcocoIE(InfoExtractor):
     _VALID_URL = r'http://teamcoco\.com/video/(?P<video_id>[0-9]+)?/?(?P<display_id>.*)'
     _TESTS = [
-    {
-        'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant',
-        'file': '80187.mp4',
-        'md5': '3f7746aa0dc86de18df7539903d399ea',
-        'info_dict': {
-            'title': 'Conan Becomes A Mary Kay Beauty Consultant',
-            'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.'
+        {
+            'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant',
+            'file': '80187.mp4',
+            'md5': '3f7746aa0dc86de18df7539903d399ea',
+            'info_dict': {
+                'title': 'Conan Becomes A Mary Kay Beauty Consultant',
+                'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.'
+            }
+        }, {
+            'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
+            'file': '19705.mp4',
+            'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
+            'info_dict': {
+                "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.",
+                "title": "Louis C.K. Interview Pt. 1 11/3/11"
+            }
         }
-    },
-    {
-        'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
-        'file': '19705.mp4',
-        'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
-        'info_dict': {
-            "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.",
-            "title": "Louis C.K. Interview Pt. 1 11/3/11"
-        }
-    }
     ]
 
     def _real_extract(self, url):
index 8550380779168a80b95e526f8921059e2eddf8f4..f8a87afdaf4d27c59b4b29491569b243331b2322 100644 (file)
@@ -33,9 +33,9 @@ class TEDIE(SubtitlesInfoExtractor):
             'ext': 'mp4',
             'title': 'The illusion of consciousness',
             'description': ('Philosopher Dan Dennett makes a compelling '
-                'argument that not only don\'t we understand our own '
-                'consciousness, but that half the time our brains are '
-                'actively fooling us.'),
+                            'argument that not only don\'t we understand our own '
+                            'consciousness, but that half the time our brains are '
+                            'actively fooling us.'),
             'uploader': 'Dan Dennett',
             'width': 854,
             'duration': 1308,
@@ -93,7 +93,7 @@ class TEDIE(SubtitlesInfoExtractor):
 
     def _extract_info(self, webpage):
         info_json = self._search_regex(r'q\("\w+.init",({.+})\)</script>',
-            webpage, 'info json')
+                                       webpage, 'info json')
         return json.loads(info_json)
 
     def _real_extract(self, url):
@@ -113,7 +113,7 @@ class TEDIE(SubtitlesInfoExtractor):
         '''Returns the videos of the playlist'''
 
         webpage = self._download_webpage(url, name,
-            'Downloading playlist webpage')
+                                         'Downloading playlist webpage')
         info = self._extract_info(webpage)
         playlist_info = info['playlist']
 
index fdae17b1b817efd2a7666d44cc2cc38de1ccfa22..6e61cc9e2ecf621b19fe13924456970b091bce1c 100644 (file)
@@ -30,7 +30,7 @@ class TF1IE(InfoExtractor):
         embed_url = self._html_search_regex(
             r'"(https://www.wat.tv/embedframe/.*?)"', webpage, 'embed url')
         embed_page = self._download_webpage(embed_url, video_id,
-            'Downloading embed player page')
+                                            'Downloading embed player page')
         wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id')
         wat_info = self._download_json(
             'http://www.wat.tv/interface/contentv3/%s' % wat_id, video_id)
index 522a095a28757c1707baced702dfc72fbb8448d1..e2653d62dc8c288ce8e58e5bfda52793aef7cfaf 100644 (file)
@@ -47,7 +47,7 @@ class ThePlatformIE(InfoExtractor):
             smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
         else:
             smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
-                'format=smil&mbr=true'.format(video_id))
+                        'format=smil&mbr=true'.format(video_id))
 
         meta = self._download_xml(smil_url, video_id)
         try:
index 6ba5dc5f1b4cf0eb108fc619bafcd3095cfa1e20..4fe89dbe516f8e25eb1f84239bc9cbc9f26bd648 100644 (file)
@@ -28,7 +28,7 @@ class TinyPicIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id, 'Downloading page')
 
         mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n'
-            '\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
+                         '\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
         if mobj is None:
             raise ExtractorError('Video %s does not exist' % video_id, expected=True)
 
index b1a440e79fb256c4b069c2886781c564deb6d951..1c53a3fd09459f31fdf188dc852141ef000af4a6 100644 (file)
@@ -25,7 +25,7 @@ class TrailerAddictIE(InfoExtractor):
         webpage = self._download_webpage(url, name)
 
         title = self._search_regex(r'<title>(.+?)</title>',
-                webpage, 'video title').replace(' - Trailer Addict', '')
+                                   webpage, 'video title').replace(' - Trailer Addict', '')
         view_count_str = self._search_regex(
             r'<span class="views_n">([0-9,.]+)</span>',
             webpage, 'view count', fatal=False)
@@ -46,9 +46,9 @@ class TrailerAddictIE(InfoExtractor):
         info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")
 
         final_url = self._search_regex(r'&fileurl=(.+)',
-                info_webpage, 'Download url').replace('%3F', '?')
+                                       info_webpage, 'Download url').replace('%3F', '?')
         thumbnail_url = self._search_regex(r'&image=(.+?)&',
-                info_webpage, 'thumbnail url')
+                                           info_webpage, 'thumbnail url')
 
         description = self._html_search_regex(
             r'(?s)<div class="synopsis">.*?<div class="movie_label_info"[^>]*>(.*?)</div>',
index 40c53ff17ec1e6eb8a05bcf683d55d00f2455101..2a1ae5a717cf7b2af16bf5a1ce3ef7494e28a7a6 100644 (file)
@@ -43,7 +43,7 @@ class TumblrIE(InfoExtractor):
             webpage, 'iframe url')
         iframe = self._download_webpage(iframe_url, video_id)
         video_url = self._search_regex(r'<source src="([^"]+)"',
-            iframe, 'video url')
+                                       iframe, 'video url')
 
         # The only place where you can get a title, it's not complete,
         # but searching in other places doesn't work for all videos
index 7df11fc1916f20cb48c28c62372381560046d639..1a7d01c6790e0caa8c408c388ebc5922dc379889 100644 (file)
@@ -154,7 +154,7 @@ class UdemyCourseIE(UdemyIE):
             self.to_screen('%s: Already enrolled in' % course_id)
 
         response = self._download_json('https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id,
-            course_id, 'Downloading course curriculum')
+                                       course_id, 'Downloading course curriculum')
 
         entries = [
             self.url_result('https://www.udemy.com/%s/#/lecture/%s' % (course_path, asset['id']), 'Udemy')
index 875450908eb15856643dfbd7b085b107f5e1ca1e..53dc3a496ff65edf044137540080d9190ad8d72b 100644 (file)
@@ -45,13 +45,13 @@ class UstreamIE(InfoExtractor):
         self.report_extraction(video_id)
 
         video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
-            webpage, 'title')
+                                              webpage, 'title')
 
         uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
-            webpage, 'uploader', fatal=False, flags=re.DOTALL)
+                                           webpage, 'uploader', fatal=False, flags=re.DOTALL)
 
         thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
-            webpage, 'thumbnail', fatal=False)
+                                            webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
index ebd64f0f54df23fca1d243e32dc8071fcfcbfb1d..455b6d9da62f221cf0854655f707d5963546840f 100644 (file)
@@ -30,13 +30,13 @@ class Vbox7IE(InfoExtractor):
 
         redirect_page, urlh = self._download_webpage_handle(url, video_id)
         new_location = self._search_regex(r'window\.location = \'(.*)\';',
-            redirect_page, 'redirect location')
+                                          redirect_page, 'redirect location')
         redirect_url = urlh.geturl() + new_location
         webpage = self._download_webpage(redirect_url, video_id,
-            'Downloading redirect page')
+                                         'Downloading redirect page')
 
         title = self._html_search_regex(r'<title>(.*)</title>',
-            webpage, 'title').split('/')[0].strip()
+                                        webpage, 'title').split('/')[0].strip()
 
         info_url = "http://vbox7.com/play/magare.do"
         data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
index 77b1f91ce3636cbb8f805f60ff06d95852cf4940..94647d1c8c88a18cfb6abcba2ec5ed8e71e9c4b6 100644 (file)
@@ -48,11 +48,11 @@ class VeeHDIE(InfoExtractor):
         video_url = compat_urlparse.unquote(config['clip']['url'])
         title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0])
         uploader_id = self._html_search_regex(r'<a href="/profile/\d+">(.+?)</a>',
-            webpage, 'uploader')
+                                              webpage, 'uploader')
         thumbnail = self._search_regex(r'<img id="veehdpreview" src="(.+?)"',
-            webpage, 'thumbnail')
+                                       webpage, 'thumbnail')
         description = self._html_search_regex(r'<td class="infodropdown".*?<div>(.*?)<ul',
-            webpage, 'description', flags=re.DOTALL)
+                                              webpage, 'description', flags=re.DOTALL)
 
         return {
             '_type': 'video',
index 5aebcecd7554c9f3c2c584fff33612ae59db0d73..a0c59a2e0e1cb8fca2e0e3eb3ec2e4edce2918bb 100644 (file)
@@ -112,7 +112,7 @@ class VestiIE(InfoExtractor):
         if mobj:
             video_id = mobj.group('id')
             page = self._download_webpage('http://www.vesti.ru/only_video.html?vid=%s' % video_id, video_id,
-                'Downloading video page')
+                                          'Downloading video page')
 
         rutv_url = RUTVIE._extract_url(page)
         if rutv_url:
index d69fe1e778c5e13eef89537ae7b87090b3dd55df..d764e60fb506ace0e8290039976c5b5d7415b549 100644 (file)
@@ -28,11 +28,11 @@ class VideofyMeIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         config = self._download_xml('http://sunshine.videofy.me/?videoId=%s' % video_id,
-                                            video_id)
+                                    video_id)
         video = config.find('video')
         sources = video.find('sources')
         url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key)
-            for key in ['on', 'av', 'off']] if node is not None)
+                                          for key in ['on', 'av', 'off']] if node is not None)
         video_url = url_node.find('url').text
 
         return {'id': video_id,
index c744d4f041b4eddb21a9f9b9aa8f3924ca578f03..06b0bed41e68401a8667cbabdca0d9796ea8ca3d 100644 (file)
@@ -260,7 +260,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
                 else:
                     config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                 config = self._search_regex(config_re, webpage, 'info section',
-                    flags=re.DOTALL)
+                                            flags=re.DOTALL)
                 config = json.loads(config)
         except Exception as e:
             if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
index 9d6ec43821405363d23e4831ddf59365ead4c7d1..d9acafd70b2fa9089e6d39ce808e2ec74c9fbe76 100644 (file)
@@ -121,7 +121,7 @@ class VKIE(InfoExtractor):
         }
 
         request = compat_urllib_request.Request('https://login.vk.com/?act=login',
-            compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+                                                compat_urllib_parse.urlencode(login_form).encode('utf-8'))
         login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
 
         if re.search(r'onLoginFailed', login_page):
@@ -175,7 +175,6 @@ class VKIE(InfoExtractor):
         upload_date = None
         mobj = re.search(r'id="mv_date_wrap".*?Added ([a-zA-Z]+ [0-9]+), ([0-9]+) at', info_page)
         if mobj is not None:
-            x = mobj.group(1) + ' ' + mobj.group(2)
             upload_date = unified_strdate(mobj.group(1) + ' ' + mobj.group(2))
 
         formats = [{
index b24297a409911c79433cca404dc94206009aefe5..20bb039d38dd9c62ff6c38135f67f9aa41f484aa 100644 (file)
@@ -41,7 +41,7 @@ class WeiboIE(InfoExtractor):
         videos_urls = sorted(videos_urls, key=lambda u: 'video.sina.com' in u)
         player_url = videos_urls[-1]
         m_sina = re.match(r'https?://video\.sina\.com\.cn/v/b/(\d+)-\d+\.html',
-            player_url)
+                          player_url)
         if m_sina is not None:
             self.to_screen('Sina video detected')
             sina_id = m_sina.group(1)
index 1e6c9462353d28aacd2c88ffd7baca085eef7174..6b37bcbc959a8e8b83fee052da18728ca9a9c298 100644 (file)
@@ -67,17 +67,17 @@ class XHamsterIE(InfoExtractor):
         description = mobj.group(1) if mobj else None
 
         upload_date = self._html_search_regex(r'hint=\'(\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}\'',
-            webpage, 'upload date', fatal=False)
+                                              webpage, 'upload date', fatal=False)
         if upload_date:
             upload_date = unified_strdate(upload_date)
 
         uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)',
-            webpage, 'uploader id', default='anonymous')
+                                              webpage, 'uploader id', default='anonymous')
 
         thumbnail = self._html_search_regex(r'<video\s+.*?poster="([^"]+)".*?>', webpage, 'thumbnail', fatal=False)
 
         duration = parse_duration(self._html_search_regex(r'<span>Runtime:</span> (\d+:\d+)</div>',
-            webpage, 'duration', fatal=False))
+                                                          webpage, 'duration', fatal=False))
 
         view_count = self._html_search_regex(r'<span>Views:</span> ([^<]+)</div>', webpage, 'view count', fatal=False)
         if view_count:
index 7a73b243080406b29b6c4a17d50a3e4d1ac023cb..53ed7ef5a6ea95826d0324bac53a71bea4913fa4 100644 (file)
@@ -30,14 +30,14 @@ class XNXXIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         video_url = self._search_regex(r'flv_url=(.*?)&amp;',
-            webpage, 'video URL')
+                                       webpage, 'video URL')
         video_url = compat_urllib_parse.unquote(video_url)
 
         video_title = self._html_search_regex(r'<title>(.*?)\s+-\s+XNXX.COM',
-            webpage, 'title')
+                                              webpage, 'title')
 
         video_thumbnail = self._search_regex(r'url_bigthumb=(.*?)&amp;',
-            webpage, 'thumbnail', fatal=False)
+                                             webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
index 39caf60f22572462e10ca32c46220100549c4e22..0fdb122436d9b7e158a706cd1048630e252d5666 100644 (file)
@@ -229,7 +229,7 @@ class YahooSearchIE(SearchInfoExtractor):
         for pagenum in itertools.count(0):
             result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query,
-                note='Downloading results page ' + str(pagenum + 1))
+                                       note='Downloading results page ' + str(pagenum + 1))
             m = info['m']
             results = info['results']
 
index 830ae6cd98efc9b62e4fc6aab0370917bd3562c6..97b98bbe88715f644da6bec1709d697af2c8e0e0 100644 (file)
@@ -74,7 +74,7 @@ class YoukuIE(InfoExtractor):
             # -8 means blocked outside China.
             error = config['data'][0].get('error')  # Chinese and English, separated by newline.
             raise ExtractorError(error or 'Server reported error %i' % error_code,
-                expected=True)
+                                 expected=True)
 
         video_title = config['data'][0]['title']
         seed = config['data'][0]['seed']
index ee61e2381c204c7bf73fd1ee434387e98375201b..97ef9c17e5b29e8aa9bd72c027acab63a74d81b3 100644 (file)
@@ -64,7 +64,7 @@ class YouPornIE(InfoExtractor):
         # Get all of the links from the page
         DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
         download_list_html = self._search_regex(DOWNLOAD_LIST_RE,
-            webpage, 'download list').strip()
+                                                webpage, 'download list').strip()
         LINK_RE = r'<a href="([^"]+)">'
         links = re.findall(LINK_RE, download_list_html)
 
index 8711b06d468c9ad83a67a75f0d4f431cd5102226..fa3ead95b00281e53f61626c9b26c359efa0e950 100644 (file)
@@ -950,7 +950,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
                         parts_sizes = self._signature_cache_id(encrypted_sig)
                         self.to_screen('{%s} signature length %s, %s' %
-                            (format_id, parts_sizes, player_desc))
+                                       (format_id, parts_sizes, player_desc))
 
                     signature = self._decrypt_signature(
                         encrypted_sig, video_id, player_url, age_gate)
@@ -1214,7 +1214,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
 class YoutubeTopListIE(YoutubePlaylistIE):
     IE_NAME = 'youtube:toplist'
     IE_DESC = ('YouTube.com top lists, "yttoplist:{channel}:{list title}"'
-        ' (Example: "yttoplist:music:Top Tracks")')
+               ' (Example: "yttoplist:music:Top Tracks")')
     _VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'
     _TESTS = [{
         'url': 'yttoplist:music:Trending',