Fix W504 and disable W503 (closes #20863)
author	Sergey M․ <dstftw@gmail.com>
	Fri, 10 May 2019 20:56:22 +0000 (03:56 +0700)
committer	Sergey M․ <dstftw@gmail.com>
	Fri, 10 May 2019 20:57:40 +0000 (03:57 +0700)
49 files changed:
devscripts/check-porn.py
setup.cfg
test/test_aes.py
test/test_swfinterp.py
youtube_dl/YoutubeDL.py
youtube_dl/__init__.py
youtube_dl/compat.py
youtube_dl/downloader/common.py
youtube_dl/downloader/f4m.py
youtube_dl/downloader/fragment.py
youtube_dl/downloader/hls.py
youtube_dl/downloader/http.py
youtube_dl/extractor/addanime.py
youtube_dl/extractor/blinkx.py
youtube_dl/extractor/common.py
youtube_dl/extractor/dailymail.py
youtube_dl/extractor/dctp.py
youtube_dl/extractor/expressen.py
youtube_dl/extractor/frontendmasters.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/heise.py
youtube_dl/extractor/hitbox.py
youtube_dl/extractor/hitrecord.py
youtube_dl/extractor/hketv.py
youtube_dl/extractor/hrti.py
youtube_dl/extractor/infoq.py
youtube_dl/extractor/iqiyi.py
youtube_dl/extractor/itv.py
youtube_dl/extractor/kaltura.py
youtube_dl/extractor/karrierevideos.py
youtube_dl/extractor/motherless.py
youtube_dl/extractor/ndtv.py
youtube_dl/extractor/nextmedia.py
youtube_dl/extractor/niconico.py
youtube_dl/extractor/nrk.py
youtube_dl/extractor/ooyala.py
youtube_dl/extractor/openload.py
youtube_dl/extractor/podomatic.py
youtube_dl/extractor/ruutu.py
youtube_dl/extractor/sbs.py
youtube_dl/extractor/vevo.py
youtube_dl/extractor/vk.py
youtube_dl/extractor/yandexvideo.py
youtube_dl/extractor/youku.py
youtube_dl/extractor/youtube.py
youtube_dl/extractor/zattoo.py
youtube_dl/postprocessor/ffmpeg.py
youtube_dl/postprocessor/xattrpp.py
youtube_dl/utils.py

index 72b2ee422766e440a311856b3c0268e546a3411e..740f04de0f22ad3ac6352b114b0e8f99cf717a9f 100644 (file)
@@ -45,12 +45,12 @@ for test in gettestcases():
 
         RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST)
 
-    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict'] or
-                   test['info_dict']['age_limit'] != 18):
+    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict']
+                   or test['info_dict']['age_limit'] != 18):
         print('\nPotential missing age_limit check: {0}'.format(test['name']))
 
-    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict'] and
-                         test['info_dict']['age_limit'] == 18):
+    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict']
+                         and test['info_dict']['age_limit'] == 18):
         print('\nPotential false negative: {0}'.format(test['name']))
 
     else:
index af9a554c605b1593945dccaa8f2a4f5e10968717..da78a9c471d548a01711d37012e209f421134a37 100644 (file)
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,4 +3,4 @@ universal = True
 
 [flake8]
 exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,devscripts/lazy_load_template.py,devscripts/make_issue_template.py,setup.py,build,.git,venv
-ignore = E402,E501,E731,E741
+ignore = E402,E501,E731,E741,W503
index 78a28751b21d885dac7bd9206309a6fb40f09994..cc89fb6ab2770ebeb4b47b5458e20399ec37e765 100644 (file)
@@ -44,16 +44,16 @@ class TestAES(unittest.TestCase):
     def test_decrypt_text(self):
         password = intlist_to_bytes(self.key).decode('utf-8')
         encrypted = base64.b64encode(
-            intlist_to_bytes(self.iv[:8]) +
-            b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
+            intlist_to_bytes(self.iv[:8])
+            + b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
         ).decode('utf-8')
         decrypted = (aes_decrypt_text(encrypted, password, 16))
         self.assertEqual(decrypted, self.secret_msg)
 
         password = intlist_to_bytes(self.key).decode('utf-8')
         encrypted = base64.b64encode(
-            intlist_to_bytes(self.iv[:8]) +
-            b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
+            intlist_to_bytes(self.iv[:8])
+            + b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
         ).decode('utf-8')
         decrypted = (aes_decrypt_text(encrypted, password, 32))
         self.assertEqual(decrypted, self.secret_msg)
index f1e8998192b131613cb9d26a1167ce35e0a61e9f..9f18055e629d3c21826ad8159bdf0ae55409bca2 100644 (file)
@@ -34,8 +34,8 @@ def _make_testfunc(testfile):
     def test_func(self):
         as_file = os.path.join(TEST_DIR, testfile)
         swf_file = os.path.join(TEST_DIR, test_id + '.swf')
-        if ((not os.path.exists(swf_file)) or
-                os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
+        if ((not os.path.exists(swf_file))
+                or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
             # Recompile
             try:
                 subprocess.check_call([
index 57f52f888ac6821843992be9a786b947ab5e6264..3e832fec29f621db0837c6775deac0721b9211ff 100755 (executable)
@@ -400,9 +400,9 @@ class YoutubeDL(object):
                 else:
                     raise
 
-        if (sys.platform != 'win32' and
-                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
-                not params.get('restrictfilenames', False)):
+        if (sys.platform != 'win32'
+                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
+                and not params.get('restrictfilenames', False)):
             # Unicode filesystem API will throw errors (#1474, #13027)
             self.report_warning(
                 'Assuming --restrict-filenames since file system encoding '
@@ -440,9 +440,9 @@ class YoutubeDL(object):
             if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
         if idxs:
             correct_argv = (
-                ['youtube-dl'] +
-                [a for i, a in enumerate(argv) if i not in idxs] +
-                ['--'] + [argv[i] for i in idxs]
+                ['youtube-dl']
+                + [a for i, a in enumerate(argv) if i not in idxs]
+                + ['--'] + [argv[i] for i in idxs]
             )
             self.report_warning(
                 'Long argument string detected. '
@@ -850,8 +850,8 @@ class YoutubeDL(object):
         if result_type in ('url', 'url_transparent'):
             ie_result['url'] = sanitize_url(ie_result['url'])
             extract_flat = self.params.get('extract_flat', False)
-            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
-                    extract_flat is True):
+            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
+                    or extract_flat is True):
                 if self.params.get('forcejson', False):
                     self.to_stdout(json.dumps(ie_result))
                 return ie_result
@@ -1619,9 +1619,9 @@ class YoutubeDL(object):
         # https://github.com/ytdl-org/youtube-dl/issues/10083).
         incomplete_formats = (
             # All formats are video-only or
-            all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or
+            all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
             # all formats are audio-only
-            all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
+            or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
 
         ctx = {
             'formats': formats,
@@ -1947,8 +1947,8 @@ class YoutubeDL(object):
                     else:
                         assert fixup_policy in ('ignore', 'never')
 
-                if (info_dict.get('requested_formats') is None and
-                        info_dict.get('container') == 'm4a_dash'):
+                if (info_dict.get('requested_formats') is None
+                        and info_dict.get('container') == 'm4a_dash'):
                     if fixup_policy == 'warn':
                         self.report_warning(
                             '%s: writing DASH m4a. '
@@ -1967,9 +1967,9 @@ class YoutubeDL(object):
                     else:
                         assert fixup_policy in ('ignore', 'never')
 
-                if (info_dict.get('protocol') == 'm3u8_native' or
-                        info_dict.get('protocol') == 'm3u8' and
-                        self.params.get('hls_prefer_native')):
+                if (info_dict.get('protocol') == 'm3u8_native'
+                        or info_dict.get('protocol') == 'm3u8'
+                        and self.params.get('hls_prefer_native')):
                     if fixup_policy == 'warn':
                         self.report_warning('%s: malformed AAC bitstream detected.' % (
                             info_dict['id']))
@@ -1995,10 +1995,10 @@ class YoutubeDL(object):
     def download(self, url_list):
         """Download a given list of URLs."""
         outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
-        if (len(url_list) > 1 and
-                outtmpl != '-' and
-                '%' not in outtmpl and
-                self.params.get('max_downloads') != 1):
+        if (len(url_list) > 1
+                and outtmpl != '-'
+                and '%' not in outtmpl
+                and self.params.get('max_downloads') != 1):
             raise SameFileError(outtmpl)
 
         for url in url_list:
@@ -2143,8 +2143,8 @@ class YoutubeDL(object):
             if res:
                 res += ', '
             res += '%s container' % fdict['container']
-        if (fdict.get('vcodec') is not None and
-                fdict.get('vcodec') != 'none'):
+        if (fdict.get('vcodec') is not None
+                and fdict.get('vcodec') != 'none'):
             if res:
                 res += ', '
             res += fdict['vcodec']
index 9d4859bcf668a22342faa8efa8fddc502a628169..165c975dd758e90afa657b9d2ce4ee8101abd036 100644 (file)
@@ -230,14 +230,14 @@ def _real_main(argv=None):
     if opts.allsubtitles and not opts.writeautomaticsub:
         opts.writesubtitles = True
 
-    outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
-               (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
-               (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
-               (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
-               (opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
-               (opts.useid and '%(id)s.%(ext)s') or
-               (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
-               DEFAULT_OUTTMPL)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
+               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
+               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
+               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
+               or (opts.useid and '%(id)s.%(ext)s')
+               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
+               or DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
         parser.error('Cannot download a video and extract audio into the same'
                      ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
index 7992a23ca92eaf95eddeb1c3217821ce058e8444..c75ab131b9955cec1367ec42aa41d8dadde423da 100644 (file)
@@ -2649,9 +2649,9 @@ else:
 
 try:
     args = shlex.split('中文')
-    assert (isinstance(args, list) and
-            isinstance(args[0], compat_str) and
-            args[0] == '中文')
+    assert (isinstance(args, list)
+            and isinstance(args[0], compat_str)
+            and args[0] == '中文')
     compat_shlex_split = shlex.split
 except (AssertionError, UnicodeEncodeError):
     # Working around shlex issue with unicode strings on some python 2
index 5979833c08ab877973893ed9fa4d24cd12c3193b..646d7f7795cc47c281db3033fa3a9df87a646ed2 100644 (file)
@@ -330,15 +330,15 @@ class FileDownloader(object):
         """
 
         nooverwrites_and_exists = (
-            self.params.get('nooverwrites', False) and
-            os.path.exists(encodeFilename(filename))
+            self.params.get('nooverwrites', False)
+            and os.path.exists(encodeFilename(filename))
         )
 
         if not hasattr(filename, 'write'):
             continuedl_and_exists = (
-                self.params.get('continuedl', True) and
-                os.path.isfile(encodeFilename(filename)) and
-                not self.params.get('nopart', False)
+                self.params.get('continuedl', True)
+                and os.path.isfile(encodeFilename(filename))
+                and not self.params.get('nopart', False)
             )
 
             # Check file already present
index 9b15a0e15bd5f2047490feda44662572f46aac6a..8dd3c2eeb3154b98e48cf6a58fe50ca50943dada 100644 (file)
@@ -238,8 +238,8 @@ def write_metadata_tag(stream, metadata):
 
 
 def remove_encrypted_media(media):
-    return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
-                                 'drmAdditionalHeaderSetId' not in e.attrib,
+    return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib
+                                 and 'drmAdditionalHeaderSetId' not in e.attrib,
                        media))
 
 
@@ -267,8 +267,8 @@ class F4mFD(FragmentFD):
         media = doc.findall(_add_ns('media'))
         if not media:
             self.report_error('No media found')
-        for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
-                  doc.findall(_add_ns('drmAdditionalHeaderSet'))):
+        for e in (doc.findall(_add_ns('drmAdditionalHeader'))
+                  + doc.findall(_add_ns('drmAdditionalHeaderSet'))):
             # If id attribute is missing it's valid for all media nodes
             # without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
             if 'id' not in e.attrib:
index 917f6dc019a475139b2f8360a13b617e2c29a7ce..f2e5733b6406603f9a52b26b65bd4bc3cc833fec 100644 (file)
@@ -219,8 +219,8 @@ class FragmentFD(FileDownloader):
             frag_total_bytes = s.get('total_bytes') or 0
             if not ctx['live']:
                 estimated_size = (
-                    (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes) /
-                    (state['fragment_index'] + 1) * total_frags)
+                    (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes)
+                    / (state['fragment_index'] + 1) * total_frags)
                 state['total_bytes_estimate'] = estimated_size
 
             if s['status'] == 'finished':
index 419e7357615a318a339ae2aeb4c2e01d8692d5c0..b59aad73f9fc0a798548585bd2b2ef120e867d76 100644 (file)
@@ -76,12 +76,12 @@ class HlsFD(FragmentFD):
             return fd.real_download(filename, info_dict)
 
         def is_ad_fragment_start(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s or
-                    s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
+            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
+                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
 
         def is_ad_fragment_end(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s or
-                    s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
+            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
+                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
 
         media_frags = 0
         ad_frags = 0
index 08670ee3c00b78354e6a534c8d39738f811e8efa..3c72ea18b2304befd5221960503ff5b6141304c3 100644 (file)
@@ -46,8 +46,8 @@ class HttpFD(FileDownloader):
 
         is_test = self.params.get('test', False)
         chunk_size = self._TEST_FILE_SIZE if is_test else (
-            info_dict.get('downloader_options', {}).get('http_chunk_size') or
-            self.params.get('http_chunk_size') or 0)
+            info_dict.get('downloader_options', {}).get('http_chunk_size')
+            or self.params.get('http_chunk_size') or 0)
 
         ctx.open_mode = 'wb'
         ctx.resume_len = 0
@@ -123,11 +123,11 @@ class HttpFD(FileDownloader):
                                 content_len = int_or_none(content_range_m.group(3))
                                 accept_content_len = (
                                     # Non-chunked download
-                                    not ctx.chunk_size or
+                                    not ctx.chunk_size
                                     # Chunked download and requested piece or
                                     # its part is promised to be served
-                                    content_range_end == range_end or
-                                    content_len < range_end)
+                                    or content_range_end == range_end
+                                    or content_len < range_end)
                                 if accept_content_len:
                                     ctx.data_len = content_len
                                     return
@@ -152,8 +152,8 @@ class HttpFD(FileDownloader):
                             raise
                     else:
                         # Examine the reported length
-                        if (content_length is not None and
-                                (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
+                        if (content_length is not None
+                                and (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
                             # The file had already been fully downloaded.
                             # Explanation to the above condition: in issue #175 it was revealed that
                             # YouTube sometimes adds or removes a few bytes from the end of the file,
index 9f8a71262883f21f9180211e3639991030e47d38..5e7c0724e994c1df632cc5d03d565e0e8ddaafdf 100644 (file)
@@ -59,9 +59,9 @@ class AddAnimeIE(InfoExtractor):
             parsed_url = compat_urllib_parse_urlparse(url)
             av_val = av_res + len(parsed_url.netloc)
             confirm_url = (
-                parsed_url.scheme + '://' + parsed_url.netloc +
-                action + '?' +
-                compat_urllib_parse_urlencode({
+                parsed_url.scheme + '://' + parsed_url.netloc
+                + action + '?'
+                + compat_urllib_parse_urlencode({
                     'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
             self._download_webpage(
                 confirm_url, video_id,
index 3b8eabe8f4e42283eaa8a2288413f971fdcd5b35..db5e12b21bc90cdaf1b38ae1ae436890b445695c 100644 (file)
@@ -32,8 +32,8 @@ class BlinkxIE(InfoExtractor):
         video_id = self._match_id(url)
         display_id = video_id[:8]
 
-        api_url = ('https://apib4.blinkx.com/api.php?action=play_video&' +
-                   'video=%s' % video_id)
+        api_url = ('https://apib4.blinkx.com/api.php?action=play_video&'
+                   'video=%s' % video_id)
         data_json = self._download_webpage(api_url, display_id)
         data = json.loads(data_json)['api']['results'][0]
         duration = None
index 59ad455c17a4bbc59044fa1febfa0f0f0a9f5a48..23b4f372a3b80cdaa7ebedb713f1d6cf34c7c76d 100644 (file)
@@ -542,11 +542,11 @@ class InfoExtractor(object):
             raise ExtractorError('An extractor error has occurred.', cause=e)
 
     def __maybe_fake_ip_and_retry(self, countries):
-        if (not self._downloader.params.get('geo_bypass_country', None) and
-                self._GEO_BYPASS and
-                self._downloader.params.get('geo_bypass', True) and
-                not self._x_forwarded_for_ip and
-                countries):
+        if (not self._downloader.params.get('geo_bypass_country', None)
+                and self._GEO_BYPASS
+                and self._downloader.params.get('geo_bypass', True)
+                and not self._x_forwarded_for_ip
+                and countries):
             country_code = random.choice(countries)
             self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
             if self._x_forwarded_for_ip:
@@ -682,8 +682,8 @@ class InfoExtractor(object):
 
     def __check_blocked(self, content):
         first_block = content[:512]
-        if ('<title>Access to this site is blocked</title>' in content and
-                'Websense' in first_block):
+        if ('<title>Access to this site is blocked</title>' in content
+                and 'Websense' in first_block):
             msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
             blocked_iframe = self._html_search_regex(
                 r'<iframe src="([^"]+)"', content,
@@ -701,8 +701,8 @@ class InfoExtractor(object):
             if block_msg:
                 msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
             raise ExtractorError(msg, expected=True)
-        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and
-                'blocklist.rkn.gov.ru' in content):
+        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
+                and 'blocklist.rkn.gov.ru' in content):
             raise ExtractorError(
                 'Access to this webpage has been blocked by decision of the Russian government. '
                 'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
@@ -1709,8 +1709,8 @@ class InfoExtractor(object):
                 continue
             else:
                 tbr = float_or_none(
-                    last_stream_inf.get('AVERAGE-BANDWIDTH') or
-                    last_stream_inf.get('BANDWIDTH'), scale=1000)
+                    last_stream_inf.get('AVERAGE-BANDWIDTH')
+                    or last_stream_inf.get('BANDWIDTH'), scale=1000)
                 format_id = []
                 if m3u8_id:
                     format_id.append(m3u8_id)
@@ -2504,8 +2504,8 @@ class InfoExtractor(object):
                             if str_or_none(s_attr.get(lbl))
                         ]
                         width = int_or_none(s_attr.get('width'))
-                        height = (int_or_none(s_attr.get('height')) or
-                                  int_or_none(s_attr.get('res')))
+                        height = (int_or_none(s_attr.get('height'))
+                                  or int_or_none(s_attr.get('res')))
                         if not width or not height:
                             for lbl in labels:
                                 resolution = parse_resolution(lbl)
@@ -2847,8 +2847,8 @@ class InfoExtractor(object):
         return not any_restricted
 
     def extract_subtitles(self, *args, **kwargs):
-        if (self._downloader.params.get('writesubtitles', False) or
-                self._downloader.params.get('listsubtitles')):
+        if (self._downloader.params.get('writesubtitles', False)
+                or self._downloader.params.get('listsubtitles')):
             return self._get_subtitles(*args, **kwargs)
         return {}
 
@@ -2873,8 +2873,8 @@ class InfoExtractor(object):
         return ret
 
     def extract_automatic_captions(self, *args, **kwargs):
-        if (self._downloader.params.get('writeautomaticsub', False) or
-                self._downloader.params.get('listsubtitles')):
+        if (self._downloader.params.get('writeautomaticsub', False)
+                or self._downloader.params.get('listsubtitles')):
             return self._get_automatic_captions(*args, **kwargs)
         return {}
 
@@ -2882,9 +2882,9 @@ class InfoExtractor(object):
         raise NotImplementedError('This method must be implemented by subclasses')
 
     def mark_watched(self, *args, **kwargs):
-        if (self._downloader.params.get('mark_watched', False) and
-                (self._get_login_info()[0] is not None or
-                    self._downloader.params.get('cookiefile') is not None)):
+        if (self._downloader.params.get('mark_watched', False)
+                and (self._get_login_info()[0] is not None
+                     or self._downloader.params.get('cookiefile') is not None)):
             self._mark_watched(*args, **kwargs)
 
     def _mark_watched(self, *args, **kwargs):
index 4f75a2a307169c91e141e8ee73d71f68fa20a66e..67b88fd56234ee9448bd4d6cc33c4d4534eb672d 100644 (file)
@@ -45,8 +45,8 @@ class DailyMailIE(InfoExtractor):
         sources_url = (try_get(
             video_data,
             (lambda x: x['plugins']['sources']['url'],
-             lambda x: x['sources']['url']), compat_str) or
-            'http://www.dailymail.co.uk/api/player/%s/video-sources.json' % video_id)
+             lambda x: x['sources']['url']), compat_str)
+            or 'http://www.dailymail.co.uk/api/player/%s/video-sources.json' % video_id)
 
         video_sources = self._download_json(sources_url, video_id)
         body = video_sources.get('body')
index 769a219dffe36ec73dab5d1e0fc18ab9bfc61993..04ff214f727826a60bbdde5ec17bb48ba004a91e 100644 (file)
@@ -70,8 +70,8 @@ class DctpTvIE(InfoExtractor):
             endpoint = next(
                 server['endpoint']
                 for server in servers
-                if url_or_none(server.get('endpoint')) and
-                'cloudfront' in server['endpoint'])
+                if url_or_none(server.get('endpoint'))
+                and 'cloudfront' in server['endpoint'])
         else:
             endpoint = 'rtmpe://s2pqqn4u96e4j8.cloudfront.net/cfx/st/'
 
index 9345714726bb7121b39eabf7fe86c1e669a4b39c..f79365038d973754ee0a2a398bc035d9521b2a6d 100644 (file)
@@ -82,8 +82,8 @@ class ExpressenIE(InfoExtractor):
         title = info.get('titleRaw') or data['title']
         description = info.get('descriptionRaw')
         thumbnail = info.get('socialMediaImage') or data.get('image')
-        duration = int_or_none(info.get('videoTotalSecondsDuration') or
-                               data.get('totalSecondsDuration'))
+        duration = int_or_none(info.get('videoTotalSecondsDuration')
+                               or data.get('totalSecondsDuration'))
         timestamp = unified_timestamp(info.get('publishDate'))
 
         return {
index cb57ba007fd61683630ea6cf2dd2d4d15d01ecc4..f1db33fb161f13d2b2fe6e09d327c04247444945 100644 (file)
@@ -94,8 +94,8 @@ class FrontendMastersPageBaseIE(FrontendMastersBaseIE):
         chapter_number = None
         index = lesson.get('index')
         element_index = lesson.get('elementIndex')
-        if (isinstance(index, int) and isinstance(element_index, int) and
-                index < element_index):
+        if (isinstance(index, int) and isinstance(element_index, int)
+                and index < element_index):
             chapter_number = element_index - index
         chapter = (chapters[chapter_number - 1]
                    if chapter_number - 1 < len(chapters) else None)
index 495fa497544ba4e2a61aa905a69e717b472c003d..3a13c62eb60b5ce2600c96814cb29176a29d0270 100644 (file)
@@ -2549,11 +2549,11 @@ class GenericIE(InfoExtractor):
             return self.url_result(mobj.group('url'))
 
         # Look for Ooyala videos
-        mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
-                re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
-                re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
-                re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
-                re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
+        mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
+                or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
+                or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
+                or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
+                or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
         if mobj is not None:
             embed_token = self._search_regex(
                 r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
@@ -3221,8 +3221,8 @@ class GenericIE(InfoExtractor):
                 else:
                     formats.append({
                         'url': src,
-                        'ext': (mimetype2ext(src_type) or
-                                ext if ext in KNOWN_EXTENSIONS else 'mp4'),
+                        'ext': (mimetype2ext(src_type)
+                                or ext if ext in KNOWN_EXTENSIONS else 'mp4'),
                     })
             if formats:
                 self._sort_formats(formats)
index 5c03780a3389fa8272663b805855c305bf934689..d8a2f9d76b027f4bb2e0c34009b9ec499c5681f1 100644 (file)
@@ -155,8 +155,8 @@ class HeiseIE(InfoExtractor):
             'id': video_id,
             'title': title,
             'description': description,
-            'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image') or
-                          self._og_search_thumbnail(webpage)),
+            'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image')
+                          or self._og_search_thumbnail(webpage)),
             'timestamp': parse_iso8601(
                 self._html_search_meta('date', webpage)),
             'formats': formats,
index 1d905dc81d925f89a5c8f7a4a23c1e62238d01b5..3e5ff2685e0cea5f57ffbbebbe8a8129767358f5 100644 (file)
@@ -58,8 +58,8 @@ class HitboxIE(InfoExtractor):
         title = video_meta.get('media_status')
         alt_title = video_meta.get('media_title')
         description = clean_html(
-            video_meta.get('media_description') or
-            video_meta.get('media_description_md'))
+            video_meta.get('media_description')
+            or video_meta.get('media_description_md'))
         duration = float_or_none(video_meta.get('media_duration'))
         uploader = video_meta.get('media_user_name')
         views = int_or_none(video_meta.get('media_views'))
index 01a6946d0d706691cd6ca43c0679cdfb4069924a..fd5dc293573654d206cbd19f26b995c1bcea5832 100644 (file)
@@ -47,8 +47,8 @@ class HitRecordIE(InfoExtractor):
             tags = [
                 t['text']
                 for t in tags_list
-                if isinstance(t, dict) and t.get('text') and
-                isinstance(t['text'], compat_str)]
+                if isinstance(t, dict) and t.get('text')
+                and isinstance(t['text'], compat_str)]
 
         return {
             'id': video_id,
index b57927fc13071dbb56a03557604c5e5c19874833..1f3502b902483e09671bc2e27f01632c775ab42a 100644 (file)
@@ -77,13 +77,13 @@ class HKETVIE(InfoExtractor):
 
         title = (
             self._html_search_meta(
-                ('ed_title', 'search.ed_title'), webpage, default=None) or
-            self._search_regex(
+                ('ed_title', 'search.ed_title'), webpage, default=None)
+            or self._search_regex(
                 r'data-favorite_title_(?:eng|chi)=(["\'])(?P<id>(?:(?!\1).)+)\1',
-                webpage, 'title', default=None, group='url') or
-            self._html_search_regex(
-                r'<h1>([^<]+)</h1>', webpage, 'title', default=None) or
-            self._og_search_title(webpage)
+                webpage, 'title', default=None, group='url')
+            or self._html_search_regex(
+                r'<h1>([^<]+)</h1>', webpage, 'title', default=None)
+            or self._og_search_title(webpage)
         )
 
         file_id = self._search_regex(
index 9ba1aa7032eb932e325f9e1e40da1e4b583ce322..23f7b1fc9550a40a74476b3f414d90425acfca7e 100644 (file)
@@ -60,8 +60,8 @@ class HRTiBaseIE(InfoExtractor):
             language=self._APP_LANGUAGE,
             application_id=self._APP_PUBLICATION_ID)
 
-        self._login_url = (modules['user']['resources']['login']['uri'] +
-                           '/format/json').format(session_id=self._session_id)
+        self._login_url = (modules['user']['resources']['login']['uri']
+                           + '/format/json').format(session_id=self._session_id)
 
         self._logout_url = modules['user']['resources']['logout']['uri']
 
index 391c2f5d015f970098fea710725e9be77e574cab..18249cf9b4a29fabdcc7cf4b41061671e7ea33e8 100644 (file)
@@ -122,9 +122,9 @@ class InfoQIE(BokeCCBaseIE):
             formats = self._extract_bokecc_formats(webpage, video_id)
         else:
             formats = (
-                self._extract_rtmp_video(webpage) +
-                self._extract_http_video(webpage) +
-                self._extract_http_audio(webpage, video_id))
+                self._extract_rtmp_video(webpage)
+                + self._extract_http_video(webpage)
+                + self._extract_http_audio(webpage, video_id))
 
         self._sort_formats(formats)
 
index 4b081bd469ca084f5ecf47ac38cc97326b011b31..cd11aa70f01ea7f1f258be9d9a76eeba1381d525 100644 (file)
@@ -383,9 +383,9 @@ class IqiyiIE(InfoExtractor):
             self._sleep(5, video_id)
 
         self._sort_formats(formats)
-        title = (get_element_by_id('widget-videotitle', webpage) or
-                 clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage)) or
-                 self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))
+        title = (get_element_by_id('widget-videotitle', webpage)
+                 or clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage))
+                 or self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))
 
         return {
             'id': video_id,
index de65b6bb45d8980f9dafce9f60c3e19539dc58d5..ad2f4eca51308d530823646a4de855bed87f95ff 100644 (file)
@@ -77,10 +77,10 @@ class ITVIE(InfoExtractor):
             return etree.SubElement(element, _add_ns(name))
 
         production_id = (
-            params.get('data-video-autoplay-id') or
-            '%s#001' % (
-                params.get('data-video-episode-id') or
-                video_id.replace('a', '/')))
+            params.get('data-video-autoplay-id')
+            or '%s#001' % (
+                params.get('data-video-episode-id')
+                or video_id.replace('a', '/')))
 
         req_env = etree.Element(_add_ns('soapenv:Envelope'))
         _add_sub_element(req_env, 'soapenv:Header')
index 79162f665ff27f40eea31a4c09fcff09018eedaf..639d7383727bbdb48681827908103361ce6828cc 100644 (file)
@@ -118,8 +118,8 @@ class KalturaIE(InfoExtractor):
                         (?P<q2>['"])_?(?P<partner_id>(?:(?!(?P=q2)).)+)(?P=q2),.*?
                         (?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s*
                         (?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\})
-                """, webpage) or
-            re.search(
+                """, webpage)
+            or re.search(
                 r'''(?xs)
                     (?P<q1>["'])
                         (?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)*
@@ -132,8 +132,8 @@ class KalturaIE(InfoExtractor):
                         \[\s*(?P<q2_1>["'])entry_?[Ii]d(?P=q2_1)\s*\]\s*=\s*
                     )
                     (?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3)
-                ''', webpage) or
-            re.search(
+                ''', webpage)
+            or re.search(
                 r'''(?xs)
                     <(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])
                       (?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)
index f236a2f78e600d45c3f7dbd40de2f320f4cf277e..7b291e0a059ece063b0c94d3c8fb485be7c4818d 100644 (file)
@@ -47,8 +47,8 @@ class KarriereVideosIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
 
-        title = (self._html_search_meta('title', webpage, default=None) or
-                 self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))
+        title = (self._html_search_meta('title', webpage, default=None)
+                 or self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))
 
         video_id = self._search_regex(
             r'/config/video/(.+?)\.xml', webpage, 'video id')
index d4bd273b61e756ef71db83bf264752fb2c0182af..43fd70f112005a893377c8e5cf489291fb8cc812 100644 (file)
@@ -80,8 +80,8 @@ class MotherlessIE(InfoExtractor):
         video_url = (self._html_search_regex(
             (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
              r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
-            webpage, 'video URL', default=None, group='url') or
-            'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
+            webpage, 'video URL', default=None, group='url')
+            or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
         age_limit = self._rta_search(webpage)
         view_count = str_to_int(self._html_search_regex(
             r'<strong>Views</strong>\s+([^<]+)<',
index ddec89f2c3091c822a61d2a6790b9fc877c15829..bc3eb91606885524bca8dd6f1667f50e33788fd5 100644 (file)
@@ -84,8 +84,8 @@ class NDTVIE(InfoExtractor):
 
         # '__title' does not contain extra words such as sub-site name, "Video" etc.
         title = compat_urllib_parse_unquote_plus(
-            self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None) or
-            self._og_search_title(webpage))
+            self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None)
+            or self._og_search_title(webpage))
 
         filename = self._search_regex(
             r"(?:__)?filename\s*[:=]\s*'([^']+)'", webpage, 'video filename')
index 680f03aad4b318a70806555ac14d57a4bdfd05e0..7bd1290bf0cab1e97145fcac41495c5f9e534213 100644 (file)
@@ -180,8 +180,8 @@ class AppleDailyIE(NextMediaIE):
     _URL_PATTERN = r'\{url: \'(.+)\'\}'
 
     def _fetch_title(self, page):
-        return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None) or
-                self._html_search_meta('description', page, 'news title'))
+        return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None)
+                or self._html_search_meta('description', page, 'news title'))
 
     def _fetch_thumbnail(self, page):
         return self._html_search_regex(r"setInitialImage\(\'([^']+)'\)", page, 'video thumbnail', fatal=False)
index 76b412ff1ae7d2d0866f3357706db365a9c0e613..eb07ca7765e6ccfe08f856f44647f1ea7d7f706f 100644 (file)
@@ -369,14 +369,14 @@ class NiconicoIE(InfoExtractor):
         video_detail = watch_api_data.get('videoDetail', {})
 
         thumbnail = (
-            get_video_info(['thumbnail_url', 'thumbnailURL']) or
-            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
-            video_detail.get('thumbnail'))
+            get_video_info(['thumbnail_url', 'thumbnailURL'])
+            or self._html_search_meta('image', webpage, 'thumbnail', default=None)
+            or video_detail.get('thumbnail'))
 
         description = get_video_info('description')
 
-        timestamp = (parse_iso8601(get_video_info('first_retrieve')) or
-                     unified_timestamp(get_video_info('postedDateTime')))
+        timestamp = (parse_iso8601(get_video_info('first_retrieve'))
+                     or unified_timestamp(get_video_info('postedDateTime')))
         if not timestamp:
             match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
             if match:
@@ -395,9 +395,9 @@ class NiconicoIE(InfoExtractor):
                 view_count = int_or_none(match.replace(',', ''))
         view_count = view_count or video_detail.get('viewCount')
 
-        comment_count = (int_or_none(get_video_info('comment_num')) or
-                         video_detail.get('commentCount') or
-                         try_get(api_data, lambda x: x['thread']['commentCount']))
+        comment_count = (int_or_none(get_video_info('comment_num'))
+                         or video_detail.get('commentCount')
+                         or try_get(api_data, lambda x: x['thread']['commentCount']))
         if not comment_count:
             match = self._html_search_regex(
                 r'>Comments: <strong[^>]*>([^<]+)</strong>',
@@ -406,11 +406,11 @@ class NiconicoIE(InfoExtractor):
                 comment_count = int_or_none(match.replace(',', ''))
 
         duration = (parse_duration(
-            get_video_info('length') or
-            self._html_search_meta(
-                'video:duration', webpage, 'video duration', default=None)) or
-            video_detail.get('length') or
-            get_video_info('duration'))
+            get_video_info('length')
+            or self._html_search_meta(
+                'video:duration', webpage, 'video duration', default=None))
+            or video_detail.get('length')
+            or get_video_info('duration'))
 
         webpage_url = get_video_info('watch_url') or url
 
index 072f920a973a39faae363c9109196c438edc9f30..5f43e692f43091eff351567e57c030585b4cc22e 100644 (file)
@@ -45,8 +45,8 @@ class NRKBaseIE(InfoExtractor):
         entries = []
 
         conviva = data.get('convivaStatistics') or {}
-        live = (data.get('mediaElementType') == 'Live' or
-                data.get('isLive') is True or conviva.get('isLive'))
+        live = (data.get('mediaElementType') == 'Live'
+                or data.get('isLive') is True or conviva.get('isLive'))
 
         def make_title(t):
             return self._live_title(t) if live else t
index e42d67df9a426bc249c702f0d90a8cfbd59e435b..995b24d1bd4758f0c51350d517a52a83053d584f 100644 (file)
@@ -31,8 +31,8 @@ class OoyalaBaseIE(InfoExtractor):
         title = metadata['title']
 
         auth_data = self._download_json(
-            self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
-            compat_urllib_parse_urlencode({
+            self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code)
+            + compat_urllib_parse_urlencode({
                 'domain': domain,
                 'supportedFormats': supportedformats or 'mp4,rtmp,m3u8,hds,dash,smooth',
                 'embedToken': embed_token,
index f77296f42461520a98b002fece41b49509b14cd4..a8e906858f8361b3a72557e580c2507aecfa2312 100644 (file)
@@ -43,9 +43,9 @@ def cookie_to_dict(cookie):
     if cookie.discard is not None:
         cookie_dict['discard'] = cookie.discard
     try:
-        if (cookie.has_nonstandard_attr('httpOnly') or
-                cookie.has_nonstandard_attr('httponly') or
-                cookie.has_nonstandard_attr('HttpOnly')):
+        if (cookie.has_nonstandard_attr('httpOnly')
+                or cookie.has_nonstandard_attr('httponly')
+                or cookie.has_nonstandard_attr('HttpOnly')):
             cookie_dict['httponly'] = True
     except TypeError:
         pass
index 25fcebf9fa6a06a6bf955468ddf0740b648593a5..e782e3f1fd4a990bcfeedd5c311b8fa7395d8f17 100644 (file)
@@ -50,8 +50,8 @@ class PodomaticIE(InfoExtractor):
         video_id = mobj.group('id')
         channel = mobj.group('channel') or mobj.group('channel_2')
 
-        json_url = (('%s://%s.podomatic.com/entry/embed_params/%s' +
-                     '?permalink=true&rtmp=0') %
+        json_url = (('%s://%s.podomatic.com/entry/embed_params/%s'
+                     '?permalink=true&rtmp=0') %
                     (mobj.group('proto'), channel, video_id))
         data_json = self._download_webpage(
             json_url, video_id, 'Downloading video info')
index f05401b36b46d9f10b9ea92493798345b94708d0..f984040aa07b08f56b1028d0dde1dc3fc78137cb 100644 (file)
@@ -91,8 +91,8 @@ class RuutuIE(InfoExtractor):
                     extract_formats(child)
                 elif child.tag.endswith('File'):
                     video_url = child.text
-                    if (not video_url or video_url in processed_urls or
-                            any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
+                    if (not video_url or video_url in processed_urls
+                            or any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
                         continue
                     processed_urls.append(video_url)
                     ext = determine_ext(video_url)
index 845712a7640afe9f675757c2f711830c9c79a00f..0e623ff7b7039e541754088d606c33855ca5a01b 100644 (file)
@@ -55,8 +55,8 @@ class SBSIE(InfoExtractor):
             raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
 
         urls = player_params['releaseUrls']
-        theplatform_url = (urls.get('progressive') or urls.get('html') or
-                           urls.get('standard') or player_params['relatedItemsURL'])
+        theplatform_url = (urls.get('progressive') or urls.get('html')
+                           or urls.get('standard') or player_params['relatedItemsURL'])
 
         return {
             '_type': 'url_transparent',
index 4aa72cbd1d4106f79ac6d8152f76b83412fbe246..232e05816074d7bf53b5fb07c1f3eb882fd4a1ac 100644 (file)
@@ -275,8 +275,8 @@ class VevoIE(VevoBaseIE):
 
         genres = video_info.get('genres')
         genre = (
-            genres[0] if genres and isinstance(genres, list) and
-            isinstance(genres[0], compat_str) else None)
+            genres[0] if genres and isinstance(genres, list)
+            and isinstance(genres[0], compat_str) else None)
 
         is_explicit = video_info.get('isExplicit')
         if is_explicit is True:
index 1072550f1945344176b16cec11b2441e462ac926..b7ce2fb97471338d293a6ae07a24c3c3432c9081 100644 (file)
@@ -443,8 +443,8 @@ class VKIE(VKBaseIE):
             format_url = url_or_none(format_url)
             if not format_url or not format_url.startswith(('http', '//', 'rtmp')):
                 continue
-            if (format_id.startswith(('url', 'cache')) or
-                    format_id in ('extra_data', 'live_mp4', 'postlive_mp4')):
+            if (format_id.startswith(('url', 'cache'))
+                    or format_id in ('extra_data', 'live_mp4', 'postlive_mp4')):
                 height = int_or_none(self._search_regex(
                     r'^(?:url|cache)(\d+)', format_id, 'height', default=None))
                 formats.append({
index 940c24af33ebb7d960f6950445a68f4e26e65af8..1aea9538310bb4cdffdc8adc5e60be15e904dcce 100644 (file)
@@ -70,9 +70,9 @@ class YandexVideoIE(InfoExtractor):
 
         description = content.get('description')
         thumbnail = content.get('thumbnail')
-        timestamp = (int_or_none(content.get('release_date')) or
-                     int_or_none(content.get('release_date_ut')) or
-                     int_or_none(content.get('start_time')))
+        timestamp = (int_or_none(content.get('release_date'))
+                     or int_or_none(content.get('release_date_ut'))
+                     or int_or_none(content.get('start_time')))
         duration = int_or_none(content.get('duration'))
         series = content.get('program_title')
         age_limit = int_or_none(content.get('restriction_age'))
index 2f5a7b023ba70cf320cbd7ec11f07262e1d2378a..61d1ab20908bf1a233cf90624170d9ee824ff337 100644 (file)
@@ -258,8 +258,8 @@ class YoukuShowIE(InfoExtractor):
             transform_source=lambda s: js_to_json(strip_jsonp(s))).get('html')
         if playlist_data is None:
             return [None, None]
-        drama_list = (get_element_by_class('p-drama-grid', playlist_data) or
-                      get_element_by_class('p-drama-half-row', playlist_data))
+        drama_list = (get_element_by_class('p-drama-grid', playlist_data)
+                      or get_element_by_class('p-drama-half-row', playlist_data))
         if drama_list is None:
             raise ExtractorError('No episodes found')
         video_urls = re.findall(r'<a[^>]+href="([^"]+)"', drama_list)
index 8619f38381e42f46367f8044ba3552d1b1a0f7dd..06005f8d245eb975d67290cead3fb883c27f3f59 100644 (file)
@@ -2052,8 +2052,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 url_or_none(try_get(
                     player_response,
                     lambda x: x['streamingData']['hlsManifestUrl'],
-                    compat_str)) or
-                url_or_none(try_get(
+                    compat_str))
+                or url_or_none(try_get(
                     video_info, lambda x: x['hlsvp'][0], compat_str)))
             if manifest_url:
                 formats = []
@@ -2102,10 +2102,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             self._downloader.report_warning('unable to extract uploader nickname')
 
         channel_id = (
-            str_or_none(video_details.get('channelId')) or
-            self._html_search_meta(
-                'channelId', video_webpage, 'channel id', default=None) or
-            self._search_regex(
+            str_or_none(video_details.get('channelId'))
+            or self._html_search_meta(
+                'channelId', video_webpage, 'channel id', default=None)
+            or self._search_regex(
                 r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                 video_webpage, 'channel id', default=None, group='id'))
         channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
@@ -2564,9 +2564,9 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
 
         search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
         title_span = (
-            search_title('playlist-title') or
-            search_title('title long-title') or
-            search_title('title'))
+            search_title('playlist-title')
+            or search_title('title long-title')
+            or search_title('title'))
         title = clean_html(title_span)
 
         return self.playlist_result(url_results, playlist_id, title)
index ee514666b3e6ae274ccc5f97c964a9d2cbaacd18..6bac3026ede81754f41d6eb56c92815c41cfa06c 100644 (file)
@@ -86,8 +86,8 @@ class ZattooPlatformBaseIE(InfoExtractor):
             return next(
                 chan['cid'] for chan in channel_list
                 if chan.get('cid') and (
-                    chan.get('display_alias') == channel_name or
-                    chan.get('cid') == channel_name))
+                    chan.get('display_alias') == channel_name
+                    or chan.get('cid') == channel_name))
         except StopIteration:
             raise ExtractorError('Could not extract channel id')
 
index 5bcb00ac0daed0037d1d3f3978b1644e72a613a3..70416c25ee1a3fb333c63ff7334f87250122fb07 100644 (file)
@@ -221,9 +221,9 @@ class FFmpegPostProcessor(PostProcessor):
         # avconv does not have repeat option
         if self.basename == 'ffmpeg':
             cmd += [encodeArgument('-loglevel'), encodeArgument('repeat+info')]
-        cmd += (files_cmd +
-                [encodeArgument(o) for o in opts] +
-                [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
+        cmd += (files_cmd
+                + [encodeArgument(o) for o in opts]
+                + [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
 
         if self._downloader.params.get('verbose', False):
             self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
@@ -326,8 +326,8 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
         information['ext'] = extension
 
         # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
-        if (new_path == path or
-                (self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
+        if (new_path == path
+                or (self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
             self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path)
             return [], information
 
index b0aed9ca7b2da21d09223b8822a0c530c2a7b4cc..814dabecf7be8a2e659ddd08882e5381003c2844 100644 (file)
@@ -64,8 +64,8 @@ class XAttrMetadataPP(PostProcessor):
         except XAttrMetadataError as e:
             if e.reason == 'NO_SPACE':
                 self._downloader.report_warning(
-                    'There\'s no disk space left, disk quota exceeded or filesystem xattr limit exceeded. ' +
-                    (('Some ' if num_written else '') + 'extended attributes are not written.').capitalize())
+                    'There\'s no disk space left, disk quota exceeded or filesystem xattr limit exceeded. '
+                    + (('Some ' if num_written else '') + 'extended attributes are not written.').capitalize())
             elif e.reason == 'VALUE_TOO_LONG':
                 self._downloader.report_warning(
                     'Unable to write extended attributes due to too long values.')
index 99ee5494293423645e990624348a4fc0258f86e0..9be9b2e761545ecbd8c8a1c10eeba4e97f586f8c 100644 (file)
@@ -861,8 +861,8 @@ class XAttrMetadataError(YoutubeDLError):
         self.msg = msg
 
         # Parsing code and msg
-        if (self.code in (errno.ENOSPC, errno.EDQUOT) or
-                'No space left' in self.msg or 'Disk quota excedded' in self.msg):
+        if (self.code in (errno.ENOSPC, errno.EDQUOT)
+                or 'No space left' in self.msg or 'Disk quota excedded' in self.msg):
             self.reason = 'NO_SPACE'
         elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
             self.reason = 'VALUE_TOO_LONG'
@@ -1453,8 +1453,8 @@ def _windows_write_string(s, out):
     def not_a_console(handle):
         if handle == INVALID_HANDLE_VALUE or handle is None:
             return True
-        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
-                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
+        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
+                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
 
     if not_a_console(h):
         return False
@@ -1490,8 +1490,8 @@ def write_string(s, out=None, encoding=None):
         if _windows_write_string(s, out):
             return
 
-    if ('b' in getattr(out, 'mode', '') or
-            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
+    if ('b' in getattr(out, 'mode', '')
+            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
         byt = s.encode(encoding or preferredencoding(), 'ignore')
         out.write(byt)
     elif hasattr(out, 'buffer'):
@@ -2328,10 +2328,10 @@ def merge_dicts(*dicts):
         for k, v in a_dict.items():
             if v is None:
                 continue
-            if (k not in merged or
-                    (isinstance(v, compat_str) and v and
-                        isinstance(merged[k], compat_str) and
-                        not merged[k])):
+            if (k not in merged
+                    or (isinstance(v, compat_str) and v
+                        and isinstance(merged[k], compat_str)
+                        and not merged[k])):
                 merged[k] = v
     return merged
 
@@ -2657,14 +2657,14 @@ def _match_one(filter_part, dct):
     if m:
         op = COMPARISON_OPERATORS[m.group('op')]
         actual_value = dct.get(m.group('key'))
-        if (m.group('quotedstrval') is not None or
-            m.group('strval') is not None or
+        if (m.group('quotedstrval') is not None
+            or m.group('strval') is not None
             # If the original field is a string and matching comparisonvalue is
             # a number we should respect the origin of the original field
             # and process comparison value as a string (see
             # https://github.com/ytdl-org/youtube-dl/issues/11082).
-            actual_value is not None and m.group('intval') is not None and
-                isinstance(actual_value, compat_str)):
+            or actual_value is not None and m.group('intval') is not None
+                and isinstance(actual_value, compat_str)):
             if m.group('op') not in ('=', '!='):
                 raise ValueError(
                     'Operator %s does not support string values!' % m.group('op'))
@@ -3973,9 +3973,9 @@ def write_xattr(path, key, value):
                     executable = 'xattr'
                     opts = ['-w', key, value]
 
-                cmd = ([encodeFilename(executable, True)] +
-                       [encodeArgument(o) for o in opts] +
-                       [encodeFilename(path, True)])
+                cmd = ([encodeFilename(executable, True)]
+                       + [encodeArgument(o) for o in opts]
+                       + [encodeFilename(path, True)])
 
                 try:
                     p = subprocess.Popen(