Fix typos
author Jakub Wilk <jwilk@jwilk.net>
Sun, 10 Jan 2016 15:17:47 +0000 (16:17 +0100)
committer Jaime Marquínez Ferrándiz <jaime.marquinez.ferrandiz@gmail.com>
Sun, 10 Jan 2016 16:24:28 +0000 (17:24 +0100)
Closes #8200.

16 files changed:
devscripts/gh-pages/update-copyright.py
test/test_write_annotations.py
youtube_dl/YoutubeDL.py
youtube_dl/extractor/common.py
youtube_dl/extractor/facebook.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/ivi.py
youtube_dl/extractor/mdr.py
youtube_dl/extractor/nbc.py
youtube_dl/extractor/nhl.py
youtube_dl/extractor/ora.py
youtube_dl/extractor/testurl.py
youtube_dl/extractor/tv4.py
youtube_dl/extractor/videomore.py
youtube_dl/swfinterp.py
youtube_dl/utils.py

devscripts/gh-pages/update-copyright.py
index 3663c8afef278f132518ed9c0286bdfe34d028a7..e6c3abc8d8c716db6adbb62598a9d9179fcaa2da 100755 (executable)
@@ -5,7 +5,7 @@ from __future__ import with_statement, unicode_literals
 
 import datetime
 import glob
-import io  # For Python 2 compatibilty
+import io  # For Python 2 compatibility
 import os
 import re
 
test/test_write_annotations.py
index 84b8f39e00a45458e3c3a37308332b97068d6e27..8de08f2d6d3974bd2d28265c323e7ff76d1317a3 100644 (file)
@@ -66,7 +66,7 @@ class TestAnnotations(unittest.TestCase):
                 textTag = a.find('TEXT')
                 text = textTag.text
                 self.assertTrue(text in expected)  # assertIn only added in python 2.7
-                # remove the first occurance, there could be more than one annotation with the same text
+                # remove the first occurrence, there could be more than one annotation with the same text
                 expected.remove(text)
         # We should have seen (and removed) all the expected annotation texts.
         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
youtube_dl/YoutubeDL.py
index 3b2be3159115e907359557178be1238b061ca507..d50b7cfed3c537a02fd53a9dc46f4e0981b6608b 100755 (executable)
@@ -1312,7 +1312,7 @@ class YoutubeDL(object):
             # only set the 'formats' fields if the original info_dict list them
             # otherwise we end up with a circular reference, the first (and unique)
             # element in the 'formats' field in info_dict is info_dict itself,
-            # wich can't be exported to json
+            # which can't be exported to json
             info_dict['formats'] = formats
         if self.params.get('listformats'):
             self.list_formats(info_dict)
youtube_dl/extractor/common.py
index 0719c7bcd291dc66b03eb2c09080c54605de9885..b05b22a94b0bac346e776ef5c3d42305cf5f4163 100644 (file)
@@ -313,9 +313,9 @@ class InfoExtractor(object):
         except ExtractorError:
             raise
         except compat_http_client.IncompleteRead as e:
-            raise ExtractorError('A network error has occured.', cause=e, expected=True)
+            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
         except (KeyError, StopIteration) as e:
-            raise ExtractorError('An extractor error has occured.', cause=e)
+            raise ExtractorError('An extractor error has occurred.', cause=e)
 
     def set_downloader(self, downloader):
         """Sets the downloader for this IE."""
youtube_dl/extractor/facebook.py
index 5e43f235965cb7b702987276d63f2ae43a75c501..ec699ba5405426b07a169b3a6cfdb3a0aeb355d0 100644 (file)
@@ -105,7 +105,7 @@ class FacebookIE(InfoExtractor):
                     login_results, 'login error', default=None, group='error')
                 if error:
                     raise ExtractorError('Unable to login: %s' % error, expected=True)
-                self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
+                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                 return
 
             fb_dtsg = self._search_regex(
@@ -126,7 +126,7 @@ class FacebookIE(InfoExtractor):
             check_response = self._download_webpage(check_req, None,
                                                     note='Confirming login')
             if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
-                self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.')
+                self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
             return
youtube_dl/extractor/generic.py
index d79e1adc9b1c8e958027e2c7999ad6ce9757f09f..b3f8efc80ba5f6ecaf99f528832f5ad4db8b11c3 100644 (file)
@@ -487,7 +487,7 @@ class GenericIE(InfoExtractor):
                 'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
             }
         },
-        # Embeded Ustream video
+        # Embedded Ustream video
         {
             'url': 'http://www.american.edu/spa/pti/nsa-privacy-janus-2014.cfm',
             'md5': '27b99cdb639c9b12a79bca876a073417',
@@ -1644,7 +1644,7 @@ class GenericIE(InfoExtractor):
         if myvi_url:
             return self.url_result(myvi_url)
 
-        # Look for embeded soundcloud player
+        # Look for embedded soundcloud player
         mobj = re.search(
             r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
             webpage)
youtube_dl/extractor/ivi.py
index d0f00cdea52a57fd0edbfa45ca0aeff03ab48e0f..472d72b4c34fa3305b6b2808be1e45c6da25a60e 100644 (file)
@@ -32,7 +32,7 @@ class IviIE(InfoExtractor):
             },
             'skip': 'Only works from Russia',
         },
-        # Serial's serie
+        # Serial's series
         {
             'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549',
             'md5': '221f56b35e3ed815fde2df71032f4b3e',
youtube_dl/extractor/mdr.py
index 88334889e950915304bca35c8c42bf104c143307..425fc9e2a69b93879eb71a32eae2a042d97770cc 100644 (file)
@@ -17,7 +17,7 @@ class MDRIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+(?P<id>\d+)(?:_.+?)?\.html'
 
     _TESTS = [{
-        # MDR regularily deletes its videos
+        # MDR regularly deletes its videos
         'url': 'http://www.mdr.de/fakt/video189002.html',
         'only_matching': True,
     }, {
youtube_dl/extractor/nbc.py
index 340c922bda4b9be00bd145207eb5ad8790a3fddc..1dd54c2f113c51b1c7856f1885dfe4d3b6081c19 100644 (file)
@@ -100,7 +100,7 @@ class NBCSportsVPlayerIE(InfoExtractor):
 
 
 class NBCSportsIE(InfoExtractor):
-    # Does not include https becuase its certificate is invalid
+    # Does not include https because its certificate is invalid
     _VALID_URL = r'http://www\.nbcsports\.com//?(?:[^/]+/)+(?P<id>[0-9a-z-]+)'
 
     _TEST = {
youtube_dl/extractor/nhl.py
index e98a5ef89d9bb034c72ffe590fda54fb67dc9b08..8d5ce46ad6bd2c5bdac2aff9998e8f278fca6376 100644 (file)
@@ -223,7 +223,7 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
         response = self._download_webpage(request_url, playlist_title)
         response = self._fix_json(response)
         if not response.strip():
-            self._downloader.report_warning('Got an empty reponse, trying '
+            self._downloader.report_warning('Got an empty response, trying '
                                             'adding the "newvideos" parameter')
             response = self._download_webpage(request_url + '&newvideos=true',
                                               playlist_title)
                                             'adding the "newvideos" parameter')
             response = self._download_webpage(request_url + '&newvideos=true',
                                               playlist_title)
youtube_dl/extractor/ora.py
index 9c4255a2d655d1dc6183ad207eb48af82d5fac8f..02de1502a42f40153e912536ac721221e0dbc00d 100644 (file)
@@ -37,7 +37,7 @@ class OraTVIE(InfoExtractor):
             formats = self._extract_m3u8_formats(
                 m3u8_url, display_id, 'mp4', 'm3u8_native',
                 m3u8_id='hls', fatal=False)
-            # simular to GameSpotIE
+            # similar to GameSpotIE
             m3u8_path = compat_urlparse.urlparse(m3u8_url).path
             QUALITIES_RE = r'((,[a-z]+\d+)+,?)'
             available_qualities = self._search_regex(
youtube_dl/extractor/testurl.py
index c7d559315be7d2ceed094fab691e4141de5534e7..46918adb05fc77d45b480b138575afff9d69a086 100644 (file)
@@ -7,7 +7,7 @@ from ..utils import ExtractorError
 
 
 class TestURLIE(InfoExtractor):
-    """ Allows adressing of the test cases as test:yout.*be_1 """
+    """ Allows addressing of the test cases as test:yout.*be_1 """
 
     IE_DESC = False  # Do not list
     _VALID_URL = r'test(?:url)?:(?P<id>(?P<extractor>.+?)(?:_(?P<num>[0-9]+))?)$'
youtube_dl/extractor/tv4.py
index 1c4b6d6353292dddbf3d67a42a24667ec3bb81d7..343edf20663d172a4e071a77f228fc4d9962003d 100644 (file)
@@ -67,7 +67,7 @@ class TV4IE(InfoExtractor):
         info = self._download_json(
             'http://www.tv4play.se/player/assets/%s.json' % video_id, video_id, 'Downloading video info JSON')
 
-        # If is_geo_restricted is true, it doesn't neceserally mean we can't download it
+        # If is_geo_restricted is true, it doesn't necessarily mean we can't download it
         if info['is_geo_restricted']:
             self.report_warning('This content might not be available in your country due to licensing restrictions.')
         if info['requires_subscription']:
index a66d6de23aa0769e895a2c1842c4b6e7f26dd0fb..fcee940e6183c9fb1d461eeeb9d699d4c3a5a00a 100644 (file)
@@ -170,7 +170,7 @@ class VideomoreVideoIE(InfoExtractor):
             'skip_download': True,
         },
     }, {
-        # season single serie with og:video:iframe
+        # season single series with og:video:iframe
         'url': 'http://videomore.ru/poslednii_ment/1_sezon/14_seriya',
         'only_matching': True,
     }, {
youtube_dl/swfinterp.py
index e60505ace8b8451666f2aeebea3277bc58cb6297..06c1d6cc1755ef022aa78967d4b651e21fd66618 100644 (file)
@@ -689,7 +689,7 @@ class SWFInterpreter(object):
                     elif mname in _builtin_classes:
                         res = _builtin_classes[mname]
                     else:
-                        # Assume unitialized
+                        # Assume uninitialized
                         # TODO warn here
                         res = undefined
                     stack.append(res)
youtube_dl/utils.py
index da4ec7f209a4d0dfbad84254ee51914c3fadc270..9c1c0e0bdf55040dc3b82d4393d50c3623e3b0de 100644 (file)
@@ -984,7 +984,7 @@ def date_from_str(date_str):
         if sign == '-':
             time = -time
         unit = match.group('unit')
-        # A bad aproximation?
+        # A bad approximation?
         if unit == 'month':
             unit = 'day'
             time *= 30
@@ -1307,7 +1307,7 @@ def parse_filesize(s):
     if s is None:
         return None
 
-    # The lower-case forms are of course incorrect and inofficial,
+    # The lower-case forms are of course incorrect and unofficial,
     # but we support those too
     _UNIT_TABLE = {
         'B': 1,