Merge branch 'ted_subtitles'
authorIsmaël Mejía <iemejia@gmail.com>
Sat, 2 Nov 2013 18:50:45 +0000 (19:50 +0100)
committerIsmaël Mejía <iemejia@gmail.com>
Sat, 2 Nov 2013 18:50:45 +0000 (19:50 +0100)
67 files changed:
Makefile
README.md
devscripts/bash-completion.in
devscripts/check-porn.py [new file with mode: 0644]
devscripts/release.sh
setup.py
test/helper.py
test/test_YoutubeDL.py [new file with mode: 0644]
test/test_age_restriction.py
test/test_all_urls.py
test/test_dailymotion_subtitles.py
test/test_download.py
test/test_playlists.py
test/test_utils.py
test/test_write_annotations.py
test/test_write_info_json.py
test/test_youtube_lists.py
test/test_youtube_signature.py
test/test_youtube_subtitles.py
youtube_dl/PostProcessor.py
youtube_dl/YoutubeDL.py
youtube_dl/__init__.py
youtube_dl/extractor/__init__.py
youtube_dl/extractor/addanime.py
youtube_dl/extractor/arte.py
youtube_dl/extractor/brightcove.py
youtube_dl/extractor/cinemassacre.py
youtube_dl/extractor/common.py
youtube_dl/extractor/dailymotion.py
youtube_dl/extractor/eighttracks.py
youtube_dl/extractor/exfm.py
youtube_dl/extractor/facebook.py
youtube_dl/extractor/faz.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/googleplus.py
youtube_dl/extractor/instagram.py
youtube_dl/extractor/internetvideoarchive.py
youtube_dl/extractor/keezmovies.py [new file with mode: 0644]
youtube_dl/extractor/livestream.py
youtube_dl/extractor/metacafe.py
youtube_dl/extractor/mtv.py
youtube_dl/extractor/myspace.py [new file with mode: 0644]
youtube_dl/extractor/nhl.py
youtube_dl/extractor/nowvideo.py
youtube_dl/extractor/pornhub.py [new file with mode: 0644]
youtube_dl/extractor/pornotube.py
youtube_dl/extractor/redtube.py
youtube_dl/extractor/rtlnow.py
youtube_dl/extractor/rutube.py [new file with mode: 0644]
youtube_dl/extractor/spankwire.py [new file with mode: 0644]
youtube_dl/extractor/sztvhu.py [new file with mode: 0644]
youtube_dl/extractor/techtalks.py [new file with mode: 0644]
youtube_dl/extractor/tube8.py [new file with mode: 0644]
youtube_dl/extractor/tudou.py
youtube_dl/extractor/vevo.py
youtube_dl/extractor/videodetective.py
youtube_dl/extractor/vimeo.py
youtube_dl/extractor/vk.py [new file with mode: 0644]
youtube_dl/extractor/websurg.py [new file with mode: 0644]
youtube_dl/extractor/xhamster.py
youtube_dl/extractor/xnxx.py
youtube_dl/extractor/xvideos.py
youtube_dl/extractor/youjizz.py
youtube_dl/extractor/youporn.py
youtube_dl/extractor/youtube.py
youtube_dl/utils.py
youtube_dl/version.py

index 85dacfa4c31f2b83860891d6339b8b4a0e48c6b7..c6d09932bcd4f45b8910e828255703403c2df0d7 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -13,13 +13,13 @@ PYTHON=/usr/bin/env python
 
 # set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
 ifeq ($(PREFIX),/usr)
-    SYSCONFDIR=/etc
+       SYSCONFDIR=/etc
 else
-    ifeq ($(PREFIX),/usr/local)
-        SYSCONFDIR=/etc
-    else
-        SYSCONFDIR=$(PREFIX)/etc
-    endif
+       ifeq ($(PREFIX),/usr/local)
+               SYSCONFDIR=/etc
+       else
+               SYSCONFDIR=$(PREFIX)/etc
+       endif
 endif
 
 install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
@@ -71,6 +71,7 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
                --exclude '*~' \
                --exclude '__pycache' \
                --exclude '.git' \
+               --exclude 'testdata' \
                -- \
                bin devscripts test youtube_dl \
                CHANGELOG LICENSE README.md README.txt \
index 8824daee2cba4437c44db2576384527c55a3c3f1..a2b29661379647447c856173c60ab65a3c65d3ad 100644 (file)
--- a/README.md
+++ b/README.md
@@ -21,6 +21,8 @@ which means you can modify it, redistribute it or use it however you like.
                                sudo if needed)
     -i, --ignore-errors        continue on download errors, for example to to
                                skip unavailable videos in a playlist
+    --abort-on-error           Abort downloading of further videos (in the
+                               playlist or the command line) if an error occurs
     --dump-user-agent          display the current browser identification
     --user-agent UA            specify a custom user agent
     --referer REF              specify a custom referer, use if the video access
@@ -30,7 +32,7 @@ which means you can modify it, redistribute it or use it however you like.
     --extractor-descriptions   Output descriptions of all supported extractors
     --proxy URL                Use the specified HTTP/HTTPS proxy
     --no-check-certificate     Suppress HTTPS certificate validation.
-    --cache-dir None           Location in the filesystem where youtube-dl can
+    --cache-dir DIR            Location in the filesystem where youtube-dl can
                                store downloaded information permanently. By
                                default $XDG_CACHE_HOME/youtube-dl or ~/.cache
                                /youtube-dl .
@@ -57,9 +59,10 @@ which means you can modify it, redistribute it or use it however you like.
                                file. Record all downloaded videos in it.
 
 ## Download Options:
-    -r, --rate-limit LIMIT     maximum download rate (e.g. 50k or 44.6m)
+    -r, --rate-limit LIMIT     maximum download rate in bytes per second (e.g.
+                               50K or 4.2M)
     -R, --retries RETRIES      number of retries (default is 10)
-    --buffer-size SIZE         size of download buffer (e.g. 1024 or 16k)
+    --buffer-size SIZE         size of download buffer (e.g. 1024 or 16K)
                                (default is 1024)
     --no-resize-buffer         do not automatically adjust the buffer size. By
                                default, the buffer size is automatically resized
@@ -75,7 +78,10 @@ which means you can modify it, redistribute it or use it however you like.
                                %(uploader_id)s for the uploader nickname if
                                different, %(autonumber)s to get an automatically
                                incremented number, %(ext)s for the filename
-                               extension, %(upload_date)s for the upload date
+                               extension, %(format)s for the format description
+                               (like "22 - 1280x720" or "HD"),%(format_id)s for
+                               the unique id of the format (like Youtube's
+                               itags: "137"),%(upload_date)s for the upload date
                                (YYYYMMDD), %(extractor)s for the provider
                                (youtube, metacafe, etc), %(id)s for the video id
                                , %(playlist)s for the playlist the video is in,
@@ -100,6 +106,7 @@ which means you can modify it, redistribute it or use it however you like.
                                file modification time
     --write-description        write video description to a .description file
     --write-info-json          write video metadata to a .info.json file
+    --write-annotations        write video annotations to a .annotation file
     --write-thumbnail          write thumbnail image to disk
 
 ## Verbosity / Simulation Options:
@@ -120,6 +127,8 @@ which means you can modify it, redistribute it or use it however you like.
     -v, --verbose              print various debugging information
     --dump-intermediate-pages  print downloaded pages to debug problems(very
                                verbose)
+    --write-pages              Write downloaded pages to files in the current
+                               directory
 
 ## Video Format Options:
     -f, --format FORMAT        video format code, specifiy the order of
@@ -166,6 +175,7 @@ which means you can modify it, redistribute it or use it however you like.
                                processed files are overwritten by default
     --embed-subs               embed subtitles in the video (only for mp4
                                videos)
+    --add-metadata             add metadata to the files
 
 # CONFIGURATION
 
index bd10f63c2f2615168a482085ad156a9f4e330ddd..ce893fcbe1a681e535452c35f5b833eea54b2d95 100644 (file)
@@ -1,4 +1,4 @@
-__youtube-dl()
+__youtube_dl()
 {
     local cur prev opts
     COMPREPLY=()
@@ -15,4 +15,4 @@ __youtube-dl()
     fi
 }
 
-complete -F __youtube-dl youtube-dl
+complete -F __youtube_dl youtube-dl
diff --git a/devscripts/check-porn.py b/devscripts/check-porn.py
new file mode 100644 (file)
index 0000000..63401fe
--- /dev/null
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+"""
+This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
+if we are not 'age_limit' tagging some porn site
+"""
+
+# Allow direct execution
+import os
+import sys
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import get_testcases
+from youtube_dl.utils import compat_urllib_request
+
+for test in get_testcases():
+    try:
+        webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
+    except:
+        print('\nFail: {0}'.format(test['name']))
+        continue
+
+    webpage = webpage.decode('utf8', 'replace')
+
+    if 'porn' in webpage.lower() and ('info_dict' not in test
+                                      or 'age_limit' not in test['info_dict']
+                                      or test['info_dict']['age_limit'] != 18):
+        print('\nPotential missing age_limit check: {0}'.format(test['name']))
+
+    elif 'porn' not in webpage.lower() and ('info_dict' in test and
+                                            'age_limit' in test['info_dict'] and
+                                            test['info_dict']['age_limit'] == 18):
+        print('\nPotential false negative: {0}'.format(test['name']))
+
+    else:
+        sys.stdout.write('.')
+    sys.stdout.flush()
+
+print()
index 796468b4b3aee3e603ddb919535bfde281cd71e5..2766174c1a8477519eb818f287897ebc93d04a72 100755 (executable)
@@ -88,10 +88,6 @@ ROOT=$(pwd)
     "$ROOT/devscripts/gh-pages/update-sites.py"
     git add *.html *.html.in update
     git commit -m "release $version"
-    git show HEAD
-    read -p "Is it good, can I push? (y/n) " -n 1
-    if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
-    echo
     git push "$ROOT" gh-pages
     git push "$ORIGIN_URL" gh-pages
 )
index 3b6dc2d40f0f551630dac1007aaf72e0af819724..aa7cfca0862b1f4ba2cfd220fd570ca63bcfda7e 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -8,8 +8,10 @@ import sys
 
 try:
     from setuptools import setup
+    setuptools_available = True
 except ImportError:
     from distutils.core import setup
+    setuptools_available = False
 
 try:
     # This will create an exe that needs Microsoft Visual C++ 2008
@@ -43,13 +45,16 @@ if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
     params = py2exe_params
 else:
     params = {
-        'scripts': ['bin/youtube-dl'],
         'data_files': [  # Installing system-wide would require sudo...
             ('etc/bash_completion.d', ['youtube-dl.bash-completion']),
             ('share/doc/youtube_dl', ['README.txt']),
             ('share/man/man1/', ['youtube-dl.1'])
         ]
     }
+    if setuptools_available:
+        params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']}
+    else:
+        params['scripts'] = ['bin/youtube-dl']
 
 # Get the version from youtube_dl/version.py without importing the package
 exec(compile(open('youtube_dl/version.py').read(),
@@ -63,6 +68,7 @@ setup(
     ' YouTube.com and other video sites.',
     url='https://github.com/rg3/youtube-dl',
     author='Ricardo Garcia',
+    author_email='ytdl@yt-dl.org',
     maintainer='Philipp Hagemeister',
     maintainer_email='phihag@phihag.de',
     packages=['youtube_dl', 'youtube_dl.extractor'],
index ad1b74dd30c140b01d8c9a4c51513cb2e3b8997f..d7bf7a82802e58f0a80d788de83146d3a9d3fadf 100644 (file)
@@ -1,22 +1,29 @@
 import errno
 import io
+import hashlib
 import json
 import os.path
 import re
 import types
+import sys
 
 import youtube_dl.extractor
-from youtube_dl import YoutubeDL, YoutubeDLHandler
-from youtube_dl.utils import (
-    compat_cookiejar,
-    compat_urllib_request,
-)
+from youtube_dl import YoutubeDL
+from youtube_dl.utils import preferredencoding
 
-youtube_dl._setup_opener(timeout=10)
 
-PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
-with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
-    parameters = json.load(pf)
+def global_setup():
+    youtube_dl._setup_opener(timeout=10)
+
+
+def get_params(override=None):
+    PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+                                   "parameters.json")
+    with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
+        parameters = json.load(pf)
+    if override:
+        parameters.update(override)
+    return parameters
 
 
 def try_rm(filename):
@@ -28,11 +35,26 @@ def try_rm(filename):
             raise
 
 
+def report_warning(message):
+    '''
+    Print the message to stderr, it will be prefixed with 'WARNING:'
+    If stderr is a tty file the 'WARNING:' will be colored
+    '''
+    if sys.stderr.isatty() and os.name != 'nt':
+        _msg_header = u'\033[0;33mWARNING:\033[0m'
+    else:
+        _msg_header = u'WARNING:'
+    output = u'%s %s\n' % (_msg_header, message)
+    if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
+        output = output.encode(preferredencoding())
+    sys.stderr.write(output)
+
+
 class FakeYDL(YoutubeDL):
-    def __init__(self):
+    def __init__(self, override=None):
         # Different instances of the downloader can't share the same dictionary
         # some test set the "sublang" parameter, which would break the md5 checks.
-        params = dict(parameters)
+        params = get_params(override=override)
         super(FakeYDL, self).__init__(params)
         self.result = []
         
@@ -62,3 +84,6 @@ def get_testcases():
         for t in getattr(ie, '_TESTS', []):
             t['name'] = type(ie).__name__[:-len('IE')]
             yield t
+
+
+md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py
new file mode 100644 (file)
index 0000000..ffebb4a
--- /dev/null
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import FakeYDL
+
+
+class YDL(FakeYDL):
+    def __init__(self, *args, **kwargs):
+        super(YDL, self).__init__(*args, **kwargs)
+        self.downloaded_info_dicts = []
+        self.msgs = []
+
+    def process_info(self, info_dict):
+        self.downloaded_info_dicts.append(info_dict)
+
+    def to_screen(self, msg):
+        self.msgs.append(msg)
+
+
+class TestFormatSelection(unittest.TestCase):
+    def test_prefer_free_formats(self):
+        # Same resolution => download webm
+        ydl = YDL()
+        ydl.params['prefer_free_formats'] = True
+        formats = [
+            {u'ext': u'webm', u'height': 460},
+            {u'ext': u'mp4',  u'height': 460},
+        ]
+        info_dict = {u'formats': formats, u'extractor': u'test'}
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded[u'ext'], u'webm')
+
+        # Different resolution => download best quality (mp4)
+        ydl = YDL()
+        ydl.params['prefer_free_formats'] = True
+        formats = [
+            {u'ext': u'webm', u'height': 720},
+            {u'ext': u'mp4', u'height': 1080},
+        ]
+        info_dict[u'formats'] = formats
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded[u'ext'], u'mp4')
+
+        # No prefer_free_formats => keep original formats order
+        ydl = YDL()
+        ydl.params['prefer_free_formats'] = False
+        formats = [
+            {u'ext': u'webm', u'height': 720},
+            {u'ext': u'flv', u'height': 720},
+        ]
+        info_dict[u'formats'] = formats
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded[u'ext'], u'flv')
+
+    def test_format_limit(self):
+        formats = [
+            {u'format_id': u'meh', u'url': u'http://example.com/meh'},
+            {u'format_id': u'good', u'url': u'http://example.com/good'},
+            {u'format_id': u'great', u'url': u'http://example.com/great'},
+            {u'format_id': u'excellent', u'url': u'http://example.com/exc'},
+        ]
+        info_dict = {
+            u'formats': formats, u'extractor': u'test', 'id': 'testvid'}
+
+        ydl = YDL()
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded[u'format_id'], u'excellent')
+
+        ydl = YDL({'format_limit': 'good'})
+        assert ydl.params['format_limit'] == 'good'
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded[u'format_id'], u'good')
+
+        ydl = YDL({'format_limit': 'great', 'format': 'all'})
+        ydl.process_ie_result(info_dict)
+        self.assertEqual(ydl.downloaded_info_dicts[0][u'format_id'], u'meh')
+        self.assertEqual(ydl.downloaded_info_dicts[1][u'format_id'], u'good')
+        self.assertEqual(ydl.downloaded_info_dicts[2][u'format_id'], u'great')
+        self.assertTrue('3' in ydl.msgs[0])
+
+        ydl = YDL()
+        ydl.params['format_limit'] = 'excellent'
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded[u'format_id'], u'excellent')
+
+    def test_format_selection(self):
+        formats = [
+            {u'format_id': u'35', u'ext': u'mp4'},
+            {u'format_id': u'45', u'ext': u'webm'},
+            {u'format_id': u'47', u'ext': u'webm'},
+            {u'format_id': u'2', u'ext': u'flv'},
+        ]
+        info_dict = {u'formats': formats, u'extractor': u'test'}
+
+        ydl = YDL({'format': u'20/47'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'47')
+
+        ydl = YDL({'format': u'20/71/worst'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'35')
+
+        ydl = YDL()
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'2')
+
+        ydl = YDL({'format': u'webm/mp4'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'47')
+
+        ydl = YDL({'format': u'3gp/40/mp4'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'35')
+
+
+if __name__ == '__main__':
+    unittest.main()
index ec3e30572fa6c15e7a3c24839e40f409428f41b2..d500c6edceb6018510b9226d925d9f407b72fcbd 100644 (file)
@@ -1,14 +1,16 @@
 #!/usr/bin/env python
 
+# Allow direct execution
+import os
 import sys
 import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import global_setup, try_rm
+global_setup()
 
-# Allow direct execution
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from youtube_dl import YoutubeDL
-from .helper import try_rm
 
 
 def _download_restricted(url, filename, age):
index b28ad000bc8d7a1f172ee12bd218e0b96ad08c2b..56e5f80e1f6ddb17fef3ee5c499c238996c12051 100644 (file)
@@ -1,14 +1,20 @@
 #!/usr/bin/env python
 
+# Allow direct execution
+import os
 import sys
 import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-# Allow direct execution
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from youtube_dl.extractor import YoutubeIE, YoutubePlaylistIE, YoutubeChannelIE, JustinTVIE, gen_extractors
-from .helper import get_testcases
+from test.helper import get_testcases
+
+from youtube_dl.extractor import (
+    gen_extractors,
+    JustinTVIE,
+    YoutubeIE,
+)
+
 
 class TestAllURLsMatching(unittest.TestCase):
     def setUp(self):
index e655d280deb0640e32f71a3d76abac16e7d7f68e..ba3580ea419d513d8e92c97f813c236ce9d596e0 100644 (file)
@@ -1,18 +1,16 @@
 #!/usr/bin/env python
 
+# Allow direct execution
+import os
 import sys
 import unittest
-import hashlib
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-# Allow direct execution
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from test.helper import FakeYDL, global_setup, md5
+global_setup()
 
-from youtube_dl.extractor import DailymotionIE
-from youtube_dl.utils import *
-from .helper import FakeYDL
 
-md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
+from youtube_dl.extractor import DailymotionIE
 
 class TestDailymotionSubtitles(unittest.TestCase):
     def setUp(self):
@@ -24,7 +22,7 @@ class TestDailymotionSubtitles(unittest.TestCase):
         return info_dict
     def getSubtitles(self):
         info_dict = self.getInfoDict()
-        return info_dict[0]['subtitles']
+        return info_dict['subtitles']
     def test_no_writesubtitles(self):
         subtitles = self.getSubtitles()
         self.assertEqual(subtitles, None)
index 68da4d98450e12a3bae790e43e62f5d8dc9b7909..dfb04d010a0814037a1da4aac52ff1f65c2b1ab6 100644 (file)
@@ -1,26 +1,39 @@
 #!/usr/bin/env python
 
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import (
+    get_params,
+    get_testcases,
+    global_setup,
+    try_rm,
+    md5,
+    report_warning
+)
+global_setup()
+
+
 import hashlib
 import io
-import os
 import json
-import unittest
-import sys
 import socket
-import binascii
-
-# Allow direct execution
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 import youtube_dl.YoutubeDL
-from youtube_dl.utils import *
-
-PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
+from youtube_dl.utils import (
+    compat_str,
+    compat_urllib_error,
+    compat_HTTPError,
+    DownloadError,
+    ExtractorError,
+    UnavailableVideoError,
+)
 
 RETRIES = 3
 
-md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
-
 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
@@ -37,18 +50,12 @@ def _file_md5(fn):
     with open(fn, 'rb') as f:
         return hashlib.md5(f.read()).hexdigest()
 
-import test.helper as helper  # Set up remaining global configuration
-from .helper import get_testcases, try_rm
 defs = get_testcases()
 
-with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
-    parameters = json.load(pf)
-
 
 class TestDownload(unittest.TestCase):
     maxDiff = None
     def setUp(self):
-        self.parameters = parameters
         self.defs = defs
 
 ### Dynamically generate tests
@@ -61,15 +68,17 @@ def generator(test_case):
         if not ie._WORKING:
             print_skipping('IE marked as not _WORKING')
             return
-        if 'playlist' not in test_case and not test_case['file']:
-            print_skipping('No output file specified')
-            return
+        if 'playlist' not in test_case:
+            info_dict = test_case.get('info_dict', {})
+            if not test_case.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
+                print_skipping('The output file cannot be know, the "file" '
+                    'key is missing or the info_dict is incomplete')
+                return
         if 'skip' in test_case:
             print_skipping(test_case['skip'])
             return
 
-        params = self.parameters.copy()
-        params.update(test_case.get('params', {}))
+        params = get_params(test_case.get('params', {}))
 
         ydl = YoutubeDL(params)
         ydl.add_default_info_extractors()
@@ -79,35 +88,47 @@ def generator(test_case):
                 finished_hook_called.add(status['filename'])
         ydl.fd.add_progress_hook(_hook)
 
+        def get_tc_filename(tc):
+            return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
+
         test_cases = test_case.get('playlist', [test_case])
-        for tc in test_cases:
-            try_rm(tc['file'])
-            try_rm(tc['file'] + '.part')
-            try_rm(tc['file'] + '.info.json')
+        def try_rm_tcs_files():
+            for tc in test_cases:
+                tc_filename = get_tc_filename(tc)
+                try_rm(tc_filename)
+                try_rm(tc_filename + '.part')
+                try_rm(tc_filename + '.info.json')
+        try_rm_tcs_files()
         try:
-            for retry in range(1, RETRIES + 1):
+            try_num = 1
+            while True:
                 try:
                     ydl.download([test_case['url']])
                 except (DownloadError, ExtractorError) as err:
-                    if retry == RETRIES: raise
-
                     # Check if the exception is not a network related one
-                    if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
+                    if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                         raise
 
-                    print('Retrying: {0} failed tries\n\n##########\n\n'.format(retry))
+                    if try_num == RETRIES:
+                        report_warning(u'Failed due to network errors, skipping...')
+                        return
+
+                    print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))
+
+                    try_num += 1
                 else:
                     break
 
             for tc in test_cases:
+                tc_filename = get_tc_filename(tc)
                 if not test_case.get('params', {}).get('skip_download', False):
-                    self.assertTrue(os.path.exists(tc['file']), msg='Missing file ' + tc['file'])
-                    self.assertTrue(tc['file'] in finished_hook_called)
-                self.assertTrue(os.path.exists(tc['file'] + '.info.json'))
+                    self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
+                    self.assertTrue(tc_filename in finished_hook_called)
+                self.assertTrue(os.path.exists(tc_filename + '.info.json'))
                 if 'md5' in tc:
-                    md5_for_file = _file_md5(tc['file'])
+                    md5_for_file = _file_md5(tc_filename)
                     self.assertEqual(md5_for_file, tc['md5'])
-                with io.open(tc['file'] + '.info.json', encoding='utf-8') as infof:
+                with io.open(tc_filename + '.info.json', encoding='utf-8') as infof:
                     info_dict = json.load(infof)
                 for (info_field, expected) in tc.get('info_dict', {}).items():
                     if isinstance(expected, compat_str) and expected.startswith('md5:'):
@@ -128,10 +149,7 @@ def generator(test_case):
                 for key in ('id', 'url', 'title', 'ext'):
                     self.assertTrue(key in info_dict.keys() and info_dict[key])
         finally:
-            for tc in test_cases:
-                try_rm(tc['file'])
-                try_rm(tc['file'] + '.part')
-                try_rm(tc['file'] + '.info.json')
+            try_rm_tcs_files()
 
     return test_template
 
index 108a4d63bc60e8bc5a20335798ae43ed510eea1f..d6a8d56df99609e50ea5885d2f5a3eb48b72cf37 100644 (file)
@@ -1,13 +1,16 @@
 #!/usr/bin/env python
 # encoding: utf-8
 
-import sys
-import unittest
-import json
 
 # Allow direct execution
 import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import FakeYDL, global_setup
+global_setup()
+
 
 from youtube_dl.extractor import (
     DailymotionPlaylistIE,
@@ -18,9 +21,7 @@ from youtube_dl.extractor import (
     LivestreamIE,
     NHLVideocenterIE,
 )
-from youtube_dl.utils import *
 
-from .helper import FakeYDL
 
 class TestPlaylists(unittest.TestCase):
     def assertIsPlaylist(self, info):
index f2c03d42149b3cacac4af11ea16df6cd45c6e4d0..f3fbff042ccc8193d8d08527fdc04421c9832305 100644 (file)
@@ -1,14 +1,15 @@
 #!/usr/bin/env python
+# coding: utf-8
 
-# Various small unit tests
-
+# Allow direct execution
+import os
 import sys
 import unittest
-import xml.etree.ElementTree
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-# Allow direct execution
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+# Various small unit tests
+import xml.etree.ElementTree
 
 #from youtube_dl.utils import htmlentity_transform
 from youtube_dl.utils import (
@@ -21,6 +22,8 @@ from youtube_dl.utils import (
     find_xpath_attr,
     get_meta_content,
     xpath_with_ns,
+    smuggle_url,
+    unsmuggle_url,
 )
 
 if sys.version_info < (3, 0):
@@ -155,5 +158,18 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(find('media:song/media:author').text, u'The Author')
         self.assertEqual(find('media:song/url').text, u'http://server.com/download.mp3')
 
+    def test_smuggle_url(self):
+        data = {u"ö": u"ö", u"abc": [3]}
+        url = 'https://foo.bar/baz?x=y#a'
+        smug_url = smuggle_url(url, data)
+        unsmug_url, unsmug_data = unsmuggle_url(smug_url)
+        self.assertEqual(url, unsmug_url)
+        self.assertEqual(data, unsmug_data)
+
+        res_url, res_data = unsmuggle_url(url)
+        self.assertEqual(res_url, url)
+        self.assertEqual(res_data, None)
+
+
 if __name__ == '__main__':
     unittest.main()
index ba7a9f50a861fcf54c505a3440cdba7b622554ba..35defb8953402a74ff71b7a9a14cec105a5f1703 100644 (file)
@@ -1,39 +1,37 @@
 #!/usr/bin/env python
 # coding: utf-8
 
-import xml.etree.ElementTree
+# Allow direct execution
 import os
 import sys
 import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-# Allow direct execution
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from test.helper import get_params, global_setup, try_rm
+global_setup()
+
+
+import io
+
+import xml.etree.ElementTree
 
 import youtube_dl.YoutubeDL
 import youtube_dl.extractor
-from youtube_dl.utils import *
-from .helper import try_rm
-
-PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
 
-# General configuration (from __init__, not very elegant...)
-jar = compat_cookiejar.CookieJar()
-cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
-proxy_handler = compat_urllib_request.ProxyHandler()
-opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
-compat_urllib_request.install_opener(opener)
 
 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
         super(YoutubeDL, self).__init__(*args, **kwargs)
         self.to_stderr = self.to_screen
 
-with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
-    params = json.load(pf)
-params['writeannotations'] = True
-params['skip_download'] = True
-params['writeinfojson'] = False
-params['format'] = 'flv'
+params = get_params({
+    'writeannotations': True,
+    'skip_download': True,
+    'writeinfojson': False,
+    'format': 'flv',
+})
+
+
 
 TEST_ID = 'gr51aVj-mLg'
 ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
index de6d5180fc0a88a66b747548352fdf72775d7c00..a5b6f6972df48f6b7cdcfebc3ea32d11c6a27afa 100644 (file)
@@ -1,37 +1,34 @@
 #!/usr/bin/env python
 # coding: utf-8
 
-import json
+# Allow direct execution
 import os
 import sys
 import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-# Allow direct execution
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from test.helper import get_params, global_setup
+global_setup()
+
+
+import io
+import json
 
 import youtube_dl.YoutubeDL
 import youtube_dl.extractor
-from youtube_dl.utils import *
-
-PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
 
-# General configuration (from __init__, not very elegant...)
-jar = compat_cookiejar.CookieJar()
-cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
-proxy_handler = compat_urllib_request.ProxyHandler()
-opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
-compat_urllib_request.install_opener(opener)
 
 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
         super(YoutubeDL, self).__init__(*args, **kwargs)
         self.to_stderr = self.to_screen
 
-with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
-    params = json.load(pf)
-params['writeinfojson'] = True
-params['skip_download'] = True
-params['writedescription'] = True
+params = get_params({
+    'writeinfojson': True,
+    'skip_download': True,
+    'writedescription': True,
+})
+
 
 TEST_ID = 'BaW_jenozKc'
 INFO_JSON_FILE = TEST_ID + '.mp4.info.json'
@@ -42,6 +39,7 @@ This is a test video for youtube-dl.
 
 For more information, contact phihag@phihag.de .'''
 
+
 class TestInfoJSON(unittest.TestCase):
     def setUp(self):
         # Clear old files
index 0b5c790301b6c7c1a87d43c5ae7789de545e8efa..4b7a7847bd3a33a9a2bff3e99f9f4cff0de7eebf 100644 (file)
@@ -1,20 +1,26 @@
 #!/usr/bin/env python
 
+# Allow direct execution
+import os
 import sys
 import unittest
-import json
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import FakeYDL, global_setup
+global_setup()
 
-# Allow direct execution
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from youtube_dl.extractor import YoutubeUserIE, YoutubePlaylistIE, YoutubeIE, YoutubeChannelIE, YoutubeShowIE
-from youtube_dl.utils import *
+from youtube_dl.extractor import (
+    YoutubeUserIE,
+    YoutubePlaylistIE,
+    YoutubeIE,
+    YoutubeChannelIE,
+    YoutubeShowIE,
+)
 
-from .helper import FakeYDL
 
 class TestYoutubeLists(unittest.TestCase):
-    def assertIsPlaylist(self,info):
+    def assertIsPlaylist(self, info):
         """Make sure the info has '_type' set to 'playlist'"""
         self.assertEqual(info['_type'], 'playlist')
 
@@ -100,7 +106,7 @@ class TestYoutubeLists(unittest.TestCase):
         dl = FakeYDL()
         ie = YoutubeShowIE(dl)
         result = ie.extract('http://www.youtube.com/show/airdisasters')
-        self.assertTrue(len(result) >= 4)
+        self.assertTrue(len(result) >= 3)
 
 if __name__ == '__main__':
     unittest.main()
index 5007d9a16305f055d38231cf2626dedcbd0c70ee..5e1ff5eb0ede5bcb020cd027ca00d5b4159f9812 100644 (file)
@@ -1,14 +1,18 @@
 #!/usr/bin/env python
 
-import io
-import re
-import string
+# Allow direct execution
+import os
 import sys
 import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-# Allow direct execution
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from test.helper import global_setup
+global_setup()
+
+
+import io
+import re
+import string
 
 from youtube_dl.extractor import YoutubeIE
 from youtube_dl.utils import compat_str, compat_urlretrieve
index 07850385e0d33bc17f5de3b68698374c838bf6fc..00430a338af7edfcdc7ea5f0380b888e86563ec4 100644 (file)
@@ -1,69 +1,79 @@
 #!/usr/bin/env python
 
+# Allow direct execution
+import os
 import sys
 import unittest
-import hashlib
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import FakeYDL, global_setup, md5
+global_setup()
 
-# Allow direct execution
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from youtube_dl.extractor import YoutubeIE
-from youtube_dl.utils import *
-from .helper import FakeYDL
 
-md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
 
 class TestYoutubeSubtitles(unittest.TestCase):
     def setUp(self):
         self.DL = FakeYDL()
         self.url = 'QRS8MkLhQmM'
+
     def getInfoDict(self):
         IE = YoutubeIE(self.DL)
         info_dict = IE.extract(self.url)
         return info_dict
+
     def getSubtitles(self):
         info_dict = self.getInfoDict()
-        return info_dict[0]['subtitles']        
+        return info_dict[0]['subtitles']
+
     def test_youtube_no_writesubtitles(self):
         self.DL.params['writesubtitles'] = False
         subtitles = self.getSubtitles()
         self.assertEqual(subtitles, None)
+
     def test_youtube_subtitles(self):
         self.DL.params['writesubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260')
+
     def test_youtube_subtitles_lang(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['subtitleslangs'] = ['it']
         subtitles = self.getSubtitles()
         self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')
+
     def test_youtube_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(len(subtitles.keys()), 13)
+
     def test_youtube_subtitles_sbv_format(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['subtitlesformat'] = 'sbv'
         subtitles = self.getSubtitles()
         self.assertEqual(md5(subtitles['en']), '13aeaa0c245a8bed9a451cb643e3ad8b')
+
     def test_youtube_subtitles_vtt_format(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['subtitlesformat'] = 'vtt'
         subtitles = self.getSubtitles()
         self.assertEqual(md5(subtitles['en']), '356cdc577fde0c6783b9b822e7206ff7')
+
     def test_youtube_list_subtitles(self):
         self.DL.expect_warning(u'Video doesn\'t have automatic captions')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
+
     def test_youtube_automatic_captions(self):
         self.url = '8YoUxe5ncPo'
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslangs'] = ['it']
         subtitles = self.getSubtitles()
         self.assertTrue(subtitles['it'] is not None)
+
     def test_youtube_nosubtitles(self):
         self.DL.expect_warning(u'video doesn\'t have subtitles')
         self.url = 'sAjKT8FhjI8'
@@ -71,6 +81,7 @@ class TestYoutubeSubtitles(unittest.TestCase):
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(len(subtitles), 0)
+
     def test_youtube_multiple_langs(self):
         self.url = 'QRS8MkLhQmM'
         self.DL.params['writesubtitles'] = True
index 039e014982e2396ad3175a4c3fcd3dc15030952e..13b56ede5fdb3d66064a8072cdda87787eee1bae 100644 (file)
@@ -2,9 +2,15 @@ import os
 import subprocess
 import sys
 import time
-import datetime
 
-from .utils import *
+
+from .utils import (
+    compat_subprocess_get_DEVNULL,
+    encodeFilename,
+    PostProcessingError,
+    shell_quote,
+    subtitles_filename,
+)
 
 
 class PostProcessor(object):
index c8054544a60db20f041d22c5ee6e5405d935061a..7f73ea3605345d94db57dfaab0819cec35b8aca3 100644 (file)
@@ -91,7 +91,7 @@ class YoutubeDL(object):
     downloadarchive:   File name of a file where all downloads are recorded.
                        Videos already present in the file are not downloaded
                        again.
-    
+
     The following parameters are not used by YoutubeDL itself, they are used by
     the FileDownloader:
     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
@@ -216,10 +216,10 @@ class YoutubeDL(object):
         If stderr is a tty file the 'WARNING:' will be colored
         '''
         if sys.stderr.isatty() and os.name != 'nt':
-            _msg_header=u'\033[0;33mWARNING:\033[0m'
+            _msg_header = u'\033[0;33mWARNING:\033[0m'
         else:
-            _msg_header=u'WARNING:'
-        warning_message=u'%s %s' % (_msg_header,message)
+            _msg_header = u'WARNING:'
+        warning_message = u'%s %s' % (_msg_header, message)
         self.to_stderr(warning_message)
 
     def report_error(self, message, tb=None):
@@ -234,19 +234,6 @@ class YoutubeDL(object):
         error_message = u'%s %s' % (_msg_header, message)
         self.trouble(error_message, tb)
 
-    def slow_down(self, start_time, byte_counter):
-        """Sleep if the download speed is over the rate limit."""
-        rate_limit = self.params.get('ratelimit', None)
-        if rate_limit is None or byte_counter == 0:
-            return
-        now = time.time()
-        elapsed = now - start_time
-        if elapsed <= 0.0:
-            return
-        speed = float(byte_counter) / elapsed
-        if speed > rate_limit:
-            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
-
     def report_writedescription(self, descfn):
         """ Report that the description file is being written """
         self.to_screen(u'[info] Writing video description to: ' + descfn)
@@ -285,16 +272,18 @@ class YoutubeDL(object):
                 autonumber_size = 5
             autonumber_templ = u'%0' + str(autonumber_size) + u'd'
             template_dict['autonumber'] = autonumber_templ % self._num_downloads
-            if template_dict['playlist_index'] is not None:
+            if template_dict.get('playlist_index') is not None:
                 template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']
 
-            sanitize = lambda k,v: sanitize_filename(
+            sanitize = lambda k, v: sanitize_filename(
                 u'NA' if v is None else compat_str(v),
                 restricted=self.params.get('restrictfilenames'),
-                is_id=(k==u'id'))
-            template_dict = dict((k, sanitize(k, v)) for k,v in template_dict.items())
+                is_id=(k == u'id'))
+            template_dict = dict((k, sanitize(k, v))
+                                 for k, v in template_dict.items())
 
-            filename = self.params['outtmpl'] % template_dict
+            tmpl = os.path.expanduser(self.params['outtmpl'])
+            filename = tmpl % template_dict
             return filename
         except KeyError as err:
             self.report_error(u'Erroneous output template')
@@ -328,14 +317,14 @@ class YoutubeDL(object):
             return (u'%(title)s has already been recorded in archive'
                     % info_dict)
         return None
-        
+
     def extract_info(self, url, download=True, ie_key=None, extra_info={}):
         '''
         Returns a list with a dictionary for each video we find.
         If 'download', also downloads the videos.
         extra_info is a dict containing the extra values to add to each result
          '''
-        
+
         if ie_key:
             ies = [self.get_info_extractor(ie_key)]
         else:
@@ -377,7 +366,7 @@ class YoutubeDL(object):
                     raise
         else:
             self.report_error(u'no suitable InfoExtractor: %s' % url)
-        
+
     def process_ie_result(self, ie_result, download=True, extra_info={}):
         """
         Take the result of the ie(may be modified) and resolve all unresolved
@@ -390,13 +379,7 @@ class YoutubeDL(object):
         result_type = ie_result.get('_type', 'video') # If not given we suppose it's a video, support the default old system
         if result_type == 'video':
             ie_result.update(extra_info)
-            if 'playlist' not in ie_result:
-                # It isn't part of a playlist
-                ie_result['playlist'] = None
-                ie_result['playlist_index'] = None
-            if download:
-                self.process_info(ie_result)
-            return ie_result
+            return self.process_video_result(ie_result)
         elif result_type == 'url':
             # We have to add extra_info to the results because it may be
             # contained in a playlist
@@ -407,7 +390,7 @@ class YoutubeDL(object):
         elif result_type == 'playlist':
             # We process each entry in the playlist
             playlist = ie_result.get('title', None) or ie_result.get('id', None)
-            self.to_screen(u'[download] Downloading playlist: %s'  % playlist)
+            self.to_screen(u'[download] Downloading playlist: %s' % playlist)
 
             playlist_results = []
 
@@ -425,12 +408,12 @@ class YoutubeDL(object):
             self.to_screen(u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
                 (ie_result['extractor'], playlist, n_all_entries, n_entries))
 
-            for i,entry in enumerate(entries,1):
-                self.to_screen(u'[download] Downloading video #%s of %s' %(i, n_entries))
+            for i, entry in enumerate(entries, 1):
+                self.to_screen(u'[download] Downloading video #%s of %s' % (i, n_entries))
                 extra = {
-                         'playlist': playlist, 
-                         'playlist_index': i + playliststart,
-                         }
+                    'playlist': playlist,
+                    'playlist_index': i + playliststart,
+                }
                 if not 'extractor' in entry:
                     # We set the extractor, if it's an url it will be set then to
                     # the new extractor, but if it's already a video we must make
@@ -454,6 +437,107 @@ class YoutubeDL(object):
         else:
             raise Exception('Invalid result type: %s' % result_type)
 
+    def select_format(self, format_spec, available_formats):
+        if format_spec == 'best' or format_spec is None:
+            return available_formats[-1]
+        elif format_spec == 'worst':
+            return available_formats[0]
+        else:
+            extensions = [u'mp4', u'flv', u'webm', u'3gp']
+            if format_spec in extensions:
+                filter_f = lambda f: f['ext'] == format_spec
+            else:
+                filter_f = lambda f: f['format_id'] == format_spec
+            matches = list(filter(filter_f, available_formats))
+            if matches:
+                return matches[-1]
+        return None
+
+    def process_video_result(self, info_dict, download=True):
+        assert info_dict.get('_type', 'video') == 'video'
+
+        if 'playlist' not in info_dict:
+            # It isn't part of a playlist
+            info_dict['playlist'] = None
+            info_dict['playlist_index'] = None
+
+        # This extractors handle format selection themselves
+        if info_dict['extractor'] in [u'youtube', u'Youku']:
+            if download:
+                self.process_info(info_dict)
+            return info_dict
+
+        # We now pick which formats have to be downloaded
+        if info_dict.get('formats') is None:
+            # There's only one format available
+            formats = [info_dict]
+        else:
+            formats = info_dict['formats']
+
+        # We check that all the formats have the format and format_id fields
+        for (i, format) in enumerate(formats):
+            if format.get('format_id') is None:
+                format['format_id'] = compat_str(i)
+            if format.get('format') is None:
+                format['format'] = u'{id} - {res}{note}'.format(
+                    id=format['format_id'],
+                    res=self.format_resolution(format),
+                    note=u' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
+                )
+            # Automatically determine file extension if missing
+            if 'ext' not in format:
+                format['ext'] = determine_ext(format['url'])
+
+        if self.params.get('listformats', None):
+            self.list_formats(info_dict)
+            return
+
+        format_limit = self.params.get('format_limit', None)
+        if format_limit:
+            formats = list(takewhile_inclusive(
+                lambda f: f['format_id'] != format_limit, formats
+            ))
+        if self.params.get('prefer_free_formats'):
+            def _free_formats_key(f):
+                try:
+                    ext_ord = [u'flv', u'mp4', u'webm'].index(f['ext'])
+                except ValueError:
+                    ext_ord = -1
+                # We only compare the extension if they have the same height and width
+                return (f.get('height'), f.get('width'), ext_ord)
+            formats = sorted(formats, key=_free_formats_key)
+
+        req_format = self.params.get('format', 'best')
+        if req_format is None:
+            req_format = 'best'
+        formats_to_download = []
+        # The -1 is for supporting YoutubeIE
+        if req_format in ('-1', 'all'):
+            formats_to_download = formats
+        else:
+            # We can accept formats requestd in the format: 34/5/best, we pick
+            # the first that is available, starting from left
+            req_formats = req_format.split('/')
+            for rf in req_formats:
+                selected_format = self.select_format(rf, formats)
+                if selected_format is not None:
+                    formats_to_download = [selected_format]
+                    break
+        if not formats_to_download:
+            raise ExtractorError(u'requested format not available',
+                                 expected=True)
+
+        if download:
+            if len(formats_to_download) > 1:
+                self.to_screen(u'[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
+            for format in formats_to_download:
+                new_info = dict(info_dict)
+                new_info.update(format)
+                self.process_info(new_info)
+        # We update the info dict with the best quality format (backwards compatibility)
+        info_dict.update(formats_to_download[-1])
+        return info_dict
+
     def process_info(self, info_dict):
         """Process a single resolved IE result."""
 
@@ -491,9 +575,9 @@ class YoutubeDL(object):
         if self.params.get('forceurl', False):
             # For RTMP URLs, also include the playpath
             compat_print(info_dict['url'] + info_dict.get('play_path', u''))
-        if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
+        if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
             compat_print(info_dict['thumbnail'])
-        if self.params.get('forcedescription', False) and 'description' in info_dict:
+        if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
             compat_print(info_dict['description'])
         if self.params.get('forcefilename', False) and filename is not None:
             compat_print(filename)
@@ -529,20 +613,20 @@ class YoutubeDL(object):
 
         if self.params.get('writeannotations', False):
             try:
-               annofn = filename + u'.annotations.xml'
-               self.report_writeannotations(annofn)
-               with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
-                   annofile.write(info_dict['annotations'])
+                annofn = filename + u'.annotations.xml'
+                self.report_writeannotations(annofn)
+                with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
+                    annofile.write(info_dict['annotations'])
             except (KeyError, TypeError):
                 self.report_warning(u'There are no annotations to write.')
             except (OSError, IOError):
-                 self.report_error(u'Cannot write annotations file: ' + annofn)
-                 return
+                self.report_error(u'Cannot write annotations file: ' + annofn)
+                return
 
         subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                        self.params.get('writeautomaticsub')])
 
-        if  subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
+        if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
             # subtitles download errors are already managed as troubles in relevant IE
             # that way it will silently go on when used with unsupporting IE
             subtitles = info_dict['subtitles']
@@ -564,7 +648,7 @@ class YoutubeDL(object):
             infofn = filename + u'.info.json'
             self.report_writeinfojson(infofn)
             try:
-                json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
+                json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle'])
                 write_json_file(json_info_dict, encodeFilename(infofn))
             except (OSError, IOError):
                 self.report_error(u'Cannot write metadata to JSON file ' + infofn)
@@ -634,7 +718,7 @@ class YoutubeDL(object):
         keep_video = None
         for pp in self._pps:
             try:
-                keep_video_wish,new_info = pp.run(info)
+                keep_video_wish, new_info = pp.run(info)
                 if keep_video_wish is not None:
                     if keep_video_wish:
                         keep_video = keep_video_wish
@@ -672,3 +756,38 @@ class YoutubeDL(object):
         vid_id = info_dict['extractor'] + u' ' + info_dict['id']
         with locked_file(fn, 'a', encoding='utf-8') as archive_file:
             archive_file.write(vid_id + u'\n')
+
+    @staticmethod
+    def format_resolution(format, default='unknown'):
+        if format.get('_resolution') is not None:
+            return format['_resolution']
+        if format.get('height') is not None:
+            if format.get('width') is not None:
+                res = u'%sx%s' % (format['width'], format['height'])
+            else:
+                res = u'%sp' % format['height']
+        else:
+            res = default
+        return res
+
+    def list_formats(self, info_dict):
+        def line(format):
+            return (u'%-15s%-10s%-12s%s' % (
+                format['format_id'],
+                format['ext'],
+                self.format_resolution(format),
+                format.get('format_note', ''),
+                )
+            )
+
+        formats = info_dict.get('formats', [info_dict])
+        formats_s = list(map(line, formats))
+        if len(formats) > 1:
+            formats_s[0] += (' ' if formats[0].get('format_note') else '') + '(worst)'
+            formats_s[-1] += (' ' if formats[-1].get('format_note') else '') + '(best)'
+
+        header_line = line({
+            'format_id': u'format code', 'ext': u'extension',
+            '_resolution': u'resolution', 'format_note': u'note'})
+        self.to_screen(u'[info] Available formats for %s:\n%s\n%s' %
+                       (info_dict['id'], header_line, u"\n".join(formats_s)))
index fb1270ea20163c84edbc1e83c587dd3b8e114960..48ffcbf8e1caa7b197c103d4fb0ebee972dddf60 100644 (file)
@@ -31,6 +31,7 @@ __authors__  = (
     'Huarong Huo',
     'Ismael Mejía',
     'Steffan \'Ruirize\' James',
+    'Andras Elso',
 )
 
 __license__ = 'Public Domain'
@@ -46,17 +47,43 @@ import shlex
 import socket
 import subprocess
 import sys
-import warnings
+import traceback
 import platform
 
 
-from .utils import *
+from .utils import (
+    compat_cookiejar,
+    compat_print,
+    compat_str,
+    compat_urllib_request,
+    DateRange,
+    decodeOption,
+    determine_ext,
+    DownloadError,
+    get_cachedir,
+    make_HTTPS_handler,
+    MaxDownloadsReached,
+    platform_name,
+    preferredencoding,
+    SameFileError,
+    std_headers,
+    write_string,
+    YoutubeDLHandler,
+)
 from .update import update_self
 from .version import __version__
-from .FileDownloader import *
+from .FileDownloader import (
+    FileDownloader,
+)
 from .extractor import gen_extractors
 from .YoutubeDL import YoutubeDL
-from .PostProcessor import *
+from .PostProcessor import (
+    FFmpegMetadataPP,
+    FFmpegVideoConvertor,
+    FFmpegExtractAudioPP,
+    FFmpegEmbedSubtitlePP,
+)
+
 
 def parseOpts(overrideArguments=None):
     def _readOptions(filename_bytes):
@@ -106,7 +133,7 @@ def parseOpts(overrideArguments=None):
 
     def _hide_login_info(opts):
         opts = list(opts)
-        for private_opt in ['-p', '--password', '-u', '--username']:
+        for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
             try:
                 i = opts.index(private_opt)
                 opts[i+1] = '<PRIVATE>'
@@ -152,6 +179,9 @@ def parseOpts(overrideArguments=None):
             action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
     general.add_option('-i', '--ignore-errors',
             action='store_true', dest='ignoreerrors', help='continue on download errors, for example to to skip unavailable videos in a playlist', default=False)
+    general.add_option('--abort-on-error',
+            action='store_false', dest='ignoreerrors',
+            help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
     general.add_option('--dump-user-agent',
             action='store_true', dest='dump_user_agent',
             help='display the current browser identification', default=False)
@@ -169,7 +199,7 @@ def parseOpts(overrideArguments=None):
     general.add_option('--proxy', dest='proxy', default=None, help='Use the specified HTTP/HTTPS proxy', metavar='URL')
     general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
     general.add_option(
-        '--cache-dir', dest='cachedir', default=get_cachedir(),
+        '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
         help='Location in the filesystem where youtube-dl can store downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl .')
     general.add_option(
         '--no-cache-dir', action='store_const', const=None, dest='cachedir',
@@ -208,7 +238,7 @@ def parseOpts(overrideArguments=None):
 
 
     video_format.add_option('-f', '--format',
-            action='store', dest='format', metavar='FORMAT',
+            action='store', dest='format', metavar='FORMAT', default='best',
             help='video format code, specifiy the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported')
     video_format.add_option('--all-formats',
             action='store_const', dest='format', help='download all available video formats', const='all')
@@ -240,11 +270,11 @@ def parseOpts(overrideArguments=None):
             help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
 
     downloader.add_option('-r', '--rate-limit',
-            dest='ratelimit', metavar='LIMIT', help='maximum download rate (e.g. 50k or 44.6m)')
+            dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
     downloader.add_option('-R', '--retries',
             dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
     downloader.add_option('--buffer-size',
-            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16k) (default is %default)', default="1024")
+            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
     downloader.add_option('--no-resize-buffer',
             action='store_true', dest='noresizebuffer',
             help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
@@ -286,6 +316,9 @@ def parseOpts(overrideArguments=None):
     verbosity.add_option('--dump-intermediate-pages',
             action='store_true', dest='dump_intermediate_pages', default=False,
             help='print downloaded pages to debug problems(very verbose)')
+    verbosity.add_option('--write-pages',
+            action='store_true', dest='write_pages', default=False,
+            help='Write downloaded pages to files in the current directory')
     verbosity.add_option('--youtube-print-sig-code',
             action='store_true', dest='youtube_print_sig_code', default=False,
             help=optparse.SUPPRESS_HELP)
@@ -305,7 +338,10 @@ def parseOpts(overrideArguments=None):
             help=('output filename template. Use %(title)s to get the title, '
                   '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
                   '%(autonumber)s to get an automatically incremented number, '
-                  '%(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), '
+                  '%(ext)s for the filename extension, '
+                  '%(format)s for the format description (like "22 - 1280x720" or "HD"),'
+                  '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"),'
+                  '%(upload_date)s for the upload date (YYYYMMDD), '
                   '%(extractor)s for the provider (youtube, metacafe, etc), '
                   '%(id)s for the video id , %(playlist)s for the playlist the video is in, '
                   '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
@@ -619,6 +655,7 @@ def _real_main(argv=None):
         'prefer_free_formats': opts.prefer_free_formats,
         'verbose': opts.verbose,
         'dump_intermediate_pages': opts.dump_intermediate_pages,
+        'write_pages': opts.write_pages,
         'test': opts.test,
         'keepvideo': opts.keepvideo,
         'min_filesize': opts.min_filesize,
@@ -688,7 +725,7 @@ def _real_main(argv=None):
     if opts.cookiefile is not None:
         try:
             jar.save()
-        except (IOError, OSError) as err:
+        except (IOError, OSError):
             sys.exit(u'ERROR: unable to save cookie jar')
 
     sys.exit(retcode)
index 748f12e5a7644e4a1f348900ec7dba7f80d181c9..bcf1cce7f9f52ba4283e975393640ebe3d8f9fd2 100644 (file)
@@ -72,6 +72,7 @@ from .jeuxvideo import JeuxVideoIE
 from .jukebox import JukeboxIE
 from .justintv import JustinTVIE
 from .kankan import KankanIE
+from .keezmovies import KeezMoviesIE
 from .kickstarter import KickStarterIE
 from .keek import KeekIE
 from .liveleak import LiveLeakIE
@@ -82,6 +83,7 @@ from .mit import TechTVMITIE, MITIE
 from .mixcloud import MixcloudIE
 from .mtv import MTVIE
 from .muzu import MuzuTVIE
+from .myspace import MySpaceIE
 from .myspass import MySpassIE
 from .myvideo import MyVideoIE
 from .naver import NaverIE
@@ -94,6 +96,7 @@ from .ooyala import OoyalaIE
 from .orf import ORFIE
 from .pbs import PBSIE
 from .photobucket import PhotobucketIE
+from .pornhub import PornHubIE
 from .pornotube import PornotubeIE
 from .rbmaradio import RBMARadioIE
 from .redtube import RedTubeIE
@@ -102,22 +105,27 @@ from .ro220 import Ro220IE
 from .rottentomatoes import RottenTomatoesIE
 from .roxwel import RoxwelIE
 from .rtlnow import RTLnowIE
+from .rutube import RutubeIE
 from .sina import SinaIE
 from .slashdot import SlashdotIE
 from .slideshare import SlideshareIE
 from .sohu import SohuIE
 from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
 from .southparkstudios import SouthParkStudiosIE
+from .spankwire import SpankwireIE
 from .spiegel import SpiegelIE
 from .stanfordoc import StanfordOpenClassroomIE
 from .statigram import StatigramIE
 from .steam import SteamIE
+from .sztvhu import SztvHuIE
 from .teamcoco import TeamcocoIE
+from .techtalks import TechTalksIE
 from .ted import TEDIE
 from .tf1 import TF1IE
 from .thisav import ThisAVIE
 from .traileraddict import TrailerAddictIE
 from .trilulilu import TriluliluIE
+from .tube8 import Tube8IE
 from .tudou import TudouIE
 from .tumblr import TumblrIE
 from .tutv import TutvIE
@@ -134,7 +142,9 @@ from .videofyme import VideofyMeIE
 from .videopremium import VideoPremiumIE
 from .vimeo import VimeoIE, VimeoChannelIE
 from .vine import VineIE
+from .vk import VKIE
 from .wat import WatIE
+from .websurg import WeBSurgIE
 from .weibo import WeiboIE
 from .wimp import WimpIE
 from .worldstarhiphop import WorldStarHipHopIE
index 82a785a19c34517c17da294ad64c1cbe7d22cba4..b99d4b96689c23a13379d4392484c3763ce0e36f 100644 (file)
@@ -17,8 +17,8 @@ class AddAnimeIE(InfoExtractor):
     IE_NAME = u'AddAnime'
     _TEST = {
         u'url': u'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
-        u'file': u'24MR3YO5SAS9.flv',
-        u'md5': u'1036a0e0cd307b95bd8a8c3a5c8cfaf1',
+        u'file': u'24MR3YO5SAS9.mp4',
+        u'md5': u'72954ea10bc979ab5e2eb288b21425a0',
         u'info_dict': {
             u"description": u"One Piece 606",
             u"title": u"One Piece 606"
@@ -31,7 +31,8 @@ class AddAnimeIE(InfoExtractor):
             video_id = mobj.group('video_id')
             webpage = self._download_webpage(url, video_id)
         except ExtractorError as ee:
-            if not isinstance(ee.cause, compat_HTTPError):
+            if not isinstance(ee.cause, compat_HTTPError) or \
+               ee.cause.code != 503:
                 raise
 
             redir_webpage = ee.cause.read().decode('utf-8')
@@ -60,16 +61,26 @@ class AddAnimeIE(InfoExtractor):
                 note=u'Confirming after redirect')
             webpage = self._download_webpage(url, video_id)
 
-        video_url = self._search_regex(r"var normal_video_file = '(.*?)';",
-                                       webpage, u'video file URL')
+        formats = []
+        for format_id in ('normal', 'hq'):
+            rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id)
+            video_url = self._search_regex(rex, webpage, u'video file URL',
+                                           fatal=False)
+            if not video_url:
+                continue
+            formats.append({
+                'format_id': format_id,
+                'url': video_url,
+            })
+        if not formats:
+            raise ExtractorError(u'Cannot find any video format!')
         video_title = self._og_search_title(webpage)
         video_description = self._og_search_description(webpage)
 
         return {
             '_type': 'video',
             'id':  video_id,
-            'url': video_url,
-            'ext': 'flv',
+            'formats': formats,
             'title': video_title,
             'description': video_description
         }
index 5ee8a67b14699a330914cd4f0e0f627ca9fca5a5..e10c74c112a0b7bbb9335e76cdff88524c4cbafb 100644 (file)
@@ -158,7 +158,9 @@ class ArteTVPlus7IE(InfoExtractor):
             'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
         }
 
-        formats = player_info['VSR'].values()
+        all_formats = player_info['VSR'].values()
+        # Some formats use the m3u8 protocol
+        all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
         def _match_lang(f):
             if f.get('versionCode') is None:
                 return True
@@ -170,16 +172,36 @@ class ArteTVPlus7IE(InfoExtractor):
             regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
             return any(re.match(r, f['versionCode']) for r in regexes)
         # Some formats may not be in the same language as the url
-        formats = filter(_match_lang, formats)
-        # Some formats use the m3u8 protocol
-        formats = filter(lambda f: f.get('videoFormat') != 'M3U8', formats)
+        formats = filter(_match_lang, all_formats)
+        formats = list(formats) # in python3 filter returns an iterator
+        if not formats:
+            # Some videos are only available in the 'Originalversion'
+            # they aren't tagged as being in French or German
+            if all(f['versionCode'] == 'VO' for f in all_formats):
+                formats = all_formats
+            else:
+                raise ExtractorError(u'The formats list is empty')
         # We order the formats by quality
-        formats = sorted(formats, key=lambda f: int(f.get('height',-1)))
+        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
+            sort_key = lambda f: ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
+        else:
+            sort_key = lambda f: int(f.get('height',-1))
+        formats = sorted(formats, key=sort_key)
         # Prefer videos without subtitles in the same language
         formats = sorted(formats, key=lambda f: re.match(r'VO(F|A)-STM\1', f.get('versionCode', '')) is None)
         # Pick the best quality
         def _format(format_info):
+            quality = format_info['quality']
+            m_quality = re.match(r'\w*? - (\d*)p', quality)
+            if m_quality is not None:
+                quality = m_quality.group(1)
+            if format_info.get('versionCode') is not None:
+                format_id = u'%s-%s' % (quality, format_info['versionCode'])
+            else:
+                format_id = quality
             info = {
+                'format_id': format_id,
+                'format_note': format_info.get('versionLibelle'),
                 'width': format_info.get('width'),
                 'height': format_info.get('height'),
             }
@@ -192,8 +214,6 @@ class ArteTVPlus7IE(InfoExtractor):
                 info['ext'] = determine_ext(info['url'])
             return info
         info_dict['formats'] = [_format(f) for f in formats]
-        # TODO: Remove when #980 has been merged 
-        info_dict.update(info_dict['formats'][-1])
 
         return info_dict
 
@@ -207,7 +227,7 @@ class ArteTVCreativeIE(ArteTVPlus7IE):
         u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
         u'file': u'050489-002.mp4',
         u'info_dict': {
-            u'title': u'Agentur Amateur #2 - Corporate Design',
+            u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',
         },
     }
 
index 745212f2fe731bf305e56e8087089c65efabbd68..1392f382a24c273604f0c67db7afafefbcec85b8 100644 (file)
@@ -53,6 +53,8 @@ class BrightcoveIE(InfoExtractor):
         # Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
         object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>',
                             lambda m: m.group(1) + '/>', object_str)
+        # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
+        object_str = object_str.replace(u'<--', u'<!--')
 
         object_doc = xml.etree.ElementTree.fromstring(object_str)
         assert u'BrightcoveExperience' in object_doc.attrib['class']
@@ -96,7 +98,10 @@ class BrightcoveIE(InfoExtractor):
         playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key,
                                                player_key, u'Downloading playlist information')
 
-        playlist_info = json.loads(playlist_info)['videoList']
+        json_data = json.loads(playlist_info)
+        if 'videoList' not in json_data:
+            raise ExtractorError(u'Empty playlist')
+        playlist_info = json_data['videoList']
         videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]
 
         return self.playlist_result(videos, playlist_id=playlist_info['id'],
index 6925b96c2ee1fd1e09624638805597259b068dcd..2fe1033f01d3ccc53b51f6b8c8b24efe490f5cb6 100644 (file)
@@ -55,30 +55,30 @@ class CinemassacreIE(InfoExtractor):
             video_description = None
 
         playerdata = self._download_webpage(playerdata_url, video_id)
-        base_url = self._html_search_regex(r'\'streamer\': \'(?P<base_url>rtmp://.*?)/(?:vod|Cinemassacre)\'',
-            playerdata, u'base_url')
-        base_url += '/Cinemassacre/'
-        # Important: The file names in playerdata are not used by the player and even wrong for some videos
-        sd_file = 'Cinemassacre-%s_high.mp4' % video_id
-        hd_file = 'Cinemassacre-%s.mp4' % video_id
-        video_thumbnail = 'http://image.screenwavemedia.com/Cinemassacre/Cinemassacre-%s_thumb_640x360.jpg' % video_id
+        url = self._html_search_regex(r'\'streamer\': \'(?P<url>[^\']+)\'', playerdata, u'url')
+
+        sd_file = self._html_search_regex(r'\'file\': \'(?P<sd_file>[^\']+)\'', playerdata, u'sd_file')
+        hd_file = self._html_search_regex(r'\'?file\'?: "(?P<hd_file>[^"]+)"', playerdata, u'hd_file')
+        video_thumbnail = self._html_search_regex(r'\'image\': \'(?P<thumbnail>[^\']+)\'', playerdata, u'thumbnail', fatal=False)
 
         formats = [
             {
-                'url': base_url + sd_file,
+                'url': url,
+                'play_path': 'mp4:' + sd_file,
                 'ext': 'flv',
                 'format': 'sd',
                 'format_id': 'sd',
             },
             {
-                'url': base_url + hd_file,
+                'url': url,
+                'play_path': 'mp4:' + hd_file,
                 'ext': 'flv',
                 'format': 'hd',
                 'format_id': 'hd',
             },
         ]
 
-        info = {
+        return {
             'id': video_id,
             'title': video_title,
             'formats': formats,
@@ -86,6 +86,3 @@ class CinemassacreIE(InfoExtractor):
             'upload_date': video_date,
             'thumbnail': video_thumbnail,
         }
-        # TODO: Remove when #980 has been merged
-        info.update(formats[-1])
-        return info
index 2a5a85dc67b4f7a57d04d4f21c1608aa2c47f7f3..cef4dce856fe9cc7339d28c18a9b23cc87dbfc8d 100644 (file)
@@ -14,6 +14,8 @@ from ..utils import (
     clean_html,
     compiled_regex_type,
     ExtractorError,
+    RegexNotFoundError,
+    sanitize_filename,
     unescapeHTML,
 )
 
@@ -61,9 +63,12 @@ class InfoExtractor(object):
                     * ext       Will be calculated from url if missing
                     * format    A human-readable description of the format
                                 ("mp4 container with h264/opus").
-                                Calculated from width and height if missing.
+                                Calculated from the format_id, width, height.
+                                and format_note fields if missing.
                     * format_id A short description of the format
                                 ("mp4_h264_opus" or "19")
+                    * format_note Additional info about the format
+                                ("3D" or "DASH video")
                     * width     Width of the video, if known
                     * height    Height of the video, if known
 
@@ -178,6 +183,17 @@ class InfoExtractor(object):
             self.to_screen(u'Dumping request to ' + url)
             dump = base64.b64encode(webpage_bytes).decode('ascii')
             self._downloader.to_screen(dump)
+        if self._downloader.params.get('write_pages', False):
+            try:
+                url = url_or_request.get_full_url()
+            except AttributeError:
+                url = url_or_request
+            raw_filename = ('%s_%s.dump' % (video_id, url))
+            filename = sanitize_filename(raw_filename, restricted=True)
+            self.to_screen(u'Saving request to ' + filename)
+            with open(filename, 'wb') as outf:
+                outf.write(webpage_bytes)
+
         content = webpage_bytes.decode(encoding, 'replace')
         return (content, urlh)
 
@@ -228,7 +244,7 @@ class InfoExtractor(object):
         Perform a regex search on the given string, using a single or a list of
         patterns returning the first matching group.
         In case of failure return a default value or raise a WARNING or a
-        ExtractorError, depending on fatal, specifying the field name.
+        RegexNotFoundError, depending on fatal, specifying the field name.
         """
         if isinstance(pattern, (str, compat_str, compiled_regex_type)):
             mobj = re.search(pattern, string, flags)
@@ -248,7 +264,7 @@ class InfoExtractor(object):
         elif default is not None:
             return default
         elif fatal:
-            raise ExtractorError(u'Unable to extract %s' % _name)
+            raise RegexNotFoundError(u'Unable to extract %s' % _name)
         else:
             self._downloader.report_warning(u'unable to extract %s; '
                 u'please report this issue on http://yt-dl.org/bug' % _name)
@@ -314,10 +330,10 @@ class InfoExtractor(object):
     def _og_search_title(self, html, **kargs):
         return self._og_search_property('title', html, **kargs)
 
-    def _og_search_video_url(self, html, name='video url', **kargs):
-        return self._html_search_regex([self._og_regex('video:secure_url'),
-                                        self._og_regex('video')],
-                                       html, name, **kargs)
+    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
+        regexes = [self._og_regex('video')]
+        if secure: regexes.insert(0, self._og_regex('video:secure_url'))
+        return self._html_search_regex(regexes, html, name, **kargs)
 
     def _rta_search(self, html):
         # See http://www.rtalabel.org/index.php?content=howtofaq#single
@@ -365,7 +381,7 @@ class SearchInfoExtractor(InfoExtractor):
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
-        raise NotImplementedError("This method must be implemented by sublclasses")
+        raise NotImplementedError("This method must be implemented by subclasses")
 
     @property
     def SEARCH_KEY(self):
index 3aef82bcf402e0a8795a71cc6210596f811e954d..e87690f9d288103ea222e1c216786b42e89364de 100644 (file)
@@ -21,6 +21,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
         """Build a request with the family filter disabled"""
         request = compat_urllib_request.Request(url)
         request.add_header('Cookie', 'family_filter=off')
+        request.add_header('Cookie', 'ff=off')
         return request
 
 class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
@@ -28,6 +29,15 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
     _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
     IE_NAME = u'dailymotion'
+
+    _FORMATS = [
+        (u'stream_h264_ld_url', u'ld'),
+        (u'stream_h264_url', u'standard'),
+        (u'stream_h264_hq_url', u'hq'),
+        (u'stream_h264_hd_url', u'hd'),
+        (u'stream_h264_hd1080_url', u'hd1080'),
+    ]
+
     _TESTS = [
         {
             u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
@@ -52,6 +62,18 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
             },
             u'skip': u'VEVO is only available in some countries',
         },
+        # age-restricted video
+        {
+            u'url': u'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
+            u'file': u'xyh2zz.mp4',
+            u'md5': u'0d667a7b9cebecc3c89ee93099c4159d',
+            u'info_dict': {
+                u'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
+                u'uploader': 'HotWaves1012',
+                u'age_limit': 18,
+            }
+
+        }
     ]
 
     def _real_extract(self, url):
@@ -60,7 +82,6 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
         video_id = mobj.group(1).split('_')[0].split('?')[0]
 
-        video_extension = 'mp4'
         url = 'http://www.dailymotion.com/video/%s' % video_id
 
         # Retrieve video webpage to extract further information
@@ -82,7 +103,8 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
         video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
                                              # Looking for official user
                                              r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
-                                            webpage, 'video uploader')
+                                            webpage, 'video uploader', fatal=False)
+        age_limit = self._rta_search(webpage)
 
         video_upload_date = None
         mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
@@ -99,18 +121,24 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
             msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
             raise ExtractorError(msg, expected=True)
 
-        # TODO: support choosing qualities
-
-        for key in ['stream_h264_hd1080_url','stream_h264_hd_url',
-                    'stream_h264_hq_url','stream_h264_url',
-                    'stream_h264_ld_url']:
-            if info.get(key):#key in info and info[key]:
-                max_quality = key
-                self.to_screen(u'Using %s' % key)
-                break
-        else:
+        formats = []
+        for (key, format_id) in self._FORMATS:
+            video_url = info.get(key)
+            if video_url is not None:
+                m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
+                if m_size is not None:
+                    width, height = m_size.group(1), m_size.group(2)
+                else:
+                    width, height = None, None
+                formats.append({
+                    'url': video_url,
+                    'ext': 'mp4',
+                    'format_id': format_id,
+                    'width': width,
+                    'height': height,
+                })
+        if not formats:
             raise ExtractorError(u'Unable to extract video URL')
-        video_url = info[max_quality]
 
         # subtitles
         video_subtitles = self.extract_subtitles(video_id, webpage)
@@ -118,16 +146,16 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
             self._list_available_subtitles(video_id, webpage)
             return
 
-        return [{
+        return {
             'id':       video_id,
-            'url':      video_url,
+            'formats': formats,
             'uploader': video_uploader,
             'upload_date':  video_upload_date,
             'title':    self._og_search_title(webpage),
-            'ext':      video_extension,
             'subtitles':    video_subtitles,
-            'thumbnail': info['thumbnail_url']
-        }]
+            'thumbnail': info['thumbnail_url'],
+            'age_limit': age_limit,
+        }
 
     def _get_available_subtitles(self, video_id, webpage):
         try:
index cced0681171a3dbc818e62ee2551da1958eacae2..2cfbcd363c0db4f2505d8da7120d7c3161a7b0a9 100644 (file)
@@ -101,7 +101,7 @@ class EightTracksIE(InfoExtractor):
         first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
         next_url = first_url
         res = []
-        for i in itertools.count():
+        for i in range(track_count):
             api_json = self._download_webpage(next_url, playlist_id,
                 note=u'Downloading song information %s/%s' % (str(i+1), track_count),
                 errnote=u'Failed to download song information')
@@ -116,7 +116,5 @@ class EightTracksIE(InfoExtractor):
                 'ext': 'm4a',
             }
             res.append(info)
-            if api_data['set']['at_last_track']:
-                break
             next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
         return res
index 3443f19c5f9bb8e2853c95b4ca5e153b395a701f..c7455657907f7c3a67e4544b7e25819c29042dc5 100644 (file)
@@ -11,14 +11,14 @@ class ExfmIE(InfoExtractor):
     _SOUNDCLOUD_URL = r'(?:http://)?(?:www\.)?api\.soundcloud.com/tracks/([^/]+)/stream'
     _TESTS = [
         {
-            u'url': u'http://ex.fm/song/1bgtzg',
-            u'file': u'95223130.mp3',
-            u'md5': u'8a7967a3fef10e59a1d6f86240fd41cf',
+            u'url': u'http://ex.fm/song/eh359',
+            u'file': u'44216187.mp3',
+            u'md5': u'e45513df5631e6d760970b14cc0c11e7',
             u'info_dict': {
-                u"title": u"We Can't Stop - Miley Cyrus",
-                u"uploader": u"Miley Cyrus",
-                u'upload_date': u'20130603',
-                u'description': u'Download "We Can\'t Stop" \r\niTunes: http://smarturl.it/WeCantStop?IQid=SC\r\nAmazon: http://smarturl.it/WeCantStopAMZ?IQid=SC',
+                u"title": u"Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive",
+                u"uploader": u"deadjournalist",
+                u'upload_date': u'20120424',
+                u'description': u'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
             },
             u'note': u'Soundcloud song',
         },
index 9d1bc07510c3148b8ed8659d697c46017c6a36ff..f8bdfc2d33c9f00b9f902a4303eb7024f4646312 100644 (file)
@@ -19,7 +19,8 @@ class FacebookIE(InfoExtractor):
     """Information Extractor for Facebook"""
 
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
-    _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
+    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
+    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
     _NETRC_MACHINE = 'facebook'
     IE_NAME = u'facebook'
     _TEST = {
@@ -36,50 +37,56 @@ class FacebookIE(InfoExtractor):
         """Report attempt to log in."""
         self.to_screen(u'Logging in')
 
-    def _real_initialize(self):
-        if self._downloader is None:
-            return
-
-        useremail = None
-        password = None
-        downloader_params = self._downloader.params
-
-        # Attempt to use provided username and password or .netrc data
-        if downloader_params.get('username', None) is not None:
-            useremail = downloader_params['username']
-            password = downloader_params['password']
-        elif downloader_params.get('usenetrc', False):
-            try:
-                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
-                if info is not None:
-                    useremail = info[0]
-                    password = info[2]
-                else:
-                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-            except (IOError, netrc.NetrcParseError) as err:
-                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
-                return
-
+    def _login(self):
+        (useremail, password) = self._get_login_info()
         if useremail is None:
             return
 
-        # Log in
+        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
+        login_page_req.add_header('Cookie', 'locale=en_US')
+        self.report_login()
+        login_page = self._download_webpage(login_page_req, None, note=False,
+            errnote=u'Unable to download login page')
+        lsd = self._search_regex(r'"lsd":"(\w*?)"', login_page, u'lsd')
+        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, u'lgnrnd')
+
         login_form = {
             'email': useremail,
             'pass': password,
-            'login': 'Log+In'
+            'lsd': lsd,
+            'lgnrnd': lgnrnd,
+            'next': 'http://facebook.com/home.php',
+            'default_persistent': '0',
+            'legacy_return': '1',
+            'timezone': '-60',
+            'trynum': '1',
             }
         request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:
-            self.report_login()
             login_results = compat_urllib_request.urlopen(request).read()
             if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                 self._downloader.report_warning(u'unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
                 return
+
+            check_form = {
+                'fb_dtsg': self._search_regex(r'"fb_dtsg":"(.*?)"', login_results, u'fb_dtsg'),
+                'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, u'nh'),
+                'name_action_selected': 'dont_save',
+                'submit[Continue]': self._search_regex(r'<input value="(.*?)" name="submit\[Continue\]"', login_results, u'continue'),
+            }
+            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, compat_urllib_parse.urlencode(check_form))
+            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+            check_response = compat_urllib_request.urlopen(check_req).read()
+            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
+                self._downloader.report_warning(u'Unable to confirm login, you have to login in your browser and authorize the login.')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
             return
 
+    def _real_initialize(self):
+        self._login()
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -93,7 +100,13 @@ class FacebookIE(InfoExtractor):
         AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
         m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
         if not m:
-            raise ExtractorError(u'Cannot parse data')
+            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
+            if m_msg is not None:
+                raise ExtractorError(
+                    u'The video is not available, Facebook said: "%s"' % m_msg.group(1),
+                    expected=True)
+            else:
+                raise ExtractorError(u'Cannot parse data')
         data = dict(json.loads(m.group(1)))
         params_raw = compat_urllib_parse.unquote(data['params'])
         params = json.loads(params_raw)
index deaa4ed2d9bc14406b6a7d3d6e8b015c6fcf915d..89ed08db4cbb99f9381013813fa03a19474c8e24 100644 (file)
@@ -5,8 +5,6 @@ import xml.etree.ElementTree
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
-    clean_html,
-    get_element_by_attribute,
 )
 
 
@@ -47,12 +45,12 @@ class FazIE(InfoExtractor):
                 'format_id': code.lower(),
             })
 
-        descr_html = get_element_by_attribute('class', 'Content Copy', webpage)
+        descr = self._html_search_regex(r'<p class="Content Copy">(.*?)</p>', webpage, u'description')
         info = {
             'id': video_id,
             'title': self._og_search_title(webpage),
             'formats': formats,
-            'description': clean_html(descr_html),
+            'description': descr,
             'thumbnail': config.find('STILL/STILL_BIG').text,
         }
         # TODO: Remove when #980 has been merged
index d48c84f8d575111dd0459056efd33e7338b1a1df..2c8fcf5ae5df24a1dedc5e461feb5ac2300688a6 100644 (file)
@@ -11,6 +11,8 @@ from ..utils import (
     compat_urlparse,
 
     ExtractorError,
+    smuggle_url,
+    unescapeHTML,
 )
 from .brightcove import BrightcoveIE
 
@@ -23,12 +25,33 @@ class GenericIE(InfoExtractor):
         {
             u'url': u'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
             u'file': u'13601338388002.mp4',
-            u'md5': u'85b90ccc9d73b4acd9138d3af4c27f89',
+            u'md5': u'6e15c93721d7ec9e9ca3fdbf07982cfd',
             u'info_dict': {
                 u"uploader": u"www.hodiho.fr",
                 u"title": u"R\u00e9gis plante sa Jeep"
             }
         },
+        # embedded vimeo video
+        {
+            u'url': u'http://skillsmatter.com/podcast/home/move-semanticsperfect-forwarding-and-rvalue-references',
+            u'file': u'22444065.mp4',
+            u'md5': u'2903896e23df39722c33f015af0666e2',
+            u'info_dict': {
+                u'title': u'ACCU 2011: Move Semantics,Perfect Forwarding, and Rvalue references- Scott Meyers- 13/04/2011',
+                u"uploader_id": u"skillsmatter",
+                u"uploader": u"Skills Matter",
+            }
+        },
+        # bandcamp page with custom domain
+        {
+            u'url': u'http://bronyrock.com/track/the-pony-mash',
+            u'file': u'3235767654.mp3',
+            u'info_dict': {
+                u'title': u'The Pony Mash',
+                u'uploader': u'M_Pallante',
+            },
+            u'skip': u'There is a limit of 200 free downloads / month for the test song',
+        },
     ]
 
     def report_download_webpage(self, video_id):
@@ -127,6 +150,27 @@ class GenericIE(InfoExtractor):
             bc_url = BrightcoveIE._build_brighcove_url(m_brightcove.group())
             return self.url_result(bc_url, 'Brightcove')
 
+        # Look for embedded Vimeo player
+        mobj = re.search(
+            r'<iframe[^>]+?src="(https?://player.vimeo.com/video/.+?)"', webpage)
+        if mobj:
+            player_url = unescapeHTML(mobj.group(1))
+            surl = smuggle_url(player_url, {'Referer': url})
+            return self.url_result(surl, 'Vimeo')
+
+        # Look for embedded YouTube player
+        mobj = re.search(
+            r'<iframe[^>]+?src="(https?://(?:www\.)?youtube.com/embed/.+?)"', webpage)
+        if mobj:
+            surl = unescapeHTML(mobj.group(1))
+            return self.url_result(surl, 'Youtube')
+
+        # Look for Bandcamp pages with custom domain
+        mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
+        if mobj is not None:
+            burl = unescapeHTML(mobj.group(1))
+            return self.url_result(burl, 'Bandcamp')
+
         # Start with something easy: JW Player in SWFObject
         mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
         if mobj is None:
index ab12d7e9381317b4dfddb679eced39db2f752ed4..2570746b2047a1d1ae0a60b48970b1414f168e40 100644 (file)
@@ -41,9 +41,9 @@ class GooglePlusIE(InfoExtractor):
 
         # Extract update date
         upload_date = self._html_search_regex(
-            r'''(?x)<a.+?class="o-T-s\s[^"]+"\s+style="display:\s*none"\s*>
+            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                     ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
-            webpage, u'upload date', fatal=False)
+            webpage, u'upload date', fatal=False, flags=re.VERBOSE)
         if upload_date:
             # Convert timestring to a format suitable for filename
             upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
index ddc42882a436a216cbd24b0b28d03da89ec27b0d..213aac428451bfcb860585b26de0e1c43abc732d 100644 (file)
@@ -26,7 +26,7 @@ class InstagramIE(InfoExtractor):
 
         return [{
             'id':        video_id,
-            'url':       self._og_search_video_url(webpage),
+            'url':       self._og_search_video_url(webpage, secure=False),
             'ext':       'mp4',
             'title':     u'Video by %s' % uploader_id,
             'thumbnail': self._og_search_thumbnail(webpage),
index 5986459d6dfdf7358b7d7a2e4bc139a579a01265..be8e05f539d7f64c301f7a63a488aedbf9d129cd 100644 (file)
@@ -19,7 +19,7 @@ class InternetVideoArchiveIE(InfoExtractor):
         u'info_dict': {
             u'title': u'SKYFALL',
             u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
-            u'duration': 156,
+            u'duration': 153,
         },
     }
 
@@ -74,7 +74,7 @@ class InternetVideoArchiveIE(InfoExtractor):
             })
         formats = sorted(formats, key=lambda f: f['bitrate'])
 
-        info = {
+        return {
             'id': video_id,
             'title': item.find('title').text,
             'formats': formats,
@@ -82,6 +82,3 @@ class InternetVideoArchiveIE(InfoExtractor):
             'description': item.find('description').text,
             'duration': int(attr['duration']),
         }
-        # TODO: Remove when #980 has been merged
-        info.update(formats[-1])
-        return info
diff --git a/youtube_dl/extractor/keezmovies.py b/youtube_dl/extractor/keezmovies.py
new file mode 100644 (file)
index 0000000..5e05900
--- /dev/null
@@ -0,0 +1,61 @@
+import os
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urllib_parse,
+)
+from ..aes import (
+    aes_decrypt_text
+)
+
+class KeezMoviesIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>keezmovies\.com/video/.+?(?P<videoid>[0-9]+))'
+    _TEST = {
+        u'url': u'http://www.keezmovies.com/video/petite-asian-lady-mai-playing-in-bathtub-1214711',
+        u'file': u'1214711.mp4',
+        u'md5': u'6e297b7e789329923fcf83abb67c9289',
+        u'info_dict': {
+            u"title": u"Petite Asian Lady Mai Playing In Bathtub",
+            u"age_limit": 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+        url = 'http://www.' + mobj.group('url')
+
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'age_verified=1')
+        webpage = self._download_webpage(req, video_id)
+
+        # embedded video
+        mobj = re.search(r'href="([^"]+)"></iframe>', webpage)
+        if mobj:
+            embedded_url = mobj.group(1)
+            return self.url_result(embedded_url)
+
+        video_title = self._html_search_regex(r'<h1 [^>]*>([^<]+)', webpage, u'title')
+        video_url = compat_urllib_parse.unquote(self._html_search_regex(r'video_url=(.+?)&amp;', webpage, u'video_url'))
+        if webpage.find('encrypted=true')!=-1:
+            password = self._html_search_regex(r'video_title=(.+?)&amp;', webpage, u'password')
+            video_url = aes_decrypt_text(video_url, password, 32).decode('utf-8')
+        path = compat_urllib_parse_urlparse( video_url ).path
+        extension = os.path.splitext( path )[1][1:]
+        format = path.split('/')[4].split('_')[:2]
+        format = "-".join( format )
+
+        age_limit = self._rta_search(webpage)
+
+        return {
+            'id': video_id,
+            'title': video_title,
+            'url': video_url,
+            'ext': extension,
+            'format': format,
+            'format_id': format,
+            'age_limit': age_limit,
+        }
index d04da98c89ed582e83e8bb905b15ff04c78d3018..4531fd6ab23a958d3d4dc4e38d52e7f330d85196 100644 (file)
@@ -40,13 +40,9 @@ class LivestreamIE(InfoExtractor):
 
         if video_id is None:
             # This is an event page:
-            player = get_meta_content('twitter:player', webpage)
-            if player is None:
-                raise ExtractorError('Couldn\'t extract event api url')
-            api_url = player.replace('/player', '')
-            api_url = re.sub(r'^(https?://)(new\.)', r'\1api.\2', api_url)
-            info = json.loads(self._download_webpage(api_url, event_name,
-                                                     u'Downloading event info'))
+            config_json = self._search_regex(r'window.config = ({.*?});',
+                webpage, u'window config')
+            info = json.loads(config_json)['event']
             videos = [self._extract_video_info(video_data['data'])
                 for video_data in info['feed']['data'] if video_data['type'] == u'video']
             return self.playlist_result(videos, info['id'], info['full_name'])
index e537648ffb83564e56f43f7e1e21a949cc609925..91480ba875d5fff781ce08a47c41a3824e94e910 100644 (file)
@@ -20,10 +20,12 @@ class MetacafeIE(InfoExtractor):
     _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
     _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
     IE_NAME = u'metacafe'
-    _TESTS = [{
+    _TESTS = [
+    # Youtube video
+    {
         u"add_ie": ["Youtube"],
         u"url":  u"http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
-        u"file":  u"_aUehQsCQtM.flv",
+        u"file":  u"_aUehQsCQtM.mp4",
         u"info_dict": {
             u"upload_date": u"20090102",
             u"title": u"The Electric Company | \"Short I\" | PBS KIDS GO!",
@@ -32,15 +34,42 @@ class MetacafeIE(InfoExtractor):
             u"uploader_id": u"PBS"
         }
     },
+    # Normal metacafe video
+    {
+        u'url': u'http://www.metacafe.com/watch/11121940/news_stuff_you_wont_do_with_your_playstation_4/',
+        u'md5': u'6e0bca200eaad2552e6915ed6fd4d9ad',
+        u'info_dict': {
+            u'id': u'11121940',
+            u'ext': u'mp4',
+            u'title': u'News: Stuff You Won\'t Do with Your PlayStation 4',
+            u'uploader': u'ign',
+            u'description': u'Sony released a massive FAQ on the PlayStation Blog detailing the PS4\'s capabilities and limitations.',
+        },
+    },
+    # AnyClip video
     {
         u"url": u"http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/",
         u"file": u"an-dVVXnuY7Jh77J.mp4",
         u"info_dict": {
             u"title": u"The Andromeda Strain (1971): Stop the Bomb Part 3",
             u"uploader": u"anyclip",
-            u"description": u"md5:38c711dd98f5bb87acf973d573442e67"
-        }
-    }]
+            u"description": u"md5:38c711dd98f5bb87acf973d573442e67",
+        },
+    },
+    # age-restricted video
+    {
+        u'url': u'http://www.metacafe.com/watch/5186653/bbc_internal_christmas_tape_79_uncensored_outtakes_etc/',
+        u'md5': u'98dde7c1a35d02178e8ab7560fe8bd09',
+        u'info_dict': {
+            u'id': u'5186653',
+            u'ext': u'mp4',
+            u'title': u'BBC INTERNAL Christmas Tape \'79 - UNCENSORED Outtakes, Etc.',
+            u'uploader': u'Dwayne Pipe',
+            u'description': u'md5:950bf4c581e2c059911fa3ffbe377e4b',
+            u'age_limit': 18,
+        },
+    },
+    ]
 
 
     def report_disclaimer(self):
@@ -62,6 +91,7 @@ class MetacafeIE(InfoExtractor):
             'submit': "Continue - I'm over 18",
             }
         request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
+        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:
             self.report_age_confirmation()
             compat_urllib_request.urlopen(request).read()
@@ -83,7 +113,12 @@ class MetacafeIE(InfoExtractor):
 
         # Retrieve video webpage to extract further information
         req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
-        req.headers['Cookie'] = 'flashVersion=0;'
+
+        # AnyClip videos require the flashversion cookie so that we get the link
+        # to the mp4 file
+        mobj_an = re.match(r'^an-(.*?)$', video_id)
+        if mobj_an:
+            req.headers['Cookie'] = 'flashVersion=0;'
         webpage = self._download_webpage(req, video_id)
 
         # Extract URL, uploader and title from webpage
@@ -125,6 +160,11 @@ class MetacafeIE(InfoExtractor):
                 r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
                 webpage, u'uploader nickname', fatal=False)
 
+        if re.search(r'"contentRating":"restricted"', webpage) is not None:
+            age_limit = 18
+        else:
+            age_limit = 0
+
         return {
             '_type':    'video',
             'id':       video_id,
@@ -134,4 +174,5 @@ class MetacafeIE(InfoExtractor):
             'upload_date':  None,
             'title':    video_title,
             'ext':      video_ext,
+            'age_limit': age_limit,
         }
index e520e2bb491f2c55f3867ab214b2b949eca6e684..e96d3952cc79ebe8294302d9795672b7a574590e 100644 (file)
@@ -80,6 +80,8 @@ class MTVIE(InfoExtractor):
         video_id = self._id_from_uri(uri)
         self.report_extraction(video_id)
         mediagen_url = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content'))).attrib['url']
+        # Remove the templates, like &device={device}
+        mediagen_url = re.sub(r'&[^=]*?={.*?}(?=(&|$))', u'', mediagen_url)
         if 'acceptMethods' not in mediagen_url:
             mediagen_url += '&acceptMethods=fms'
         mediagen_page = self._download_webpage(mediagen_url, video_id,
diff --git a/youtube_dl/extractor/myspace.py b/youtube_dl/extractor/myspace.py
new file mode 100644 (file)
index 0000000..050f54a
--- /dev/null
@@ -0,0 +1,48 @@
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_str,
+)
+
+
+class MySpaceIE(InfoExtractor):
+    _VALID_URL = r'https?://myspace\.com/([^/]+)/video/[^/]+/(?P<id>\d+)'
+
+    _TEST = {
+        u'url': u'https://myspace.com/coldplay/video/viva-la-vida/100008689',
+        u'info_dict': {
+            u'id': u'100008689',
+            u'ext': u'flv',
+            u'title': u'Viva La Vida',
+            u'description': u'The official Viva La Vida video, directed by Hype Williams',
+            u'uploader': u'Coldplay',
+            u'uploader_id': u'coldplay',
+        },
+        u'params': {
+            # rtmp download
+            u'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+        context = json.loads(self._search_regex(r'context = ({.*?});', webpage,
+            u'context'))
+        video = context['video']
+        rtmp_url, play_path = video['streamUrl'].split(';', 1)
+
+        return {
+            'id': compat_str(video['mediaId']),
+            'title': video['title'],
+            'url': rtmp_url,
+            'play_path': play_path,
+            'ext': 'flv',
+            'description': video['description'],
+            'thumbnail': video['imageUrl'],
+            'uploader': video['artistName'],
+            'uploader_id': video['artistUsername'],
+        }
index e8d43dd135ff29bc9471466f66b8b4e1b55eadea..224f56ac84b77647c2ff5468b41d229786da632e 100644 (file)
@@ -90,8 +90,8 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
              r'{statusIndex:0,index:0,.*?id:(.*?),'],
             webpage, u'category id')
         playlist_title = self._html_search_regex(
-            r'\?catid=%s">(.*?)</a>' % cat_id,
-            webpage, u'playlist title', flags=re.DOTALL)
+            r'tab0"[^>]*?>(.*?)</td>',
+            webpage, u'playlist title', flags=re.DOTALL).lower().capitalize()
 
         data = compat_urllib_parse.urlencode({
             'cid': cat_id,
index ab52ad4011851405e9a6b17f73720a8cd646860c..241cc160b9ca58bfc6b88bf9c12fe134df3b3d66 100644 (file)
@@ -20,7 +20,10 @@ class NowVideoIE(InfoExtractor):
 
         video_id = mobj.group('id')
         webpage_url = 'http://www.nowvideo.ch/video/' + video_id
+        embed_url = 'http://embed.nowvideo.ch/embed.php?v=' + video_id
         webpage = self._download_webpage(webpage_url, video_id)
+        embed_page = self._download_webpage(embed_url, video_id,
+            u'Downloading embed page')
 
         self.report_extraction(video_id)
 
@@ -28,7 +31,7 @@ class NowVideoIE(InfoExtractor):
             webpage, u'video title')
 
         video_key = self._search_regex(r'var fkzd="(.*)";',
-            webpage, u'video key')
+            embed_page, u'video key')
 
         api_call = "http://www.nowvideo.ch/api/player.api.php?file={0}&numOfErrors=0&cid=1&key={1}".format(video_id, video_key)
         api_response = self._download_webpage(api_call, video_id,
diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
new file mode 100644 (file)
index 0000000..5e2454f
--- /dev/null
@@ -0,0 +1,69 @@
+import os
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urllib_parse,
+    unescapeHTML,
+)
+from ..aes import (
+    aes_decrypt_text
+)
+
+class PornHubIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>pornhub\.com/view_video\.php\?viewkey=(?P<videoid>[0-9]+))'
+    _TEST = {
+        u'url': u'http://www.pornhub.com/view_video.php?viewkey=648719015',
+        u'file': u'648719015.mp4',
+        u'md5': u'882f488fa1f0026f023f33576004a2ed',
+        u'info_dict': {
+            u"uploader": u"BABES-COM", 
+            u"title": u"Seductive Indian beauty strips down and fingers her pink pussy",
+            u"age_limit": 18
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+        url = 'http://www.' + mobj.group('url')
+
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'age_verified=1')
+        webpage = self._download_webpage(req, video_id)
+
+        video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, u'title')
+        video_uploader = self._html_search_regex(r'<b>From: </b>(?:\s|<[^>]*>)*(.+?)<', webpage, u'uploader', fatal=False)
+        thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, u'thumbnail', fatal=False)
+        if thumbnail:
+            thumbnail = compat_urllib_parse.unquote(thumbnail)
+
+        video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
+        if webpage.find('"encrypted":true') != -1:
+            password = self._html_search_regex(r'"video_title":"([^"]+)', webpage, u'password').replace('+', ' ')
+            video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
+
+        formats = []
+        for video_url in video_urls:
+            path = compat_urllib_parse_urlparse( video_url ).path
+            extension = os.path.splitext( path )[1][1:]
+            format = path.split('/')[5].split('_')[:2]
+            format = "-".join( format )
+            formats.append({
+                'url': video_url,
+                'ext': extension,
+                'format': format,
+                'format_id': format,
+            })
+        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-'))))
+
+        return {
+            'id': video_id,
+            'uploader': video_uploader,
+            'title': video_title,
+            'thumbnail': thumbnail,
+            'formats': formats,
+            'age_limit': 18,
+        }
index 5d770ec285c3d1e3dcad04cfe49ca7780a9dd2b4..35dc5a9ffafb32d36e30f51988291dded6a6d18c 100644 (file)
@@ -16,7 +16,8 @@ class PornotubeIE(InfoExtractor):
         u'md5': u'374dd6dcedd24234453b295209aa69b6',
         u'info_dict': {
             u"upload_date": u"20090708", 
-            u"title": u"Marilyn-Monroe-Bathing"
+            u"title": u"Marilyn-Monroe-Bathing",
+            u"age_limit": 18
         }
     }
 
index 365aade564bd4f423317d427b3070c91e3e22ad3..994778e16758bc292a01e99e5292caee30a6d5c2 100644 (file)
@@ -10,7 +10,8 @@ class RedTubeIE(InfoExtractor):
         u'file': u'66418.mp4',
         u'md5': u'7b8c22b5e7098a3e1c09709df1126d2d',
         u'info_dict': {
-            u"title": u"Sucked on a toilet"
+            u"title": u"Sucked on a toilet",
+            u"age_limit": 18,
         }
     }
 
index d1b08c9bc050b3639ca252f2e84a373a8e4fa5f9..9ac7c3be8c8f1b97f46c944f08124eafbe8f1a5a 100644 (file)
@@ -63,13 +63,12 @@ class RTLnowIE(InfoExtractor):
         },
     },
     {
-        u'url': u'http://www.rtlnitronow.de/recht-ordnung/lebensmittelkontrolle-erlangenordnungsamt-berlin.php?film_id=127367&player=1&season=1',
-        u'file': u'127367.flv',
+        u'url': u'http://www.rtlnitronow.de/recht-ordnung/stadtpolizei-frankfurt-gerichtsvollzieher-leipzig.php?film_id=129679&player=1&season=1',
+        u'file': u'129679.flv',
         u'info_dict': {
-            u'upload_date': u'20130926', 
-            u'title': u'Recht & Ordnung - Lebensmittelkontrolle Erlangen/Ordnungsamt...',
-            u'description': u'Lebensmittelkontrolle Erlangen/Ordnungsamt Berlin',
-            u'thumbnail': u'http://autoimg.static-fra.de/nitronow/344787/1500x1500/image2.jpg',
+            u'upload_date': u'20131016', 
+            u'title': u'Recht & Ordnung - Stadtpolizei Frankfurt/ Gerichtsvollzieher...',
+            u'description': u'Stadtpolizei Frankfurt/ Gerichtsvollzieher Leipzig',
         },
         u'params': {
             u'skip_download': True,
diff --git a/youtube_dl/extractor/rutube.py b/youtube_dl/extractor/rutube.py
new file mode 100644 (file)
index 0000000..a18034f
--- /dev/null
@@ -0,0 +1,58 @@
+# encoding: utf-8
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urlparse,
+    compat_str,
+    ExtractorError,
+)
+
+
+class RutubeIE(InfoExtractor):
+    _VALID_URL = r'https?://rutube.ru/video/(?P<long_id>\w+)'
+
+    _TEST = {
+        u'url': u'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
+        u'file': u'3eac3b4561676c17df9132a9a1e62e3e.mp4',
+        u'info_dict': {
+            u'title': u'Раненный кенгуру забежал в аптеку',
+            u'uploader': u'NTDRussian',
+            u'uploader_id': u'29790',
+        },
+        u'params': {
+            # It requires ffmpeg (m3u8 download)
+            u'skip_download': True,
+        },
+    }
+
+    def _get_api_response(self, short_id, subpath):
+        api_url = 'http://rutube.ru/api/play/%s/%s/?format=json' % (subpath, short_id)
+        response_json = self._download_webpage(api_url, short_id,
+            u'Downloading %s json' % subpath)
+        return json.loads(response_json)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        long_id = mobj.group('long_id')
+        webpage = self._download_webpage(url, long_id)
+        og_video = self._og_search_video_url(webpage)
+        short_id = compat_urlparse.urlparse(og_video).path[1:]
+        options = self._get_api_response(short_id, 'options')
+        trackinfo = self._get_api_response(short_id, 'trackinfo')
+        # Some videos don't have the author field
+        author = trackinfo.get('author') or {}
+        m3u8_url = trackinfo['video_balancer'].get('m3u8')
+        if m3u8_url is None:
+            raise ExtractorError(u'Couldn\'t find m3u8 manifest url')
+
+        return {
+            'id': trackinfo['id'],
+            'title': trackinfo['title'],
+            'url': m3u8_url,
+            'ext': 'mp4',
+            'thumbnail': options['thumbnail_url'],
+            'uploader': author.get('name'),
+            'uploader_id': compat_str(author['id']) if author else None,
+        }
diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py
new file mode 100644 (file)
index 0000000..32df0a7
--- /dev/null
@@ -0,0 +1,74 @@
+import os
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urllib_parse,
+    unescapeHTML,
+)
+from ..aes import (
+    aes_decrypt_text
+)
+
+class SpankwireIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<videoid>[0-9]+)/?)'
+    _TEST = {
+        u'url': u'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/',
+        u'file': u'103545.mp4',
+        u'md5': u'1b3f55e345500552dbc252a3e9c1af43',
+        u'info_dict': {
+            u"uploader": u"oreusz", 
+            u"title": u"Buckcherry`s X Rated Music Video Crazy Bitch",
+            u"description": u"Crazy Bitch X rated music video.",
+            u"age_limit": 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+        url = 'http://www.' + mobj.group('url')
+
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'age_verified=1')
+        webpage = self._download_webpage(req, video_id)
+
+        video_title = self._html_search_regex(r'<h1>([^<]+)', webpage, u'title')
+        video_uploader = self._html_search_regex(r'by:\s*<a [^>]*>(.+?)</a>', webpage, u'uploader', fatal=False)
+        thumbnail = self._html_search_regex(r'flashvars\.image_url = "([^"]+)', webpage, u'thumbnail', fatal=False)
+        description = self._html_search_regex(r'>\s*Description:</div>\s*<[^>]*>([^<]+)', webpage, u'description', fatal=False)
+        if len(description) == 0:
+            description = None
+
+        video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'flashvars\.quality_[0-9]{3}p = "([^"]+)', webpage)))
+        if webpage.find('flashvars\.encrypted = "true"') != -1:
+            password = self._html_search_regex(r'flashvars\.video_title = "([^"]+)', webpage, u'password').replace('+', ' ')
+            video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
+
+        formats = []
+        for video_url in video_urls:
+            path = compat_urllib_parse_urlparse( video_url ).path
+            extension = os.path.splitext( path )[1][1:]
+            format = path.split('/')[4].split('_')[:2]
+            format = "-".join( format )
+            formats.append({
+                'url': video_url,
+                'ext': extension,
+                'format': format,
+                'format_id': format,
+            })
+        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-'))))
+
+        age_limit = self._rta_search(webpage)
+
+        return {
+            'id': video_id,
+            'uploader': video_uploader,
+            'title': video_title,
+            'thumbnail': thumbnail,
+            'description': description,
+            'formats': formats,
+            'age_limit': age_limit,
+        }
diff --git a/youtube_dl/extractor/sztvhu.py b/youtube_dl/extractor/sztvhu.py
new file mode 100644 (file)
index 0000000..81fa35c
--- /dev/null
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from .common import InfoExtractor
+from ..utils import determine_ext
+
+
+class SztvHuIE(InfoExtractor):
+    _VALID_URL = r'(?:http://)?(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
+    _TEST = {
+        u'url': u'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
+        u'file': u'20130909.mp4',
+        u'md5': u'a6df607b11fb07d0e9f2ad94613375cb',
+        u'info_dict': {
+            u"title": u"Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren",
+            u"description": u'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+        video_file = self._search_regex(
+            r'file: "...:(.*?)",', webpage, 'video file')
+        title = self._html_search_regex(
+            r'<meta name="title" content="([^"]*?) - [^-]*? - [^-]*?"',
+            webpage, 'video title')
+        description = self._html_search_regex(
+            r'<meta name="description" content="([^"]*)"/>',
+            webpage, 'video description', fatal=False)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        video_url = 'http://media.sztv.hu/vod/' + video_file
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'ext': determine_ext(video_url),
+            'description': description,
+            'thumbnail': thumbnail,
+        }
diff --git a/youtube_dl/extractor/techtalks.py b/youtube_dl/extractor/techtalks.py
new file mode 100644 (file)
index 0000000..a55f236
--- /dev/null
@@ -0,0 +1,65 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    get_element_by_attribute,
+    clean_html,
+)
+
+
+class TechTalksIE(InfoExtractor):
+    _VALID_URL = r'https?://techtalks\.tv/talks/[^/]*/(?P<id>\d+)/'
+
+    _TEST = {
+        u'url': u'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/',
+        u'playlist': [
+            {
+                u'file': u'57758.flv',
+                u'info_dict': {
+                    u'title': u'Learning Topic Models --- Going beyond SVD',
+                },
+            },
+            {
+                u'file': u'57758-slides.flv',
+                u'info_dict': {
+                    u'title': u'Learning Topic Models --- Going beyond SVD',
+                },
+            },
+        ],
+        u'params': {
+            # rtmp download
+            u'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        talk_id = mobj.group('id')
+        webpage = self._download_webpage(url, talk_id)
+        rtmp_url = self._search_regex(r'netConnectionUrl: \'(.*?)\'', webpage,
+            u'rtmp url')
+        play_path = self._search_regex(r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"',
+            webpage, u'presenter play path')
+        title = clean_html(get_element_by_attribute('class', 'title', webpage))
+        video_info = {
+                'id': talk_id,
+                'title': title,
+                'url': rtmp_url,
+                'play_path': play_path,
+                'ext': 'flv',
+            }
+        m_slides = re.search(r'<a class="slides" href=\'(.*?)\'', webpage)
+        if m_slides is None:
+            return video_info
+        else:
+            return [
+                video_info,
+                # The slides video
+                {
+                    'id': talk_id + '-slides',
+                    'title': title,
+                    'url': rtmp_url,
+                    'play_path': m_slides.group(1),
+                    'ext': 'flv',
+                },
+            ]
diff --git a/youtube_dl/extractor/tube8.py b/youtube_dl/extractor/tube8.py
new file mode 100644 (file)
index 0000000..aea9d9a
--- /dev/null
@@ -0,0 +1,65 @@
+import os
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urllib_parse,
+    unescapeHTML,
+)
+from ..aes import (
+    aes_decrypt_text
+)
+
+class Tube8IE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>tube8\.com/[^/]+/[^/]+/(?P<videoid>[0-9]+)/?)'
+    _TEST = {
+        u'url': u'http://www.tube8.com/teen/kasia-music-video/229795/',
+        u'file': u'229795.mp4',
+        u'md5': u'e9e0b0c86734e5e3766e653509475db0',
+        u'info_dict': {
+            u"description": u"hot teen Kasia grinding", 
+            u"uploader": u"unknown", 
+            u"title": u"Kasia music video",
+            u"age_limit": 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+        url = 'http://www.' + mobj.group('url')
+
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'age_verified=1')
+        webpage = self._download_webpage(req, video_id)
+
+        video_title = self._html_search_regex(r'videotitle     ="([^"]+)', webpage, u'title')
+        video_description = self._html_search_regex(r'>Description:</strong>(.+?)<', webpage, u'description', fatal=False)
+        video_uploader = self._html_search_regex(r'>Submitted by:</strong>(?:\s|<[^>]*>)*(.+?)<', webpage, u'uploader', fatal=False)
+        thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, u'thumbnail', fatal=False)
+        if thumbnail:
+            thumbnail = thumbnail.replace('\\/', '/')
+
+        video_url = self._html_search_regex(r'"video_url":"([^"]+)', webpage, u'video_url')
+        if webpage.find('"encrypted":true')!=-1:
+            password = self._html_search_regex(r'"video_title":"([^"]+)', webpage, u'password')
+            video_url = aes_decrypt_text(video_url, password, 32).decode('utf-8')
+        path = compat_urllib_parse_urlparse( video_url ).path
+        extension = os.path.splitext( path )[1][1:]
+        format = path.split('/')[4].split('_')[:2]
+        format = "-".join( format )
+
+        return {
+            'id': video_id,
+            'uploader': video_uploader,
+            'title': video_title,
+            'thumbnail': thumbnail,
+            'description': video_description,
+            'url': video_url,
+            'ext': extension,
+            'format': format,
+            'format_id': format,
+            'age_limit': 18,
+        }
index 1405b73f76ad5166d45d9a9eb9687c49fa8a0bde..7a3891b89b736fb05f4c09d441d7eb56e68d8dcd 100644 (file)
@@ -7,15 +7,25 @@ from .common import InfoExtractor
 
 
 class TudouIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?'
-    _TEST = {
+    _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs|albumplay)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?'
+    _TESTS = [{
         u'url': u'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
         u'file': u'159448201.f4v',
         u'md5': u'140a49ed444bd22f93330985d8475fcb',
         u'info_dict': {
             u"title": u"卡马乔国足开大脚长传冲吊集锦"
         }
-    }
+    },
+    {
+        u'url': u'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html',
+        u'file': u'todo.mp4',
+        u'md5': u'todo.mp4',
+        u'info_dict': {
+            u'title': u'todo.mp4',
+        },
+        u'add_ie': [u'Youku'],
+        u'skip': u'Only works from China'
+    }]
 
     def _url_for_id(self, id, quality = None):
         info_url = "http://v2.tudou.com/f?id="+str(id)
@@ -29,14 +39,19 @@ class TudouIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group(2)
         webpage = self._download_webpage(url, video_id)
-        title = re.search(",kw:\"(.+)\"",webpage)
-        if title is None:
-            title = re.search(",kw: \'(.+)\'",webpage)
-        title = title.group(1)
-        thumbnail_url = re.search(",pic: \'(.+?)\'",webpage)
-        if thumbnail_url is None:
-            thumbnail_url = re.search(",pic:\"(.+?)\"",webpage)
-        thumbnail_url = thumbnail_url.group(1)
+
+        m = re.search(r'vcode:\s*[\'"](.+?)[\'"]', webpage)
+        if m and m.group(1):
+            return {
+                '_type': 'url',
+                'url': u'youku:' + m.group(1),
+                'ie_key': 'Youku'
+            }
+
+        title = self._search_regex(
+            r",kw:\s*['\"](.+?)[\"']", webpage, u'title')
+        thumbnail_url = self._search_regex(
+            r",pic:\s*[\"'](.+?)[\"']", webpage, u'thumbnail URL', fatal=False)
 
         segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
         segments = json.loads(segs_json)
index 1c1cc418d29a8897e2a2825492ed7becab75af6b..3f6020f74ec9eeefbddafc184d3f48cf5e436adb 100644 (file)
@@ -5,7 +5,7 @@ import datetime
 
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
+    compat_HTTPError,
     ExtractorError,
 )
 
@@ -16,26 +16,22 @@ class VevoIE(InfoExtractor):
     (currently used by MTVIE)
     """
     _VALID_URL = r'((http://www.vevo.com/watch/.*?/.*?/)|(vevo:))(?P<id>.*?)(\?|$)'
-    _TEST = {
+    _TESTS = [{
         u'url': u'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
         u'file': u'GB1101300280.mp4',
+        u"md5": u"06bea460acb744eab74a9d7dcb4bfd61",
         u'info_dict': {
             u"upload_date": u"20130624",
             u"uploader": u"Hurts",
             u"title": u"Somebody to Die For",
-            u'duration': 230,
+            u"duration": 230,
+            u"width": 1920,
+            u"height": 1080,
         }
-    }
+    }]
+    _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/'
 
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
-        info_json = self._download_webpage(json_url, video_id, u'Downloading json info')
-
-        self.report_extraction(video_id)
-        video_info = json.loads(info_json)['video']
+    def _formats_from_json(self, video_info):
         last_version = {'version': -1}
         for version in video_info['videoVersions']:
             # These are the HTTP downloads, other types are for different manifests
@@ -50,17 +46,74 @@ class VevoIE(InfoExtractor):
         # Already sorted from worst to best quality
         for rend in renditions.findall('rendition'):
             attr = rend.attrib
-            f_url = attr['url']
+            format_note = '%(videoCodec)s@%(videoBitrate)4sk, %(audioCodec)s@%(audioBitrate)3sk' % attr
             formats.append({
-                'url': f_url,
-                'ext': determine_ext(f_url),
+                'url': attr['url'],
+                'format_id': attr['name'],
+                'format_note': format_note,
                 'height': int(attr['frameheight']),
                 'width': int(attr['frameWidth']),
             })
+        return formats
+
+    def _formats_from_smil(self, smil_xml):
+        formats = []
+        smil_doc = xml.etree.ElementTree.fromstring(smil_xml.encode('utf-8'))
+        els = smil_doc.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
+        for el in els:
+            src = el.attrib['src']
+            m = re.match(r'''(?xi)
+                (?P<ext>[a-z0-9]+):
+                (?P<path>
+                    [/a-z0-9]+     # The directory and main part of the URL
+                    _(?P<cbr>[0-9]+)k
+                    _(?P<width>[0-9]+)x(?P<height>[0-9]+)
+                    _(?P<vcodec>[a-z0-9]+)
+                    _(?P<vbr>[0-9]+)
+                    _(?P<acodec>[a-z0-9]+)
+                    _(?P<abr>[0-9]+)
+                    \.[a-z0-9]+  # File extension
+                )''', src)
+            if not m:
+                continue
 
-        date_epoch = int(self._search_regex(
-            r'/Date\((\d+)\)/', video_info['launchDate'], u'launch date'))/1000
-        upload_date = datetime.datetime.fromtimestamp(date_epoch)
+            format_url = self._SMIL_BASE_URL + m.group('path')
+            format_note = ('%(vcodec)s@%(vbr)4sk, %(acodec)s@%(abr)3sk' %
+                           m.groupdict())
+            formats.append({
+                'url': format_url,
+                'format_id': u'SMIL_' + m.group('cbr'),
+                'format_note': format_note,
+                'ext': m.group('ext'),
+                'width': int(m.group('width')),
+                'height': int(m.group('height')),
+            })
+        return formats
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
+        info_json = self._download_webpage(json_url, video_id, u'Downloading json info')
+        video_info = json.loads(info_json)['video']
+
+        formats = self._formats_from_json(video_info)
+        try:
+            smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % (
+                self._SMIL_BASE_URL, video_id, video_id.lower())
+            smil_xml = self._download_webpage(smil_url, video_id,
+                                              u'Downloading SMIL info')
+            formats.extend(self._formats_from_smil(smil_xml))
+        except ExtractorError as ee:
+            if not isinstance(ee.cause, compat_HTTPError):
+                raise
+            self._downloader.report_warning(
+                u'Cannot download SMIL information, falling back to JSON ..')
+
+        timestamp_ms = int(self._search_regex(
+            r'/Date\((\d+)\)/', video_info['launchDate'], u'launch date'))
+        upload_date = datetime.datetime.fromtimestamp(timestamp_ms // 1000)
         info = {
             'id': video_id,
             'title': video_info['title'],
@@ -71,7 +124,4 @@ class VevoIE(InfoExtractor):
             'duration': video_info['duration'],
         }
 
-        # TODO: Remove when #980 has been merged
-        info.update(formats[-1])
-
         return info
index d89f8409443675f4359b85b96ffc0c2d49ec32e5..265dd5b91fd9e5c4fc5a0cac8a9f36dd36731cfe 100644 (file)
@@ -16,7 +16,7 @@ class VideoDetectiveIE(InfoExtractor):
         u'info_dict': {
             u'title': u'KICK-ASS 2',
             u'description': u'md5:65ba37ad619165afac7d432eaded6013',
-            u'duration': 138,
+            u'duration': 135,
         },
     }
 
index cea29f03525af91d1be56c475da0f62ce45eea83..c7d864a2b6de2e121393bccb0fa50ffca7c2fde2 100644 (file)
@@ -1,3 +1,4 @@
+# encoding: utf-8
 import json
 import re
 import itertools
@@ -10,19 +11,21 @@ from ..utils import (
     clean_html,
     get_element_by_attribute,
     ExtractorError,
+    RegexNotFoundError,
     std_headers,
+    unsmuggle_url,
 )
 
 class VimeoIE(InfoExtractor):
     """Information extractor for vimeo.com."""
 
     # _VALID_URL matches Vimeo URLs
-    _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)/?(?:[?].*)?$'
+    _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)/?(?:[?].*)?(?:#.*)?$'
     _NETRC_MACHINE = 'vimeo'
     IE_NAME = u'vimeo'
     _TESTS = [
         {
-            u'url': u'http://vimeo.com/56015672',
+            u'url': u'http://vimeo.com/56015672#at=0',
             u'file': u'56015672.mp4',
             u'md5': u'8879b6cc097e987f02484baf890129e5',
             u'info_dict': {
@@ -54,6 +57,21 @@ class VimeoIE(InfoExtractor):
                 u'uploader': u'The BLN & Business of Software',
             },
         },
+        {
+            u'url': u'http://vimeo.com/68375962',
+            u'file': u'68375962.mp4',
+            u'md5': u'aaf896bdb7ddd6476df50007a0ac0ae7',
+            u'note': u'Video protected with password',
+            u'info_dict': {
+                u'title': u'youtube-dl password protected test video',
+                u'upload_date': u'20130614',
+                u'uploader_id': u'user18948128',
+                u'uploader': u'Jaime Marquínez Ferrándiz',
+            },
+            u'params': {
+                u'videopassword': u'youtube-dl',
+            },
+        },
     ]
 
     def _login(self):
@@ -98,6 +116,12 @@ class VimeoIE(InfoExtractor):
         self._login()
 
     def _real_extract(self, url, new_video=True):
+        url, data = unsmuggle_url(url)
+        headers = std_headers
+        if data is not None:
+            headers = headers.copy()
+            headers.update(data)
+
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -112,7 +136,7 @@ class VimeoIE(InfoExtractor):
             url = 'https://vimeo.com/' + video_id
 
         # Retrieve video webpage to extract further information
-        request = compat_urllib_request.Request(url, None, std_headers)
+        request = compat_urllib_request.Request(url, None, headers)
         webpage = self._download_webpage(request, video_id)
 
         # Now we begin extracting as much information as we can from what we
@@ -122,18 +146,26 @@ class VimeoIE(InfoExtractor):
 
         # Extract the config JSON
         try:
-            config = self._search_regex([r' = {config:({.+?}),assets:', r'c=({.+?);'],
-                webpage, u'info section', flags=re.DOTALL)
-            config = json.loads(config)
-        except:
+            try:
+                config_url = self._html_search_regex(
+                    r' data-config-url="(.+?)"', webpage, u'config URL')
+                config_json = self._download_webpage(config_url, video_id)
+                config = json.loads(config_json)
+            except RegexNotFoundError:
+                # For pro videos or player.vimeo.com urls
+                config = self._search_regex([r' = {config:({.+?}),assets:', r'c=({.+?);'],
+                    webpage, u'info section', flags=re.DOTALL)
+                config = json.loads(config)
+        except Exception as e:
             if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                 raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
 
-            if re.search('If so please provide the correct password.', webpage):
+            if re.search('<form[^>]+?id="pw_form"', webpage) is not None:
                 self._verify_video_password(url, video_id, webpage)
                 return self._real_extract(url)
             else:
-                raise ExtractorError(u'Unable to extract info section')
+                raise ExtractorError(u'Unable to extract info section',
+                                     cause=e)
 
         # Extract title
         video_title = config["video"]["title"]
@@ -172,46 +204,45 @@ class VimeoIE(InfoExtractor):
 
         # Vimeo specific: extract video codec and quality information
         # First consider quality, then codecs, then take everything
-        # TODO bind to format param
-        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
+        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
         files = { 'hd': [], 'sd': [], 'other': []}
         config_files = config["video"].get("files") or config["request"].get("files")
         for codec_name, codec_extension in codecs:
-            if codec_name in config_files:
-                if 'hd' in config_files[codec_name]:
-                    files['hd'].append((codec_name, codec_extension, 'hd'))
-                elif 'sd' in config_files[codec_name]:
-                    files['sd'].append((codec_name, codec_extension, 'sd'))
+            for quality in config_files.get(codec_name, []):
+                format_id = '-'.join((codec_name, quality)).lower()
+                key = quality if quality in files else 'other'
+                video_url = None
+                if isinstance(config_files[codec_name], dict):
+                    file_info = config_files[codec_name][quality]
+                    video_url = file_info.get('url')
                 else:
-                    files['other'].append((codec_name, codec_extension, config_files[codec_name][0]))
-
-        for quality in ('hd', 'sd', 'other'):
-            if len(files[quality]) > 0:
-                video_quality = files[quality][0][2]
-                video_codec = files[quality][0][0]
-                video_extension = files[quality][0][1]
-                self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
-                break
-        else:
-            raise ExtractorError(u'No known codec found')
+                    file_info = {}
+                if video_url is None:
+                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
+                        %(video_id, sig, timestamp, quality, codec_name.upper())
 
-        video_url = None
-        if isinstance(config_files[video_codec], dict):
-            video_url = config_files[video_codec][video_quality].get("url")
-        if video_url is None:
-            video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
-                        %(video_id, sig, timestamp, video_quality, video_codec.upper())
+                files[key].append({
+                    'ext': codec_extension,
+                    'url': video_url,
+                    'format_id': format_id,
+                    'width': file_info.get('width'),
+                    'height': file_info.get('height'),
+                })
+        formats = []
+        for key in ('other', 'sd', 'hd'):
+            formats += files[key]
+        if len(formats) == 0:
+            raise ExtractorError(u'No known codec found')
 
         return [{
             'id':       video_id,
-            'url':      video_url,
             'uploader': video_uploader,
             'uploader_id': video_uploader_id,
             'upload_date':  video_upload_date,
             'title':    video_title,
-            'ext':      video_extension,
             'thumbnail':    video_thumbnail,
             'description':  video_description,
+            'formats': formats,
         }]
 
 
diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py
new file mode 100644 (file)
index 0000000..90d8a6d
--- /dev/null
@@ -0,0 +1,45 @@
+# encoding: utf-8
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_str,
+    unescapeHTML,
+)
+
+
+class VKIE(InfoExtractor):
+    IE_NAME = u'vk.com'
+    _VALID_URL = r'https?://vk\.com/(?:videos.*?\?.*?z=)?video(?P<id>.*?)(?:\?|%2F|$)'
+
+    _TEST = {
+        u'url': u'http://vk.com/videos-77521?z=video-77521_162222515%2Fclub77521',
+        u'md5': u'0deae91935c54e00003c2a00646315f0',
+        u'info_dict': {
+            u'id': u'162222515',
+            u'ext': u'flv',
+            u'title': u'ProtivoGunz - Хуёвая песня',
+            u'uploader': u'Noize MC',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        info_url = 'http://vk.com/al_video.php?act=show&al=1&video=%s' % video_id
+        info_page = self._download_webpage(info_url, video_id)
+        m_yt = re.search(r'src="(http://www.youtube.com/.*?)"', info_page)
+        if m_yt is not None:
+            self.to_screen(u'Youtube video detected')
+            return self.url_result(m_yt.group(1), 'Youtube')
+        vars_json = self._search_regex(r'var vars = ({.*?});', info_page, u'vars')
+        vars = json.loads(vars_json)
+
+        return {
+            'id': compat_str(vars['vid']),
+            'url': vars['url240'],
+            'title': unescapeHTML(vars['md_title']),
+            'thumbnail': vars['jpg'],
+            'uploader': vars['md_author'],
+        }
diff --git a/youtube_dl/extractor/websurg.py b/youtube_dl/extractor/websurg.py
new file mode 100644 (file)
index 0000000..43953bf
--- /dev/null
@@ -0,0 +1,59 @@
+# coding: utf-8
+
+import re
+
+from ..utils import (
+    compat_urllib_request,
+    compat_urllib_parse
+)
+
+from .common import InfoExtractor
+
+class WeBSurgIE(InfoExtractor):
+    IE_NAME = u'websurg.com'
+    _VALID_URL = r'http://.*?\.websurg\.com/MEDIA/\?noheader=1&doi=(.*)'
+
+    _TEST = {
+        u'url': u'http://www.websurg.com/MEDIA/?noheader=1&doi=vd01en4012',
+        u'file': u'vd01en4012.mp4',
+        u'params': {
+            u'skip_download': True,
+        },
+        u'skip': u'Requires login information',
+    }
+    
+    _LOGIN_URL = 'http://www.websurg.com/inc/login/login_div.ajax.php?login=1'
+
+    def _real_initialize(self):
+
+        login_form = {
+            'username': self._downloader.params['username'],
+            'password': self._downloader.params['password'],
+            'Submit': 1
+        }
+        
+        request = compat_urllib_request.Request(
+            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        request.add_header(
+            'Content-Type', 'application/x-www-form-urlencoded;charset=utf-8')
+        compat_urllib_request.urlopen(request).info()
+        webpage = self._download_webpage(self._LOGIN_URL, '', 'Logging in')
+        
+        if webpage != 'OK':
+            self._downloader.report_error(
+                u'Unable to log in: bad username/password')
+        
+    def _real_extract(self, url):
+        video_id = re.match(self._VALID_URL, url).group(1)
+        
+        webpage = self._download_webpage(url, video_id)
+        
+        url_info = re.search(r'streamer="(.*?)" src="(.*?)"', webpage)
+        
+        return {'id': video_id,
+                'title': self._og_search_title(webpage),
+                'description': self._og_search_description(webpage),
+                'ext' : 'mp4',
+                'url' : url_info.group(1) + '/' + url_info.group(2),
+                'thumbnail': self._og_search_thumbnail(webpage)
+                }
index 361619694980d3260ff81aeed2d0d07294739a0e..7444d3393a25f8a49778a5bd589aa839591bd9d8 100644 (file)
@@ -19,7 +19,8 @@ class XHamsterIE(InfoExtractor):
         u'info_dict': {
             u"upload_date": u"20121014", 
             u"uploader_id": u"Ruseful2011", 
-            u"title": u"FemaleAgent Shy beauty takes the bait"
+            u"title": u"FemaleAgent Shy beauty takes the bait",
+            u"age_limit": 18,
         }
     },
     {
@@ -27,28 +28,33 @@ class XHamsterIE(InfoExtractor):
         u'file': u'2221348.flv',
         u'md5': u'e767b9475de189320f691f49c679c4c7',
         u'info_dict': {
-            u"upload_date": u"20130914", 
-            u"uploader_id": u"jojo747400", 
-            u"title": u"Britney Spears  Sexy Booty"
+            u"upload_date": u"20130914",
+            u"uploader_id": u"jojo747400",
+            u"title": u"Britney Spears  Sexy Booty",
+            u"age_limit": 18,
         }
     }]
 
     def _real_extract(self,url):
+        def extract_video_url(webpage):
+            mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage)
+            if mobj is None:
+                raise ExtractorError(u'Unable to extract media URL')
+            if len(mobj.group('server')) == 0:
+                return compat_urllib_parse.unquote(mobj.group('file'))
+            else:
+                return mobj.group('server')+'/key='+mobj.group('file')
+
+        def is_hd(webpage):
+            return webpage.find('<div class=\'icon iconHD\'>') != -1
+
         mobj = re.match(self._VALID_URL, url)
 
         video_id = mobj.group('id')
         seo = mobj.group('seo')
-        mrss_url = 'http://xhamster.com/movies/%s/%s.html?hd' % (video_id, seo)
+        mrss_url = 'http://xhamster.com/movies/%s/%s.html' % (video_id, seo)
         webpage = self._download_webpage(mrss_url, video_id)
 
-        mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract media URL')
-        if len(mobj.group('server')) == 0:
-            video_url = compat_urllib_parse.unquote(mobj.group('file'))
-        else:
-            video_url = mobj.group('server')+'/key='+mobj.group('file')
-
         video_title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>',
             webpage, u'title')
 
@@ -72,13 +78,34 @@ class XHamsterIE(InfoExtractor):
         video_thumbnail = self._search_regex(r'\'image\':\'(?P<thumbnail>[^\']+)\'',
             webpage, u'thumbnail', fatal=False)
 
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'ext':      determine_ext(video_url),
-            'title':    video_title,
+        age_limit = self._rta_search(webpage)
+
+        video_url = extract_video_url(webpage)
+        hd = is_hd(webpage)
+        formats = [{
+            'url': video_url,
+            'ext': determine_ext(video_url),
+            'format': 'hd' if hd else 'sd',
+            'format_id': 'hd' if hd else 'sd',
+        }]
+        if not hd:
+            webpage = self._download_webpage(mrss_url+'?hd', video_id)
+            if is_hd(webpage):
+                video_url = extract_video_url(webpage)
+                formats.append({
+                    'url': video_url,
+                    'ext': determine_ext(video_url),
+                    'format': 'hd',
+                    'format_id': 'hd',
+                })
+
+        return {
+            'id': video_id,
+            'title': video_title,
+            'formats': formats,
             'description': video_description,
             'upload_date': video_upload_date,
             'uploader_id': video_uploader_id,
-            'thumbnail': video_thumbnail
-        }]
+            'thumbnail': video_thumbnail,
+            'age_limit': age_limit,
+        }
index 40d8489000bb7a25f277c84d6d407b72ce778c8d..8a0eb1afdacc4cbe1cbb441b939cff3d7697cf4e 100644 (file)
@@ -18,7 +18,8 @@ class XNXXIE(InfoExtractor):
         u'file': u'1135332.flv',
         u'md5': u'0831677e2b4761795f68d417e0b7b445',
         u'info_dict': {
-            u"title": u"lida \u00bb Naked Funny Actress  (5)"
+            u"title": u"lida \u00bb Naked Funny Actress  (5)",
+            u"age_limit": 18,
         }
     }
 
@@ -50,4 +51,5 @@ class XNXXIE(InfoExtractor):
             'ext': 'flv',
             'thumbnail': video_thumbnail,
             'description': None,
+            'age_limit': 18,
         }]
index c3b9736d70a7af6fb90cd617312d25fd2d7cc740..90138d7e523a405c20bae8352c6233b5868860f5 100644 (file)
@@ -13,7 +13,8 @@ class XVideosIE(InfoExtractor):
         u'file': u'939581.flv',
         u'md5': u'1d0c835822f0a71a7bf011855db929d0',
         u'info_dict': {
-            u"title": u"Funny Porns By >>>>S<<<<<< -1"
+            u"title": u"Funny Porns By >>>>S<<<<<< -1",
+            u"age_limit": 18,
         }
     }
 
@@ -46,6 +47,7 @@ class XVideosIE(InfoExtractor):
             'ext': 'flv',
             'thumbnail': video_thumbnail,
             'description': None,
+            'age_limit': 18,
         }
 
         return [info]
index 1265639e821bd873b74aeea08811f8c22e966ba1..1fcc518acde9dbb08fef1ccb42a9ee7ae550967a 100644 (file)
@@ -13,7 +13,8 @@ class YouJizzIE(InfoExtractor):
         u'file': u'2189178.flv',
         u'md5': u'07e15fa469ba384c7693fd246905547c',
         u'info_dict': {
-            u"title": u"Zeichentrick 1"
+            u"title": u"Zeichentrick 1",
+            u"age_limit": 18,
         }
     }
 
@@ -25,6 +26,8 @@ class YouJizzIE(InfoExtractor):
         # Get webpage content
         webpage = self._download_webpage(url, video_id)
 
+        age_limit = self._rta_search(webpage)
+
         # Get the video title
         video_title = self._html_search_regex(r'<title>(?P<title>.*)</title>',
             webpage, u'title').strip()
@@ -60,6 +63,7 @@ class YouJizzIE(InfoExtractor):
                 'title': video_title,
                 'ext': 'flv',
                 'format': 'flv',
-                'player_url': embed_page_url}
+                'player_url': embed_page_url,
+                'age_limit': age_limit}
 
         return [info]
index b1f93dd1bb90d964916394d88d83aaaf153ba15b..e46a9b4d6ca33f5dd768c928aae0901677e7b2fc 100644 (file)
@@ -17,7 +17,7 @@ from ..aes import (
 )
 
 class YouPornIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))'
     _TEST = {
         u'url': u'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
         u'file': u'505835.mp4',
@@ -26,27 +26,15 @@ class YouPornIE(InfoExtractor):
             u"upload_date": u"20101221", 
             u"description": u"Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?", 
             u"uploader": u"Ask Dan And Jennifer", 
-            u"title": u"Sex Ed: Is It Safe To Masturbate Daily?"
+            u"title": u"Sex Ed: Is It Safe To Masturbate Daily?",
+            u"age_limit": 18,
         }
     }
 
-    def _print_formats(self, formats):
-        """Print all available formats"""
-        print(u'Available formats:')
-        print(u'ext\t\tformat')
-        print(u'---------------------------------')
-        for format in formats:
-            print(u'%s\t\t%s'  % (format['ext'], format['format']))
-
-    def _specific(self, req_format, formats):
-        for x in formats:
-            if x["format"] == req_format:
-                return x
-        return None
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('videoid')
+        url = 'http://www.' + mobj.group('url')
 
         req = compat_urllib_request.Request(url)
         req.add_header('Cookie', 'age_verified=1')
@@ -70,27 +58,22 @@ class YouPornIE(InfoExtractor):
         except KeyError:
             raise ExtractorError('Missing JSON parameter: ' + sys.exc_info()[1])
 
-        # Get all of the formats available
+        # Get all of the links from the page
         DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
         download_list_html = self._search_regex(DOWNLOAD_LIST_RE,
             webpage, u'download list').strip()
-
-        # Get all of the links from the page
-        LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
+        LINK_RE = r'<a href="([^"]+)">'
         links = re.findall(LINK_RE, download_list_html)
-        
-        # Get link of hd video if available
-        mobj = re.search(r'var encryptedQuality720URL = \'(?P<encrypted_video_url>[a-zA-Z0-9+/]+={0,2})\';', webpage)
-        if mobj != None:
-            encrypted_video_url = mobj.group(u'encrypted_video_url')
-            video_url = aes_decrypt_text(encrypted_video_url, video_title, 32).decode('utf-8')
-            links = [video_url] + links
+
+        # Get all encrypted links
+        encrypted_links = re.findall(r'var encryptedQuality[0-9]{3}URL = \'([a-zA-Z0-9+/]+={0,2})\';', webpage)
+        for encrypted_link in encrypted_links:
+            link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8')
+            links.append(link)
         
         if not links:
             raise ExtractorError(u'ERROR: no known formats available for video')
 
-        self.to_screen(u'Links found: %d' % len(links))
-
         formats = []
         for link in links:
 
@@ -102,39 +85,32 @@ class YouPornIE(InfoExtractor):
             path = compat_urllib_parse_urlparse( video_url ).path
             extension = os.path.splitext( path )[1][1:]
             format = path.split('/')[4].split('_')[:2]
+
             # size = format[0]
             # bitrate = format[1]
             format = "-".join( format )
             # title = u'%s-%s-%s' % (video_title, size, bitrate)
 
             formats.append({
-                'id': video_id,
                 'url': video_url,
-                'uploader': video_uploader,
-                'upload_date': upload_date,
-                'title': video_title,
                 'ext': extension,
                 'format': format,
-                'thumbnail': thumbnail,
-                'description': video_description,
-                'age_limit': age_limit,
+                'format_id': format,
             })
 
-        if self._downloader.params.get('listformats', None):
-            self._print_formats(formats)
-            return
-
-        req_format = self._downloader.params.get('format', 'best')
-        self.to_screen(u'Format: %s' % req_format)
-
-        if req_format is None or req_format == 'best':
-            return [formats[0]]
-        elif req_format == 'worst':
-            return [formats[-1]]
-        elif req_format in ('-1', 'all'):
-            return formats
-        else:
-            format = self._specific( req_format, formats )
-            if format is None:
-                raise ExtractorError(u'Requested format not available')
-            return [format]
+        # Sort and remove doubles
+        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-'))))
+        for i in range(len(formats)-1,0,-1):
+            if formats[i]['format_id'] == formats[i-1]['format_id']:
+                del formats[i]
+        
+        return {
+            'id': video_id,
+            'uploader': video_uploader,
+            'upload_date': upload_date,
+            'title': video_title,
+            'thumbnail': thumbnail,
+            'description': video_description,
+            'age_limit': age_limit,
+            'formats': formats,
+        }
index d7c9b38f9da7054a43827a5e44572986717714a7..9053f3ead8ee81b490fdc9982359465e29d965db 100644 (file)
@@ -74,14 +74,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
             return False
 
-        galx = None
-        dsh = None
-        match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
-        if match:
-          galx = match.group(1)
-        match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
-        if match:
-          dsh = match.group(1)
+        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
+                                  login_page, u'Login GALX parameter')
 
         # Log in
         login_form_strs = {
@@ -95,7 +89,6 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                 u'checkConnection': u'',
                 u'checkedDomains': u'youtube',
                 u'dnConn': u'',
-                u'dsh': dsh,
                 u'pstMsg': u'0',
                 u'rmShown': u'1',
                 u'secTok': u'',
@@ -236,11 +229,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '136': 'mp4',
         '137': 'mp4',
         '138': 'mp4',
-        '139': 'mp4',
-        '140': 'mp4',
-        '141': 'mp4',
         '160': 'mp4',
 
+        # Dash mp4 audio
+        '139': 'm4a',
+        '140': 'm4a',
+        '141': 'm4a',
+
         # Dash webm
         '171': 'webm',
         '172': 'webm',
@@ -346,7 +341,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         },
         {
             u"url":  u"http://www.youtube.com/watch?v=1ltcDfZMA3U",
-            u"file":  u"1ltcDfZMA3U.flv",
+            u"file":  u"1ltcDfZMA3U.mp4",
             u"note": u"Test VEVO video (#897)",
             u"info_dict": {
                 u"upload_date": u"20070518",
@@ -1116,7 +1111,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'lang': lang,
                 'v': video_id,
                 'fmt': self._downloader.params.get('subtitlesformat'),
-                'name': l[0],
+                'name': l[0].encode('utf-8'),
             })
             url = u'http://www.youtube.com/api/timedtext?' + params
             sub_lang_list[lang] = url
@@ -1150,7 +1145,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             list_page = self._download_webpage(list_url, video_id)
             caption_list = xml.etree.ElementTree.fromstring(list_page.encode('utf-8'))
             original_lang_node = caption_list.find('track')
-            if original_lang_node.attrib.get('kind') != 'asr' :
+            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
                 self._downloader.report_warning(u'Video doesn\'t have automatic captions')
                 return {}
             original_lang = original_lang_node.attrib['lang_code']
@@ -1403,32 +1398,29 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             # this signatures are encrypted
             if 'url_encoded_fmt_stream_map' not in args:
                 raise ValueError(u'No stream_map present')  # caught below
-            m_s = re.search(r'[&,]s=', args['url_encoded_fmt_stream_map'])
+            re_signature = re.compile(r'[&,]s=')
+            m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
             if m_s is not None:
                 self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
                 video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
-            m_s = re.search(r'[&,]s=', args.get('adaptive_fmts', u''))
+            m_s = re_signature.search(args.get('adaptive_fmts', u''))
             if m_s is not None:
-                if 'url_encoded_fmt_stream_map' in video_info:
-                    video_info['url_encoded_fmt_stream_map'][0] += ',' + args['adaptive_fmts']
-                else:
-                    video_info['url_encoded_fmt_stream_map'] = [args['adaptive_fmts']]
-            elif 'adaptive_fmts' in video_info:
-                if 'url_encoded_fmt_stream_map' in video_info:
-                    video_info['url_encoded_fmt_stream_map'][0] += ',' + video_info['adaptive_fmts'][0]
+                if 'adaptive_fmts' in video_info:
+                    video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
                 else:
-                    video_info['url_encoded_fmt_stream_map'] = video_info['adaptive_fmts']
+                    video_info['adaptive_fmts'] = [args['adaptive_fmts']]
         except ValueError:
             pass
 
         if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
             self.report_rtmp_download()
             video_url_list = [(None, video_info['conn'][0])]
-        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
-            if 'rtmpe%3Dyes' in video_info['url_encoded_fmt_stream_map'][0]:
+        elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
+            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
+            if 'rtmpe%3Dyes' in encoded_url_map:
                 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
             url_map = {}
-            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
+            for url_data_str in encoded_url_map.split(','):
                 url_data = compat_parse_qs(url_data_str)
                 if 'itag' in url_data and 'url' in url_data:
                     url = url_data['url'][0]
@@ -1481,13 +1473,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
 
         results = []
-        for format_param, video_real_url in video_url_list:
+        for itag, video_real_url in video_url_list:
             # Extension
-            video_extension = self._video_extensions.get(format_param, 'flv')
+            video_extension = self._video_extensions.get(itag, 'flv')
 
-            video_format = '{0} - {1}{2}'.format(format_param if format_param else video_extension,
-                                              self._video_dimensions.get(format_param, '???'),
-                                              ' ('+self._special_itags[format_param]+')' if format_param in self._special_itags else '')
+            video_format = '{0} - {1}{2}'.format(itag if itag else video_extension,
+                                              self._video_dimensions.get(itag, '???'),
+                                              ' ('+self._special_itags[itag]+')' if itag in self._special_itags else '')
 
             results.append({
                 'id':       video_id,
@@ -1498,6 +1490,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'title':    video_title,
                 'ext':      video_extension,
                 'format':   video_format,
+                'format_id': itag,
                 'thumbnail':    video_thumbnail,
                 'description':  video_description,
                 'player_url':   player_url,
index 3e81c308b27fb9618dba89a30fd5964651f48cbf..1d9785341ec685071ea8fcc4846029a3e889bc72 100644 (file)
@@ -572,6 +572,11 @@ class ExtractorError(Exception):
         return u''.join(traceback.format_tb(self.traceback))
 
 
+class RegexNotFoundError(ExtractorError):
+    """Error when a regex didn't match"""
+    pass
+
+
 class DownloadError(Exception):
     """Download Error exception.
 
@@ -945,3 +950,29 @@ class locked_file(object):
 
 def shell_quote(args):
     return ' '.join(map(pipes.quote, args))
+
+
+def takewhile_inclusive(pred, seq):
+    """ Like itertools.takewhile, but include the latest evaluated element
+        (the first element so that Not pred(e)) """
+    for e in seq:
+        yield e
+        if not pred(e):
+            return
+
+
+def smuggle_url(url, data):
+    """ Pass additional data in a URL for internal use. """
+
+    sdata = compat_urllib_parse.urlencode(
+        {u'__youtubedl_smuggle': json.dumps(data)})
+    return url + u'#' + sdata
+
+
+def unsmuggle_url(smug_url):
+    if not '#__youtubedl_smuggle' in smug_url:
+        return smug_url, None
+    url, _, sdata = smug_url.rpartition(u'#')
+    jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0]
+    data = json.loads(jsond)
+    return url, data
index 1004af116bc88dba99ff62273d63cc02d6154ea4..75a46a2d55d41171fc6664046ab3631d9e7ae4d3 100644 (file)
@@ -1,2 +1,2 @@
 
-__version__ = '2013.10.09'
+__version__ = '2013.11.02'