Merge pull request #1620 from jaimeMF/console_script
author Filippo Valsorda <filippo.valsorda@gmail.com>
Mon, 28 Oct 2013 06:08:59 +0000 (23:08 -0700)
committer Filippo Valsorda <filippo.valsorda@gmail.com>
Mon, 28 Oct 2013 06:08:59 +0000 (23:08 -0700)
Use the console_scripts entry point if setuptools is available
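
Note: the point of the merge is that, when setuptools is available, setup.py registers the youtube-dl command through a console_scripts entry point instead of relying on a plain installed script. The setup.py hunk included below only shows an author_email addition, so the following is a hedged sketch of that pattern; the entry-point target (youtube_dl:main) and the bin/youtube-dl fallback are assumptions, not copied from this commit.

    # Sketch of the console_scripts pattern this merge is about (assumed names).
    # With setuptools, "youtube-dl" becomes a generated wrapper calling
    # youtube_dl.main(); without setuptools, fall back to installing a script file.
    try:
        from setuptools import setup
        setuptools_available = True
    except ImportError:
        from distutils.core import setup
        setuptools_available = False

    if setuptools_available:
        params = {'entry_points': {'console_scripts': ['youtube-dl = youtube_dl:main']}}
    else:
        params = {'scripts': ['bin/youtube-dl']}

    setup(
        name='youtube-dl',
        packages=['youtube_dl', 'youtube_dl.extractor'],
        **params
    )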

41 files changed:
README.md
devscripts/bash-completion.in
devscripts/check-porn.py [new file with mode: 0644]
devscripts/release.sh
setup.py
test/test_YoutubeDL.py
youtube_dl/YoutubeDL.py
youtube_dl/__init__.py
youtube_dl/extractor/__init__.py
youtube_dl/extractor/addanime.py
youtube_dl/extractor/arte.py
youtube_dl/extractor/cinemassacre.py
youtube_dl/extractor/common.py
youtube_dl/extractor/dailymotion.py
youtube_dl/extractor/eighttracks.py
youtube_dl/extractor/exfm.py
youtube_dl/extractor/facebook.py
youtube_dl/extractor/faz.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/googleplus.py
youtube_dl/extractor/internetvideoarchive.py
youtube_dl/extractor/keezmovies.py [new file with mode: 0644]
youtube_dl/extractor/metacafe.py
youtube_dl/extractor/nhl.py
youtube_dl/extractor/nowvideo.py
youtube_dl/extractor/pornhub.py [new file with mode: 0644]
youtube_dl/extractor/pornotube.py
youtube_dl/extractor/redtube.py
youtube_dl/extractor/rtlnow.py
youtube_dl/extractor/spankwire.py [new file with mode: 0644]
youtube_dl/extractor/tube8.py [new file with mode: 0644]
youtube_dl/extractor/videodetective.py
youtube_dl/extractor/vimeo.py
youtube_dl/extractor/xhamster.py
youtube_dl/extractor/xnxx.py
youtube_dl/extractor/xvideos.py
youtube_dl/extractor/youjizz.py
youtube_dl/extractor/youporn.py
youtube_dl/extractor/youtube.py
youtube_dl/utils.py
youtube_dl/version.py

index 6dae0a580f282ac053e0af0bcf7b41ff9cb1270a..2b8db0cfc2c9f7f78be5f3fb69fdeb345562d1b5 100644 (file)
--- a/README.md
+++ b/README.md
@@ -21,6 +21,8 @@ which means you can modify it, redistribute it or use it however you like.
                                sudo if needed)
     -i, --ignore-errors        continue on download errors, for example to to
                                skip unavailable videos in a playlist
+    --abort-on-error           Abort downloading of further videos (in the
+                               playlist or the command line) if an error occurs
     --dump-user-agent          display the current browser identification
     --user-agent UA            specify a custom user agent
     --referer REF              specify a custom referer, use if the video access
@@ -30,7 +32,7 @@ which means you can modify it, redistribute it or use it however you like.
     --extractor-descriptions   Output descriptions of all supported extractors
     --proxy URL                Use the specified HTTP/HTTPS proxy
     --no-check-certificate     Suppress HTTPS certificate validation.
-    --cache-dir None           Location in the filesystem where youtube-dl can
+    --cache-dir DIR            Location in the filesystem where youtube-dl can
                                store downloaded information permanently. By
                                default $XDG_CACHE_HOME/youtube-dl or ~/.cache
                                /youtube-dl .
@@ -76,15 +78,17 @@ which means you can modify it, redistribute it or use it however you like.
                                %(uploader_id)s for the uploader nickname if
                                different, %(autonumber)s to get an automatically
                                incremented number, %(ext)s for the filename
-                               extension, %(upload_date)s for the upload date
-                               (YYYYMMDD), %(extractor)s for the provider
-                               (youtube, metacafe, etc), %(id)s for the video id
-                               , %(playlist)s for the playlist the video is in,
-                               %(playlist_index)s for the position in the
-                               playlist and %% for a literal percent. Use - to
-                               output to stdout. Can also be used to download to
-                               a different directory, for example with -o '/my/d
-                               ownloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
+                               extension, %(format)s for the format description
+                               (like "22 - 1280x720" or "HD")%(upload_date)s for
+                               the upload date (YYYYMMDD), %(extractor)s for the
+                               provider (youtube, metacafe, etc), %(id)s for the
+                               video id , %(playlist)s for the playlist the
+                               video is in, %(playlist_index)s for the position
+                               in the playlist and %% for a literal percent. Use
+                               - to output to stdout. Can also be used to
+                               download to a different directory, for example
+                               with -o '/my/downloads/%(uploader)s/%(title)s-%(i
+                               d)s.%(ext)s' .
     --autonumber-size NUMBER   Specifies the number of digits in %(autonumber)s
                                when it is present in output filename template or
                                --autonumber option is given
index bd10f63c2f2615168a482085ad156a9f4e330ddd..ce893fcbe1a681e535452c35f5b833eea54b2d95 100644 (file)
@@ -1,4 +1,4 @@
-__youtube-dl()
+__youtube_dl()
 {
     local cur prev opts
     COMPREPLY=()
@@ -15,4 +15,4 @@ __youtube-dl()
     fi
 }
 
-complete -F __youtube-dl youtube-dl
+complete -F __youtube_dl youtube-dl
diff --git a/devscripts/check-porn.py b/devscripts/check-porn.py
new file mode 100644 (file)
index 0000000..63401fe
--- /dev/null
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+"""
+This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
+if we are not 'age_limit' tagging some porn site
+"""
+
+# Allow direct execution
+import os
+import sys
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import get_testcases
+from youtube_dl.utils import compat_urllib_request
+
+for test in get_testcases():
+    try:
+        webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
+    except:
+        print('\nFail: {0}'.format(test['name']))
+        continue
+
+    webpage = webpage.decode('utf8', 'replace')
+
+    if 'porn' in webpage.lower() and ('info_dict' not in test
+                                      or 'age_limit' not in test['info_dict']
+                                      or test['info_dict']['age_limit'] != 18):
+        print('\nPotential missing age_limit check: {0}'.format(test['name']))
+
+    elif 'porn' not in webpage.lower() and ('info_dict' in test and
+                                            'age_limit' in test['info_dict'] and
+                                            test['info_dict']['age_limit'] == 18):
+        print('\nPotential false negative: {0}'.format(test['name']))
+
+    else:
+        sys.stdout.write('.')
+    sys.stdout.flush()
+
+print()
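
Note: the script is meant to be run directly from a checkout (it pushes the repository root onto sys.path before importing test.helper), so something like `python devscripts/check-porn.py` from the top-level directory should work; it prints a dot per test case and a warning line whenever a test's page content and its age_limit tag disagree.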
index 796468b4b3aee3e603ddb919535bfde281cd71e5..2766174c1a8477519eb818f287897ebc93d04a72 100755 (executable)
@@ -88,10 +88,6 @@ ROOT=$(pwd)
     "$ROOT/devscripts/gh-pages/update-sites.py"
     git add *.html *.html.in update
     git commit -m "release $version"
-    git show HEAD
-    read -p "Is it good, can I push? (y/n) " -n 1
-    if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
-    echo
     git push "$ROOT" gh-pages
     git push "$ORIGIN_URL" gh-pages
 )
index 347a4f2d8090ad85823ca8ca4bae424fcd25a832..f14f9637722e03aceb0aa96a9da1c65d91ef776e 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -67,6 +67,7 @@ setup(
     ' YouTube.com and other video sites.',
     url='https://github.com/rg3/youtube-dl',
     author='Ricardo Garcia',
+    author_email='ytdl@yt-dl.org',
     maintainer='Philipp Hagemeister',
     maintainer_email='phihag@phihag.de',
     packages=['youtube_dl', 'youtube_dl.extractor'],
index ba6dc05bc3c5549783d7e93c3226484da2f0602a..f8cd1bdce9a64ac87ec83ab15130148f580a5291 100644 (file)
@@ -94,6 +94,40 @@ class TestFormatSelection(unittest.TestCase):
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'format_id'], u'excellent')
 
+    def test_format_selection(self):
+        formats = [
+            {u'format_id': u'35', u'ext': u'mp4'},
+            {u'format_id': u'45', u'ext': u'webm'},
+            {u'format_id': u'47', u'ext': u'webm'},
+            {u'format_id': u'2', u'ext': u'flv'},
+        ]
+        info_dict = {u'formats': formats, u'extractor': u'test'}
+
+        ydl = YDL({'format': u'20/47'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'47')
+
+        ydl = YDL({'format': u'20/71/worst'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'35')
+
+        ydl = YDL()
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'2')
+
+        ydl = YDL({'format': u'webm/mp4'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'47')
+
+        ydl = YDL({'format': u'3gp/40/mp4'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], u'35')
+
 
 if __name__ == '__main__':
     unittest.main()
index 296c0f9924940d145c95226ba0dd27a61f700253..d4654cc05a5f6dae132d818fd33067cbbc98b49f 100644 (file)
@@ -91,7 +91,7 @@ class YoutubeDL(object):
     downloadarchive:   File name of a file where all downloads are recorded.
                        Videos already present in the file are not downloaded
                        again.
-    
+
     The following parameters are not used by YoutubeDL itself, they are used by
     the FileDownloader:
     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
@@ -216,10 +216,10 @@ class YoutubeDL(object):
         If stderr is a tty file the 'WARNING:' will be colored
         '''
         if sys.stderr.isatty() and os.name != 'nt':
-            _msg_header=u'\033[0;33mWARNING:\033[0m'
+            _msg_header = u'\033[0;33mWARNING:\033[0m'
         else:
-            _msg_header=u'WARNING:'
-        warning_message=u'%s %s' % (_msg_header,message)
+            _msg_header = u'WARNING:'
+        warning_message = u'%s %s' % (_msg_header, message)
         self.to_stderr(warning_message)
 
     def report_error(self, message, tb=None):
@@ -234,19 +234,6 @@ class YoutubeDL(object):
         error_message = u'%s %s' % (_msg_header, message)
         self.trouble(error_message, tb)
 
-    def slow_down(self, start_time, byte_counter):
-        """Sleep if the download speed is over the rate limit."""
-        rate_limit = self.params.get('ratelimit', None)
-        if rate_limit is None or byte_counter == 0:
-            return
-        now = time.time()
-        elapsed = now - start_time
-        if elapsed <= 0.0:
-            return
-        speed = float(byte_counter) / elapsed
-        if speed > rate_limit:
-            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
-
     def report_writedescription(self, descfn):
         """ Report that the description file is being written """
         self.to_screen(u'[info] Writing video description to: ' + descfn)
@@ -288,13 +275,15 @@ class YoutubeDL(object):
             if template_dict['playlist_index'] is not None:
                 template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']
 
-            sanitize = lambda k,v: sanitize_filename(
+            sanitize = lambda k, v: sanitize_filename(
                 u'NA' if v is None else compat_str(v),
                 restricted=self.params.get('restrictfilenames'),
-                is_id=(k==u'id'))
-            template_dict = dict((k, sanitize(k, v)) for k,v in template_dict.items())
+                is_id=(k == u'id'))
+            template_dict = dict((k, sanitize(k, v))
+                                 for k, v in template_dict.items())
 
-            filename = self.params['outtmpl'] % template_dict
+            tmpl = os.path.expanduser(self.params['outtmpl'])
+            filename = tmpl % template_dict
             return filename
         except KeyError as err:
             self.report_error(u'Erroneous output template')
@@ -328,14 +317,14 @@ class YoutubeDL(object):
             return (u'%(title)s has already been recorded in archive'
                     % info_dict)
         return None
-        
+
     def extract_info(self, url, download=True, ie_key=None, extra_info={}):
         '''
         Returns a list with a dictionary for each video we find.
         If 'download', also downloads the videos.
         extra_info is a dict containing the extra values to add to each result
          '''
-        
+
         if ie_key:
             ies = [self.get_info_extractor(ie_key)]
         else:
@@ -377,7 +366,7 @@ class YoutubeDL(object):
                     raise
         else:
             self.report_error(u'no suitable InfoExtractor: %s' % url)
-        
+
     def process_ie_result(self, ie_result, download=True, extra_info={}):
         """
         Take the result of the ie(may be modified) and resolve all unresolved
@@ -401,7 +390,7 @@ class YoutubeDL(object):
         elif result_type == 'playlist':
             # We process each entry in the playlist
             playlist = ie_result.get('title', None) or ie_result.get('id', None)
-            self.to_screen(u'[download] Downloading playlist: %s'  % playlist)
+            self.to_screen(u'[download] Downloading playlist: %s' % playlist)
 
             playlist_results = []
 
@@ -419,12 +408,12 @@ class YoutubeDL(object):
             self.to_screen(u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
                 (ie_result['extractor'], playlist, n_all_entries, n_entries))
 
-            for i,entry in enumerate(entries,1):
-                self.to_screen(u'[download] Downloading video #%s of %s' %(i, n_entries))
+            for i, entry in enumerate(entries, 1):
+                self.to_screen(u'[download] Downloading video #%s of %s' % (i, n_entries))
                 extra = {
-                         'playlist': playlist, 
-                         'playlist_index': i + playliststart,
-                         }
+                    'playlist': playlist,
+                    'playlist_index': i + playliststart,
+                }
                 if not 'extractor' in entry:
                     # We set the extractor, if it's an url it will be set then to
                     # the new extractor, but if it's already a video we must make
@@ -448,6 +437,22 @@ class YoutubeDL(object):
         else:
             raise Exception('Invalid result type: %s' % result_type)
 
+    def select_format(self, format_spec, available_formats):
+        if format_spec == 'best' or format_spec is None:
+            return available_formats[-1]
+        elif format_spec == 'worst':
+            return available_formats[0]
+        else:
+            extensions = [u'mp4', u'flv', u'webm', u'3gp']
+            if format_spec in extensions:
+                filter_f = lambda f: f['ext'] == format_spec
+            else:
+                filter_f = lambda f: f['format_id'] == format_spec
+            matches = list(filter(filter_f, available_formats))
+            if matches:
+                return matches[-1]
+        return None
+
     def process_video_result(self, info_dict, download=True):
         assert info_dict.get('_type', 'video') == 'video'
 
@@ -457,8 +462,9 @@ class YoutubeDL(object):
             info_dict['playlist_index'] = None
 
         # This extractors handle format selection themselves
-        if info_dict['extractor'] in [u'youtube', u'Youku', u'YouPorn', u'mixcloud']:
-            self.process_info(info_dict)
+        if info_dict['extractor'] in [u'youtube', u'Youku', u'mixcloud']:
+            if download:
+                self.process_info(info_dict)
             return info_dict
 
         # We now pick which formats have to be downloaded
@@ -470,17 +476,14 @@ class YoutubeDL(object):
 
         # We check that all the formats have the format and format_id fields
         for (i, format) in enumerate(formats):
-            if format.get('format') is None:
-                if format.get('height') is not None:
-                    if format.get('width') is not None:
-                        format_desc = u'%sx%s' % (format['width'], format['height'])
-                    else:
-                        format_desc = u'%sp' % format['height']
-                else:
-                    format_desc = '???'
-                format['format'] = format_desc
             if format.get('format_id') is None:
                 format['format_id'] = compat_str(i)
+            if format.get('format') is None:
+                format['format'] = u'{id} - {res}{note}'.format(
+                    id=format['format_id'],
+                    res=self.format_resolution(format),
+                    note=u' ({})'.format(format['format_note']) if format.get('format_note') is not None else '',
+                )
 
         if self.params.get('listformats', None):
             self.list_formats(info_dict)
@@ -502,22 +505,20 @@ class YoutubeDL(object):
             formats = sorted(formats, key=_free_formats_key)
 
         req_format = self.params.get('format', 'best')
+        if req_format is None:
+            req_format = 'best'
         formats_to_download = []
-        if req_format == 'best' or req_format is None:
-            formats_to_download = [formats[-1]]
-        elif req_format == 'worst':
-            formats_to_download = [formats[0]]
         # The -1 is for supporting YoutubeIE
-        elif req_format in ('-1', 'all'):
+        if req_format in ('-1', 'all'):
             formats_to_download = formats
         else:
-            # We can accept formats requestd in the format: 34/10/5, we pick
+            # We can accept formats requestd in the format: 34/5/best, we pick
             # the first that is available, starting from left
             req_formats = req_format.split('/')
             for rf in req_formats:
-                matches = filter(lambda f:f['format_id'] == rf ,formats)
-                if matches:
-                    formats_to_download = [matches[0]]
+                selected_format = self.select_format(rf, formats)
+                if selected_format is not None:
+                    formats_to_download = [selected_format]
                     break
         if not formats_to_download:
             raise ExtractorError(u'requested format not available')
@@ -608,20 +609,20 @@ class YoutubeDL(object):
 
         if self.params.get('writeannotations', False):
             try:
-               annofn = filename + u'.annotations.xml'
-               self.report_writeannotations(annofn)
-               with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
-                   annofile.write(info_dict['annotations'])
+                annofn = filename + u'.annotations.xml'
+                self.report_writeannotations(annofn)
+                with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
+                    annofile.write(info_dict['annotations'])
             except (KeyError, TypeError):
                 self.report_warning(u'There are no annotations to write.')
             except (OSError, IOError):
-                 self.report_error(u'Cannot write annotations file: ' + annofn)
-                 return
+                self.report_error(u'Cannot write annotations file: ' + annofn)
+                return
 
         subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                        self.params.get('writeautomaticsub')])
 
-        if  subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
+        if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
             # subtitles download errors are already managed as troubles in relevant IE
             # that way it will silently go on when used with unsupporting IE
             subtitles = info_dict['subtitles']
@@ -643,7 +644,7 @@ class YoutubeDL(object):
             infofn = filename + u'.info.json'
             self.report_writeinfojson(infofn)
             try:
-                json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
+                json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle'])
                 write_json_file(json_info_dict, encodeFilename(infofn))
             except (OSError, IOError):
                 self.report_error(u'Cannot write metadata to JSON file ' + infofn)
@@ -713,7 +714,7 @@ class YoutubeDL(object):
         keep_video = None
         for pp in self._pps:
             try:
-                keep_video_wish,new_info = pp.run(info)
+                keep_video_wish, new_info = pp.run(info)
                 if keep_video_wish is not None:
                     if keep_video_wish:
                         keep_video = keep_video_wish
@@ -752,16 +753,31 @@ class YoutubeDL(object):
         with locked_file(fn, 'a', encoding='utf-8') as archive_file:
             archive_file.write(vid_id + u'\n')
 
+    @staticmethod
+    def format_resolution(format):
+        if format.get('height') is not None:
+            if format.get('width') is not None:
+                res = u'%sx%s' % (format['width'], format['height'])
+            else:
+                res = u'%sp' % format['height']
+        else:
+            res = '???'
+        return res
+
     def list_formats(self, info_dict):
         formats_s = []
         for format in info_dict.get('formats', [info_dict]):
-            formats_s.append("%s\t:\t%s\t[%s]" % (format['format_id'],
-                                                format['ext'],
-                                                format.get('format', '???'),
-                                                )
-                            )
+            formats_s.append(u'%-15s: %-5s     %-15s[%s]' % (
+                format['format_id'],
+                format['ext'],
+                format.get('format_note') or '-',
+                self.format_resolution(format),
+                )
+            )
         if len(formats_s) != 1:
-            formats_s[0]  += ' (worst)'
+            formats_s[0] += ' (worst)'
             formats_s[-1] += ' (best)'
         formats_s = "\n".join(formats_s)
-        self.to_screen(u"[info] Available formats for %s:\nformat code\textension\n%s" % (info_dict['id'], formats_s)) 
+        self.to_screen(u'[info] Available formats for %s:\n'
+            u'format code    extension   note           resolution\n%s' % (
+                info_dict['id'], formats_s))
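
Note: select_format() resolves one element of a '/'-separated format spec ('best', 'worst', a known extension, or a format_id) against the formats list, which process_video_result() keeps ordered worst-first/best-last; the first element of the spec that yields a match wins, as exercised by the new test above. A standalone sketch of that resolution (not the real class; same sample formats as the test):

    def select_format(format_spec, available_formats):
        # available_formats is assumed sorted worst -> best
        if format_spec == 'best' or format_spec is None:
            return available_formats[-1]
        if format_spec == 'worst':
            return available_formats[0]
        if format_spec in ('mp4', 'flv', 'webm', '3gp'):
            matches = [f for f in available_formats if f['ext'] == format_spec]
        else:
            matches = [f for f in available_formats if f['format_id'] == format_spec]
        return matches[-1] if matches else None

    formats = [{'format_id': '35', 'ext': 'mp4'},
               {'format_id': '45', 'ext': 'webm'},
               {'format_id': '47', 'ext': 'webm'},
               {'format_id': '2', 'ext': 'flv'}]

    for spec in '20/47/best'.split('/'):
        selected = select_format(spec, formats)
        if selected is not None:
            break
    print(selected['format_id'])  # '47' -- format 20 is not available, 47 is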
index 47acb4320db7142c58789ce120a567b1f46c7088..a33dec78560888861bab35ab43a90bee4b9924e0 100644 (file)
@@ -133,7 +133,7 @@ def parseOpts(overrideArguments=None):
 
     def _hide_login_info(opts):
         opts = list(opts)
-        for private_opt in ['-p', '--password', '-u', '--username']:
+        for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
             try:
                 i = opts.index(private_opt)
                 opts[i+1] = '<PRIVATE>'
@@ -179,6 +179,9 @@ def parseOpts(overrideArguments=None):
             action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
     general.add_option('-i', '--ignore-errors',
             action='store_true', dest='ignoreerrors', help='continue on download errors, for example to to skip unavailable videos in a playlist', default=False)
+    general.add_option('--abort-on-error',
+            action='store_false', dest='ignoreerrors',
+            help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
     general.add_option('--dump-user-agent',
             action='store_true', dest='dump_user_agent',
             help='display the current browser identification', default=False)
@@ -332,7 +335,10 @@ def parseOpts(overrideArguments=None):
             help=('output filename template. Use %(title)s to get the title, '
                   '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
                   '%(autonumber)s to get an automatically incremented number, '
-                  '%(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), '
+                  '%(ext)s for the filename extension, '
+                  '%(format)s for the format description (like "22 - 1280x720" or "HD"),'
+                  '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"),'
+                  '%(upload_date)s for the upload date (YYYYMMDD), '
                   '%(extractor)s for the provider (youtube, metacafe, etc), '
                   '%(id)s for the video id , %(playlist)s for the playlist the video is in, '
                   '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
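
Note: --abort-on-error is a store_false on the same `ignoreerrors` destination that -i/--ignore-errors sets to True, so whichever of the two flags appears later on the command line wins. A minimal, self-contained illustration of that optparse pattern (not youtube-dl's full parser):

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('-i', '--ignore-errors', action='store_true',
                      dest='ignoreerrors', default=False)
    parser.add_option('--abort-on-error', action='store_false',
                      dest='ignoreerrors')

    opts, args = parser.parse_args(['-i', '--abort-on-error'])
    print(opts.ignoreerrors)  # False -- the later flag overrides the earlier one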
index db69af361929fd7ff726d1a1df980730cad3630c..0d933986f10af717befe791eb4565ab40c5f48b7 100644 (file)
@@ -72,6 +72,7 @@ from .jeuxvideo import JeuxVideoIE
 from .jukebox import JukeboxIE
 from .justintv import JustinTVIE
 from .kankan import KankanIE
+from .keezmovies import KeezMoviesIE
 from .kickstarter import KickStarterIE
 from .keek import KeekIE
 from .liveleak import LiveLeakIE
@@ -94,6 +95,7 @@ from .ooyala import OoyalaIE
 from .orf import ORFIE
 from .pbs import PBSIE
 from .photobucket import PhotobucketIE
+from .pornhub import PornHubIE
 from .pornotube import PornotubeIE
 from .rbmaradio import RBMARadioIE
 from .redtube import RedTubeIE
@@ -109,6 +111,7 @@ from .slideshare import SlideshareIE
 from .sohu import SohuIE
 from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
 from .southparkstudios import SouthParkStudiosIE
+from .spankwire import SpankwireIE
 from .spiegel import SpiegelIE
 from .stanfordoc import StanfordOpenClassroomIE
 from .statigram import StatigramIE
@@ -121,6 +124,7 @@ from .tf1 import TF1IE
 from .thisav import ThisAVIE
 from .traileraddict import TrailerAddictIE
 from .trilulilu import TriluliluIE
+from .tube8 import Tube8IE
 from .tudou import TudouIE
 from .tumblr import TumblrIE
 from .tutv import TutvIE
index 82a785a19c34517c17da294ad64c1cbe7d22cba4..465df8cf081b3fe68eaef21eea1f01cd4ca124ce 100644 (file)
@@ -17,8 +17,8 @@ class AddAnimeIE(InfoExtractor):
     IE_NAME = u'AddAnime'
     _TEST = {
         u'url': u'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
-        u'file': u'24MR3YO5SAS9.flv',
-        u'md5': u'1036a0e0cd307b95bd8a8c3a5c8cfaf1',
+        u'file': u'24MR3YO5SAS9.mp4',
+        u'md5': u'72954ea10bc979ab5e2eb288b21425a0',
         u'info_dict': {
             u"description": u"One Piece 606",
             u"title": u"One Piece 606"
@@ -60,8 +60,10 @@ class AddAnimeIE(InfoExtractor):
                 note=u'Confirming after redirect')
             webpage = self._download_webpage(url, video_id)
 
-        video_url = self._search_regex(r"var normal_video_file = '(.*?)';",
+        video_url = self._search_regex(r"var (?:hq|normal)_video_file = '(.*?)';",
                                        webpage, u'video file URL')
+        
+        video_extension = video_url[-3:]  # mp4 or flv ?
         video_title = self._og_search_title(webpage)
         video_description = self._og_search_description(webpage)
 
@@ -69,7 +71,7 @@ class AddAnimeIE(InfoExtractor):
             '_type': 'video',
             'id':  video_id,
             'url': video_url,
-            'ext': 'flv',
+            'ext': video_extension,
             'title': video_title,
             'description': video_description
         }
index 5ee8a67b14699a330914cd4f0e0f627ca9fca5a5..d39b489518f7bc699854d9ddeb7cd4fa357a6c2b 100644 (file)
@@ -174,12 +174,27 @@ class ArteTVPlus7IE(InfoExtractor):
         # Some formats use the m3u8 protocol
         formats = filter(lambda f: f.get('videoFormat') != 'M3U8', formats)
         # We order the formats by quality
-        formats = sorted(formats, key=lambda f: int(f.get('height',-1)))
+        formats = list(formats) # in python3 filter returns an iterator
+        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
+            sort_key = lambda f: ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
+        else:
+            sort_key = lambda f: int(f.get('height',-1))
+        formats = sorted(formats, key=sort_key)
         # Prefer videos without subtitles in the same language
         formats = sorted(formats, key=lambda f: re.match(r'VO(F|A)-STM\1', f.get('versionCode', '')) is None)
         # Pick the best quality
         def _format(format_info):
+            quality = format_info['quality']
+            m_quality = re.match(r'\w*? - (\d*)p', quality)
+            if m_quality is not None:
+                quality = m_quality.group(1)
+            if format_info.get('versionCode') is not None:
+                format_id = u'%s-%s' % (quality, format_info['versionCode'])
+            else:
+                format_id = quality
             info = {
+                'format_id': format_id,
+                'format_note': format_info.get('versionLibelle'),
                 'width': format_info.get('width'),
                 'height': format_info.get('height'),
             }
@@ -192,8 +207,6 @@ class ArteTVPlus7IE(InfoExtractor):
                 info['ext'] = determine_ext(info['url'])
             return info
         info_dict['formats'] = [_format(f) for f in formats]
-        # TODO: Remove when #980 has been merged 
-        info_dict.update(info_dict['formats'][-1])
 
         return info_dict
 
@@ -207,7 +220,7 @@ class ArteTVCreativeIE(ArteTVPlus7IE):
         u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
         u'file': u'050489-002.mp4',
         u'info_dict': {
-            u'title': u'Agentur Amateur #2 - Corporate Design',
+            u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',
         },
     }
 
index 6925b96c2ee1fd1e09624638805597259b068dcd..2fe1033f01d3ccc53b51f6b8c8b24efe490f5cb6 100644 (file)
@@ -55,30 +55,30 @@ class CinemassacreIE(InfoExtractor):
             video_description = None
 
         playerdata = self._download_webpage(playerdata_url, video_id)
-        base_url = self._html_search_regex(r'\'streamer\': \'(?P<base_url>rtmp://.*?)/(?:vod|Cinemassacre)\'',
-            playerdata, u'base_url')
-        base_url += '/Cinemassacre/'
-        # Important: The file names in playerdata are not used by the player and even wrong for some videos
-        sd_file = 'Cinemassacre-%s_high.mp4' % video_id
-        hd_file = 'Cinemassacre-%s.mp4' % video_id
-        video_thumbnail = 'http://image.screenwavemedia.com/Cinemassacre/Cinemassacre-%s_thumb_640x360.jpg' % video_id
+        url = self._html_search_regex(r'\'streamer\': \'(?P<url>[^\']+)\'', playerdata, u'url')
+
+        sd_file = self._html_search_regex(r'\'file\': \'(?P<sd_file>[^\']+)\'', playerdata, u'sd_file')
+        hd_file = self._html_search_regex(r'\'?file\'?: "(?P<hd_file>[^"]+)"', playerdata, u'hd_file')
+        video_thumbnail = self._html_search_regex(r'\'image\': \'(?P<thumbnail>[^\']+)\'', playerdata, u'thumbnail', fatal=False)
 
         formats = [
             {
-                'url': base_url + sd_file,
+                'url': url,
+                'play_path': 'mp4:' + sd_file,
                 'ext': 'flv',
                 'format': 'sd',
                 'format_id': 'sd',
             },
             {
-                'url': base_url + hd_file,
+                'url': url,
+                'play_path': 'mp4:' + hd_file,
                 'ext': 'flv',
                 'format': 'hd',
                 'format_id': 'hd',
             },
         ]
 
-        info = {
+        return {
             'id': video_id,
             'title': video_title,
             'formats': formats,
@@ -86,6 +86,3 @@ class CinemassacreIE(InfoExtractor):
             'upload_date': video_date,
             'thumbnail': video_thumbnail,
         }
-        # TODO: Remove when #980 has been merged
-        info.update(formats[-1])
-        return info
index d4af3b5ebf119ef6612093bd6a5c03d1cc6cc380..aaa5c24c8565fffb85295188daf7bbd52f5aae89 100644 (file)
@@ -14,6 +14,7 @@ from ..utils import (
     clean_html,
     compiled_regex_type,
     ExtractorError,
+    RegexNotFoundError,
     unescapeHTML,
 )
 
@@ -61,9 +62,12 @@ class InfoExtractor(object):
                     * ext       Will be calculated from url if missing
                     * format    A human-readable description of the format
                                 ("mp4 container with h264/opus").
-                                Calculated from width and height if missing.
+                                Calculated from the format_id, width, height 
+                                and format_note fields if missing.
                     * format_id A short description of the format
                                 ("mp4_h264_opus" or "19")
+                    * format_note Additional info about the format
+                                ("3D" or "DASH video")
                     * width     Width of the video, if known
                     * height    Height of the video, if known
 
@@ -228,7 +232,7 @@ class InfoExtractor(object):
         Perform a regex search on the given string, using a single or a list of
         patterns returning the first matching group.
         In case of failure return a default value or raise a WARNING or a
-        ExtractorError, depending on fatal, specifying the field name.
+        RegexNotFoundError, depending on fatal, specifying the field name.
         """
         if isinstance(pattern, (str, compat_str, compiled_regex_type)):
             mobj = re.search(pattern, string, flags)
@@ -248,7 +252,7 @@ class InfoExtractor(object):
         elif default is not None:
             return default
         elif fatal:
-            raise ExtractorError(u'Unable to extract %s' % _name)
+            raise RegexNotFoundError(u'Unable to extract %s' % _name)
         else:
             self._downloader.report_warning(u'unable to extract %s; '
                 u'please report this issue on http://yt-dl.org/bug' % _name)
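
Note: when an extractor leaves the human-readable 'format' field unset, it is now assembled from format_id, the resolution and the new format_note field (see format_resolution() and the '{id} - {res}{note}' template in the YoutubeDL.py hunk above). A hedged illustration with an invented format dict:

    # Invented format dict; mirrors the label construction in YoutubeDL.py.
    fmt = {'format_id': '22', 'width': 1280, 'height': 720, 'format_note': 'DASH video'}

    if fmt.get('height') is not None:
        res = ('%sx%s' % (fmt['width'], fmt['height'])
               if fmt.get('width') is not None else '%sp' % fmt['height'])
    else:
        res = '???'
    note = ' ({})'.format(fmt['format_note']) if fmt.get('format_note') is not None else ''
    print(u'{id} - {res}{note}'.format(id=fmt['format_id'], res=res, note=note))
    # 22 - 1280x720 (DASH video)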
index 7d83539469d3d7ff120f916cc837a60bacfe8390..4c0488245cd60df341df74110a434ed554fa304e 100644 (file)
@@ -28,6 +28,15 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
     _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
     IE_NAME = u'dailymotion'
+
+    _FORMATS = [
+        (u'stream_h264_ld_url', u'ld'),
+        (u'stream_h264_url', u'standard'),
+        (u'stream_h264_hq_url', u'hq'),
+        (u'stream_h264_hd_url', u'hd'),
+        (u'stream_h264_hd1080_url', u'hd180'),
+    ]
+
     _TESTS = [
         {
             u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
@@ -60,7 +69,6 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
         video_id = mobj.group(1).split('_')[0].split('?')[0]
 
-        video_extension = 'mp4'
         url = 'http://www.dailymotion.com/video/%s' % video_id
 
         # Retrieve video webpage to extract further information
@@ -99,18 +107,24 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
             msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
             raise ExtractorError(msg, expected=True)
 
-        # TODO: support choosing qualities
-
-        for key in ['stream_h264_hd1080_url','stream_h264_hd_url',
-                    'stream_h264_hq_url','stream_h264_url',
-                    'stream_h264_ld_url']:
-            if info.get(key):#key in info and info[key]:
-                max_quality = key
-                self.to_screen(u'Using %s' % key)
-                break
-        else:
+        formats = []
+        for (key, format_id) in self._FORMATS:
+            video_url = info.get(key)
+            if video_url is not None:
+                m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
+                if m_size is not None:
+                    width, height = m_size.group(1), m_size.group(2)
+                else:
+                    width, height = None, None
+                formats.append({
+                    'url': video_url,
+                    'ext': 'mp4',
+                    'format_id': format_id,
+                    'width': width,
+                    'height': height,
+                })
+        if not formats:
             raise ExtractorError(u'Unable to extract video URL')
-        video_url = info[max_quality]
 
         # subtitles
         video_subtitles = self.extract_subtitles(video_id)
@@ -120,11 +134,10 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
         return [{
             'id':       video_id,
-            'url':      video_url,
+            'formats': formats,
             'uploader': video_uploader,
             'upload_date':  video_upload_date,
             'title':    self._og_search_title(webpage),
-            'ext':      video_extension,
             'subtitles':    video_subtitles,
             'thumbnail': info['thumbnail_url']
         }]
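
Note: instead of picking a single "max quality" stream, the Dailymotion extractor now builds one format per available stream_h264_* key and, where possible, reads the resolution straight out of the stream URL. An illustration of that parsing with a made-up URL:

    import re

    # Made-up URL; only the H264-<width>x<height> parsing is illustrated.
    video_url = 'http://www.example.invalid/cdn/H264-1280x720/video.mp4'
    m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
    width, height = (m_size.group(1), m_size.group(2)) if m_size else (None, None)
    print(width, height)  # 1280 720 -- kept as strings, exactly as the extractor stores them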
index cced0681171a3dbc818e62ee2551da1958eacae2..2cfbcd363c0db4f2505d8da7120d7c3161a7b0a9 100644 (file)
@@ -101,7 +101,7 @@ class EightTracksIE(InfoExtractor):
         first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
         next_url = first_url
         res = []
-        for i in itertools.count():
+        for i in range(track_count):
             api_json = self._download_webpage(next_url, playlist_id,
                 note=u'Downloading song information %s/%s' % (str(i+1), track_count),
                 errnote=u'Failed to download song information')
@@ -116,7 +116,5 @@ class EightTracksIE(InfoExtractor):
                 'ext': 'm4a',
             }
             res.append(info)
-            if api_data['set']['at_last_track']:
-                break
             next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
         return res
index 3443f19c5f9bb8e2853c95b4ca5e153b395a701f..c7455657907f7c3a67e4544b7e25819c29042dc5 100644 (file)
@@ -11,14 +11,14 @@ class ExfmIE(InfoExtractor):
     _SOUNDCLOUD_URL = r'(?:http://)?(?:www\.)?api\.soundcloud.com/tracks/([^/]+)/stream'
     _TESTS = [
         {
-            u'url': u'http://ex.fm/song/1bgtzg',
-            u'file': u'95223130.mp3',
-            u'md5': u'8a7967a3fef10e59a1d6f86240fd41cf',
+            u'url': u'http://ex.fm/song/eh359',
+            u'file': u'44216187.mp3',
+            u'md5': u'e45513df5631e6d760970b14cc0c11e7',
             u'info_dict': {
-                u"title": u"We Can't Stop - Miley Cyrus",
-                u"uploader": u"Miley Cyrus",
-                u'upload_date': u'20130603',
-                u'description': u'Download "We Can\'t Stop" \r\niTunes: http://smarturl.it/WeCantStop?IQid=SC\r\nAmazon: http://smarturl.it/WeCantStopAMZ?IQid=SC',
+                u"title": u"Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive",
+                u"uploader": u"deadjournalist",
+                u'upload_date': u'20120424',
+                u'description': u'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
             },
             u'note': u'Soundcloud song',
         },
index 9d1bc07510c3148b8ed8659d697c46017c6a36ff..f8bdfc2d33c9f00b9f902a4303eb7024f4646312 100644 (file)
@@ -19,7 +19,8 @@ class FacebookIE(InfoExtractor):
     """Information Extractor for Facebook"""
 
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
-    _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
+    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
+    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
     _NETRC_MACHINE = 'facebook'
     IE_NAME = u'facebook'
     _TEST = {
@@ -36,50 +37,56 @@ class FacebookIE(InfoExtractor):
         """Report attempt to log in."""
         self.to_screen(u'Logging in')
 
-    def _real_initialize(self):
-        if self._downloader is None:
-            return
-
-        useremail = None
-        password = None
-        downloader_params = self._downloader.params
-
-        # Attempt to use provided username and password or .netrc data
-        if downloader_params.get('username', None) is not None:
-            useremail = downloader_params['username']
-            password = downloader_params['password']
-        elif downloader_params.get('usenetrc', False):
-            try:
-                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
-                if info is not None:
-                    useremail = info[0]
-                    password = info[2]
-                else:
-                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-            except (IOError, netrc.NetrcParseError) as err:
-                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
-                return
-
+    def _login(self):
+        (useremail, password) = self._get_login_info()
         if useremail is None:
             return
 
-        # Log in
+        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
+        login_page_req.add_header('Cookie', 'locale=en_US')
+        self.report_login()
+        login_page = self._download_webpage(login_page_req, None, note=False,
+            errnote=u'Unable to download login page')
+        lsd = self._search_regex(r'"lsd":"(\w*?)"', login_page, u'lsd')
+        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, u'lgnrnd')
+
         login_form = {
             'email': useremail,
             'pass': password,
-            'login': 'Log+In'
+            'lsd': lsd,
+            'lgnrnd': lgnrnd,
+            'next': 'http://facebook.com/home.php',
+            'default_persistent': '0',
+            'legacy_return': '1',
+            'timezone': '-60',
+            'trynum': '1',
             }
         request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:
-            self.report_login()
             login_results = compat_urllib_request.urlopen(request).read()
             if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                 self._downloader.report_warning(u'unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
                 return
+
+            check_form = {
+                'fb_dtsg': self._search_regex(r'"fb_dtsg":"(.*?)"', login_results, u'fb_dtsg'),
+                'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, u'nh'),
+                'name_action_selected': 'dont_save',
+                'submit[Continue]': self._search_regex(r'<input value="(.*?)" name="submit\[Continue\]"', login_results, u'continue'),
+            }
+            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, compat_urllib_parse.urlencode(check_form))
+            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+            check_response = compat_urllib_request.urlopen(check_req).read()
+            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
+                self._downloader.report_warning(u'Unable to confirm login, you have to login in your brower and authorize the login.')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
             return
 
+    def _real_initialize(self):
+        self._login()
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -93,7 +100,13 @@ class FacebookIE(InfoExtractor):
         AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
         m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
         if not m:
-            raise ExtractorError(u'Cannot parse data')
+            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
+            if m_msg is not None:
+                raise ExtractorError(
+                    u'The video is not available, Facebook said: "%s"' % m_msg.group(1),
+                    expected=True)
+            else:
+                raise ExtractorError(u'Cannot parse data')
         data = dict(json.loads(m.group(1)))
         params_raw = compat_urllib_parse.unquote(data['params'])
         params = json.loads(params_raw)
index deaa4ed2d9bc14406b6a7d3d6e8b015c6fcf915d..89ed08db4cbb99f9381013813fa03a19474c8e24 100644 (file)
@@ -5,8 +5,6 @@ import xml.etree.ElementTree
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
-    clean_html,
-    get_element_by_attribute,
 )
 
 
@@ -47,12 +45,12 @@ class FazIE(InfoExtractor):
                 'format_id': code.lower(),
             })
 
-        descr_html = get_element_by_attribute('class', 'Content Copy', webpage)
+        descr = self._html_search_regex(r'<p class="Content Copy">(.*?)</p>', webpage, u'description')
         info = {
             'id': video_id,
             'title': self._og_search_title(webpage),
             'formats': formats,
-            'description': clean_html(descr_html),
+            'description': descr,
             'thumbnail': config.find('STILL/STILL_BIG').text,
         }
         # TODO: Remove when #980 has been merged
index 69e0a7bd271dd0965f5c1f6f9c3a7cdce7a3da0b..2c8fcf5ae5df24a1dedc5e461feb5ac2300688a6 100644 (file)
@@ -25,7 +25,7 @@ class GenericIE(InfoExtractor):
         {
             u'url': u'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
             u'file': u'13601338388002.mp4',
-            u'md5': u'85b90ccc9d73b4acd9138d3af4c27f89',
+            u'md5': u'6e15c93721d7ec9e9ca3fdbf07982cfd',
             u'info_dict': {
                 u"uploader": u"www.hodiho.fr",
                 u"title": u"R\u00e9gis plante sa Jeep"
@@ -41,7 +41,17 @@ class GenericIE(InfoExtractor):
                 u"uploader_id": u"skillsmatter",
                 u"uploader": u"Skills Matter",
             }
-        }
+        },
+        # bandcamp page with custom domain
+        {
+            u'url': u'http://bronyrock.com/track/the-pony-mash',
+            u'file': u'3235767654.mp3',
+            u'info_dict': {
+                u'title': u'The Pony Mash',
+                u'uploader': u'M_Pallante',
+            },
+            u'skip': u'There is a limit of 200 free downloads / month for the test song',
+        },
     ]
 
     def report_download_webpage(self, video_id):
@@ -155,6 +165,12 @@ class GenericIE(InfoExtractor):
             surl = unescapeHTML(mobj.group(1))
             return self.url_result(surl, 'Youtube')
 
+        # Look for Bandcamp pages with custom domain
+        mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
+        if mobj is not None:
+            burl = unescapeHTML(mobj.group(1))
+            return self.url_result(burl, 'Bandcamp')
+
         # Start with something easy: JW Player in SWFObject
         mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
         if mobj is None:
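
Note: the new generic-page check recognises Bandcamp pages served from custom domains by looking at the canonical og:url meta tag, which still points at bandcamp.com, and hands that URL to the Bandcamp extractor. Illustration with an invented HTML snippet:

    import re

    # Invented snippet; the real page is downloaded by GenericIE.
    webpage = '<meta property="og:url" content="http://example.bandcamp.com/track/the-pony-mash">'
    mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
    if mobj is not None:
        print(mobj.group(1))  # this URL is what gets passed to url_result(..., 'Bandcamp')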
index ab12d7e9381317b4dfddb679eced39db2f752ed4..2570746b2047a1d1ae0a60b48970b1414f168e40 100644 (file)
@@ -41,9 +41,9 @@ class GooglePlusIE(InfoExtractor):
 
         # Extract update date
         upload_date = self._html_search_regex(
-            r'''(?x)<a.+?class="o-T-s\s[^"]+"\s+style="display:\s*none"\s*>
+            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                     ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
-            webpage, u'upload date', fatal=False)
+            webpage, u'upload date', fatal=False, flags=re.VERBOSE)
         if upload_date:
             # Convert timestring to a format suitable for filename
             upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
index 5986459d6dfdf7358b7d7a2e4bc139a579a01265..be8e05f539d7f64c301f7a63a488aedbf9d129cd 100644 (file)
@@ -19,7 +19,7 @@ class InternetVideoArchiveIE(InfoExtractor):
         u'info_dict': {
             u'title': u'SKYFALL',
             u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
-            u'duration': 156,
+            u'duration': 153,
         },
     }
 
@@ -74,7 +74,7 @@ class InternetVideoArchiveIE(InfoExtractor):
             })
         formats = sorted(formats, key=lambda f: f['bitrate'])
 
-        info = {
+        return {
             'id': video_id,
             'title': item.find('title').text,
             'formats': formats,
@@ -82,6 +82,3 @@ class InternetVideoArchiveIE(InfoExtractor):
             'description': item.find('description').text,
             'duration': int(attr['duration']),
         }
-        # TODO: Remove when #980 has been merged
-        info.update(formats[-1])
-        return info
diff --git a/youtube_dl/extractor/keezmovies.py b/youtube_dl/extractor/keezmovies.py
new file mode 100644 (file)
index 0000000..5e05900
--- /dev/null
@@ -0,0 +1,61 @@
+import os
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urllib_parse,
+)
+from ..aes import (
+    aes_decrypt_text
+)
+
+class KeezMoviesIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>keezmovies\.com/video/.+?(?P<videoid>[0-9]+))'
+    _TEST = {
+        u'url': u'http://www.keezmovies.com/video/petite-asian-lady-mai-playing-in-bathtub-1214711',
+        u'file': u'1214711.mp4',
+        u'md5': u'6e297b7e789329923fcf83abb67c9289',
+        u'info_dict': {
+            u"title": u"Petite Asian Lady Mai Playing In Bathtub",
+            u"age_limit": 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+        url = 'http://www.' + mobj.group('url')
+
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'age_verified=1')
+        webpage = self._download_webpage(req, video_id)
+
+        # embedded video
+        mobj = re.search(r'href="([^"]+)"></iframe>', webpage)
+        if mobj:
+            embedded_url = mobj.group(1)
+            return self.url_result(embedded_url)
+
+        video_title = self._html_search_regex(r'<h1 [^>]*>([^<]+)', webpage, u'title')
+        video_url = compat_urllib_parse.unquote(self._html_search_regex(r'video_url=(.+?)&amp;', webpage, u'video_url'))
+        if webpage.find('encrypted=true')!=-1:
+            password = self._html_search_regex(r'video_title=(.+?)&amp;', webpage, u'password')
+            video_url = aes_decrypt_text(video_url, password, 32).decode('utf-8')
+        path = compat_urllib_parse_urlparse( video_url ).path
+        extension = os.path.splitext( path )[1][1:]
+        format = path.split('/')[4].split('_')[:2]
+        format = "-".join( format )
+
+        age_limit = self._rta_search(webpage)
+
+        return {
+            'id': video_id,
+            'title': video_title,
+            'url': video_url,
+            'ext': extension,
+            'format': format,
+            'format_id': format,
+            'age_limit': age_limit,
+        }
index e537648ffb83564e56f43f7e1e21a949cc609925..234b9e80f3bf3fac48ff77f4c8b1ac9da7de2c7e 100644 (file)
@@ -23,7 +23,7 @@ class MetacafeIE(InfoExtractor):
     _TESTS = [{
         u"add_ie": ["Youtube"],
         u"url":  u"http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
-        u"file":  u"_aUehQsCQtM.flv",
+        u"file":  u"_aUehQsCQtM.mp4",
         u"info_dict": {
             u"upload_date": u"20090102",
             u"title": u"The Electric Company | \"Short I\" | PBS KIDS GO!",
index e8d43dd135ff29bc9471466f66b8b4e1b55eadea..224f56ac84b77647c2ff5468b41d229786da632e 100644 (file)
@@ -90,8 +90,8 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
              r'{statusIndex:0,index:0,.*?id:(.*?),'],
             webpage, u'category id')
         playlist_title = self._html_search_regex(
-            r'\?catid=%s">(.*?)</a>' % cat_id,
-            webpage, u'playlist title', flags=re.DOTALL)
+            r'tab0"[^>]*?>(.*?)</td>',
+            webpage, u'playlist title', flags=re.DOTALL).lower().capitalize()
 
         data = compat_urllib_parse.urlencode({
             'cid': cat_id,
index ab52ad4011851405e9a6b17f73720a8cd646860c..241cc160b9ca58bfc6b88bf9c12fe134df3b3d66 100644 (file)
@@ -20,7 +20,10 @@ class NowVideoIE(InfoExtractor):
 
         video_id = mobj.group('id')
         webpage_url = 'http://www.nowvideo.ch/video/' + video_id
+        embed_url = 'http://embed.nowvideo.ch/embed.php?v=' + video_id
         webpage = self._download_webpage(webpage_url, video_id)
+        embed_page = self._download_webpage(embed_url, video_id,
+            u'Downloading embed page')
 
         self.report_extraction(video_id)
 
@@ -28,7 +31,7 @@ class NowVideoIE(InfoExtractor):
             webpage, u'video title')
 
         video_key = self._search_regex(r'var fkzd="(.*)";',
-            webpage, u'video key')
+            embed_page, u'video key')
 
         api_call = "http://www.nowvideo.ch/api/player.api.php?file={0}&numOfErrors=0&cid=1&key={1}".format(video_id, video_key)
         api_response = self._download_webpage(api_call, video_id,
diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
new file mode 100644 (file)
index 0000000..5e2454f
--- /dev/null
@@ -0,0 +1,69 @@
+import os
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urllib_parse,
+    unescapeHTML,
+)
+from ..aes import (
+    aes_decrypt_text
+)
+
+class PornHubIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>pornhub\.com/view_video\.php\?viewkey=(?P<videoid>[0-9]+))'
+    _TEST = {
+        u'url': u'http://www.pornhub.com/view_video.php?viewkey=648719015',
+        u'file': u'648719015.mp4',
+        u'md5': u'882f488fa1f0026f023f33576004a2ed',
+        u'info_dict': {
+            u"uploader": u"BABES-COM", 
+            u"title": u"Seductive Indian beauty strips down and fingers her pink pussy",
+            u"age_limit": 18
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+        url = 'http://www.' + mobj.group('url')
+
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'age_verified=1')
+        webpage = self._download_webpage(req, video_id)
+
+        video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, u'title')
+        video_uploader = self._html_search_regex(r'<b>From: </b>(?:\s|<[^>]*>)*(.+?)<', webpage, u'uploader', fatal=False)
+        thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, u'thumbnail', fatal=False)
+        if thumbnail:
+            thumbnail = compat_urllib_parse.unquote(thumbnail)
+
+        video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
+        if webpage.find('"encrypted":true') != -1:
+            password = self._html_search_regex(r'"video_title":"([^"]+)', webpage, u'password').replace('+', ' ')
+            video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
+
+        formats = []
+        for video_url in video_urls:
+            path = compat_urllib_parse_urlparse(video_url).path
+            extension = os.path.splitext(path)[1][1:]
+            format = path.split('/')[5].split('_')[:2]
+            format = "-".join(format)
+            formats.append({
+                'url': video_url,
+                'ext': extension,
+                'format': format,
+                'format_id': format,
+            })
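+        # zero-pad each dash-separated component of the format string so the
+        # lexicographic sort orders formats numerically, lowest quality first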
+        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-'))))
+
+        return {
+            'id': video_id,
+            'uploader': video_uploader,
+            'title': video_title,
+            'thumbnail': thumbnail,
+            'formats': formats,
+            'age_limit': 18,
+        }
index 5d770ec285c3d1e3dcad04cfe49ca7780a9dd2b4..35dc5a9ffafb32d36e30f51988291dded6a6d18c 100644 (file)
@@ -16,7 +16,8 @@ class PornotubeIE(InfoExtractor):
         u'md5': u'374dd6dcedd24234453b295209aa69b6',
         u'info_dict': {
             u"upload_date": u"20090708", 
-            u"title": u"Marilyn-Monroe-Bathing"
+            u"title": u"Marilyn-Monroe-Bathing",
+            u"age_limit": 18
         }
     }
 
index 365aade564bd4f423317d427b3070c91e3e22ad3..994778e16758bc292a01e99e5292caee30a6d5c2 100644 (file)
@@ -10,7 +10,8 @@ class RedTubeIE(InfoExtractor):
         u'file': u'66418.mp4',
         u'md5': u'7b8c22b5e7098a3e1c09709df1126d2d',
         u'info_dict': {
-            u"title": u"Sucked on a toilet"
+            u"title": u"Sucked on a toilet",
+            u"age_limit": 18,
         }
     }
 
index d1b08c9bc050b3639ca252f2e84a373a8e4fa5f9..9ac7c3be8c8f1b97f46c944f08124eafbe8f1a5a 100644 (file)
@@ -63,13 +63,12 @@ class RTLnowIE(InfoExtractor):
         },
     },
     {
-        u'url': u'http://www.rtlnitronow.de/recht-ordnung/lebensmittelkontrolle-erlangenordnungsamt-berlin.php?film_id=127367&player=1&season=1',
-        u'file': u'127367.flv',
+        u'url': u'http://www.rtlnitronow.de/recht-ordnung/stadtpolizei-frankfurt-gerichtsvollzieher-leipzig.php?film_id=129679&player=1&season=1',
+        u'file': u'129679.flv',
         u'info_dict': {
-            u'upload_date': u'20130926', 
-            u'title': u'Recht & Ordnung - Lebensmittelkontrolle Erlangen/Ordnungsamt...',
-            u'description': u'Lebensmittelkontrolle Erlangen/Ordnungsamt Berlin',
-            u'thumbnail': u'http://autoimg.static-fra.de/nitronow/344787/1500x1500/image2.jpg',
+            u'upload_date': u'20131016', 
+            u'title': u'Recht & Ordnung - Stadtpolizei Frankfurt/ Gerichtsvollzieher...',
+            u'description': u'Stadtpolizei Frankfurt/ Gerichtsvollzieher Leipzig',
         },
         u'params': {
             u'skip_download': True,
diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py
new file mode 100644 (file)
index 0000000..32df0a7
--- /dev/null
@@ -0,0 +1,74 @@
+import os
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urllib_parse,
+    unescapeHTML,
+)
+from ..aes import (
+    aes_decrypt_text
+)
+
+class SpankwireIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<videoid>[0-9]+)/?)'
+    _TEST = {
+        u'url': u'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/',
+        u'file': u'103545.mp4',
+        u'md5': u'1b3f55e345500552dbc252a3e9c1af43',
+        u'info_dict': {
+            u"uploader": u"oreusz", 
+            u"title": u"Buckcherry`s X Rated Music Video Crazy Bitch",
+            u"description": u"Crazy Bitch X rated music video.",
+            u"age_limit": 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+        url = 'http://www.' + mobj.group('url')
+
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'age_verified=1')
+        webpage = self._download_webpage(req, video_id)
+
+        video_title = self._html_search_regex(r'<h1>([^<]+)', webpage, u'title')
+        video_uploader = self._html_search_regex(r'by:\s*<a [^>]*>(.+?)</a>', webpage, u'uploader', fatal=False)
+        thumbnail = self._html_search_regex(r'flashvars\.image_url = "([^"]+)', webpage, u'thumbnail', fatal=False)
+        description = self._html_search_regex(r'>\s*Description:</div>\s*<[^>]*>([^<]+)', webpage, u'description', fatal=False)
+        if not description:
+            description = None
+
+        video_urls = list(map(compat_urllib_parse.unquote, re.findall(r'flashvars\.quality_[0-9]{3}p = "([^"]+)', webpage)))
+        if webpage.find('flashvars.encrypted = "true"') != -1:
+            password = self._html_search_regex(r'flashvars\.video_title = "([^"]+)', webpage, u'password').replace('+', ' ')
+            video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
+
+        formats = []
+        for video_url in video_urls:
+            path = compat_urllib_parse_urlparse(video_url).path
+            extension = os.path.splitext(path)[1][1:]
+            format = path.split('/')[4].split('_')[:2]
+            format = "-".join(format)
+            formats.append({
+                'url': video_url,
+                'ext': extension,
+                'format': format,
+                'format_id': format,
+            })
+        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-'))))
+
+        age_limit = self._rta_search(webpage)
+
+        return {
+            'id': video_id,
+            'uploader': video_uploader,
+            'title': video_title,
+            'thumbnail': thumbnail,
+            'description': description,
+            'formats': formats,
+            'age_limit': age_limit,
+        }
diff --git a/youtube_dl/extractor/tube8.py b/youtube_dl/extractor/tube8.py
new file mode 100644 (file)
index 0000000..aea9d9a
--- /dev/null
@@ -0,0 +1,65 @@
+import os
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urllib_parse,
+    unescapeHTML,
+)
+from ..aes import (
+    aes_decrypt_text
+)
+
+class Tube8IE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>tube8\.com/[^/]+/[^/]+/(?P<videoid>[0-9]+)/?)'
+    _TEST = {
+        u'url': u'http://www.tube8.com/teen/kasia-music-video/229795/',
+        u'file': u'229795.mp4',
+        u'md5': u'e9e0b0c86734e5e3766e653509475db0',
+        u'info_dict': {
+            u"description": u"hot teen Kasia grinding", 
+            u"uploader": u"unknown", 
+            u"title": u"Kasia music video",
+            u"age_limit": 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+        url = 'http://www.' + mobj.group('url')
+
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'age_verified=1')
+        webpage = self._download_webpage(req, video_id)
+
+        video_title = self._html_search_regex(r'videotitle     ="([^"]+)', webpage, u'title')
+        video_description = self._html_search_regex(r'>Description:</strong>(.+?)<', webpage, u'description', fatal=False)
+        video_uploader = self._html_search_regex(r'>Submitted by:</strong>(?:\s|<[^>]*>)*(.+?)<', webpage, u'uploader', fatal=False)
+        thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, u'thumbnail', fatal=False)
+        if thumbnail:
+            thumbnail = thumbnail.replace('\\/', '/')
+
+        video_url = self._html_search_regex(r'"video_url":"([^"]+)', webpage, u'video_url')
+        if webpage.find('"encrypted":true')!=-1:
+            password = self._html_search_regex(r'"video_title":"([^"]+)', webpage, u'password')
+            video_url = aes_decrypt_text(video_url, password, 32).decode('utf-8')
+        path = compat_urllib_parse_urlparse(video_url).path
+        extension = os.path.splitext(path)[1][1:]
+        format = path.split('/')[4].split('_')[:2]
+        format = "-".join(format)
+
+        return {
+            'id': video_id,
+            'uploader': video_uploader,
+            'title': video_title,
+            'thumbnail': thumbnail,
+            'description': video_description,
+            'url': video_url,
+            'ext': extension,
+            'format': format,
+            'format_id': format,
+            'age_limit': 18,
+        }
index d89f8409443675f4359b85b96ffc0c2d49ec32e5..265dd5b91fd9e5c4fc5a0cac8a9f36dd36731cfe 100644 (file)
@@ -16,7 +16,7 @@ class VideoDetectiveIE(InfoExtractor):
         u'info_dict': {
             u'title': u'KICK-ASS 2',
             u'description': u'md5:65ba37ad619165afac7d432eaded6013',
-            u'duration': 138,
+            u'duration': 135,
         },
     }
 
index 2de56ac814462e3c3536ccac34b980b3e9a8bfb5..ef90fecc07b596c70f4ff6f3ff2cbb7c6fc86092 100644 (file)
@@ -1,3 +1,4 @@
+# encoding: utf-8
 import json
 import re
 import itertools
@@ -10,6 +11,7 @@ from ..utils import (
     clean_html,
     get_element_by_attribute,
     ExtractorError,
+    RegexNotFoundError,
     std_headers,
     unsmuggle_url,
 )
@@ -25,7 +27,7 @@ class VimeoIE(InfoExtractor):
         {
             u'url': u'http://vimeo.com/56015672',
             u'file': u'56015672.mp4',
-            u'md5': u'8879b6cc097e987f02484baf890129e5',
+            u'md5': u'ae7a1d8b183758a0506b0622f37dfa14',
             u'info_dict': {
                 u"upload_date": u"20121220", 
                 u"description": u"This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", 
@@ -54,7 +56,22 @@ class VimeoIE(InfoExtractor):
                 u'title': u'Kathy Sierra: Building the minimum Badass User, Business of Software',
                 u'uploader': u'The BLN & Business of Software',
             },
-        }
+        },
+        {
+            u'url': u'http://vimeo.com/68375962',
+            u'file': u'68375962.mp4',
+            u'md5': u'aaf896bdb7ddd6476df50007a0ac0ae7',
+            u'note': u'Video protected with password',
+            u'info_dict': {
+                u'title': u'youtube-dl password protected test video',
+                u'upload_date': u'20130614',
+                u'uploader_id': u'user18948128',
+                u'uploader': u'Jaime Marquínez Ferrándiz',
+            },
+            u'params': {
+                u'videopassword': u'youtube-dl',
+            },
+        },
     ]
 
     def _login(self):
@@ -129,18 +146,26 @@ class VimeoIE(InfoExtractor):
 
         # Extract the config JSON
         try:
-            config = self._search_regex([r' = {config:({.+?}),assets:', r'c=({.+?);'],
-                webpage, u'info section', flags=re.DOTALL)
-            config = json.loads(config)
-        except:
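+            # newer watch pages expose the player config behind a data-config-url
+            # attribute; fall back to the inline config otherwise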
+            try:
+                config_url = self._html_search_regex(
+                    r' data-config-url="(.+?)"', webpage, u'config URL')
+                config_json = self._download_webpage(config_url, video_id)
+                config = json.loads(config_json)
+            except RegexNotFoundError:
+                # For pro videos or player.vimeo.com urls
+                config = self._search_regex([r' = {config:({.+?}),assets:', r'c=({.+?);'],
+                    webpage, u'info section', flags=re.DOTALL)
+                config = json.loads(config)
+        except Exception as e:
             if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                 raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
 
-            if re.search('If so please provide the correct password.', webpage):
+            if re.search('<form[^>]+?id="pw_form"', webpage) is not None:
                 self._verify_video_password(url, video_id, webpage)
                 return self._real_extract(url)
             else:
-                raise ExtractorError(u'Unable to extract info section')
+                raise ExtractorError(u'Unable to extract info section',
+                                     cause=e)
 
         # Extract title
         video_title = config["video"]["title"]
@@ -179,46 +204,45 @@ class VimeoIE(InfoExtractor):
 
         # Vimeo specific: extract video codec and quality information
         # First consider quality, then codecs, then take everything
-        # TODO bind to format param
-        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
+        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
         files = { 'hd': [], 'sd': [], 'other': []}
         config_files = config["video"].get("files") or config["request"].get("files")
         for codec_name, codec_extension in codecs:
-            if codec_name in config_files:
-                if 'hd' in config_files[codec_name]:
-                    files['hd'].append((codec_name, codec_extension, 'hd'))
-                elif 'sd' in config_files[codec_name]:
-                    files['sd'].append((codec_name, codec_extension, 'sd'))
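+            # collect every quality listed for this codec; if the config carries
+            # no direct file URL, fall back to the play_redirect endpoint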
+            for quality in config_files.get(codec_name, []):
+                format_id = '-'.join((codec_name, quality)).lower()
+                key = quality if quality in files else 'other'
+                video_url = None
+                if isinstance(config_files[codec_name], dict):
+                    file_info = config_files[codec_name][quality]
+                    video_url = file_info.get('url')
                 else:
-                    files['other'].append((codec_name, codec_extension, config_files[codec_name][0]))
-
-        for quality in ('hd', 'sd', 'other'):
-            if len(files[quality]) > 0:
-                video_quality = files[quality][0][2]
-                video_codec = files[quality][0][0]
-                video_extension = files[quality][0][1]
-                self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
-                break
-        else:
-            raise ExtractorError(u'No known codec found')
+                    file_info = {}
+                if video_url is None:
+                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
+                        %(video_id, sig, timestamp, quality, codec_name.upper())
 
-        video_url = None
-        if isinstance(config_files[video_codec], dict):
-            video_url = config_files[video_codec][video_quality].get("url")
-        if video_url is None:
-            video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
-                        %(video_id, sig, timestamp, video_quality, video_codec.upper())
+                files[key].append({
+                    'ext': codec_extension,
+                    'url': video_url,
+                    'format_id': format_id,
+                    'width': file_info.get('width'),
+                    'height': file_info.get('height'),
+                })
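+        # return formats in the order 'other', 'sd', 'hd' (best quality last)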
+        formats = []
+        for key in ('other', 'sd', 'hd'):
+            formats += files[key]
+        if len(formats) == 0:
+            raise ExtractorError(u'No known codec found')
 
         return [{
             'id':       video_id,
-            'url':      video_url,
             'uploader': video_uploader,
             'uploader_id': video_uploader_id,
             'upload_date':  video_upload_date,
             'title':    video_title,
-            'ext':      video_extension,
             'thumbnail':    video_thumbnail,
             'description':  video_description,
+            'formats': formats,
         }]
 
 
index 361619694980d3260ff81aeed2d0d07294739a0e..7444d3393a25f8a49778a5bd589aa839591bd9d8 100644 (file)
@@ -19,7 +19,8 @@ class XHamsterIE(InfoExtractor):
         u'info_dict': {
             u"upload_date": u"20121014", 
             u"uploader_id": u"Ruseful2011", 
-            u"title": u"FemaleAgent Shy beauty takes the bait"
+            u"title": u"FemaleAgent Shy beauty takes the bait",
+            u"age_limit": 18,
         }
     },
     {
@@ -27,28 +28,33 @@ class XHamsterIE(InfoExtractor):
         u'file': u'2221348.flv',
         u'md5': u'e767b9475de189320f691f49c679c4c7',
         u'info_dict': {
-            u"upload_date": u"20130914", 
-            u"uploader_id": u"jojo747400", 
-            u"title": u"Britney Spears  Sexy Booty"
+            u"upload_date": u"20130914",
+            u"uploader_id": u"jojo747400",
+            u"title": u"Britney Spears  Sexy Booty",
+            u"age_limit": 18,
         }
     }]
 
     def _real_extract(self,url):
+        def extract_video_url(webpage):
+            mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage)
+            if mobj is None:
+                raise ExtractorError(u'Unable to extract media URL')
+            if len(mobj.group('server')) == 0:
+                return compat_urllib_parse.unquote(mobj.group('file'))
+            else:
+                return mobj.group('server')+'/key='+mobj.group('file')
+
+        def is_hd(webpage):
+            return webpage.find('<div class=\'icon iconHD\'>') != -1
+
         mobj = re.match(self._VALID_URL, url)
 
         video_id = mobj.group('id')
         seo = mobj.group('seo')
-        mrss_url = 'http://xhamster.com/movies/%s/%s.html?hd' % (video_id, seo)
+        mrss_url = 'http://xhamster.com/movies/%s/%s.html' % (video_id, seo)
         webpage = self._download_webpage(mrss_url, video_id)
 
-        mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract media URL')
-        if len(mobj.group('server')) == 0:
-            video_url = compat_urllib_parse.unquote(mobj.group('file'))
-        else:
-            video_url = mobj.group('server')+'/key='+mobj.group('file')
-
         video_title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>',
             webpage, u'title')
 
@@ -72,13 +78,34 @@ class XHamsterIE(InfoExtractor):
         video_thumbnail = self._search_regex(r'\'image\':\'(?P<thumbnail>[^\']+)\'',
             webpage, u'thumbnail', fatal=False)
 
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'ext':      determine_ext(video_url),
-            'title':    video_title,
+        age_limit = self._rta_search(webpage)
+
+        video_url = extract_video_url(webpage)
+        hd = is_hd(webpage)
+        formats = [{
+            'url': video_url,
+            'ext': determine_ext(video_url),
+            'format': 'hd' if hd else 'sd',
+            'format_id': 'hd' if hd else 'sd',
+        }]
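+        # the default page may only expose the SD stream; fetch the '?hd' variant
+        # and add the HD format when it is actually available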
+        if not hd:
+            webpage = self._download_webpage(mrss_url+'?hd', video_id)
+            if is_hd(webpage):
+                video_url = extract_video_url(webpage)
+                formats.append({
+                    'url': video_url,
+                    'ext': determine_ext(video_url),
+                    'format': 'hd',
+                    'format_id': 'hd',
+                })
+
+        return {
+            'id': video_id,
+            'title': video_title,
+            'formats': formats,
             'description': video_description,
             'upload_date': video_upload_date,
             'uploader_id': video_uploader_id,
-            'thumbnail': video_thumbnail
-        }]
+            'thumbnail': video_thumbnail,
+            'age_limit': age_limit,
+        }
index 40d8489000bb7a25f277c84d6d407b72ce778c8d..8a0eb1afdacc4cbe1cbb441b939cff3d7697cf4e 100644 (file)
@@ -18,7 +18,8 @@ class XNXXIE(InfoExtractor):
         u'file': u'1135332.flv',
         u'md5': u'0831677e2b4761795f68d417e0b7b445',
         u'info_dict': {
-            u"title": u"lida \u00bb Naked Funny Actress  (5)"
+            u"title": u"lida \u00bb Naked Funny Actress  (5)",
+            u"age_limit": 18,
         }
     }
 
@@ -50,4 +51,5 @@ class XNXXIE(InfoExtractor):
             'ext': 'flv',
             'thumbnail': video_thumbnail,
             'description': None,
+            'age_limit': 18,
         }]
index c3b9736d70a7af6fb90cd617312d25fd2d7cc740..90138d7e523a405c20bae8352c6233b5868860f5 100644 (file)
@@ -13,7 +13,8 @@ class XVideosIE(InfoExtractor):
         u'file': u'939581.flv',
         u'md5': u'1d0c835822f0a71a7bf011855db929d0',
         u'info_dict': {
-            u"title": u"Funny Porns By >>>>S<<<<<< -1"
+            u"title": u"Funny Porns By >>>>S<<<<<< -1",
+            u"age_limit": 18,
         }
     }
 
@@ -46,6 +47,7 @@ class XVideosIE(InfoExtractor):
             'ext': 'flv',
             'thumbnail': video_thumbnail,
             'description': None,
+            'age_limit': 18,
         }
 
         return [info]
index 1265639e821bd873b74aeea08811f8c22e966ba1..1fcc518acde9dbb08fef1ccb42a9ee7ae550967a 100644 (file)
@@ -13,7 +13,8 @@ class YouJizzIE(InfoExtractor):
         u'file': u'2189178.flv',
         u'md5': u'07e15fa469ba384c7693fd246905547c',
         u'info_dict': {
-            u"title": u"Zeichentrick 1"
+            u"title": u"Zeichentrick 1",
+            u"age_limit": 18,
         }
     }
 
@@ -25,6 +26,8 @@ class YouJizzIE(InfoExtractor):
         # Get webpage content
         webpage = self._download_webpage(url, video_id)
 
+        age_limit = self._rta_search(webpage)
+
         # Get the video title
         video_title = self._html_search_regex(r'<title>(?P<title>.*)</title>',
             webpage, u'title').strip()
@@ -60,6 +63,7 @@ class YouJizzIE(InfoExtractor):
                 'title': video_title,
                 'ext': 'flv',
                 'format': 'flv',
-                'player_url': embed_page_url}
+                'player_url': embed_page_url,
+                'age_limit': age_limit}
 
         return [info]
index b1f93dd1bb90d964916394d88d83aaaf153ba15b..e46a9b4d6ca33f5dd768c928aae0901677e7b2fc 100644 (file)
@@ -17,7 +17,7 @@ from ..aes import (
 )
 
 class YouPornIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))'
     _TEST = {
         u'url': u'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
         u'file': u'505835.mp4',
@@ -26,27 +26,15 @@ class YouPornIE(InfoExtractor):
             u"upload_date": u"20101221", 
             u"description": u"Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?", 
             u"uploader": u"Ask Dan And Jennifer", 
-            u"title": u"Sex Ed: Is It Safe To Masturbate Daily?"
+            u"title": u"Sex Ed: Is It Safe To Masturbate Daily?",
+            u"age_limit": 18,
         }
     }
 
-    def _print_formats(self, formats):
-        """Print all available formats"""
-        print(u'Available formats:')
-        print(u'ext\t\tformat')
-        print(u'---------------------------------')
-        for format in formats:
-            print(u'%s\t\t%s'  % (format['ext'], format['format']))
-
-    def _specific(self, req_format, formats):
-        for x in formats:
-            if x["format"] == req_format:
-                return x
-        return None
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('videoid')
+        url = 'http://www.' + mobj.group('url')
 
         req = compat_urllib_request.Request(url)
         req.add_header('Cookie', 'age_verified=1')
@@ -70,27 +58,22 @@ class YouPornIE(InfoExtractor):
         except KeyError:
             raise ExtractorError('Missing JSON parameter: ' + sys.exc_info()[1])
 
-        # Get all of the formats available
+        # Get all of the links from the page
         DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
         download_list_html = self._search_regex(DOWNLOAD_LIST_RE,
             webpage, u'download list').strip()
-
-        # Get all of the links from the page
-        LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
+        LINK_RE = r'<a href="([^"]+)">'
         links = re.findall(LINK_RE, download_list_html)
-        
-        # Get link of hd video if available
-        mobj = re.search(r'var encryptedQuality720URL = \'(?P<encrypted_video_url>[a-zA-Z0-9+/]+={0,2})\';', webpage)
-        if mobj != None:
-            encrypted_video_url = mobj.group(u'encrypted_video_url')
-            video_url = aes_decrypt_text(encrypted_video_url, video_title, 32).decode('utf-8')
-            links = [video_url] + links
+
+        # Get all encrypted links
+        encrypted_links = re.findall(r'var encryptedQuality[0-9]{3}URL = \'([a-zA-Z0-9+/]+={0,2})\';', webpage)
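+        # each encrypted link is AES-encrypted with the video title as the key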
+        for encrypted_link in encrypted_links:
+            link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8')
+            links.append(link)
         
         if not links:
             raise ExtractorError(u'ERROR: no known formats available for video')
 
-        self.to_screen(u'Links found: %d' % len(links))
-
         formats = []
         for link in links:
 
@@ -102,39 +85,32 @@ class YouPornIE(InfoExtractor):
             path = compat_urllib_parse_urlparse( video_url ).path
             extension = os.path.splitext( path )[1][1:]
             format = path.split('/')[4].split('_')[:2]
+
             # size = format[0]
             # bitrate = format[1]
             format = "-".join( format )
             # title = u'%s-%s-%s' % (video_title, size, bitrate)
 
             formats.append({
-                'id': video_id,
                 'url': video_url,
-                'uploader': video_uploader,
-                'upload_date': upload_date,
-                'title': video_title,
                 'ext': extension,
                 'format': format,
-                'thumbnail': thumbnail,
-                'description': video_description,
-                'age_limit': age_limit,
+                'format_id': format,
             })
 
-        if self._downloader.params.get('listformats', None):
-            self._print_formats(formats)
-            return
-
-        req_format = self._downloader.params.get('format', 'best')
-        self.to_screen(u'Format: %s' % req_format)
-
-        if req_format is None or req_format == 'best':
-            return [formats[0]]
-        elif req_format == 'worst':
-            return [formats[-1]]
-        elif req_format in ('-1', 'all'):
-            return formats
-        else:
-            format = self._specific( req_format, formats )
-            if format is None:
-                raise ExtractorError(u'Requested format not available')
-            return [format]
+        # Sort formats and remove duplicates
+        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-'))))
+        for i in range(len(formats) - 1, 0, -1):
+            if formats[i]['format_id'] == formats[i-1]['format_id']:
+                del formats[i]
+        
+        return {
+            'id': video_id,
+            'uploader': video_uploader,
+            'upload_date': upload_date,
+            'title': video_title,
+            'thumbnail': thumbnail,
+            'description': video_description,
+            'age_limit': age_limit,
+            'formats': formats,
+        }
index fb7c42830781bf4cbf3f3ba78547e6defe5b13c5..d05d0a8c13cc65034723036cef07e5ac6ca899f3 100644 (file)
@@ -236,11 +236,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '136': 'mp4',
         '137': 'mp4',
         '138': 'mp4',
-        '139': 'mp4',
-        '140': 'mp4',
-        '141': 'mp4',
         '160': 'mp4',
 
+        # Dash mp4 audio
+        '139': 'm4a',
+        '140': 'm4a',
+        '141': 'm4a',
+
         # Dash webm
         '171': 'webm',
         '172': 'webm',
@@ -346,7 +348,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         },
         {
             u"url":  u"http://www.youtube.com/watch?v=1ltcDfZMA3U",
-            u"file":  u"1ltcDfZMA3U.flv",
+            u"file":  u"1ltcDfZMA3U.mp4",
             u"note": u"Test VEVO video (#897)",
             u"info_dict": {
                 u"upload_date": u"20070518",
@@ -1150,7 +1152,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             list_page = self._download_webpage(list_url, video_id)
             caption_list = xml.etree.ElementTree.fromstring(list_page.encode('utf-8'))
             original_lang_node = caption_list.find('track')
-            if not original_lang_node or original_lang_node.attrib.get('kind') != 'asr' :
+            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr':
                 self._downloader.report_warning(u'Video doesn\'t have automatic captions')
                 return {}
             original_lang = original_lang_node.attrib['lang_code']
@@ -1403,32 +1405,29 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             # this signatures are encrypted
             if 'url_encoded_fmt_stream_map' not in args:
                 raise ValueError(u'No stream_map present')  # caught below
-            m_s = re.search(r'[&,]s=', args['url_encoded_fmt_stream_map'])
+            re_signature = re.compile(r'[&,]s=')
+            m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
             if m_s is not None:
                 self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
                 video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
-            m_s = re.search(r'[&,]s=', args.get('adaptive_fmts', u''))
+            m_s = re_signature.search(args.get('adaptive_fmts', u''))
             if m_s is not None:
-                if 'url_encoded_fmt_stream_map' in video_info:
-                    video_info['url_encoded_fmt_stream_map'][0] += ',' + args['adaptive_fmts']
-                else:
-                    video_info['url_encoded_fmt_stream_map'] = [args['adaptive_fmts']]
-            elif 'adaptive_fmts' in video_info:
-                if 'url_encoded_fmt_stream_map' in video_info:
-                    video_info['url_encoded_fmt_stream_map'][0] += ',' + video_info['adaptive_fmts'][0]
+                if 'adaptive_fmts' in video_info:
+                    video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
                 else:
-                    video_info['url_encoded_fmt_stream_map'] = video_info['adaptive_fmts']
+                    video_info['adaptive_fmts'] = [args['adaptive_fmts']]
         except ValueError:
             pass
 
         if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
             self.report_rtmp_download()
             video_url_list = [(None, video_info['conn'][0])]
-        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
-            if 'rtmpe%3Dyes' in video_info['url_encoded_fmt_stream_map'][0]:
+        elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
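+            # merge the regular and DASH (adaptive_fmts) stream maps into a single
+            # comma-separated list before parsing the individual itag entries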
+            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
+            if 'rtmpe%3Dyes' in encoded_url_map:
                 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
             url_map = {}
-            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
+            for url_data_str in encoded_url_map.split(','):
                 url_data = compat_parse_qs(url_data_str)
                 if 'itag' in url_data and 'url' in url_data:
                     url = url_data['url'][0]
@@ -1481,13 +1480,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
 
         results = []
-        for format_param, video_real_url in video_url_list:
+        for itag, video_real_url in video_url_list:
             # Extension
-            video_extension = self._video_extensions.get(format_param, 'flv')
+            video_extension = self._video_extensions.get(itag, 'flv')
 
-            video_format = '{0} - {1}{2}'.format(format_param if format_param else video_extension,
-                                              self._video_dimensions.get(format_param, '???'),
-                                              ' ('+self._special_itags[format_param]+')' if format_param in self._special_itags else '')
+            video_format = '{0} - {1}{2}'.format(itag if itag else video_extension,
+                                              self._video_dimensions.get(itag, '???'),
+                                              ' ('+self._special_itags[itag]+')' if itag in self._special_itags else '')
 
             results.append({
                 'id':       video_id,
@@ -1498,6 +1497,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'title':    video_title,
                 'ext':      video_extension,
                 'format':   video_format,
+                'format_id': itag,
                 'thumbnail':    video_thumbnail,
                 'description':  video_description,
                 'player_url':   player_url,
index bfb8f6bcd971dad03d5236c8e607b59ff81c667a..1d9785341ec685071ea8fcc4846029a3e889bc72 100644 (file)
@@ -572,6 +572,11 @@ class ExtractorError(Exception):
         return u''.join(traceback.format_tb(self.traceback))
 
 
+class RegexNotFoundError(ExtractorError):
+    """Error when a regex didn't match"""
+    pass
+
+
 class DownloadError(Exception):
     """Download Error exception.
 
index 971530f8be4ee141e055519c9a6e1d9aaf2792db..b4ce6068fd0866d9a3665797f311d03643c5f516 100644 (file)
@@ -1,2 +1,2 @@
 
-__version__ = '2013.10.18.1'
+__version__ = '2013.10.23.2'