Merge branch 'master' into subtitles_rework
author Ismael Mejia <iemejia@gmail.com>
Fri, 6 Sep 2013 21:23:23 +0000 (23:23 +0200)
committer Ismael Mejia <iemejia@gmail.com>
Fri, 6 Sep 2013 21:24:41 +0000 (23:24 +0200)
youtube_dl/__init__.py
youtube_dl/extractor/dailymotion.py
youtube_dl/extractor/youtube.py

diff --combined youtube_dl/__init__.py
index 2c2fd441cf40243e4be43273177b8c10f4374aaf,4213ec1d51fd1ea0029056227b95fa2d715674c1..0083f2e99cfbc7a4ec28924a75de851f24c75f2f
@@@ -28,6 -28,7 +28,7 @@@ __authors__  = 
      'Axel Noack',
      'Albert Kim',
      'Pierre Rudloff',
+     'Huarong Huo',
  )
  
  __license__ = 'Public Domain'
@@@ -45,6 -46,7 +46,7 @@@ import sy
  import warnings
  import platform
  
  from .utils import *
  from .update import update_self
  from .version import __version__
@@@ -99,6 -101,16 +101,16 @@@ def parseOpts(overrideArguments=None)
              pass
          return None
  
+     def _hide_login_info(opts):
+         opts = list(opts)
+         for private_opt in ['-p', '--password', '-u', '--username']:
+             try:
+                 i = opts.index(private_opt)
+                 opts[i+1] = '<PRIVATE>'
+             except ValueError:
+                 pass
+         return opts
      max_width = 80
      max_help_position = 80
  
  
      video_format.add_option('-f', '--format',
              action='store', dest='format', metavar='FORMAT',
-             help='video format code, specifiy the order of preference using slashes: "-f 22/17/18"')
+             help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported')
      video_format.add_option('--all-formats',
              action='store_const', dest='format', help='download all available video formats', const='all')
      video_format.add_option('--prefer-free-formats',
  
      subtitles.add_option('--write-sub', '--write-srt',
              action='store_true', dest='writesubtitles',
 -            help='write subtitle file (currently youtube only)', default=False)
 +            help='write subtitle file', default=False)
      subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
              action='store_true', dest='writeautomaticsub',
 -            help='write automatic subtitle file (currently youtube only)', default=False)
 -    subtitles.add_option('--only-sub',
 -            action='store_true', dest='skip_download',
 -            help='[deprecated] alias of --skip-download', default=False)
 +            help='write automatic subtitle file (youtube only)', default=False)
      subtitles.add_option('--all-subs',
              action='store_true', dest='allsubtitles',
              help='downloads all the available subtitles of the video', default=False)
              action='store', dest='subtitlesformat', metavar='FORMAT',
              help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
      subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
 -            action='callback', dest='subtitleslang', metavar='LANGS', type='str',
 +            action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
              default=[], callback=_comma_separated_values_options_callback,
              help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
  
          argv = systemConf + userConf + commandLineConf
          opts, args = parser.parse_args(argv)
          if opts.verbose:
-             sys.stderr.write(u'[debug] System config: ' + repr(systemConf) + '\n')
-             sys.stderr.write(u'[debug] User config: ' + repr(userConf) + '\n')
-             sys.stderr.write(u'[debug] Command-line args: ' + repr(commandLineConf) + '\n')
+             sys.stderr.write(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
+             sys.stderr.write(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
+             sys.stderr.write(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
  
      return parser, opts, args
  
@@@ -427,6 -442,10 +439,10 @@@ def _real_main(argv=None)
      proxy_handler = compat_urllib_request.ProxyHandler(proxies)
      https_handler = make_HTTPS_handler(opts)
      opener = compat_urllib_request.build_opener(https_handler, proxy_handler, cookie_processor, YoutubeDLHandler())
+     # Delete the default user-agent header, which would otherwise apply in
+     # cases where our custom HTTP handler doesn't come into play
+     # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
+     opener.addheaders = []
      compat_urllib_request.install_opener(opener)
      socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
  
          'allsubtitles': opts.allsubtitles,
          'listsubtitles': opts.listsubtitles,
          'subtitlesformat': opts.subtitlesformat,
 -        'subtitleslangs': opts.subtitleslang,
 +        'subtitleslangs': opts.subtitleslangs,
          'matchtitle': decodeOption(opts.matchtitle),
          'rejecttitle': decodeOption(opts.rejecttitle),
          'max_downloads': opts.max_downloads,
                  sys.exc_clear()
              except:
                  pass
-         sys.stderr.write(u'[debug] Python version %s - %s' %(platform.python_version(), platform.platform()) + u'\n')
+         sys.stderr.write(u'[debug] Python version %s - %s' %(platform.python_version(), platform_name()) + u'\n')
          sys.stderr.write(u'[debug] Proxy map: ' + str(proxy_handler.proxies) + u'\n')
  
      ydl.add_default_info_extractors()
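
Aside (not part of the patch): a minimal sketch of how the new _hide_login_info helper masks credentials before the verbose [debug] lines are printed. The helper is local to parseOpts, and the option values below are invented for illustration only.

    # Hypothetical argument list; 'alice' and 'hunter2' are made-up values.
    opts = ['--username', 'alice', '-p', 'hunter2', '-f', 'mp4']
    print(_hide_login_info(opts))
    # prints: ['--username', '<PRIVATE>', '-p', '<PRIVATE>', '-f', 'mp4']
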
diff --combined youtube_dl/extractor/dailymotion.py
index 003b1d8c3e6233368b764e2866431cde13e032f3,3c616e0896cecc33077c9bd7ae0c9b15c5d4ddbd..f7dffd4cce3a1fc2d04bd016111aa354717072a6
@@@ -1,50 -1,27 +1,50 @@@
  import re
  import json
  import itertools
 +import socket
  
  from .common import InfoExtractor
 +from .subtitles import NoAutoSubtitlesIE
 +
  from ..utils import (
 +    compat_http_client,
 +    compat_urllib_error,
      compat_urllib_request,
 +    compat_str,
      get_element_by_attribute,
      get_element_by_id,
  
      ExtractorError,
  )
  
 -class DailymotionIE(InfoExtractor):
 +
 +class DailyMotionSubtitlesIE(NoAutoSubtitlesIE):
 +
 +    def _get_available_subtitles(self, video_id):
 +        request = compat_urllib_request.Request('https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id)
 +        try:
 +            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
 +        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
 +            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
 +            return {}
 +        info = json.loads(sub_list)
 +        if (info['total'] > 0):
 +            sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
 +            return sub_lang_list
 +        self._downloader.report_warning(u'video doesn\'t have subtitles')
 +        return {}
 +
 +class DailymotionIE(DailyMotionSubtitlesIE, InfoExtractor):
      """Information Extractor for Dailymotion"""
  
-     _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
+     _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
      IE_NAME = u'dailymotion'
      _TEST = {
          u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
          u'file': u'x33vw9.mp4',
          u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
          u'info_dict': {
-             u"uploader": u"Alex and Van .", 
+             u"uploader": u"Amphora Alex and Van .", 
              u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
          }
      }
@@@ -56,6 -33,7 +56,7 @@@
          video_id = mobj.group(1).split('_')[0].split('?')[0]
  
          video_extension = 'mp4'
+         url = 'http://www.dailymotion.com/video/%s' % video_id
  
          # Retrieve video webpage to extract further information
          request = compat_urllib_request.Request(url)
@@@ -78,7 -56,8 +79,8 @@@
          embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
          embed_page = self._download_webpage(embed_url, video_id,
                                              u'Downloading embed page')
-         info = self._search_regex(r'var info = ({.*?}),', embed_page, 'video info')
+         info = self._search_regex(r'var info = ({.*?}),$', embed_page,
+             'video info', flags=re.MULTILINE)
          info = json.loads(info)
  
          # TODO: support choosing qualities
              raise ExtractorError(u'Unable to extract video URL')
          video_url = info[max_quality]
  
 +        # subtitles
 +        video_subtitles = None
 +        video_webpage = None
 +
 +        if self._downloader.params.get('writesubtitles', False) or self._downloader.params.get('allsubtitles', False):
 +            video_subtitles = self._extract_subtitles(video_id)
 +        elif self._downloader.params.get('writeautomaticsub', False):
 +            video_subtitles = self._request_automatic_caption(video_id, video_webpage)
 +
 +        if self._downloader.params.get('listsubtitles', False):
 +            self._list_available_subtitles(video_id)
 +            return
 +
          return [{
              'id':       video_id,
              'url':      video_url,
              'upload_date':  video_upload_date,
              'title':    self._og_search_title(webpage),
              'ext':      video_extension,
 +            'subtitles':    video_subtitles,
              'thumbnail': info['thumbnail_url']
          }]
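
Aside (not part of the patch): a sketch of the mapping performed by DailyMotionSubtitlesIE._get_available_subtitles. The JSON below is invented, but it follows the fields requested from the API (id, language, url).

    # Invented example of the response from
    # https://api.dailymotion.com/video/<id>/subtitles?fields=id,language,url
    info = {
        'total': 2,
        'list': [
            {'id': 'sub1', 'language': 'en', 'url': 'http://example.com/en.srt'},
            {'id': 'sub2', 'language': 'fr', 'url': 'http://example.com/fr.srt'},
        ],
    }
    if info['total'] > 0:
        sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
    # sub_lang_list == {'en': 'http://example.com/en.srt', 'fr': 'http://example.com/fr.srt'}
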
  
diff --combined youtube_dl/extractor/youtube.py
index b3400df0abe359df85ec6bc029887ad400e3e15e,423a5e973ce258e1248c62e7ed444ef943fc797e..11611f10da73f085843ba0586c90065f3feae816
@@@ -7,7 -7,6 +7,7 @@@ import socke
  import itertools
  
  from .common import InfoExtractor, SearchInfoExtractor
 +from .subtitles import SubtitlesIE
  from ..utils import (
      compat_http_client,
      compat_parse_qs,
@@@ -131,70 -130,12 +131,70 @@@ class YoutubeBaseInfoExtractor(InfoExtr
              return
          self._confirm_age()
  
 -class YoutubeIE(YoutubeBaseInfoExtractor):
 +class YoutubeSubtitlesIE(SubtitlesIE):
 +
 +    def _get_available_subtitles(self, video_id):
 +        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
 +        try:
 +            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
 +        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
 +            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
 +            return {}
 +        lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
 +
 +        sub_lang_list = {}
 +        for l in lang_list:
 +            lang = l[1]
 +            params = compat_urllib_parse.urlencode({
 +                'lang': lang,
 +                'v': video_id,
 +                'fmt': self._downloader.params.get('subtitlesformat'),
 +            })
 +            url = u'http://www.youtube.com/api/timedtext?' + params
 +            sub_lang_list[lang] = url
 +        if not sub_lang_list:
 +            self._downloader.report_warning(u'video doesn\'t have subtitles')
 +            return {}
 +        return sub_lang_list
 +
 +    def _request_automatic_caption(self, video_id, webpage):
 +        """We need the webpage for getting the captions url, pass it as an
 +           argument to speed up the process."""
 +        sub_lang = (self._downloader.params.get('subtitleslangs') or ['en'])[0]
 +        sub_format = self._downloader.params.get('subtitlesformat')
 +        self.to_screen(u'%s: Looking for automatic captions' % video_id)
 +        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
 +        err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
 +        if mobj is None:
 +            self._downloader.report_warning(err_msg)
 +            return {}
 +        player_config = json.loads(mobj.group(1))
 +        try:
 +            args = player_config[u'args']
 +            caption_url = args[u'ttsurl']
 +            timestamp = args[u'timestamp']
 +            params = compat_urllib_parse.urlencode({
 +                'lang': 'en',
 +                'tlang': sub_lang,
 +                'fmt': sub_format,
 +                'ts': timestamp,
 +                'kind': 'asr',
 +            })
 +            subtitles_url = caption_url + '&' + params
 +            sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
 +            return {sub_lang: sub}
 +        # An extractor error can be raised by the download process if there are
 +        # no automatic captions but there are subtitles
 +        except (KeyError, ExtractorError):
 +            self._downloader.report_warning(err_msg)
 +            return {}
 +
 +class YoutubeIE(YoutubeSubtitlesIE, YoutubeBaseInfoExtractor):
      IE_DESC = u'YouTube.com'
      _VALID_URL = r"""^
                       (
                           (?:https?://)?                                       # http(s):// (optional)
-                          (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
+                          (?:(?:(?:(?:\w+\.)?youtube(?:-nocookie)?\.com/|
                              tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
                           (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                           (?:                                                  # the various things that can precede the ID:
                                   (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
                                   v=
                               )
-                          )?                                                   # optional -> youtube.com/xxxx is OK
+                          ))
+                          |youtu\.be/                                          # just youtu.be/xxxx
+                          )
                       )?                                                       # all until now is optional -> you can pass the naked ID
                       ([0-9A-Za-z_-]+)                                         # here is it! the YouTube video ID
                       (?(1).+)?                                                # if we found the ID, everything can follow
                       $"""
      _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
      # Listed in order of quality
-     _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13',
-                           '95', '94', '93', '92', '132', '151',
+     _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '36', '17', '13',
+                           # Apple HTTP Live Streaming
+                           '96', '95', '94', '93', '92', '132', '151',
                            # 3D
                            '85', '84', '102', '83', '101', '82', '100',
                            # Dash video
                            # Dash audio
                            '141', '172', '140', '171', '139',
                            ]
-     _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13',
-                                       '95', '94', '93', '92', '132', '151',
+     _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '36', '17', '13',
+                                       # Apple HTTP Live Streaming
+                                       '96', '95', '94', '93', '92', '132', '151',
+                                       # 3D
                                        '85', '102', '84', '101', '83', '100', '82',
                                        # Dash video
                                        '138', '248', '137', '247', '136', '246', '245',
                                        # Dash audio
                                        '172', '141', '171', '140', '139',
                                        ]
+     _video_formats_map = {
+         'flv': ['35', '34', '6', '5'],
+         '3gp': ['36', '17', '13'],
+         'mp4': ['38', '37', '22', '18'],
+         'webm': ['46', '45', '44', '43'],
+     }
      _video_extensions = {
          '13': '3gp',
-         '17': 'mp4',
+         '17': '3gp',
          '18': 'mp4',
          '22': 'mp4',
+         '36': '3gp',
          '37': 'mp4',
          '38': 'mp4',
          '43': 'webm',
          '101': 'webm',
          '102': 'webm',
  
-         # videos that use m3u8
+         # Apple HTTP Live Streaming
          '92': 'mp4',
          '93': 'mp4',
          '94': 'mp4',
          '22': '720x1280',
          '34': '360x640',
          '35': '480x854',
+         '36': '240x320',
          '37': '1080x1920',
          '38': '3072x4096',
          '43': '360x640',
              u"info_dict": {
                  u"upload_date": u"20120506",
                  u"title": u"Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]",
-                 u"description": u"md5:b085c9804f5ab69f4adea963a2dceb3c",
+                 u"description": u"md5:3e2666e0a55044490499ea45fe9037b7",
                  u"uploader": u"Icona Pop",
                  u"uploader_id": u"IconaPop"
              }
      @classmethod
      def suitable(cls, url):
          """Receives a URL and returns True if suitable for this IE."""
-         if YoutubePlaylistIE.suitable(url) or YoutubeSubscriptionsIE.suitable(url): return False
+         if YoutubePlaylistIE.suitable(url): return False
          return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
  
      def report_video_webpage_download(self, video_id):
          """Report attempt to download video info webpage."""
          self.to_screen(u'%s: Downloading video info webpage' % video_id)
  
 -    def report_video_subtitles_download(self, video_id):
 -        """Report attempt to download video info webpage."""
 -        self.to_screen(u'%s: Checking available subtitles' % video_id)
 -
 -    def report_video_subtitles_request(self, video_id, sub_lang, format):
 -        """Report attempt to download video info webpage."""
 -        self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
 -
 -    def report_video_subtitles_available(self, video_id, sub_lang_list):
 -        """Report available subtitles."""
 -        sub_lang = ",".join(list(sub_lang_list.keys()))
 -        self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))
 -
      def report_information_extraction(self, video_id):
          """Report attempt to extract video information."""
          self.to_screen(u'%s: Extracting video information' % video_id)
          elif len(s) == 89:
              return s[84:78:-1] + s[87] + s[77:60:-1] + s[0] + s[59:3:-1]
          elif len(s) == 88:
-             return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1] + s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12]
+             return s[7:28] + s[87] + s[29:45] + s[55] + s[46:55] + s[2] + s[56:87] + s[28]
          elif len(s) == 87:
              return s[6:27] + s[4] + s[28:39] + s[27] + s[40:59] + s[2] + s[60:]
          elif len(s) == 86:
-             return s[5:20] + s[2] + s[21:]
+             return s[5:34] + s[0] + s[35:38] + s[3] + s[39:45] + s[38] + s[46:53] + s[73] + s[54:73] + s[85] + s[74:85] + s[53]
          elif len(s) == 85:
              return s[83:34:-1] + s[0] + s[33:27:-1] + s[3] + s[26:19:-1] + s[34] + s[18:3:-1] + s[27]
          elif len(s) == 84:
-             return s[83:27:-1] + s[0] + s[26:5:-1] + s[2:0:-1] + s[27]
+             return s[81:36:-1] + s[0] + s[35:2:-1]
          elif len(s) == 83:
              return s[81:64:-1] + s[82] + s[63:52:-1] + s[45] + s[51:45:-1] + s[1] + s[44:1:-1] + s[0]
          elif len(s) == 82:
              # Fall back to the other algorithms
              return self._decrypt_signature(s)
  
 -
 -    def _get_available_subtitles(self, video_id):
 -        self.report_video_subtitles_download(video_id)
 -        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
 -        try:
 -            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
 -        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
 -            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
 -            return {}
 -        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
 -        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
 -        if not sub_lang_list:
 -            self._downloader.report_warning(u'video doesn\'t have subtitles')
 -            return {}
 -        return sub_lang_list
 -
 -    def _list_available_subtitles(self, video_id):
 -        sub_lang_list = self._get_available_subtitles(video_id)
 -        self.report_video_subtitles_available(video_id, sub_lang_list)
 -
 -    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
 -        """
 -        Return the subtitle as a string or None if they are not found
 -        """
 -        self.report_video_subtitles_request(video_id, sub_lang, format)
 -        params = compat_urllib_parse.urlencode({
 -            'lang': sub_lang,
 -            'name': sub_name,
 -            'v': video_id,
 -            'fmt': format,
 -        })
 -        url = 'http://www.youtube.com/api/timedtext?' + params
 -        try:
 -            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
 -        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
 -            self._downloader.report_warning(u'unable to download video subtitles for %s: %s' % (sub_lang, compat_str(err)))
 -            return
 -        if not sub:
 -            self._downloader.report_warning(u'Did not fetch video subtitles')
 -            return
 -        return sub
 -
 -    def _request_automatic_caption(self, video_id, webpage):
 -        """We need the webpage for getting the captions url, pass it as an
 -           argument to speed up the process."""
 -        sub_lang = (self._downloader.params.get('subtitleslangs') or ['en'])[0]
 -        sub_format = self._downloader.params.get('subtitlesformat')
 -        self.to_screen(u'%s: Looking for automatic captions' % video_id)
 -        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
 -        err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
 -        if mobj is None:
 -            self._downloader.report_warning(err_msg)
 -            return {}
 -        player_config = json.loads(mobj.group(1))
 -        try:
 -            args = player_config[u'args']
 -            caption_url = args[u'ttsurl']
 -            timestamp = args[u'timestamp']
 -            params = compat_urllib_parse.urlencode({
 -                'lang': 'en',
 -                'tlang': sub_lang,
 -                'fmt': sub_format,
 -                'ts': timestamp,
 -                'kind': 'asr',
 -            })
 -            subtitles_url = caption_url + '&' + params
 -            sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
 -            return {sub_lang: sub}
 -        # An extractor error can be raise by the download process if there are
 -        # no automatic captions but there are subtitles
 -        except (KeyError, ExtractorError):
 -            self._downloader.report_warning(err_msg)
 -            return {}
 -    
 -    def _extract_subtitles(self, video_id):
 -        """
 -        Return a dictionary: {language: subtitles} or {} if the subtitles
 -        couldn't be found
 -        """
 -        available_subs_list = self._get_available_subtitles(video_id)
 -        sub_format = self._downloader.params.get('subtitlesformat')
 -        if  not available_subs_list: #There was some error, it didn't get the available subtitles
 -            return {}
 -        if self._downloader.params.get('allsubtitles', False):
 -            sub_lang_list = available_subs_list
 -        else:
 -            if self._downloader.params.get('subtitleslangs', False):
 -                reqested_langs = self._downloader.params.get('subtitleslangs')
 -            elif 'en' in available_subs_list:
 -                reqested_langs = ['en']
 -            else:
 -                reqested_langs = [list(available_subs_list.keys())[0]]
 -
 -            sub_lang_list = {}
 -            for sub_lang in reqested_langs:
 -                if not sub_lang in available_subs_list:
 -                    self._downloader.report_warning(u'no closed captions found in the specified language "%s"' % sub_lang)
 -                    continue
 -                sub_lang_list[sub_lang] = available_subs_list[sub_lang]
 -        subtitles = {}
 -        for sub_lang in sub_lang_list:
 -            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
 -            if subtitle:
 -                subtitles[sub_lang] = subtitle
 -        return subtitles
 -
      def _print_formats(self, formats):
          print('Available formats:')
          for x in formats:
              video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
          else:
            # Specific formats. We pick the first in a slash-delimited sequence.
-             # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
+             # Format can be specified as itag or 'mp4' or 'flv' etc. We pick the highest quality
+             # available in the specified format. For example,
+             # if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
+             # if '1/mp4/3/4' is requested and '1' and '5' (an mp4) are available, we pick '1'.
+             # if '1/mp4/3/4' is requested and '4' and '5' (an mp4) are available, we pick '5'.
              req_formats = req_format.split('/')
              video_url_list = None
              for rf in req_formats:
                  if rf in url_map:
                      video_url_list = [(rf, url_map[rf])]
                      break
+                 if rf in self._video_formats_map:
+                     for srf in self._video_formats_map[rf]:
+                         if srf in url_map:
+                             video_url_list = [(srf, url_map[srf])]
+                             break
+                     else:
+                         continue
+                     break
              if video_url_list is None:
                  raise ExtractorError(u'requested format not available')
          return video_url_list
          manifest = self._download_webpage(manifest_url, video_id, u'Downloading formats manifest')
          formats_urls = _get_urls(manifest)
          for format_url in formats_urls:
-             itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
+             itag = self._search_regex(r'itag%3D(\d+?)/', format_url, 'itag')
              url_map[itag] = format_url
          return url_map
  
@@@ -860,8 -945,11 +885,11 @@@ class YoutubePlaylistIE(InfoExtractor)
  
              for entry in response['feed']['entry']:
                  index = entry['yt$position']['$t']
-                 if 'media$group' in entry and 'media$player' in entry['media$group']:
-                     videos.append((index, entry['media$group']['media$player']['url']))
+                 if 'media$group' in entry and 'yt$videoid' in entry['media$group']:
+                     videos.append((
+                         index,
+                         'https://www.youtube.com/watch?v=' + entry['media$group']['yt$videoid']['$t']
+                     ))
  
          videos = [v[1] for v in sorted(videos)]
  
@@@ -927,13 -1015,20 +955,20 @@@ class YoutubeChannelIE(InfoExtractor)
  
  class YoutubeUserIE(InfoExtractor):
      IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)'
-     _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
+     _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?)|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
      _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
      _GDATA_PAGE_SIZE = 50
-     _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
-     _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
+     _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
      IE_NAME = u'youtube:user'
  
+     @classmethod
+     def suitable(cls, url):
+         # Don't return True if the url can be extracted with another youtube
+         # extractor; the regex is too permissive and it would match.
+         other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
+         if any(ie.suitable(url) for ie in other_ies): return False
+         else: return super(YoutubeUserIE, cls).suitable(url)
      def _real_extract(self, url):
          # Extract username
          mobj = re.match(self._VALID_URL, url)
              page = self._download_webpage(gdata_url, username,
                                            u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE))
  
+             try:
+                 response = json.loads(page)
+             except ValueError as err:
+                 raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
              # Extract video identifiers
              ids_in_page = []
-             for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-                 if mobj.group(1) not in ids_in_page:
-                     ids_in_page.append(mobj.group(1))
+             for entry in response['feed']['entry']:
+                 ids_in_page.append(entry['id']['$t'].split('/')[-1])
              video_ids.extend(ids_in_page)
  
              # A little optimization - if current page is not
@@@ -1101,7 -1198,7 +1138,7 @@@ class YoutubeWatchLaterIE(YoutubeFeedsI
  class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
      IE_NAME = u'youtube:favorites'
      IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
-     _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:o?rites)?'
+     _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
      _LOGIN_REQUIRED = True
  
      def _real_extract(self, url):
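
Closing aside (not part of the patch): a standalone sketch of the format-selection change in YoutubeIE. With the new _video_formats_map, a request like "-f 22/mp4/5" first tries the exact itag and then falls back to the best available itag of the named container. The url_map and URLs below are invented.

    # Invented url_map; only itags '18' and '5' are available for this video.
    _video_formats_map = {'mp4': ['38', '37', '22', '18'], 'flv': ['35', '34', '6', '5']}
    url_map = {'18': 'http://example.com/video.mp4', '5': 'http://example.com/video.flv'}
    video_url_list = None
    for rf in '22/mp4/5'.split('/'):
        if rf in url_map:
            video_url_list = [(rf, url_map[rf])]
            break
        if rf in _video_formats_map:
            for srf in _video_formats_map[rf]:
                if srf in url_map:
                    video_url_list = [(srf, url_map[srf])]
                    break
            else:
                continue
            break
    # '22' is unavailable, so 'mp4' resolves to itag '18':
    # video_url_list == [('18', 'http://example.com/video.mp4')]
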