Merge pull request #8513 from remitamine/dash-sort
author: remitamine <remitamine@gmail.com>
Tue, 15 Mar 2016 17:39:50 +0000 (18:39 +0100)
committer: remitamine <remitamine@gmail.com>
Tue, 15 Mar 2016 17:39:50 +0000 (18:39 +0100)
[extractor/common] fix dash formats sorting

1  2 
youtube_dl/extractor/common.py

index ecd7da767f72da9c07903d7013cd95ebf616078a,cd7087bec8297f185958d3ddf1530f8e2f183fee..770105a5b58013bbcf76e342280b76827724dae4
@@@ -15,14 -15,13 +15,14 @@@ import mat
  from ..compat import (
      compat_cookiejar,
      compat_cookies,
 +    compat_etree_fromstring,
      compat_getpass,
      compat_http_client,
 +    compat_os_name,
 +    compat_str,
      compat_urllib_error,
      compat_urllib_parse,
      compat_urlparse,
 -    compat_str,
 -    compat_etree_fromstring,
  )
  from ..utils import (
      NO_DEFAULT,
@@@ -47,8 -46,6 +47,8 @@@
      xpath_with_ns,
      determine_protocol,
      parse_duration,
 +    mimetype2ext,
 +    update_url_query,
  )
  
  
@@@ -106,7 -103,7 +106,7 @@@ class InfoExtractor(object)
                      * protocol   The protocol that will be used for the actual
                                   download, lower-case.
                                   "http", "https", "rtsp", "rtmp", "rtmpe",
 -                                 "m3u8", or "m3u8_native".
 +                                 "m3u8", "m3u8_native" or "http_dash_segments".
                      * preference Order number of this format. If this field is
                                   present and not None, the formats get sorted
                                   by this field, regardless of all other values.
      thumbnail:      Full URL to a video thumbnail image.
      description:    Full video description.
      uploader:       Full name of the video uploader.
 +    license:        License name the video is licensed under.
      creator:        The main artist who created the video.
      release_date:   The date (YYYYMMDD) when the video was released.
      timestamp:      UNIX timestamp of the moment the video became available.
      upload_date:    Video upload date (YYYYMMDD).
                      If not explicitly set, calculated from timestamp.
      uploader_id:    Nickname or id of the video uploader.
 +    uploader_url:   Full URL to a personal webpage of the video uploader.
      location:       Physical location where the video was filmed.
      subtitles:      The available subtitles as a dictionary in the format
                      {language: subformats}. "subformats" is a list sorted from
      def IE_NAME(self):
          return compat_str(type(self).__name__[:-2])
  
 -    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
 +    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers=None, query=None):
          """ Returns the response handle """
          if note is None:
              self.report_download_webpage(video_id)
                  self.to_screen('%s' % (note,))
              else:
                  self.to_screen('%s: %s' % (video_id, note))
 +        # data, headers and query params will be ignored for `Request` objects
 +        if isinstance(url_or_request, compat_str):
 +            if query:
 +                url_or_request = update_url_query(url_or_request, query)
 +            if data or headers:
 +                url_or_request = sanitized_Request(url_or_request, data, headers or {})
          try:
              return self._downloader.urlopen(url_or_request)
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                  self._downloader.report_warning(errmsg)
                  return False
  
 -    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
 +    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers=None, query=None):
          """ Returns a tuple (page content as string, URL handle) """
          # Strip hashes from the URL (#1038)
          if isinstance(url_or_request, (compat_str, str)):
              url_or_request = url_or_request.partition('#')[0]
  
 -        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
 +        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
          if urlh is False:
              assert not fatal
              return False
              self.to_screen('Saving request to ' + filename)
              # Working around MAX_PATH limitation on Windows (see
              # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
 -            if os.name == 'nt':
 +            if compat_os_name == 'nt':
                  absfilepath = os.path.abspath(filename)
                  if len(absfilepath) > 259:
                      filename = '\\\\?\\' + absfilepath
  
          return content
  
 -    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
 +    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers=None, query=None):
          """ Returns the data of the page as a string """
          success = False
          try_count = 0
          while success is False:
              try:
 -                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
 +                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
                  success = True
              except compat_http_client.IncompleteRead as e:
                  try_count += 1
  
      def _download_xml(self, url_or_request, video_id,
                        note='Downloading XML', errnote='Unable to download XML',
 -                      transform_source=None, fatal=True, encoding=None):
 +                      transform_source=None, fatal=True, encoding=None, data=None, headers=None, query=None):
          """Return the xml as an xml.etree.ElementTree.Element"""
          xml_string = self._download_webpage(
 -            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
 +            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
          if xml_string is False:
              return xml_string
          if transform_source:
                         note='Downloading JSON metadata',
                         errnote='Unable to download JSON metadata',
                         transform_source=None,
 -                       fatal=True, encoding=None):
 +                       fatal=True, encoding=None, data=None, headers=None, query=None):
          json_string = self._download_webpage(
              url_or_request, video_id, note, errnote, fatal=fatal,
 -            encoding=encoding)
 +            encoding=encoding, data=data, headers=headers, query=query)
          if (not fatal) and json_string is False:
              return None
          return self._parse_json(
                  if mobj:
                      break
  
 -        if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
 +        if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
              _name = '\033[0;34m%s\033[0m' % name
          else:
              _name = name
          downloader_params = self._downloader.params
  
          # Attempt to use provided username and password or .netrc data
 -        if downloader_params.get('username', None) is not None:
 +        if downloader_params.get('username') is not None:
              username = downloader_params['username']
              password = downloader_params['password']
          elif downloader_params.get('usenetrc', False):
              return None
          downloader_params = self._downloader.params
  
 -        if downloader_params.get('twofactor', None) is not None:
 +        if downloader_params.get('twofactor') is not None:
              return downloader_params['twofactor']
  
          return compat_getpass('Type %s and press [Return]: ' % note)
              'mature': 17,
              'restricted': 19,
          }
 -        return RATING_TABLE.get(rating.lower(), None)
 +        return RATING_TABLE.get(rating.lower())
  
      def _family_friendly_search(self, html):
          # See http://schema.org/VideoObject
              '0': 18,
              'false': 18,
          }
 -        return RATING_TABLE.get(family_friendly.lower(), None)
 +        return RATING_TABLE.get(family_friendly.lower())
  
      def _twitter_search_player(self, html):
          return self._html_search_meta('twitter:player', html,
              proto_preference = 0 if determine_protocol(f) in ['http', 'https'] else -0.1
  
              if f.get('vcodec') == 'none':  # audio only
+                 preference -= 50
                  if self._downloader.params.get('prefer_free_formats'):
                      ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                  else:
                  except ValueError:
                      audio_ext_preference = -1
              else:
+                 if f.get('acodec') == 'none':  # video only
+                     preference -= 40
                  if self._downloader.params.get('prefer_free_formats'):
                      ORDER = ['flv', 'mp4', 'webm']
                  else:
                      item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                  formats)
  
 +    @staticmethod
 +    def _remove_duplicate_formats(formats):
 +        format_urls = set()
 +        unique_formats = []
 +        for f in formats:
 +            if f['url'] not in format_urls:
 +                format_urls.add(f['url'])
 +                unique_formats.append(f)
 +        formats[:] = unique_formats
 +
      def _is_valid_url(self, url, video_id, item='video'):
          url = self._proto_relative_url(url, scheme='http:')
          # For now assume non HTTP(S) URLs always valid
          if manifest is False:
              return []
  
 +        return self._parse_f4m_formats(
 +            manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
 +            transform_source=transform_source, fatal=fatal)
 +
 +    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
 +                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
 +                           fatal=True):
          formats = []
          manifest_version = '1.0'
          media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
                  # bitrate in f4m downloader
                  if determine_ext(manifest_url) == 'f4m':
                      formats.extend(self._extract_f4m_formats(
 -                        manifest_url, video_id, preference, f4m_id, fatal=fatal))
 +                        manifest_url, video_id, preference=preference, f4m_id=f4m_id,
 +                        transform_source=transform_source, fatal=fatal))
                      continue
              tbr = int_or_none(media_el.attrib.get('bitrate'))
              formats.append({
              return []
          m3u8_doc, urlh = res
          m3u8_url = urlh.geturl()
 -        # A Media Playlist Tag MUST NOT appear in a Master Playlist
 -        # https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
 -        # The EXT-X-TARGETDURATION tag is REQUIRED for every M3U8 Media Playlists
 -        # https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
 -        if '#EXT-X-TARGETDURATION' in m3u8_doc:
 +
 +        # We should try extracting formats only from master playlists [1], i.e.
 +        # playlists that describe available qualities. On the other hand media
 +        # playlists [2] should be returned as is since they contain just the media
 +        # without qualities renditions.
 +        # Fortunately, master playlist can be easily distinguished from media
 +        # playlist based on particular tags availability. As of [1, 2] master
 +    # playlist tags MUST NOT appear in a media playlist and vice versa.
 +        # As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
 +        # and MUST NOT appear in master playlist thus we can clearly detect media
 +        # playlist with this criterion.
 +        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
 +        # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
 +        # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
 +        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
              return [{
                  'url': m3u8_url,
                  'format_id': m3u8_id,
                      'protocol': entry_protocol,
                      'preference': preference,
                  }
 -                codecs = last_info.get('CODECS')
 -                if codecs:
 -                    # TODO: looks like video codec is not always necessarily goes first
 -                    va_codecs = codecs.split(',')
 -                    if va_codecs[0]:
 -                        f['vcodec'] = va_codecs[0]
 -                    if len(va_codecs) > 1 and va_codecs[1]:
 -                        f['acodec'] = va_codecs[1]
                  resolution = last_info.get('RESOLUTION')
                  if resolution:
                      width_str, height_str = resolution.split('x')
                      f['width'] = int(width_str)
                      f['height'] = int(height_str)
 +                codecs = last_info.get('CODECS')
 +                if codecs:
 +                    vcodec, acodec = [None] * 2
 +                    va_codecs = codecs.split(',')
 +                    if len(va_codecs) == 1:
 +                        # Audio only entries usually come with single codec and
 +                        # no resolution. For more robustness we also check it to
 +                        # be mp4 audio.
 +                        if not resolution and va_codecs[0].startswith('mp4a'):
 +                            vcodec, acodec = 'none', va_codecs[0]
 +                        else:
 +                            vcodec = va_codecs[0]
 +                    else:
 +                        vcodec, acodec = va_codecs[:2]
 +                    f.update({
 +                        'acodec': acodec,
 +                        'vcodec': vcodec,
 +                    })
                  if last_media is not None:
                      f['m3u8_media'] = last_media
                      last_media = None
                  out.append('{%s}%s' % (namespace, c))
          return '/'.join(out)
  
 -    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
 -        smil = self._download_smil(smil_url, video_id, fatal=fatal)
 +    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
 +        smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
  
          if smil is False:
              assert not fatal
              return {}
          return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
  
 -    def _download_smil(self, smil_url, video_id, fatal=True):
 +    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
          return self._download_xml(
              smil_url, video_id, 'Downloading SMIL file',
 -            'Unable to download SMIL file', fatal=fatal)
 +            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
  
      def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
          namespace = self._parse_smil_namespace(smil)
          http_count = 0
          m3u8_count = 0
  
 -        src_urls = []
 +        srcs = []
          videos = smil.findall(self._xpath_ns('.//video', namespace))
          for video in videos:
              src = video.get('src')
 -            if not src:
 +            if not src or src in srcs:
                  continue
 +            srcs.append(src)
  
              bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
              filesize = int_or_none(video.get('size') or video.get('fileSize'))
                  continue
  
              src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
 -            if src_url in src_urls:
 -                continue
 -            src_urls.append(src_url)
 +            src_url = src_url.strip()
  
              if proto == 'm3u8' or src_ext == 'm3u8':
                  m3u8_formats = self._extract_m3u8_formats(
              if not src or src in urls:
                  continue
              urls.append(src)
 -            ext = textstream.get('ext') or determine_ext(src)
 -            if not ext:
 -                type_ = textstream.get('type')
 -                SUBTITLES_TYPES = {
 -                    'text/vtt': 'vtt',
 -                    'text/srt': 'srt',
 -                    'application/smptett+xml': 'tt',
 -                }
 -                if type_ in SUBTITLES_TYPES:
 -                    ext = SUBTITLES_TYPES[type_]
 +            ext = textstream.get('ext') or determine_ext(src) or mimetype2ext(textstream.get('type'))
              lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
              subtitles.setdefault(lang, []).append({
                  'url': src,
                          continue
                      representation_attrib = adaptation_set.attrib.copy()
                      representation_attrib.update(representation.attrib)
 -                    mime_type = representation_attrib.get('mimeType')
 -                    content_type = mime_type.split('/')[0] if mime_type else representation_attrib.get('contentType')
 +                    # According to page 41 of ISO/IEC 23009-1:2014, @mimeType is mandatory
 +                    mime_type = representation_attrib['mimeType']
 +                    content_type = mime_type.split('/')[0]
                      if content_type == 'text':
                          # TODO implement WebVTT downloading
                          pass
                                  base_url = base_url_e.text + base_url
                                  if re.match(r'^https?://', base_url):
                                      break
 -                        if not re.match(r'^https?://', base_url):
 +                        if mpd_base_url and not re.match(r'^https?://', base_url):
 +                            if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
 +                                mpd_base_url += '/'
                              base_url = mpd_base_url + base_url
                          representation_id = representation_attrib.get('id')
                          lang = representation_attrib.get('lang')
                          f = {
                              'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
                              'url': base_url,
 +                            'ext': mimetype2ext(mime_type),
                              'width': int_or_none(representation_attrib.get('width')),
                              'height': int_or_none(representation_attrib.get('height')),
                              'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
      def _live_title(self, name):
          """ Generate the title for a live video """
          now = datetime.datetime.now()
 -        now_str = now.strftime("%Y-%m-%d %H:%M")
 +        now_str = now.strftime('%Y-%m-%d %H:%M')
          return name + ' ' + now_str
  
      def _int(self, v, name, fatal=False, **kwargs):
          return {}
  
      def _get_subtitles(self, *args, **kwargs):
 -        raise NotImplementedError("This method must be implemented by subclasses")
 +        raise NotImplementedError('This method must be implemented by subclasses')
  
      @staticmethod
      def _merge_subtitle_items(subtitle_list1, subtitle_list2):
          return {}
  
      def _get_automatic_captions(self, *args, **kwargs):
 -        raise NotImplementedError("This method must be implemented by subclasses")
 +        raise NotImplementedError('This method must be implemented by subclasses')
 +
 +    def mark_watched(self, *args, **kwargs):
 +        if (self._downloader.params.get('mark_watched', False) and
 +                (self._get_login_info()[0] is not None or
 +                    self._downloader.params.get('cookiefile') is not None)):
 +            self._mark_watched(*args, **kwargs)
 +
 +    def _mark_watched(self, *args, **kwargs):
 +        raise NotImplementedError('This method must be implemented by subclasses')
  
  
  class SearchInfoExtractor(InfoExtractor):
  
      def _get_n_results(self, query, n):
          """Get a specified number of results for a query"""
 -        raise NotImplementedError("This method must be implemented by subclasses")
 +        raise NotImplementedError('This method must be implemented by subclasses')
  
      @property
      def SEARCH_KEY(self):