X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=youtube_dl%2Fextractor%2Fcommon.py;h=07bd2cbe2f9f6c4213e31e4c6e90d2df7b611f58;hb=12557339453e25dbb18dfc51dc1e88ca5325d8e9;hp=4859b911a01f600bc16e403bc7785113fd40bf95;hpb=675d001633c9446e9d53db2794614862d1d82607;p=youtube-dl

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 4859b911a..07bd2cbe2 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -15,13 +15,14 @@ import math
 from ..compat import (
     compat_cookiejar,
     compat_cookies,
+    compat_etree_fromstring,
     compat_getpass,
     compat_http_client,
+    compat_os_name,
+    compat_str,
     compat_urllib_error,
     compat_urllib_parse,
     compat_urlparse,
-    compat_str,
-    compat_etree_fromstring,
 )
 from ..utils import (
     NO_DEFAULT,
@@ -46,6 +47,7 @@ from ..utils import (
     xpath_with_ns,
     determine_protocol,
     parse_duration,
+    mimetype2ext,
 )
 
 
@@ -156,12 +158,14 @@ class InfoExtractor(object):
     thumbnail:      Full URL to a video thumbnail image.
     description:    Full video description.
     uploader:       Full name of the video uploader.
+    license:        License name the video is licensed under.
     creator:        The main artist who created the video.
     release_date:   The date (YYYYMMDD) when the video was released.
     timestamp:      UNIX timestamp of the moment the video became available.
     upload_date:    Video upload date (YYYYMMDD).
                     If not explicitly set, calculated from timestamp.
     uploader_id:    Nickname or id of the video uploader.
+    uploader_url:   Full URL to a personal webpage of the video uploader.
     location:       Physical location where the video was filmed.
     subtitles:      The available subtitles as a dictionary in the format
                     {language: subformats}. "subformats" is a list sorted from
@@ -424,7 +428,7 @@ class InfoExtractor(object):
             self.to_screen('Saving request to ' + filename)
             # Working around MAX_PATH limitation on Windows (see
             # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
-            if os.name == 'nt':
+            if compat_os_name == 'nt':
                 absfilepath = os.path.abspath(filename)
                 if len(absfilepath) > 259:
                     filename = '\\\\?\\' + absfilepath
@@ -593,7 +597,7 @@ class InfoExtractor(object):
                 if mobj:
                     break
 
-        if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
+        if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
             _name = '\033[0;34m%s\033[0m' % name
         else:
             _name = name
@@ -636,7 +640,7 @@ class InfoExtractor(object):
         downloader_params = self._downloader.params
 
         # Attempt to use provided username and password or .netrc data
-        if downloader_params.get('username', None) is not None:
+        if downloader_params.get('username') is not None:
             username = downloader_params['username']
             password = downloader_params['password']
         elif downloader_params.get('usenetrc', False):
@@ -663,7 +667,7 @@ class InfoExtractor(object):
             return None
         downloader_params = self._downloader.params
 
-        if downloader_params.get('twofactor', None) is not None:
+        if downloader_params.get('twofactor') is not None:
             return downloader_params['twofactor']
 
         return compat_getpass('Type %s and press [Return]: ' % note)
@@ -744,7 +748,7 @@ class InfoExtractor(object):
             'mature': 17,
             'restricted': 19,
         }
-        return RATING_TABLE.get(rating.lower(), None)
+        return RATING_TABLE.get(rating.lower())
 
     def _family_friendly_search(self, html):
         # See http://schema.org/VideoObject
@@ -759,7 +763,7 @@ class InfoExtractor(object):
             '0': 18,
             'false': 18,
         }
-        return RATING_TABLE.get(family_friendly.lower(), None)
+        return RATING_TABLE.get(family_friendly.lower())
 
     def _twitter_search_player(self, html):
         return self._html_search_meta('twitter:player', html,
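
Aside on the two rating-table hunks above, not part of the diff itself: dict.get() already returns None for a missing key, so dropping the explicit None default changes nothing. A minimal sketch with an abbreviated, illustrative table:

RATING_TABLE = {'mature': 17, 'restricted': 19}

# Both spellings behave identically when the key is absent.
assert RATING_TABLE.get('unrated') is None
assert RATING_TABLE.get('unrated', None) is None
assert RATING_TABLE.get('mature') == 17
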
@@ -899,6 +903,16 @@ class InfoExtractor(object):
                     item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                 formats)
 
+    @staticmethod
+    def _remove_duplicate_formats(formats):
+        format_urls = set()
+        unique_formats = []
+        for f in formats:
+            if f['url'] not in format_urls:
+                format_urls.add(f['url'])
+                unique_formats.append(f)
+        formats[:] = unique_formats
+
     def _is_valid_url(self, url, video_id, item='video'):
         url = self._proto_relative_url(url, scheme='http:')
         # For now assume non HTTP(S) URLs always valid
@@ -1022,11 +1036,21 @@ class InfoExtractor(object):
             return []
         m3u8_doc, urlh = res
         m3u8_url = urlh.geturl()
-        # A Media Playlist Tag MUST NOT appear in a Master Playlist
-        # https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
-        # The EXT-X-TARGETDURATION tag is REQUIRED for every M3U8 Media Playlists
-        # https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
-        if '#EXT-X-TARGETDURATION' in m3u8_doc:
+
+        # We should try extracting formats only from master playlists [1], i.e.
+        # playlists that describe available qualities. On the other hand media
+        # playlists [2] should be returned as is since they contain just the media
+        # without qualities renditions.
+        # Fortunately, master playlist can be easily distinguished from media
+        # playlist based on particular tags availability. As of [1, 2] master
+        # playlist tags MUST NOT appear in a media playist and vice versa.
+        # As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
+        # and MUST NOT appear in master playlist thus we can clearly detect media
+        # playlist with this criterion.
+        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
+        # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
+        # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
+        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
             return [{
                 'url': m3u8_url,
                 'format_id': m3u8_id,
@@ -1073,19 +1097,29 @@ class InfoExtractor(object):
                     'protocol': entry_protocol,
                     'preference': preference,
                 }
-                codecs = last_info.get('CODECS')
-                if codecs:
-                    # TODO: looks like video codec is not always necessarily goes first
-                    va_codecs = codecs.split(',')
-                    if va_codecs[0]:
-                        f['vcodec'] = va_codecs[0]
-                    if len(va_codecs) > 1 and va_codecs[1]:
-                        f['acodec'] = va_codecs[1]
                 resolution = last_info.get('RESOLUTION')
                 if resolution:
                     width_str, height_str = resolution.split('x')
                     f['width'] = int(width_str)
                     f['height'] = int(height_str)
+                codecs = last_info.get('CODECS')
+                if codecs:
+                    vcodec, acodec = [None] * 2
+                    va_codecs = codecs.split(',')
+                    if len(va_codecs) == 1:
+                        # Audio only entries usually come with single codec and
+                        # no resolution. For more robustness we also check it to
+                        # be mp4 audio.
+                        if not resolution and va_codecs[0].startswith('mp4a'):
+                            vcodec, acodec = 'none', va_codecs[0]
+                        else:
+                            vcodec = va_codecs[0]
+                    else:
+                        vcodec, acodec = va_codecs[:2]
+                    f.update({
+                        'acodec': acodec,
+                        'vcodec': vcodec,
+                    })
                 if last_media is not None:
                     f['m3u8_media'] = last_media
                     last_media = None
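
A standalone sketch of the CODECS handling added in the hunk above; the helper name and sample attribute values are illustrative and not part of youtube-dl. A lone mp4a.* codec with no RESOLUTION attribute is treated as an audio-only rendition; otherwise the first entry is taken as the video codec and the second, when present, as the audio codec.

def split_hls_codecs(codecs, resolution=None):
    # Split an HLS CODECS attribute such as 'avc1.64001f,mp4a.40.2'
    # into (vcodec, acodec), mirroring the branch logic in the hunk above.
    vcodec, acodec = None, None
    va_codecs = codecs.split(',')
    if len(va_codecs) == 1:
        # Single mp4a.* codec without a RESOLUTION: assume audio-only.
        if not resolution and va_codecs[0].startswith('mp4a'):
            vcodec, acodec = 'none', va_codecs[0]
        else:
            vcodec = va_codecs[0]
    else:
        vcodec, acodec = va_codecs[:2]
    return vcodec, acodec


assert split_hls_codecs('avc1.64001f,mp4a.40.2') == ('avc1.64001f', 'mp4a.40.2')
assert split_hls_codecs('mp4a.40.2') == ('none', 'mp4a.40.2')
assert split_hls_codecs('avc1.42c00d', resolution='640x360') == ('avc1.42c00d', None)
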
@@ -1186,11 +1220,13 @@ class InfoExtractor(object):
         http_count = 0
         m3u8_count = 0
 
+        srcs = []
         videos = smil.findall(self._xpath_ns('.//video', namespace))
         for video in videos:
             src = video.get('src')
-            if not src:
+            if not src or src in srcs:
                 continue
+            srcs.append(src)
 
             bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
             filesize = int_or_none(video.get('size') or video.get('fileSize'))
@@ -1222,6 +1258,7 @@ class InfoExtractor(object):
                 continue
 
             src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
+            src_url = src_url.strip()
 
             if proto == 'm3u8' or src_ext == 'm3u8':
                 m3u8_formats = self._extract_m3u8_formats(
@@ -1267,21 +1304,14 @@ class InfoExtractor(object):
         return formats
 
     def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
+        urls = []
         subtitles = {}
         for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
             src = textstream.get('src')
-            if not src:
+            if not src or src in urls:
                 continue
-            ext = textstream.get('ext') or determine_ext(src)
-            if not ext:
-                type_ = textstream.get('type')
-                SUBTITLES_TYPES = {
-                    'text/vtt': 'vtt',
-                    'text/srt': 'srt',
-                    'application/smptett+xml': 'tt',
-                }
-                if type_ in SUBTITLES_TYPES:
-                    ext = SUBTITLES_TYPES[type_]
+            urls.append(src)
+            ext = textstream.get('ext') or determine_ext(src) or mimetype2ext(textstream.get('type'))
             lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
             subtitles.setdefault(lang, []).append({
                 'url': src,
@@ -1343,14 +1373,14 @@ class InfoExtractor(object):
         mpd, urlh = res
         mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()
 
-        return self._parse_mpd(
+        return self._parse_mpd_formats(
             compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url, formats_dict=formats_dict)
 
-    def _parse_mpd(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}):
+    def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}):
         if mpd_doc.get('type') == 'dynamic':
             return []
 
-        namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace')
+        namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
 
         def _add_ns(path):
             return self._xpath_ns(path, namespace)
@@ -1430,12 +1460,16 @@ class InfoExtractor(object):
                                 base_url = base_url_e.text + base_url
                                 if re.match(r'^https?://', base_url):
                                     break
-                        if not re.match(r'^https?://', base_url):
+                        if mpd_base_url and not re.match(r'^https?://', base_url):
+                            if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
+                                mpd_base_url += '/'
                             base_url = mpd_base_url + base_url
                         representation_id = representation_attrib.get('id')
                         lang = representation_attrib.get('lang')
+                        url_el = representation.find(_add_ns('BaseURL'))
+                        filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                         f = {
-                            'format_id': mpd_id or representation_id,
+                            'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
                             'url': base_url,
                             'width': int_or_none(representation_attrib.get('width')),
                             'height': int_or_none(representation_attrib.get('height')),
@@ -1446,19 +1480,18 @@ class InfoExtractor(object):
                             'acodec': 'none' if content_type == 'video' else representation_attrib.get('codecs'),
                             'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                             'format_note': 'DASH %s' % content_type,
+                            'filesize': filesize,
                         }
                         representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
                         if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
                             if 'total_number' not in representation_ms_info and 'segment_duration':
-                                segment_duration = representation_ms_info['segment_duration'] / representation_ms_info['timescale']
-                                representation_ms_info['total_number'] = int(math.ceil(period_duration / segment_duration))
+                                segment_duration = float(representation_ms_info['segment_duration']) / float(representation_ms_info['timescale'])
+                                representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
                             media_template = representation_ms_info['media_template']
                             media_template = media_template.replace('$RepresentationID$', representation_id)
-                            media_template = re.sub(r'\$(Bandwidth)(?:%(0\d+)d)?\$', r'%(\1)\2d', media_template)
-                            media_template = media_template % {'Bandwidth': representation_attrib.get('bandwidth')}
-                            media_template = re.sub(r'\$(Number)(?:%(0\d+)d)?\$', r'%(\1)\2d', media_template)
+                            media_template = re.sub(r'\$(Number|Bandwidth)(?:%(0\d+)d)?\$', r'%(\1)\2d', media_template)
                             media_template.replace('$$', '$')
-                            representation_ms_info['segment_urls'] = [media_template % {'Number': segment_number} for segment_number in range(representation_ms_info['start_number'], representation_ms_info['total_number'] + representation_ms_info['start_number'])]
+                            representation_ms_info['segment_urls'] = [media_template % {'Number': segment_number, 'Bandwidth': representation_attrib.get('bandwidth')} for segment_number in range(representation_ms_info['start_number'], representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                         if 'segment_urls' in representation_ms_info:
                             f.update({
                                 'segment_urls': representation_ms_info['segment_urls'],
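
A self-contained sketch of the segment-template expansion reworked in the hunk above; the function name and sample values are illustrative, not youtube-dl API, and the '$$' escape is ignored here. '$RepresentationID$' is substituted directly, while '$Number%05d$'-style and '$Bandwidth$' placeholders become Python %-format fields that are expanded once per segment number.

import re


def expand_media_template(media_template, representation_id, bandwidth,
                          start_number, total_number):
    # Substitute the representation id, then turn $Number%05d$ / $Bandwidth$
    # placeholders into %(Number)05d-style fields and expand them per segment.
    media_template = media_template.replace('$RepresentationID$', representation_id)
    media_template = re.sub(
        r'\$(Number|Bandwidth)(?:%(0\d+)d)?\$', r'%(\1)\2d', media_template)
    return [
        media_template % {'Number': number, 'Bandwidth': bandwidth}
        for number in range(start_number, start_number + total_number)]


print(expand_media_template('chunk_$RepresentationID$_$Number%05d$.m4s',
                            'video=1', 1500000, 1, 3))
# ['chunk_video=1_00001.m4s', 'chunk_video=1_00002.m4s', 'chunk_video=1_00003.m4s']
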
@@ -1483,12 +1516,13 @@ class InfoExtractor(object):
                             existing_format.update(f)
                     else:
                         self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
+        self._sort_formats(formats)
         return formats
 
     def _live_title(self, name):
         """ Generate the title for a live video """
         now = datetime.datetime.now()
-        now_str = now.strftime("%Y-%m-%d %H:%M")
+        now_str = now.strftime('%Y-%m-%d %H:%M')
         return name + ' ' + now_str
 
     def _int(self, v, name, fatal=False, **kwargs):
@@ -1561,7 +1595,7 @@ class InfoExtractor(object):
         return {}
 
     def _get_subtitles(self, *args, **kwargs):
-        raise NotImplementedError("This method must be implemented by subclasses")
+        raise NotImplementedError('This method must be implemented by subclasses')
 
     @staticmethod
     def _merge_subtitle_items(subtitle_list1, subtitle_list2):
@@ -1587,7 +1621,16 @@ class InfoExtractor(object):
         return {}
 
     def _get_automatic_captions(self, *args, **kwargs):
-        raise NotImplementedError("This method must be implemented by subclasses")
+        raise NotImplementedError('This method must be implemented by subclasses')
+
+    def mark_watched(self, *args, **kwargs):
+        if (self._downloader.params.get('mark_watched', False) and
+                (self._get_login_info()[0] is not None or
+                    self._downloader.params.get('cookiefile') is not None)):
+            self._mark_watched(*args, **kwargs)
+
+    def _mark_watched(self, *args, **kwargs):
+        raise NotImplementedError('This method must be implemented by subclasses')
 
 
 class SearchInfoExtractor(InfoExtractor):
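
A condensed, hypothetical sketch of the gate applied by the new mark_watched() method above: watch history is only touched when the user opted in via the mark_watched option and some form of authentication (login credentials or a cookie jar) is available. The function and parameter names below are illustrative.

def _should_mark_watched(params, username):
    # params stands in for self._downloader.params; username stands in for
    # the first element returned by _get_login_info().
    return bool(
        params.get('mark_watched', False) and
        (username is not None or params.get('cookiefile') is not None))


assert _should_mark_watched({'mark_watched': True, 'cookiefile': 'cookies.txt'}, None)
assert _should_mark_watched({'mark_watched': True}, 'user@example.com')
assert not _should_mark_watched({'mark_watched': True}, None)
assert not _should_mark_watched({}, 'user@example.com')
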
@@ -1627,7 +1670,7 @@ class SearchInfoExtractor(InfoExtractor):
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
-        raise NotImplementedError("This method must be implemented by subclasses")
+        raise NotImplementedError('This method must be implemented by subclasses')
 
     @property
     def SEARCH_KEY(self):
         return self._SEARCH_KEY
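
For context, a hypothetical minimal subclass illustrating the contract that the NotImplementedError above enforces: a concrete search extractor defines _SEARCH_KEY and _MAX_RESULTS and implements _get_n_results(); the extractor name and URLs below are made up.

class ExampleSearchIE(SearchInfoExtractor):
    IE_NAME = 'example:search'
    _SEARCH_KEY = 'examplesearch'  # enables 'examplesearch5:some query' style URLs
    _MAX_RESULTS = 50

    def _get_n_results(self, query, n):
        # A real extractor would call the site's search API here; this stub
        # just fabricates n entries to show the expected playlist result shape.
        entries = [
            self.url_result('http://www.example.com/video/%d' % i, video_id=str(i))
            for i in range(n)]
        return self.playlist_result(entries, playlist_title=query)
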