11 compat_urllib_request,
class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the FileDownloader. The FileDownloader processes this
    information possibly downloading the video to the file system, among
    other possible outcomes.

    The dictionaries must include the following fields:

    id:             Video identifier.
    url:            Final video URL.
    title:          Video title, unescaped.
    ext:            Video filename extension.

    The following fields are optional:

    format:         The video format, defaults to ext (used for --get-format)
    thumbnails:     A list of dictionaries (with the entries "resolution" and
                    "url") for the varying thumbnails
    thumbnail:      Full URL to a video thumbnail image.
    description:    One-line video description.
    uploader:       Full name of the video uploader.
    upload_date:    Video upload date (YYYYMMDD).
    uploader_id:    Nickname or id of the video uploader.
    location:       Physical location of the video.
    player_url:     SWF Player URL (used for rtmpdump).
    subtitles:      The subtitle file contents as a dictionary in the format
                    {language: subtitles}.
    view_count:     How many users have watched the video on the platform.
    urlhandle:      [internal] The urlHandle to be used to download the file,
                    like returned by urllib.request.urlopen

    The fields should all be Unicode strings.

    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.

    _real_extract() must return a *list* of information dictionaries as
    described above.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    # Whether _real_initialize() has already run for this instance.
    _ready = False
    # The FileDownloader this IE reports to; set via set_downloader().
    _downloader = None
    # Subclasses set this to False to mark the extractor as broken.
    _WORKING = True

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""

        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        self.initialize()
        return self._real_extract(url)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        # By convention extractor class names end in "IE"; strip that suffix.
        return cls.__name__[:-2]

    @property
    def IE_NAME(self):
        # Human-readable name derived from the class name, minus "IE".
        return type(self).__name__[:-2]

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns the response handle """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            # note=False suppresses screen output entirely.
            self.to_screen(u'%s: %s' % (video_id, note))
        try:
            return compat_urllib_request.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is None:
                errnote = u'Unable to download webpage'
            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2], cause=err)

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns a tuple (page content as string, URL handle) """

        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote)
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        # Prefer the charset declared in the Content-Type header...
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            # ...then a <meta charset=...> tag near the top of the document,
            # defaulting to UTF-8 if neither is present.
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            else:
                encoding = 'utf-8'
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                # url_or_request is a plain string, not a Request object.
                url = url_or_request
            self.to_screen(u'Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        # 'replace' keeps a mis-declared charset from raising
        # UnicodeDecodeError and aborting the extraction.
        content = webpage_bytes.decode(encoding, 'replace')
        return (content, urlh)

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns the data of the page as a string """
        return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen(u'%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen(u'%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen(u'Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen(u'Logging in')

    #Methods for following #608
    def url_result(self, url, ie=None):
        """Returns a url that points to a page that should be processed"""
        #TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        return video_info

    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        return video_info

    def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        ExtractorError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            # A list of patterns: keep the first one that matches.
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        # Highlight the field name in blue on capable terminals.
        if sys.stderr.isatty() and os.name != 'nt':
            _name = u'\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            # return the first matching group
            return next(g for g in mobj.groups() if g is not None)
        elif default is not None:
            return default
        elif fatal:
            raise ExtractorError(u'Unable to extract %s' % _name)
        else:
            self._downloader.report_warning(u'unable to extract %s; '
                u'please report this issue on http://yt-dl.org/bug' % _name)
            return None

    def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags)
        if res:
            return clean_html(res).strip()
        else:
            return res

    def _get_login_info(self):
        """
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                # Best-effort: a broken .netrc only warns, it does not abort.
                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))

        return (username, password)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regex(prop):
        # Captures the content attribute whether it is double- or
        # single-quoted (one of the two groups will be None).
        return r'<meta.+?property=[\'"]og:%s[\'"].+?content=(?:"(.+?)"|\'(.+?)\')' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        if name is None:
            name = 'OpenGraph %s' % prop
        escaped = self._search_regex(self._og_regex(prop), html, name, flags=re.DOTALL, **kargs)
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, u'thumbnail url', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, **kargs):
        return self._og_search_property('title', html, **kargs)

    def _og_search_video_url(self, html, name='video url', **kargs):
        # Prefer the HTTPS variant when the page advertises one.
        return self._html_search_regex([self._og_regex('video:secure_url'),
                                        self._og_regex('video')],
                                       html, name, **kargs)
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # The prefix is empty (first result only), a positive integer
        # (that many results), or the literal "all".
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        """Parse the search query and delegate to _get_n_results()."""
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError(u'Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            # No count prefix: download only the first result.
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                # Defensive: the URL regex only admits positive integers.
                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                # Clamp oversized requests to the extractor's maximum.
                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError("This method must be implemented by subclasses")

    @property
    def SEARCH_KEY(self):
        # Public accessor for the search prefix (e.g. "ytsearch").
        return self._SEARCH_KEY