Merge remote-tracking branch 'jaimeMF/yt-toplists'
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 482a231ec0dc4632daabb4526253dae89bea64f8..534908a2b89af8db08f5d7ba0fc7f983c1199fa9 100644
@@ -9,7 +9,6 @@ import xml.etree.ElementTree
 from ..utils import (
     compat_http_client,
     compat_urllib_error,
-    compat_urllib_request,
     compat_str,
 
     clean_html,
@@ -20,6 +19,7 @@ from ..utils import (
     unescapeHTML,
 )
 
+
 class InfoExtractor(object):
     """Information Extractor class.
 
@@ -55,6 +55,9 @@ class InfoExtractor(object):
     subtitles:      The subtitle file contents as a dictionary in the format
                     {language: subtitles}.
     view_count:     How many users have watched the video on the platform.
+    like_count:     Number of positive ratings of the video
+    dislike_count:  Number of negative ratings of the video
+    comment_count:  Number of comments on the video
     urlhandle:      [internal] The urlHandle to be used to download the file,
                     like returned by urllib.request.urlopen
     age_limit:      Age restriction for the video, as an integer (years)
@@ -76,6 +79,7 @@ class InfoExtractor(object):
                     * acodec    Name of the audio codec in use
                     * vbr       Average video bitrate in KBit/s
                     * vcodec    Name of the video codec in use
+                    * filesize  The number of bytes, if known in advance
     webpage_url:    The url to the video webpage, if given to youtube-dl it
                     should allow to get the same result again. (It will be set
                     by YoutubeDL if it's missing)
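
Note on the hunk above: like_count, dislike_count, comment_count and the per-format filesize are optional integer fields that an extractor may fill in when the site exposes them. The following is only a minimal sketch of how a hypothetical extractor could return them; ExampleIE, the example.com URLs, the regexes and the literal numbers are illustrative assumptions, not part of this commit.

    import re

    from .common import InfoExtractor


    class ExampleIE(InfoExtractor):
        # Hypothetical extractor, used only to illustrate the new optional fields.
        _VALID_URL = r'https?://(?:www\.)?example\.com/video/(?P<id>[0-9]+)'

        def _real_extract(self, url):
            mobj = re.match(self._VALID_URL, url)
            video_id = mobj.group('id')
            webpage = self._download_webpage(url, video_id)

            return {
                'id': video_id,
                'title': self._html_search_regex(
                    r'<h1>(.+?)</h1>', webpage, u'title'),
                'formats': [{
                    'url': 'http://example.com/media/%s.mp4' % video_id,
                    'ext': 'mp4',
                    'format_id': 'mp4',
                    'filesize': 12345678,  # size in bytes, if known in advance
                }],
                # New optional metadata counters (plain integers):
                'like_count': 123,
                'dislike_count': 4,
                'comment_count': 56,
            }
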
@@ -150,27 +154,38 @@ class InfoExtractor(object):
     def IE_NAME(self):
         return type(self).__name__[:-2]
 
-    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
+    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
         """ Returns the response handle """
         if note is None:
             self.report_download_webpage(video_id)
         elif note is not False:
-            self.to_screen(u'%s: %s' % (video_id, note))
+            if video_id is None:
+                self.to_screen(u'%s' % (note,))
+            else:
+                self.to_screen(u'%s: %s' % (video_id, note))
         try:
-            return compat_urllib_request.urlopen(url_or_request)
+            return self._downloader.urlopen(url_or_request)
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             if errnote is None:
                 errnote = u'Unable to download webpage'
-            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2], cause=err)
+            errmsg = u'%s: %s' % (errnote, compat_str(err))
+            if fatal:
+                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
+            else:
+                self._downloader.report_warning(errmsg)
+                return False
 
-    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
+    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
         """ Returns a tuple (page content as string, URL handle) """
 
         # Strip hashes from the URL (#1038)
         if isinstance(url_or_request, (compat_str, str)):
             url_or_request = url_or_request.partition('#')[0]
 
-        urlh = self._request_webpage(url_or_request, video_id, note, errnote)
+        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
+        if urlh is False:
+            assert not fatal
+            return False
         content_type = urlh.headers.get('Content-Type', '')
         webpage_bytes = urlh.read()
         m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
@@ -205,11 +220,17 @@ class InfoExtractor(object):
         content = webpage_bytes.decode(encoding, 'replace')
         return (content, urlh)
 
-    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
+    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
         """ Returns the data of the page as a string """
-        return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]
+        res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal)
+        if res is False:
+            return res
+        else:
+            content, _ = res
+            return content
 
-    def _download_xml(self, url_or_request, video_id, note=u'Downloading XML', errnote=u'Unable to downloand XML'):
+    def _download_xml(self, url_or_request, video_id,
+                      note=u'Downloading XML', errnote=u'Unable to download XML'):
         """Return the xml as an xml.etree.ElementTree.Element"""
         xml_string = self._download_webpage(url_or_request, video_id, note, errnote)
         return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
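
Note on the fatal parameter introduced above: when a caller passes fatal=False, a failed download is reported via report_warning() and the helper returns False instead of raising ExtractorError, so extraction can continue without the optional page. A sketch of the intended call pattern inside a hypothetical _real_extract; the URL, note/errnote texts and the regex are illustrative assumptions.

    # video_id is assumed to have been extracted earlier in _real_extract().
    description = None
    about_page = self._download_webpage(
        'http://example.com/video/%s/about' % video_id, video_id,
        note=u'Downloading about page',
        errnote=u'Unable to download about page',
        fatal=False)
    if about_page is False:
        # A warning has already been printed; continue without the extra page.
        pass
    else:
        description = self._html_search_regex(
            r'<p class="description">(.+?)</p>', about_page,
            u'description', fatal=False)
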
@@ -362,7 +383,8 @@ class InfoExtractor(object):
         if display_name is None:
             display_name = name
         return self._html_search_regex(
-            r'''(?ix)<meta(?=[^>]+(?:name|property)=["\']%s["\'])
+            r'''(?ix)<meta
+                    (?=[^>]+(?:itemprop|name|property)=["\']%s["\'])
                     [^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
             html, display_name, fatal=False)
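
Note on the widened regex above: adding itemprop to the attribute alternation lets _html_search_meta pick up schema.org-style microdata tags as well. A small illustration, assuming the call happens inside an extractor and that the markup below is what the page actually contains:

    webpage = u'<meta itemprop="duration" content="PT2M30S">'
    # Matched now; before this change only name="..." and property="..."
    # attributes were considered.
    duration = self._html_search_meta('duration', webpage, u'duration')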