Move ARD, Arte, ZDF into their own files
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 619ddeba1c8ff06587d4c4945aff0c679053555f..e8940ac36b806363ef0dff1fc95d9254216c3a22 100755
@@ -21,1296 +21,27 @@ import binascii
 import urllib
 
 from .utils import *
+from .extractor.common import InfoExtractor, SearchInfoExtractor
 
+from .extractor.ard import ARDIE
+from .extractor.arte import ArteTvIE
+from .extractor.dailymotion import DailymotionIE
+from .extractor.metacafe import MetacafeIE
+from .extractor.statigram import StatigramIE
+from .extractor.photobucket import PhotobucketIE
+from .extractor.vimeo import VimeoIE
+from .extractor.yahoo import YahooIE
+from .extractor.youtube import YoutubeIE, YoutubePlaylistIE, YoutubeUserIE, YoutubeChannelIE
+from .extractor.zdf import ZDFIE
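
The imports above define the new layout: every site extractor now lives in its own module under youtube_dl/extractor/, and each one subclasses the shared base classes that moved to youtube_dl/extractor/common.py. Below is a rough sketch of what such a split-out module could look like; the site, URL pattern and page markup are invented for illustration, the ..utils import path is an assumption, and only the base-class API calls come from common.py.

# Hypothetical youtube_dl/extractor/example.py, following the new layout
import re

from .common import InfoExtractor
from ..utils import ExtractorError   # assumed relative path to the shared utils module


class ExampleIE(InfoExtractor):
    """Information extractor for a made-up example.com site (illustrative only)."""

    _VALID_URL = r'(?:https?://)?(?:www\.)?example\.com/videos/(?P<id>[0-9]+)'
    IE_NAME = u'example'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)
        self.report_extraction(video_id)

        # Helpers such as _search_regex/_html_search_regex are inherited
        # from the InfoExtractor base class now living in extractor/common.py
        video_url = self._search_regex(r'"file"\s*:\s*"([^"]+)"',
                                       webpage, u'video URL')
        video_title = self._html_search_regex(r'<title>(.*?)</title>',
                                              webpage, u'title')

        # _real_extract must return a *list* of info dictionaries
        return [{
            'id':    video_id,
            'url':   video_url,
            'title': video_title,
            'ext':   u'mp4',
        }]
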
 
-class InfoExtractor(object):
-    """Information Extractor class.
 
-    Information extractors are the classes that, given a URL, extract
-    information about the video (or videos) the URL refers to. This
-    information includes the real video URL, the video title, author and
-    others. The information is stored in a dictionary which is then
-    passed to the FileDownloader. The FileDownloader processes this
-    information possibly downloading the video to the file system, among
-    other possible outcomes.
 
-    The dictionaries must include the following fields:
 
-    id:             Video identifier.
-    url:            Final video URL.
-    title:          Video title, unescaped.
-    ext:            Video filename extension.
 
-    The following fields are optional:
 
-    format:         The video format, defaults to ext (used for --get-format)
-    thumbnail:      Full URL to a video thumbnail image.
-    description:    One-line video description.
-    uploader:       Full name of the video uploader.
-    upload_date:    Video upload date (YYYYMMDD).
-    uploader_id:    Nickname or id of the video uploader.
-    location:       Physical location of the video.
-    player_url:     SWF Player URL (used for rtmpdump).
-    subtitles:      The subtitle file contents.
-    urlhandle:      [internal] The urlHandle to be used to download the file,
-                    as returned by urllib.request.urlopen
 
-    The fields should all be Unicode strings.
 
-    Subclasses of this one should re-define the _real_initialize() and
-    _real_extract() methods and define a _VALID_URL regexp.
-    Probably, they should also be added to the list of extractors.
 
-    _real_extract() must return a *list* of information dictionaries as
-    described above.
-
-    Finally, the _WORKING attribute should be set to False for broken IEs
-    in order to warn the users and skip the tests.
-    """
-
-    _ready = False
-    _downloader = None
-    _WORKING = True
-
-    def __init__(self, downloader=None):
-        """Constructor. Receives an optional downloader."""
-        self._ready = False
-        self.set_downloader(downloader)
-
-    @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-        return re.match(cls._VALID_URL, url) is not None
-
-    @classmethod
-    def working(cls):
-        """Getter method for _WORKING."""
-        return cls._WORKING
-
-    def initialize(self):
-        """Initializes an instance (authentication, etc)."""
-        if not self._ready:
-            self._real_initialize()
-            self._ready = True
-
-    def extract(self, url):
-        """Extracts URL information and returns it in list of dicts."""
-        self.initialize()
-        return self._real_extract(url)
-
-    def set_downloader(self, downloader):
-        """Sets the downloader for this IE."""
-        self._downloader = downloader
-
-    def _real_initialize(self):
-        """Real initialization process. Redefine in subclasses."""
-        pass
-
-    def _real_extract(self, url):
-        """Real extraction process. Redefine in subclasses."""
-        pass
-
-    @property
-    def IE_NAME(self):
-        return type(self).__name__[:-2]
-
-    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
-        """ Returns the response handle """
-        if note is None:
-            self.report_download_webpage(video_id)
-        elif note is not False:
-            self.to_screen(u'%s: %s' % (video_id, note))
-        try:
-            return compat_urllib_request.urlopen(url_or_request)
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            if errnote is None:
-                errnote = u'Unable to download webpage'
-            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
-
-    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
-        """ Returns a tuple (page content as string, URL handle) """
-        urlh = self._request_webpage(url_or_request, video_id, note, errnote)
-        content_type = urlh.headers.get('Content-Type', '')
-        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
-        if m:
-            encoding = m.group(1)
-        else:
-            encoding = 'utf-8'
-        webpage_bytes = urlh.read()
-        if self._downloader.params.get('dump_intermediate_pages', False):
-            try:
-                url = url_or_request.get_full_url()
-            except AttributeError:
-                url = url_or_request
-            self.to_screen(u'Dumping request to ' + url)
-            dump = base64.b64encode(webpage_bytes).decode('ascii')
-            self._downloader.to_screen(dump)
-        content = webpage_bytes.decode(encoding, 'replace')
-        return (content, urlh)
-
-    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
-        """ Returns the data of the page as a string """
-        return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]
-
-    def to_screen(self, msg):
-        """Print msg to screen, prefixing it with '[ie_name]'"""
-        self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
-
-    def report_extraction(self, id_or_name):
-        """Report information extraction."""
-        self.to_screen(u'%s: Extracting information' % id_or_name)
-
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        self.to_screen(u'%s: Downloading webpage' % video_id)
-
-    def report_age_confirmation(self):
-        """Report attempt to confirm age."""
-        self.to_screen(u'Confirming age')
-
-    # Methods for handling issue #608:
-    # they set the correct value of the '_type' key
-    def video_result(self, video_info):
-        """Returns a video"""
-        video_info['_type'] = 'video'
-        return video_info
-    def url_result(self, url, ie=None):
-        """Returns a url that points to a page that should be processed"""
-        #TODO: ie should be the class used for getting the info
-        video_info = {'_type': 'url',
-                      'url': url,
-                      'ie_key': ie}
-        return video_info
-    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
-        """Returns a playlist"""
-        video_info = {'_type': 'playlist',
-                      'entries': entries}
-        if playlist_id:
-            video_info['id'] = playlist_id
-        if playlist_title:
-            video_info['title'] = playlist_title
-        return video_info
-
-    def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
-        """
-        Perform a regex search on the given string, using a single pattern or a
-        list of patterns, returning the first matching group.
-        In case of failure return a default value, emit a warning, or raise an
-        ExtractorError, depending on fatal, specifying the field name.
-        """
-        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
-            mobj = re.search(pattern, string, flags)
-        else:
-            for p in pattern:
-                mobj = re.search(p, string, flags)
-                if mobj: break
-
-        if sys.stderr.isatty() and os.name != 'nt':
-            _name = u'\033[0;34m%s\033[0m' % name
-        else:
-            _name = name
-
-        if mobj:
-            # return the first matching group
-            return next(g for g in mobj.groups() if g is not None)
-        elif default is not None:
-            return default
-        elif fatal:
-            raise ExtractorError(u'Unable to extract %s' % _name)
-        else:
-            self._downloader.report_warning(u'unable to extract %s; '
-                u'please report this issue on GitHub.' % _name)
-            return None
-
-    def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
-        """
-        Like _search_regex, but strips HTML tags and unescapes entities.
-        """
-        res = self._search_regex(pattern, string, name, default, fatal, flags)
-        if res:
-            return clean_html(res).strip()
-        else:
-            return res
-
-class SearchInfoExtractor(InfoExtractor):
-    """
-    Base class for paged search queries extractors.
-    They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
-    Instances should define _SEARCH_KEY and _MAX_RESULTS.
-    """
-
-    @classmethod
-    def _make_valid_url(cls):
-        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
-
-    @classmethod
-    def suitable(cls, url):
-        return re.match(cls._make_valid_url(), url) is not None
-
-    def _real_extract(self, query):
-        mobj = re.match(self._make_valid_url(), query)
-        if mobj is None:
-            raise ExtractorError(u'Invalid search query "%s"' % query)
-
-        prefix = mobj.group('prefix')
-        query = mobj.group('query')
-        if prefix == '':
-            return self._get_n_results(query, 1)
-        elif prefix == 'all':
-            return self._get_n_results(query, self._MAX_RESULTS)
-        else:
-            n = int(prefix)
-            if n <= 0:
-                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
-            elif n > self._MAX_RESULTS:
-                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
-                n = self._MAX_RESULTS
-            return self._get_n_results(query, n)
-
-    def _get_n_results(self, query, n):
-        """Get a specified number of results for a query"""
-        raise NotImplementedError("This method must be implemented by subclasses")
-
-
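
For reference, the search-query format handled by SearchInfoExtractor (removed here, now in extractor/common.py) puts an optional count between the search key and the colon. A small sketch of how that prefix is interpreted; the 'demosearch' key and the result limit below are made up:

import re

_SEARCH_KEY = 'demosearch'   # hypothetical search key
_MAX_RESULTS = 10            # hypothetical per-IE limit

valid_url = r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % _SEARCH_KEY

for q in ('demosearch:kittens', 'demosearch3:kittens', 'demosearchall:kittens'):
    prefix = re.match(valid_url, q).group('prefix')
    if prefix == '':
        n = 1                                # no prefix: first result only
    elif prefix == 'all':
        n = _MAX_RESULTS                     # 'all': everything the IE allows
    else:
        n = min(int(prefix), _MAX_RESULTS)   # numeric prefix, capped at the maximum
    print('%s -> %d result(s)' % (q, n))
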
-class YoutubeIE(InfoExtractor):
-    """Information extractor for youtube.com."""
-
-    _VALID_URL = r"""^
-                     (
-                         (?:https?://)?                                       # http(s):// (optional)
-                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
-                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
-                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
-                         (?:                                                  # the various things that can precede the ID:
-                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
-                             |(?:                                             # or the v= param in all its forms
-                                 (?:watch(?:_popup)?(?:\.php)?)?              # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
-                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
-                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
-                                 v=
-                             )
-                         )?                                                   # optional -> youtube.com/xxxx is OK
-                     )?                                                       # all until now is optional -> you can pass the naked ID
-                     ([0-9A-Za-z_-]+)                                         # here it is! the YouTube video ID
-                     (?(1).+)?                                                # if we found the ID, everything can follow
-                     $"""
-    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
-    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
-    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
-    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
-    _NETRC_MACHINE = 'youtube'
-    # Listed in order of quality
-    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
-    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
-    _video_extensions = {
-        '13': '3gp',
-        '17': 'mp4',
-        '18': 'mp4',
-        '22': 'mp4',
-        '37': 'mp4',
-        '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
-        '43': 'webm',
-        '44': 'webm',
-        '45': 'webm',
-        '46': 'webm',
-    }
-    _video_dimensions = {
-        '5': '240x400',
-        '6': '???',
-        '13': '???',
-        '17': '144x176',
-        '18': '360x640',
-        '22': '720x1280',
-        '34': '360x640',
-        '35': '480x854',
-        '37': '1080x1920',
-        '38': '3072x4096',
-        '43': '360x640',
-        '44': '480x854',
-        '45': '720x1280',
-        '46': '1080x1920',
-    }
-    IE_NAME = u'youtube'
-
-    @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-        if YoutubePlaylistIE.suitable(url): return False
-        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
-
-    def report_lang(self):
-        """Report attempt to set language."""
-        self.to_screen(u'Setting language')
-
-    def report_login(self):
-        """Report attempt to log in."""
-        self.to_screen(u'Logging in')
-
-    def report_video_webpage_download(self, video_id):
-        """Report attempt to download video webpage."""
-        self.to_screen(u'%s: Downloading video webpage' % video_id)
-
-    def report_video_info_webpage_download(self, video_id):
-        """Report attempt to download video info webpage."""
-        self.to_screen(u'%s: Downloading video info webpage' % video_id)
-
-    def report_video_subtitles_download(self, video_id):
-        """Report attempt to download video info webpage."""
-        self.to_screen(u'%s: Checking available subtitles' % video_id)
-
-    def report_video_subtitles_request(self, video_id, sub_lang, format):
-        """Report attempt to download video info webpage."""
-        self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
-
-    def report_video_subtitles_available(self, video_id, sub_lang_list):
-        """Report available subtitles."""
-        sub_lang = ",".join(list(sub_lang_list.keys()))
-        self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))
-
-    def report_information_extraction(self, video_id):
-        """Report attempt to extract video information."""
-        self.to_screen(u'%s: Extracting video information' % video_id)
-
-    def report_unavailable_format(self, video_id, format):
-        """Report extracted video URL."""
-        self.to_screen(u'%s: Format %s not available' % (video_id, format))
-
-    def report_rtmp_download(self):
-        """Indicate the download will use the RTMP protocol."""
-        self.to_screen(u'RTMP download detected')
-
-    def _get_available_subtitles(self, video_id):
-        self.report_video_subtitles_download(video_id)
-        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
-        try:
-            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            return (u'unable to download video subtitles: %s' % compat_str(err), None)
-        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
-        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
-        if not sub_lang_list:
-            return (u'video doesn\'t have subtitles', None)
-        return sub_lang_list
-
-    def _list_available_subtitles(self, video_id):
-        sub_lang_list = self._get_available_subtitles(video_id)
-        self.report_video_subtitles_available(video_id, sub_lang_list)
-
-    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
-        """
-        Return tuple:
-        (error_message, sub_lang, sub)
-        """
-        self.report_video_subtitles_request(video_id, sub_lang, format)
-        params = compat_urllib_parse.urlencode({
-            'lang': sub_lang,
-            'name': sub_name,
-            'v': video_id,
-            'fmt': format,
-        })
-        url = 'http://www.youtube.com/api/timedtext?' + params
-        try:
-            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
-        if not sub:
-            return (u'Did not fetch video subtitles', None, None)
-        return (None, sub_lang, sub)
-
-    def _request_automatic_caption(self, video_id, webpage):
-        """We need the webpage for getting the captions url, pass it as an
-           argument to speed up the process."""
-        sub_lang = self._downloader.params.get('subtitleslang') or 'en'
-        sub_format = self._downloader.params.get('subtitlesformat')
-        self.to_screen(u'%s: Looking for automatic captions' % video_id)
-        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
-        err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
-        if mobj is None:
-            return [(err_msg, None, None)]
-        player_config = json.loads(mobj.group(1))
-        try:
-            args = player_config[u'args']
-            caption_url = args[u'ttsurl']
-            timestamp = args[u'timestamp']
-            params = compat_urllib_parse.urlencode({
-                'lang': 'en',
-                'tlang': sub_lang,
-                'fmt': sub_format,
-                'ts': timestamp,
-                'kind': 'asr',
-            })
-            subtitles_url = caption_url + '&' + params
-            sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
-            return [(None, sub_lang, sub)]
-        except KeyError:
-            return [(err_msg, None, None)]
-
-    def _extract_subtitle(self, video_id):
-        """
-        Return a list with a tuple:
-        [(error_message, sub_lang, sub)]
-        """
-        sub_lang_list = self._get_available_subtitles(video_id)
-        sub_format = self._downloader.params.get('subtitlesformat')
-        if isinstance(sub_lang_list, tuple):  # There was some error, it didn't get the available subtitles
-            return [(sub_lang_list[0], None, None)]
-        if self._downloader.params.get('subtitleslang', False):
-            sub_lang = self._downloader.params.get('subtitleslang')
-        elif 'en' in sub_lang_list:
-            sub_lang = 'en'
-        else:
-            sub_lang = list(sub_lang_list.keys())[0]
-        if not sub_lang in sub_lang_list:
-            return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]
-
-        subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
-        return [subtitle]
-
-    def _extract_all_subtitles(self, video_id):
-        sub_lang_list = self._get_available_subtitles(video_id)
-        sub_format = self._downloader.params.get('subtitlesformat')
-        if isinstance(sub_lang_list, tuple):  # There was some error, it didn't get the available subtitles
-            return [(sub_lang_list[0], None, None)]
-        subtitles = []
-        for sub_lang in sub_lang_list:
-            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
-            subtitles.append(subtitle)
-        return subtitles
-
-    def _print_formats(self, formats):
-        print('Available formats:')
-        for x in formats:
-            print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))
-
-    def _real_initialize(self):
-        if self._downloader is None:
-            return
-
-        username = None
-        password = None
-        downloader_params = self._downloader.params
-
-        # Attempt to use provided username and password or .netrc data
-        if downloader_params.get('username', None) is not None:
-            username = downloader_params['username']
-            password = downloader_params['password']
-        elif downloader_params.get('usenetrc', False):
-            try:
-                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
-                if info is not None:
-                    username = info[0]
-                    password = info[2]
-                else:
-                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-            except (IOError, netrc.NetrcParseError) as err:
-                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
-                return
-
-        # Set language
-        request = compat_urllib_request.Request(self._LANG_URL)
-        try:
-            self.report_lang()
-            compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
-            return
-
-        # No authentication to be performed
-        if username is None:
-            return
-
-        request = compat_urllib_request.Request(self._LOGIN_URL)
-        try:
-            login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
-            return
-
-        galx = None
-        dsh = None
-        match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
-        if match:
-            galx = match.group(1)
-
-        match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
-        if match:
-            dsh = match.group(1)
-
-        # Log in
-        login_form_strs = {
-                u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
-                u'Email': username,
-                u'GALX': galx,
-                u'Passwd': password,
-                u'PersistentCookie': u'yes',
-                u'_utf8': u'霱',
-                u'bgresponse': u'js_disabled',
-                u'checkConnection': u'',
-                u'checkedDomains': u'youtube',
-                u'dnConn': u'',
-                u'dsh': dsh,
-                u'pstMsg': u'0',
-                u'rmShown': u'1',
-                u'secTok': u'',
-                u'signIn': u'Sign in',
-                u'timeStmp': u'',
-                u'service': u'youtube',
-                u'uilel': u'3',
-                u'hl': u'en_US',
-        }
-        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
-        # chokes on unicode
-        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
-        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
-        request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
-        try:
-            self.report_login()
-            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
-            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
-                self._downloader.report_warning(u'unable to log in: bad username or password')
-                return
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
-            return
-
-        # Confirm age
-        age_form = {
-                'next_url':     '/',
-                'action_confirm':   'Confirm',
-                }
-        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
-        try:
-            self.report_age_confirmation()
-            age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
-
-    def _extract_id(self, url):
-        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group(2)
-        return video_id
-
-    def _real_extract(self, url):
-        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
-        mobj = re.search(self._NEXT_URL_RE, url)
-        if mobj:
-            url = 'https://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
-        video_id = self._extract_id(url)
-
-        # Get video webpage
-        self.report_video_webpage_download(video_id)
-        url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
-        request = compat_urllib_request.Request(url)
-        try:
-            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))
-
-        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
-
-        # Attempt to extract SWF player URL
-        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
-        if mobj is not None:
-            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
-        else:
-            player_url = None
-
-        # Get video info
-        self.report_video_info_webpage_download(video_id)
-        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
-            video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
-                    % (video_id, el_type))
-            video_info_webpage = self._download_webpage(video_info_url, video_id,
-                                    note=False,
-                                    errnote='unable to download video info webpage')
-            video_info = compat_parse_qs(video_info_webpage)
-            if 'token' in video_info:
-                break
-        if 'token' not in video_info:
-            if 'reason' in video_info:
-                raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0])
-            else:
-                raise ExtractorError(u'"token" parameter not in video info for unknown reason')
-
-        # Check for "rental" videos
-        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
-            raise ExtractorError(u'"rental" videos not supported')
-
-        # Start extracting information
-        self.report_information_extraction(video_id)
-
-        # uploader
-        if 'author' not in video_info:
-            raise ExtractorError(u'Unable to extract uploader name')
-        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
-
-        # uploader_id
-        video_uploader_id = None
-        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
-        if mobj is not None:
-            video_uploader_id = mobj.group(1)
-        else:
-            self._downloader.report_warning(u'unable to extract uploader nickname')
-
-        # title
-        if 'title' not in video_info:
-            raise ExtractorError(u'Unable to extract video title')
-        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
-
-        # thumbnail image
-        if 'thumbnail_url' not in video_info:
-            self._downloader.report_warning(u'unable to extract video thumbnail')
-            video_thumbnail = ''
-        else:   # don't panic if we can't find it
-            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
-
-        # upload date
-        upload_date = None
-        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
-        if mobj is not None:
-            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
-            upload_date = unified_strdate(upload_date)
-
-        # description
-        video_description = get_element_by_id("eow-description", video_webpage)
-        if video_description:
-            video_description = clean_html(video_description)
-        else:
-            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
-            if fd_mobj:
-                video_description = unescapeHTML(fd_mobj.group(1))
-            else:
-                video_description = u''
-
-        # subtitles
-        video_subtitles = None
-
-        if self._downloader.params.get('writesubtitles', False):
-            video_subtitles = self._extract_subtitle(video_id)
-            if video_subtitles:
-                (sub_error, sub_lang, sub) = video_subtitles[0]
-                if sub_error:
-                    # We try with the automatic captions
-                    video_subtitles = self._request_automatic_caption(video_id, video_webpage)
-                    (sub_error_auto, sub_lang, sub) = video_subtitles[0]
-                    if sub is not None:
-                        pass
-                    else:
-                        # We report the original error
-                        self._downloader.report_warning(sub_error)
-
-        if self._downloader.params.get('allsubtitles', False):
-            video_subtitles = self._extract_all_subtitles(video_id)
-            for video_subtitle in video_subtitles:
-                (sub_error, sub_lang, sub) = video_subtitle
-                if sub_error:
-                    self._downloader.report_warning(sub_error)
-
-        if self._downloader.params.get('listsubtitles', False):
-            sub_lang_list = self._list_available_subtitles(video_id)
-            return
-
-        if 'length_seconds' not in video_info:
-            self._downloader.report_warning(u'unable to extract video duration')
-            video_duration = ''
-        else:
-            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
-
-        # token
-        video_token = compat_urllib_parse.unquote_plus(video_info['token'][0])
-
-        # Decide which formats to download
-        req_format = self._downloader.params.get('format', None)
-
-        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
-            self.report_rtmp_download()
-            video_url_list = [(None, video_info['conn'][0])]
-        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
-            url_map = {}
-            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
-                url_data = compat_parse_qs(url_data_str)
-                if 'itag' in url_data and 'url' in url_data:
-                    url = url_data['url'][0]
-                    if 'sig' in url_data:
-                        url += '&signature=' + url_data['sig'][0]
-                    if 'ratebypass' not in url:
-                        url += '&ratebypass=yes'
-                    url_map[url_data['itag'][0]] = url
-
-            format_limit = self._downloader.params.get('format_limit', None)
-            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
-            if format_limit is not None and format_limit in available_formats:
-                format_list = available_formats[available_formats.index(format_limit):]
-            else:
-                format_list = available_formats
-            existing_formats = [x for x in format_list if x in url_map]
-            if len(existing_formats) == 0:
-                raise ExtractorError(u'no known formats available for video')
-            if self._downloader.params.get('listformats', None):
-                self._print_formats(existing_formats)
-                return
-            if req_format is None or req_format == 'best':
-                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
-            elif req_format == 'worst':
-                video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
-            elif req_format in ('-1', 'all'):
-                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
-            else:
-                # Specific formats. We pick the first in a slash-delimited sequence.
-                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
-                req_formats = req_format.split('/')
-                video_url_list = None
-                for rf in req_formats:
-                    if rf in url_map:
-                        video_url_list = [(rf, url_map[rf])]
-                        break
-                if video_url_list is None:
-                    raise ExtractorError(u'requested format not available')
-        else:
-            raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info')
-
-        results = []
-        for format_param, video_real_url in video_url_list:
-            # Extension
-            video_extension = self._video_extensions.get(format_param, 'flv')
-
-            video_format = '{0} - {1}'.format(format_param if format_param else video_extension,
-                                              self._video_dimensions.get(format_param, '???'))
-
-            results.append({
-                'id':       video_id,
-                'url':      video_real_url,
-                'uploader': video_uploader,
-                'uploader_id': video_uploader_id,
-                'upload_date':  upload_date,
-                'title':    video_title,
-                'ext':      video_extension,
-                'format':   video_format,
-                'thumbnail':    video_thumbnail,
-                'description':  video_description,
-                'player_url':   player_url,
-                'subtitles':    video_subtitles,
-                'duration':     video_duration
-            })
-        return results
-
-
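
In miniature, the slash-delimited format selection used by the removed YoutubeIE above: the first requested itag that is actually present in the URL map wins. The itags and URLs below are invented:

req_format = '22/35/18'   # hypothetical user request, most preferred first
url_map = {'18': 'http://example.invalid/fmt18',
           '35': 'http://example.invalid/fmt35'}   # hypothetical available itags

video_url_list = None
for rf in req_format.split('/'):
    if rf in url_map:
        video_url_list = [(rf, url_map[rf])]
        break
# itag 22 is unavailable, so itag 35 (the next choice) is selected:
print(video_url_list)   # [('35', 'http://example.invalid/fmt35')]
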
-class MetacafeIE(InfoExtractor):
-    """Information Extractor for metacafe.com."""
-
-    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
-    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
-    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
-    IE_NAME = u'metacafe'
-
-    def report_disclaimer(self):
-        """Report disclaimer retrieval."""
-        self.to_screen(u'Retrieving disclaimer')
-
-    def _real_initialize(self):
-        # Retrieve disclaimer
-        request = compat_urllib_request.Request(self._DISCLAIMER)
-        try:
-            self.report_disclaimer()
-            disclaimer = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to retrieve disclaimer: %s' % compat_str(err))
-
-        # Confirm age
-        disclaimer_form = {
-            'filters': '0',
-            'submit': "Continue - I'm over 18",
-            }
-        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
-        try:
-            self.report_age_confirmation()
-            disclaimer = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
-
-    def _real_extract(self, url):
-        # Extract id and simplified title from URL
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group(1)
-
-        # Check if video comes from YouTube
-        mobj2 = re.match(r'^yt-(.*)$', video_id)
-        if mobj2 is not None:
-            return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1), 'Youtube')]
-
-        # Retrieve video webpage to extract further information
-        webpage = self._download_webpage('http://www.metacafe.com/watch/%s/' % video_id, video_id)
-
-        # Extract URL, uploader and title from webpage
-        self.report_extraction(video_id)
-        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
-        if mobj is not None:
-            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
-            video_extension = mediaURL[-3:]
-
-            # Extract gdaKey if available
-            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
-            if mobj is None:
-                video_url = mediaURL
-            else:
-                gdaKey = mobj.group(1)
-                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
-        else:
-            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
-            if mobj is None:
-                raise ExtractorError(u'Unable to extract media URL')
-            vardict = compat_parse_qs(mobj.group(1))
-            if 'mediaData' not in vardict:
-                raise ExtractorError(u'Unable to extract media URL')
-            mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
-            if mobj is None:
-                raise ExtractorError(u'Unable to extract media URL')
-            mediaURL = mobj.group('mediaURL').replace('\\/', '/')
-            video_extension = mediaURL[-3:]
-            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
-
-        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract title')
-        video_title = mobj.group(1).decode('utf-8')
-
-        mobj = re.search(r'submitter=(.*?);', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract uploader nickname')
-        video_uploader = mobj.group(1)
-
-        return [{
-            'id':       video_id.decode('utf-8'),
-            'url':      video_url.decode('utf-8'),
-            'uploader': video_uploader.decode('utf-8'),
-            'upload_date':  None,
-            'title':    video_title,
-            'ext':      video_extension.decode('utf-8'),
-        }]
-
-class DailymotionIE(InfoExtractor):
-    """Information Extractor for Dailymotion"""
-
-    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
-    IE_NAME = u'dailymotion'
-
-    def _real_extract(self, url):
-        # Extract id and simplified title from URL
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group(1).split('_')[0].split('?')[0]
-
-        video_extension = 'mp4'
-
-        # Retrieve video webpage to extract further information
-        request = compat_urllib_request.Request(url)
-        request.add_header('Cookie', 'family_filter=off')
-        webpage = self._download_webpage(request, video_id)
-
-        # Extract URL, uploader and title from webpage
-        self.report_extraction(video_id)
-        mobj = re.search(r'\s*var flashvars = (.*)', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract media URL')
-        flashvars = compat_urllib_parse.unquote(mobj.group(1))
-
-        for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
-            if key in flashvars:
-                max_quality = key
-                self.to_screen(u'Using %s' % key)
-                break
-        else:
-            raise ExtractorError(u'Unable to extract video URL')
-
-        mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract video URL')
-
-        video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
-
-        # TODO: support choosing qualities
-
-        mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract title')
-        video_title = unescapeHTML(mobj.group('title'))
-
-        video_uploader = None
-        video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
-                                             # Looking for official user
-                                             r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
-                                            webpage, 'video uploader')
-
-        video_upload_date = None
-        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
-        if mobj is not None:
-            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'uploader': video_uploader,
-            'upload_date':  video_upload_date,
-            'title':    video_title,
-            'ext':      video_extension,
-        }]
-
-
-class PhotobucketIE(InfoExtractor):
-    """Information extractor for photobucket.com."""
-
-    # TODO: the original _VALID_URL was:
-    # r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
-    # Check if it's necessary to keep the old extraction process
-    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))'
-    IE_NAME = u'photobucket'
-
-    def _real_extract(self, url):
-        # Extract id from URL
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group('id')
-
-        video_extension = mobj.group('ext')
-
-        # Retrieve video webpage to extract further information
-        webpage = self._download_webpage(url, video_id)
-
-        # Extract URL, uploader, and title from webpage
-        self.report_extraction(video_id)
-        # We try first by looking at the JavaScript code:
-        mobj = re.search(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (?P<json>.*?)\);', webpage)
-        if mobj is not None:
-            info = json.loads(mobj.group('json'))
-            return [{
-                'id':       video_id,
-                'url':      info[u'downloadUrl'],
-                'uploader': info[u'username'],
-                'upload_date':  datetime.date.fromtimestamp(info[u'creationDate']).strftime('%Y%m%d'),
-                'title':    info[u'title'],
-                'ext':      video_extension,
-                'thumbnail': info[u'thumbUrl'],
-            }]
-
-        # We try looking in other parts of the webpage
-        video_url = self._search_regex(r'<link rel="video_src" href=".*\?file=([^"]+)" />',
-            webpage, u'video URL')
-
-        mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract title')
-        video_title = mobj.group(1).decode('utf-8')
-        video_uploader = mobj.group(2).decode('utf-8')
-
-        return [{
-            'id':       video_id.decode('utf-8'),
-            'url':      video_url.decode('utf-8'),
-            'uploader': video_uploader,
-            'upload_date':  None,
-            'title':    video_title,
-            'ext':      video_extension.decode('utf-8'),
-        }]
-
-
-class YahooIE(InfoExtractor):
-    """Information extractor for screen.yahoo.com."""
-    _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P<id>\d*?)\.html'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('id')
-        webpage = self._download_webpage(url, video_id)
-        m_id = re.search(r'YUI\.namespace\("Media"\)\.CONTENT_ID = "(?P<new_id>.+?)";', webpage)
-
-        if m_id is None: 
-            # TODO: Check which url parameters are required
-            info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
-            webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage')
-            info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
-                        <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
-                        <media:pubStart><!\[CDATA\[(?P<date>.*?)\ .*\]\]></media:pubStart>.*
-                        <media:content\ medium="image"\ url="(?P<thumb>.*?)"\ name="LARGETHUMB"
-                        '''
-            self.report_extraction(video_id)
-            m_info = re.search(info_re, webpage, re.VERBOSE|re.DOTALL)
-            if m_info is None:
-                raise ExtractorError(u'Unable to extract video info')
-            video_title = m_info.group('title')
-            video_description = m_info.group('description')
-            video_thumb = m_info.group('thumb')
-            video_date = m_info.group('date')
-            video_date = datetime.datetime.strptime(video_date, '%m/%d/%Y').strftime('%Y%m%d')
-    
-            # TODO: Find a way to get mp4 videos
-            rest_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;element=stream;outputformat=mrss;id=%s;lmsoverride=1;bw=375;dynamicstream=1;cb=83521105;tech=flv,mp4;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
-            webpage = self._download_webpage(rest_url, video_id, u'Downloading video url webpage')
-            m_rest = re.search(r'<media:content url="(?P<url>.*?)" path="(?P<path>.*?)"', webpage)
-            if m_rest is None:
-                raise ExtractorError(u'Unable to extract video url')
-            video_url = m_rest.group('url')
-            video_path = m_rest.group('path')
-
-        else: # We have to use a different method if another id is defined
-            long_id = m_id.group('new_id')
-            info_url = 'http://video.query.yahoo.com/v1/public/yql?q=SELECT%20*%20FROM%20yahoo.media.video.streams%20WHERE%20id%3D%22' + long_id + '%22%20AND%20format%3D%22mp4%2Cflv%22%20AND%20protocol%3D%22rtmp%2Chttp%22%20AND%20plrs%3D%2286Gj0vCaSzV_Iuf6hNylf2%22%20AND%20acctid%3D%22389%22%20AND%20plidl%3D%22%22%20AND%20pspid%3D%22792700001%22%20AND%20offnetwork%3D%22false%22%20AND%20site%3D%22ivy%22%20AND%20lang%3D%22en-US%22%20AND%20region%3D%22US%22%20AND%20override%3D%22none%22%3B&env=prod&format=json&callback=YUI.Env.JSONP.yui_3_8_1_1_1368368376830_335'
-            webpage = self._download_webpage(info_url, video_id, u'Downloading info json')
-            json_str = re.search(r'YUI.Env.JSONP.yui.*?\((.*?)\);', webpage).group(1)
-            info = json.loads(json_str)
-            res = info[u'query'][u'results'][u'mediaObj'][0]
-            stream = res[u'streams'][0]
-            video_path = stream[u'path']
-            video_url = stream[u'host']
-            meta = res[u'meta']
-            video_title = meta[u'title']
-            video_description = meta[u'description']
-            video_thumb = meta[u'thumbnail']
-            video_date = None # I can't find it
-
-        info_dict = {
-                     'id': video_id,
-                     'url': video_url,
-                     'play_path': video_path,
-                     'title':video_title,
-                     'description': video_description,
-                     'thumbnail': video_thumb,
-                     'upload_date': video_date,
-                     'ext': 'flv',
-                     }
-        return info_dict
-
-class VimeoIE(InfoExtractor):
-    """Information extractor for vimeo.com."""
-
-    # _VALID_URL matches Vimeo URLs
-    _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
-    IE_NAME = u'vimeo'
-
-    def _real_extract(self, url, new_video=True):
-        # Extract ID from URL
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group('id')
-        if not mobj.group('proto'):
-            url = 'https://' + url
-        if mobj.group('direct_link') or mobj.group('pro'):
-            url = 'https://vimeo.com/' + video_id
-
-        # Retrieve video webpage to extract further information
-        request = compat_urllib_request.Request(url, None, std_headers)
-        webpage = self._download_webpage(request, video_id)
-
-        # Now we begin extracting as much information as we can from what we
-        # retrieved. First we extract the information common to all extractors,
-        # and later we extract those that are Vimeo specific.
-        self.report_extraction(video_id)
-
-        # Extract the config JSON
-        try:
-            config = webpage.split(' = {config:')[1].split(',assets:')[0]
-            config = json.loads(config)
-        except:
-            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
-                raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
-            else:
-                raise ExtractorError(u'Unable to extract info section')
-
-        # Extract title
-        video_title = config["video"]["title"]
-
-        # Extract uploader and uploader_id
-        video_uploader = config["video"]["owner"]["name"]
-        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None
-
-        # Extract video thumbnail
-        video_thumbnail = config["video"]["thumbnail"]
-
-        # Extract video description
-        video_description = get_element_by_attribute("itemprop", "description", webpage)
-        if video_description: video_description = clean_html(video_description)
-        else: video_description = u''
-
-        # Extract upload date
-        video_upload_date = None
-        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
-        if mobj is not None:
-            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
-
-        # Vimeo specific: extract request signature and timestamp
-        sig = config['request']['signature']
-        timestamp = config['request']['timestamp']
-
-        # Vimeo specific: extract video codec and quality information
-        # First consider quality, then codecs, then take everything
-        # TODO bind to format param
-        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
-        files = { 'hd': [], 'sd': [], 'other': []}
-        for codec_name, codec_extension in codecs:
-            if codec_name in config["video"]["files"]:
-                if 'hd' in config["video"]["files"][codec_name]:
-                    files['hd'].append((codec_name, codec_extension, 'hd'))
-                elif 'sd' in config["video"]["files"][codec_name]:
-                    files['sd'].append((codec_name, codec_extension, 'sd'))
-                else:
-                    files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0]))
-
-        for quality in ('hd', 'sd', 'other'):
-            if len(files[quality]) > 0:
-                video_quality = files[quality][0][2]
-                video_codec = files[quality][0][0]
-                video_extension = files[quality][0][1]
-                self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
-                break
-        else:
-            raise ExtractorError(u'No known codec found')
-
-        video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
-                    %(video_id, sig, timestamp, video_quality, video_codec.upper())
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'uploader': video_uploader,
-            'uploader_id': video_uploader_id,
-            'upload_date':  video_upload_date,
-            'title':    video_title,
-            'ext':      video_extension,
-            'thumbnail':    video_thumbnail,
-            'description':  video_description,
-        }]
-
-
-class ArteTvIE(InfoExtractor):
-    """arte.tv information extractor."""
-
-    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
-    _LIVE_URL = r'index-[0-9]+\.html$'
-
-    IE_NAME = u'arte.tv'
-
-    def fetch_webpage(self, url):
-        request = compat_urllib_request.Request(url)
-        try:
-            self.report_download_webpage(url)
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
-        except ValueError as err:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        return webpage
-
-    def grep_webpage(self, url, regex, regexFlags, matchTuples):
-        page = self.fetch_webpage(url)
-        mobj = re.search(regex, page, regexFlags)
-        info = {}
-
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        for (i, key, err) in matchTuples:
-            if mobj.group(i) is None:
-                raise ExtractorError(err)
-            else:
-                info[key] = mobj.group(i)
-
-        return info
-
-    def extractLiveStream(self, url):
-        video_lang = url.split('/')[-4]
-        info = self.grep_webpage(
-            url,
-            r'src="(.*?/videothek_js.*?\.js)',
-            0,
-            [
-                (1, 'url', u'Invalid URL: %s' % url)
-            ]
-        )
-        http_host = url.split('/')[2]
-        next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
-        info = self.grep_webpage(
-            next_url,
-            r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
-                '(http://.*?\.swf).*?' +
-                '(rtmp://.*?)\'',
-            re.DOTALL,
-            [
-                (1, 'path',   u'could not extract video path: %s' % url),
-                (2, 'player', u'could not extract video player: %s' % url),
-                (3, 'url',    u'could not extract video url: %s' % url)
-            ]
-        )
-        video_url = u'%s/%s' % (info.get('url'), info.get('path'))
-
-    def extractPlus7Stream(self, url):
-        video_lang = url.split('/')[-3]
-        info = self.grep_webpage(
-            url,
-            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
-            0,
-            [
-                (1, 'url', u'Invalid URL: %s' % url)
-            ]
-        )
-        next_url = compat_urllib_parse.unquote(info.get('url'))
-        info = self.grep_webpage(
-            next_url,
-            r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang,
-            0,
-            [
-                (1, 'url', u'Could not find <video> tag: %s' % url)
-            ]
-        )
-        next_url = compat_urllib_parse.unquote(info.get('url'))
-
-        info = self.grep_webpage(
-            next_url,
-            r'<video id="(.*?)".*?>.*?' +
-                '<name>(.*?)</name>.*?' +
-                '<dateVideo>(.*?)</dateVideo>.*?' +
-                '<url quality="hd">(.*?)</url>',
-            re.DOTALL,
-            [
-                (1, 'id',    u'could not extract video id: %s' % url),
-                (2, 'title', u'could not extract video title: %s' % url),
-                (3, 'date',  u'could not extract video date: %s' % url),
-                (4, 'url',   u'could not extract video url: %s' % url)
-            ]
-        )
-
-        return {
-            'id':           info.get('id'),
-            'url':          compat_urllib_parse.unquote(info.get('url')),
-            'uploader':     u'arte.tv',
-            'upload_date':  unified_strdate(info.get('date')),
-            'title':        info.get('title').decode('utf-8'),
-            'ext':          u'mp4',
-            'format':       u'NA',
-            'player_url':   None,
-        }
-
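extractPlus7Stream follows two levels of indirection, unquoting each percent-encoded URL it pulls out of the previous document before fetching the next one. The unquote step it relies on, shown with a made-up encoded URL:

```python
try:
    from urllib.parse import unquote  # Python 3
except ImportError:
    from urllib import unquote  # Python 2

# Each hop's URL arrives percent-encoded inside the previous document.
encoded = 'http%3A%2F%2Fvideos.arte.tv%2Fde%2Fvideos%2Fclip.xml'
print(unquote(encoded))  # http://videos.arte.tv/de/videos/clip.xml
```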
-    def _real_extract(self, url):
-        video_id = url.split('/')[-1]
-        self.report_extraction(video_id)
-
-        if re.search(self._LIVE_URL, video_id) is not None:
-            info = self.extractLiveStream(url)
-        else:
-            info = self.extractPlus7Stream(url)
-
-        return [info]
 
 
 class GenericIE(InfoExtractor):
@@ -1409,6 +140,13 @@ class GenericIE(InfoExtractor):
         if mobj is None:
             # Try to find twitter cards info
             mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
+        if mobj is None:
+            # We look for Open Graph info:
+            # We have to match any number of spaces between elements, as some sites try to align them (e.g. statigr.am)
+            m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
+            # We only use og:video if the MIME type is an actual video, not a Flash player:
+            if m_video_type is not None:
+                mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
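A standalone sketch of that Open Graph fallback against an inline snippet (the page content is made up): the og:video:type check gates the og:video lookup, so Flash players, which carry a non-video MIME type, are skipped.

```python
import re

# Made-up page snippet; note the alignment padding between attributes.
webpage = (
    '<meta   property="og:video:type"   content="video/mp4" />\n'
    '<meta   property="og:video"        content="http://example.com/clip.mp4" />'
)

video_url = None
# .*? between attributes tolerates the padding some sites add for alignment.
m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
if m_video_type is not None:  # a real video MIME type, not a Flash player
    m_url = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
    if m_url is not None:
        video_url = m_url.group(1)
print(video_url)  # http://example.com/clip.mp4
```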
         if mobj is None:
             raise ExtractorError(u'Invalid URL: %s' % url)
 
@@ -1556,191 +294,6 @@ class YahooSearchIE(SearchInfoExtractor):
         return res
 
 
-class YoutubePlaylistIE(InfoExtractor):
-    """Information Extractor for YouTube playlists."""
-
-    _VALID_URL = r"""(?:
-                        (?:https?://)?
-                        (?:\w+\.)?
-                        youtube\.com/
-                        (?:
-                           (?:course|view_play_list|my_playlists|artist|playlist|watch)
-                           \? (?:.*?&)*? (?:p|a|list)=
-                        |  p/
-                        )
-                        ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
-                        .*
-                     |
-                        ((?:PL|EC|UU)[0-9A-Za-z-_]{10,})
-                     )"""
-    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json&safeSearch=none'
-    _MAX_RESULTS = 50
-    IE_NAME = u'youtube:playlist'
-
-    @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
-
-    def _real_extract(self, url):
-        # Extract playlist id
-        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        # Download playlist videos from API
-        playlist_id = mobj.group(1) or mobj.group(2)
-        page_num = 1
-        videos = []
-
-        while True:
-            url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
-            page = self._download_webpage(url, playlist_id, u'Downloading page #%s' % page_num)
-
-            try:
-                response = json.loads(page)
-            except ValueError as err:
-                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
-
-            if 'feed' not in response:
-                raise ExtractorError(u'Got a malformed response from YouTube API')
-            playlist_title = response['feed']['title']['$t']
-            if 'entry' not in response['feed']:
-                # Number of videos is a multiple of self._MAX_RESULTS
-                break
-
-            for entry in response['feed']['entry']:
-                index = entry['yt$position']['$t']
-                if 'media$group' in entry and 'media$player' in entry['media$group']:
-                    videos.append((index, entry['media$group']['media$player']['url']))
-
-            if len(response['feed']['entry']) < self._MAX_RESULTS:
-                break
-            page_num += 1
-
-        videos = [v[1] for v in sorted(videos)]
-
-        url_results = [self.url_result(url, 'Youtube') for url in videos]
-        return [self.playlist_result(url_results, playlist_id, playlist_title)]
-
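The paging loop above has two exit conditions: an empty page (the playlist length was an exact multiple of 50) and a short page. A generic sketch of the same loop, with fetch_page as a stand-in for the GData request:

```python
MAX_RESULTS = 50

def fetch_page(start_index):
    # Stand-in for the GData request; returns one page of entries.
    data = ['video%d' % i for i in range(1, 120)]
    return data[start_index - 1:start_index - 1 + MAX_RESULTS]

videos = []
page_num = 1
while True:
    entries = fetch_page(MAX_RESULTS * (page_num - 1) + 1)
    if not entries:
        # Total was an exact multiple of MAX_RESULTS.
        break
    videos.extend(entries)
    if len(entries) < MAX_RESULTS:
        # A short page must be the last one.
        break
    page_num += 1
print(len(videos))  # 119
```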
-
-class YoutubeChannelIE(InfoExtractor):
-    """Information Extractor for YouTube channels."""
-
-    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
-    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
-    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
-    _MORE_PAGES_URL = 'http://www.youtube.com/channel_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
-    IE_NAME = u'youtube:channel'
-
-    def extract_videos_from_page(self, page):
-        ids_in_page = []
-        for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
-            if mobj.group(1) not in ids_in_page:
-                ids_in_page.append(mobj.group(1))
-        return ids_in_page
-
-    def _real_extract(self, url):
-        # Extract channel id
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        # Download channel page
-        channel_id = mobj.group(1)
-        video_ids = []
-        pagenum = 1
-
-        url = self._TEMPLATE_URL % (channel_id, pagenum)
-        page = self._download_webpage(url, channel_id,
-                                      u'Downloading page #%s' % pagenum)
-
-        # Extract video identifiers
-        ids_in_page = self.extract_videos_from_page(page)
-        video_ids.extend(ids_in_page)
-
-        # Download any subsequent channel pages using the json-based channel_ajax query
-        if self._MORE_PAGES_INDICATOR in page:
-            while True:
-                pagenum = pagenum + 1
-
-                url = self._MORE_PAGES_URL % (pagenum, channel_id)
-                page = self._download_webpage(url, channel_id,
-                                              u'Downloading page #%s' % pagenum)
-
-                page = json.loads(page)
-
-                ids_in_page = self.extract_videos_from_page(page['content_html'])
-                video_ids.extend(ids_in_page)
-
-                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
-                    break
-
-        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
-
-        urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
-        url_entries = [self.url_result(url, 'Youtube') for url in urls]
-        return [self.playlist_result(url_entries, channel_id)]
-
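extract_videos_from_page deduplicates video ids while preserving first-seen order, since a channel page links each video several times. A quick standalone check with illustrative HTML:

```python
import re

# Sample page: the same video linked twice, plus one more.
page = ('<a href="/watch?v=abc123def45&feature=x">one</a>'
        '<a href="/watch?v=abc123def45">dup</a>'
        '<a href="/watch?v=zzz999yyy88">two</a>')

ids_in_page = []
for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
    if mobj.group(1) not in ids_in_page:  # keep first occurrence only
        ids_in_page.append(mobj.group(1))
print(ids_in_page)  # ['abc123def45', 'zzz999yyy88']
```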
-
-class YoutubeUserIE(InfoExtractor):
-    """Information Extractor for YouTube users."""
-
-    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
-    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
-    _GDATA_PAGE_SIZE = 50
-    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
-    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
-    IE_NAME = u'youtube:user'
-
-    def _real_extract(self, url):
-        # Extract username
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        username = mobj.group(1)
-
-        # Download video ids using YouTube Data API. Result size per
-        # query is limited (currently to 50 videos) so we need to query
-        # page by page until there are no video ids - it means we got
-        # all of them.
-
-        video_ids = []
-        pagenum = 0
-
-        while True:
-            start_index = pagenum * self._GDATA_PAGE_SIZE + 1
-
-            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
-            page = self._download_webpage(gdata_url, username,
-                                          u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE))
-
-            # Extract video identifiers
-            ids_in_page = []
-
-            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-                if mobj.group(1) not in ids_in_page:
-                    ids_in_page.append(mobj.group(1))
-
-            video_ids.extend(ids_in_page)
-
-            # A little optimization: if the current page is not
-            # "full", i.e. contains fewer than PAGE_SIZE video ids,
-            # we can assume it is the last one, so there is no need
-            # to query further pages.
-
-            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
-                break
-
-            pagenum += 1
-
-        urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
-        url_results = [self.url_result(url, 'Youtube') for url in urls]
-        return [self.playlist_result(url_results, playlist_title=username)]
-
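The only subtle piece of the user loop is the index arithmetic: GData's start-index is 1-based, so 0-based page n begins at n * PAGE_SIZE + 1. A two-line check:

```python
GDATA_PAGE_SIZE = 50

# 1-based start index for each 0-based page number.
for pagenum in (0, 1, 2):
    print(pagenum, pagenum * GDATA_PAGE_SIZE + 1)  # 0 1 / 1 51 / 2 101
```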
-
 class BlipTVUserIE(InfoExtractor):
     """Information Extractor for blip.tv users."""
 
@@ -3968,102 +2521,7 @@ class LiveLeakIE(InfoExtractor):
 
         return [info]
 
-class ARDIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
-    _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
-    _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
-
-    def _real_extract(self, url):
-        # determine video id from url
-        m = re.match(self._VALID_URL, url)
-
-        numid = re.search(r'documentId=([0-9]+)', url)
-        if numid:
-            video_id = numid.group(1)
-        else:
-            video_id = m.group('video_id')
-
-        # determine title and media streams from webpage
-        html = self._download_webpage(url, video_id)
-        title = re.search(self._TITLE, html).group('title')
-        streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
-        if not streams:
-            assert '"fsk"' in html
-            raise ExtractorError(u'This video is only available after 8:00 pm')
-
-        # choose default media type and highest quality for now
-        stream = max([s for s in streams if int(s["media_type"]) == 0],
-                     key=lambda s: int(s["quality"]))
-
-        # there's two possibilities: RTMP stream or HTTP download
-        info = {'id': video_id, 'title': title, 'ext': 'mp4'}
-        if stream['rtmp_url']:
-            self.to_screen(u'RTMP download detected')
-            assert stream['video_url'].startswith('mp4:')
-            info["url"] = stream["rtmp_url"]
-            info["play_path"] = stream['video_url']
-        else:
-            assert stream["video_url"].endswith('.mp4')
-            info["url"] = stream["video_url"]
-        return [info]
-
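The stream choice in ARDIE compresses two decisions into one expression: filter to the default media type, then take the highest quality with max(..., key=...). A sketch with made-up stream dicts in the same shape as the _MEDIA_STREAM groups:

```python
streams = [
    {'media_type': '0', 'quality': '1', 'rtmp_url': '', 'video_url': 'http://example.com/low.mp4'},
    {'media_type': '0', 'quality': '2', 'rtmp_url': '', 'video_url': 'http://example.com/high.mp4'},
    {'media_type': '1', 'quality': '3', 'rtmp_url': '', 'video_url': 'http://example.com/audio.mp3'},
]

# Keep the default media type (0) and pick the highest numeric quality.
stream = max((s for s in streams if int(s['media_type']) == 0),
             key=lambda s: int(s['quality']))

# Two delivery paths: RTMP stream or plain HTTP download.
if stream['rtmp_url']:
    info = {'url': stream['rtmp_url'], 'play_path': stream['video_url']}
else:
    info = {'url': stream['video_url']}
print(info)  # {'url': 'http://example.com/high.mp4'}
```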
-class ZDFIE(InfoExtractor):
-    _VALID_URL = r'^http://www\.zdf\.de\/ZDFmediathek\/(.*beitrag\/video\/)(?P<video_id>[^/\?]+)(?:\?.*)?'
-    _TITLE = r'<h1(?: class="beitragHeadline")?>(?P<title>.*)</h1>'
-    _MEDIA_STREAM = r'<a href="(?P<video_url>.+(?P<media_type>.streaming).+/zdf/(?P<quality>[^\/]+)/[^"]*)".+class="play".+>'
-    _MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"'
-    _RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*\.mp4)'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('video_id')
-
-        html = self._download_webpage(url, video_id)
-        streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
-        if not streams:
-            raise ExtractorError(u'No media url found.')
-
-        # s['media_type'] == 'wstreaming' -> use 'Windows Media Player' and mms url
-        # s['media_type'] == 'hstreaming' -> use 'Quicktime' and rtsp url
-        # choose first/default media type and highest quality for now
-        stream_ = None
-        for s in streams:        # find 300 - dsl1000mbit
-            if s['quality'] == '300' and s['media_type'] == 'wstreaming':
-                stream_ = s
-                break
-        for s in streams:        # find veryhigh - dsl2000mbit
-            if s['quality'] == 'veryhigh' and s['media_type'] == 'wstreaming': # 'hstreaming' - rtsp is not working
-                stream_ = s
-                break
-        if stream_ is None:
-            raise ExtractorError(u'No stream found.')
-
-        media_link = self._download_webpage(stream_['video_url'], video_id, u'Downloading stream URL')
 
-        self.report_extraction(video_id)
-        mobj = re.search(self._TITLE, html)
-        if mobj is None:
-            raise ExtractorError(u'Cannot extract title')
-        title = unescapeHTML(mobj.group('title'))
-
-        mobj = re.search(self._MMS_STREAM, media_link)
-        if mobj is None:
-            mobj = re.search(self._RTSP_STREAM, media_link)
-            if mobj is None:
-                raise ExtractorError(u'Cannot extract mms:// or rtsp:// URL')
-        # The matched URL may be mms:// or rtsp://, hence the neutral name.
-        stream_url = mobj.group('video_url')
-
-        mobj = re.search('(.*)[.](?P<ext>[^.]+)', stream_url)
-        if mobj is None:
-            raise ExtractorError(u'Cannot extract extension')
-        ext = mobj.group('ext')
-
-        return [{'id': video_id,
-                 'url': stream_url,
-                 'title': title,
-                 'ext': ext
-                 }]
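ZDF resolves the final address in two hops: the page links to a small metafile, and that metafile carries the real mms:// (or rtsp://) URL. A sketch of the second hop against a made-up metafile body, reusing the class's patterns:

```python
import re

MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"'
RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*\.mp4)'

# Made-up metafile body; real ones come from the wstreaming link.
media_link = '<ref href="mms://example.com/zdf/veryhigh/clip.wmv"/>'

mobj = re.search(MMS_STREAM, media_link)
if mobj is None:
    # Fall back to RTSP when no MMS link is present.
    mobj = re.search(RTSP_STREAM, media_link)
if mobj is None:
    raise ValueError('Cannot extract mms:// or rtsp:// URL')
stream_url = mobj.group('video_url')

# Derive the extension from the last dot in the URL.
ext = re.search(r'(.*)[.](?P<ext>[^.]+)', stream_url).group('ext')
print(stream_url, ext)  # mms://example.com/zdf/veryhigh/clip.wmv wmv
```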
 
 class TumblrIE(InfoExtractor):
     _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)/(.*?)'
@@ -4611,6 +3069,7 @@ def gen_extractors():
         HypemIE(),
         Vbox7IE(),
         GametrailersIE(),
+        StatigramIE(),
         GenericIE()
     ]
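Ordering in gen_extractors matters: the FileDownloader tries each extractor's suitable() in list order, so the catch-all GenericIE has to stay last. A minimal sketch of that dispatch, where FakeIE and its patterns are illustrative stand-ins for the real extractors:

```python
import re

class FakeIE(object):
    def __init__(self, name, pattern):
        self.name, self.pattern = name, pattern
    def suitable(self, url):
        return re.match(self.pattern, url) is not None

extractors = [
    FakeIE('statigram', r'https?://(www\.)?statigr\.am/.*'),
    FakeIE('generic', r'.*'),  # catch-all: must come last
]

url = 'http://statigr.am/p/12345'
ie = next(ie for ie in extractors if ie.suitable(url))
print(ie.name)  # statigram, not generic
```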