From 0deac3a2d8aeca756ae9f0620af9185449c6feb2 Mon Sep 17 00:00:00 2001
From: Philipp Hagemeister
Date: Sat, 29 Dec 2012 16:56:52 +0100
Subject: [PATCH] Revert "Removed a spurious increment_downloads"

This reverts commit 92e3e18a1d1fb35b6d73b609725c250123835969.
---
 youtube_dl/InfoExtractors.py | 2604 ----------------------------------
 1 file changed, 2604 deletions(-)

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 3715bef50..e3cf7e115 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -1073,2610 +1073,6 @@ class VimeoIE(InfoExtractor):
         }]
 
-class ArteTvIE(InfoExtractor):
-    """arte.tv information extractor."""
-
-    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
-    _LIVE_URL = r'index-[0-9]+\.html$'
-
-    IE_NAME = u'arte.tv'
-
-    def __init__(self, downloader=None):
-        InfoExtractor.__init__(self, downloader)
-
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        self._downloader.to_screen(u'[arte.tv] %s: Downloading webpage' % video_id)
-
-    def report_extraction(self, video_id):
-        """Report information extraction."""
-        self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id)
-
-    def fetch_webpage(self, url):
-        request = compat_urllib_request.Request(url)
-        try:
-            self.report_download_webpage(url)
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
-            return
-        except ValueError as err:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-            return
-        return webpage
-
-    def grep_webpage(self, url, regex, regexFlags, matchTuples):
-        page = self.fetch_webpage(url)
-        mobj = re.search(regex, page, regexFlags)
-        info = {}
-
-        if mobj is None:
-            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-            return
-
-        for (i, key, err) in matchTuples:
-            if mobj.group(i) is None:
-                self._downloader.trouble(err)
-                return
-            else:
-                info[key] = mobj.group(i)
-
-        return info
-
-    def extractLiveStream(self, url):
-        video_lang = url.split('/')[-4]
-        info = self.grep_webpage(
-            url,
-            r'src="(.*?/videothek_js.*?\.js)',
-            0,
-            [
-                (1, 'url', u'ERROR: Invalid URL: %s' % url)
-            ]
-        )
-        http_host = url.split('/')[2]
-        next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
-        info = self.grep_webpage(
-            next_url,
-            r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
-                '(http://.*?\.swf).*?' +
-                '(rtmp://.*?)\'',
-            re.DOTALL,
-            [
-                (1, 'path', u'ERROR: could not extract video path: %s' % url),
-                (2, 'player', u'ERROR: could not extract video player: %s' % url),
-                (3, 'url', u'ERROR: could not extract video url: %s' % url)
-            ]
-        )
-        video_url = u'%s/%s' % (info.get('url'), info.get('path'))
-
-    def extractPlus7Stream(self, url):
-        video_lang = url.split('/')[-3]
-        info = self.grep_webpage(
-            url,
-            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
-            0,
-            [
-                (1, 'url', u'ERROR: Invalid URL: %s' % url)
-            ]
-        )
-        next_url = compat_urllib_parse.unquote(info.get('url'))
-        info = self.grep_webpage(
-            next_url,
-            r'