The dictionaries must include the following fields:
- id: Video identifier.
- url: Final video URL.
- uploader: Nickname of the video uploader.
- title: Video title, unescaped.
- ext: Video filename extension.
+ id: Video identifier.
+ url: Final video URL.
+ uploader: Nickname of the video uploader, unescaped.
+ upload_date: Video upload date (YYYYMMDD).
+ title: Video title, unescaped.
+ ext: Video filename extension.
The following fields are optional:
format: The video format, defaults to ext (used for --get-format).
thumbnail: Full URL to a video thumbnail image.
- description One-line video description.
+ description: One-line video description.
player_url: SWF Player URL (used for rtmpdump).
+ subtitles: The .srt file contents.
+ urlhandle: [internal] The urlHandle to be used to download the file,
+ as returned by urllib2.urlopen.
+
+ The fields should all be Unicode strings.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
_real_extract() must return a *list* of information dictionaries as
described above.
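+
+ As a purely illustrative sketch (ExampleIE and example.com are made
+ up for this docstring, not a real extractor), a minimal subclass
+ could look like:
+
+     class ExampleIE(InfoExtractor):
+         _VALID_URL = r'(?:http://)?(?:www\.)?example\.com/watch/(\w+)'
+
+         def _real_initialize(self):
+             pass  # nothing to set up, no authentication needed
+
+         def _real_extract(self, url):
+             video_id = re.match(self._VALID_URL, url).group(1)
+             return [{
+                 'id': video_id,
+                 'url': u'http://example.com/videos/%s.flv' % video_id,
+                 'uploader': None,
+                 'upload_date': None,
+                 'title': u'Example video',
+                 'ext': u'flv',
+             }]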
+
+ Finally, the _WORKING attribute should be set to False for broken IEs
+ in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
+ _WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)

def suitable(self, url):
"""Receives a URL and returns True if suitable for this IE."""
return re.match(self._VALID_URL, url) is not None
+ def working(self):
+ """Getter method for _WORKING."""
+ return self._WORKING
+
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])
# upload date
- upload_date = u'NA'
+ upload_date = None
mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
if mobj is not None:
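# collapse "/", "," and "-" into spaces, e.g. "Sep 21, 2011" becomes "Sep 21 2011"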
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
'id': video_id.decode('utf-8'),
'url': video_url.decode('utf-8'),
'uploader': video_uploader.decode('utf-8'),
- 'upload_date': u'NA',
+ 'upload_date': None,
'title': video_title,
'ext': video_extension.decode('utf-8'),
}]
return
video_title = unescapeHTML(mobj.group('title').decode('utf-8'))
- video_uploader = u'NA'
+ video_uploader = None
mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', webpage)
if mobj is None:
# looking for the official user
else:
video_uploader = mobj.group(1)
- video_upload_date = u'NA'
+ video_upload_date = None
mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
if mobj is not None:
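# the page displays the date as DD-MM-YYYY; rearrange to YYYYMMDD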
video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
return [{
'id': video_id.decode('utf-8'),
'url': video_url.decode('utf-8'),
- 'uploader': u'NA',
- 'upload_date': u'NA',
+ 'uploader': None,
+ 'upload_date': None,
'title': video_title,
'ext': video_extension.decode('utf-8'),
}]
'id': video_id.decode('utf-8'),
'url': video_url.decode('utf-8'),
'uploader': video_uploader,
- 'upload_date': u'NA',
+ 'upload_date': None,
'title': video_title,
'ext': video_extension.decode('utf-8'),
}]
'id': video_id.decode('utf-8'),
'url': video_url,
'uploader': video_uploader,
- 'upload_date': u'NA',
+ 'upload_date': None,
'title': video_title,
'ext': video_extension.decode('utf-8'),
'thumbnail': video_thumbnail.decode('utf-8'),
'description': video_description,
- 'thumbnail': video_thumbnail,
}]
else: video_description = ''
# Extract upload date
- video_upload_date = u'NA'
+ video_upload_date = None
mobj = re.search(r'<span id="clip-date" style="display:none">[^:]*: (.*?)( \([^\(]*\))?</span>', webpage)
if mobj is not None:
video_upload_date = mobj.group(1)
}]
+class ArteTvIE(InfoExtractor):
+ """arte.tv information extractor."""
+
+ _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
+ _LIVE_URL = r'index-[0-9]+\.html$'
+
+ IE_NAME = u'arte.tv'
+
+ def __init__(self, downloader=None):
+ InfoExtractor.__init__(self, downloader)
+
+ def report_download_webpage(self, video_id):
+ """Report webpage download."""
+ self._downloader.to_screen(u'[arte.tv] %s: Downloading webpage' % video_id)
+
+ def report_extraction(self, video_id):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id)
+
+ def fetch_webpage(self, url):
+ self._downloader.increment_downloads()
+ request = urllib2.Request(url)
+ try:
+ self.report_download_webpage(url)
+ webpage = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ return
+ except ValueError, err:
+ self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ return
+ return webpage
+
+ def grep_webpage(self, url, regex, regexFlags, matchTuples):
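+ """Fetch url, match regex against the page and collect the groups.
+
+ matchTuples is a list of (group_index, key, error_message) triples:
+ each matched group is stored under key in the returned dict, and
+ error_message is reported if its group is missing.
+ """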
+ page = self.fetch_webpage(url)
+ mobj = re.search(regex, page, regexFlags)
+ info = {}
+
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ return
+
+ for (i, key, err) in matchTuples:
+ if mobj.group(i) is None:
+ self._downloader.trouble(err)
+ return
+ else:
+ info[key] = mobj.group(i)
+
+ return info
+
+ def extractLiveStream(self, url):
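+ """Locate the rtmp URL of a live stream.
+
+ Downloading the stream itself is not implemented yet; see the
+ note at the end of this method.
+ """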
+ video_lang = url.split('/')[-4]
+ info = self.grep_webpage(
+ url,
+ r'src="(.*?/videothek_js.*?\.js)',
+ 0,
+ [
+ (1, 'url', u'ERROR: Invalid URL: %s' % url)
+ ]
+ )
+ http_host = url.split('/')[2]
+ next_url = 'http://%s%s' % (http_host, urllib.unquote(info.get('url')))
+ info = self.grep_webpage(
+ next_url,
+ r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
+ r'(http://.*?\.swf).*?' +
+ '(rtmp://.*?)\'',
+ re.DOTALL,
+ [
+ (1, 'path', u'ERROR: could not extract video path: %s' % url),
+ (2, 'player', u'ERROR: could not extract video player: %s' % url),
+ (3, 'url', u'ERROR: could not extract video url: %s' % url)
+ ]
+ )
+ video_url = u'%s/%s' % (info.get('url'), info.get('path'))
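+ # Nothing is returned: the rtmp URL assembled in video_url above is
+ # not passed on to the downloader yet.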
+
+ def extractPlus7Stream(self, url):
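+ """Extract a video from the arte.tv "+7" replay pages."""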
+ video_lang = url.split('/')[-3]
+ info = self.grep_webpage(
+ url,
+ r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
+ 0,
+ [
+ (1, 'url', u'ERROR: Invalid URL: %s' % url)
+ ]
+ )
+ next_url = urllib.unquote(info.get('url'))
+ info = self.grep_webpage(
+ next_url,
+ r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang,
+ 0,
+ [
+ (1, 'url', u'ERROR: Could not find <video> tag: %s' % url)
+ ]
+ )
+ next_url = urllib.unquote(info.get('url'))
+
+ info = self.grep_webpage(
+ next_url,
+ r'<video id="(.*?)".*?>.*?' +
+ '<name>(.*?)</name>.*?' +
+ '<dateVideo>(.*?)</dateVideo>.*?' +
+ '<url quality="hd">(.*?)</url>',
+ re.DOTALL,
+ [
+ (1, 'id', u'ERROR: could not extract video id: %s' % url),
+ (2, 'title', u'ERROR: could not extract video title: %s' % url),
+ (3, 'date', u'ERROR: could not extract video date: %s' % url),
+ (4, 'url', u'ERROR: could not extract video url: %s' % url)
+ ]
+ )
+
+ return {
+ 'id': info.get('id'),
+ 'url': urllib.unquote(info.get('url')),
+ 'uploader': u'arte.tv',
+ 'upload_date': info.get('date'),
+ 'title': info.get('title'),
+ 'ext': u'mp4',
+ 'format': None,
+ 'player_url': None,
+ }
+
+ def _real_extract(self, url):
+ video_id = url.split('/')[-1]
+ self.report_extraction(video_id)
+
+ if re.search(self._LIVE_URL, video_id) is not None:
+ self.extractLiveStream(url)
+ return
+ else:
+ info = self.extractPlus7Stream(url)
+
+ return [info]
+
+
class GenericIE(InfoExtractor):
"""Generic last-resort information extractor."""
'id': video_id.decode('utf-8'),
'url': video_url.decode('utf-8'),
'uploader': video_uploader,
- 'upload_date': u'NA',
+ 'upload_date': None,
'title': video_title,
'ext': video_extension.decode('utf-8'),
}]
return
else:
try:
- n = long(prefix)
+ n = int(prefix)
if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
return
else:
try:
- n = long(prefix)
+ n = int(prefix)
if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
return
else:
try:
- n = long(prefix)
+ n = int(prefix)
if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
return [{
'id': file_id.decode('utf-8'),
'url': file_url.decode('utf-8'),
- 'uploader': u'NA',
- 'upload_date': u'NA',
+ 'uploader': None,
+ 'upload_date': None,
'title': file_title,
'ext': file_extension.decode('utf-8'),
}]
class FacebookIE(InfoExtractor):
"""Information Extractor for Facebook"""
+ _WORKING = False
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
_LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
_NETRC_MACHINE = 'facebook'
video_thumbnail = video_info['thumbnail']
# upload date
- upload_date = u'NA'
+ upload_date = None
if 'upload_date' in video_info:
upload_time = video_info['upload_date']
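# email.utils.parsedate_tz understands RFC 2822 style date strings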
timetuple = email.utils.parsedate_tz(upload_time)
info = {
'id': title,
'url': url,
+ 'uploader': None,
+ 'upload_date': None,
'title': title,
'ext': ext,
'urlhandle': urlh
return [{
'id': video_id,
'url': video_url,
- 'uploader': u'NA',
- 'upload_date': u'NA',
+ 'uploader': None,
+ 'upload_date': None,
'title': video_title,
'ext': u'flv',
}]
htmlHandle = urllib2.urlopen(req)
html = htmlHandle.read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
+ self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return
if dlNewest:
url = htmlHandle.geturl()
urlHandle = urllib2.urlopen(playerUrl_raw)
playerUrl = urlHandle.geturl()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'ERROR: unable to find out player URL: ' + unicode(err))
+ self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err))
return
uri = mMovieParams[0][1]
try:
indexXml = urllib2.urlopen(indexUrl).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'ERROR: unable to download episode index: ' + unicode(err))
+ self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
return
results = []
try:
configXml = urllib2.urlopen(configReq).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
+ self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return
cdoc = xml.etree.ElementTree.fromstring(configXml)
m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err))
+ self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
return
descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
try:
configJSON = urllib2.urlopen(configUrl).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'ERROR: unable to download configuration: ' + unicode(err))
+ self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
return
# Technically, it's JavaScript, not JSON
try:
config = json.loads(configJSON)
except (ValueError,), err:
- self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + unicode(err))
+ self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
return
playlist = config['playlist']
info = {
'id': video_id,
'internal_id': internal_video_id,
+ 'uploader': None,
+ 'upload_date': None,
}
self.report_extraction(video_id)
'id': file_id.decode('utf-8'),
'url': file_url.decode('utf-8'),
'uploader': uploader.decode('utf-8'),
- 'upload_date': u'NA',
+ 'upload_date': None,
'title': json_data['name'],
'ext': file_url.split('.')[-1].decode('utf-8'),
'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
video = mobj.group('video')
info = {
'id': course + '_' + video,
+ 'uploader': None,
+ 'upload_date': None,
}
self.report_extraction(info['id'])
try:
metaXml = urllib2.urlopen(xmlUrl).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % unicode(err))
+ self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
return
mdoc = xml.etree.ElementTree.fromstring(metaXml)
try:
info = {
'id': course,
'type': 'playlist',
+ 'uploader': None,
+ 'upload_date': None,
}
self.report_download_webpage(info['id'])
try:
coursepage = urllib2.urlopen(url).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err))
+ self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
return
m = re.search('<h1>([^<]+)</h1>', coursepage)
info = {
'id': 'Stanford OpenClassroom',
'type': 'playlist',
+ 'uploader': None,
+ 'upload_date': None,
}
self.report_download_webpage(info['id'])
try:
rootpage = urllib2.urlopen(rootURL).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err))
+ self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
return
info['title'] = info['id']
'id': video_id,
'url': video_url,
'uploader': performer,
+ 'upload_date': None,
'title': video_title,
'ext': ext,
'format': format,
'id': '%s_part%02d' % (video_id, index),
'url': download_url,
'uploader': None,
+ 'upload_date': None,
'title': video_title,
'ext': ext,
}
return
# Extract upload date
- upload_date = u'NA'
+ upload_date = None
pattern = 'title="Timestamp">(.*?)</a>'
mobj = re.search(pattern, webpage)
if mobj:
self.report_date(upload_date)
# Extract uploader
- uploader = u'NA'
+ uploader = None
pattern = r'rel="author".*?>(.*?)</a>'
mobj = re.search(pattern, webpage)
if mobj: