import urllib2
import email.utils
import xml.etree.ElementTree
+import random
+import math
from urlparse import parse_qs
try:
url: Final video URL.
uploader: Nickname of the video uploader.
title: Literal title.
- stitle: Simplified title.
ext: Video filename extension.
format: Video format.
player_url: SWF Player URL (may be None).
class YoutubeIE(InfoExtractor):
"""Information extractor for youtube.com."""
- _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?!view_play_list|my_playlists|artist|playlist)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
+ _VALID_URL = r"""^
+ (
+ (?:https?://)? # http(s):// (optional)
+ (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
+ tube\.majestyc\.net/) # the various hostnames, with wildcard subdomains
+ (?!view_play_list|my_playlists|artist|playlist) # ignore playlist URLs
+ (?: # the various things that can precede the ID:
+ (?:(?:v|embed|e)/) # v/ or embed/ or e/
+ |(?: # or the v= param in all its forms
+ (?:watch(?:_popup)?(?:\.php)?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
+ (?:\?|\#!?) # the params delimiter ? or # or #!
+ (?:.+&)? # any other preceding param (like /?s=tuff&v=xxxx)
+ v=
+ )
+ )? # optional -> youtube.com/xxxx is OK
+ )? # all until now is optional -> you can pass the naked ID
+ ([0-9A-Za-z_-]+) # here it is! the YouTube video ID
+ (?(1).+)? # if we found the ID, everything can follow
+ $"""
_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
_LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_NETRC_MACHINE = 'youtube'
# Listed in order of quality
- _available_formats = ['38', '37', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
- _available_formats_prefer_free = ['38', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
+ _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
+ _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
_video_extensions = {
'13': '3gp',
'17': 'mp4',
'43': 'webm',
'44': 'webm',
'45': 'webm',
+ '46': 'webm',
}
_video_dimensions = {
'5': '240x400',
'43': '360x640',
'44': '480x854',
'45': '720x1280',
+ '46': '1080x1920',
}
IE_NAME = u'youtube'
+ def suitable(self, url):
+ """Receives a URL and returns True if suitable for this IE."""
+ return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
def report_lang(self):
"""Report attempt to set language."""
self._downloader.to_screen(u'[youtube] Setting language')
start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
caption = unescapeHTML(caption)
- caption = unescapeHTML(caption) # double cycle, inentional
- srt += str(n) + '\n'
+ caption = unescapeHTML(caption) # double cycle, intentional
+ srt += str(n+1) + '\n'
srt += start + ' --> ' + end + '\n'
srt += caption + '\n\n'
return srt
url = 'http://www.youtube.com/' + urllib.unquote(mobj.group(1)).lstrip('/')
# Extract video id from URL
- mobj = re.match(self._VALID_URL, url)
+ mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
return
+ # Check for "rental" videos
+ if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
+ self._downloader.trouble(u'ERROR: "rental" videos not supported')
+ return
+
# Start extracting information
self.report_information_extraction(video_id)
return
video_title = urllib.unquote_plus(video_info['title'][0])
video_title = video_title.decode('utf-8')
- video_title = sanitize_title(video_title)
-
- # simplified title
- simple_title = simplify_title(video_title)
# thumbnail image
if 'thumbnail_url' not in video_info:
# closed captions
video_subtitles = None
if self._downloader.params.get('writesubtitles', False):
- self.report_video_subtitles_download(video_id)
- request = urllib2.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
try:
- srt_list = urllib2.urlopen(request).read()
- except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
- else:
- srt_lang_list = re.findall(r'lang_code="([\w\-]+)"', srt_list)
- if srt_lang_list:
- if self._downloader.params.get('subtitleslang', False):
- srt_lang = self._downloader.params.get('subtitleslang')
- elif 'en' in srt_lang_list:
- srt_lang = 'en'
- else:
- srt_lang = srt_lang_list[0]
- if not srt_lang in srt_lang_list:
- self._downloader.trouble(u'WARNING: no closed captions found in the specified language')
- else:
- request = urllib2.Request('http://video.google.com/timedtext?hl=en&lang=%s&v=%s' % (srt_lang, video_id))
- try:
- srt_xml = urllib2.urlopen(request).read()
- except (urllib2.URLError, httplib.HTTPException, socket.error), err:
- self._downloader.trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
- else:
- video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
+ self.report_video_subtitles_download(video_id)
+ request = urllib2.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
+ try:
+ srt_list = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
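+ # build a mapping of language code -> caption track name; the track
+ # name is needed later for the timedtext API request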
+ srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
+ srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
+ if not srt_lang_list:
+ raise Trouble(u'WARNING: video has no closed captions')
+ if self._downloader.params.get('subtitleslang', False):
+ srt_lang = self._downloader.params.get('subtitleslang')
+ elif 'en' in srt_lang_list:
+ srt_lang = 'en'
else:
- self._downloader.trouble(u'WARNING: video has no closed captions')
+ srt_lang = srt_lang_list.keys()[0]
+ if srt_lang not in srt_lang_list:
+ raise Trouble(u'WARNING: no closed captions found in the specified language')
+ request = urllib2.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
+ try:
+ srt_xml = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
+ if not srt_xml:
+ raise Trouble(u'WARNING: unable to download video subtitles')
+ video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
+ except Trouble as trouble:
+ self._downloader.trouble(trouble[0])
# token
video_token = urllib.unquote_plus(video_info['token'][0])
url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
url_data = [parse_qs(uds) for uds in url_data_strs]
url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
- url_map = dict((ud['itag'][0], ud['url'][0]) for ud in url_data)
+ url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
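+ # the stream URL and its signature are delivered as separate fields;
+ # re-attach the signature as the 'signature' query parameter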
format_limit = self._downloader.params.get('format_limit', None)
available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
'uploader': video_uploader.decode('utf-8'),
'upload_date': upload_date,
'title': video_title,
- 'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
'thumbnail': video_thumbnail.decode('utf-8'),
self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
return
- simple_title = mobj.group(2).decode('utf-8')
-
# Retrieve video webpage to extract further information
request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
try:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
- video_title = sanitize_title(video_title)
mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage)
if mobj is None:
'uploader': video_uploader.decode('utf-8'),
'upload_date': u'NA',
'title': video_title,
- 'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
class DailymotionIE(InfoExtractor):
"""Information Extractor for Dailymotion"""
- _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'
+ _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
IE_NAME = u'dailymotion'
def __init__(self, downloader=None):
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
- video_id = mobj.group(1)
+ video_id = mobj.group(1).split('_')[0].split('?')[0]
- video_extension = 'flv'
+ video_extension = 'mp4'
# Retrieve video webpage to extract further information
request = urllib2.Request(url)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
- mobj = re.search(r'(?i)addVariable\(\"sequence\"\s*,\s*\"([^\"]+?)\"\)', webpage)
+ mobj = re.search(r'\s*var flashvars = (.*)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
- sequence = urllib.unquote(mobj.group(1))
- mobj = re.search(r',\"sdURL\"\:\"([^\"]+?)\",', sequence)
+ flashvars = urllib.unquote(mobj.group(1))
+ if 'hqURL' in flashvars: max_quality = 'hqURL'
+ elif 'sdURL' in flashvars: max_quality = 'sdURL'
+ else: max_quality = 'ldURL'
+ mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
+ if mobj is None:
+ mobj = re.search(r'"video_url":"(.*?)",', flashvars)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
- mediaURL = urllib.unquote(mobj.group(1)).replace('\\', '')
-
- # if needed add http://www.dailymotion.com/ if relative URL
+ video_url = urllib.unquote(mobj.group(1)).replace('\\/', '/')
- video_url = mediaURL
+ # TODO: support choosing qualities
mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = unescapeHTML(mobj.group('title').decode('utf-8'))
- video_title = sanitize_title(video_title)
- simple_title = simplify_title(video_title)
mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a></span>', webpage)
if mobj is None:
return
video_uploader = mobj.group(1)
+ video_upload_date = u'NA'
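+ # the page shows the date as DD-MM-YYYY, so reorder the groups into YYYYMMDD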
+ mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
+ if mobj is not None:
+ video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
+
return [{
'id': video_id.decode('utf-8'),
'url': video_url.decode('utf-8'),
'uploader': video_uploader.decode('utf-8'),
- 'upload_date': u'NA',
+ 'upload_date': video_upload_date,
'title': video_title,
- 'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
- video_title = sanitize_title(video_title)
- simple_title = simplify_title(video_title)
# Extract video description
mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
'uploader': u'NA',
'upload_date': u'NA',
'title': video_title,
- 'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
- video_title = sanitize_title(video_title)
- simple_title = simplify_title(video_title)
video_uploader = mobj.group(2).decode('utf-8')
'uploader': video_uploader,
'upload_date': u'NA',
'title': video_title,
- 'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
self._downloader.trouble(u'ERROR: unable to extract video title')
return
video_title = mobj.group(1).decode('utf-8')
- simple_title = simplify_title(video_title)
mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
if mobj is None:
'uploader': video_uploader,
'upload_date': u'NA',
'title': video_title,
- 'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'thumbnail': video_thumbnail.decode('utf-8'),
'description': video_description,
# Extract title
video_title = config["video"]["title"]
- simple_title = simplify_title(video_title)
# Extract uploader
video_uploader = config["video"]["owner"]["name"]
'uploader': video_uploader,
'upload_date': video_upload_date,
'title': video_title,
- 'stitle': simple_title,
'ext': video_extension,
'thumbnail': video_thumbnail,
'description': video_description,
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
- video_title = sanitize_title(video_title)
- simple_title = simplify_title(video_title)
# video uploader is domain name
mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
'uploader': video_uploader,
'upload_date': u'NA',
'title': video_title,
- 'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
InfoExtractor.__init__(self, downloader)
def report_download_page(self, query, pagenum):
- """Report attempt to download playlist page with given number."""
+ """Report attempt to download search page with given number."""
query = query.decode(preferredencoding())
self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
- _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=PL%s&'
- _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
+ _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=.*?%s'
+ _MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
IE_NAME = u'youtube:playlist'
def __init__(self, downloader=None):
self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
+class BlipTVUserIE(InfoExtractor):
+ """Information Extractor for blip.tv users."""
+
+ _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
+ _PAGE_SIZE = 12
+ IE_NAME = u'blip.tv:user'
+
+ def __init__(self, downloader=None):
+ InfoExtractor.__init__(self, downloader)
+
+ def report_download_page(self, username, pagenum):
+ """Report attempt to download user page."""
+ self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' %
+ (self.IE_NAME, username, pagenum))
+
+ def _real_extract(self, url):
+ # Extract username
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+ return
+
+ username = mobj.group(1)
+
+ page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
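+ # the numeric users_id is filled in below, scraped from the
+ # data-users-id attribute on the user's profile page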
+
+ request = urllib2.Request(url)
+
+ try:
+ page = urllib2.urlopen(request).read().decode('utf-8')
+ mobj = re.search(r'data-users-id="([^"]+)"', page)
+ page_base = page_base % mobj.group(1)
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+ return
+
+
+ # Download video ids using BlipTV Ajax calls. The result size per
+ # query is limited (currently to 12 videos), so we query page by
+ # page until a page comes back with fewer ids than the page size,
+ # which means we have collected all of them.
+
+ video_ids = []
+ pagenum = 1
+
+ while True:
+ self.report_download_page(username, pagenum)
+
+ request = urllib2.Request( page_base + "&page=" + str(pagenum) )
+
+ try:
+ page = urllib2.urlopen(request).read().decode('utf-8')
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+ return
+
+ # Extract video identifiers
+ ids_in_page = []
+
+ for mobj in re.finditer(r'href="/([^"]+)"', page):
+ if mobj.group(1) not in ids_in_page:
+ ids_in_page.append(unescapeHTML(mobj.group(1)))
+
+ video_ids.extend(ids_in_page)
+
+ # A little optimization - if the current page is not
+ # "full", i.e. it contains fewer than _PAGE_SIZE video ids,
+ # we can assume it is the last one - there are no more ids
+ # on further pages, so there is no need to query again.
+
+ if len(ids_in_page) < self._PAGE_SIZE:
+ break
+
+ pagenum += 1
+
+ all_ids_count = len(video_ids)
+ playliststart = self._downloader.params.get('playliststart', 1) - 1
+ playlistend = self._downloader.params.get('playlistend', -1)
+
+ if playlistend == -1:
+ video_ids = video_ids[playliststart:]
+ else:
+ video_ids = video_ids[playliststart:playlistend]
+
+ self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
+ (self.IE_NAME, username, all_ids_count, len(video_ids)))
+
+ for video_id in video_ids:
+ self._downloader.download([u'http://blip.tv/'+video_id])
+
+
class DepositFilesIE(InfoExtractor):
"""Information extractor for depositfiles.com"""
'uploader': u'NA',
'upload_date': u'NA',
'title': file_title,
- 'stitle': file_title,
'ext': file_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
return
video_title = video_info['title']
video_title = video_title.decode('utf-8')
- video_title = sanitize_title(video_title)
-
- simple_title = simplify_title(video_title)
# thumbnail image
if 'thumbnail' not in video_info:
'uploader': video_uploader.decode('utf-8'),
'upload_date': upload_date,
'title': video_title,
- 'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
'thumbnail': video_thumbnail.decode('utf-8'),
else:
cchar = '?'
json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
- request = urllib2.Request(json_url)
+ request = urllib2.Request(json_url.encode('utf-8'))
self.report_extraction(mobj.group(1))
info = None
try:
'id': title,
'url': url,
'title': title,
- 'stitle': simplify_title(title),
'ext': ext,
'urlhandle': urlh
}
data = json_data['Post']
else:
data = json_data
-
+
upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
video_url = data['media']['url']
umobj = re.match(self._URL_EXT, video_url)
if umobj is None:
raise ValueError('Can not determine filename extension')
ext = umobj.group(1)
-
+
info = {
'id': data['item_id'],
'url': video_url,
'uploader': data['display_name'],
'upload_date': upload_date,
'title': data['title'],
- 'stitle': simplify_title(data['title']),
'ext': ext,
'format': data['media']['mimeType'],
'thumbnail': data['thumbnailUrl'],
self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
return
+ std_headers['User-Agent'] = 'iTunes/10.6.1'
return [info]
return
video_title = mobj.group(1)
- video_title = sanitize_title(video_title)
-
- simple_title = simplify_title(video_title)
return [{
'id': video_id,
'uploader': u'NA',
'upload_date': u'NA',
'title': video_title,
- 'stitle': simple_title,
'ext': u'flv',
'format': u'NA',
'player_url': None,
def report_extraction(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
-
+
def report_config_download(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
'uploader': showId,
'upload_date': officialDate,
'title': effTitle,
- 'stitle': simplify_title(effTitle),
'ext': 'mp4',
'format': format,
'thumbnail': None,
self.report_extraction(showName)
try:
- webPage = urllib2.urlopen(url).read()
+ webPage = urllib2.urlopen(url)
+ webPageBytes = webPage.read()
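+ # decode the page using the charset declared in the Content-Type
+ # header, falling back to UTF-8 if none is given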
+ m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
+ webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err))
return
'uploader': showName,
'upload_date': None,
'title': showName,
- 'stitle': simplify_title(showName),
'ext': 'flv',
'format': 'flv',
'thumbnail': imgUrl,
videoNode = mdoc.findall('./video')[0]
info['description'] = videoNode.findall('./description')[0].text
info['title'] = videoNode.findall('./caption')[0].text
- info['stitle'] = simplify_title(info['title'])
info['url'] = videoNode.findall('./file')[0].text
info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
info['ext'] = info['url'].rpartition('.')[2]
# Extract video thumbnail
- mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]/[a-fA-F0-9]/[a-fA-F0-9]/([a-fA-F0-9.]+jpg)', webpage)
+ mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
return
- video_thumbnail = mobj.group(1).decode('utf-8')
+ video_thumbnail = mobj.group(0).decode('utf-8')
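+ # group(0) is the full thumbnail URL; the old group(1) was only the trailing filename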
info = {
'id': video_id,
'uploader': None,
'upload_date': None,
'title': video_title,
- 'stitle': simplify_title(video_title),
'ext': 'flv',
'format': 'flv',
'thumbnail': video_thumbnail,
uploader = mobj.group(1).decode('utf-8')
# extract simple title (uploader + slug of song title)
slug_title = mobj.group(2).decode('utf-8')
- simple_title = uploader + '-' + slug_title
+ simple_title = uploader + u'-' + slug_title
self.report_webpage('%s/%s' % (uploader, slug_title))
# extract unsimplified title
mobj = re.search('"title":"(.*?)",', webpage)
if mobj:
- title = mobj.group(1)
+ title = mobj.group(1).decode('utf-8')
+ else:
+ title = simple_title
# construct media url (with uid/token)
mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s"
try:
upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
except Exception, e:
- print str(e)
+ self._downloader.to_stderr(str(e))
# for soundcloud, a request to a cross domain is required for cookies
request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
'url': mediaURL,
'uploader': uploader.decode('utf-8'),
'upload_date': upload_date,
- 'title': simple_title.decode('utf-8'),
- 'stitle': simple_title.decode('utf-8'),
+ 'title': title,
'ext': u'mp3',
'format': u'NA',
'player_url': None,
'uploader': None,
'upload_date': None,
'title': video_title,
- 'stitle': simplify_title(video_title),
'ext': extension,
'format': extension, # Extension is always(?) mp4, but seems to be flv
'thumbnail': None,
url_list = jsonData[fmt][bitrate]
except TypeError: # we have no bitrate info.
url_list = jsonData[fmt]
-
return url_list
def check_urls(self, url_list):
'uploader': uploader.decode('utf-8'),
'upload_date': u'NA',
'title': json_data['name'],
- 'stitle': simplify_title(json_data['name']),
'ext': file_url.split('.')[-1].decode('utf-8'),
'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
'thumbnail': json_data['thumbnail_url'],
course = mobj.group('course')
video = mobj.group('video')
info = {
- 'id': simplify_title(course + '_' + video),
+ 'id': course + '_' + video,
}
-
+
self.report_extraction(info['id'])
baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
xmlUrl = baseUrl + video + '.xml'
except IndexError:
self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
return
- info['stitle'] = simplify_title(info['title'])
info['ext'] = info['url'].rpartition('.')[2]
info['format'] = info['ext']
return [info]
elif mobj.group('course'): # A course page
course = mobj.group('course')
info = {
- 'id': simplify_title(course),
+ 'id': course,
'type': 'playlist',
}
info['title'] = unescapeHTML(m.group(1))
else:
info['title'] = info['id']
- info['stitle'] = simplify_title(info['title'])
m = re.search('<description>([^<]+)</description>', coursepage)
if m:
return
info['title'] = info['id']
- info['stitle'] = simplify_title(info['title'])
links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
info['list'] = [
'url': video_url,
'uploader': performer,
'title': video_title,
- 'stitle': simplify_title(video_title),
'ext': ext,
'format': format,
}
return [info]
+
+
+class YoukuIE(InfoExtractor):
+
+ _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
+ IE_NAME = u'Youku'
+
+ def __init__(self, downloader=None):
+ InfoExtractor.__init__(self, downloader)
+
+ def report_download_webpage(self, file_id):
+ """Report webpage download."""
+ self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id)
+
+ def report_extraction(self, file_id):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id)
+
+ def _gen_sid(self):
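+ # session id: current time in milliseconds followed by two random integers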
+ nowTime = int(time.time() * 1000)
+ random1 = random.randint(1000,1998)
+ random2 = random.randint(1000,9999)
+
+ return "%d%d%d" %(nowTime,random1,random2)
+
+ def _get_file_ID_mix_string(self, seed):
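+ # deterministically shuffle the character set with a small LCG keyed by
+ # `seed`; the resulting permutation is used to decode the real file id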
+ mixed = []
+ source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
+ seed = float(seed)
+ for i in range(len(source)):
+ seed = (seed * 211 + 30031 ) % 65536
+ index = math.floor(seed / 65536 * len(source) )
+ mixed.append(source[int(index)])
+ source.remove(source[int(index)])
+ #return ''.join(mixed)
+ return mixed
+
+ def _get_file_id(self, fileId, seed):
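+ # fileId is a '*'-separated list of indices into the shuffled character
+ # set; map each index back to its character to recover the real id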
+ mixed = self._get_file_ID_mix_string(seed)
+ ids = fileId.split('*')
+ realId = []
+ for ch in ids:
+ if ch:
+ realId.append(mixed[int(ch)])
+ return ''.join(realId)
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+ video_id = mobj.group('ID')
+
+ info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
+
+ request = urllib2.Request(info_url, None, std_headers)
+ try:
+ self.report_download_webpage(video_id)
+ jsondata = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+ return
+
+ self.report_extraction(video_id)
+ try:
+ config = json.loads(jsondata)
+
+ video_title = config['data'][0]['title']
+ seed = config['data'][0]['seed']
+
+ format = self._downloader.params.get('format', None)
+ supported_format = config['data'][0]['streamfileids'].keys()
+
+ if format is None or format == 'best':
+ if 'hd2' in supported_format:
+ format = 'hd2'
+ else:
+ format = 'flv'
+ ext = u'flv'
+ elif format == 'worst':
+ format = 'mp4'
+ ext = u'mp4'
+ else:
+ format = 'flv'
+ ext = u'flv'
+
+
+ fileid = config['data'][0]['streamfileids'][format]
+ seg_number = len(config['data'][0]['segs'][format])
+
+ keys=[]
+ for i in xrange(seg_number):
+ keys.append(config['data'][0]['segs'][format][i]['k'])
+
+ #TODO check error
+ #youku only could be viewed from mainland china
+ except:
+ self._downloader.trouble(u'ERROR: unable to extract info section')
+ return
+
+ files_info=[]
+ sid = self._gen_sid()
+ fileid = self._get_file_id(fileid, seed)
+
+ # characters 8-9 of fileid (fileid[8:10]) encode the segment number;
+ # replace them with the zero-padded hex index of each segment
+ for index, key in enumerate(keys):
+
+ temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
+ download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
+
+ info = {
+ 'id': '%s_part%02d' % (video_id, index),
+ 'url': download_url,
+ 'uploader': None,
+ 'title': video_title,
+ 'ext': ext,
+ 'format': u'NA'
+ }
+ files_info.append(info)
+
+ return files_info
+
+
+class XNXXIE(InfoExtractor):
+ """Information extractor for xnxx.com"""
+
+ _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
+ IE_NAME = u'xnxx'
+ VIDEO_URL_RE = r'flv_url=(.*?)&'
+ VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
+ VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&'
+
+ def report_webpage(self, video_id):
+ """Report information extraction"""
+ self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+ def report_extraction(self, video_id):
+ """Report information extraction"""
+ self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+ video_id = mobj.group(1).decode('utf-8')
+
+ self.report_webpage(video_id)
+
+ # Get webpage content
+ try:
+ webpage = urllib2.urlopen(url).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
+ return
+
+ result = re.search(self.VIDEO_URL_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video url')
+ return
+ video_url = urllib.unquote(result.group(1).decode('utf-8'))
+
+ result = re.search(self.VIDEO_TITLE_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video title')
+ return
+ video_title = result.group(1).decode('utf-8')
+
+ result = re.search(self.VIDEO_THUMB_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+ return
+ video_thumbnail = result.group(1).decode('utf-8')
+
+ info = {'id': video_id,
+ 'url': video_url,
+ 'uploader': None,
+ 'upload_date': None,
+ 'title': video_title,
+ 'ext': 'flv',
+ 'format': 'flv',
+ 'thumbnail': video_thumbnail,
+ 'description': None,
+ 'player_url': None}
+
+ return [info]