(?(1).+)? # if we found the ID, everything can follow
$"""
_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
- _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
+ _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_NETRC_MACHINE = 'youtube'
srt_lang = list(srt_lang_list.keys())[0]
if not srt_lang in srt_lang_list:
return (u'WARNING: no closed captions found in the specified language', None)
- request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
+ params = compat_urllib_parse.urlencode({
+ 'lang': srt_lang,
+ 'name': srt_lang_list[srt_lang].encode('utf-8'),
+ 'v': video_id,
+ })
+ url = 'http://www.youtube.com/api/timedtext?' + params
try:
- srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ srt_xml = compat_urllib_request.urlopen(url).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
if not srt_xml:
- return (u'WARNING: unable to download video subtitles', None)
+ return (u'WARNING: Did not fetch video subtitles', None)
return (None, self._closed_captions_xml_to_srt(srt_xml))
def _print_formats(self, formats):
if username is None:
return
+ request = compat_urllib_request.Request(self._LOGIN_URL)
+ try:
+ login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.to_stderr(u'WARNING: unable to fetch login page: %s' % compat_str(err))
+ return
+
+ galx = None
+ dsh = None
+ match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
+ if match:
+ galx = match.group(1)
+
+ match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
+ if match:
+ dsh = match.group(1)
+
# Log in
- login_form = {
- 'current_form': 'loginForm',
- 'next': '/',
- 'action_login': 'Log In',
- 'username': username,
- 'password': password,
- }
- request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+ login_form_strs = {
+ u'continue': u'http://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+ u'Email': username,
+ u'GALX': galx,
+ u'Passwd': password,
+ u'PersistentCookie': u'yes',
+ u'_utf8': u'霱',
+ u'bgresponse': u'js_disabled',
+ u'checkConnection': u'',
+ u'checkedDomains': u'youtube',
+ u'dnConn': u'',
+ u'dsh': dsh,
+ u'pstMsg': u'0',
+ u'rmShown': u'1',
+ u'secTok': u'',
+ u'signIn': u'Sign in',
+ u'timeStmp': u'',
+ u'service': u'youtube',
+ u'uilel': u'3',
+ u'hl': u'en_US',
+ }
+ # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
+ # chokes on unicode
+ login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
+ login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
+ request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
try:
self.report_login()
login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
- if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
+ if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
return
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
IE_NAME = u'dailymotion'
+ _WORKING = False
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
- _VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'
+ _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
IE_NAME = u'vimeo'
def __init__(self, downloader=None):
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
- video_id = mobj.group(1)
+ video_id = mobj.group('id')
+ if not mobj.group('proto'):
+ url = 'https://' + url
+ if mobj.group('direct_link'):
+ url = 'https://vimeo.com/' + video_id
# Retrieve video webpage to extract further information
request = compat_urllib_request.Request(url, None, std_headers)
params_raw = compat_urllib_parse.unquote(data['params'])
params = json.loads(params_raw)
video_url = params['hd_src']
+ if not video_url:
+ video_url = params['sd_src']
+ if not video_url:
+ raise ExtractorError(u'Cannot find video URL')
video_duration = int(params['video_duration'])
m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
webpage = self._download_webpage(webpage_url, video_id)
self.report_extraction(video_id)
- mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+ mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
webpage = self._download_webpage(url, playlist_id)
- m = re.search(r"new TRAX.Mix\((.*?)\);\n*\s*TRAX.initSearchAutocomplete\('#search'\);", webpage, flags=re.DOTALL)
+ m = re.search(r"PAGE.mix = (.*?);\n", webpage, flags=re.DOTALL)
if not m:
raise ExtractorError(u'Cannot find trax information')
json_like = m.group(1)
next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
return res
+class KeekIE(InfoExtractor):
+ _VALID_URL = r'http://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
+ IE_NAME = u'keek'
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
+ video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
+ thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
+ webpage = self._download_webpage(url, video_id)
+ m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
+ title = unescapeHTML(m.group('title'))
+ m = re.search(r'<div class="bio-names-and-report">[\s\n]+<h4>(?P<uploader>\w+)</h4>', webpage)
+ uploader = unescapeHTML(m.group('uploader'))
+ info = {
+ 'id':video_id,
+ 'url':video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'uploader': uploader
+ }
+ return [info]
+
+class TEDIE(InfoExtractor):
+ _VALID_URL=r'''http://www.ted.com/
+ (
+ ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
+ |
+ ((?P<type_talk>talks)) # We have a simple talk
+ )
+ /(?P<name>\w+) # Here goes the name and then ".html"
+ '''
+
+ def suitable(self, url):
+ """Receives a URL and returns True if suitable for this IE."""
+ return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
+ def _real_extract(self, url):
+ m=re.match(self._VALID_URL, url, re.VERBOSE)
+ if m.group('type_talk'):
+ return [self._talk_info(url)]
+ else :
+ playlist_id=m.group('playlist_id')
+ name=m.group('name')
+ self._downloader.to_screen(u'[%s] Getting info of playlist %s: "%s"' % (self.IE_NAME,playlist_id,name))
+ return self._playlist_videos_info(url,name,playlist_id)
+
+ def _talk_video_link(self,mediaSlug):
+ '''Returns the video link for that mediaSlug'''
+ return 'http://download.ted.com/talks/%s.mp4' % mediaSlug
+
+ def _playlist_videos_info(self,url,name,playlist_id=0):
+ '''Returns the videos of the playlist'''
+ video_RE=r'''
+ <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)"
+ ([.\s]*?)data-playlist_item_id="(\d+)"
+ ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)"
+ '''
+ video_name_RE=r'<p\ class="talk-title"><a href="/talks/(.+).html">(?P<fullname>.+?)</a></p>'
+ webpage=self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
+ m_videos=re.finditer(video_RE,webpage,re.VERBOSE)
+ m_names=re.finditer(video_name_RE,webpage)
+ info=[]
+ for m_video, m_name in zip(m_videos,m_names):
+ video_dic={
+ 'id': m_video.group('video_id'),
+ 'url': self._talk_video_link(m_video.group('mediaSlug')),
+ 'ext': 'mp4',
+ 'title': m_name.group('fullname')
+ }
+ info.append(video_dic)
+ return info
+ def _talk_info(self, url, video_id=0):
+ """Return the video for the talk in the url"""
+ m=re.match(self._VALID_URL, url,re.VERBOSE)
+ videoName=m.group('name')
+ webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
+ # If the url includes the language we get the title translated
+ title_RE=r'<h1><span id="altHeadline" >(?P<title>[\s\w:/\.\?=\+-\\\']*)</span></h1>'
+ title=re.search(title_RE, webpage).group('title')
+ info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
+ "id":(?P<videoID>[\d]+).*?
+ "mediaSlug":"(?P<mediaSlug>[\w\d]+?)"'''
+ info_match=re.search(info_RE,webpage,re.VERBOSE)
+ video_id=info_match.group('videoID')
+ mediaSlug=info_match.group('mediaSlug')
+ video_url=self._talk_video_link(mediaSlug)
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': title
+ }
+ return info
+
+class MySpassIE(InfoExtractor):
+ _VALID_URL = r'http://www.myspass.de/.*'
+
+ def _real_extract(self, url):
+ META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'
+
+ # video id is the last path element of the URL
+ # usually there is a trailing slash, so also try the second but last
+ url_path = compat_urllib_parse_urlparse(url).path
+ url_parent_path, video_id = os.path.split(url_path)
+ if not video_id:
+ _, video_id = os.path.split(url_parent_path)
+
+ # get metadata
+ metadata_url = META_DATA_URL_TEMPLATE % video_id
+ metadata_text = self._download_webpage(metadata_url, video_id)
+ metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))
+
+ # extract values from metadata
+ url_flv_el = metadata.find('url_flv')
+ if url_flv_el is None:
+ self._downloader.trouble(u'ERROR: unable to extract download url')
+ return
+ video_url = url_flv_el.text
+ extension = os.path.splitext(video_url)[1][1:]
+ title_el = metadata.find('title')
+ if title_el is None:
+ self._downloader.trouble(u'ERROR: unable to extract title')
+ return
+ title = title_el.text
+ format_id_el = metadata.find('format_id')
+ if format_id_el is None:
+ format = extension
+ else:
+ format = format_id_el.text
+ description_el = metadata.find('description')
+ if description_el is not None:
+ description = description_el.text
+ else:
+ description = None
+ imagePreview_el = metadata.find('imagePreview')
+ if imagePreview_el is not None:
+ thumbnail = imagePreview_el.text
+ else:
+ thumbnail = None
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'ext': extension,
+ 'format': format,
+ 'thumbnail': thumbnail,
+ 'description': description
+ }
+ return [info]
+
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
UstreamIE(),
RBMARadioIE(),
EightTracksIE(),
+ KeekIE(),
+ TEDIE(),
+ MySpassIE(),
GenericIE()
]