else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
- self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+ self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
return
# Set language
self.report_lang()
compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
+ self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
return
# No authentication to be performed
try:
login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.to_stderr(u'WARNING: unable to fetch login page: %s' % compat_str(err))
+ self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
return
galx = None
self.report_login()
login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
- self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
+ self._downloader.report_warning(u'unable to log in: bad username or password')
return
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+ self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
return
# Confirm age
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_youtube_results:
- self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
+ self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
n = self._max_youtube_results
self._download_n_results(query, n)
return
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_google_results:
- self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
+ self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
n = self._max_google_results
self._download_n_results(query, n)
return
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_yahoo_results:
- self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
+ self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
n = self._max_yahoo_results
self._download_n_results(query, n)
return
break
page_num += 1
- videos = map(operator.itemgetter(1), sorted(videos))
-
+ videos = [v[1] for v in sorted(videos)]
total = len(videos)
playliststart = self._downloader.params.get('playliststart', 1) - 1
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
- self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+ self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
return
if useremail is None:
self.report_login()
login_results = compat_urllib_request.urlopen(request).read()
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
- self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
+ self._downloader.report_warning(u'unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
return
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+ self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
return
def _real_extract(self, url):
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
+ urlp = compat_urllib_parse_urlparse(url)
+ if urlp.path.startswith('/play/'):
+ request = compat_urllib_request.Request(url)
+ response = compat_urllib_request.urlopen(request)
+ redirecturl = response.geturl()
+ rurlp = compat_urllib_parse_urlparse(redirecturl)
+ file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
+ url = 'http://blip.tv/a/a-' + file_id
+ return self._real_extract(url)
+
+
if '?' in url:
cchar = '&'
else:
'uploader': showName,
'upload_date': None,
'title': showName,
- 'ext': 'flv',
+ 'ext': 'mp4',
'thumbnail': imgUrl,
'description': description,
'player_url': playerUrl,
}
return [info]
-class TweetReelIE(InfoExtractor):
- _VALID_URL = r'^(?:https?://)?(?:www\.)?tweetreel\.com/[?](?P<id>[0-9a-z]+)$'
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
- return
-
- video_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
-
- m = re.search(r'<div id="left" status_id="([0-9]+)">', webpage)
- if not m:
- self._downloader.trouble(u'ERROR: Cannot find status ID')
- status_id = m.group(1)
-
- m = re.search(r'<div class="tweet_text">(.*?)</div>', webpage, flags=re.DOTALL)
- if not m:
- self._downloader.trouble(u'WARNING: Cannot find description')
- desc = unescapeHTML(re.sub('<a.*?</a>', '', m.group(1))).strip()
-
- m = re.search(r'<div class="tweet_info">.*?from <a target="_blank" href="https?://twitter.com/(?P<uploader_id>.+?)">(?P<uploader>.+?)</a>', webpage, flags=re.DOTALL)
- if not m:
- self._downloader.trouble(u'ERROR: Cannot find uploader')
- uploader = unescapeHTML(m.group('uploader'))
- uploader_id = unescapeHTML(m.group('uploader_id'))
-
- m = re.search(r'<span unixtime="([0-9]+)"', webpage)
- if not m:
- self._downloader.trouble(u'ERROR: Cannot find upload date')
- upload_date = datetime.datetime.fromtimestamp(int(m.group(1))).strftime('%Y%m%d')
-
- title = desc
- video_url = 'http://files.tweetreel.com/video/' + status_id + '.mov'
-
- info = {
- 'id': video_id,
- 'url': video_url,
- 'ext': 'mov',
- 'title': title,
- 'description': desc,
- 'uploader': uploader,
- 'uploader_id': uploader_id,
- 'internal_id': status_id,
- 'upload_date': upload_date
- }
- return [info]
-
class SteamIE(InfoExtractor):
_VALID_URL = r"""http://store.steampowered.com/
(?P<urltype>video|app)/ #If the page is only for videos or for a game
}
return [info]
+class WorldStarHipHopIE(InfoExtractor):
+    # Matches desktop and mobile WorldStarHipHop / WorldStarCandy pages.
+    _VALID_URL = r"""(http://(?:www|m).worldstar(?:candy|hiphop)\.com.*)"""
+    IE_NAME = u'WorldStarHipHop'
+
+    def _real_extract(self, url):
+        """Scrape a WorldStarHipHop page for the direct video URL, title,
+        thumbnail and file extension; returns a one-element result list."""
+        results = []
+
+        # Direct media files are served from hw-videos.* and end in mp4 or flv.
+        _src_url = r"""(http://hw-videos.*(?:mp4|flv))"""
+
+        # NOTE(review): the raw response is searched without decoding; this
+        # works on Python 2 only — other extractors in this file decode utf-8.
+        webpage_src = compat_urllib_request.urlopen(str(url)).read()
+
+        mobj = re.search(_src_url, webpage_src)
+
+        if mobj is not None:
+            video_url = mobj.group()
+            if 'mp4' in video_url:
+                ext = '.mp4'
+            else:
+                ext = '.flv'
+        else:
+            # No downloadable source found; a result entry is still appended
+            # below with None fields.
+            video_url = None
+            ext = None
+
+        _title = r"""<title>(.*)</title>"""
+
+        mobj = re.search(_title, webpage_src)
+
+        if mobj is not None:
+            title = mobj.group(1)
+            # Clean up the scraped <title> text.
+            # NOTE(review): the next two lines are identical — one is redundant,
+            # or an HTML entity (e.g. &#39;) was lost from one pattern; confirm
+            # against the original commit.
+            title = title.replace("'", "")
+            title = title.replace("'", "")
+            title = title.replace('Video: ', '')
+            # NOTE(review): no-op replace ('"' -> '"') — presumably meant to
+            # decode &quot;; looks like HTML entities were mangled — confirm.
+            title = title.replace('"', '"')
+            # NOTE(review): replacing '&' with the letter 'n' looks wrong
+            # (likely a mangled &amp; substitution) — confirm intent.
+            title = title.replace('&', 'n')
+        else:
+            title = None
+
+        _thumbnail = r"""rel="image_src" href="(.*)" />"""
+
+        mobj = re.search(_thumbnail, webpage_src)
+
+        # Getting thumbnail and if not thumbnail sets correct title for WSHH candy video.
+        if mobj is not None:
+            thumbnail = mobj.group(1)
+        else:
+            # WSHH "candy" pages have no image_src; their title lives in a
+            # candytitles <span> instead of <title>.
+            _title = r"""candytitles.*>(.*)</span>"""
+            mobj = re.search(_title, webpage_src)
+            if mobj is not None:
+                title = mobj.group(1)
+            thumbnail = None
+
+        results.append({
+            'url' : video_url,
+            'title' : title,
+            'thumbnail' : thumbnail,
+            'ext' : ext
+        })
+
+        return results
+
class RBMARadioIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
# Get the video date
result = re.search(r'Date:</label>(?P<date>.*) </li>', webpage)
if result is None:
- self._downloader.to_stderr(u'WARNING: unable to extract video date')
+ self._downloader.report_warning(u'unable to extract video date')
upload_date = None
else:
upload_date = result.group('date').strip()
# Get the video uploader
result = re.search(r'Submitted:</label>(?P<uploader>.*)</li>', webpage)
if result is None:
- self._downloader.to_stderr(u'WARNING: unable to extract uploader')
+ self._downloader.report_warning(u'unable to extract uploader')
video_uploader = None
else:
video_uploader = result.group('uploader').strip()
GooglePlusIE(),
ArteTvIE(),
NBAIE(),
+ WorldStarHipHopIE(),
JustinTVIE(),
FunnyOrDieIE(),
- TweetReelIE(),
SteamIE(),
UstreamIE(),
RBMARadioIE(),