Credit @daohoangson for zingmp3 (#4288)
diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py
index ccd31c4c7093d54e86df50a42600d08e12e55005..eab99faaaeb1ca744b07d827144556956e95d150 100644
--- a/youtube_dl/extractor/bambuser.py
+++ b/youtube_dl/extractor/bambuser.py
@@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor):
     _TEST = {
         'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        # u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
         'info_dict': {
             'id': '4050584',
             'ext': 'flv',
@@ -59,6 +59,13 @@ class BambuserChannelIE(InfoExtractor):
     _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
     # The maximum number we can get with each request
     _STEP = 50
+    _TEST = {
+        'url': 'http://bambuser.com/channel/pixelversity',
+        'info_dict': {
+            'title': 'pixelversity',
+        },
+        'playlist_mincount': 60,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
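
The added _TEST block gives BambuserChannelIE a playlist test: 'playlist_mincount' makes youtube-dl's test runner assert that the extractor yields at least 60 entries, a sensible lower bound for a channel whose video count only grows. As a rough, simplified sketch (not part of this diff), an extractor satisfies such a test by returning a playlist-typed result:

    # Simplified sketch of the playlist result shape that
    # 'playlist_mincount' and 'info_dict' are checked against.
    def playlist_result_sketch(user, urls):
        return {
            '_type': 'playlist',  # marks the result as a playlist
            'title': user,        # compared with _TEST['info_dict']
            'entries': urls,      # must hold >= playlist_mincount items
        }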
@@ -73,10 +80,10 @@ class BambuserChannelIE(InfoExtractor):
             req = compat_urllib_request.Request(req_url)
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
-            info_json = self._download_webpage(req, user,
-                'Downloading page %d' % i)
-            results = json.loads(info_json)['result']
-            if len(results) == 0:
+            data = self._download_json(
+                req, user, 'Downloading page %d' % i)
+            results = data['result']
+            if not results:
                 break
             last_id = results[-1]['vid']
             urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)
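
The last hunk replaces a manual _download_webpage + json.loads pair with the _download_json helper and tightens the emptiness check to the more idiomatic 'if not results:'. Behaviorally, _download_json is roughly a fetch-then-decode wrapper; a minimal sketch, omitting the real helper's extra options (e.g. fatal, transform_source):

    import json

    def download_json_sketch(self, url_or_request, video_id, note):
        # Fetch the resource as text, then decode it as JSON in one step.
        webpage = self._download_webpage(url_or_request, video_id, note)
        return json.loads(webpage)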