projects
/
youtube-dl
/ commitdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
| commitdiff |
tree
raw
|
patch
|
inline
| side by side (parent:
417b453
)
[soundcloud] Remove limit on search results
author
reiv
<metareiv@gmail.com>
Fri, 30 Oct 2015 22:56:07 +0000
(23:56 +0100)
committer
Sergey M․
<dstftw@gmail.com>
Sat, 21 Nov 2015 13:41:36 +0000
(19:41 +0600)
youtube_dl/extractor/soundcloud.py
patch
|
blob
|
history
diff --git
a/youtube_dl/extractor/soundcloud.py
b/youtube_dl/extractor/soundcloud.py
index 6b510a43b84f978b56b9128479e539a452775802..bba29d8453f4d66b3c59ca4b80b1ac1a84539c36 100644
(file)
--- a/
youtube_dl/extractor/soundcloud.py
+++ b/
youtube_dl/extractor/soundcloud.py
@@ -477,7 +477,7 @@
class SoundcloudPlaylistIE(SoundcloudIE):
class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
IE_NAME = 'soundcloud:search'
IE_DESC = 'Soundcloud search'
class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
IE_NAME = 'soundcloud:search'
IE_DESC = 'Soundcloud search'
-    _MAX_RESULTS = 200
+    _MAX_RESULTS = float('inf')
_TESTS = [{
'url': 'scsearch15:post-avant jazzcore',
'info_dict': {
_TESTS = [{
'url': 'scsearch15:post-avant jazzcore',
'info_dict': {
@@ -487,24 +487,28 @@
class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
}]
_SEARCH_KEY = 'scsearch'
}]
_SEARCH_KEY = 'scsearch'
- _RESULTS_PER_PAGE = 50
+ _MAX_RESULTS_PER_PAGE = 200
+ _DEFAULT_RESULTS_PER_PAGE = 50
_API_V2_BASE = 'https://api-v2.soundcloud.com'
def _get_collection(self, endpoint, collection_id, **query):
_API_V2_BASE = 'https://api-v2.soundcloud.com'
def _get_collection(self, endpoint, collection_id, **query):
- query['limit'] = self._RESULTS_PER_PAGE
+ query['limit'] = results_per_page = min(
+ query.get('limit', self._DEFAULT_RESULTS_PER_PAGE),
+ self._MAX_RESULTS_PER_PAGE)
query['client_id'] = self._CLIENT_ID
query['linked_partitioning'] = '1'
query['client_id'] = self._CLIENT_ID
query['linked_partitioning'] = '1'
-        total_results = self._MAX_RESULTS
+        total_results = None
collected_results = 0
next_url = None
for i in itertools.count():
if not next_url:
collected_results = 0
next_url = None
for i in itertools.count():
if not next_url:
-                query['offset'] = i * self._RESULTS_PER_PAGE
+                query['offset'] = i * results_per_page
data = compat_urllib_parse.urlencode(query)
data = compat_urllib_parse.urlencode(query)
- next_url = '{0}{1}?{2}'.format(self._API_V2_BASE, endpoint, data)
+ next_url = '{0}{1}?{2}'.format(
+ self._API_V2_BASE, endpoint, data)
response = self._download_json(next_url,
video_id=collection_id,
response = self._download_json(next_url,
video_id=collection_id,
@@ -520,7 +524,8 @@
class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
for item in filter(bool, collection):
yield item
for item in filter(bool, collection):
yield item
- if collected_results >= total_results or not collection:
+ if (total_results is not None and
+ collected_results >= total_results) or not collection:
break
next_url = response.get('next_href', None)
break
next_url = response.get('next_href', None)
@@ -528,7 +533,7 @@
class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
def _get_n_results(self, query, n):
tracks = self._get_collection('/search/tracks',
collection_id='Query "{0}"'.format(query),
def _get_n_results(self, query, n):
tracks = self._get_collection('/search/tracks',
collection_id='Query "{0}"'.format(query),
-            q=query.encode('utf-8'))
+            limit=n, q=query)
results = [self.url_result(url=track['uri'])
for track in itertools.islice(tracks, n)]
results = [self.url_result(url=track['uri'])
for track in itertools.islice(tracks, n)]