import re

from .common import InfoExtractor
from ..compat import compat_b64decode
from ..utils import parse_duration
class ChirbitIE(InfoExtractor):
    """Extractor for a single chirb.it audio clip.

    Supports plain clip URLs as well as the /wp/ and /pl/ player pages and
    the legacy Flash player (fb_chirbit_player.swf?key=...).
    """
    IE_NAME = 'chirbit'
    _VALID_URL = r'https?://(?:www\.)?chirb\.it/(?:(?:wp|pl)/|fb_chirbit_player\.swf\?key=)?(?P<id>[\da-zA-Z]+)'
    _TESTS = [{
        'url': 'http://chirb.it/be2abG',
        'info_dict': {
            'id': 'be2abG',
            'ext': 'mp3',
            'title': 'md5:f542ea253f5255240be4da375c6a5d7e',
            'description': 'md5:f24a4e22a71763e32da5fed59e47c770',
            'duration': 306,
            'uploader': 'Gerryaudio',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://chirb.it/fb_chirbit_player.swf?key=PrIPv5',
        'only_matching': True,
    }, {
        'url': 'https://chirb.it/wp/MN58c2',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        audio_id = self._match_id(url)

        # Always fetch the canonical clip page, regardless of which URL
        # variant (player page, SWF key) the user supplied.
        webpage = self._download_webpage(
            'http://chirb.it/%s' % audio_id, audio_id)

        data_fd = self._search_regex(
            r'data-fd=(["\'])(?P<url>(?:(?!\1).)+)\1',
            webpage, 'data fd', group='url')

        # Reverse engineered from https://chirb.it/js/chirbit.player.js (look
        # for soundURL): data-fd is the audio URL, reversed and base64-encoded.
        audio_url = compat_b64decode(data_fd[::-1]).decode('utf-8')

        title = self._search_regex(
            r'class=["\']chirbit-title["\'][^>]*>([^<]+)', webpage, 'title')
        description = self._search_regex(
            r'<h3>Description</h3>\s*<pre[^>]*>([^<]+)</pre>',
            webpage, 'description', default=None)
        duration = parse_duration(self._search_regex(
            r'class=["\']c-length["\'][^>]*>([^<]+)',
            webpage, 'duration', fatal=False))
        uploader = self._search_regex(
            r'id=["\']chirbit-username["\'][^>]*>([^<]+)',
            webpage, 'uploader', fatal=False)

        return {
            'id': audio_id,
            'url': audio_url,
            'title': title,
            'description': description,
            'duration': duration,
            'uploader': uploader,
        }

class ChirbitProfileIE(InfoExtractor):
    """Extractor for a chirbit.com user profile, yielded as a playlist.

    Each clip on the profile page carries a hidden copy-to-clipboard input
    whose element id embeds the clip id (``copy-btn-<id>``); every such id is
    turned into a chirb.it clip URL handled by ChirbitIE.
    """
    IE_NAME = 'chirbit:profile'
    _VALID_URL = r'https?://(?:www\.)?chirbit\.com/(?:rss/)?(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://chirbit.com/ScarletBeauty',
        'info_dict': {
            'id': 'ScarletBeauty',
        },
        'playlist_mincount': 3,
    }

    def _real_extract(self, url):
        profile_id = self._match_id(url)

        webpage = self._download_webpage(url, profile_id)

        # The first capture group only anchors the quote style; the clip id
        # is the second group.
        entries = [
            self.url_result(self._proto_relative_url('//chirb.it/' + video_id))
            for _, video_id in re.findall(r'<input[^>]+id=([\'"])copy-btn-(?P<id>[0-9a-zA-Z]+)\1', webpage)]

        return self.playlist_result(entries, profile_id)