[youtube] Skip unsupported adaptive stream type (#18804)
[youtube-dl] / youtube_dl / extractor / wimp.py
1 from __future__ import unicode_literals
2
3 from .common import InfoExtractor
4 from .youtube import YoutubeIE
5
6
class WimpIE(InfoExtractor):
    """Extractor for wimp.com video pages.

    A wimp.com page either embeds a YouTube clip (in which case extraction
    is delegated to ``YoutubeIE``) or serves the video through a JWPlayer
    setup that is parsed straight out of the page markup.
    """
    _VALID_URL = r'https?://(?:www\.)?wimp\.com/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.wimp.com/maru-is-exhausted/',
        'md5': 'ee21217ffd66d058e8b16be340b74883',
        'info_dict': {
            'id': 'maru-is-exhausted',
            'ext': 'mp4',
            'title': 'Maru is exhausted.',
            'description': 'md5:57e099e857c0a4ea312542b684a869b8',
        }
    }, {
        'url': 'http://www.wimp.com/clowncar/',
        'md5': '5c31ad862a90dc5b1f023956faec13fe',
        'info_dict': {
            'id': 'cG4CEr2aiSg',
            'ext': 'webm',
            'title': 'Basset hound clown car...incredible!',
            'description': '5 of my Bassets crawled in this dog loo! www.bellinghambassets.com\n\nFor licensing/usage please contact: licensing(at)jukinmediadotcom',
            'upload_date': '20140303',
            'uploader': 'Gretchen Hoey',
            'uploader_id': 'gretchenandjeff1',
        },
        'add_ie': ['Youtube'],
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)

        # Prefer a YouTube embed when the page references one; an 11-char
        # YouTube video id may appear either as a JS "videoId" value or in a
        # data-id attribute.
        embedded_yt_id = self._search_regex(
            (r"videoId\s*:\s*[\"']([0-9A-Za-z_-]{11})[\"']",
             r'data-id=["\']([0-9A-Za-z_-]{11})'),
            page, 'video URL', default=None)
        if embedded_yt_id:
            return self.url_result(embedded_yt_id, YoutubeIE.ie_key())

        # Self-hosted clip: pull formats from the page's JWPlayer config,
        # then fill id/title/description from the page itself (og: tags).
        result = self._extract_jwplayer_data(
            page, display_id, require_title=False)
        result.update({
            'id': display_id,
            'title': self._og_search_title(page),
            'description': self._og_search_description(page),
        })
        return result