youtube_dl/extractor/dailymotion.py
import re
import json
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor

from ..utils import (
    compat_urllib_request,
    compat_str,
    get_element_by_attribute,
    get_element_by_id,
    orderedSet,

    ExtractorError,
)


class DailymotionBaseInfoExtractor(InfoExtractor):
    @staticmethod
    def _build_request(url):
        """Build a request with the family filter disabled"""
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'family_filter=off')
        return request


class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
    IE_NAME = u'dailymotion'
    _TESTS = [
        {
            u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
            u'file': u'x33vw9.mp4',
            u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
            u'info_dict': {
                u"uploader": u"Amphora Alex and Van .",
                u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
            }
        },
        # Vevo video
        {
            u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
            u'file': u'USUV71301934.mp4',
            u'info_dict': {
                u'title': u'Roar (Official)',
                u'uploader': u'Katy Perry',
                u'upload_date': u'20130905',
            },
            u'params': {
                u'skip_download': True,
            },
            u'skip': u'VEVO is only available in some countries',
        },
    ]

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group(1).split('_')[0].split('?')[0]

        video_extension = 'mp4'
        url = 'http://www.dailymotion.com/video/%s' % video_id

        # Retrieve video webpage to extract further information
        request = self._build_request(url)
        webpage = self._download_webpage(request, video_id)

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)

        # It may just embed a vevo video:
        m_vevo = re.search(
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
            webpage)
        if m_vevo is not None:
            vevo_id = m_vevo.group('id')
            self.to_screen(u'Vevo video detected: %s' % vevo_id)
            return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo')

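        # Uploader: try the channel-owner block first, then fall back to the
        # rel="author" link that official users carry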
        video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
                                             # Looking for official user
                                             r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
                                            webpage, 'video uploader')

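        # The page shows the upload date as DD-MM-YYYY; rearrange it into the
        # YYYYMMDD form youtube-dl expects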
        video_upload_date = None
        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)

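        # The embed page exposes the stream URLs in a JavaScript "info" object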
        embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
        embed_page = self._download_webpage(embed_url, video_id,
                                            u'Downloading embed page')
        info = self._search_regex(r'var info = ({.*?}),$', embed_page,
            'video info', flags=re.MULTILINE)
        info = json.loads(info)
        if info.get('error') is not None:
            msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
            raise ExtractorError(msg, expected=True)

        # TODO: support choosing qualities

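        # Take the first stream that is present, going from the highest
        # quality (1080p) down to LD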
        for key in ['stream_h264_hd1080_url', 'stream_h264_hd_url',
                    'stream_h264_hq_url', 'stream_h264_url',
                    'stream_h264_ld_url']:
            if info.get(key):
                max_quality = key
                self.to_screen(u'Using %s' % key)
                break
        else:
            raise ExtractorError(u'Unable to extract video URL')
        video_url = info[max_quality]

        # subtitles
        video_subtitles = self.extract_subtitles(video_id)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id)
            return

        return [{
            'id':       video_id,
            'url':      video_url,
            'uploader': video_uploader,
            'upload_date':  video_upload_date,
            'title':    self._og_search_title(webpage),
            'ext':      video_extension,
            'subtitles':    video_subtitles,
            'thumbnail': info['thumbnail_url']
        }]

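    # Subtitle tracks are listed by the Dailymotion REST API rather than
    # scraped from the page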
    def _get_available_subtitles(self, video_id):
        try:
            sub_list = self._download_webpage(
                'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
            return {}
        info = json.loads(sub_list)
        if info['total'] > 0:
            sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
            return sub_lang_list
        self._downloader.report_warning(u'video doesn\'t have subtitles')
        return {}


class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
    IE_NAME = u'dailymotion:playlist'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
    _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>'
    _PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'

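    # Collect video ids page by page until the "next" link disappears from
    # the playlist markup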
    def _extract_entries(self, id):
        video_ids = []
        for pagenum in itertools.count(1):
            request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
            webpage = self._download_webpage(request,
                                             id, u'Downloading page %s' % pagenum)

            playlist_el = get_element_by_attribute(u'class', u'video_list', webpage)
            video_ids.extend(re.findall(r'data-id="(.+?)"', playlist_el))

            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break
        return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
                   for video_id in orderedSet(video_ids)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        webpage = self._download_webpage(url, playlist_id)

        return {'_type': 'playlist',
                'id': playlist_id,
                'title': get_element_by_id(u'playlist_name', webpage),
                'entries': self._extract_entries(playlist_id),
                }


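# User pages are paginated the same way as playlists, so the playlist
# extractor's pagination logic is reused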
class DailymotionUserIE(DailymotionPlaylistIE):
    IE_NAME = u'dailymotion:user'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
    _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/user/.+?".*?>.*?</a>.*?</div>'
    _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        webpage = self._download_webpage(url, user)
        full_user = self._html_search_regex(
            r'<a class="label" href="/%s".*?>(.*?)</' % re.escape(user),
            webpage, u'user', flags=re.DOTALL)

        return {
            '_type': 'playlist',
            'id': user,
            'title': full_user,
            'entries': self._extract_entries(user),
        }