[subtitles] Simplify the extraction of subtitles in subclasses and remove NoAutoSubtitlesIE
[youtube-dl] / youtube_dl / extractor / dailymotion.py
1 import re
2 import json
3 import itertools
4 import socket
5
6 from .common import InfoExtractor
7 from .subtitles import SubtitlesInfoExtractor
8
9 from ..utils import (
10     compat_http_client,
11     compat_urllib_error,
12     compat_urllib_request,
13     compat_str,
14     get_element_by_attribute,
15     get_element_by_id,
16
17     ExtractorError,
18 )
19
20
class DailymotionIE(SubtitlesInfoExtractor):
    """Information Extractor for Dailymotion videos.

    Extracts the direct stream URL, uploader, upload date, title,
    thumbnail and available subtitles for a single video page.
    """

    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
    IE_NAME = u'dailymotion'
    _TEST = {
        u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
        u'file': u'x33vw9.mp4',
        u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
        u'info_dict': {
            u"uploader": u"Amphora Alex and Van .",
            u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
        }
    }

    def _real_extract(self, url):
        # Extract id from the URL: the id is the path component before
        # the first '_' (title slug) or '?' (query string).
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group(1).split('_')[0].split('?')[0]

        video_extension = 'mp4'
        url = 'http://www.dailymotion.com/video/%s' % video_id

        # Retrieve video webpage to extract further information.
        # The cookie disables the family filter so that age-restricted
        # videos are still reachable.
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'family_filter=off')
        webpage = self._download_webpage(request, video_id)

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)

        video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
                                             # Looking for official user
                                             r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
                                            webpage, 'video uploader')

        # The page shows the date as DD-MM-YYYY; reassemble it as YYYYMMDD.
        video_upload_date = None
        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)

        # The embed page contains a JSON blob ("var info = {...},")
        # with the actual stream URLs for each quality.
        embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
        embed_page = self._download_webpage(embed_url, video_id,
                                            u'Downloading embed page')
        info = self._search_regex(r'var info = ({.*?}),$', embed_page,
            'video info', flags=re.MULTILINE)
        info = json.loads(info)

        # TODO: support choosing qualities

        # Pick the first available stream, from highest to lowest quality.
        for key in ['stream_h264_hd1080_url', 'stream_h264_hd_url',
                    'stream_h264_hq_url', 'stream_h264_url',
                    'stream_h264_ld_url']:
            if info.get(key):
                max_quality = key
                self.to_screen(u'Using %s' % key)
                break
        else:
            raise ExtractorError(u'Unable to extract video URL')
        video_url = info[max_quality]

        # subtitles: when the user only wants the list of available
        # subtitles, print it and stop BEFORE downloading any subtitle
        # data (the original order extracted them needlessly first).
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id)
            return
        video_subtitles = self.extract_subtitles(video_id)

        return [{
            'id':       video_id,
            'url':      video_url,
            'uploader': video_uploader,
            'upload_date':  video_upload_date,
            'title':    self._og_search_title(webpage),
            'ext':      video_extension,
            'subtitles':    video_subtitles,
            'thumbnail': info['thumbnail_url']
        }]

    def _get_available_subtitles(self, video_id):
        """Return a dict mapping language code -> subtitle URL.

        Queries the Dailymotion API; returns an empty dict (with a
        warning) on network failure or when the video has no subtitles.
        """
        request = compat_urllib_request.Request('https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id)
        try:
            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
            return {}
        info = json.loads(sub_list)
        if (info['total'] > 0):
            sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
            return sub_lang_list
        self._downloader.report_warning(u'video doesn\'t have subtitles')
        return {}
113
114
class DailymotionPlaylistIE(InfoExtractor):
    """Information Extractor for Dailymotion playlists."""

    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
    _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>'

    def _real_extract(self, url):
        # Walk the playlist page by page, collecting video ids until the
        # "next" link disappears.
        playlist_id = re.match(self._VALID_URL, url).group('id')
        video_ids = []

        for pagenum in itertools.count(1):
            webpage = self._download_webpage(
                'https://www.dailymotion.com/playlist/%s/%s' % (playlist_id, pagenum),
                playlist_id, u'Downloading page %s' % pagenum)

            # Ids live in the data-id attributes inside the video_list element.
            listing = get_element_by_attribute(u'class', u'video_list', webpage)
            video_ids.extend(re.findall(r'data-id="(.+?)" data-ext-id', listing))

            if not re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL):
                break

        entries = [
            self.url_result('http://www.dailymotion.com/video/%s' % vid, 'Dailymotion')
            for vid in video_ids
        ]
        return {
            '_type': 'playlist',
            'id': playlist_id,
            # Title is read from the last page fetched above.
            'title': get_element_by_id(u'playlist_name', webpage),
            'entries': entries,
        }