youtube_dl/extractor/vbox7.py
# encoding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
)


class Vbox7IE(InfoExtractor):
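    # extracts videos from vbox7.com URLs of the form http://vbox7.com/play:<id>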
    _VALID_URL = r'http://(?:www\.)?vbox7\.com/play:(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://vbox7.com/play:249bb972c2',
        'md5': '99f65c0c9ef9b682b97313e052734c3f',
        'info_dict': {
            'id': '249bb972c2',
            'ext': 'mp4',
            'title': 'Смях! Чудо - чист за секунди - Скрита камера',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # the page needs to be fetched three times to get the correct
        # jsSecretToken cookie, which is required for the correct title
        def get_session_id():
            redirect_page = self._download_webpage(url, video_id)
            session_id_url = self._search_regex(
                r'var\s*url\s*=\s*\'([^\']+)\';', redirect_page,
                'session id url')
            self._download_webpage(
                compat_urlparse.urljoin(url, session_id_url), video_id,
                'Getting session id')

        get_session_id()
        get_session_id()

        webpage = self._download_webpage(url, video_id,
                                         'Downloading redirect page')

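        # the <title> carries extra site text after a '/'; keep only the part
        # before the first slash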
        title = self._html_search_regex(r'<title>(.*)</title>',
                                        webpage, 'title').split('/')[0].strip()

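        # the media URL is obtained by POSTing the video id to the magare.do
        # endpoint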
        info_url = 'http://vbox7.com/play/magare.do'
        # POST data has to be bytes so the request also works on Python 3
        data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id}).encode('ascii')
        info_request = compat_urllib_request.Request(info_url, data)
        info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        info_response = self._download_webpage(
            info_request, video_id, 'Downloading info webpage')
        if info_response is None:
            raise ExtractorError('Unable to extract the media url')
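        # the response consists of two '&'-separated key=value pairs:
        # the media URL first, then the thumbnail URL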
        (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&'))

        return {
            'id': video_id,
            'url': final_url,
            'title': title,
            'thumbnail': thumbnail_url,
        }
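

# Minimal usage sketch, assuming youtube-dl's public API (YoutubeDL.extract_info);
# the URL is the one from _TEST above:
#
#     from youtube_dl import YoutubeDL
#     with YoutubeDL() as ydl:
#         info = ydl.extract_info('http://vbox7.com/play:249bb972c2', download=False)
#         print(info['title'], info['url'])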