Merge branch 'akamai_pv' of https://github.com/remitamine/youtube-dl into remitamine...
youtube_dl/extractor/vbox7.py
# encoding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    ExtractorError,
    sanitized_Request,
    urlencode_postdata,
)


class Vbox7IE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?vbox7\.com/play:(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://vbox7.com/play:249bb972c2',
        'md5': '99f65c0c9ef9b682b97313e052734c3f',
        'info_dict': {
            'id': '249bb972c2',
            'ext': 'mp4',
            'title': 'Смях! Чудо - чист за секунди - Скрита камера',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # the page needs to be fetched 3 times to obtain the correct
        # jsSecretToken cookie, which is necessary for the correct title
        def get_session_id():
            redirect_page = self._download_webpage(url, video_id)
            session_id_url = self._search_regex(
                r'var\s*url\s*=\s*\'([^\']+)\';', redirect_page,
                'session id url')
            self._download_webpage(
                compat_urlparse.urljoin(url, session_id_url), video_id,
                'Getting session id')

        get_session_id()
        get_session_id()

        webpage = self._download_webpage(url, video_id,
                                         'Downloading redirect page')

        title = self._html_search_regex(r'<title>(.*)</title>',
                                        webpage, 'title').split('/')[0].strip()

        # the playback endpoint returns the media URL and the thumbnail URL
        # as an &-separated key=value string
        info_url = 'http://vbox7.com/play/magare.do'
        data = urlencode_postdata({'as3': '1', 'vid': video_id})
        info_request = sanitized_Request(info_url, data)
        info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage')
        if info_response is None:
            raise ExtractorError('Unable to extract the media url')
        final_url, thumbnail_url = (x.split('=')[1] for x in info_response.split('&'))

        return {
            'id': video_id,
            'url': final_url,
            'title': title,
            'thumbnail': thumbnail_url,
        }