[facebook] Add md5 for the test case with DASH
[youtube-dl] / youtube_dl / extractor / facebook.py
1 from __future__ import unicode_literals
2
3 import json
4 import re
5 import socket
6
7 from .common import InfoExtractor
8 from ..compat import (
9     compat_etree_fromstring,
10     compat_http_client,
11     compat_urllib_error,
12     compat_urllib_parse_unquote,
13     compat_urllib_parse_unquote_plus,
14 )
15 from ..utils import (
16     error_to_compat_str,
17     ExtractorError,
18     limit_length,
19     sanitized_Request,
20     urlencode_postdata,
21     get_element_by_id,
22     clean_html,
23 )
24
25
class FacebookIE(InfoExtractor):
    """Extract a single video from Facebook.

    Matches several URL layouts (video.php, photo.php, video/embed,
    /<user>/videos/...) as well as the internal 'facebook:<id>'
    pseudo-URL scheme used by FacebookPostIE.
    """
    _VALID_URL = r'''(?x)
                (?:
                    https?://
                        (?:\w+\.)?facebook\.com/
                        (?:[^#]*?\#!/)?
                        (?:
                            (?:
                                video/video\.php|
                                photo\.php|
                                video\.php|
                                video/embed
                            )\?(?:.*?)(?:v|video_id)=|
                            [^/]+/videos/(?:[^/]+/)?
                        )|
                    facebook:
                )
                (?P<id>[0-9]+)
                '''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'

    # Desktop Chrome user agent; presumably makes Facebook serve the full
    # desktop markup that the scraping regexes below expect -- TODO confirm.
    _CHROME_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'

    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'note': 'Video with DASH manifest',
        'url': 'https://www.facebook.com/video.php?v=957955867617029',
        'md5': '54706e4db4f5ad58fbad82dde1f1213f',
        'info_dict': {
            'id': '957955867617029',
            'ext': 'mp4',
            'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
            'uploader': 'Demy de Zeeuw',
        },
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
        'only_matching': True,
    }, {
        'url': 'facebook:544765982287235',
        'only_matching': True,
    }]

    def _login(self):
        """Best-effort login with credentials from options or .netrc.

        Returns silently when no credentials are configured or when the
        expected form tokens cannot be found; network failures only emit
        a warning so extraction can continue anonymously.
        """
        (useremail, password) = self._get_login_info()
        if useremail is None:
            # No credentials configured; proceed without logging in.
            return

        login_page_req = sanitized_Request(self._LOGIN_URL)
        # Force English so the regex-based scraping below sees stable markup.
        self._set_cookie('facebook.com', 'locale', 'en_US')
        login_page = self._download_webpage(login_page_req, None,
                                            note='Downloading login page',
                                            errnote='Unable to download login page')
        # 'lsd' and 'lgnrnd' are hidden tokens embedded in the login form;
        # both must be echoed back in the login POST.
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                                                   note='Logging in', errnote='unable to fetch login page')
            # If the response still contains a login form, the login failed;
            # try to surface Facebook's own error message.
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                error = self._html_search_regex(
                    r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
                    login_results, 'login error', default=None, group='error')
                if error:
                    raise ExtractorError('Unable to login: %s' % error, expected=True)
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            # Facebook may interpose a "checkpoint" page asking whether to
            # remember this browser; answer with 'dont_save' when the
            # required form tokens are present.
            fb_dtsg = self._search_regex(
                r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
            h = self._search_regex(
                r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)

            if not fb_dtsg or not h:
                # Not a checkpoint page (or unrecognized markup); nothing to do.
                return

            check_form = {
                'fb_dtsg': fb_dtsg,
                'h': h,
                'name_action_selected': 'dont_save',
            }
            check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                                                    note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            # Login is best-effort: warn and fall back to anonymous access.
            self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
            return

    def _real_initialize(self):
        # Called once by the framework before any extraction.
        self._login()

    def _real_extract(self, url):
        """Download the video page and build the info dict for one video."""
        video_id = self._match_id(url)
        req = sanitized_Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
        req.add_header('User-Agent', self._CHROME_USER_AGENT)
        webpage = self._download_webpage(req, video_id)

        video_data = None

        # Primary path: the Flash embed passes its configuration as
        # URL-encoded JSON between these two literal script fragments.
        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if m:
            data = dict(json.loads(m.group(1)))
            params_raw = compat_urllib_parse_unquote(data['params'])
            video_data = json.loads(params_raw)['video_data']

        def video_data_list2dict(video_data):
            # Group the flat list of stream entries by 'stream_type' so the
            # result has the same shape as in the Flash-params case above.
            ret = {}
            for item in video_data:
                format_id = item['stream_type']
                ret.setdefault(format_id, []).append(item)
            return ret

        if not video_data:
            # Fallback: newer pages ship the config through handleServerJS()
            # as a 'VideoConfig' instance.
            server_js_data = self._parse_json(self._search_regex(
                r'handleServerJS\(({.+})\);', webpage, 'server js data'), video_id)
            for item in server_js_data['instances']:
                if item[1][0] == 'VideoConfig':
                    video_data = video_data_list2dict(item[2][0]['videoData'])
                    break

        if not video_data:
            # No config found: surface Facebook's own interstitial message
            # (removed video, geo-block, ...) if one is present.
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')

        formats = []
        for format_id, f in video_data.items():
            if not f or not isinstance(f, list):
                continue
            # Each stream entry may carry up to four direct URLs:
            # {sd,hd} x {src,src_no_ratelimit}.
            for quality in ('sd', 'hd'):
                for src_type in ('src', 'src_no_ratelimit'):
                    src = f[0].get('%s_%s' % (quality, src_type))
                    if src:
                        formats.append({
                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
                            'url': src,
                            # Rank 'progressive' streams below the others.
                            'preference': -10 if format_id == 'progressive' else 0,
                        })
            # When present, the DASH manifest is URL-encoded MPD XML embedded
            # directly in the config.
            dash_manifest = f[0].get('dash_manifest')
            if dash_manifest:
                formats.extend(self._parse_dash_manifest(
                    compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest)),
                    namespace='urn:mpeg:dash:schema:mpd:2011'))
        if not formats:
            raise ExtractorError('Cannot find video formats')

        self._sort_formats(formats)

        # Title fallback chain: page header -> photo caption (truncated to
        # 80 chars) -> generic 'Facebook video #<id>'.
        video_title = self._html_search_regex(
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id
        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
        }
244
245
class FacebookPostIE(InfoExtractor):
    """Extract every video embedded in a Facebook post as a playlist.

    Each video id found in the post page is delegated to FacebookIE via
    the internal 'facebook:<id>' pseudo-URL.
    """
    IE_NAME = 'facebook:post'
    _VALID_URL = r'https?://(?:\w+\.)?facebook\.com/[^/]+/posts/(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
        'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
        'info_dict': {
            'id': '544765982287235',
            'ext': 'mp4',
            'title': '"What are you doing running in the snow?"',
            'uploader': 'FailArmy',
        }
    }

    def _real_extract(self, url):
        post_id = self._match_id(url)
        webpage = self._download_webpage(url, post_id)

        # The post page embeds a JSON array of the contained video ids.
        ids_json = self._search_regex(
            r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
            webpage, 'video ids', group='ids')
        video_ids = self._parse_json(ids_json, post_id)

        entries = []
        for video_id in video_ids:
            entries.append(
                self.url_result('facebook:%s' % video_id, FacebookIE.ie_key()))

        return self.playlist_result(entries, post_id)
272
273         return self.playlist_result(entries, post_id)