[facebook] Don't override variable in list comprehension
[youtube-dl] youtube_dl/extractor/facebook.py
1 from __future__ import unicode_literals
2
3 import json
4 import re
5 import socket
6
7 from .common import InfoExtractor
8 from ..compat import (
9     compat_etree_fromstring,
10     compat_http_client,
11     compat_urllib_error,
12     compat_urllib_parse_unquote,
13     compat_urllib_parse_unquote_plus,
14 )
15 from ..utils import (
16     error_to_compat_str,
17     ExtractorError,
18     limit_length,
19     sanitized_Request,
20     urlencode_postdata,
21     get_element_by_id,
22     clean_html,
23 )
24
25
class FacebookIE(InfoExtractor):
    # Matches facebook.com video/photo/story URLs (including the legacy
    # '#!/' fragment style), '/<user>/videos/...' and '/<user>/posts/...'
    # pages, plus the internal 'facebook:<id>' scheme used by _real_extract
    # to re-enter this extractor for individual videos of a post.
    _VALID_URL = r'''(?x)
                (?:
                    https?://
                        (?:\w+\.)?facebook\.com/
                        (?:[^#]*?\#!/)?
                        (?:
                            (?:
                                video/video\.php|
                                photo\.php|
                                video\.php|
                                video/embed|
                                story\.php
                            )\?(?:.*?)(?:v|video_id|story_fbid)=|
                            [^/]+/videos/(?:[^/]+/)?|
                            [^/]+/posts/
                        )|
                    facebook:
                )
                (?P<id>[0-9]+)
                '''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    # Confirmation page Facebook may show after login; _login POSTs here to
    # acknowledge the new session without saving the device.
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'

    # Desktop Chrome user agent; sent when fetching video pages in
    # _extract_from_url — presumably to receive the full desktop markup
    # that the extraction regexes expect (TODO confirm).
    _CHROME_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'

    # Canonical video page for a bare numeric id (used to resolve
    # 'facebook:<id>' URLs and as the retry target in _real_extract).
    _VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'

    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'note': 'Video with DASH manifest',
        'url': 'https://www.facebook.com/video.php?v=957955867617029',
        'md5': '54706e4db4f5ad58fbad82dde1f1213f',
        'info_dict': {
            'id': '957955867617029',
            'ext': 'mp4',
            'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
            'uploader': 'Demy de Zeeuw',
        },
    }, {
        'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
        'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
        'info_dict': {
            'id': '544765982287235',
            'ext': 'mp4',
            'title': '"What are you doing running in the snow?"',
            'uploader': 'FailArmy',
        }
    }, {
        'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
        'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
        'info_dict': {
            'id': '1035862816472149',
            'ext': 'mp4',
            'title': 'What the Flock Is Going On In New Zealand  Credit: ViralHog',
            'uploader': 'S. Saint',
        },
    }, {
        'note': 'swf params escaped',
        'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
        'md5': '97ba073838964d12c70566e0085c2b91',
        'info_dict': {
            'id': '10153664894881749',
            'ext': 'mp4',
            'title': 'Facebook video #10153664894881749',
        },
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
        'only_matching': True,
    }, {
        'url': 'facebook:544765982287235',
        'only_matching': True,
    }]
127
128     def _login(self):
129         (useremail, password) = self._get_login_info()
130         if useremail is None:
131             return
132
133         login_page_req = sanitized_Request(self._LOGIN_URL)
134         self._set_cookie('facebook.com', 'locale', 'en_US')
135         login_page = self._download_webpage(login_page_req, None,
136                                             note='Downloading login page',
137                                             errnote='Unable to download login page')
138         lsd = self._search_regex(
139             r'<input type="hidden" name="lsd" value="([^"]*)"',
140             login_page, 'lsd')
141         lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')
142
143         login_form = {
144             'email': useremail,
145             'pass': password,
146             'lsd': lsd,
147             'lgnrnd': lgnrnd,
148             'next': 'http://facebook.com/home.php',
149             'default_persistent': '0',
150             'legacy_return': '1',
151             'timezone': '-60',
152             'trynum': '1',
153         }
154         request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
155         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
156         try:
157             login_results = self._download_webpage(request, None,
158                                                    note='Logging in', errnote='unable to fetch login page')
159             if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
160                 error = self._html_search_regex(
161                     r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
162                     login_results, 'login error', default=None, group='error')
163                 if error:
164                     raise ExtractorError('Unable to login: %s' % error, expected=True)
165                 self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
166                 return
167
168             fb_dtsg = self._search_regex(
169                 r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
170             h = self._search_regex(
171                 r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)
172
173             if not fb_dtsg or not h:
174                 return
175
176             check_form = {
177                 'fb_dtsg': fb_dtsg,
178                 'h': h,
179                 'name_action_selected': 'dont_save',
180             }
181             check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
182             check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
183             check_response = self._download_webpage(check_req, None,
184                                                     note='Confirming login')
185             if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
186                 self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
187         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
188             self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
189             return
190
    def _real_initialize(self):
        # Attempt authentication before extraction; _login is a no-op when
        # no credentials are configured.
        self._login()
193
    def _extract_from_url(self, url, video_id, fatal_if_no_video=True):
        """Download a Facebook page and try to extract a single video from it.

        Returns a (webpage, info_dict) tuple. When no video data is found and
        fatal_if_no_video is False, returns (webpage, False) so the caller can
        fall back to post/playlist handling; otherwise raises ExtractorError.
        """
        req = sanitized_Request(url)
        # Fetch with a desktop browser UA — presumably to get the full
        # desktop markup the regexes below expect (TODO confirm).
        req.add_header('User-Agent', self._CHROME_USER_AGENT)
        webpage = self._download_webpage(req, video_id)

        video_data = None

        # First attempt: video config embedded as (possibly JS-escaped) JSON
        # between the swf addParam/addVariable bootstrap calls.
        BEFORE = '{swf.addParam(param[0], param[1]);});'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(?:\n|\\\\n)(.*?)' + re.escape(AFTER), webpage)
        if m:
            # Undo JS string escaping before JSON-parsing the payload
            # (covers the 'swf params escaped' test case).
            swf_params = m.group(1).replace('\\\\', '\\').replace('\\"', '"')
            data = dict(json.loads(swf_params))
            params_raw = compat_urllib_parse_unquote(data['params'])
            video_data = json.loads(params_raw)['video_data']

        def video_data_list2dict(video_data):
            # Group the flat list of stream dicts by 'stream_type' so the
            # result has the same {format_id: [streams]} shape as the
            # swf-params path above.
            ret = {}
            for item in video_data:
                format_id = item['stream_type']
                ret.setdefault(format_id, []).append(item)
            return ret

        # Second attempt: server-rendered bootstrap data (handleServerJS).
        if not video_data:
            server_js_data = self._parse_json(self._search_regex(
                r'handleServerJS\(({.+})\);', webpage, 'server js data', default='{}'), video_id)
            for item in server_js_data.get('instances', []):
                if item[1][0] == 'VideoConfig':
                    video_data = video_data_list2dict(item[2][0]['videoData'])
                    break

        if not video_data:
            if not fatal_if_no_video:
                return webpage, False
            # Surface Facebook's own unavailability message when present.
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')

        formats = []
        for format_id, f in video_data.items():
            if not f or not isinstance(f, list):
                continue
            # Each stream entry may expose sd/hd sources, each in a normal
            # and a no-ratelimit variant.
            for quality in ('sd', 'hd'):
                for src_type in ('src', 'src_no_ratelimit'):
                    src = f[0].get('%s_%s' % (quality, src_type))
                    if src:
                        # Deprioritize progressive downloads; favour hd.
                        preference = -10 if format_id == 'progressive' else 0
                        if quality == 'hd':
                            preference += 5
                        formats.append({
                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
                            'url': src,
                            'preference': preference,
                        })
            # The DASH manifest is URL-quoted XML embedded in the JSON.
            dash_manifest = f[0].get('dash_manifest')
            if dash_manifest:
                formats.extend(self._parse_mpd_formats(
                    compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
        if not formats:
            raise ExtractorError('Cannot find video formats')

        self._sort_formats(formats)

        # Title fallbacks: page header -> photo caption (truncated) ->
        # generic 'Facebook video #<id>'.
        video_title = self._html_search_regex(
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id
        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        info_dict = {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
        }

        return webpage, info_dict
281
282     def _real_extract(self, url):
283         video_id = self._match_id(url)
284
285         real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url
286         webpage, info_dict = self._extract_from_url(real_url, video_id, fatal_if_no_video=False)
287
288         if info_dict:
289             return info_dict
290
291         if '/posts/' in url:
292             entries = [
293                 self.url_result('facebook:%s' % vid, FacebookIE.ie_key())
294                 for vid in self._parse_json(
295                     self._search_regex(
296                         r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
297                         webpage, 'video ids', group='ids'),
298                     video_id)]
299
300             return self.playlist_result(entries, video_id)
301         else:
302             _, info_dict = self._extract_from_url(
303                 self._VIDEO_PAGE_TEMPLATE % video_id,
304                 video_id, fatal_if_no_video=True)
305             return info_dict