[facebook] Extend _VALID_URL
youtube_dl/extractor/facebook.py
from __future__ import unicode_literals

import json
import re
import socket

from .common import InfoExtractor
from ..compat import (
    compat_http_client,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    limit_length,
    urlencode_postdata,
)


class FacebookIE(InfoExtractor):
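    # Accepts the classic watch/embed endpoints (video/video.php, photo.php,
    # video.php and video/embed, keyed by a v= or video_id= query parameter)
    # as well as the /<page-name>/videos/<id>/ permalinks added by this change.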
    _VALID_URL = r'''(?x)
        https?://(?:\w+\.)?facebook\.com/
        (?:[^#]*?\#!/)?
        (?:
            (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
            (?:v|video_id)=|
            [^/]+/videos/
        )
        (?P<id>[0-9]+)
        (?:.*)'''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'
    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
        }
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }]

    def _login(self):
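        # Credentials come from youtube-dl's --username/--password options or a
        # .netrc entry for the 'facebook' machine (_NETRC_MACHINE above); if
        # none are supplied, the extractor proceeds unauthenticated.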
        (useremail, password) = self._get_login_info()
        if useremail is None:
            return

        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
        login_page_req.add_header('Cookie', 'locale=en_US')
        login_page = self._download_webpage(login_page_req, None,
                                            note='Downloading login page',
                                            errnote='Unable to download login page')
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
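            # Mirrors the browser's login form; lsd and lgnrnd are the hidden
            # tokens scraped from the login page above.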
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                                                   note='Logging in', errnote='unable to fetch login page')
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded the login rate limit (~3/min). Check your credentials or wait before retrying.')
                return

            check_form = {
                'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
                'h': self._search_regex(
                    r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
                'name_action_selected': 'dont_save',
            }
            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                                                    note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to log in with your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning('unable to log in: %s' % compat_str(err))
            return

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        video_id = self._match_id(url)
        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
        webpage = self._download_webpage(url, video_id)

        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
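        # The watch page sets up its Flash player via inline JavaScript; the
        # JSON array of [name, value] pairs we need is the literal text that
        # sits between these two script fragments.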
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if not m:
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')
        data = dict(json.loads(m.group(1)))
        params_raw = compat_urllib_parse.unquote(data['params'])
        params = json.loads(params_raw)
        video_data = params['video_data'][0]

        formats = []
        for quality in ['sd', 'hd']:
            src = video_data.get('%s_src' % quality)
            if src is not None:
                formats.append({
                    'format_id': quality,
                    'url': src,
                })
        if not formats:
            raise ExtractorError('Cannot find video formats')

        video_title = self._html_search_regex(
            r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title',
            fatal=False)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'duration': int_or_none(video_data.get('video_duration')),
            'thumbnail': video_data.get('thumbnail_src'),
        }
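
For reference, a minimal sketch of exercising the extended pattern through youtube-dl's public API (not part of the patch; it assumes youtube_dl is importable and that the video, taken from _TESTS above, is still publicly accessible):

import youtube_dl

# download=False returns the info dict built by FacebookIE._real_extract()
with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info(
        'https://www.facebook.com/amogood/videos/1618742068337349/',
        download=False)
    print(info['id'], info['title'], [f['format_id'] for f in info['formats']])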