1 from __future__ import unicode_literals
7 from .common import InfoExtractor
21 class LinuxAcademyIE(InfoExtractor):
# NOTE(review): this listing is elided — the embedded original line numbers
# jump, so _VALID_URL and _TESTS are only partially visible. Comments below
# describe only what the visible fragments show.
#
# _VALID_URL fragments: the pattern accepts either a single-lesson page
# (/cp/courses/lesson/course/<chapter_id>/lesson/<lesson_id>) or a whole
# course/module page (/cp/modules/view/id/<course_id>).
24 (?:www\.)?linuxacademy\.com/cp/
26 courses/lesson/course/(?P<chapter_id>\d+)/lesson/(?P<lesson_id>\d+)|
27 modules/view/id/(?P<course_id>\d+)
# _TESTS fragments: one lesson test (download skipped), two only_matching
# URL checks, and one course test — the real-download tests are skipped
# because they require Linux Academy account credentials.
31 'url': 'https://linuxacademy.com/cp/courses/lesson/course/1498/lesson/2/module/154',
35 'title': "Introduction to the Practitioner's Brief",
38 'skip_download': True,
40 'skip': 'Requires Linux Academy account credentials',
42 'url': 'https://linuxacademy.com/cp/courses/lesson/course/1498/lesson/2',
43 'only_matching': True,
45 'url': 'https://linuxacademy.com/cp/modules/view/id/154',
48 'title': 'AWS Certified Cloud Practitioner',
49 'description': 'md5:039db7e60e4aac9cf43630e0a75fa834',
52 'skip': 'Requires Linux Academy account credentials',
# OAuth endpoint and client parameters for the Auth0-hosted login used by
# _real_initialize.
55 _AUTHORIZE_URL = 'https://login.linuxacademy.com/authorize'
56 _ORIGIN_URL = 'https://linuxacademy.com'
57 _CLIENT_ID = 'KaWxNn1C2Gc7n83W9OFeXltd8Utb5vvx'
# Machine name for ~/.netrc credential lookup (youtube-dl convention).
58 _NETRC_MACHINE = 'linuxacademy'
60 def _real_initialize(self):
# Logs in to Linux Academy through its Auth0-hosted login flow so later
# requests carry an authenticated session.
# NOTE(review): the listing is elided — lines are missing between the
# visible fragments; comments describe only what is shown.
64 username, password = self._get_login_info()
# Helper building a random state/nonce string from URL-unreserved chars.
# NOTE(review): the uppercase run is missing 'W' ('...UVXYZ') — present in
# the visible source as-is; harmless for a nonce, but looks like a typo.
70 random.choice('0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~')
# Step 1: GET the /authorize page to start an implicit-grant OAuth flow
# ('token id_token'); state and nonce are freshly generated random strings.
73 webpage, urlh = self._download_webpage_handle(
74 self._AUTHORIZE_URL, None, 'Downloading authorize page', query={
75 'client_id': self._CLIENT_ID,
76 'response_type': 'token id_token',
77 'redirect_uri': self._ORIGIN_URL,
78 'scope': 'openid email user_impersonation profile',
79 'audience': self._ORIGIN_URL,
80 'state': random_string(),
81 'nonce': random_string(),
# Step 2: the authorize page embeds base64-encoded JSON login config inside
# an atob(...) call; extract, base64-decode and JSON-parse it.
84 login_data = self._parse_json(
86 r'atob\(\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
87 'login info', group='value'), None,
88 transform_source=lambda x: compat_b64decode(x).decode('utf-8')
# Extra fields merged into the decoded login payload before posting.
92 'client_id': self._CLIENT_ID,
93 'redirect_uri': self._ORIGIN_URL,
94 'tenant': 'lacausers',
95 'connection': 'Username-Password-Authentication',
# URL reached after the authorize redirect; reused as Referer below.
101 login_state_url = urlh.geturl()
# Step 3: POST the credentials payload as JSON to the login endpoint.
104 login_page = self._download_webpage(
105 'https://login.linuxacademy.com/usernamepassword/login', None,
106 'Downloading login page', data=json.dumps(login_data).encode(),
108 'Content-Type': 'application/json',
109 'Origin': 'https://login.linuxacademy.com',
110 'Referer': login_state_url,
# A 401 carries a JSON error body; surface its description (or code) to the
# user as an expected ExtractorError instead of a raw HTTP failure.
112 except ExtractorError as e:
113 if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
114 error = self._parse_json(e.cause.read(), None)
115 message = error.get('description') or error['code']
116 raise ExtractorError(
117 '%s said: %s' % (self.IE_NAME, message), expected=True)
# Step 4: submit the hidden-input form from the login page to the
# /login/callback endpoint; the resulting redirect URL carries the token.
120 callback_page, urlh = self._download_webpage_handle(
121 'https://login.linuxacademy.com/login/callback', None,
122 'Downloading callback page',
123 data=urlencode_postdata(self._hidden_inputs(login_page)),
125 'Content-Type': 'application/x-www-form-urlencoded',
126 'Origin': 'https://login.linuxacademy.com',
127 'Referer': login_state_url,
# Step 5: pull access_token out of the final redirect URL...
130 access_token = self._search_regex(
131 r'access_token=([^=&]+)', urlh.geturl(),
# ...and validate it against linuxacademy.com to establish session cookies.
134 self._download_webpage(
135 'https://linuxacademy.com/cp/login/tokenValidateLogin/token/%s'
136 % access_token, None, 'Downloading token validation page')
138 def _real_extract(self, url):
# NOTE(review): truncated — this definition continues beyond the visible
# listing (the single-lesson branch is cut off mid-way), and intermediate
# lines are elided. Comments cover only the visible fragments.
139 mobj = re.match(self._VALID_URL, url)
140 chapter_id, lecture_id, course_id = mobj.group('chapter_id', 'lesson_id', 'course_id')
# Course URLs yield course_id; lesson URLs get a '<chapter>-<lesson>'
# composite id instead.
141 item_id = course_id if course_id else '%s-%s' % (chapter_id, lecture_id)
143 webpage = self._download_webpage(url, item_id)
# Course branch: collect every lesson link on the page (deduplicated via
# orderedSet, order preserved) as url_result entries handled by this IE.
149 urljoin(url, lesson_url), ie=LinuxAcademyIE.ie_key())
150 for lesson_url in orderedSet(re.findall(
151 r'<a[^>]+\bhref=["\'](/cp/courses/lesson/course/\d+/lesson/\d+/module/\d+)',
# Course title/description are optional (default=None): tried first from a
# 'course-title' element, then from JS 'var title'/'var description'.
153 title = unescapeHTML(self._html_search_regex(
154 (r'class=["\']course-title["\'][^>]*>(?P<value>[^<]+)',
155 r'var\s+title\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'),
156 webpage, 'title', default=None, group='value'))
157 description = unescapeHTML(self._html_search_regex(
158 r'var\s+description\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
159 webpage, 'description', default=None, group='value'))
160 return self.playlist_result(entries, course_id, title, description)
# Lesson branch: pull the JWPlayer config (HLS formats) and the lesson
# title from a '>Lecture :' label or the JS 'lessonName' variable.
163 info = self._extract_jwplayer_data(
164 webpage, item_id, require_title=False, m3u8_id='hls',)
165 title = self._search_regex(
166 (r'>Lecture\s*:\s*(?P<value>[^<]+)',
167 r'lessonName\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage,
168 'title', group='value')