1 from __future__ import unicode_literals
10 from .common import InfoExtractor
11 from ..compat import compat_str
15 get_element_by_attribute,
23 class InstagramIE(InfoExtractor):
# Extractor for a single Instagram post (https://instagram.com/p/<shortcode>),
# including /embed/ URL variants. Metadata comes primarily from the page's
# window._sharedData JSON, with raw-HTML regex fallbacks (see _real_extract).
# NOTE(review): this dump is line-sampled; the original file's line numbers
# are fused into each line and several lines are missing.
24 _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+))'
# _TESTS entries (partially visible): a downloadable video post, a
# metadata-only check (skip_download), a multi-video post, and two
# only_matching URL-shape checks.
26 'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
27 'md5': '0d2da106a9d2631273e192b372806516',
31 'title': 'Video by naomipq',
32 'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
33 'thumbnail': r're:^https?://.*\.jpg',
34 'timestamp': 1371748545,
35 'upload_date': '20130620',
36 'uploader_id': 'naomipq',
37 'uploader': 'Naomi Leonor Phan-Quang',
44 'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
48 'title': 'Video by britneyspears',
49 'thumbnail': r're:^https?://.*\.jpg',
50 'timestamp': 1453760977,
51 'upload_date': '20160125',
52 'uploader_id': 'britneyspears',
53 'uploader': 'Britney Spears',
59 'skip_download': True,
# Multi-video ("sidecar") post — extracted as a playlist titled 'Post by ...'.
63 'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
85 'title': 'Post by instagram',
86 'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
89 'url': 'https://instagram.com/p/-Cmh1cukG2/',
90 'only_matching': True,
92 'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
93 'only_matching': True,
97 def _extract_embed_url(webpage):
99 r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
102 return mobj.group('url')
104 blockquote_el = get_element_by_attribute(
105 'class', 'instagram-media', webpage)
106 if blockquote_el is None:
110 r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
112 return mobj.group('link')
114 def _real_extract(self, url):
# Re-match the URL to recover both the canonical post URL and the shortcode id.
115 mobj = re.match(self._VALID_URL, url)
116 video_id = mobj.group('id')
117 url = mobj.group('url')
119 webpage = self._download_webpage(url, video_id)
# All metadata fields are pre-declared in one tuple assignment (truncated in
# this dump) and then filled in from the page's window._sharedData JSON.
121 (video_url, description, thumbnail, timestamp, uploader,
122 uploader_id, like_count, comment_count, comments, height,
# Parse window._sharedData; default='{}' plus fatal=False make this
# best-effort — the raw-HTML regex fallbacks further below cover failure.
125 shared_data = self._parse_json(
127 r'window\._sharedData\s*=\s*({.+?});',
128 webpage, 'shared data', default='{}'),
129 video_id, fatal=False)
# Two known page layouts: newer GraphQL ('graphql'/'shortcode_media') and the
# legacy 'media' key; try_get probes both.
133 (lambda x: x['entry_data']['PostPage'][0]['graphql']['shortcode_media'],
134 lambda x: x['entry_data']['PostPage'][0]['media']),
137 video_url = media.get('video_url')
138 height = int_or_none(media.get('dimensions', {}).get('height'))
139 width = int_or_none(media.get('dimensions', {}).get('width'))
# Caption lives under edge_media_to_caption in GraphQL, or plain 'caption'
# in the legacy layout.
140 description = try_get(
141 media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
142 compat_str) or media.get('caption')
143 thumbnail = media.get('display_src')
144 timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
145 uploader = media.get('owner', {}).get('full_name')
146 uploader_id = media.get('owner', {}).get('username')
# Counts also have a GraphQL name ('edge_media_<key>') and a legacy name
# ('<kind>s'); try both. Closes over `media` above.
148 def get_count(key, kind):
149 return int_or_none(try_get(
150 media, (lambda x: x['edge_media_%s' % key]['count'],
151 lambda x: x['%ss' % kind]['count'])))
152 like_count = get_count('preview_like', 'like')
153 comment_count = get_count('to_comment', 'comment')
# Flatten the legacy comments node list, skipping entries without text.
156 'author': comment.get('user', {}).get('username'),
157 'author_id': comment.get('user', {}).get('id'),
158 'id': comment.get('id'),
159 'text': comment.get('text'),
160 'timestamp': int_or_none(comment.get('created_at')),
161 } for comment in media.get(
162 'comments', {}).get('nodes', []) if comment.get('text')]
# Multi-video posts ("sidecar"): build one entry per video child and return
# a playlist instead of a single info dict.
165 media, lambda x: x['edge_sidecar_to_children']['edges'],
169 for edge_num, edge in enumerate(edges, start=1):
170 node = try_get(edge, lambda x: x['node'], dict)
173 node_video_url = try_get(node, lambda x: x['video_url'], compat_str)
174 if not node_video_url:
177 'id': node.get('shortcode') or node['id'],
178 'title': 'Video %d' % edge_num,
179 'url': node_video_url,
180 'thumbnail': node.get('display_url'),
181 'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
182 'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
183 'view_count': int_or_none(node.get('video_view_count')),
185 return self.playlist_result(
187 'Post by %s' % uploader_id if uploader_id else None,
# Fallbacks when _sharedData yielded nothing: scrape OpenGraph tags and
# inline JSON fragments out of the raw HTML.
191 video_url = self._og_search_video_url(webpage, secure=False)
200 uploader_id = self._search_regex(
201 r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
202 webpage, 'uploader id', fatal=False)
205 description = self._search_regex(
206 r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
# The scraped caption is JSON-escaped; lowercase_escape decodes \uXXXX runs.
207 if description is not None:
208 description = lowercase_escape(description)
211 thumbnail = self._og_search_thumbnail(webpage)
# Final single-video info dict (the 'id'/'formats' lines fall in a gap of
# this dump).
217 'title': 'Video by %s' % uploader_id,
218 'description': description,
219 'thumbnail': thumbnail,
220 'timestamp': timestamp,
221 'uploader_id': uploader_id,
222 'uploader': uploader,
223 'like_count': like_count,
224 'comment_count': comment_count,
225 'comments': comments,
# Profile extractor: yields every video a user has posted, as a playlist.
# The JavaScript-looking lines below (original lines ~250-333) are NOT Python:
# they are the interior of the _SIGN_CODE string constant — an embedded JS
# MD5/HMAC-MD5 implementation that _entries runs under phantomjs to compute
# the X-Instagram-GIS request signature. Its opening/closing quotes fall in
# gaps of this dump; do not insert anything between those lines.
229 class InstagramUserIE(InfoExtractor):
230 _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
231 IE_DESC = 'Instagram user profile'
232 IE_NAME = 'instagram:user'
234 'url': 'https://instagram.com/porsche',
241 'extract_flat': True,
242 'skip_download': True,
250 var r = (65535 & e) + (65535 & t);
251 return (e >> 16) + (t >> 16) + (r >> 16) << 16 | 65535 & r
253 function a(e, t, r, n, o, a) {
254 return i((s = i(i(t, e), i(n, a))) << (c = o) | s >>> 32 - c, r);
257 function s(e, t, r, n, o, i, s) {
258 return a(t & r | ~t & n, e, t, o, i, s)
260 function c(e, t, r, n, o, i, s) {
261 return a(t & n | r & ~n, e, t, o, i, s)
263 function u(e, t, r, n, o, i, s) {
264 return a(t ^ r ^ n, e, t, o, i, s)
266 function l(e, t, r, n, o, i, s) {
267 return a(r ^ (t | ~n), e, t, o, i, s)
271 e[t >> 5] |= 128 << t % 32,
272 e[14 + (t + 64 >>> 9 << 4)] = t;
277 for (r = 0; r < e.length; r += 16)
282 f = l(f = l(f = l(f = l(f = u(f = u(f = u(f = u(f = c(f = c(f = c(f = c(f = s(f = s(f = s(f = s(f, h = s(h, g = s(g, d = s(d, f, h, g, e[r], 7, -680876936), f, h, e[r + 1], 12, -389564586), d, f, e[r + 2], 17, 606105819), g, d, e[r + 3], 22, -1044525330), h = s(h, g = s(g, d = s(d, f, h, g, e[r + 4], 7, -176418897), f, h, e[r + 5], 12, 1200080426), d, f, e[r + 6], 17, -1473231341), g, d, e[r + 7], 22, -45705983), h = s(h, g = s(g, d = s(d, f, h, g, e[r + 8], 7, 1770035416), f, h, e[r + 9], 12, -1958414417), d, f, e[r + 10], 17, -42063), g, d, e[r + 11], 22, -1990404162), h = s(h, g = s(g, d = s(d, f, h, g, e[r + 12], 7, 1804603682), f, h, e[r + 13], 12, -40341101), d, f, e[r + 14], 17, -1502002290), g, d, e[r + 15], 22, 1236535329), h = c(h, g = c(g, d = c(d, f, h, g, e[r + 1], 5, -165796510), f, h, e[r + 6], 9, -1069501632), d, f, e[r + 11], 14, 643717713), g, d, e[r], 20, -373897302), h = c(h, g = c(g, d = c(d, f, h, g, e[r + 5], 5, -701558691), f, h, e[r + 10], 9, 38016083), d, f, e[r + 15], 14, -660478335), g, d, e[r + 4], 20, -405537848), h = c(h, g = c(g, d = c(d, f, h, g, e[r + 9], 5, 568446438), f, h, e[r + 14], 9, -1019803690), d, f, e[r + 3], 14, -187363961), g, d, e[r + 8], 20, 1163531501), h = c(h, g = c(g, d = c(d, f, h, g, e[r + 13], 5, -1444681467), f, h, e[r + 2], 9, -51403784), d, f, e[r + 7], 14, 1735328473), g, d, e[r + 12], 20, -1926607734), h = u(h, g = u(g, d = u(d, f, h, g, e[r + 5], 4, -378558), f, h, e[r + 8], 11, -2022574463), d, f, e[r + 11], 16, 1839030562), g, d, e[r + 14], 23, -35309556), h = u(h, g = u(g, d = u(d, f, h, g, e[r + 1], 4, -1530992060), f, h, e[r + 4], 11, 1272893353), d, f, e[r + 7], 16, -155497632), g, d, e[r + 10], 23, -1094730640), h = u(h, g = u(g, d = u(d, f, h, g, e[r + 13], 4, 681279174), f, h, e[r], 11, -358537222), d, f, e[r + 3], 16, -722521979), g, d, e[r + 6], 23, 76029189), h = u(h, g = u(g, d = u(d, f, h, g, e[r + 9], 4, -640364487), f, h, e[r + 12], 11, -421815835), d, f, e[r + 15], 16, 530742520), g, 
d, e[r + 2], 23, -995338651), h = l(h, g = l(g, d = l(d, f, h, g, e[r], 6, -198630844), f, h, e[r + 7], 10, 1126891415), d, f, e[r + 14], 15, -1416354905), g, d, e[r + 5], 21, -57434055), h = l(h, g = l(g, d = l(d, f, h, g, e[r + 12], 6, 1700485571), f, h, e[r + 3], 10, -1894986606), d, f, e[r + 10], 15, -1051523), g, d, e[r + 1], 21, -2054922799), h = l(h, g = l(g, d = l(d, f, h, g, e[r + 8], 6, 1873313359), f, h, e[r + 15], 10, -30611744), d, f, e[r + 6], 15, -1560198380), g, d, e[r + 13], 21, 1309151649), h = l(h, g = l(g, d = l(d, f, h, g, e[r + 4], 6, -145523070), f, h, e[r + 11], 10, -1120210379), d, f, e[r + 2], 15, 718787259), g, d, e[r + 9], 21, -343485551),
290 var t, r = "", n = 32 * e.length;
291 for (t = 0; t < n; t += 8)
292 r += String.fromCharCode(e[t >> 5] >>> t % 32 & 255);
297 for (r[(e.length >> 2) - 1] = void 0,
298 t = 0; t < r.length; t += 1)
300 var n = 8 * e.length;
301 for (t = 0; t < n; t += 8)
302 r[t >> 5] |= (255 & e.charCodeAt(t / 8)) << t % 32;
307 for (r = 0; r < e.length; r += 1)
309 n += "0123456789abcdef".charAt(t >>> 4 & 15) + "0123456789abcdef".charAt(15 & t);
313 return unescape(encodeURIComponent(e))
317 return d(p(f(e), 8 * e.length))
321 return function(e, t) {
322 var r, n, o = f(e), i = [], a = [];
323 for (i[15] = a[15] = void 0,
324 o.length > 16 && (o = p(o, 8 * e.length)),
325 r = 0; r < 16; r += 1)
326 i[r] = 909522486 ^ o[r],
327 a[r] = 1549556828 ^ o[r];
328 return n = p(i.concat(f(t)), 512 + 8 * t.length),
329 d(p(a.concat(n), 640))
332 function v(e, t, r) {
333 return t ? r ? m(t, e) : h(m(t, e)) : r ? b(e) : h(b(e))
340 def _entries(self, data):
# `node` is a free variable: get_count reads whichever timeline node the
# enclosing pagination loop is currently processing.
341 def get_count(suffix):
342 return int_or_none(try_get(
343 node, lambda x: x['edge_media_' + suffix]['count']))
345 uploader_id = data['entry_data']['ProfilePage'][0]['graphql']['user']['id']
346 csrf_token = data['config']['csrf_token']
# Hard-coded fallback rhx_gis value, used (with the csrf token, User-Agent
# and query variables) to salt the request signature below.
347 rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'
349 self._set_cookie('instagram.com', 'ig_pr', '1')
# Signature helper: appends a call to the embedded JS sign() to _SIGN_CODE,
# writes the script to the temp file created in _real_initialize and runs it
# under phantomjs, capturing the printed digest from stdout.
# NOTE(review): the signature appears to be a plain MD5 of the salted string;
# Python's hashlib could compute it without spawning phantomjs — worth
# simplifying (would also drop the phantomjs dependency).
352 js_code = self._SIGN_CODE + "console.log(sign('%s')); phantom.exit();" % s
353 with open(self._phantomjs_script.name, 'w') as f:
355 p = subprocess.Popen(
356 ['phantomjs', '--ssl-protocol=any', f.name],
357 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
358 gis, err = p.communicate()
359 if p.returncode != 0:
# NOTE(review): '\n:' looks transposed — presumably meant ':\n' so the
# phantomjs stderr starts on its own line.
360 raise ExtractorError('Failed to sign request\n:' + err.decode('utf-8'))
361 return gis.decode('utf-8').strip()
# Paginate the GraphQL timeline endpoint; every page request carries a fresh
# X-Instagram-GIS header computed from the current variables payload.
364 for page_num in itertools.count(1):
365 variables = json.dumps({
372 % (rhx_gis, csrf_token, std_headers['User-Agent'], variables))
373 media = self._download_json(
374 'https://www.instagram.com/graphql/query/', uploader_id,
375 'Downloading JSON page %d' % page_num, headers={
376 'X-Requested-With': 'XMLHttpRequest',
377 'X-Instagram-GIS': gis,
379 'query_hash': '472f257a40c653c64c666ce877d59d2b',
380 'variables': variables,
381 })['data']['user']['edge_owner_to_timeline_media']
383 edges = media.get('edges')
384 if not edges or not isinstance(edges, list):
# Yield one url_result per video node (non-video nodes are skipped),
# enriched with whatever metadata the timeline node already carries.
388 node = edge.get('node')
389 if not node or not isinstance(node, dict):
391 if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
393 video_id = node.get('shortcode')
397 info = self.url_result(
398 'https://instagram.com/p/%s/' % video_id,
399 ie=InstagramIE.ie_key(), video_id=video_id)
401 description = try_get(
402 node, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
404 thumbnail = node.get('thumbnail_src') or node.get('display_src')
405 timestamp = int_or_none(node.get('taken_at_timestamp'))
407 comment_count = get_count('to_comment')
408 like_count = get_count('preview_like')
409 view_count = int_or_none(node.get('video_view_count'))
412 'description': description,
413 'thumbnail': thumbnail,
414 'timestamp': timestamp,
415 'comment_count': comment_count,
416 'like_count': like_count,
417 'view_count': view_count,
# Stop when page_info is missing, reports no next page, or lacks a usable
# end_cursor string for the next request.
422 page_info = media.get('page_info')
423 if not page_info or not isinstance(page_info, dict):
426 has_next_page = page_info.get('has_next_page')
427 if not has_next_page:
430 cursor = page_info.get('end_cursor')
431 if not cursor or not isinstance(cursor, compat_str):
434 def _real_initialize(self):
# Fail fast: phantomjs is required by _entries to sign GraphQL requests.
435 if not check_executable('phantomjs', ['-v']):
436 raise ExtractorError(
437 'PhantomJS executable not found in PATH, download it from http://phantomjs.org',
# The temp file is created closed so phantomjs can reopen it by name
# (necessary on Windows); delete=False means we must unlink it ourselves.
439 self._phantomjs_script = tempfile.NamedTemporaryFile(delete=False)
440 self._phantomjs_script.close()
# Orphaned line from a cleanup method (its `def` line — presumably __del__ —
# is missing from this dump): removes the temp script on teardown.
443 os.unlink(self._phantomjs_script.name)
445 def _real_extract(self, url):
446 username = self._match_id(url)
448 webpage = self._download_webpage(url, username)
450 data = self._parse_json(
452 r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'),
455 return self.playlist_result(
456 self._entries(data), username, username)