# youtube_dl/extractor/instagram.py
from __future__ import unicode_literals

import itertools
import hashlib
import json
import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_HTTPError,
)
from ..utils import (
    ExtractorError,
    get_element_by_attribute,
    int_or_none,
    lowercase_escape,
    std_headers,
    try_get,
    url_or_none,
)


class InstagramIE(InfoExtractor):
    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': 'naomipq',
            'uploader': 'Naomi Leonor Phan-Quang',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
    }, {
        # missing description
        'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
        'info_dict': {
            'id': 'BA-pQFBG8HZ',
            'ext': 'mp4',
            'title': 'Video by britneyspears',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1453760977,
            'upload_date': '20160125',
            'uploader_id': 'britneyspears',
            'uploader': 'Britney Spears',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # multi video post
        'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
        'playlist': [{
            'info_dict': {
                'id': 'BQ0dSaohpPW',
                'ext': 'mp4',
                'title': 'Video 1',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dTpOhuHT',
                'ext': 'mp4',
                'title': 'Video 2',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dT7RBFeF',
                'ext': 'mp4',
                'title': 'Video 3',
            },
        }],
        'info_dict': {
            'id': 'BQ0eAlwhDrw',
            'title': 'Post by instagram',
            'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
        },
    }, {
        'url': 'https://instagram.com/p/-Cmh1cukG2/',
        'only_matching': True,
    }, {
        'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_embed_url(webpage):
        mobj = re.search(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
            webpage)
        if mobj:
            return mobj.group('url')

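        # Fall back to the <blockquote class="instagram-media"> embed markup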
        blockquote_el = get_element_by_attribute(
            'class', 'instagram-media', webpage)
        if blockquote_el is None:
            return

        mobj = re.search(
            r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
        if mobj:
            return mobj.group('link')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        url = mobj.group('url')

        webpage = self._download_webpage(url, video_id)

        (video_url, description, thumbnail, timestamp, uploader,
         uploader_id, like_count, comment_count, comments, height,
         width) = [None] * 11

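        # Post metadata is embedded in the page as JSON assigned to
        # window._sharedData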
        shared_data = self._parse_json(
            self._search_regex(
                r'window\._sharedData\s*=\s*({.+?});',
                webpage, 'shared data', default='{}'),
            video_id, fatal=False)
        if shared_data:
            media = try_get(
                shared_data,
                (lambda x: x['entry_data']['PostPage'][0]['graphql']['shortcode_media'],
                 lambda x: x['entry_data']['PostPage'][0]['media']),
                dict)
            if media:
                video_url = media.get('video_url')
                height = int_or_none(media.get('dimensions', {}).get('height'))
                width = int_or_none(media.get('dimensions', {}).get('width'))
                description = try_get(
                    media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
                    compat_str) or media.get('caption')
                thumbnail = media.get('display_src')
                timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
                uploader = media.get('owner', {}).get('full_name')
                uploader_id = media.get('owner', {}).get('username')

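                # Counts may appear under the newer edge_media_* keys or the
                # legacy likes/comments keys, so both are tried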
                def get_count(key, kind):
                    return int_or_none(try_get(
                        media, (lambda x: x['edge_media_%s' % key]['count'],
                                lambda x: x['%ss' % kind]['count'])))
                like_count = get_count('preview_like', 'like')
                comment_count = get_count('to_comment', 'comment')

                comments = [{
                    'author': comment.get('user', {}).get('username'),
                    'author_id': comment.get('user', {}).get('id'),
                    'id': comment.get('id'),
                    'text': comment.get('text'),
                    'timestamp': int_or_none(comment.get('created_at')),
                } for comment in media.get(
                    'comments', {}).get('nodes', []) if comment.get('text')]
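                # Multi-video (sidecar) posts have no top-level video_url;
                # their children are listed under edge_sidecar_to_children
                # and are returned as a playlist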
                if not video_url:
                    edges = try_get(
                        media, lambda x: x['edge_sidecar_to_children']['edges'],
                        list) or []
                    if edges:
                        entries = []
                        for edge_num, edge in enumerate(edges, start=1):
                            node = try_get(edge, lambda x: x['node'], dict)
                            if not node:
                                continue
                            node_video_url = url_or_none(node.get('video_url'))
                            if not node_video_url:
                                continue
                            entries.append({
                                'id': node.get('shortcode') or node['id'],
                                'title': 'Video %d' % edge_num,
                                'url': node_video_url,
                                'thumbnail': node.get('display_url'),
                                'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
                                'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
                                'view_count': int_or_none(node.get('video_view_count')),
                            })
                        return self.playlist_result(
                            entries, video_id,
                            'Post by %s' % uploader_id if uploader_id else None,
                            description)

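        # Fall back to Open Graph tags and inline JSON when the shared data
        # is missing or incomplete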
        if not video_url:
            video_url = self._og_search_video_url(webpage, secure=False)

        formats = [{
            'url': video_url,
            'width': width,
            'height': height,
        }]

        if not uploader_id:
            uploader_id = self._search_regex(
                r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
                webpage, 'uploader id', fatal=False)

        if not description:
            description = self._search_regex(
                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
            if description is not None:
                description = lowercase_escape(description)

        if not thumbnail:
            thumbnail = self._og_search_thumbnail(webpage)

        return {
            'id': video_id,
            'formats': formats,
            'ext': 'mp4',
            'title': 'Video by %s' % uploader_id,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'uploader': uploader,
            'like_count': like_count,
            'comment_count': comment_count,
            'comments': comments,
        }


class InstagramPlaylistIE(InfoExtractor):
    # A superclass for handling any kind of query based on GraphQL which
    # results in a playlist.

    _gis_tmpl = None  # used to cache GIS request type

    def _parse_graphql(self, webpage, item_id):
        # Reads a webpage and returns its GraphQL data.
        return self._parse_json(
            self._search_regex(
                r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'),
            item_id)

    def _extract_graphql(self, data, url):
        # Parses GraphQL queries containing videos and generates a playlist.
        def get_count(suffix):
            return int_or_none(try_get(
                node, lambda x: x['edge_media_' + suffix]['count']))

        uploader_id = self._match_id(url)
        csrf_token = data['config']['csrf_token']
        rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'

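        # Page through the feed: request 12 items at a time and follow
        # end_cursor until has_next_page is false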
        cursor = ''
        for page_num in itertools.count(1):
            variables = {
                'first': 12,
                'after': cursor,
            }
            variables.update(self._query_vars_for(data))
            variables = json.dumps(variables)

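            # The X-Instagram-GIS header is an MD5 of a template string
            # (built from rhx_gis, the CSRF token and/or the User-Agent)
            # joined with the query variables; since the exact combination
            # varies, several candidate templates are tried below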
            if self._gis_tmpl:
                gis_tmpls = [self._gis_tmpl]
            else:
                gis_tmpls = [
                    '%s' % rhx_gis,
                    '',
                    '%s:%s' % (rhx_gis, csrf_token),
                    '%s:%s:%s' % (rhx_gis, csrf_token, std_headers['User-Agent']),
                ]

            # try all of the ways to generate a GIS query, and not only use the
            # first one that works, but cache it for future requests
            for gis_tmpl in gis_tmpls:
                try:
                    json_data = self._download_json(
                        'https://www.instagram.com/graphql/query/', uploader_id,
                        'Downloading JSON page %d' % page_num, headers={
                            'X-Requested-With': 'XMLHttpRequest',
                            'X-Instagram-GIS': hashlib.md5(
                                ('%s:%s' % (gis_tmpl, variables)).encode('utf-8')).hexdigest(),
                        }, query={
                            'query_hash': self._QUERY_HASH,
                            'variables': variables,
                        })
                    media = self._parse_timeline_from(json_data)
                    self._gis_tmpl = gis_tmpl
                    break
                except ExtractorError as e:
                    # if it's an error caused by a bad query, and there are
                    # more GIS templates to try, ignore it and keep trying
                    if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                        if gis_tmpl != gis_tmpls[-1]:
                            continue
                    raise

            edges = media.get('edges')
            if not edges or not isinstance(edges, list):
                break

            for edge in edges:
                node = edge.get('node')
                if not node or not isinstance(node, dict):
                    continue
                if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
                    continue
                video_id = node.get('shortcode')
                if not video_id:
                    continue

                info = self.url_result(
                    'https://instagram.com/p/%s/' % video_id,
                    ie=InstagramIE.ie_key(), video_id=video_id)

                description = try_get(
                    node, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
                    compat_str)
                thumbnail = node.get('thumbnail_src') or node.get('display_src')
                timestamp = int_or_none(node.get('taken_at_timestamp'))

                comment_count = get_count('to_comment')
                like_count = get_count('preview_like')
                view_count = int_or_none(node.get('video_view_count'))

                info.update({
                    'description': description,
                    'thumbnail': thumbnail,
                    'timestamp': timestamp,
                    'comment_count': comment_count,
                    'like_count': like_count,
                    'view_count': view_count,
                })

                yield info

            page_info = media.get('page_info')
            if not page_info or not isinstance(page_info, dict):
                break

            has_next_page = page_info.get('has_next_page')
            if not has_next_page:
                break

            cursor = page_info.get('end_cursor')
            if not cursor or not isinstance(cursor, compat_str):
                break

    def _real_extract(self, url):
        user_or_tag = self._match_id(url)
        webpage = self._download_webpage(url, user_or_tag)
        data = self._parse_graphql(webpage, user_or_tag)

        self._set_cookie('instagram.com', 'ig_pr', '1')

        return self.playlist_result(
            self._extract_graphql(data, url), user_or_tag, user_or_tag)


class InstagramUserIE(InstagramPlaylistIE):
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
    IE_DESC = 'Instagram user profile'
    IE_NAME = 'instagram:user'
    _TEST = {
        'url': 'https://instagram.com/porsche',
        'info_dict': {
            'id': 'porsche',
            'title': 'porsche',
        },
        'playlist_count': 5,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 5,
        }
    }

    _QUERY_HASH = '42323d64886122307be10013ad2dcc44',

    @staticmethod
    def _parse_timeline_from(data):
        # extracts the media timeline data from a GraphQL result
        return data['data']['user']['edge_owner_to_timeline_media']

    @staticmethod
    def _query_vars_for(data):
        # returns a dictionary of variables to add to the timeline query based
        # on the GraphQL of the original page
        return {
            'id': data['entry_data']['ProfilePage'][0]['graphql']['user']['id']
        }


class InstagramTagIE(InstagramPlaylistIE):
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)'
    IE_DESC = 'Instagram hashtag search'
    IE_NAME = 'instagram:tag'
    _TEST = {
        'url': 'https://instagram.com/explore/tags/lolcats',
        'info_dict': {
            'id': 'lolcats',
            'title': 'lolcats',
        },
        'playlist_count': 50,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 50,
        }
    }

    _QUERY_HASH = 'f92f56d47dc7a55b606908374b43a314',

    @staticmethod
    def _parse_timeline_from(data):
        # extracts the media timeline data from a GraphQL result
        return data['data']['hashtag']['edge_hashtag_to_media']

    @staticmethod
    def _query_vars_for(data):
        # returns a dictionary of variables to add to the timeline query based
        # on the GraphQL of the original page
        return {
            'tag_name':
                data['entry_data']['TagPage'][0]['graphql']['hashtag']['name']
        }