Merge pull request #9195 from remitamine/ffmpeg-pipe
youtube_dl/extractor/instagram.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    get_element_by_attribute,
    int_or_none,
    limit_length,
    lowercase_escape,
)


class InstagramIE(InfoExtractor):
    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+))'
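    # the named groups capture both the canonical post URL and the shortcode,
    # which doubles as the video id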
    _TESTS = [{
        'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'uploader_id': 'naomipq',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
        }
    }, {
        # missing description
        'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
        'info_dict': {
            'id': 'BA-pQFBG8HZ',
            'ext': 'mp4',
            'uploader_id': 'britneyspears',
            'title': 'Video by britneyspears',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://instagram.com/p/-Cmh1cukG2/',
        'only_matching': True,
    }, {
        'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_embed_url(webpage):
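        # two strategies: a direct <iframe> pointing at the /embed/ page, or a
        # blockquote embed snippet whose first <a> links to the post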
        mobj = re.search(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
            webpage)
        if mobj:
            return mobj.group('url')

        blockquote_el = get_element_by_attribute(
            'class', 'instagram-media', webpage)
        if blockquote_el is None:
            return

        mobj = re.search(
            r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
        if mobj:
            return mobj.group('link')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        url = mobj.group('url')

        webpage = self._download_webpage(url, video_id)
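        # uploader and caption live in the page's inline JSON, so pull them
        # out with targeted regexes instead of parsing the whole blob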
        uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
                                         webpage, 'uploader id', fatal=False)
        desc = self._search_regex(
            r'"caption":"(.+?)"', webpage, 'description', default=None)
        if desc is not None:
            desc = lowercase_escape(desc)

        return {
            'id': video_id,
            'url': self._og_search_video_url(webpage, secure=False),
            'ext': 'mp4',
            'title': 'Video by %s' % uploader_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader_id': uploader_id,
            'description': desc,
        }


class InstagramUserIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
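    # the {2,} quantifier keeps single-character path segments such as /p/
    # (individual posts) from being picked up as usernames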
    IE_DESC = 'Instagram user profile'
    IE_NAME = 'instagram:user'
    _TEST = {
        'url': 'https://instagram.com/porsche',
        'info_dict': {
            'id': 'porsche',
            'title': 'porsche',
        },
        'playlist_mincount': 2,
        'playlist': [{
            'info_dict': {
                'id': '614605558512799803_462752227',
                'ext': 'mp4',
                'title': '#Porsche Intelligent Performance.',
                'thumbnail': r're:^https?://.*\.jpg',
                'uploader': 'Porsche',
                'uploader_id': 'porsche',
                'timestamp': 1387486713,
                'upload_date': '20131219',
            },
        }],
        'params': {
            'extract_flat': True,
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        uploader_id = mobj.group('username')

        entries = []
        page_count = 0
        media_url = 'http://instagram.com/%s/media' % uploader_id
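        # walk the user's /media feed page by page; every request after the
        # first passes max_id so the endpoint returns the next batch of items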
        while True:
            page = self._download_json(
                media_url, uploader_id,
                note='Downloading page %d ' % (page_count + 1),
            )
            page_count += 1

            for it in page['items']:
                if it.get('type') != 'video':
                    continue
                like_count = int_or_none(it.get('likes', {}).get('count'))
                user = it.get('user', {})

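                # 'videos' maps quality labels to dicts carrying url/width/height;
                # turn each one into a youtube-dl format entry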
                formats = [{
                    'format_id': k,
                    'height': v.get('height'),
                    'width': v.get('width'),
                    'url': v['url'],
                } for k, v in it['videos'].items()]
                self._sort_formats(formats)

                thumbnails_el = it.get('images', {})
                thumbnail = thumbnails_el.get('thumbnail', {}).get('url')

                # 'caption' can be null, which maps to None in Python, so
                # it.get('caption', {}) may still return None; use `or {}` instead
                title = (it.get('caption') or {}).get('text', it['id'])

                entries.append({
                    'id': it['id'],
                    'title': limit_length(title, 80),
                    'formats': formats,
                    'thumbnail': thumbnail,
                    'webpage_url': it.get('link'),
                    'uploader': user.get('full_name'),
                    'uploader_id': user.get('username'),
                    'like_count': like_count,
                    'timestamp': int_or_none(it.get('created_time')),
                })

            if not page['items']:
                break
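            # ids look like '614605558512799803_462752227' (cf. the test above);
            # pagination expects only the part before the underscore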
            max_id = page['items'][-1]['id'].split('_')[0]
            media_url = (
                'http://instagram.com/%s/media?max_id=%s' % (
                    uploader_id, max_id))

        return {
            '_type': 'playlist',
            'entries': entries,
            'id': uploader_id,
            'title': uploader_id,
        }