[reddit] Add extractor
[youtube-dl] youtube_dl/extractor/reddit.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    float_or_none,
)


class RedditIE(InfoExtractor):
    _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
    _TEST = {
        # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
        'url': 'https://v.redd.it/zv89llsvexdz',
        'md5': '0a070c53eba7ec4534d95a5a1259e253',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'zv89llsvexdz',
        },
        'params': {
            'format': 'bestvideo',
        },
    }

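    # v.redd.it exposes the same video as both an HLS and a DASH manifest;
    # formats from both are collected (non-fatally) so format selection can
    # pick the best available stream.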
    def _real_extract(self, url):
        video_id = self._match_id(url)

        formats = self._extract_m3u8_formats(
            'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,
            'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)

        formats.extend(self._extract_mpd_formats(
            'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,
            mpd_id='dash', fatal=False))

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video_id,
            'formats': formats,
        }


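# Comment-page URLs on reddit.com don't serve the media themselves: RedditRIE
# reads the post's JSON feed and hands the linked media URL (v.redd.it, imgur,
# streamable, youtube, ...) back via a url_transparent result.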
class RedditRIE(InfoExtractor):
    _VALID_URL = r'(?P<url>https?://(?:www\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }]

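    # Appending /.json to a comment-page URL returns the listing as JSON;
    # the first child of the first listing is the submission itself.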
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        url, video_id = mobj.group('url', 'id')

        data = self._download_json(
            url + '/.json', video_id)[0]['data']['children'][0]['data']

        video_url = data['url']

        # Avoid recursing into the same reddit URL
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)

        # Map reddit's over_18 flag onto an age limit; leave it unset when
        # the flag is missing from the JSON.
        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        return {
            '_type': 'url_transparent',
            'url': video_url,
            'title': data.get('title'),
            'thumbnail': data.get('thumbnail'),
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }
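
For reference, a minimal standalone sketch of driving these extractors through the public youtube_dl API, using the same comment-page URL as the test above (youtube_dl being importable and network access are assumed; this snippet is not part of reddit.py):

import youtube_dl

# Mirror the test parameters: prefer bestvideo, don't actually download.
ydl_opts = {'format': 'bestvideo', 'skip_download': True}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info(
        'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        download=False)
    # RedditRIE returns a url_transparent result, so the final info dict
    # describes the underlying v.redd.it video.
    print(info.get('id'), info.get('title'))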