[wsj:article] Add extractor
youtube_dl/extractor/wsj.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    float_or_none,
    unified_strdate,
)


class WSJIE(InfoExtractor):
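    # Accepts the video-api iframe player URL, the wsj.com/video page URL
    # and the internal wsj:<guid> form used by WSJArticleIE below.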
    _VALID_URL = r'''(?x)
        (?:
            https?://video-api\.wsj\.com/api-video/player/iframe\.html\?guid=|
            https?://(?:www\.)?wsj\.com/video/[^/]+/|
            wsj:
        )
        (?P<id>[a-zA-Z0-9-]+)'''
    IE_DESC = 'Wall Street Journal'
    _TESTS = [{
        'url': 'http://video-api.wsj.com/api-video/player/iframe.html?guid=1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
        'md5': 'e230a5bb249075e40793b655a54a02e4',
        'info_dict': {
            'id': '1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
            'ext': 'mp4',
            'upload_date': '20150202',
            'uploader_id': 'jdesai',
            'creator': 'jdesai',
            'categories': list,  # a long list
            'duration': 90,
            'title': 'Bills Coach Rex Ryan Updates His Old Jets Tattoo',
        },
    }, {
        'url': 'http://www.wsj.com/video/can-alphabet-build-a-smarter-city/359DDAA8-9AC1-489C-82E6-0429C1E430E0.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

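        # find_all_videos.asp returns JSON metadata for the given GUID;
        # the request asks only for the fields used below.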
        api_url = (
            'http://video-api.wsj.com/api-video/find_all_videos.asp?'
            'type=guid&count=1&query=%s&fields=type,hls,videoMP4List,'
            'thumbnailList,author,description,name,duration,videoURL,'
            'titletag,formattedCreationDate,keywords,editor' % video_id)
        info = self._download_json(api_url, video_id)['items'][0]
        title = info.get('name', info.get('titletag'))

        formats = []

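        # 'videoURL' points at an Adobe HDS (f4m) manifest when present.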
        f4m_url = info.get('videoURL')
        if f4m_url:
            formats.extend(self._extract_f4m_formats(
                f4m_url, video_id, f4m_id='hds', fatal=False))

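        # 'hls' carries an HLS (m3u8) master playlist.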
        m3u8_url = info.get('hls')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, ext='mp4',
                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))

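        # Progressive MP4 renditions, each with its own bitrate, resolution
        # and frame rate metadata.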
        for v in info.get('videoMP4List', []):
            mp4_url = v.get('url')
            if not mp4_url:
                continue
            tbr = int_or_none(v.get('bitrate'))
            formats.append({
                'url': mp4_url,
                'format_id': 'http' + ('-%d' % tbr if tbr else ''),
                'tbr': tbr,
                'width': int_or_none(v.get('width')),
                'height': int_or_none(v.get('height')),
                'fps': float_or_none(v.get('fps')),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'formats': formats,
            # Thumbnails are conveniently in the correct format already
            'thumbnails': info.get('thumbnailList'),
            'creator': info.get('author'),
            'uploader_id': info.get('editor'),
            'duration': int_or_none(info.get('duration')),
            'upload_date': unified_strdate(info.get(
                'formattedCreationDate'), day_first=False),
            'title': title,
            'categories': info.get('keywords'),
        }


class WSJArticleIE(InfoExtractor):
    _VALID_URL = r'(?i)https?://(?:www\.)?wsj\.com/articles/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.wsj.com/articles/dont-like-china-no-pandas-for-you-1490366939?',
        'info_dict': {
            'id': '4B13FA62-1D8C-45DB-8EA1-4105CB20B362',
            'ext': 'mp4',
            'upload_date': '20170221',
            'uploader_id': 'ralcaraz',
            'title': 'Bao Bao the Panda Leaves for China',
        }
    }]

    def _real_extract(self, url):
        article_id = self._match_id(url)
        webpage = self._download_webpage(url, article_id)
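        # The article page embeds the video GUID in a data-src attribute;
        # hand it off to WSJIE through the internal wsj: URL form.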
        video_id = self._search_regex(r'data-src=["\']([A-Z0-9\-]+)',
                                      webpage, 'video id')
        return self.url_result('wsj:%s' % video_id, WSJIE.ie_key(), video_id)
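
The file above only defines the extractors; they still have to be registered so youtube-dl picks them up. A minimal sketch, assuming the extractor list lives in youtube_dl/extractor/extractors.py as in the current layout:

youtube_dl/extractor/extractors.py (excerpt)
from .wsj import (
    WSJIE,
    WSJArticleIE,
)

With that import in place, the included test cases should be runnable per the developer instructions, e.g. python test/test_download.py TestDownload.test_WSJ and TestDownload.test_WSJArticle.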