[aftenposten] Add new extractor
[youtube-dl] / youtube_dl / extractor / aftenposten.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_iso8601,
    xpath_with_ns,
    xpath_text,
    find_xpath_attr,
)


class AftenpostenIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?aftenposten\.no/webtv/([^/]+/)*(?P<id>[^/]+)-\d+\.html'

    _TEST = {
        'url': 'http://www.aftenposten.no/webtv/serier-og-programmer/sweatshopenglish/TRAILER-SWEATSHOP---I-cant-take-any-more-7800835.html?paging=&section=webtv_serierogprogrammer_sweatshop_sweatshopenglish',
        'md5': 'fd828cd29774a729bf4d4425fe192972',
        'info_dict': {
            'id': '21039',
            'ext': 'mov',
            'title': 'TRAILER: "Sweatshop" - I can´t take any more',
            'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238',
            'timestamp': 1416927969,
            'upload_date': '20141125',
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

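        # The player markup exposes the numeric Xstream video id via a data-xs-id attribute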
        video_id = self._html_search_regex(
            r'data-xs-id="(\d+)"', webpage, 'video id')

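        # The actual metadata, formats and thumbnails are served by Xstream's feed API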
        data = self._download_xml(
            'http://frontend.xstream.dk/ap/feed/video/?platform=web&id=%s' % video_id, video_id)

        NS_MAP = {
            'atom': 'http://www.w3.org/2005/Atom',
            'xt': 'http://xstream.dk/',
            'media': 'http://search.yahoo.com/mrss/',
        }

        entry = data.find(xpath_with_ns('./atom:entry', NS_MAP))

        title = xpath_text(
            entry, xpath_with_ns('./atom:title', NS_MAP), 'title')
        description = xpath_text(
            entry, xpath_with_ns('./atom:summary', NS_MAP), 'description')
        timestamp = parse_iso8601(xpath_text(
            entry, xpath_with_ns('./atom:published', NS_MAP), 'upload date'))

        formats = []
        media_group = entry.find(xpath_with_ns('./media:group', NS_MAP))
        for media_content in media_group.findall(xpath_with_ns('./media:content', NS_MAP)):
            media_url = media_content.get('url')
            if not media_url:
                continue
            tbr = int_or_none(media_content.get('bitrate'))
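            # RTMP URLs are split into base URL, app and play path ('mp4:'-prefixed)
            # so the RTMP downloader can consume them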
            mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', media_url)
            if mobj:
                formats.append({
                    'url': mobj.group('url'),
                    'play_path': 'mp4:%s' % mobj.group('playpath'),
                    'app': mobj.group('app'),
                    'ext': 'flv',
                    'tbr': tbr,
                    'format_id': 'rtmp-%d' % tbr if tbr else 'rtmp',
                })
            else:
                formats.append({
                    'url': media_url,
                    'tbr': tbr,
                })
        self._sort_formats(formats)

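        # An atom:link with rel="original" points to the original source file; add it as an extra format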
        link = find_xpath_attr(
            entry, xpath_with_ns('./atom:link', NS_MAP), 'rel', 'original')
        if link is not None:
            formats.append({
                'url': link.get('href'),
                'format_id': link.get('rel'),
            })

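        # xt:splash elements carry the preview images along with their dimensions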
        thumbnails = [{
            'url': splash.get('url'),
            'width': int_or_none(splash.get('width')),
            'height': int_or_none(splash.get('height')),
        } for splash in media_group.findall(xpath_with_ns('./xt:splash', NS_MAP))]

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'formats': formats,
            'thumbnails': thumbnails,
        }
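For the extractor to actually be picked up, it also needs to be wired into youtube_dl/extractor/__init__.py alongside the other extractors. A minimal sketch of that hook-up (assuming the project's usual alphabetically sorted import list; not part of the file above):

    from .aftenposten import AftenpostenIE

With that in place, the bundled _TEST can be exercised via the standard runner, e.g. python test/test_download.py TestDownload.test_Aftenposten.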