Merge branch '_stream' of https://github.com/pulpe/youtube-dl into pulpe-_stream
youtube_dl/extractor/googleplus.py
# coding: utf-8
from __future__ import unicode_literals

import datetime
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class GooglePlusIE(InfoExtractor):
    IE_DESC = 'Google Plus'
    _VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
    IE_NAME = 'plus.google'
    _TEST = {
        'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
        'info_dict': {
            'id': 'ZButuJc6CtH',
            'ext': 'flv',
            'upload_date': '20120613',
            'uploader': '井上ヨシマサ',
            'title': '嘆きの天使 降臨',
        }
    }

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')

        # Step 1, Retrieve post webpage to extract further information
        webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')

        self.report_extraction(video_id)

        # Extract upload date
        upload_date = self._html_search_regex(
            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                    ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
            webpage, 'upload date', fatal=False)
        if upload_date:
            # Convert the date string to the YYYYMMDD format used for upload_date
            upload_date = datetime.datetime.strptime(upload_date, '%Y-%m-%d')
            upload_date = upload_date.strftime('%Y%m%d')

        # Extract uploader
        uploader = self._html_search_regex(
            r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)

        # Extract title (only the first line of the description meta tag is used)
        video_title = self._html_search_regex(
            r'<meta name="Description" content="(.*?)[\n<"]',
            webpage, 'title', default='NA')

        # Step 2, Simulate clicking the image box to launch video
        DOMAIN = 'https://plus.google.com/'
        video_page = self._search_regex(
            r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
            webpage, 'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page

        webpage = self._download_webpage(video_page, video_id, 'Downloading video page')

        # Extract video links for all available sizes
        pattern = r'\d+,\d+,(\d+),"(http://redirector\.googlevideo\.com.*?)"'
        mobj = re.findall(pattern, webpage)
        if not mobj:
            raise ExtractorError('Unable to extract video links')

        # Sort by resolution (numeric sort on the value captured before each URL)
        links = sorted(mobj, key=lambda fmt: int(fmt[0]))

        # Pick the highest resolution and keep only its URL
        video_url = links[-1][1]
        # Decode escaped sequences such as \u0026 into their characters
        try:
            video_url = video_url.decode('unicode_escape')
        except AttributeError:  # Python 3
            video_url = bytes(video_url, 'ascii').decode('unicode-escape')

        return {
            'id': video_id,
            'url': video_url,
            'uploader': uploader,
            'upload_date': upload_date,
            'title': video_title,
            'ext': 'flv',
        }
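
For context, a minimal usage sketch showing how this extractor could be driven through youtube-dl's public API. It is illustrative only and not part of googleplus.py; it assumes the youtube_dl package is importable and reuses the URL from _TEST above. extract_info(url, download=False) resolves the URL through GooglePlusIE and returns the info dict without downloading the media.

# usage_sketch.py -- illustrative only, not part of the extractor
from __future__ import unicode_literals

import youtube_dl

ydl = youtube_dl.YoutubeDL({'quiet': True})
# download=False: run the extractor and return metadata without fetching the video
info = ydl.extract_info(
    'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
    download=False)
print(info['id'], info['upload_date'], info['title'])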