So far this simply gets the correct webpage; it does not yet parse it to extract the video information.
youtube_dl/extractor/weibo.py:
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor

# Python 3-only imports; youtube-dl would normally use its compat wrappers
# (youtube_dl/compat.py) so the extractor also runs on Python 2
from urllib.request import Request
from urllib.parse import urlencode
import json
import random as rnd


class WeiboIE(InfoExtractor):
    _VALID_URL = r'https?://weibo\.com/[0-9]+/(?P<id>[a-zA-Z0-9]+)'
    _TEST = {
        'url': 'https://weibo.com/6275294458/Fp6RGfbff?from=page_1005056275294458_profile&wvr=6&mod=weibotime&type=comment',
        'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
            'thumbnail': r're:^https?://.*\.jpg$',
            # TODO more properties, either as:
            # * A value
            # * MD5 checksum; start the string with md5:
            # * A regular expression; start the string with re:
            # * Any Python type (for example int or float)
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
            'Upgrade-Insecure-Requests': '1',
        }
        # first visit; the final URL becomes the Referer for the genvisitor request
        webpage, urlh = self._download_webpage_handle(
            url, video_id, headers=headers, note='first visit the page')

        visitor_url = urlh.geturl()
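        # Assumption (not verified): when the visitor cookies are already set,
        # weibo.com may not redirect through passport.weibo.com at all, so it
        # could be worth checking visitor_url here and skipping the whole
        # genvisitor/incarnate round trip below when it is not needed.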
        data = urlencode({
            'cb': 'gen_callback',
            'fp': '{"os":"2","browser":"Gecko57,0,0,0","fonts":"undefined","screenInfo":"1440*900*24","plugins":""}',
        }).encode()
        headers = {
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept': '*/*',
            'Referer': visitor_url,
        }

        r_genvisitor = Request(
            'https://passport.weibo.com/visitor/genvisitor',
            data=data,
            headers=headers,
            method='POST')
        webpage, urlh = self._download_webpage_handle(
            r_genvisitor, video_id, note='gen visitor')
        print('webpage', webpage)

        # the response looks like "gen_callback && gen_callback({...})"
        p = webpage.split('&&')[1]
        i1 = p.find('{')
        i2 = p.rfind('}')
        j = p[i1:i2 + 1]  # the JSON payload of the callback
        d = json.loads(j)
        tid = d['data']['tid']
        cnfd = '%03d' % d['data']['confidence']
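        # youtube_dl.utils.strip_jsonp unwraps "callback({...})"-style responses;
        # combined with _download_json(..., transform_source=strip_jsonp) it could
        # probably replace the manual string slicing above (it may need a small
        # tweak for the "gen_callback && gen_callback({...})" form).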

        param = urlencode({
            'a': 'incarnate',
            't': tid,
            'w': 2,
            'c': cnfd,
            'cb': 'cross_domain',
            'from': 'weibo',
            '_rand': rnd.random(),
        })
        gencallback_url = 'https://passport.weibo.com/visitor/visitor?' + param
        webpage, urlh = self._download_webpage_handle(
            gencallback_url, video_id, note='gen callback')
        print('webpage', webpage)

        # 'headers' was overwritten above, so this request no longer carries the
        # browser-like User-Agent used for the very first visit
        webpage, urlh = self._download_webpage_handle(
            url, video_id, headers=headers, note='retry to visit the page')
        print('webpage', webpage)

        # TODO more code goes here, for example ...
        title = self._html_search_regex(r'<title>(.+?)</title>', webpage, 'title')

        # a lazy (.+?) at the very end of a pattern only ever matches a single
        # character, so the pattern has to be anchored on the closing delimiter;
        # assuming the attribute value is quoted (the quote possibly escaped as \"):
        video_sources = self._search_regex(
            r'video-sources=\\?"(.+?)\\?"', webpage, 'video_sources')
        print('video_sources:', video_sources)
        return {
            'id': video_id,
            'title': title,
            'description': self._og_search_description(webpage),
            'uploader': self._search_regex(
                r'<div[^>]+id="uploader"[^>]*>([^<]+)<',
                webpage, 'uploader', fatal=False),
            # TODO more properties (see youtube_dl/extractor/common.py)
        }
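For the still-missing extraction step, here is a rough, untested sketch of how the captured video-sources string could be turned into a formats list. It assumes (not confirmed) that video-sources is a URL-query-style string keyed by resolution (for example 480 and 720), which compat_parse_qs from youtube_dl.compat can split into a dict of URL lists:

# near the other imports:
from ..compat import compat_parse_qs

        # inside _real_extract(), after video_sources has been captured;
        # the resolution keys are assumptions, not confirmed against the page
        video_formats = compat_parse_qs(video_sources)
        formats = []
        for res in (480, 720):
            vid_urls = video_formats.get('%d' % res)
            if not vid_urls:
                continue
            formats.append({
                'url': vid_urls[0],
                'height': res,
            })
        self._sort_formats(formats)

Returning a 'formats' list instead of a single URL would let youtube-dl's usual format selection (-f) pick the quality.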