from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    NO_DEFAULT,
    decode_packed_codes,
    determine_ext,
    int_or_none,
    sanitized_Request,
    urlencode_postdata,
)
class XFileShareIE(InfoExtractor):
    """Extractor for video hosters running the XFileShare(Pro) script.

    Every site listed in ``_SITES`` shares the same page layout, so a
    single extractor covers all of them: download the page, re-submit
    the hidden download form when required, then scrape the media URL
    out of the embedded player configuration (unpacking
    P.A.C.K.E.R.-obfuscated JavaScript first when necessary).

    NOTE(review): this block was reconstructed from a garbled dump with
    line-number artifacts and missing structural lines (list/dict
    delimiters, ``return`` statements); verify against VCS history.
    """

    # (hostname, human-readable name) pairs. The hostnames feed
    # _VALID_URL below, the names feed IE_DESC.
    _SITES = (
        ('daclips.in', 'DaClips'),
        ('filehoot.com', 'FileHoot'),
        ('gorillavid.in', 'GorillaVid'),
        ('movpod.in', 'MovPod'),
        ('powerwatch.pw', 'PowerWatch'),
        ('rapidvideo.ws', 'Rapidvideo.ws'),
        ('thevideobee.to', 'TheVideoBee'),
        ('vidto.me', 'Vidto'),
        ('streamin.to', 'Streamin.To'),
        ('xvidstage.com', 'XVIDSTAGE'),
        ('vidabc.com', 'Vid ABC'),
    )

    IE_DESC = 'XFileShare based sites: %s' % ', '.join(list(zip(*_SITES))[1])
    # Matches both plain watch pages and /embed-<id> pages on any host.
    _VALID_URL = (r'https?://(?P<host>(?:www\.)?(?:%s))/(?:embed-)?(?P<id>[0-9a-zA-Z]+)'
                  % '|'.join(re.escape(site) for site in list(zip(*_SITES))[0]))

    # Markers of a dead/removed video on the hoster's error page.
    _FILE_NOT_FOUND_REGEXES = (
        r'>(?:404 - )?File Not Found<',
        r'>The file was removed by administrator<',
    )

    _TESTS = [{
        'url': 'http://gorillavid.in/06y9juieqpmi',
        'md5': '5ae4a3580620380619678ee4875893ba',
        'info_dict': {
            'id': '06y9juieqpmi',
            'ext': 'mp4',
            'title': 'Rebecca Black My Moment Official Music Video Reaction-6GK87Rc8bzQ',
            'thumbnail': r're:http://.*\.jpg',
        },
    }, {
        'url': 'http://gorillavid.in/embed-z08zf8le23c6-960x480.html',
        'only_matching': True,
    }, {
        'url': 'http://daclips.in/3rso4kdn6f9m',
        'md5': '1ad8fd39bb976eeb66004d3a4895f106',
        'info_dict': {
            'id': '3rso4kdn6f9m',
            'ext': 'mp4',
            'title': 'Micro Pig piglets ready on 16th July 2009-bG0PdrCdxUc',
            'thumbnail': r're:http://.*\.jpg',
        },
    }, {
        'url': 'http://movpod.in/0wguyyxi1yca',
        'only_matching': True,
    }, {
        'url': 'http://filehoot.com/3ivfabn7573c.html',
        'info_dict': {
            'id': '3ivfabn7573c',
            'ext': 'mp4',
            'title': 'youtube-dl test video \'äBaW_jenozKc.mp4.mp4',
            'thumbnail': r're:http://.*\.jpg',
        },
        'skip': 'Video removed',
    }, {
        'url': 'http://vidto.me/ku5glz52nqe1.html',
        'only_matching': True,
    }, {
        'url': 'http://powerwatch.pw/duecjibvicbu',
        'info_dict': {
            'id': 'duecjibvicbu',
            'ext': 'mp4',
            'title': 'Big Buck Bunny trailer',
        },
    }, {
        'url': 'http://xvidstage.com/e0qcnl03co6z',
        'info_dict': {
            'id': 'e0qcnl03co6z',
            'ext': 'mp4',
            'title': 'Chucky Prank 2015.mp4',
        },
    }, {
        # removed by administrator
        'url': 'http://xvidstage.com/amfy7atlkx25',
        'only_matching': True,
    }, {
        'url': 'http://vidabc.com/i8ybqscrphfv',
        'info_dict': {
            'id': 'i8ybqscrphfv',
            'ext': 'mp4',
            'title': 're:Beauty and the Beast 2017',
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # Normalize embed URLs to the plain watch page before fetching.
        url = 'http://%s/%s' % (mobj.group('host'), video_id)
        webpage = self._download_webpage(url, video_id)

        if any(re.search(p, webpage) for p in self._FILE_NOT_FOUND_REGEXES):
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)

        fields = self._hidden_inputs(webpage)

        # Some hosts gate the video behind a countdown + form POST; honor
        # the wait and re-submit the hidden form to reach the real page.
        if fields['op'] == 'download1':
            countdown = int_or_none(self._search_regex(
                r'<span id="countdown_str">(?:[Ww]ait)?\s*<span id="cxc">(\d+)</span>\s*(?:seconds?)?</span>',
                webpage, 'countdown', default=None))
            if countdown:
                self._sleep(countdown, video_id)

            post = urlencode_postdata(fields)

            req = sanitized_Request(url, post)
            req.add_header('Content-type', 'application/x-www-form-urlencoded')

            webpage = self._download_webpage(req, video_id, 'Downloading video page')

        # Title: try several site-specific markups, then Open Graph,
        # and finally fall back to the video id itself.
        title = (self._search_regex(
            (r'style="z-index: [0-9]+;">([^<]+)</span>',
             r'<td nowrap>([^<]+)</td>',
             r'h4-fine[^>]*>([^<]+)<',
             r'>Watch (.+) ',
             r'<h2 class="video-page-head">([^<]+)</h2>',
             r'<h2 style="[^"]*color:#403f3d[^"]*"[^>]*>([^<]+)<'),  # streamin.to
            webpage, 'title', default=None) or self._og_search_title(
            webpage, default=None) or video_id).strip()

        def extract_formats(default=NO_DEFAULT):
            # Scrape candidate media URLs from the various player setups
            # the XFileShare skins use; de-duplicate while keeping order.
            urls = []
            for regex in (
                    r'file\s*:\s*(["\'])(?P<url>http(?:(?!\1).)+\.(?:m3u8|mp4|flv)(?:(?!\1).)*)\1',
                    r'file_link\s*=\s*(["\'])(?P<url>http(?:(?!\1).)+)\1',
                    r'addVariable\((\\?["\'])file\1\s*,\s*(\\?["\'])(?P<url>http(?:(?!\2).)+)\2\)',
                    r'<embed[^>]+src=(["\'])(?P<url>http(?:(?!\1).)+\.(?:m3u8|mp4|flv)(?:(?!\1).)*)\1'):
                for mobj in re.finditer(regex, webpage):
                    video_url = mobj.group('url')
                    if video_url not in urls:
                        urls.append(video_url)

            formats = []
            for video_url in urls:
                if determine_ext(video_url) == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        video_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id='hls',
                        fatal=False))
                else:
                    formats.append({
                        'url': video_url,
                        'format_id': 'sd',
                    })

            # With an explicit default, an empty result is not fatal --
            # the caller will retry after de-obfuscating the page.
            if not formats and default is not NO_DEFAULT:
                return default

            self._sort_formats(formats)
            return formats

        formats = extract_formats(default=None)

        if not formats:
            # Nothing found in the clear: the player config is hidden in
            # P.A.C.K.E.R.-packed JavaScript -- unpack it and rescan.
            webpage = decode_packed_codes(self._search_regex(
                r"(}\('(.+)',(\d+),(\d+),'[^']*\b(?:file|embed)\b[^']*'\.split\('\|'\))",
                webpage, 'packed code'))
            formats = extract_formats()

        thumbnail = self._search_regex(
            r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', default=None)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }