Fix typo

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index fe9bd97d0070dfbbee5b0b6904d048ab1935a5b2..d661d517dea96e09819ec52f58cf0205f40b0d72 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -718,6 +718,7 @@ class DailymotionIE(InfoExtractor):
 
     _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
     IE_NAME = u'dailymotion'
+    _WORKING = False
 
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
@@ -1329,7 +1330,7 @@ class GenericIE(InfoExtractor):
         opener = compat_urllib_request.OpenerDirector()
         for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                         HTTPMethodFallback, HEADRedirectHandler,
-                        compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
+                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
             opener.add_handler(handler())
 
         response = opener.open(HeadRequest(url))
@@ -1365,6 +1366,9 @@ class GenericIE(InfoExtractor):
         if mobj is None:
             # Broaden the search a little bit
             mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+        if mobj is None:
+            # Broaden the search a little bit: JWPlayer JS loader
+            mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
         if mobj is None:
             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
             return
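
Note: the second fallback added to GenericIE above covers pages that hand the media URL to a JWPlayer-style "file:" option in JavaScript rather than a query-string parameter. A minimal sketch of how that regex behaves, against an invented embed snippet:

    import re

    # Invented JWPlayer setup snippet, for illustration only
    webpage = 'jwplayer("container").setup({ file: "http://example.com/clip.mp4" });'
    mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
    print(mobj.group(1))  # http://example.com/clip.mp4
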
@@ -1468,7 +1472,7 @@ class YoutubeSearchIE(InfoExtractor):
             result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
             request = compat_urllib_request.Request(result_url)
             try:
-                data = compat_urllib_request.urlopen(request).read()
+                data = compat_urllib_request.urlopen(request).read().decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
                 return
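
Note: the .decode('utf-8') added above matters on Python 3, where urlopen().read() returns bytes rather than str; decoding up front lets the rest of the extractor treat the API response as text. A standalone sketch (the payload is invented):

    raw = b'{"feed": {"entry": []}}'   # stands in for urlopen(request).read()
    data = raw.decode('utf-8')         # bytes -> str, safe for regex/JSON handling
    print(type(raw).__name__, type(data).__name__)  # bytes str
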
@@ -1917,9 +1921,8 @@ class BlipTVUserIE(InfoExtractor):
 
         while True:
             self.report_download_page(username, pagenum)
-
-            request = compat_urllib_request.Request( page_base + "&page=" + str(pagenum) )
-
+            url = page_base + "&page=" + str(pagenum)
+            request = compat_urllib_request.Request( url )
             try:
                 page = compat_urllib_request.urlopen(request).read().decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -2097,6 +2100,10 @@ class FacebookIE(InfoExtractor):
         params_raw = compat_urllib_parse.unquote(data['params'])
         params = json.loads(params_raw)
         video_url = params['hd_src']
+        if not video_url:
+            video_url = params['sd_src']
+        if not video_url:
+            raise ExtractorError(u'Cannot find video URL')
         video_duration = int(params['video_duration'])
 
         m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
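
Note: FacebookIE no longer assumes an HD stream exists: if hd_src is empty it falls back to sd_src, and only errors out when neither is present. The same logic in isolation, with a made-up params dict:

    # params dict is invented; real values come from json.loads(params_raw)
    params = {'hd_src': None, 'sd_src': 'http://example.com/video_sd.mp4'}
    video_url = params['hd_src']
    if not video_url:
        video_url = params['sd_src']
    print(video_url)  # http://example.com/video_sd.mp4
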
@@ -2232,7 +2239,7 @@ class MyVideoIE(InfoExtractor):
         webpage = self._download_webpage(webpage_url, video_id)
 
         self.report_extraction(video_id)
-        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
                  webpage)
         if mobj is None:
             self._downloader.trouble(u'ERROR: unable to extract media URL')
@@ -3619,18 +3626,22 @@ class SteamIE(InfoExtractor):
         mweb = re.finditer(urlRE, webpage)
         namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
         titles = re.finditer(namesRE, webpage)
+        thumbsRE = r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">'
+        thumbs = re.finditer(thumbsRE, webpage)
         videos = []
-        for vid,vtitle in zip(mweb,titles):
+        for vid,vtitle,thumb in zip(mweb,titles,thumbs):
             video_id = vid.group('videoID')
             title = vtitle.group('videoName')
             video_url = vid.group('videoURL')
+            video_thumb = thumb.group('thumbnail')
             if not video_url:
                 self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
             info = {
                 'id':video_id,
                 'url':video_url,
                 'ext': 'flv',
-                'title': unescapeHTML(title)
+                'title': unescapeHTML(title),
+                'thumbnail': video_thumb
                   }
             videos.append(info)
         return videos
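
Note: in the SteamIE hunk above, a third finditer() stream for thumbnails is zipped with the video and title matches, so each result dict picks up the thumbnail found at the same position in the page. A small sketch of the thumbnail regex against invented HTML:

    import re

    html = '<img class="movie_thumb" src="http://example.com/thumb1.jpg">'
    thumbs = re.finditer(r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">', html)
    print(next(thumbs).group('thumbnail'))  # http://example.com/thumb1.jpg
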
@@ -3724,13 +3735,13 @@ class YouPornIE(InfoExtractor):
         webpage = self._download_webpage(req, video_id)
 
         # Get the video title
-        result = re.search(r'videoTitleArea">(?P<title>.*)</h1>', webpage)
+        result = re.search(r'<h1.*?>(?P<title>.*)</h1>', webpage)
         if result is None:
-            raise ExtractorError(u'ERROR: unable to extract video title')
+            raise ExtractorError(u'Unable to extract video title')
         video_title = result.group('title').strip()
 
         # Get the video date
-        result = re.search(r'Date:</b>(?P<date>.*)</li>', webpage)
+        result = re.search(r'Date:</label>(?P<date>.*) </li>', webpage)
         if result is None:
             self._downloader.to_stderr(u'WARNING: unable to extract video date')
             upload_date = None
@@ -3738,9 +3749,9 @@ class YouPornIE(InfoExtractor):
             upload_date = result.group('date').strip()
 
         # Get the video uploader
-        result = re.search(r'Submitted:</b>(?P<uploader>.*)</li>', webpage)
+        result = re.search(r'Submitted:</label>(?P<uploader>.*)</li>', webpage)
         if result is None:
-            self._downloader.to_stderr(u'ERROR: unable to extract uploader')
+            self._downloader.to_stderr(u'WARNING: unable to extract uploader')
             video_uploader = None
         else:
             video_uploader = result.group('uploader').strip()
@@ -3912,7 +3923,7 @@ class EightTracksIE(InfoExtractor):
 
         webpage = self._download_webpage(url, playlist_id)
 
-        m = re.search(r"new TRAX.Mix\((.*?)\);\n*\s*TRAX.initSearchAutocomplete\('#search'\);", webpage, flags=re.DOTALL)
+        m = re.search(r"PAGE.mix = (.*?);\n", webpage, flags=re.DOTALL)
         if not m:
             raise ExtractorError(u'Cannot find trax information')
         json_like = m.group(1)
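
Note: the EightTracksIE regex above now captures the object literal assigned to PAGE.mix (the old TRAX.Mix markup is gone); the captured group, json_like, presumably still gets JSON-decoded further down. A sketch with an invented page fragment:

    import re, json

    # Invented page fragment mimicking the new markup
    webpage = 'PAGE.mix = {"id": 123, "tracks_count": 8};\n'
    m = re.search(r"PAGE.mix = (.*?);\n", webpage, flags=re.DOTALL)
    mix = json.loads(m.group(1))
    print(mix['id'])  # 123
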
@@ -3969,28 +3980,76 @@ class KeekIE(InfoExtractor):
         return [info]
 
 class TEDIE(InfoExtractor):
-    _VALID_URL=r'http://www.ted.com/talks/(?P<videoName>\w+)'
+    _VALID_URL=r'''http://www.ted.com/
+                   (
+                        ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
+                        |
+                        ((?P<type_talk>talks)) # We have a simple talk
+                   )
+                   /(?P<name>\w+) # Here goes the name and then ".html"
+                   '''
+
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
     def _real_extract(self, url):
-        m=re.match(self._VALID_URL, url)
-        videoName=m.group('videoName')
-        webpage=self._download_webpage(url, 0, 'Downloading \"%s\" page' % videoName)
-        #If the url includes the language we get the title translated
-        title_RE=r'<h1><span id="altHeadline" >(?P<title>[\s\w:/\.\?=\+-\\\']*)</span></h1>'
+        m=re.match(self._VALID_URL, url, re.VERBOSE)
+        if m.group('type_talk'):
+            return [self._talk_info(url)]
+        else :
+            playlist_id=m.group('playlist_id')
+            name=m.group('name')
+            self._downloader.to_screen(u'[%s] Getting info of playlist %s: "%s"' % (self.IE_NAME,playlist_id,name))
+            return self._playlist_videos_info(url,name,playlist_id)
+
+    def _talk_video_link(self,mediaSlug):
+        '''Returns the video link for that mediaSlug'''
+        return 'http://download.ted.com/talks/%s.mp4' % mediaSlug
+
+    def _playlist_videos_info(self,url,name,playlist_id=0):
+        '''Returns the videos of the playlist'''
+        video_RE=r'''
+                     <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)"
+                     ([.\s]*?)data-playlist_item_id="(\d+)"
+                     ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)"
+                     '''
+        video_name_RE=r'<p\ class="talk-title"><a href="(?P<talk_url>/talks/(.+).html)">(?P<fullname>.+?)</a></p>'
+        webpage=self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
+        m_videos=re.finditer(video_RE,webpage,re.VERBOSE)
+        m_names=re.finditer(video_name_RE,webpage)
+        info=[]
+        for m_video, m_name in zip(m_videos,m_names):
+            video_id=m_video.group('video_id')
+            talk_url='http://www.ted.com%s' % m_name.group('talk_url')
+            info.append(self._talk_info(talk_url,video_id))
+        return info
+
+    def _talk_info(self, url, video_id=0):
+        """Return the video for the talk in the url"""
+        m=re.match(self._VALID_URL, url,re.VERBOSE)
+        videoName=m.group('name')
+        webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
+        # If the url includes the language we get the title translated
+        title_RE=r'<h1><span id="altHeadline" >(?P<title>.*)</span></h1>'
         title=re.search(title_RE, webpage).group('title')
         info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
                         "id":(?P<videoID>[\d]+).*?
                         "mediaSlug":"(?P<mediaSlug>[\w\d]+?)"'''
+        thumb_RE=r'</span>[\s.]*</div>[\s.]*<img src="(?P<thumbnail>.*?)"'
+        thumb_match=re.search(thumb_RE,webpage)
         info_match=re.search(info_RE,webpage,re.VERBOSE)
         video_id=info_match.group('videoID')
         mediaSlug=info_match.group('mediaSlug')
-        video_url='http://download.ted.com/talks/%s.mp4' % mediaSlug
+        video_url=self._talk_video_link(mediaSlug)
         info = {
-                'id':video_id,
-                'url':video_url,
+                'id': video_id,
+                'url': video_url,
                 'ext': 'mp4',
-                'title': title
-        }
-        return [info]
+                'title': title,
+                'thumbnail': thumb_match.group('thumbnail')
+                }
+        return info
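
Note: the reworked TEDIE above accepts both single talks and playlists; the verbose _VALID_URL decides which path _real_extract takes via the type_talk / type_playlist groups. A sketch of that dispatch against invented URLs:

    import re

    pattern = r'''http://www.ted.com/
                  (
                       ((?P<type_playlist>playlists)/(?P<playlist_id>\d+))
                       |
                       ((?P<type_talk>talks))
                  )
                  /(?P<name>\w+)
                  '''
    m = re.match(pattern, 'http://www.ted.com/talks/some_example_talk.html', re.VERBOSE)
    print(m.group('type_talk'), m.group('name'))  # talks some_example_talk
    m = re.match(pattern, 'http://www.ted.com/playlists/10/some_playlist.html', re.VERBOSE)
    print(m.group('playlist_id'), m.group('name'))  # 10 some_playlist
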
 
 class MySpassIE(InfoExtractor):
     _VALID_URL = r'http://www.myspass.de/.*'