Removed conversion from youtube closed caption format to srt, since the youtube api supports srt output directly (requested via the new 'fmt' parameter).
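A minimal standalone sketch of the new request (not part of the patch), using the Python 3 stdlib instead of the compat_* wrappers the extractor uses; the video id is a placeholder and the extractor additionally passes the track 'name'. With 'fmt': 'srt' the timedtext endpoint already returns SubRip text, so no local XML-to-srt conversion is needed:

    import urllib.parse
    import urllib.request

    params = urllib.parse.urlencode({
        'lang': 'en',        # caption language (srt_lang in the extractor)
        'v': 'VIDEO_ID',     # placeholder video id
        'fmt': 'srt',        # ask the API for SubRip output directly
    })
    url = 'http://www.youtube.com/api/timedtext?' + params
    srt = urllib.request.urlopen(url).read().decode('utf-8')  # already valid .srt text
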
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 627329ecd3848e8c15e1d9505c66880b2aff7e0c..e3998fbe88173ae1cb949d2f6256fa0fc06fb530 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -228,23 +228,6 @@ class YoutubeIE(InfoExtractor):
         """Indicate the download will use the RTMP protocol."""
         self._downloader.to_screen(u'[youtube] RTMP download detected')
 
-    def _closed_captions_xml_to_srt(self, xml_string):
-        srt = ''
-        texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
-        # TODO parse xml instead of regex
-        for n, (start, dur_tag, dur, caption) in enumerate(texts):
-            if not dur: dur = '4'
-            start = float(start)
-            end = start + float(dur)
-            start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
-            end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
-            caption = unescapeHTML(caption)
-            caption = unescapeHTML(caption) # double cycle, intentional
-            srt += str(n+1) + '\n'
-            srt += start + ' --> ' + end + '\n'
-            srt += caption + '\n\n'
-        return srt
-
     def _extract_subtitles(self, video_id):
         self.report_video_subtitles_download(video_id)
         request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
@@ -268,15 +251,16 @@ class YoutubeIE(InfoExtractor):
             'lang': srt_lang,
             'name': srt_lang_list[srt_lang].encode('utf-8'),
             'v': video_id,
+            'fmt': 'srt',
         })
         url = 'http://www.youtube.com/api/timedtext?' + params
         try:
-            srt_xml = compat_urllib_request.urlopen(url).read().decode('utf-8')
+            srt = compat_urllib_request.urlopen(url).read().decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
-        if not srt_xml:
+        if not srt:
             return (u'WARNING: Did not fetch video subtitles', None)
-        return (None, self._closed_captions_xml_to_srt(srt_xml))
+        return (None, srt)
 
     def _print_formats(self, formats):
         print('Available formats:')
@@ -1330,7 +1314,7 @@ class GenericIE(InfoExtractor):
         opener = compat_urllib_request.OpenerDirector()
         for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                         HTTPMethodFallback, HEADRedirectHandler,
-                        compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
+                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
             opener.add_handler(handler())
 
         response = opener.open(HeadRequest(url))
@@ -1366,6 +1350,9 @@ class GenericIE(InfoExtractor):
         if mobj is None:
             # Broaden the search a little bit
             mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+        if mobj is None:
+            # Broaden the search a little bit: JWPlayer JS loader
+            mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
         if mobj is None:
             self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
             return
@@ -2098,6 +2085,10 @@ class FacebookIE(InfoExtractor):
         params_raw = compat_urllib_parse.unquote(data['params'])
         params = json.loads(params_raw)
         video_url = params['hd_src']
+        if not video_url:
+            video_url = params['sd_src']
+        if not video_url:
+            raise ExtractorError(u'Cannot find video URL')
         video_duration = int(params['video_duration'])
 
         m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
@@ -2233,7 +2224,7 @@ class MyVideoIE(InfoExtractor):
         webpage = self._download_webpage(webpage_url, video_id)
 
         self.report_extraction(video_id)
-        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
                  webpage)
         if mobj is None:
             self._downloader.trouble(u'ERROR: unable to extract media URL')
@@ -3725,13 +3716,13 @@ class YouPornIE(InfoExtractor):
         webpage = self._download_webpage(req, video_id)
 
         # Get the video title
-        result = re.search(r'videoTitleArea">(?P<title>.*)</h1>', webpage)
+        result = re.search(r'<h1.*?>(?P<title>.*)</h1>', webpage)
         if result is None:
-            raise ExtractorError(u'ERROR: unable to extract video title')
+            raise ExtractorError(u'Unable to extract video title')
         video_title = result.group('title').strip()
 
         # Get the video date
-        result = re.search(r'Date:</b>(?P<date>.*)</li>', webpage)
+        result = re.search(r'Date:</label>(?P<date>.*) </li>', webpage)
         if result is None:
             self._downloader.to_stderr(u'WARNING: unable to extract video date')
             upload_date = None
@@ -3739,9 +3730,9 @@ class YouPornIE(InfoExtractor):
             upload_date = result.group('date').strip()
 
         # Get the video uploader
-        result = re.search(r'Submitted:</b>(?P<uploader>.*)</li>', webpage)
+        result = re.search(r'Submitted:</label>(?P<uploader>.*)</li>', webpage)
         if result is None:
-            self._downloader.to_stderr(u'ERROR: unable to extract uploader')
+            self._downloader.to_stderr(u'WARNING: unable to extract uploader')
             video_uploader = None
         else:
             video_uploader = result.group('uploader').strip()
@@ -3970,12 +3961,60 @@ class KeekIE(InfoExtractor):
         return [info]
 
 class TEDIE(InfoExtractor):
-    _VALID_URL=r'http://www.ted.com/talks/(?P<videoName>\w+)'
+    _VALID_URL=r'''http://www.ted.com/
+                   (
+                        ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
+                        |
+                        ((?P<type_talk>talks)) # We have a simple talk
+                   )
+                   /(?P<name>\w+) # Here goes the name and then ".html"
+                   '''
+
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
     def _real_extract(self, url):
-        m=re.match(self._VALID_URL, url)
-        videoName=m.group('videoName')
-        webpage=self._download_webpage(url, 0, 'Downloading \"%s\" page' % videoName)
-        #If the url includes the language we get the title translated
+        m=re.match(self._VALID_URL, url, re.VERBOSE)
+        if m.group('type_talk'):
+            return [self._talk_info(url)]
+        else :
+            playlist_id=m.group('playlist_id')
+            name=m.group('name')
+            self._downloader.to_screen(u'[%s] Getting info of playlist %s: "%s"' % (self.IE_NAME,playlist_id,name))
+            return self._playlist_videos_info(url,name,playlist_id)
+
+    def _talk_video_link(self,mediaSlug):
+        '''Returns the video link for that mediaSlug'''
+        return 'http://download.ted.com/talks/%s.mp4' % mediaSlug
+
+    def _playlist_videos_info(self,url,name,playlist_id=0):
+        '''Returns the videos of the playlist'''
+        video_RE=r'''
+                     <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)"
+                     ([.\s]*?)data-playlist_item_id="(\d+)"
+                     ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)"
+                     '''
+        video_name_RE=r'<p\ class="talk-title"><a href="/talks/(.+).html">(?P<fullname>.+?)</a></p>'
+        webpage=self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
+        m_videos=re.finditer(video_RE,webpage,re.VERBOSE)
+        m_names=re.finditer(video_name_RE,webpage)
+        info=[]
+        for m_video, m_name in zip(m_videos,m_names):
+            video_dic={
+                       'id': m_video.group('video_id'),
+                       'url': self._talk_video_link(m_video.group('mediaSlug')),
+                       'ext': 'mp4',
+                       'title': m_name.group('fullname')
+                       }
+            info.append(video_dic)
+        return info
+    def _talk_info(self, url, video_id=0):
+        """Return the video for the talk in the url"""
+        m=re.match(self._VALID_URL, url,re.VERBOSE)
+        videoName=m.group('name')
+        webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
+        # If the url includes the language we get the title translated
         title_RE=r'<h1><span id="altHeadline" >(?P<title>[\s\w:/\.\?=\+-\\\']*)</span></h1>'
         title=re.search(title_RE, webpage).group('title')
         info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
@@ -3984,14 +4023,14 @@ class TEDIE(InfoExtractor):
         info_match=re.search(info_RE,webpage,re.VERBOSE)
         video_id=info_match.group('videoID')
         mediaSlug=info_match.group('mediaSlug')
-        video_url='http://download.ted.com/talks/%s.mp4' % mediaSlug
+        video_url=self._talk_video_link(mediaSlug)
         info = {
-                'id':video_id,
-                'url':video_url,
+                'id': video_id,
+                'url': video_url,
                 'ext': 'mp4',
                 'title': title
-        }
-        return [info]
+                }
+        return info
 
 class MySpassIE(InfoExtractor):
     _VALID_URL = r'http://www.myspass.de/.*'
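
For reference, a small standalone check of the new TEDIE._VALID_URL (a sketch, not part of the patch; the two URLs below are hypothetical examples). The pattern only works with re.VERBOSE, which is why suitable() is overridden above rather than relying on the default match:

    import re

    TED_URL = r'''http://www.ted.com/
                  (
                       ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # playlist page
                       |
                       ((?P<type_talk>talks))                              # single talk
                  )
                  /(?P<name>\w+)
                  '''

    for url in ('http://www.ted.com/talks/some_talk_title.html',
                'http://www.ted.com/playlists/10/some_playlist_title.html'):
        m = re.match(TED_URL, url, re.VERBOSE)
        kind = 'playlist' if m.group('type_playlist') else 'talk'
        print(kind, m.group('name'))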