git.bitcoin.ninja Git - youtube-dl/commitdiff
Merge branch 'master' into subtitles_rework
author Ismael Mejia <iemejia@gmail.com>
Thu, 22 Aug 2013 21:29:36 +0000 (23:29 +0200)
committer Ismael Mejia <iemejia@gmail.com>
Thu, 22 Aug 2013 21:29:36 +0000 (23:29 +0200)
.gitignore
youtube_dl/YoutubeDL.py
youtube_dl/__init__.py
youtube_dl/extractor/common.py
youtube_dl/extractor/dailymotion.py
youtube_dl/extractor/youtube.py

diff --cc .gitignore
Simple merge
diff --cc youtube_dl/YoutubeDL.py
Simple merge
diff --cc youtube_dl/__init__.py
Simple merge
diff --cc youtube_dl/extractor/common.py
Simple merge
diff --cc youtube_dl/extractor/dailymotion.py
index 8fab16005781c54012d0c048979193bd4a5bcb10,fa8c630d053168bf30d835952debd67536555c0c..f54ecc569cbe02714df09cdaf55c72ffd7129895
@@@ -82,12 -60,12 +82,12 @@@ class DailymotionIE(DailyMotionSubtitle
  
          # TODO: support choosing qualities
  
-         for key in ['stream_h264_hd1080_url', 'stream_h264_hd_url',
-                     'stream_h264_hq_url', 'stream_h264_url',
+         for key in ['stream_h264_hd1080_url','stream_h264_hd_url',
+                     'stream_h264_hq_url','stream_h264_url',
                      'stream_h264_ld_url']:
 -            if info.get(key):#key in info and info[key]:
 +            if info.get(key):  # key in info and info[key]:
                  max_quality = key
 -                self.to_screen(u'Using %s' % key)
 +                self.to_screen(u'%s: Using %s' % (video_id, key))
                  break
          else:
              raise ExtractorError(u'Unable to extract video URL')
              'upload_date':  video_upload_date,
              'title':    self._og_search_title(webpage),
              'ext':      video_extension,
 +            'subtitles':    video_subtitles,
              'thumbnail': info['thumbnail_url']
          }]
+ class DailymotionPlaylistIE(InfoExtractor):
+     _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
+     _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>'
+     def _real_extract(self, url):
+         mobj = re.match(self._VALID_URL, url)
+         playlist_id =  mobj.group('id')
+         video_ids = []
+         for pagenum in itertools.count(1):
+             webpage = self._download_webpage('https://www.dailymotion.com/playlist/%s/%s' % (playlist_id, pagenum),
+                                              playlist_id, u'Downloading page %s' % pagenum)
+             playlist_el = get_element_by_attribute(u'class', u'video_list', webpage)
+             video_ids.extend(re.findall(r'data-id="(.+?)" data-ext-id', playlist_el))
+             if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
+                 break
+         entries = [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
+                    for video_id in video_ids]
+         return {'_type': 'playlist',
+                 'id': playlist_id,
+                 'title': get_element_by_id(u'playlist_name', webpage),
+                 'entries': entries,
+                 }
diff --cc youtube_dl/extractor/youtube.py
Simple merge
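
For reference, the pagination pattern introduced by DailymotionPlaylistIE in the diff above can be sketched outside of youtube-dl roughly as follows. This is a minimal standalone sketch in Python 3, assuming the same playlist page markup the extractor relies on (videos carrying data-id="..." attributes and a <div class="next"> pager while more pages remain); it uses plain urllib instead of the extractor's _download_webpage helper and scans the whole page rather than isolating the video_list element.

    import itertools
    import re
    import urllib.request

    # Markup assumption mirroring the extractor above: a <div class="next"> pager
    # block is present on every page except the last one.
    _MORE_PAGES = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>'

    def collect_playlist_video_ids(playlist_id):
        """Walk the paginated playlist and return the video ids in page order."""
        video_ids = []
        for pagenum in itertools.count(1):
            url = 'https://www.dailymotion.com/playlist/%s/%s' % (playlist_id, pagenum)
            # The extractor uses self._download_webpage() here instead.
            with urllib.request.urlopen(url) as resp:
                webpage = resp.read().decode('utf-8', 'replace')
            video_ids.extend(re.findall(r'data-id="(.+?)" data-ext-id', webpage))
            if re.search(_MORE_PAGES, webpage, re.DOTALL) is None:
                break  # no "next" link: last page reached
        return video_ids

In the actual extractor, each collected id is then wrapped with self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion') and returned inside a playlist dict, as the diff shows.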