Merge remote-tracking branch 'gcmalloc/master' into fork_master
author Filippo Valsorda <filippo.valsorda@gmail.com>
Wed, 12 Dec 2012 13:11:40 +0000 (14:11 +0100)
committer Filippo Valsorda <filippo.valsorda@gmail.com>
Wed, 12 Dec 2012 13:11:40 +0000 (14:11 +0100)
.gitignore
.travis.yml
Makefile
test/test_youtube_lists.py [new file with mode: 0644]
test/test_youtube_playlist_ids.py
youtube_dl/InfoExtractors.py
youtube_dl/version.py

index 996489d8ec6becd3a441cdbecd1f83c31eea944d..a4204620b8d3cd62fb067aa5983cbea55d08e7c2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,5 @@ youtube-dl.bash-completion
 youtube-dl
 youtube-dl.exe
 youtube-dl.tar.gz
+.coverage
+cover/
index 0aabce6a73cf96dc70bff38901a6e4f98521bf17..1e9b43e16433c89d814b5849623a7a2546dc1e82 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,6 +7,7 @@ script: nosetests test --verbose
 notifications:
   email:
     - filippo.valsorda@gmail.com
+    - phihag@phihag.de
   irc:
     channels:
       - "irc.freenode.org#youtube-dl"
index 5073a211ba818971e194dd418c9034a6def3445f..818d93bdbee417c853fd3e0670d12e942a1866f0 100644
--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,8 @@ install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
        install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
 
 test:
-       nosetests2 --nocapture test
+       #nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
+       nosetests --verbose test
 
 .PHONY: all clean install test
 
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
new file mode 100644
index 0000000..8939a52
--- /dev/null
+++ b/test/test_youtube_lists.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+import sys
+import unittest
+import socket
+
+# Allow direct execution
+import os
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from youtube_dl.InfoExtractors import YoutubeUserIE,YoutubePlaylistIE
+from youtube_dl.utils import *
+
+# General configuration (from __init__, not very elegant...)
+jar = compat_cookiejar.CookieJar()
+cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
+proxy_handler = compat_urllib_request.ProxyHandler()
+opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
+compat_urllib_request.install_opener(opener)
+socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
+
+class FakeDownloader(object):
+    def __init__(self):
+        self.result = []
+        self.params = {}
+    def to_screen(self, s):
+        print(s)
+    def trouble(self, s):
+        raise Exception(s)
+    def download(self, x):
+        self.result.append(x)
+
+class TestYoutubeLists(unittest.TestCase):
+    def test_youtube_playlist(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        IE.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
+        self.assertEqual(DL.result, [
+            ['http://www.youtube.com/watch?v=bV9L5Ht9LgY'],
+            ['http://www.youtube.com/watch?v=FXxLjLQi3Fg'],
+            ['http://www.youtube.com/watch?v=tU3Bgo5qJZE']
+        ])
+
+    def test_youtube_playlist_long(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        IE.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
+        self.assertTrue(len(DL.result) >= 799)
+
+    def test_youtube_course(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        # TODO: find a course with more than 100 videos, to exercise pagination
+        IE.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
+        self.assertEqual(DL.result[0], ['http://www.youtube.com/watch?v=j9WZyLZCBzs'])
+        self.assertEqual(len(DL.result), 25)
+        self.assertEqual(DL.result[-1], ['http://www.youtube.com/watch?v=rYefUsYuEp0'])
+
+    def test_youtube_channel(self):
+        """I give up, please find a channel that does paginate and test this like test_youtube_playlist_long"""
+        pass # TODO
+
+    def test_youtube_user(self):
+        DL = FakeDownloader()
+        IE = YoutubeUserIE(DL)
+        IE.extract('https://www.youtube.com/user/TheLinuxFoundation')
+        self.assertTrue(len(DL.result) >= 320)
+
+if __name__ == '__main__':
+    unittest.main()
index b4dcedb4541f66c5e7c6a9a04c49f8f7c6082c1f..2eeb3216c70ee0078c2b1f956e4676e21f027f9a 100644
--- a/test/test_youtube_playlist_ids.py
+++ b/test/test_youtube_playlist_ids.py
@@ -11,12 +11,12 @@ from youtube_dl.InfoExtractors import YoutubeIE, YoutubePlaylistIE
 
 class TestYoutubePlaylistMatching(unittest.TestCase):
     def test_playlist_matching(self):
-        assert YoutubePlaylistIE().suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assert YoutubePlaylistIE().suitable(u'PL63F0C78739B09958')
-        assert not YoutubePlaylistIE().suitable(u'PLtS2H6bU1M')
+        self.assertTrue(YoutubePlaylistIE().suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
+        self.assertTrue(YoutubePlaylistIE().suitable(u'PL63F0C78739B09958'))
+        self.assertFalse(YoutubePlaylistIE().suitable(u'PLtS2H6bU1M'))
 
     def test_youtube_matching(self):
-        assert YoutubeIE().suitable(u'PLtS2H6bU1M')
+        self.assertTrue(YoutubeIE().suitable(u'PLtS2H6bU1M'))
 
 if __name__ == '__main__':
     unittest.main()
index 9cfff153b12fce1c4db4c1420b6f0caab323a8c2..1b37eb648bac41ef29f6fb4b638fc669355affa4 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -1674,7 +1674,7 @@ class YoutubePlaylistIE(InfoExtractor):
     _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
     _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
     _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;([^&"]+&amp;)*list=.*?%s'
-    _MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
     IE_NAME = u'youtube:playlist'
 
     def __init__(self, downloader=None):
@@ -1713,7 +1713,7 @@ class YoutubePlaylistIE(InfoExtractor):
             url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
             request = compat_urllib_request.Request(url)
             try:
-                page = compat_urllib_request.urlopen(request).read()
+                page = compat_urllib_request.urlopen(request).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return
@@ -1725,10 +1725,12 @@ class YoutubePlaylistIE(InfoExtractor):
                     ids_in_page.append(mobj.group(1))
             video_ids.extend(ids_in_page)
 
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+            if self._MORE_PAGES_INDICATOR not in page:
                 break
             pagenum = pagenum + 1
 
+        total = len(video_ids)
+
         playliststart = self._downloader.params.get('playliststart', 1) - 1
         playlistend = self._downloader.params.get('playlistend', -1)
         if playlistend == -1:
@@ -1736,6 +1738,11 @@ class YoutubePlaylistIE(InfoExtractor):
         else:
             video_ids = video_ids[playliststart:playlistend]
 
+        if len(video_ids) == total:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
+        else:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+
         for id in video_ids:
             self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
         return
@@ -1746,7 +1753,7 @@ class YoutubeChannelIE(InfoExtractor):
 
     _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$"
     _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
-    _MORE_PAGES_INDICATOR = r'yt-uix-button-content">Next' # TODO
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
     IE_NAME = u'youtube:channel'
 
     def report_download_page(self, channel_id, pagenum):
@@ -1770,7 +1777,7 @@ class YoutubeChannelIE(InfoExtractor):
             url = self._TEMPLATE_URL % (channel_id, pagenum)
             request = compat_urllib_request.Request(url)
             try:
-                page = compat_urllib_request.urlopen(request).read()
+                page = compat_urllib_request.urlopen(request).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return
@@ -1782,10 +1789,12 @@ class YoutubeChannelIE(InfoExtractor):
                     ids_in_page.append(mobj.group(1))
             video_ids.extend(ids_in_page)
 
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+            if self._MORE_PAGES_INDICATOR not in page:
                 break
             pagenum = pagenum + 1
 
+        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
+
         for id in video_ids:
             self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
         return
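
Both list extractors above now decode each fetched page to text and probe for the literal "Next »" marker with plain substring membership instead of re.search. A condensed sketch of that shared loop, for illustration only (fetch_page and video_indicator are stand-ins for each extractor's request and id-matching regex, not names from the patch):

import re

MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"

def collect_video_ids(fetch_page, video_indicator):
    # fetch_page(pagenum) returns the page already decoded with .decode('utf8');
    # video_indicator is a regex whose group(1) captures the video id.
    video_ids = []
    pagenum = 1
    while True:
        page = fetch_page(pagenum)
        video_ids.extend(m.group(1) for m in re.finditer(video_indicator, page))
        if MORE_PAGES_INDICATOR not in page:  # plain-text marker, no regex needed
            break
        pagenum += 1
    return video_ids
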
@@ -2262,7 +2271,7 @@ class BlipTVIE(InfoExtractor):
         else:
             cchar = '?'
         json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
-        request = compat_urllib_request.Request(json_url.encode('utf-8'))
+        request = compat_urllib_request.Request(json_url)
         self.report_extraction(mobj.group(1))
         info = None
         try:
@@ -2287,7 +2296,8 @@ class BlipTVIE(InfoExtractor):
             return
         if info is None: # Regular URL
             try:
-                json_code = urlh.read()
+                json_code_bytes = urlh.read()
+                json_code = json_code_bytes.decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
                 return
@@ -2387,7 +2397,19 @@ class MyVideoIE(InfoExtractor):
 class ComedyCentralIE(InfoExtractor):
     """Information extractor for The Daily Show and Colbert Report """
 
-    _VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$'
+    # URLs can be abbreviations like :thedailyshow or :colbert,
+    # URLs for full episodes,
+    # or URLs for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
+    #                     or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
+    #                     or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
+    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
+                      |(https?://)?(www\.)?
+                          (?P<showname>thedailyshow|colbertnation)\.com/
+                         (full-episodes/(?P<episode>.*)|
+                          (?P<clip>
+                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
+                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
+                     $"""                        
     IE_NAME = u'comedycentral'
 
     _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
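
A minimal sketch, outside the patch, of how the verbose pattern above is expected to match the clip URL quoted in the comment block; because the pattern only works with re.VERBOSE, the next hunk also overrides suitable() to pass that flag:

import re
from youtube_dl.InfoExtractors import ComedyCentralIE

# Clip URL taken from the comment block above.
clip_url = 'http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day'

mobj = re.match(ComedyCentralIE._VALID_URL, clip_url, re.VERBOSE)
assert mobj is not None and mobj.group('clip') is not None
print('%s / %s' % (mobj.group('date'), mobj.group('tdstitle')))
# expected output: mon-december-10-2012 / any-given-gun-day

# Shortname abbreviations keep working through the suitable() override:
assert ComedyCentralIE().suitable(u':thedailyshow')
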
@@ -2409,6 +2431,10 @@ class ComedyCentralIE(InfoExtractor):
         '400': '384x216',
     }
 
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
     def report_extraction(self, episode_id):
         self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
 
@@ -2429,7 +2455,7 @@ class ComedyCentralIE(InfoExtractor):
 
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
@@ -2439,14 +2465,21 @@ class ComedyCentralIE(InfoExtractor):
                 url = u'http://www.thedailyshow.com/full-episodes/'
             else:
                 url = u'http://www.colbertnation.com/full-episodes/'
-            mobj = re.match(self._VALID_URL, url)
+            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
             assert mobj is not None
 
-        dlNewest = not mobj.group('episode')
-        if dlNewest:
-            epTitle = mobj.group('showname')
+        if mobj.group('clip'):
+            if mobj.group('showname') == 'thedailyshow':
+                epTitle = mobj.group('tdstitle')
+            else:
+                epTitle = mobj.group('cntitle')
+            dlNewest = False
         else:
-            epTitle = mobj.group('episode')
+            dlNewest = not mobj.group('episode')
+            if dlNewest:
+                epTitle = mobj.group('showname')
+            else:
+                epTitle = mobj.group('episode')
 
         req = compat_urllib_request.Request(url)
         self.report_extraction(epTitle)
@@ -2458,7 +2491,7 @@ class ComedyCentralIE(InfoExtractor):
             return
         if dlNewest:
             url = htmlHandle.geturl()
-            mobj = re.match(self._VALID_URL, url)
+            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
             if mobj is None:
                 self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
                 return
@@ -2467,14 +2500,14 @@ class ComedyCentralIE(InfoExtractor):
                 return
             epTitle = mobj.group('episode')
 
-        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*episode.*?:.*?))"', html)
+        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', html)
 
         if len(mMovieParams) == 0:
             # The Colbert Report embeds the information in a tag without
             # a URL prefix; so extract the alternate reference
             # and then add the URL prefix manually.
 
-            altMovieParams = re.findall('data-mgid="([^"]*episode.*?:.*?)"', html)
+            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', html)
             if len(altMovieParams) == 0:
                 self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
                 return
index f30283a62c7f213ddaf167b545470db417f882c3..d5986f5e64885aed3c8305c8cadac433df85701f 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,2 +1,2 @@
 
-__version__ = '2012.11.29'
+__version__ = '2012.12.11'