Merge branch 'master' of github.com:rg3/youtube-dl
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 71b6a136eb13e3d2d0d1d462f08b6ff369009998..ae17acd5dcd67e5dc10046174a17611924c1b187 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -1072,8 +1072,8 @@ class VimeoIE(InfoExtractor):
         self.report_extraction(video_id)
 
         # Extract the config JSON
-        config = webpage.split(' = {config:')[1].split(',assets:')[0]
         try:
+            config = webpage.split(' = {config:')[1].split(',assets:')[0]
             config = json.loads(config)
         except:
             self._downloader.trouble(u'ERROR: unable to extract info section')
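
Moving the split inside the try block means a Vimeo page that lacks the ' = {config:' marker raises IndexError and is reported through the same error path as a malformed JSON payload, instead of crashing before the handler is reached. A minimal standalone sketch of the extraction, using a hypothetical webpage string rather than a real Vimeo response:

    import json

    webpage = 'var clip = {config:{"video": {"id": 1234, "title": "demo"}},assets:{}};'
    try:
        # everything between ' = {config:' and ',assets:' is the config JSON
        config = webpage.split(' = {config:')[1].split(',assets:')[0]
        config = json.loads(config)
    except (IndexError, ValueError):
        print('ERROR: unable to extract info section')
    else:
        print(config['video']['title'])
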
@@ -1674,7 +1674,7 @@ class YoutubePlaylistIE(InfoExtractor):
     _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
     _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
     _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&([^&"]+&)*list=.*?%s'
-    _MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
     IE_NAME = u'youtube:playlist'
 
     def __init__(self, downloader=None):
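
The new indicator is the visible "Next »" pager label (U+00BB, written with a \N{...} escape so the source stays ASCII) rather than the old yt-uix-pager-next CSS class, so pagination can be detected with a plain substring test on the decoded page instead of a regex. A small illustration with a made-up HTML fragment standing in for a playlist page:

    MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"

    page = u'<a href="?page=2">Next \u00bb</a>'   # hypothetical decoded fragment
    if MORE_PAGES_INDICATOR in page:
        print('more pages to fetch')
    else:
        print('last page reached')
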
@@ -1713,7 +1713,7 @@ class YoutubePlaylistIE(InfoExtractor):
             url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
             request = compat_urllib_request.Request(url)
             try:
-                page = compat_urllib_request.urlopen(request).read()
+                page = compat_urllib_request.urlopen(request).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return
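
On Python 3, urlopen(...).read() returns bytes, so the page has to be decoded before the unicode _MORE_PAGES_INDICATOR can be looked up in it; the diff therefore decodes the playlist (and, below, the channel) pages as UTF-8. A minimal sketch of the same pattern, using the stdlib equivalents of the compat_* wrappers and a placeholder URL:

    try:
        from urllib.request import Request, urlopen   # Python 3
    except ImportError:
        from urllib2 import Request, urlopen          # Python 2

    request = Request('http://www.example.com/')    # placeholder URL
    page_bytes = urlopen(request).read()            # bytes on Python 3, str on Python 2
    page = page_bytes.decode('utf8')                # unicode text on both versions
    print(u'decoded %d characters' % len(page))
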
@@ -1725,10 +1725,12 @@ class YoutubePlaylistIE(InfoExtractor):
                     ids_in_page.append(mobj.group(1))
             video_ids.extend(ids_in_page)
 
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+            if self._MORE_PAGES_INDICATOR not in page:
                 break
             pagenum = pagenum + 1
 
+        total = len(video_ids)
+
         playliststart = self._downloader.params.get('playliststart', 1) - 1
         playlistend = self._downloader.params.get('playlistend', -1)
         if playlistend == -1:
@@ -1736,6 +1738,11 @@ class YoutubePlaylistIE(InfoExtractor):
         else:
             video_ids = video_ids[playliststart:playlistend]
 
+        if len(video_ids) == total:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
+        else:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+
         for id in video_ids:
             self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
         return
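
Because the slice above may drop entries, the length of the full id list is saved in total before slicing, and the new status line distinguishes "Found N videos" from "Found N videos, downloading M". A standalone sketch of the same arithmetic with made-up ids and option values (playliststart/playlistend correspond to the --playlist-start/--playlist-end options):

    video_ids = ['id%02d' % i for i in range(1, 26)]   # 25 hypothetical video ids
    total = len(video_ids)

    playliststart = 5 - 1    # --playlist-start 5: 1-based option, 0-based slice index
    playlistend = 20         # --playlist-end 20; -1 means "to the end"

    if playlistend == -1:
        video_ids = video_ids[playliststart:]
    else:
        video_ids = video_ids[playliststart:playlistend]

    if len(video_ids) == total:
        print(u'[youtube] PL demo: Found %i videos' % total)
    else:
        print(u'[youtube] PL demo: Found %i videos, downloading %i' % (total, len(video_ids)))
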
@@ -1746,7 +1753,7 @@ class YoutubeChannelIE(InfoExtractor):
 
     _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$"
     _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
-    _MORE_PAGES_INDICATOR = r'yt-uix-button-content">Next' # TODO
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
     IE_NAME = u'youtube:channel'
 
     def report_download_page(self, channel_id, pagenum):
@@ -1770,7 +1777,7 @@ class YoutubeChannelIE(InfoExtractor):
             url = self._TEMPLATE_URL % (channel_id, pagenum)
             request = compat_urllib_request.Request(url)
             try:
-                page = compat_urllib_request.urlopen(request).read()
+                page = compat_urllib_request.urlopen(request).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return
@@ -1782,10 +1789,12 @@ class YoutubeChannelIE(InfoExtractor):
                     ids_in_page.append(mobj.group(1))
             video_ids.extend(ids_in_page)
 
-            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+            if self._MORE_PAGES_INDICATOR not in page:
                 break
             pagenum = pagenum + 1
 
+        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
+
         for id in video_ids:
             self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
         return
@@ -2262,7 +2271,7 @@ class BlipTVIE(InfoExtractor):
         else:
             cchar = '?'
         json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
-        request = compat_urllib_request.Request(json_url.encode('utf-8'))
+        request = compat_urllib_request.Request(json_url)
         self.report_extraction(mobj.group(1))
         info = None
         try:
@@ -2287,7 +2296,8 @@ class BlipTVIE(InfoExtractor):
             return
         if info is None: # Regular URL
             try:
-                json_code = urlh.read()
+                json_code_bytes = urlh.read()
+                json_code = json_code_bytes.decode('utf-8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
                 return
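
Two related fixes in BlipTVIE: the JSON URL is handed to Request as a plain (unicode) string instead of being pre-encoded to UTF-8 bytes, and the response body is decoded before it reaches json.loads further down. A minimal sketch of the decode step, assuming a hypothetical JSON payload in place of urlh.read():

    import json

    json_code_bytes = b'{"Post": {"title": "demo", "media_type": "video"}}'  # stand-in for urlh.read()
    json_code = json_code_bytes.decode('utf-8')   # bytes -> text before parsing
    info = json.loads(json_code)
    print(info['Post']['title'])
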
@@ -2799,18 +2809,17 @@ class SoundcloudIE(InfoExtractor):
 
     _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
     IE_NAME = u'soundcloud'
-    _WORKING = False
 
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_webpage(self, video_id):
+    def report_resolve(self, video_id):
         """Report information extraction."""
-        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+        self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id))
 
     def report_extraction(self, video_id):
         """Report information extraction."""
-        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+        self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id))
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -2824,65 +2833,42 @@ class SoundcloudIE(InfoExtractor):
         slug_title =  mobj.group(2)
         simple_title = uploader + u'-' + slug_title
 
-        self.report_webpage('%s/%s' % (uploader, slug_title))
+        self.report_resolve('%s/%s' % (uploader, slug_title))
 
-        url = 'https://soundcloud.com/%s/%s' % (uploader, slug_title)
-        request = compat_urllib_request.Request(url)
+        url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title)
+        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+        request = compat_urllib_request.Request(resolv_url)
         try:
-            urlo = compat_urllib_request.urlopen(request).read()
-            webpage = webpage_bytes.decode('utf-8')
+            info_json_bytes = compat_urllib_request.urlopen(request).read()
+            info_json = info_json_bytes.decode('utf-8')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
             return
 
+        info = json.loads(info_json)
+        video_id = info['id']
         self.report_extraction('%s/%s' % (uploader, slug_title))
 
-        # extract uid and stream token that soundcloud hands out for access
-        mobj = re.search('"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage)
-        if mobj:
-            video_id = mobj.group(1)
-            stream_token = mobj.group(2)
-        else:
-            self._downloader.trouble(u'ERROR: unable to find video ID in Soundcloud file')
+        streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+        request = compat_urllib_request.Request(streams_url)
+        try:
+            stream_json_bytes = compat_urllib_request.urlopen(request).read()
+            stream_json = stream_json_bytes.decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
             return
 
-        # extract unsimplified title
-        mobj = re.search('"title":"(.*?)",', webpage)
-        if mobj:
-            title = mobj.group(1)
-        else:
-            title = simple_title
-
-        # construct media url (with uid/token)
-        mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s"
-        mediaURL = mediaURL % (video_id, stream_token)
-
-        # description
-        description = u'No description available'
-        mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
-        if mobj:
-            description = mobj.group(1)
-
-        # upload date
-        upload_date = None
-        mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
-        if mobj:
-            try:
-                upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
-            except Exception as err:
-                self._downloader.to_stderr(compat_str(err))
-
-        # for soundcloud, a request to a cross domain is required for cookies
-        request = compat_urllib_request.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
+        streams = json.loads(stream_json)
+        mediaURL = streams['http_mp3_128_url']
 
         return [{
-            'id':       video_id,
+            'id':       info['id'],
             'url':      mediaURL,
-            'uploader': uploader,
-            'upload_date':  upload_date,
-            'title':    title,
+            'uploader': info['user']['username'],
+            'upload_date':  info['created_at'],
+            'title':    info['title'],
             'ext':      u'mp3',
-            'description': description
+            'description': info['description'],
         }]
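
The rewritten SoundcloudIE no longer scrapes the track page for a uid/stream_token; it resolves the permalink through the public resolve.json API and then asks the streams endpoint for the MP3 URL (http_mp3_128_url), taking title, uploader, date and description straight from the resolved JSON. A condensed sketch of that two-step flow with stdlib urllib; the client_id is the one hard-coded in the diff and the permalink is hypothetical, so both may need replacing:

    import json
    try:
        from urllib.request import Request, urlopen   # Python 3
    except ImportError:
        from urllib2 import Request, urlopen          # Python 2

    CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28'                # from the diff; may go stale
    track_url = 'http://soundcloud.com/some-artist/some-track'    # hypothetical permalink

    # Step 1: resolve the permalink into the track's JSON metadata (numeric id, title, user, ...)
    resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + track_url + '&client_id=' + CLIENT_ID
    info = json.loads(urlopen(Request(resolv_url)).read().decode('utf-8'))

    # Step 2: ask the CDN API for the stream URLs of that track id
    streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(info['id']) + '/streams?client_id=' + CLIENT_ID
    streams = json.loads(urlopen(Request(streams_url)).read().decode('utf-8'))

    print(info['title'], streams['http_mp3_128_url'])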