8tracks: Better default titles
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 092bfef22ba7cdf7e4847ebc562bae0814037141..ff085b0ee2fcf72a3e7b82e289a3378d3d02ac78 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -5,6 +5,7 @@ from __future__ import absolute_import
 
 import base64
 import datetime
+import itertools
 import netrc
 import os
 import re
@@ -35,15 +36,16 @@ class InfoExtractor(object):
     url:            Final video URL.
     title:          Video title, unescaped.
     ext:            Video filename extension.
-    uploader:       Full name of the video uploader.
-    upload_date:    Video upload date (YYYYMMDD).
 
     The following fields are optional:
 
     format:         The video format, defaults to ext (used for --get-format)
     thumbnail:      Full URL to a video thumbnail image.
     description:    One-line video description.
+    uploader:       Full name of the video uploader.
+    upload_date:    Video upload date (YYYYMMDD).
     uploader_id:    Nickname or id of the video uploader.
+    location:       Physical location of the video.
     player_url:     SWF Player URL (used for rtmpdump).
     subtitles:      The .srt file contents.
     urlhandle:      [internal] The urlHandle to be used to download the file,
@@ -106,19 +108,24 @@ class InfoExtractor(object):
     def IE_NAME(self):
         return type(self).__name__[:-2]
 
-    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
+    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
+        """ Returns the response handle """
         if note is None:
             note = u'Downloading video webpage'
         self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
         try:
-            urlh = compat_urllib_request.urlopen(url_or_request)
-            webpage_bytes = urlh.read()
-            return webpage_bytes.decode('utf-8', 'replace')
+            return compat_urllib_request.urlopen(url_or_request)
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             if errnote is None:
                 errnote = u'Unable to download webpage'
             raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
 
+    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
+        """ Returns the data of the page as a string """
+        urlh = self._request_webpage(url_or_request, video_id, note, errnote)
+        webpage_bytes = urlh.read()
+        return webpage_bytes.decode('utf-8', 'replace')
+
 
 class YoutubeIE(InfoExtractor):
     """Information extractor for youtube.com."""
@@ -1974,62 +1981,14 @@ class DepositFilesIE(InfoExtractor):
 class FacebookIE(InfoExtractor):
     """Information Extractor for Facebook"""
 
-    _WORKING = False
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
     _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
     _NETRC_MACHINE = 'facebook'
-    _available_formats = ['video', 'highqual', 'lowqual']
-    _video_extensions = {
-        'video': 'mp4',
-        'highqual': 'mp4',
-        'lowqual': 'mp4',
-    }
     IE_NAME = u'facebook'
 
-    def __init__(self, downloader=None):
-        InfoExtractor.__init__(self, downloader)
-
-    def _reporter(self, message):
-        """Add header and report message."""
-        self._downloader.to_screen(u'[facebook] %s' % message)
-
     def report_login(self):
         """Report attempt to log in."""
-        self._reporter(u'Logging in')
-
-    def report_video_webpage_download(self, video_id):
-        """Report attempt to download video webpage."""
-        self._reporter(u'%s: Downloading video webpage' % video_id)
-
-    def report_information_extraction(self, video_id):
-        """Report attempt to extract video information."""
-        self._reporter(u'%s: Extracting video information' % video_id)
-
-    def _parse_page(self, video_webpage):
-        """Extract video information from page"""
-        # General data
-        data = {'title': r'\("video_title", "(.*?)"\)',
-            'description': r'<div class="datawrap">(.*?)</div>',
-            'owner': r'\("video_owner_name", "(.*?)"\)',
-            'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)',
-            }
-        video_info = {}
-        for piece in data.keys():
-            mobj = re.search(data[piece], video_webpage)
-            if mobj is not None:
-                video_info[piece] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
-
-        # Video urls
-        video_urls = {}
-        for fmt in self._available_formats:
-            mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
-            if mobj is not None:
-                # URL is in a Javascript segment inside an escaped Unicode format within
-                # the generally utf-8 page
-                video_urls[fmt] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
-        video_info['video_urls'] = video_urls
-
-        return video_info
+        self._downloader.to_screen(u'[%s] Logging in' % self.IE_NAME)
 
     def _real_initialize(self):
         if self._downloader is None:
@@ -2082,100 +2041,33 @@ class FacebookIE(InfoExtractor):
             return
         video_id = mobj.group('ID')
 
-        # Get video webpage
-        self.report_video_webpage_download(video_id)
-        request = compat_urllib_request.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
-        try:
-            page = compat_urllib_request.urlopen(request)
-            video_webpage = page.read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
-            return
-
-        # Start extracting information
-        self.report_information_extraction(video_id)
-
-        # Extract information
-        video_info = self._parse_page(video_webpage)
-
-        # uploader
-        if 'owner' not in video_info:
-            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
-            return
-        video_uploader = video_info['owner']
-
-        # title
-        if 'title' not in video_info:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
-            return
-        video_title = video_info['title']
-        video_title = video_title.decode('utf-8')
-
-        # thumbnail image
-        if 'thumbnail' not in video_info:
-            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
-            video_thumbnail = ''
-        else:
-            video_thumbnail = video_info['thumbnail']
-
-        # upload date
-        upload_date = None
-        if 'upload_date' in video_info:
-            upload_time = video_info['upload_date']
-            timetuple = email.utils.parsedate_tz(upload_time)
-            if timetuple is not None:
-                try:
-                    upload_date = time.strftime('%Y%m%d', timetuple[0:9])
-                except:
-                    pass
+        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
+        webpage = self._download_webpage(url, video_id)
 
-        # description
-        video_description = video_info.get('description', 'No description available.')
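+        # The player flashvars are embedded in an inline script as a JSON array
+        # of [name, value] pairs, sandwiched between the swf.addParam() loop and
+        # the swf.addVariable() loop; the literal BEFORE/AFTER markers bracket
+        # that array, and dict() turns the pairs into a mapping.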
+        BEFORE = '[["allowFullScreen","true"],["allowScriptAccess","always"],["salign","tl"],["scale","noscale"],["wmode","opaque"]].forEach(function(param) {swf.addParam(param[0], param[1]);});\n'
+        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
+        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
+        if not m:
+            raise ExtractorError(u'Cannot parse data')
+        data = dict(json.loads(m.group(1)))
+        video_url = compat_urllib_parse.unquote(data['hd_src'])
+        video_duration = int(data['video_duration'])
 
-        url_map = video_info['video_urls']
-        if url_map:
-            # Decide which formats to download
-            req_format = self._downloader.params.get('format', None)
-            format_limit = self._downloader.params.get('format_limit', None)
+        m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
+        if not m:
+            raise ExtractorError(u'Cannot find title in webpage')
+        video_title = unescapeHTML(m.group(1))
 
-            if format_limit is not None and format_limit in self._available_formats:
-                format_list = self._available_formats[self._available_formats.index(format_limit):]
-            else:
-                format_list = self._available_formats
-            existing_formats = [x for x in format_list if x in url_map]
-            if len(existing_formats) == 0:
-                self._downloader.trouble(u'ERROR: no known formats available for video')
-                return
-            if req_format is None:
-                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
-            elif req_format == 'worst':
-                video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
-            elif req_format == '-1':
-                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
-            else:
-                # Specific format
-                if req_format not in url_map:
-                    self._downloader.trouble(u'ERROR: requested format not available')
-                    return
-                video_url_list = [(req_format, url_map[req_format])] # Specific format
+        info = {
+            'id': video_id,
+            'title': video_title,
+            'url': video_url,
+            'ext': 'mp4',
+            'duration': video_duration,
+            'thumbnail': data['thumbnail_src'],
+        }
+        return [info]
 
-        results = []
-        for format_param, video_real_url in video_url_list:
-            # Extension
-            video_extension = self._video_extensions.get(format_param, 'mp4')
-
-            results.append({
-                'id':       video_id.decode('utf-8'),
-                'url':      video_real_url.decode('utf-8'),
-                'uploader': video_uploader.decode('utf-8'),
-                'upload_date':  upload_date,
-                'title':    video_title,
-                'ext':      video_extension.decode('utf-8'),
-                'format':   (format_param is None and u'NA' or format_param.decode('utf-8')),
-                'thumbnail':    video_thumbnail.decode('utf-8'),
-                'description':  video_description.decode('utf-8'),
-            })
-        return results
 
 class BlipTVIE(InfoExtractor):
     """Information extractor for blip.tv"""
@@ -2204,6 +2096,7 @@ class BlipTVIE(InfoExtractor):
             cchar = '?'
         json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
         request = compat_urllib_request.Request(json_url)
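+        # Request the JSON as the iTunes client does; the UA is now set per
+        # request (and passed along via the 'user_agent' info field below)
+        # instead of mutating the global std_headers as the old code did.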
+        request.add_header('User-Agent', 'iTunes/10.6.1')
         self.report_extraction(mobj.group(1))
         info = None
         try:
@@ -2224,8 +2117,7 @@ class BlipTVIE(InfoExtractor):
                     'urlhandle': urlh
                 }
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
-            return
+            raise ExtractorError(u'Unable to download video info webpage: %s' % compat_str(err))
         if info is None: # Regular URL
             try:
                 json_code_bytes = urlh.read()
@@ -2258,13 +2150,13 @@ class BlipTVIE(InfoExtractor):
                     'format': data['media']['mimeType'],
                     'thumbnail': data['thumbnailUrl'],
                     'description': data['description'],
-                    'player_url': data['embedUrl']
+                    'player_url': data['embedUrl'],
+                    'user_agent': 'iTunes/10.6.1',
                 }
             except (ValueError,KeyError) as err:
                 self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
                 return
 
-        std_headers['User-Agent'] = 'iTunes/10.6.1'
         return [info]
 
 
@@ -3535,10 +3427,12 @@ class JustinTVIE(InfoExtractor):
                 video_extension = os.path.splitext(video_url)[1][1:]
                 video_date = re.sub('-', '', clip['start_time'][:10])
                 video_uploader_id = clip.get('user_id', clip.get('channel_id'))
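+                # Fall back to the clip id when the API provides no title.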
+                video_id = clip['id']
+                video_title = clip.get('title', video_id)
                 info.append({
-                    'id': clip['id'],
+                    'id': video_id,
                     'url': video_url,
-                    'title': clip['title'],
+                    'title': video_title,
                     'uploader': clip.get('channel_name', video_uploader_id),
                     'uploader_id': video_uploader_id,
                     'upload_date': video_date,
@@ -3722,39 +3616,49 @@ class UstreamIE(InfoExtractor):
                   }
         return [info]
 
+class RBMARadioIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
 
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
 
-class YouPornIE(InfoExtractor):
-    """Information extractor for youporn.com."""
-
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
-   
-    def __init__(self, downloader=None):
-        InfoExtractor.__init__(self, downloader)
-
-    # def report_id(self, video_id):
-    #     """Report finding video ID"""
-    #     self._downloader.to_screen(u'[youporn] Video ID: %s' % video_id)
+        webpage = self._download_webpage(url, video_id)
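+        # The show metadata is embedded as a JSON object in the page's
+        # window.gon bootstrap <script>.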
+        m = re.search(r'<script>window.gon = {.*?};gon\.show=(.+?);</script>', webpage)
+        if not m:
+            raise ExtractorError(u'Cannot find metadata')
+        json_data = m.group(1)
 
-    # def report_webpage(self, url):
-    #     """Report downloading page"""
-    #     self._downloader.to_screen(u'[youporn] Downloaded page: %s' % url)
+        try:
+            data = json.loads(json_data)
+        except ValueError as e:
+            raise ExtractorError(u'Invalid JSON: ' + str(e))
 
-    # def report_title(self, video_title):
-    #     """Report dfinding title"""
-    #     self._downloader.to_screen(u'[youporn] Title: %s' % video_title)
-    
-    # def report_uploader(self, uploader):
-    #     """Report dfinding title"""
-    #     self._downloader.to_screen(u'[youporn] Uploader: %s' % uploader)
+        video_url = data['akamai_url'] + '&cbr=256'
+        url_parts = compat_urllib_parse_urlparse(video_url)
+        video_ext = url_parts.path.rpartition('.')[2]
+        info = {
+                'id': video_id,
+                'url': video_url,
+                'ext': video_ext,
+                'title': data['title'],
+                'description': data.get('teaser_text'),
+                'location': data.get('country_of_origin'),
+                'uploader': data.get('host', {}).get('name'),
+                'uploader_id': data.get('host', {}).get('slug'),
+                'thumbnail': data.get('image', {}).get('large_url_2x'),
+                'duration': data.get('duration'),
+        }
+        return [info]
 
-    # def report_upload_date(self, video_date):
-    #     """Report finding date"""
-    #     self._downloader.to_screen(u'[youporn] Date: %s' % video_date)
 
+class YouPornIE(InfoExtractor):
+    """Information extractor for youporn.com."""
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
+   
     def _print_formats(self, formats):
         """Print all available formats"""
-        print('Available formats:')
+        print(u'Available formats:')
         print(u'ext\t\tformat')
         print(u'---------------------------------')
         for format in formats:
@@ -3773,53 +3677,46 @@ class YouPornIE(InfoExtractor):
             return
 
         video_id = mobj.group('videoid')
-        #self.report_id(video_id)        
 
-        webpage = self._download_webpage(url, video_id)
-        #self.report_webpage(url)
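+        # Send the age-verification cookie up front so the full video page is
+        # returned instead of the age gate.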
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'age_verified=1')
+        webpage = self._download_webpage(req, video_id)
 
         # Get the video title
-        VIDEO_TITLE_RE = r'videoTitleArea">(?P<title>.*)</h1>'
-        result = re.search(VIDEO_TITLE_RE, webpage)
+        result = re.search(r'videoTitleArea">(?P<title>.*)</h1>', webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
-            return
+            raise ExtractorError(u'Unable to extract video title')
         video_title = result.group('title').strip()
-        #self.report_title(video_title)
 
         # Get the video date
-        VIDEO_DATE_RE = r'Date:</b>(?P<date>.*)</li>'
-        result = re.search(VIDEO_DATE_RE, webpage)
+        result = re.search(r'Date:</b>(?P<date>.*)</li>', webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video date')
-            return
-        upload_date = result.group('date').strip()
-        #self.report_upload_date(upload_date)
+            self._downloader.to_stderr(u'WARNING: unable to extract video date')
+            upload_date = None
+        else:
+            upload_date = result.group('date').strip()
 
         # Get the video uploader
-        VIDEO_UPLOADER_RE = r'Submitted:</b>(?P<uploader>.*)</li>'
-        result = re.search(VIDEO_UPLOADER_RE, webpage)
+        result = re.search(r'Submitted:</b>(?P<uploader>.*)</li>', webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract uploader')
-            return
-        video_uploader = result.group('uploader').strip()
-        video_uploader = clean_html( video_uploader )
-        #self.report_uploader(video_uploader)
+            self._downloader.to_stderr(u'WARNING: unable to extract uploader')
+            video_uploader = None
+        else:
+            video_uploader = result.group('uploader').strip()
+            video_uploader = clean_html(video_uploader)
 
         # Get all of the formats available
         DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
         result = re.search(DOWNLOAD_LIST_RE, webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract download list')
-            return
+            raise ExtractorError(u'Unable to extract download list')
         download_list_html = result.group('download_list').strip()
 
         # Get all of the links from the page
         LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
         links = re.findall(LINK_RE, download_list_html)
         if(len(links) == 0):
-            self._downloader.trouble(u'ERROR: no known formats available for video')
-            return
+            raise ExtractorError(u'No known formats available for video')
         
         self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))   
 
@@ -3857,10 +3754,8 @@ class YouPornIE(InfoExtractor):
             return
 
         req_format = self._downloader.params.get('format', None)
-        #format_limit = self._downloader.params.get('format_limit', None)
         self._downloader.to_screen(u'[youporn] Format: %s' % req_format)
 
-
         if req_format is None or req_format == 'best':
             return [formats[0]]
         elif req_format == 'worst':
@@ -3878,28 +3773,8 @@ class YouPornIE(InfoExtractor):
 
 class PornotubeIE(InfoExtractor):
     """Information extractor for pornotube.com."""
-
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
 
-    # def __init__(self, downloader=None):
-    #     InfoExtractor.__init__(self, downloader)
-
-    # def report_extract_entry(self, url):
-    #     """Report downloading extry"""
-    #     self._downloader.to_screen(u'[pornotube] Downloading entry: %s' % url.decode('utf-8'))
-
-    # def report_date(self, upload_date):
-    #     """Report finding uploaded date"""
-    #     self._downloader.to_screen(u'[pornotube] Entry date: %s' % upload_date)
-
-    # def report_webpage(self, url):
-    #     """Report downloading page"""
-    #     self._downloader.to_screen(u'[pornotube] Downloaded page: %s' % url)
-
-    # def report_title(self, video_title):
-    #     """Report downloading extry"""
-    #     self._downloader.to_screen(u'[pornotube] Title: %s' % video_title.decode('utf-8'))
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -3908,11 +3783,9 @@ class PornotubeIE(InfoExtractor):
 
         video_id = mobj.group('videoid')
         video_title = mobj.group('title')
-        #self.report_title(video_title);
 
         # Get webpage content
         webpage = self._download_webpage(url, video_id)
-        #self.report_webpage(url)
 
         # Get the video URL
         VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
@@ -3921,7 +3794,6 @@ class PornotubeIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract video url')
             return
         video_url = compat_urllib_parse.unquote(result.group('url'))
-        #self.report_extract_entry(video_url)
 
         #Get the uploaded date
         VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
@@ -3930,7 +3802,6 @@ class PornotubeIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
         upload_date = result.group('date')
-        #self.report_date(upload_date);
 
         info = {'id': video_id,
                 'url': video_url,
@@ -3938,39 +3809,14 @@ class PornotubeIE(InfoExtractor):
                 'upload_date': upload_date,
                 'title': video_title,
                 'ext': 'flv',
-                'format': 'flv',
-                'thumbnail': None,
-                'description': None,
-                'player_url': None}
+                'format': 'flv'}
 
         return [info]
 
-
-
 class YouJizzIE(InfoExtractor):
     """Information extractor for youjizz.com."""
-
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+).html$'
 
-    def __init__(self, downloader=None):
-        InfoExtractor.__init__(self, downloader)
-
-    # def report_extract_entry(self, url):
-    #     """Report downloading extry"""
-    #     self._downloader.to_screen(u'[youjizz] Downloading entry: %s' % url.decode('utf-8'))
-
-    # def report_webpage(self, url):
-    #     """Report downloading page"""
-    #     self._downloader.to_screen(u'[youjizz] Downloaded page: %s' % url)
-
-    # def report_title(self, video_title):
-    #     """Report downloading extry"""
-    #     self._downloader.to_screen(u'[youjizz] Title: %s' % video_title.decode('utf-8'))
-
-    # def report_embed_page(self, embed_page):
-    #     """Report downloading extry"""
-    #     self._downloader.to_screen(u'[youjizz] Embed Page: %s' % embed_page.decode('utf-8'))
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -3981,52 +3827,81 @@ class YouJizzIE(InfoExtractor):
 
         # Get webpage content
         webpage = self._download_webpage(url, video_id)
-        #self.report_webpage(url)
 
         # Get the video title
-        VIDEO_TITLE_RE = r'<title>(?P<title>.*)</title>'
-        result = re.search(VIDEO_TITLE_RE, webpage)
+        result = re.search(r'<title>(?P<title>.*)</title>', webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
-            return
+            raise ExtractorError(u'Unable to extract video title')
         video_title = result.group('title').strip()
-        #self.report_title(video_title)
 
         # Get the embed page
-        EMBED_PAGE_RE = r'http://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)'
-        result = re.search(EMBED_PAGE_RE, webpage)
+        result = re.search(r'https?://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract embed page')
-            return
+            raise ExtractorError(u'Unable to extract embed page')
 
         embed_page_url = result.group(0).strip()
         video_id = result.group('videoid')
-        #self.report_embed_page(embed_page_url)
     
         webpage = self._download_webpage(embed_page_url, video_id)
 
         # Get the video URL
-        SOURCE_RE = r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);'
-        result = re.search(SOURCE_RE, webpage)
+        result = re.search(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);', webpage)
         if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video url')
-            return
+            raise ExtractorError(u'Unable to extract video url')
         video_url = result.group('source')
-        #self.report_extract_entry(video_url)
 
         info = {'id': video_id,
                 'url': video_url,
-                'uploader': None,
-                'upload_date': None,
                 'title': video_title,
                 'ext': 'flv',
                 'format': 'flv',
-                'thumbnail': None,
-                'description': None,
                 'player_url': embed_page_url}
 
         return [info]
 
+class EightTracksIE(InfoExtractor):
+    IE_NAME = '8tracks'
+    _VALID_URL = r'https?://8tracks.com/(?P<user>[^/]+)/(?P<id>[^/]+)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        playlist_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, playlist_id)
+
+        m = re.search(r"new TRAX.Mix\((.*?)\);\n*\s*TRAX.initSearchAutocomplete\('#search'\);", webpage, flags=re.DOTALL)
+        if not m:
+            raise ExtractorError(u'Cannot find trax information')
+        json_like = m.group(1)
+        data = json.loads(json_like)
+
+        session = str(random.randint(0, 1000000000))
+        mix_id = data['id']
+        track_count = data['tracks_count']
+        first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
+        next_url = first_url
+        res = []
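+        # The jsonh API returns one track per request: the first via .../play,
+        # each subsequent one via .../next keyed by the previous track id,
+        # until the response flags 'at_last_track'.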
+        for i in itertools.count():
+            api_json = self._download_webpage(next_url, playlist_id,
+                note=u'Downloading song information %s/%s' % (str(i+1), track_count),
+                errnote=u'Failed to download song information')
+            api_data = json.loads(api_json)
+            track_data = api_data[u'set']['track']
+            info = {
+                'id': track_data['id'],
+                'url': track_data['track_file_stream_url'],
+                'title': track_data['performer'] + u' - ' + track_data['name'],
+                'raw_title': track_data['name'],
+                'uploader_id': data['user']['login'],
+                'ext': 'm4a',
+            }
+            res.append(info)
+            if api_data['set']['at_last_track']:
+                break
+            next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
+        return res
 
 def gen_extractors():
     """ Return a list of an instance of every supported extractor.
@@ -4072,6 +3947,8 @@ def gen_extractors():
         TweetReelIE(),
         SteamIE(),
         UstreamIE(),
+        RBMARadioIE(),
+        EightTracksIE(),
         GenericIE()
     ]