Merge branch 'extract_info_rewrite'
authorJaime Marquínez Ferrándiz <jaime.marquinez.ferrandiz@gmail.com>
Fri, 19 Apr 2013 19:57:08 +0000 (21:57 +0200)
committerJaime Marquínez Ferrándiz <jaime.marquinez.ferrandiz@gmail.com>
Fri, 19 Apr 2013 19:57:08 +0000 (21:57 +0200)
1  2 
README.md
youtube_dl/FileDownloader.py
youtube_dl/InfoExtractors.py

diff --combined README.md
index cf95187e6ea76fe0d1ef3b2cb5f029585ea55a77,e2958a9b06af767c2199cae1a289d631a85dc302..d42aab44a8ddbf9f13c5ffc3fa4a228fd850bff1
+++ b/README.md
@@@ -14,125 -14,119 +14,125 @@@ your Unix box, on Windows or on Mac OS 
  which means you can modify it, redistribute it or use it however you like.
  
  # OPTIONS
 -    -h, --help               print this help text and exit
 -    --version                print program version and exit
 -    -U, --update             update this program to latest version
 -    -i, --ignore-errors      continue on download errors
 -    -r, --rate-limit LIMIT   maximum download rate (e.g. 50k or 44.6m)
 -    -R, --retries RETRIES    number of retries (default is 10)
 -    --buffer-size SIZE       size of download buffer (e.g. 1024 or 16k) (default
 -                             is 1024)
 -    --no-resize-buffer       do not automatically adjust the buffer size. By
 -                             default, the buffer size is automatically resized
 -                             from an initial value of SIZE.
 -    --dump-user-agent        display the current browser identification
 -    --user-agent UA          specify a custom user agent
 -    --list-extractors        List all supported extractors and the URLs they
 -                             would handle
 +    -h, --help                 print this help text and exit
 +    --version                  print program version and exit
 +    -U, --update               update this program to latest version
 +    -i, --ignore-errors        continue on download errors
 +    -r, --rate-limit LIMIT     maximum download rate (e.g. 50k or 44.6m)
 +    -R, --retries RETRIES      number of retries (default is 10)
 +    --buffer-size SIZE         size of download buffer (e.g. 1024 or 16k)
 +                               (default is 1024)
 +    --no-resize-buffer         do not automatically adjust the buffer size. By
 +                               default, the buffer size is automatically resized
 +                               from an initial value of SIZE.
 +    --dump-user-agent          display the current browser identification
 +    --user-agent UA            specify a custom user agent
 +    --list-extractors          List all supported extractors and the URLs they
 +                               would handle
  
  ## Video Selection:
 -    --playlist-start NUMBER  playlist video to start at (default is 1)
 -    --playlist-end NUMBER    playlist video to end at (default is last)
 -    --match-title REGEX      download only matching titles (regex or caseless
 -                             sub-string)
 -    --reject-title REGEX     skip download for matching titles (regex or
 -                             caseless sub-string)
 -    --max-downloads NUMBER   Abort after downloading NUMBER files
 -    --min-filesize SIZE      Do not download any videos smaller than SIZE (e.g.
 -                             50k or 44.6m)
 -    --max-filesize SIZE      Do not download any videos larger than SIZE (e.g.
 -                             50k or 44.6m)
 +    --playlist-start NUMBER    playlist video to start at (default is 1)
 +    --playlist-end NUMBER      playlist video to end at (default is last)
 +    --match-title REGEX        download only matching titles (regex or caseless
 +                               sub-string)
 +    --reject-title REGEX       skip download for matching titles (regex or
 +                               caseless sub-string)
 +    --max-downloads NUMBER     Abort after downloading NUMBER files
 +    --min-filesize SIZE        Do not download any videos smaller than SIZE
 +                               (e.g. 50k or 44.6m)
 +    --max-filesize SIZE        Do not download any videos larger than SIZE (e.g.
 +                               50k or 44.6m)
  
  ## Filesystem Options:
 -    -t, --title              use title in file name
 -    --id                     use video ID in file name
 -    -l, --literal            [deprecated] alias of --title
 -    -A, --auto-number        number downloaded files starting from 00000
 -    -o, --output TEMPLATE    output filename template. Use %(title)s to get the
 -                             title, %(uploader)s for the uploader name,
 -                             %(uploader_id)s for the uploader nickname if
 -                             different, %(autonumber)s to get an automatically
 -                             incremented number, %(ext)s for the filename
 -                             extension, %(upload_date)s for the upload date
 -                             (YYYYMMDD), %(extractor)s for the provider
 -                             (youtube, metacafe, etc), %(id)s for the video id
 -                             and %% for a literal percent. Use - to output to
 -                             stdout. Can also be used to download to a different
 -                             directory, for example with -o '/my/downloads/%(upl
 -                             oader)s/%(title)s-%(id)s.%(ext)s' .
 -    --restrict-filenames     Restrict filenames to only ASCII characters, and
 -                             avoid "&" and spaces in filenames
 -    -a, --batch-file FILE    file containing URLs to download ('-' for stdin)
 -    -w, --no-overwrites      do not overwrite files
 -    -c, --continue           resume partially downloaded files
 -    --no-continue            do not resume partially downloaded files (restart
 -                             from beginning)
 -    --cookies FILE           file to read cookies from and dump cookie jar in
 -    --no-part                do not use .part files
 -    --no-mtime               do not use the Last-modified header to set the file
 -                             modification time
 -    --write-description      write video description to a .description file
 -    --write-info-json        write video metadata to a .info.json file
 +    -t, --title                use title in file name
 +    --id                       use video ID in file name
 +    -l, --literal              [deprecated] alias of --title
 +    -A, --auto-number          number downloaded files starting from 00000
 +    -o, --output TEMPLATE      output filename template. Use %(title)s to get
 +                               the title, %(uploader)s for the uploader name,
 +                               %(uploader_id)s for the uploader nickname if
 +                               different, %(autonumber)s to get an automatically
 +                               incremented number, %(ext)s for the filename
 +                               extension, %(upload_date)s for the upload date
 +                               (YYYYMMDD), %(extractor)s for the provider
 +                               (youtube, metacafe, etc), %(id)s for the video id
 +                               and %% for a literal percent. Use - to output to
 +                               stdout. Can also be used to download to a
 +                               different directory, for example with -o '/my/dow
 +                               nloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
 +    --autonumber-size NUMBER   Specifies the number of digits in %(autonumber)s
 +                               when it is present in output filename template or
 +                               --autonumber option is given
 +    --restrict-filenames       Restrict filenames to only ASCII characters, and
 +                               avoid "&" and spaces in filenames
 +    -a, --batch-file FILE      file containing URLs to download ('-' for stdin)
 +    -w, --no-overwrites        do not overwrite files
 +    -c, --continue             resume partially downloaded files
 +    --no-continue              do not resume partially downloaded files (restart
 +                               from beginning)
 +    --cookies FILE             file to read cookies from and dump cookie jar in
 +    --no-part                  do not use .part files
 +    --no-mtime                 do not use the Last-modified header to set the
 +                               file modification time
 +    --write-description        write video description to a .description file
 +    --write-info-json          write video metadata to a .info.json file
  
  ## Verbosity / Simulation Options:
 -    -q, --quiet              activates quiet mode
 -    -s, --simulate           do not download the video and do not write anything
 -                             to disk
 -    --skip-download          do not download the video
 -    -g, --get-url            simulate, quiet but print URL
 -    -e, --get-title          simulate, quiet but print title
 -    --get-thumbnail          simulate, quiet but print thumbnail URL
 -    --get-description        simulate, quiet but print video description
 -    --get-filename           simulate, quiet but print output filename
 -    --get-format             simulate, quiet but print output format
 -    --newline                output progress bar as new lines
 -    --no-progress            do not print progress bar
 -    --console-title          display progress in console titlebar
 -    -v, --verbose            print various debugging information
 +    -q, --quiet                activates quiet mode
 +    -s, --simulate             do not download the video and do not write
 +                               anything to disk
 +    --skip-download            do not download the video
 +    -g, --get-url              simulate, quiet but print URL
 +    -e, --get-title            simulate, quiet but print title
 +    --get-thumbnail            simulate, quiet but print thumbnail URL
 +    --get-description          simulate, quiet but print video description
 +    --get-filename             simulate, quiet but print output filename
 +    --get-format               simulate, quiet but print output format
 +    --newline                  output progress bar as new lines
 +    --no-progress              do not print progress bar
 +    --console-title            display progress in console titlebar
 +    -v, --verbose              print various debugging information
 +    --dump-intermediate-pages  print downloaded pages to debug problems (very
 +                               verbose)
  
  ## Video Format Options:
 -    -f, --format FORMAT      video format code
 -    --all-formats            download all available video formats
 -    --prefer-free-formats    prefer free video formats unless a specific one is
 -                             requested
 -    --max-quality FORMAT     highest quality format to download
 -    -F, --list-formats       list all available formats (currently youtube only)
 -    --write-sub              write subtitle file (currently youtube only)
 -    --only-sub               downloads only the subtitles (no video)
 -    --all-subs               downloads all the available subtitles of the video
 -                             (currently youtube only)
 -    --list-subs              lists all available subtitles for the video
 -                             (currently youtube only)
 -    --sub-format LANG        subtitle format [srt/sbv] (default=srt) (currently
 -                             youtube only)
 -    --sub-lang LANG          language of the subtitles to download (optional)
 -                             use IETF language tags like 'en'
 +    -f, --format FORMAT        video format code
 +    --all-formats              download all available video formats
 +    --prefer-free-formats      prefer free video formats unless a specific one
 +                               is requested
 +    --max-quality FORMAT       highest quality format to download
 +    -F, --list-formats         list all available formats (currently youtube
 +                               only)
 +    --write-sub                write subtitle file (currently youtube only)
 +    --only-sub                 downloads only the subtitles (no video)
 +    --all-subs                 downloads all the available subtitles of the
 +                               video (currently youtube only)
 +    --list-subs                lists all available subtitles for the video
 +                               (currently youtube only)
 +    --sub-format LANG          subtitle format [srt/sbv] (default=srt)
 +                               (currently youtube only)
 +    --sub-lang LANG            language of the subtitles to download (optional)
 +                               use IETF language tags like 'en'
  
  ## Authentication Options:
 -    -u, --username USERNAME  account username
 -    -p, --password PASSWORD  account password
 -    -n, --netrc              use .netrc authentication data
 +    -u, --username USERNAME    account username
 +    -p, --password PASSWORD    account password
 +    -n, --netrc                use .netrc authentication data
  
  ## Post-processing Options:
 -    -x, --extract-audio      convert video files to audio-only files (requires
 -                             ffmpeg or avconv and ffprobe or avprobe)
 -    --audio-format FORMAT    "best", "aac", "vorbis", "mp3", "m4a", "opus", or
 -                             "wav"; best by default
 -    --audio-quality QUALITY  ffmpeg/avconv audio quality specification, insert a
 -                             value between 0 (better) and 9 (worse) for VBR or a
 -                             specific bitrate like 128K (default 5)
 -    --recode-video FORMAT    Encode the video to another format if necessary
 -                             (currently supported: mp4|flv|ogg|webm)
 -    -k, --keep-video         keeps the video file on disk after the post-
 -                             processing; the video is erased by default
 -    --no-post-overwrites     do not overwrite post-processed files; the post-
 -                             processed files are overwritten by default
 +    -x, --extract-audio        convert video files to audio-only files (requires
 +                               ffmpeg or avconv and ffprobe or avprobe)
 +    --audio-format FORMAT      "best", "aac", "vorbis", "mp3", "m4a", "opus", or
 +                               "wav"; best by default
 +    --audio-quality QUALITY    ffmpeg/avconv audio quality specification, insert
 +                               a value between 0 (better) and 9 (worse) for VBR
 +                               or a specific bitrate like 128K (default 5)
 +    --recode-video FORMAT      Encode the video to another format if necessary
 +                               (currently supported: mp4|flv|ogg|webm)
 +    -k, --keep-video           keeps the video file on disk after the post-
 +                               processing; the video is erased by default
 +    --no-post-overwrites       do not overwrite post-processed files; the post-
 +                               processed files are overwritten by default
  
  # CONFIGURATION
  
@@@ -150,6 -144,8 +150,8 @@@ The `-o` option allows users to indicat
   - `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
   - `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
   - `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
+  - `playlist`: The name or the id of the playlist that contains the video.
+  - `playlist_index`: The index of the video in the playlist, a five-digit number.
  
  The current default template is `%(id)s.%(ext)s`, but that will be switched to `%(title)s-%(id)s.%(ext)s` (which can be requested with `-t` at the moment).
  
index 96da754fb11ea083e4c2b04d4da53aa54869b3ed,4dabbb440837e0a3bbbf41032ef9d6001609f041..03346ab04263a9e7c11f829053f8e6d96101781e
@@@ -388,11 -388,9 +388,13 @@@ class FileDownloader(object)
              template_dict = dict(info_dict)
  
              template_dict['epoch'] = int(time.time())
 -            template_dict['autonumber'] = u'%05d' % self._num_downloads
 +            autonumber_size = self.params.get('autonumber_size')
 +            if autonumber_size is None:
 +                autonumber_size = 5
 +            autonumber_templ = u'%0' + str(autonumber_size) + u'd'
 +            template_dict['autonumber'] = autonumber_templ % self._num_downloads
+             if template_dict['playlist_index'] is not None:
+                 template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']
  
              sanitize = lambda k,v: sanitize_filename(
                  u'NA' if v is None else compat_str(v),
              if re.search(rejecttitle, title, re.IGNORECASE):
                  return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
          return None
+         
+     def extract_info(self, url, download = True):
+         '''
+         Returns a list with a dictionary for each video we find.
+         If 'download', also downloads the videos.
+          '''
+         suitable_found = False
+         for ie in self._ies:
+             # Go to next InfoExtractor if not suitable
+             if not ie.suitable(url):
+                 continue
+             # Warn if the _WORKING attribute is False
+             if not ie.working():
+                 self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
+                                u'and will probably not work. If you want to go on, use the -i option.')
+             # Suitable InfoExtractor found
+             suitable_found = True
+             # Extract information from URL and process it
+             try:
+                 ie_results = ie.extract(url)
+                 results = []
+                 for ie_result in ie_results:
+                     if not 'extractor' in ie_result:
+                         #The extractor hasn't been set somewhere else, so set it here
+                         ie_result['extractor'] = ie.IE_NAME
+                     results.append(self.process_ie_result(ie_result, download))
+                 return results
+             except ExtractorError as de: # An error we somewhat expected
+                 self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
+                 break
+             except Exception as e:
+                 if self.params.get('ignoreerrors', False):
+                     self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
+                     break
+                 else:
+                     raise
+         if not suitable_found:
+                 self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
+         
+     def process_ie_result(self, ie_result, download = True):
+         """
+         Take the result of the ie and return a list of videos.
+         For url elements it will search the suitable ie and get the videos
+         For playlist elements it will process each of the elements of the 'entries' key
+         
+         It will also download the videos if 'download'.
+         """
+         result_type = ie_result.get('_type', 'video') #If not given we suppose it's a video, support the default old system
+         if result_type == 'video':
+             if 'playlist' not in ie_result:
+                 #It isn't part of a playlist
+                 ie_result['playlist'] = None
+                 ie_result['playlist_index'] = None
+             if download:
+                 #Do the download:
+                 self.process_info(ie_result)
+             return ie_result
+         elif result_type == 'url':
+             #We get the video pointed by the url
+             result = self.extract_info(ie_result['url'], download)[0]
+             return result
+         elif result_type == 'playlist':
+             #We process each entry in the playlist
+             playlist = ie_result.get('title', None) or ie_result.get('id', None)
+             self.to_screen(u'[download] Downloading playlist: %s'  % playlist)
+             playlist_results = []
+             n_all_entries = len(ie_result['entries'])
+             playliststart = self.params.get('playliststart', 1) - 1
+             playlistend = self.params.get('playlistend', -1)
+             if playlistend == -1:
+                 entries = ie_result['entries'][playliststart:]
+             else:
+                 entries = ie_result['entries'][playliststart:playlistend]
+             n_entries = len(entries)
+             self.to_screen(u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
+                 (ie_result['extractor'], playlist, n_all_entries, n_entries))
+             for i,entry in enumerate(entries,1):
+                 self.to_screen(u'[download] Downloading video #%s of %s' %(i, n_entries))
+                 entry_result = self.process_ie_result(entry, False)
+                 entry_result['playlist'] = playlist
+                 entry_result['playlist_index'] = i + playliststart
+                 #We must do the download here to correctly set the 'playlist' key
+                 if download:
+                     self.process_info(entry_result)
+                 playlist_results.append(entry_result)
+             result = ie_result.copy()
+             result['entries'] = playlist_results
+             return result
  
      def process_info(self, info_dict):
          """Process a single dictionary returned by an InfoExtractor."""
  
+         #We increment the download count here to match the previous behaviour.
+         self.increment_downloads()
+         
 +        info_dict['fulltitle'] = info_dict['title']
 +        if len(info_dict['title']) > 200:
 +            info_dict['title'] = info_dict['title'][:197] + u'...'
 +
          # Keep for backwards compatibility
          info_dict['stitle'] = info_dict['title']
  
              raise SameFileError(self.params['outtmpl'])
  
          for url in url_list:
-             suitable_found = False
-             for ie in self._ies:
-                 # Go to next InfoExtractor if not suitable
-                 if not ie.suitable(url):
-                     continue
-                 # Warn if the _WORKING attribute is False
-                 if not ie.working():
-                     self.report_warning(u'the program functionality for this site has been marked as broken, '
-                                         u'and will probably not work. If you want to go on, use the -i option.')
-                 # Suitable InfoExtractor found
-                 suitable_found = True
-                 # Extract information from URL and process it
-                 try:
-                     videos = ie.extract(url)
-                 except ExtractorError as de: # An error we somewhat expected
-                     self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
-                     break
-                 except MaxDownloadsReached:
-                     self.to_screen(u'[info] Maximum number of downloaded files reached.')
-                     raise
-                 except Exception as e:
-                     if self.params.get('ignoreerrors', False):
-                         self.report_error(u'' + compat_str(e), tb=compat_str(traceback.format_exc()))
-                         break
-                     else:
-                         raise
-                 if len(videos or []) > 1 and self.fixed_template():
-                     raise SameFileError(self.params['outtmpl'])
-                 for video in videos or []:
-                     video['extractor'] = ie.IE_NAME
-                     try:
-                         self.increment_downloads()
-                         self.process_info(video)
-                     except UnavailableVideoError:
-                         self.to_stderr(u"\n")
-                         self.report_error(u'unable to download video')
-                 # Suitable InfoExtractor had been found; go to next URL
-                 break
-             if not suitable_found:
-                 self.report_error(u'no suitable InfoExtractor: %s' % url)
+             try:
+                 #It also downloads the videos
+                 videos = self.extract_info(url)
+             except UnavailableVideoError:
+                 self.trouble(u'\nERROR: unable to download video')
+             except MaxDownloadsReached:
+                 self.to_screen(u'[info] Maximum number of downloaded files reached.')
+                 raise
  
          return self._download_retcode
  
              except (IOError, OSError):
                  self.report_warning(u'Unable to remove downloaded video file')
  
 -    def _download_with_rtmpdump(self, filename, url, player_url, page_url):
 +    def _download_with_rtmpdump(self, filename, url, player_url, page_url, play_path):
          self.report_destination(filename)
          tmpfilename = self.temp_name(filename)
  
              basic_args += ['-W', player_url]
          if page_url is not None:
              basic_args += ['--pageUrl', page_url]
 +        if play_path is not None:
 +            basic_args += ['-y', play_path]
          args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
          if self.params.get('verbose', False):
              try:
          if url.startswith('rtmp'):
              return self._download_with_rtmpdump(filename, url,
                                                  info_dict.get('player_url', None),
 -                                                info_dict.get('page_url', None))
 +                                                info_dict.get('page_url', None),
 +                                                info_dict.get('play_path', None))
  
          tmpfilename = self.temp_name(filename)
          stream = None
index bac3a747d6ffcecc36af4e4518f8f97c118568c0,a7fdf1607c4c73e4caef623a1598065e84b3a834..ae36558d75839f68facb72300efb5b4c22bcd809
@@@ -115,8 -115,7 +115,8 @@@ class InfoExtractor(object)
          """ Returns the response handle """
          if note is None:
              note = u'Downloading video webpage'
 -        self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
 +        if note is not False:
 +            self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
          try:
              return compat_urllib_request.urlopen(url_or_request)
          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
          else:
              encoding = 'utf-8'
          webpage_bytes = urlh.read()
 +        if self._downloader.params.get('dump_intermediate_pages', False):
 +            try:
 +                url = url_or_request.get_full_url()
 +            except AttributeError:
 +                url = url_or_request
 +            self._downloader.to_screen(u'Dumping request to ' + url)
 +            dump = base64.b64encode(webpage_bytes).decode('ascii')
 +            self._downloader.to_screen(dump)
          return webpage_bytes.decode(encoding, 'replace')
+         
+     #Methods for following #608
+     #They set the correct value of the '_type' key
+     def video_result(self, video_info):
+         """Returns a video"""
+         video_info['_type'] = 'video'
+         return video_info
+     def url_result(self, url, ie=None):
+         """Returns a url that points to a page that should be processed"""
+         #TODO: ie should be the class used for getting the info
+         video_info = {'_type': 'url',
+                       'url': url}
+         return video_info
+     def playlist_result(self, entries, playlist_id=None, playlist_title=None):
+         """Returns a playlist"""
+         video_info = {'_type': 'playlist',
+                       'entries': entries}
+         if playlist_id:
+             video_info['id'] = playlist_id
+         if playlist_title:
+             video_info['title'] = playlist_title
+         return video_info
  
  
  class YoutubeIE(InfoExtractor):
          # Get video info
          self.report_video_info_webpage_download(video_id)
          for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
 -            video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
 +            video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                      % (video_id, el_type))
 -            request = compat_urllib_request.Request(video_info_url)
 -            try:
 -                video_info_webpage_bytes = compat_urllib_request.urlopen(request).read()
 -                video_info_webpage = video_info_webpage_bytes.decode('utf-8', 'ignore')
 -                video_info = compat_parse_qs(video_info_webpage)
 -                if 'token' in video_info:
 -                    break
 -            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
 -                self._downloader.report_error(u'unable to download video info webpage: %s' % compat_str(err))
 -                return
 +            video_info_webpage = self._download_webpage(video_info_url, video_id,
 +                                    note=False,
 +                                    errnote='unable to download video info webpage')
 +            video_info = compat_parse_qs(video_info_webpage)
 +            if 'token' in video_info:
 +                break
          if 'token' not in video_info:
              if 'reason' in video_info:
                  self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0])
@@@ -706,8 -723,7 +728,7 @@@ class MetacafeIE(InfoExtractor)
          # Check if video comes from YouTube
          mobj2 = re.match(r'^yt-(.*)$', video_id)
          if mobj2 is not None:
-             self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
-             return
+             return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1))]
  
          # Retrieve video webpage to extract further information
          request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
@@@ -1135,7 -1151,7 +1156,7 @@@ class VimeoIE(InfoExtractor)
          # Extract video description
          video_description = get_element_by_attribute("itemprop", "description", webpage)
          if video_description: video_description = clean_html(video_description)
 -        else: video_description = ''
 +        else: video_description = u''
  
          # Extract upload date
          video_upload_date = None
@@@ -1348,7 -1364,7 +1369,7 @@@ class GenericIE(InfoExtractor)
          self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
  
      def _test_redirect(self, url):
-         """Check if it is a redirect, like url shorteners, in case restart chain."""
+         """Check if it is a redirect, like url shorteners, in case return the new url."""
          class HeadRequest(compat_urllib_request.Request):
              def get_method(self):
                  return "HEAD"
              return False
  
          self.report_following_redirect(new_url)
-         self._downloader.download([new_url])
-         return True
+         return new_url
  
      def _real_extract(self, url):
-         if self._test_redirect(url): return
+         new_url = self._test_redirect(url)
+         if new_url: return [self.url_result(new_url)]
  
          video_id = url.split('/')[-1]
          try:
@@@ -1778,13 -1794,9 +1799,13 @@@ class YoutubePlaylistIE(InfoExtractor)
                  self._downloader.report_error(u'Invalid JSON in API response: ' + compat_str(err))
                  return
  
 -            if not 'feed' in response or not 'entry' in response['feed']:
 +            if 'feed' not in response:
                  self._downloader.report_error(u'Got a malformed response from YouTube API')
                  return
 +            if 'entry' not in response['feed']:
 +                # No 'entry' key: the total number of videos is an exact
 +                # multiple of self._MAX_RESULTS, so this page is empty and
 +                # pagination is finished.
 +                break
 +
              videos += [ (entry['yt$position']['$t'], entry['content']['src'])
                          for entry in response['feed']['entry']
                          if 'content' in entry ]
              page_num += 1
  
          videos = [v[1] for v in sorted(videos)]
-         total = len(videos)
-         playliststart = self._downloader.params.get('playliststart', 1) - 1
-         playlistend = self._downloader.params.get('playlistend', -1)
-         if playlistend == -1:
-             videos = videos[playliststart:]
-         else:
-             videos = videos[playliststart:playlistend]
-         if len(videos) == total:
-             self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
-         else:
-             self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(videos)))
  
-         for video in videos:
-             self._downloader.download([video])
-         return
+         url_results = [self.url_result(url) for url in videos]
+         return [self.playlist_result(url_results, playlist_id)]
  
  
  class YoutubeChannelIE(InfoExtractor):
  
          self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
  
-         for id in video_ids:
-             self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
-         return
+         urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
+         url_entries = [self.url_result(url) for url in urls]
+         return [self.playlist_result(url_entries, channel_id)]
  
  
  class YoutubeUserIE(InfoExtractor):
  
              pagenum += 1
  
-         all_ids_count = len(video_ids)
-         playliststart = self._downloader.params.get('playliststart', 1) - 1
-         playlistend = self._downloader.params.get('playlistend', -1)
-         if playlistend == -1:
-             video_ids = video_ids[playliststart:]
-         else:
-             video_ids = video_ids[playliststart:playlistend]
-         self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" %
-                 (username, all_ids_count, len(video_ids)))
-         for video_id in video_ids:
-             self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
+         urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
+         url_results = [self.url_result(url) for url in urls]
+         return [self.playlist_result(url_results, playlist_title = username)]
  
  
  class BlipTVUserIE(InfoExtractor):
  
              pagenum += 1
  
-         all_ids_count = len(video_ids)
-         playliststart = self._downloader.params.get('playliststart', 1) - 1
-         playlistend = self._downloader.params.get('playlistend', -1)
-         if playlistend == -1:
-             video_ids = video_ids[playliststart:]
-         else:
-             video_ids = video_ids[playliststart:playlistend]
          self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
                  (self.IE_NAME, username, all_ids_count, len(video_ids)))
  
-         for video_id in video_ids:
-             self._downloader.download([u'http://blip.tv/'+video_id])
+         urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
+         url_entries = [self.url_result(url) for url in urls]
+         return [self.playlist_result(url_entries, playlist_title = username)]
  
  
  class DepositFilesIE(InfoExtractor):
@@@ -2165,7 -2144,7 +2153,7 @@@ class FacebookIE(InfoExtractor)
          url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
          webpage = self._download_webpage(url, video_id)
  
 -        BEFORE = '[["allowFullScreen","true"],["allowScriptAccess","always"],["salign","tl"],["scale","noscale"],["wmode","opaque"]].forEach(function(param) {swf.addParam(param[0], param[1]);});\n'
 +        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
          AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
          m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
          if not m:
          data = dict(json.loads(m.group(1)))
          params_raw = compat_urllib_parse.unquote(data['params'])
          params = json.loads(params_raw)
 -        video_url = params['hd_src']
 +        video_data = params['video_data'][0]
 +        video_url = video_data.get('hd_src')
          if not video_url:
 -            video_url = params['sd_src']
 +            video_url = video_data['sd_src']
          if not video_url:
              raise ExtractorError(u'Cannot find video URL')
 -        video_duration = int(params['video_duration'])
 +        video_duration = int(video_data['video_duration'])
 +        thumbnail = video_data['thumbnail_src']
  
          m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
          if not m:
              'url': video_url,
              'ext': 'mp4',
              'duration': video_duration,
 -            'thumbnail': params['thumbnail_src'],
 +            'thumbnail': thumbnail,
          }
          return [info]
  
@@@ -3708,9 -3685,7 +3696,9 @@@ class FunnyOrDieIE(InfoExtractor)
  
          m = re.search(r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>", webpage, flags=re.DOTALL)
          if not m:
 -            self._downloader.trouble(u'Cannot find video title')
 +            m = re.search(r'<title>(?P<title>[^<]+?)</title>', webpage)
 +            if not m:
 +                self._downloader.trouble(u'Cannot find video title')
          title = clean_html(m.group('title'))
  
          m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
@@@ -4144,7 -4119,7 +4132,7 @@@ class KeekIE(InfoExtractor)
          video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
          thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
          webpage = self._download_webpage(url, video_id)
 -        m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
 +        m = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', webpage)
          title = unescapeHTML(m.group('title'))
          m = re.search(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>', webpage)
          uploader = clean_html(m.group('uploader'))
@@@ -4369,46 -4344,6 +4357,46 @@@ class LiveLeakIE(InfoExtractor)
  
          return [info]
  
 +class ARDIE(InfoExtractor):
 +    _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
 +    _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
 +    _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
 +
 +    def _real_extract(self, url):
 +        # determine video id from url
 +        m = re.match(self._VALID_URL, url)
 +
 +        numid = re.search(r'documentId=([0-9]+)', url)
 +        if numid:
 +            video_id = numid.group(1)
 +        else:
 +            video_id = m.group('video_id')
 +
 +        # determine title and media streams from webpage
 +        html = self._download_webpage(url, video_id)
 +        title = re.search(self._TITLE, html).group('title')
 +        streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
 +        if not streams:
 +            assert '"fsk"' in html
 +            self._downloader.report_error(u'this video is only available after 8:00 pm')
 +            return
 +
 +        # choose default media type and highest quality for now
 +        stream = max([s for s in streams if int(s["media_type"]) == 0],
 +                     key=lambda s: int(s["quality"]))
 +
 +        # there's two possibilities: RTMP stream or HTTP download
 +        info = {'id': video_id, 'title': title, 'ext': 'mp4'}
 +        if stream['rtmp_url']:
 +            self._downloader.to_screen(u'[%s] RTMP download detected' % self.IE_NAME)
 +            assert stream['video_url'].startswith('mp4:')
 +            info["url"] = stream["rtmp_url"]
 +            info["play_path"] = stream['video_url']
 +        else:
 +            assert stream["video_url"].endswith('.mp4')
 +            info["url"] = stream["video_url"]
 +        return [info]
 +
  
  def gen_extractors():
      """ Return a list of an instance of every supported extractor.
          MySpassIE(),
          SpiegelIE(),
          LiveLeakIE(),
 +        ARDIE(),
          GenericIE()
      ]