Merge remote-tracking branch 'SyxbEaEQ2/rate-limit'
author Philipp Hagemeister <phihag@phihag.de>
Fri, 12 Dec 2014 16:16:13 +0000 (17:16 +0100)
committer Philipp Hagemeister <phihag@phihag.de>
Fri, 12 Dec 2014 16:16:13 +0000 (17:16 +0100)
429 files changed:
.gitignore
AUTHORS [new file with mode: 0644]
CONTRIBUTING.md [new file with mode: 0644]
LATEST_VERSION [deleted file]
MANIFEST.in
Makefile
README.md
devscripts/bash-completion.py
devscripts/buildserver.py
devscripts/check-porn.py
devscripts/fish-completion.in [new file with mode: 0644]
devscripts/fish-completion.py [new file with mode: 0755]
devscripts/gh-pages/add-version.py
devscripts/gh-pages/generate-download.py
devscripts/gh-pages/sign-versions.py
devscripts/gh-pages/update-copyright.py
devscripts/gh-pages/update-feed.py
devscripts/gh-pages/update-sites.py
devscripts/make_contributing.py [new file with mode: 0755]
devscripts/make_readme.py
devscripts/prepare_manpage.py
devscripts/release.sh
devscripts/transition_helper.py [deleted file]
devscripts/transition_helper_exe/setup.py [deleted file]
devscripts/transition_helper_exe/youtube-dl.py [deleted file]
devscripts/zsh-completion.in [new file with mode: 0644]
devscripts/zsh-completion.py [new file with mode: 0755]
docs/conf.py
setup.py
test/helper.py
test/parameters.json
test/swftests/ConstArrayAccess.as [new file with mode: 0644]
test/swftests/ConstantInt.as [new file with mode: 0644]
test/swftests/DictCall.as [new file with mode: 0644]
test/swftests/EqualsOperator.as [new file with mode: 0644]
test/swftests/MemberAssignment.as [new file with mode: 0644]
test/swftests/NeOperator.as [new file with mode: 0644]
test/swftests/PrivateVoidCall.as [new file with mode: 0644]
test/swftests/StringBasics.as [new file with mode: 0644]
test/swftests/StringCharCodeAt.as [new file with mode: 0644]
test/swftests/StringConversion.as [new file with mode: 0644]
test/test_YoutubeDL.py
test/test_age_restriction.py
test/test_all_urls.py
test/test_cache.py [new file with mode: 0644]
test/test_compat.py [new file with mode: 0644]
test/test_download.py
test/test_execution.py
test/test_playlists.py [deleted file]
test/test_subtitles.py
test/test_swfinterp.py
test/test_unicode_literals.py
test/test_utils.py
test/test_write_annotations.py
test/test_write_info_json.py
test/test_youtube_lists.py
test/test_youtube_signature.py
youtube-dl [deleted file]
youtube-dl.exe [deleted file]
youtube_dl/YoutubeDL.py
youtube_dl/__init__.py
youtube_dl/__main__.py
youtube_dl/aes.py
youtube_dl/cache.py [new file with mode: 0644]
youtube_dl/compat.py [new file with mode: 0644]
youtube_dl/downloader/__init__.py
youtube_dl/downloader/common.py
youtube_dl/downloader/f4m.py
youtube_dl/downloader/hls.py
youtube_dl/downloader/http.py
youtube_dl/downloader/mplayer.py
youtube_dl/downloader/rtmp.py
youtube_dl/extractor/__init__.py
youtube_dl/extractor/abc.py
youtube_dl/extractor/academicearth.py
youtube_dl/extractor/addanime.py
youtube_dl/extractor/adultswim.py
youtube_dl/extractor/allocine.py
youtube_dl/extractor/anysex.py [new file with mode: 0644]
youtube_dl/extractor/aol.py
youtube_dl/extractor/aparat.py
youtube_dl/extractor/appletrailers.py
youtube_dl/extractor/ard.py
youtube_dl/extractor/arte.py
youtube_dl/extractor/audiomack.py [new file with mode: 0644]
youtube_dl/extractor/auengine.py
youtube_dl/extractor/azubu.py [new file with mode: 0644]
youtube_dl/extractor/bambuser.py
youtube_dl/extractor/bandcamp.py
youtube_dl/extractor/bbccouk.py
youtube_dl/extractor/beeg.py [new file with mode: 0644]
youtube_dl/extractor/behindkink.py [new file with mode: 0644]
youtube_dl/extractor/bet.py [new file with mode: 0644]
youtube_dl/extractor/bild.py [new file with mode: 0644]
youtube_dl/extractor/bliptv.py
youtube_dl/extractor/bpb.py [new file with mode: 0644]
youtube_dl/extractor/br.py
youtube_dl/extractor/breakcom.py
youtube_dl/extractor/brightcove.py
youtube_dl/extractor/buzzfeed.py [new file with mode: 0644]
youtube_dl/extractor/byutv.py
youtube_dl/extractor/canalplus.py
youtube_dl/extractor/cbs.py
youtube_dl/extractor/cbsnews.py
youtube_dl/extractor/ceskatelevize.py
youtube_dl/extractor/channel9.py
youtube_dl/extractor/chilloutzone.py
youtube_dl/extractor/cinchcast.py [new file with mode: 0644]
youtube_dl/extractor/cinemassacre.py [deleted file]
youtube_dl/extractor/clipfish.py
youtube_dl/extractor/cliphunter.py
youtube_dl/extractor/clipsyndicate.py
youtube_dl/extractor/cloudy.py [new file with mode: 0644]
youtube_dl/extractor/cnet.py
youtube_dl/extractor/cnn.py
youtube_dl/extractor/collegehumor.py
youtube_dl/extractor/comedycentral.py
youtube_dl/extractor/common.py
youtube_dl/extractor/condenast.py
youtube_dl/extractor/cracked.py
youtube_dl/extractor/crunchyroll.py
youtube_dl/extractor/cspan.py
youtube_dl/extractor/d8.py [deleted file]
youtube_dl/extractor/dailymotion.py
youtube_dl/extractor/daum.py
youtube_dl/extractor/dbtv.py [new file with mode: 0644]
youtube_dl/extractor/deezer.py [new file with mode: 0644]
youtube_dl/extractor/defense.py
youtube_dl/extractor/dfb.py
youtube_dl/extractor/discovery.py
youtube_dl/extractor/divxstage.py
youtube_dl/extractor/dotsub.py
youtube_dl/extractor/dropbox.py
youtube_dl/extractor/drtuber.py [new file with mode: 0644]
youtube_dl/extractor/drtv.py
youtube_dl/extractor/dump.py [new file with mode: 0644]
youtube_dl/extractor/ebaumsworld.py
youtube_dl/extractor/ehow.py
youtube_dl/extractor/eighttracks.py
youtube_dl/extractor/einthusan.py [new file with mode: 0644]
youtube_dl/extractor/eitb.py
youtube_dl/extractor/ellentv.py [new file with mode: 0644]
youtube_dl/extractor/empflix.py
youtube_dl/extractor/eporner.py [new file with mode: 0644]
youtube_dl/extractor/escapist.py
youtube_dl/extractor/everyonesmixtape.py
youtube_dl/extractor/expotv.py [new file with mode: 0644]
youtube_dl/extractor/extremetube.py
youtube_dl/extractor/facebook.py
youtube_dl/extractor/faz.py
youtube_dl/extractor/fc2.py
youtube_dl/extractor/firedrive.py
youtube_dl/extractor/firsttv.py
youtube_dl/extractor/fivemin.py
youtube_dl/extractor/fktv.py
youtube_dl/extractor/flickr.py
youtube_dl/extractor/folketinget.py [new file with mode: 0644]
youtube_dl/extractor/fourtube.py
youtube_dl/extractor/foxgay.py [new file with mode: 0644]
youtube_dl/extractor/foxnews.py [new file with mode: 0644]
youtube_dl/extractor/franceinter.py
youtube_dl/extractor/francetv.py
youtube_dl/extractor/freevideo.py [new file with mode: 0644]
youtube_dl/extractor/funnyordie.py
youtube_dl/extractor/gamekings.py
youtube_dl/extractor/gameone.py
youtube_dl/extractor/gamespot.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/giantbomb.py [new file with mode: 0644]
youtube_dl/extractor/glide.py [new file with mode: 0644]
youtube_dl/extractor/globo.py [new file with mode: 0644]
youtube_dl/extractor/godtube.py
youtube_dl/extractor/goldenmoustache.py [new file with mode: 0644]
youtube_dl/extractor/golem.py [new file with mode: 0644]
youtube_dl/extractor/googleplus.py
youtube_dl/extractor/googlesearch.py
youtube_dl/extractor/gorillavid.py
youtube_dl/extractor/goshgay.py
youtube_dl/extractor/grooveshark.py [new file with mode: 0644]
youtube_dl/extractor/hark.py
youtube_dl/extractor/heise.py [new file with mode: 0644]
youtube_dl/extractor/helsinki.py
youtube_dl/extractor/hornbunny.py [new file with mode: 0644]
youtube_dl/extractor/hostingbulk.py [new file with mode: 0644]
youtube_dl/extractor/hotnewhiphop.py
youtube_dl/extractor/howcast.py
youtube_dl/extractor/howstuffworks.py [new file with mode: 0644]
youtube_dl/extractor/huffpost.py
youtube_dl/extractor/iconosquare.py
youtube_dl/extractor/ign.py
youtube_dl/extractor/imdb.py
youtube_dl/extractor/instagram.py
youtube_dl/extractor/internetvideoarchive.py
youtube_dl/extractor/iprima.py
youtube_dl/extractor/ivi.py
youtube_dl/extractor/izlesene.py [new file with mode: 0644]
youtube_dl/extractor/jadorecettepub.py
youtube_dl/extractor/jeuxvideo.py
youtube_dl/extractor/jove.py [new file with mode: 0644]
youtube_dl/extractor/jpopsukitv.py
youtube_dl/extractor/jukebox.py
youtube_dl/extractor/justintv.py [deleted file]
youtube_dl/extractor/kankan.py
youtube_dl/extractor/khanacademy.py
youtube_dl/extractor/kickstarter.py
youtube_dl/extractor/kontrtube.py
youtube_dl/extractor/ku6.py
youtube_dl/extractor/laola1tv.py [new file with mode: 0644]
youtube_dl/extractor/lifenews.py
youtube_dl/extractor/liveleak.py
youtube_dl/extractor/livestream.py
youtube_dl/extractor/lrt.py [new file with mode: 0644]
youtube_dl/extractor/lynda.py
youtube_dl/extractor/m6.py
youtube_dl/extractor/mailru.py
youtube_dl/extractor/malemotion.py
youtube_dl/extractor/mdr.py
youtube_dl/extractor/metacafe.py
youtube_dl/extractor/metacritic.py
youtube_dl/extractor/mgoon.py [new file with mode: 0644]
youtube_dl/extractor/minhateca.py [new file with mode: 0644]
youtube_dl/extractor/ministrygrid.py [new file with mode: 0644]
youtube_dl/extractor/mitele.py [new file with mode: 0644]
youtube_dl/extractor/mixcloud.py
youtube_dl/extractor/mlb.py
youtube_dl/extractor/moevideo.py [new file with mode: 0644]
youtube_dl/extractor/mofosex.py
youtube_dl/extractor/mojvideo.py [new file with mode: 0644]
youtube_dl/extractor/moniker.py [new file with mode: 0644]
youtube_dl/extractor/mooshare.py
youtube_dl/extractor/motherless.py
youtube_dl/extractor/movieclips.py [new file with mode: 0644]
youtube_dl/extractor/moviezine.py
youtube_dl/extractor/movshare.py
youtube_dl/extractor/mpora.py
youtube_dl/extractor/mtv.py
youtube_dl/extractor/muenchentv.py [new file with mode: 0644]
youtube_dl/extractor/musicplayon.py
youtube_dl/extractor/musicvault.py [new file with mode: 0644]
youtube_dl/extractor/muzu.py
youtube_dl/extractor/myspace.py
youtube_dl/extractor/myspass.py
youtube_dl/extractor/myvideo.py
youtube_dl/extractor/myvidster.py [new file with mode: 0644]
youtube_dl/extractor/naver.py
youtube_dl/extractor/nba.py
youtube_dl/extractor/nbc.py
youtube_dl/extractor/ndr.py
youtube_dl/extractor/newgrounds.py
youtube_dl/extractor/newstube.py
youtube_dl/extractor/nfb.py
youtube_dl/extractor/nfl.py [new file with mode: 0644]
youtube_dl/extractor/nhl.py
youtube_dl/extractor/niconico.py
youtube_dl/extractor/ninegag.py
youtube_dl/extractor/noco.py
youtube_dl/extractor/normalboots.py
youtube_dl/extractor/nosvideo.py [new file with mode: 0644]
youtube_dl/extractor/novamov.py
youtube_dl/extractor/nowness.py
youtube_dl/extractor/nowvideo.py
youtube_dl/extractor/npo.py
youtube_dl/extractor/ntv.py
youtube_dl/extractor/nuvid.py
youtube_dl/extractor/nytimes.py
youtube_dl/extractor/oe1.py [deleted file]
youtube_dl/extractor/oktoberfesttv.py [new file with mode: 0644]
youtube_dl/extractor/ooyala.py
youtube_dl/extractor/orf.py
youtube_dl/extractor/patreon.py [new file with mode: 0644]
youtube_dl/extractor/pbs.py
youtube_dl/extractor/phoenix.py [new file with mode: 0644]
youtube_dl/extractor/photobucket.py
youtube_dl/extractor/planetaplay.py [new file with mode: 0644]
youtube_dl/extractor/played.py [new file with mode: 0644]
youtube_dl/extractor/playfm.py [new file with mode: 0644]
youtube_dl/extractor/playvid.py
youtube_dl/extractor/podomatic.py
youtube_dl/extractor/pornhd.py
youtube_dl/extractor/pornhub.py
youtube_dl/extractor/pornotube.py
youtube_dl/extractor/pornoxo.py [new file with mode: 0644]
youtube_dl/extractor/promptfile.py [new file with mode: 0644]
youtube_dl/extractor/prosiebensat1.py
youtube_dl/extractor/quickvid.py [new file with mode: 0644]
youtube_dl/extractor/radiode.py [new file with mode: 0644]
youtube_dl/extractor/rai.py
youtube_dl/extractor/rbmaradio.py
youtube_dl/extractor/redtube.py
youtube_dl/extractor/reverbnation.py
youtube_dl/extractor/ringtv.py
youtube_dl/extractor/ro220.py
youtube_dl/extractor/rtlnl.py [new file with mode: 0644]
youtube_dl/extractor/rtlnow.py
youtube_dl/extractor/rts.py
youtube_dl/extractor/rtve.py
youtube_dl/extractor/ruhd.py
youtube_dl/extractor/rutube.py
youtube_dl/extractor/rutv.py
youtube_dl/extractor/sbs.py [new file with mode: 0644]
youtube_dl/extractor/scivee.py
youtube_dl/extractor/screencast.py
youtube_dl/extractor/screenwavemedia.py [new file with mode: 0644]
youtube_dl/extractor/servingsys.py
youtube_dl/extractor/sexu.py [new file with mode: 0644]
youtube_dl/extractor/sexykarma.py [new file with mode: 0644]
youtube_dl/extractor/shared.py
youtube_dl/extractor/sharesix.py [new file with mode: 0644]
youtube_dl/extractor/sina.py
youtube_dl/extractor/slideshare.py
youtube_dl/extractor/slutload.py
youtube_dl/extractor/smotri.py
youtube_dl/extractor/sockshare.py
youtube_dl/extractor/sohu.py
youtube_dl/extractor/soundcloud.py
youtube_dl/extractor/space.py
youtube_dl/extractor/spankwire.py
youtube_dl/extractor/spiegel.py
youtube_dl/extractor/spiegeltv.py
youtube_dl/extractor/sport5.py [new file with mode: 0644]
youtube_dl/extractor/sportbox.py [new file with mode: 0644]
youtube_dl/extractor/sportdeutschland.py [new file with mode: 0644]
youtube_dl/extractor/srmediathek.py [new file with mode: 0644]
youtube_dl/extractor/stanfordoc.py
youtube_dl/extractor/streamcloud.py
youtube_dl/extractor/subtitles.py
youtube_dl/extractor/sunporno.py [new file with mode: 0644]
youtube_dl/extractor/swrmediathek.py
youtube_dl/extractor/syfy.py
youtube_dl/extractor/sztvhu.py
youtube_dl/extractor/tagesschau.py
youtube_dl/extractor/tapely.py [new file with mode: 0644]
youtube_dl/extractor/tass.py [new file with mode: 0644]
youtube_dl/extractor/teachertube.py
youtube_dl/extractor/teamcoco.py
youtube_dl/extractor/techtalks.py
youtube_dl/extractor/ted.py
youtube_dl/extractor/telebruxelles.py [new file with mode: 0644]
youtube_dl/extractor/telecinco.py [new file with mode: 0644]
youtube_dl/extractor/telemb.py [new file with mode: 0644]
youtube_dl/extractor/tf1.py
youtube_dl/extractor/theonion.py [new file with mode: 0644]
youtube_dl/extractor/theplatform.py
youtube_dl/extractor/thesixtyone.py [new file with mode: 0644]
youtube_dl/extractor/thisav.py
youtube_dl/extractor/thvideo.py [new file with mode: 0644]
youtube_dl/extractor/tinypic.py
youtube_dl/extractor/tlc.py
youtube_dl/extractor/tmz.py [new file with mode: 0644]
youtube_dl/extractor/tnaflix.py [new file with mode: 0644]
youtube_dl/extractor/toypics.py
youtube_dl/extractor/traileraddict.py
youtube_dl/extractor/trilulilu.py
youtube_dl/extractor/trutube.py
youtube_dl/extractor/tube8.py
youtube_dl/extractor/tudou.py
youtube_dl/extractor/tumblr.py
youtube_dl/extractor/tunein.py [new file with mode: 0644]
youtube_dl/extractor/turbo.py [new file with mode: 0644]
youtube_dl/extractor/tvigle.py
youtube_dl/extractor/tvp.py
youtube_dl/extractor/tvplay.py
youtube_dl/extractor/twentyfourvideo.py [new file with mode: 0644]
youtube_dl/extractor/twitch.py [new file with mode: 0644]
youtube_dl/extractor/ubu.py [new file with mode: 0644]
youtube_dl/extractor/udemy.py
youtube_dl/extractor/unistra.py
youtube_dl/extractor/ustream.py
youtube_dl/extractor/vbox7.py
youtube_dl/extractor/veehd.py
youtube_dl/extractor/vesti.py
youtube_dl/extractor/vevo.py
youtube_dl/extractor/vgtv.py [new file with mode: 0644]
youtube_dl/extractor/vh1.py
youtube_dl/extractor/vice.py [new file with mode: 0644]
youtube_dl/extractor/viddler.py
youtube_dl/extractor/videobam.py
youtube_dl/extractor/videofyme.py
youtube_dl/extractor/videomega.py [new file with mode: 0644]
youtube_dl/extractor/videopremium.py
youtube_dl/extractor/videott.py
youtube_dl/extractor/videoweed.py
youtube_dl/extractor/vidme.py [new file with mode: 0644]
youtube_dl/extractor/vidzi.py [new file with mode: 0644]
youtube_dl/extractor/vimeo.py
youtube_dl/extractor/vine.py
youtube_dl/extractor/vk.py
youtube_dl/extractor/vodlocker.py
youtube_dl/extractor/vporn.py [new file with mode: 0644]
youtube_dl/extractor/vrt.py [new file with mode: 0644]
youtube_dl/extractor/vube.py
youtube_dl/extractor/vuclip.py
youtube_dl/extractor/walla.py [new file with mode: 0644]
youtube_dl/extractor/washingtonpost.py
youtube_dl/extractor/wat.py
youtube_dl/extractor/wayofthemaster.py [new file with mode: 0644]
youtube_dl/extractor/wdr.py
youtube_dl/extractor/weibo.py
youtube_dl/extractor/wimp.py
youtube_dl/extractor/wistia.py
youtube_dl/extractor/worldstarhiphop.py
youtube_dl/extractor/wrzuta.py
youtube_dl/extractor/xbef.py
youtube_dl/extractor/xboxclips.py [new file with mode: 0644]
youtube_dl/extractor/xhamster.py
youtube_dl/extractor/xminus.py [new file with mode: 0644]
youtube_dl/extractor/xnxx.py
youtube_dl/extractor/xtube.py
youtube_dl/extractor/yahoo.py
youtube_dl/extractor/ynet.py [new file with mode: 0644]
youtube_dl/extractor/youjizz.py
youtube_dl/extractor/youku.py
youtube_dl/extractor/youporn.py
youtube_dl/extractor/yourupload.py [new file with mode: 0644]
youtube_dl/extractor/youtube.py
youtube_dl/extractor/zdf.py
youtube_dl/extractor/zingmp3.py [new file with mode: 0644]
youtube_dl/jsinterp.py
youtube_dl/options.py [new file with mode: 0644]
youtube_dl/postprocessor/__init__.py
youtube_dl/postprocessor/atomicparsley.py
youtube_dl/postprocessor/common.py
youtube_dl/postprocessor/execafterdownload.py [new file with mode: 0644]
youtube_dl/postprocessor/ffmpeg.py
youtube_dl/postprocessor/xattrpp.py
youtube_dl/swfinterp.py
youtube_dl/update.py
youtube_dl/utils.py
youtube_dl/version.py

index 37b2fa8d3b3a914a55592af8dace119ecc23a824..86312d4e4185cd6dbd56627a3f2ce9f8ef5e9a43 100644 (file)
@@ -11,6 +11,7 @@ MANIFEST
 README.txt
 youtube-dl.1
 youtube-dl.bash-completion
+youtube-dl.fish
 youtube-dl
 youtube-dl.exe
 youtube-dl.tar.gz
@@ -26,5 +27,7 @@ updates_key.pem
 *.m4a
 *.m4v
 *.part
+*.swp
 test/testdata
 .tox
+youtube-dl.zsh
diff --git a/AUTHORS b/AUTHORS
new file mode 100644 (file)
index 0000000..bfa00f9
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,94 @@
+Ricardo Garcia Gonzalez
+Danny Colligan
+Benjamin Johnson
+Vasyl' Vavrychuk
+Witold Baryluk
+Paweł Paprota
+Gergely Imreh
+Rogério Brito
+Philipp Hagemeister
+Sören Schulze
+Kevin Ngo
+Ori Avtalion
+shizeeg
+Filippo Valsorda
+Christian Albrecht
+Dave Vasilevsky
+Jaime Marquínez Ferrándiz
+Jeff Crouse
+Osama Khalid
+Michael Walter
+M. Yasoob Ullah Khalid
+Julien Fraichard
+Johny Mo Swag
+Axel Noack
+Albert Kim
+Pierre Rudloff
+Huarong Huo
+Ismael Mejía
+Steffan 'Ruirize' James
+Andras Elso
+Jelle van der Waa
+Marcin Cieślak
+Anton Larionov
+Takuya Tsuchida
+Sergey M.
+Michael Orlitzky
+Chris Gahan
+Saimadhav Heblikar
+Mike Col
+Oleg Prutz
+pulpe
+Andreas Schmitz
+Michael Kaiser
+Niklas Laxström
+David Triendl
+Anthony Weems
+David Wagner
+Juan C. Olivares
+Mattias Harrysson
+phaer
+Sainyam Kapoor
+Nicolas Évrard
+Jason Normore
+Hoje Lee
+Adam Thalhammer
+Georg Jähnig
+Ralf Haring
+Koki Takahashi
+Ariset Llerena
+Adam Malcontenti-Wilson
+Tobias Bell
+Naglis Jonaitis
+Charles Chen
+Hassaan Ali
+Dobrosław Żybort
+David Fabijan
+Sebastian Haas
+Alexander Kirk
+Erik Johnson
+Keith Beckman
+Ole Ernst
+Aaron McDaniel (mcd1992)
+Magnus Kolstad
+Hari Padmanaban
+Carlos Ramos
+5moufl
+lenaten
+Dennis Scheiba
+Damon Timm
+winwon
+Xavier Beynon
+Gabriel Schubiner
+xantares
+Jan Matějka
+Mauroy Sébastien
+William Sewell
+Dao Hoang Son
+Oskar Jauch
+Matthew Rayfield
+t0mm0
+Tithen-Firion
+Zack Fernandes
+cryptonaut
+Adrian Kretz
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..0ff7b39
--- /dev/null
@@ -0,0 +1,136 @@
+Please include the full output of the command when run with `--verbose`. The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
+
+Please re-read your issue to avoid a couple of common mistakes (you can and should use this as a checklist):
+
+### Is the description of the issue itself sufficient?
+
+We often get issue reports that we cannot really decipher. While in most cases we eventually get the required information after asking back multiple times, this poses an unnecessary drain on our resources. Many contributors, including myself, are also not native speakers, so we may misread some parts.
+
+So please elaborate on what feature you are requesting, or what bug you want to be fixed. Make sure that it's obvious
+
+- What the problem is
+- How it could be fixed
+- What your proposed solution would look like
+
+If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
+
+For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
+
+Site support requests **must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
+
+### Are you using the latest version?
+
+Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive concern bugs that have already been fixed, filed from outdated versions. This goes for feature requests as well.
+
+### Is the issue already documented?
+
+Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or at https://github.com/rg3/youtube-dl/search?type=Issues . If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
+
+### Why are existing options not enough?
+
+Before requesting a new feature, please have a quick peek at [the list of supported options](https://github.com/rg3/youtube-dl/blob/master/README.md#synopsis). Many feature requests are for features that actually exist already! Please, absolutely do show off your work in the issue report and detail how the existing similar options do *not* solve your problem.
+
+### Is there enough context in your bug report?
+
+People want to solve problems, and often think they do us a favor by breaking down their larger problems (e.g. wanting to skip already downloaded files) to a specific request (e.g. asking us to check whether the file exists before downloading the info page). However, what often happens is that they break down the problem into two steps: one simple, and one impossible (or extremely complicated) one.
+
+We are then presented with a very complicated request when the original problem could be solved far more easily, e.g. by recording the downloaded video IDs in a separate file. To avoid this, you must include the greater context where it is non-obvious. In particular, every feature request that does not consist of adding support for a new site should contain a use case scenario that explains in what situation the missing feature would be useful.
+
+### Does the issue involve one problem, and one problem only?
+
+Some of our users seem to think there is a limit on the number of issues they can or should open. There is no such limit; open one issue per problem. While it may seem appealing to dump all your issues into one ticket, that means that someone who solves one of your issues cannot mark the issue as closed. Typically, reporting a bunch of issues leads to the ticket lingering, since nobody wants to attack that behemoth, until someone mercifully splits the issue into multiple ones.
+
+In particular, every site support request issue should only pertain to services at one site (generally under a common domain, but always using the same backend technology). Do not request support for vimeo user videos, Whitehouse podcasts, and Google Plus pages in the same issue. Also, make sure that you don't post bug reports alongside feature requests. As a rule of thumb, a feature request does not include outputs of youtube-dl that are not immediately related to the feature at hand. Do not post reports of a network error alongside the request for a new video service.
+
+### Is anyone going to need the feature?
+
+Only post features that you (or an incapacitated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.
+
+### Is your question about youtube-dl?
+
+It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl: they concern a different application, or even the reporter's own. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
+
+# DEVELOPER INSTRUCTIONS
+
+Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution.
+
+To run youtube-dl as a developer, you don't need to build anything either. Simply execute
+
+    python -m youtube_dl
+
+To run the tests, simply invoke your favorite test runner, or execute a test file directly; any of the following work:
+
+    python -m unittest discover
+    python test/test_download.py
+    nosetests
+
+If you want to create a build of youtube-dl yourself, you'll need
+
+* python
+* make
+* pandoc
+* zip
+* nosetests
+
+### Adding support for a new site
+
+If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):
+
+1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
+2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
+3. Start a new git branch with `cd youtube-dl; git checkout -b yourextractor`
+4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:
+    ```python
+    # coding: utf-8
+    from __future__ import unicode_literals
+
+    from .common import InfoExtractor
+
+
+    class YourExtractorIE(InfoExtractor):
+        _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
+        _TEST = {
+            'url': 'http://yourextractor.com/watch/42',
+            'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
+            'info_dict': {
+                'id': '42',
+                'ext': 'mp4',
+                'title': 'Video title goes here',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                # TODO more properties, either as:
+                # * A value
+                # * MD5 checksum; start the string with md5:
+                # * A regular expression; start the string with re:
+                # * Any Python type (for example int or float)
+            }
+        }
+
+        def _real_extract(self, url):
+            video_id = self._match_id(url)
+            webpage = self._download_webpage(url, video_id)
+
+            # TODO more code goes here, for example ...
+            title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+
+            return {
+                'id': video_id,
+                'title': title,
+                'description': self._og_search_description(webpage),
+                # TODO more properties (see youtube_dl/extractor/common.py)
+            }
+    ```
+5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries (see the sketch after this diff). The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
+7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
+8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501).
+9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
+
+        $ git add youtube_dl/extractor/__init__.py
+        $ git add youtube_dl/extractor/yourextractor.py
+        $ git commit -m '[yourextractor] Add new extractor'
+        $ git push origin yourextractor
+
+10. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
+
+In any case, thank you very much for your contributions!
+
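As step 6 above notes, multiple tests mean renaming `_TEST` to `_TESTS`. A minimal sketch of what that list might look like for the hypothetical `YourExtractorIE` from the template (the second URL and the use of the `only_matching` flag are illustrative assumptions, not part of this commit):

```python
from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
    # With several tests, _TEST becomes _TESTS, a list of dicts; they run as
    # TestDownload.test_YourExtractor, TestDownload.test_YourExtractor_1, ...
    _TESTS = [{
        'url': 'http://yourextractor.com/watch/42',
        'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
        },
    }, {
        # a second URL that should match _VALID_URL but is not downloaded
        'url': 'http://yourextractor.com/watch/43',
        'only_matching': True,
    }]
```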
diff --git a/LATEST_VERSION b/LATEST_VERSION
deleted file mode 100644 (file)
index a334573..0000000
+++ /dev/null
@@ -1 +0,0 @@
-2012.12.99
index d43cc1f3ba95e2ec16728320b5dd64b8a3558abb..5743f605a2ab4e93e76416732f6e42b252e87150 100644 (file)
@@ -2,5 +2,6 @@ include README.md
 include test/*.py
 include test/*.json
 include youtube-dl.bash-completion
+include youtube-dl.fish
 include youtube-dl.1
 recursive-include docs Makefile conf.py *.rst
index c079761efa9b2e60887575f4cd7626d0abe469a2..d5b6e4307cb7a258016203b225eb2bf99331ecf9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,15 +1,16 @@
-all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
+all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
 
 clean:
-       rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
+       rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part *.info.json CONTRIBUTING.md.tmp
 
 cleanall: clean
        rm -f youtube-dl youtube-dl.exe
 
-PREFIX=/usr/local
-BINDIR=$(PREFIX)/bin
-MANDIR=$(PREFIX)/man
-PYTHON=/usr/bin/env python
+PREFIX ?= /usr/local
+BINDIR ?= $(PREFIX)/bin
+MANDIR ?= $(PREFIX)/man
+SHAREDIR ?= $(PREFIX)/share
+PYTHON ?= /usr/bin/env python
 
 # set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
 ifeq ($(PREFIX),/usr)
@@ -22,13 +23,17 @@ else
        endif
 endif
 
-install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
+install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
        install -d $(DESTDIR)$(BINDIR)
        install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
        install -d $(DESTDIR)$(MANDIR)/man1
        install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
        install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
        install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
+       install -d $(DESTDIR)$(SHAREDIR)/zsh/site-functions
+       install -m 644 youtube-dl.zsh $(DESTDIR)$(SHAREDIR)/zsh/site-functions/_youtube-dl
+       install -d $(DESTDIR)$(SYSCONFDIR)/fish/completions
+       install -m 644 youtube-dl.fish $(DESTDIR)$(SYSCONFDIR)/fish/completions/youtube-dl.fish
 
 test:
        #nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
@@ -36,9 +41,9 @@ test:
 
 tar: youtube-dl.tar.gz
 
-.PHONY: all clean install test tar bash-completion pypi-files
+.PHONY: all clean install test tar bash-completion pypi-files zsh-completion fish-completion
 
-pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1
+pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
 
 youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
        zip --quiet youtube-dl youtube_dl/*.py youtube_dl/*/*.py
@@ -51,6 +56,9 @@ youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
 README.md: youtube_dl/*.py youtube_dl/*/*.py
        COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py
 
+CONTRIBUTING.md: README.md
+       python devscripts/make_contributing.py README.md CONTRIBUTING.md
+
 README.txt: README.md
        pandoc -f markdown -t plain README.md -o README.txt
 
@@ -64,7 +72,17 @@ youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-co
 
 bash-completion: youtube-dl.bash-completion
 
-youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
+youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
+       python devscripts/zsh-completion.py
+
+zsh-completion: youtube-dl.zsh
+
+youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
+       python devscripts/fish-completion.py
+
+fish-completion: youtube-dl.fish
+
+youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
        @tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
                --exclude '*.DS_Store' \
                --exclude '*.kate-swp' \
@@ -78,5 +96,6 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
                -- \
                bin devscripts test youtube_dl docs \
                LICENSE README.md README.txt \
-               Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
+               Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
+               youtube-dl.zsh youtube-dl.fish setup.py \
                youtube-dl
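The new `youtube-dl.zsh` and `youtube-dl.fish` targets above delegate to devscripts/zsh-completion.py and devscripts/fish-completion.py, both added by this commit but not diffed here. As a hedged sketch only: by analogy with devscripts/bash-completion.py (whose diff appears further down), such a generator plausibly fills a `{{flags}}` placeholder in the matching `.in` template; the placeholder name, template contents and final invocation are assumptions.

```python
#!/usr/bin/env python
# Hypothetical sketch in the style of devscripts/bash-completion.py;
# the real devscripts/fish-completion.py added by this commit may differ.
from __future__ import unicode_literals

import os
from os.path import dirname as dirn
import sys

sys.path.append(dirn(dirn(os.path.abspath(__file__))))
import youtube_dl

FISH_COMPLETION_FILE = 'youtube-dl.fish'
FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'


def build_completion(opt_parser):
    flags = []
    for group in opt_parser.option_groups:
        for option in group.option_list:
            # collect every long flag, e.g. --rate-limit
            flags.append(option.get_opt_string())
    with open(FISH_COMPLETION_TEMPLATE) as f:
        template = f.read()
    with open(FISH_COMPLETION_FILE, 'w') as f:
        # substitute the assumed {{flags}} placeholder in the template
        f.write(template.replace('{{flags}}', ' '.join(flags)))


# parseOpts() returns (parser, opts, args); only the parser is needed here
build_completion(youtube_dl.parseOpts()[0])
```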
index a42dfb8567ffb86e926756aec59cbee94cee29bf..e41ee89854c25157a43ac3f3303367aa03a1244d 100644 (file)
--- a/README.md
+++ b/README.md
@@ -17,12 +17,20 @@ If you do not have curl, you can alternatively use a recent wget:
 
 Windows users can [download a .exe file](https://yt-dl.org/latest/youtube-dl.exe) and place it in their home directory or any other location on their [PATH](http://en.wikipedia.org/wiki/PATH_%28variable%29).
 
+OS X users can install **youtube-dl** with [Homebrew](http://brew.sh/).
+
+    brew install youtube-dl
+
+You can also use pip:
+
+    sudo pip install youtube-dl
+
 Alternatively, refer to the developer instructions below for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .
 
 # DESCRIPTION
 **youtube-dl** is a small command-line program to download videos from
 YouTube.com and a few more sites. It requires the Python interpreter, version
-2.6, 2.7, or 3.3+, and it is not platform specific. It should work on
+2.6, 2.7, or 3.2+, and it is not platform specific. It should work on
 your Unix box, on Windows or on Mac OS X. It is released to the public domain,
 which means you can modify it, redistribute it or use it however you like.
 
@@ -57,14 +65,17 @@ which means you can modify it, redistribute it or use it however you like.
                                      this is not possible instead of searching.
     --ignore-config                  Do not read configuration files. When given
                                      in the global configuration file /etc
-                                     /youtube-dl.conf: do not read the user
-                                     configuration in ~/.config/youtube-dl.conf
-                                     (%APPDATA%/youtube-dl/config.txt on
-                                     Windows)
+                                     /youtube-dl.conf: Do not read the user
+                                     configuration in ~/.config/youtube-
+                                     dl/config (%APPDATA%/youtube-dl/config.txt
+                                     on Windows)
+    --flat-playlist                  Do not extract the videos of a playlist,
+                                     only list them.
 
 ## Video Selection:
     --playlist-start NUMBER          playlist video to start at (default is 1)
     --playlist-end NUMBER            playlist video to end at (default is last)
+    --playlist-reverse               Download playlist videos in reverse order
     --match-title REGEX              download only matching titles (regex or
                                      caseless sub-string)
     --reject-title REGEX             skip download for matching titles (regex or
@@ -83,7 +94,8 @@ which means you can modify it, redistribute it or use it however you like.
                                      COUNT views
     --max-views COUNT                Do not download any videos with more than
                                      COUNT views
-    --no-playlist                    download only the currently playing video
+    --no-playlist                    If the URL refers to a video and a
+                                     playlist, download only the video.
     --age-limit YEARS                download only videos suitable for the given
                                      age
     --download-archive FILE          Download only videos not listed in the
@@ -91,8 +103,6 @@ which means you can modify it, redistribute it or use it however you like.
                                      downloaded videos in it.
     --include-ads                    Download advertisements as well
                                      (experimental)
-    --youtube-include-dash-manifest  Try to download the DASH manifest on
-                                     YouTube videos (experimental)
 
 ## Download Options:
     -r, --rate-limit LIMIT           maximum download rate in bytes per second
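Since the merged branch touches exactly this rate limiter, a small aside: the same throttle is reachable from the Python embedding API via the `ratelimit` parameter (read by youtube_dl/downloader/common.py, in bytes per second). A minimal sketch, with an assumed limit of 50 KiB/s:

```python
import youtube_dl

ydl_opts = {
    # Python-level equivalent of -r/--rate-limit; value is bytes per second
    'ratelimit': 50 * 1024,
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```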
@@ -123,17 +133,19 @@ which means you can modify it, redistribute it or use it however you like.
                                      %(upload_date)s for the upload date
                                      (YYYYMMDD), %(extractor)s for the provider
                                      (youtube, metacafe, etc), %(id)s for the
-                                     video id, %(playlist)s for the playlist the
+                                     video id, %(playlist_title)s,
+                                     %(playlist_id)s, or %(playlist)s (=title if
+                                     present, ID otherwise) for the playlist the
                                      video is in, %(playlist_index)s for the
-                                     position in the playlist and %% for a
-                                     literal percent. %(height)s and %(width)s
-                                     for the width and height of the video
-                                     format. %(resolution)s for a textual
+                                     position in the playlist. %(height)s and
+                                     %(width)s for the width and height of the
+                                     video format. %(resolution)s for a textual
                                      description of the resolution of the video
-                                     format. Use - to output to stdout. Can also
-                                     be used to download to a different
-                                     directory, for example with -o '/my/downloa
-                                     ds/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
+                                     format. %% for a literal percent. Use - to
+                                     output to stdout. Can also be used to
+                                     download to a different directory, for
+                                     example with -o '/my/downloads/%(uploader)s
+                                     /%(title)s-%(id)s.%(ext)s' .
     --autonumber-size NUMBER         Specifies the number of digits in
                                      %(autonumber)s when it is present in output
                                      filename template or --auto-number option
@@ -150,7 +162,8 @@ which means you can modify it, redistribute it or use it however you like.
                                      downloads if possible.
     --no-continue                    do not resume partially downloaded files
                                      (restart from beginning)
-    --no-part                        do not use .part files
+    --no-part                        do not use .part files - write directly
+                                     into output file
     --no-mtime                       do not use the Last-modified header to set
                                      the file modification time
     --write-description              write video description to a .description
@@ -190,6 +203,10 @@ which means you can modify it, redistribute it or use it however you like.
     -j, --dump-json                  simulate, quiet but print JSON information.
                                      See --output for a description of available
                                      keys.
+    -J, --dump-single-json           simulate, quiet but print JSON information
+                                     for each command-line argument. If the URL
+                                     refers to a playlist, dump the whole
+                                     playlist information in a single line.
     --newline                        output progress bar as new lines
     --no-progress                    do not print progress bar
     --console-title                  display progress in console titlebar
@@ -208,7 +225,7 @@ which means you can modify it, redistribute it or use it however you like.
                                      information about the video. (Currently
                                      supported only for YouTube)
     --user-agent UA                  specify a custom user agent
-    --referer REF                    specify a custom referer, use if the video
+    --referer URL                    specify a custom referer, use if the video
                                      access is restricted to one domain
     --add-header FIELD:VALUE         specify a custom HTTP header and its value,
                                      separated by a colon ':'. You can use this
@@ -219,17 +236,27 @@ which means you can modify it, redistribute it or use it however you like.
 
 ## Video Format Options:
     -f, --format FORMAT              video format code, specify the order of
-                                     preference using slashes: "-f 22/17/18".
-                                     "-f mp4" and "-f flv" are also supported.
-                                     You can also use the special names "best",
-                                     "bestvideo", "bestaudio", "worst",
-                                     "worstvideo" and "worstaudio". By default,
-                                     youtube-dl will pick the best quality.
+                                     preference using slashes: -f 22/17/18 .  -f
+                                     mp4 , -f m4a and  -f flv  are also
+                                     supported. You can also use the special
+                                     names "best", "bestvideo", "bestaudio",
+                                     "worst", "worstvideo" and "worstaudio". By
+                                     default, youtube-dl will pick the best
+                                     quality. Use commas to download multiple
+                                     audio formats, such as -f
+                                     136/137/mp4/bestvideo,140/m4a/bestaudio.
+                                     You can merge the video and audio of two
+                                     formats into a single file using -f <video-
+                                     format>+<audio-format> (requires ffmpeg or
+                                     avconv), for example -f
+                                     bestvideo+bestaudio.
     --all-formats                    download all available video formats
     --prefer-free-formats            prefer free video formats unless a specific
                                      one is requested
     --max-quality FORMAT             highest quality format to download
     -F, --list-formats               list all available formats
+    --youtube-skip-dash-manifest     Do not download the DASH manifest on
+                                     YouTube videos
 
 ## Subtitle Options:
     --write-sub                      write subtitle file
@@ -245,8 +272,9 @@ which means you can modify it, redistribute it or use it however you like.
                                      language tags like 'en,pt'
 
 ## Authentication Options:
-    -u, --username USERNAME          account username
+    -u, --username USERNAME          login with this account ID
     -p, --password PASSWORD          account password
+    -2, --twofactor TWOFACTOR        two-factor auth code
     -n, --netrc                      use .netrc authentication data
     --video-password PASSWORD        video password (vimeo, smotri)
 
@@ -255,7 +283,7 @@ which means you can modify it, redistribute it or use it however you like.
                                      (requires ffmpeg or avconv and ffprobe or
                                      avprobe)
     --audio-format FORMAT            "best", "aac", "vorbis", "mp3", "m4a",
-                                     "opus", or "wav"; best by default
+                                     "opus", or "wav"; "best" by default
     --audio-quality QUALITY          ffmpeg/avconv audio quality specification,
                                      insert a value between 0 (better) and 9
                                      (worse) for VBR or a specific bitrate like
@@ -279,6 +307,10 @@ which means you can modify it, redistribute it or use it however you like.
                                      postprocessors (default)
     --prefer-ffmpeg                  Prefer ffmpeg over avconv for running the
                                      postprocessors
+    --exec CMD                       Execute a command on the file after
+                                     downloading, similar to find's -exec
+                                     syntax. Example: --exec 'adb push {}
+                                     /sdcard/Music/ && rm {}'
 
 # CONFIGURATION
 
@@ -303,10 +335,12 @@ The current default template is `%(title)s-%(id)s.%(ext)s`.
 
 In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
 
-    $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
-    youtube-dl test video ''_ä↭𝕐.mp4    # All kinds of weird characters
-    $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
-    youtube-dl_test_video_.mp4          # A simple file name
+```bash
+$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
+youtube-dl test video ''_ä↭𝕐.mp4    # All kinds of weird characters
+$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
+youtube-dl_test_video_.mp4          # A simple file name
+```
 
 # VIDEO SELECTION
 
@@ -317,17 +351,51 @@ Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`
  
 Examples:
 
-    # Download only the videos uploaded in the last 6 months
-    $ youtube-dl --dateafter now-6months
+```bash
+# Download only the videos uploaded in the last 6 months
+$ youtube-dl --dateafter now-6months
 
-    # Download only the videos uploaded on January 1, 1970
-    $ youtube-dl --date 19700101
+# Download only the videos uploaded on January 1, 1970
+$ youtube-dl --date 19700101
 
-    $ # will only download the videos uploaded in the 200x decade
-    $ youtube-dl --dateafter 20000101 --datebefore 20091231
+$ # will only download the videos uploaded in the 200x decade
+$ youtube-dl --dateafter 20000101 --datebefore 20091231
+```
 
 # FAQ
 
+### How do I update youtube-dl?
+
+If you've followed [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html), you can simply run `youtube-dl -U` (or, on Linux, `sudo youtube-dl -U`).
+
+If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
+
+If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
+
+As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
+
+    sudo apt-get remove -y youtube-dl
+
+Afterwards, simply follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html):
+
+```
+sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
+sudo chmod a+x /usr/local/bin/youtube-dl
+hash -r
+```
+
+Again, from then on you'll be able to update with `sudo youtube-dl -U`.
+
+### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
+
+YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
+
+If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to [report bugs](https://bugs.launchpad.net/ubuntu/+source/youtube-dl/+filebug) to the [Ubuntu packaging guys](mailto:ubuntu-motu@lists.ubuntu.com?subject=outdated%20version%20of%20youtube-dl) - all they have to do is update the package to a somewhat recent version. See above for a way to update.
+
+### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?
+
+By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, `--max-quality` *limits* the video quality (so if you want the best quality, do NOT pass it in), and the only option out of `-citw` that is regularly useful is `-i`.
+
 ### Can you please put the -b option back?
 
 Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the `-b` option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you're interested in. In that case, simply request it with the `-f` option and youtube-dl will try to download it.
@@ -399,52 +467,50 @@ If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):
 2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
 3. Start a new git branch with `cd youtube-dl; git checkout -b yourextractor`
 4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:
-
-        # coding: utf-8
-        from __future__ import unicode_literals
-
-        import re
-
-        from .common import InfoExtractor
-        
-        
-        class YourExtractorIE(InfoExtractor):
-            _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
-            _TEST = {
-                'url': 'http://yourextractor.com/watch/42',
-                'md5': 'TODO: md5 sum of the first 10KiB of the video file',
-                'info_dict': {
-                    'id': '42',
-                    'ext': 'mp4',
-                    'title': 'Video title goes here',
-                    # TODO more properties, either as:
-                    # * A value
-                    # * MD5 checksum; start the string with md5:
-                    # * A regular expression; start the string with re:
-                    # * Any Python type (for example int or float)
-                }
+    ```python
+    # coding: utf-8
+    from __future__ import unicode_literals
+
+    from .common import InfoExtractor
+
+
+    class YourExtractorIE(InfoExtractor):
+        _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
+        _TEST = {
+            'url': 'http://yourextractor.com/watch/42',
+            'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
+            'info_dict': {
+                'id': '42',
+                'ext': 'mp4',
+                'title': 'Video title goes here',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                # TODO more properties, either as:
+                # * A value
+                # * MD5 checksum; start the string with md5:
+                # * A regular expression; start the string with re:
+                # * Any Python type (for example int or float)
             }
+        }
 
-            def _real_extract(self, url):
-                mobj = re.match(self._VALID_URL, url)
-                video_id = mobj.group('id')
-
-                # TODO more code goes here, for example ...
-                webpage = self._download_webpage(url, video_id)
-                title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
-
-                return {
-                    'id': video_id,
-                    'title': title,
-                    # TODO more properties (see youtube_dl/extractor/common.py)
-                }
+        def _real_extract(self, url):
+            video_id = self._match_id(url)
+            webpage = self._download_webpage(url, video_id)
 
+            # TODO more code goes here, for example ...
+            title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
 
+            return {
+                'id': video_id,
+                'title': title,
+                'description': self._og_search_description(webpage),
+                # TODO more properties (see youtube_dl/extractor/common.py)
+            }
+    ```
 5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
-6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done.
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
 7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
 8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501).
-9. When the tests pass, [add](https://www.kernel.org/pub/software/scm/git/docs/git-add.html) the new files and [commit](https://www.kernel.org/pub/software/scm/git/docs/git-commit.html) them and [push](https://www.kernel.org/pub/software/scm/git/docs/git-push.html) the result, like this:
+9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
 
         $ git add youtube_dl/extractor/__init__.py
         $ git add youtube_dl/extractor/yourextractor.py
@@ -455,15 +521,27 @@ If you want to add support for a new site, you can follow this quick list (assum
 
 In any case, thank you very much for your contributions!
 
+# EMBEDDING YOUTUBE-DL
+
+youtube-dl makes the best effort to be a good command-line program, and thus should be callable from any programming language. If you encounter any problems parsing its output, feel free to [create a report](https://github.com/rg3/youtube-dl/issues/new).
+
+From a Python program, you can embed youtube-dl in a more powerful fashion, like this:
+
+    import youtube_dl
+
+    ydl_opts = {}
+    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+        ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
+
+Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
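+
+For example, here is a sketch that sets a custom logger and a progress hook (`MyLogger` and `my_hook` are illustrative names, not part of the API):
+
+    import youtube_dl
+
+
+    class MyLogger(object):
+        def debug(self, msg):
+            pass
+
+        def warning(self, msg):
+            pass
+
+        def error(self, msg):
+            print(msg)
+
+
+    def my_hook(d):
+        # Progress hooks receive a status dict; 'finished' means the download is done.
+        if d['status'] == 'finished':
+            print('Done downloading ...')
+
+    ydl_opts = {
+        'logger': MyLogger(),
+        'progress_hooks': [my_hook],
+    }
+    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+        ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])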
+
 # BUGS
 
-Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email.
+Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>. Unless you were prompted to do so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel #youtube-dl on freenode.
 
 Please include the full output of the command when run with `--verbose`. The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
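+
+For example:
+
+    $ youtube-dl --verbose 'http://www.youtube.com/watch?v=BaW_jenozKc'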
 
-For discussions, join us in the irc channel #youtube-dl on freenode.
-
-When you submit a request, please re-read it once to avoid a couple of mistakes (you can and should use this as a checklist):
+Please re-read your issue to avoid a couple of common mistakes (you can and should use this as a checklist):
 
 ### Is the description of the issue itself sufficient?
 
index 49287724d6b8d1cb6215aaadf150b8a441582993..cd26cc0895d033af03541f48815e8dad23f5161d 100755 (executable)
@@ -1,4 +1,6 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
+
 import os
 from os.path import dirname as dirn
 import sys
@@ -9,16 +11,17 @@ import youtube_dl
 BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
 BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
 
+
 def build_completion(opt_parser):
     opts_flag = []
     for group in opt_parser.option_groups:
         for option in group.option_list:
-            #for every long flag
+            # for every long flag
             opts_flag.append(option.get_opt_string())
     with open(BASH_COMPLETION_TEMPLATE) as f:
         template = f.read()
     with open(BASH_COMPLETION_FILE, "w") as f:
-        #just using the special char
+        # just using the special char
         filled_template = template.replace("{{flags}}", " ".join(opts_flag))
         f.write(filled_template)
 
index e0c3cc83ed5dd73544b383d983aca0281e86408f..7c2f49f8bb63bbe2b47efca151129a7e6b49674d 100644 (file)
@@ -142,7 +142,7 @@ def win_service_set_status(handle, status_code):
 
 def win_service_main(service_name, real_main, argc, argv_raw):
     try:
-        #args = [argv_raw[i].value for i in range(argc)]
+        # args = [argv_raw[i].value for i in range(argc)]
         stop_event = threading.Event()
         handler = HandlerEx(functools.partial(stop_event, win_service_handler))
         h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
@@ -233,6 +233,7 @@ def rmtree(path):
 
 #==============================================================================
 
+
 class BuildError(Exception):
     def __init__(self, output, code=500):
         self.output = output
@@ -369,7 +370,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea
 
 
 class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
-    actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.
+    actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching.
 
     def do_GET(self):
         path = urlparse.urlparse(self.path)
index 86aa37b5fb687acc91f29753f1bf8ba0db7b8e94..216282712c1b38b96c049f74a6cfe8a0fcd30806 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 """
 This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
diff --git a/devscripts/fish-completion.in b/devscripts/fish-completion.in
new file mode 100644 (file)
index 0000000..eb79765
--- /dev/null
@@ -0,0 +1,5 @@
+
+{{commands}}
+
+
+complete --command youtube-dl --arguments ":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
diff --git a/devscripts/fish-completion.py b/devscripts/fish-completion.py
new file mode 100755 (executable)
index 0000000..c2f2387
--- /dev/null
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+import optparse
+import os
+from os.path import dirname as dirn
+import sys
+
+sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+import youtube_dl
+from youtube_dl.utils import shell_quote
+
+FISH_COMPLETION_FILE = 'youtube-dl.fish'
+FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'
+
+EXTRA_ARGS = {
+    'recode-video': ['--arguments', 'mp4 flv ogg webm mkv', '--exclusive'],
+
+    # Options that need a file parameter
+    'download-archive': ['--require-parameter'],
+    'cookies': ['--require-parameter'],
+    'load-info': ['--require-parameter'],
+    'batch-file': ['--require-parameter'],
+}
+
+
+def build_completion(opt_parser):
+    commands = []
+
+    for group in opt_parser.option_groups:
+        for option in group.option_list:
+            long_option = option.get_opt_string().strip('-')
+            complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
+            if option._short_opts:
+                complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
+            if option.help != optparse.SUPPRESS_HELP:
+                complete_cmd += ['--description', option.help]
+            complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
+            commands.append(shell_quote(complete_cmd))
+
+    with open(FISH_COMPLETION_TEMPLATE) as f:
+        template = f.read()
+    filled_template = template.replace('{{commands}}', '\n'.join(commands))
+    with open(FISH_COMPLETION_FILE, 'w') as f:
+        f.write(filled_template)
+
+parser = youtube_dl.parseOpts()[0]
+build_completion(parser)
index 35865b2f30f9526f4a05b4bad594f078f5a56443..867ea0048fb88f1ca1382f11b1c60b17110d4fc8 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
 
 import json
 import sys
index 55912e12c4e25e400aa68cf8685d2e523d0bd253..392e3ba21ab86070f2df164362fbd177b750c726 100755 (executable)
@@ -1,8 +1,7 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
+
 import hashlib
-import shutil
-import subprocess
-import tempfile
 import urllib.request
 import json
 
index 8a824df56fe7677868ca9b03aa24de8d4b6eba76..fa389c35872c3f2b937ef6a875b46bc4dc0d04f8 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals, with_statement
 
 import rsa
 import json
@@ -11,22 +12,23 @@ except NameError:
 
 versions_info = json.load(open('update/versions.json'))
 if 'signature' in versions_info:
-       del versions_info['signature']
+    del versions_info['signature']
 
 print('Enter the PKCS1 private key, followed by a blank line:')
 privkey = b''
 while True:
-       try:
-               line = input()
-       except EOFError:
-               break
-       if line == '':
-               break
-       privkey += line.encode('ascii') + b'\n'
+    try:
+        line = input()
+    except EOFError:
+        break
+    if line == '':
+        break
+    privkey += line.encode('ascii') + b'\n'
 privkey = rsa.PrivateKey.load_pkcs1(privkey)
 
 signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
 print('signature: ' + signature)
 
 versions_info['signature'] = signature
-json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
\ No newline at end of file
+with open('update/versions.json', 'w') as versionsf:
+    json.dump(versions_info, versionsf, indent=4, sort_keys=True)
index 12c2a91949135d72edf53e41d15ae532690bc6bf..3663c8afef278f132518ed9c0286bdfe34d028a7 100755 (executable)
@@ -1,11 +1,11 @@
 #!/usr/bin/env python
 # coding: utf-8
 
-from __future__ import with_statement
+from __future__ import with_statement, unicode_literals
 
 import datetime
 import glob
-import io # For Python 2 compatibilty
+import io  # For Python 2 compatibility
 import os
 import re
 
@@ -13,7 +13,7 @@ year = str(datetime.datetime.now().year)
 for fn in glob.glob('*.html*'):
     with io.open(fn, encoding='utf-8') as f:
         content = f.read()
-    newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content)
+    newc = re.sub(r'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', 'Copyright © 2006-' + year, content)
     if content != newc:
         tmpFn = fn + '.part'
         with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
index 0ba15ae0f7c83a4eb2ac6b2b56aeb72d55b3a951..e93eb60fb8af28a72b7aceb5d62d1c2e16b69bc5 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
 
 import datetime
 import io
@@ -73,4 +74,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str)
 
 with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
     atom_file.write(atom_template)
-
index 153e15c8ab674f44e3681e5440c9372634b272d8..f0f0481c781ab40f8386de5c87a99c6468e00708 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
 
 import sys
 import os
@@ -9,6 +10,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(
 
 import youtube_dl
 
+
 def main():
     with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
         template = tmplf.read()
@@ -21,7 +23,7 @@ def main():
             continue
         elif ie_desc is not None:
             ie_html += ': {}'.format(ie.IE_DESC)
-        if ie.working() == False:
+        if not ie.working():
             ie_html += ' (Currently broken)'
         ie_htmls.append('<li>{}</li>'.format(ie_html))
 
diff --git a/devscripts/make_contributing.py b/devscripts/make_contributing.py
new file mode 100755 (executable)
index 0000000..5fa8cf8
--- /dev/null
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+import argparse
+import io
+import re
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        'INFILE', help='README.md file name to read from')
+    parser.add_argument(
+        'OUTFILE', help='CONTRIBUTING.md file name to write to')
+    args = parser.parse_args()
+
+    with io.open(args.INFILE, encoding='utf-8') as inf:
+        readme = inf.read()
+
+    bug_text = re.search(
+        r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
+    dev_text = re.search(
+        r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING YOUTUBE-DL',
+        readme).group(1)
+
+    out = bug_text + dev_text
+
+    with io.open(args.OUTFILE, 'w', encoding='utf-8') as outf:
+        outf.write(out)
+
+if __name__ == '__main__':
+    main()
index 70fa942dd12f7a75ee71b1fc9668e615cda0d747..8fbce07967c177217f5d39162d9f0958f1d41bf5 100755 (executable)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import io
 import sys
 import re
index d9c857015c520fcd5b6dd46ecf3663e167a458df..f66bebfea6de2cec60a10f943380b9247e5c3d60 100644 (file)
@@ -1,3 +1,4 @@
+from __future__ import unicode_literals
 
 import io
 import os.path
index 453087e5f70fa92906926ef12ab3b192087c51c3..691517ceb9b34394115ed4e54521bad1d4f3b54b 100755 (executable)
@@ -73,7 +73,6 @@ RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
 (cd build/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
 (cd build/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
 (cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
-git checkout HEAD -- youtube-dl youtube-dl.exe
 
 /bin/echo -e "\n### Signing and uploading the new binaries to yt-dl.org ..."
 for f in $RELEASE_FILES; do gpg --passphrase-repeat 5 --detach-sig "build/$version/$f"; done
diff --git a/devscripts/transition_helper.py b/devscripts/transition_helper.py
deleted file mode 100644 (file)
index d5ca2d4..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os
-
-try:
-    import urllib.request as compat_urllib_request
-except ImportError: # Python 2
-    import urllib2 as compat_urllib_request
-
-sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
-sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
-sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')
-
-try:
-       raw_input()
-except NameError: # Python 3
-       input()
-
-filename = sys.argv[0]
-
-API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
-BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"
-
-if not os.access(filename, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % filename)
-
-try:
-    urlh = compat_urllib_request.urlopen(BIN_URL)
-    newcontent = urlh.read()
-    urlh.close()
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to download latest version')
-
-try:
-    with open(filename, 'wb') as outf:
-        outf.write(newcontent)
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to overwrite current version')
-
-sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
diff --git a/devscripts/transition_helper_exe/setup.py b/devscripts/transition_helper_exe/setup.py
deleted file mode 100644 (file)
index aaf5c29..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-from distutils.core import setup
-import py2exe
-
-py2exe_options = {
-    "bundle_files": 1,
-    "compressed": 1,
-    "optimize": 2,
-    "dist_dir": '.',
-    "dll_excludes": ['w9xpopen.exe']
-}
-
-setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
\ No newline at end of file
diff --git a/devscripts/transition_helper_exe/youtube-dl.py b/devscripts/transition_helper_exe/youtube-dl.py
deleted file mode 100644 (file)
index 6297dfd..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os
-import urllib2
-import json, hashlib
-
-def rsa_verify(message, signature, key):
-    from struct import pack
-    from hashlib import sha256
-    from sys import version_info
-    def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
-    block_size = 0
-    n = key[0]
-    while n:
-        block_size += 1
-        n >>= 8
-    signature = pow(int(signature, 16), key[1], key[0])
-    raw_bytes = []
-    while signature:
-        raw_bytes.insert(0, pack("B", signature & 0xFF))
-        signature >>= 8
-    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
-    signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
-    signature = signature[19:]
-    if signature != sha256(message).digest(): return False
-    return True
-
-sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
-sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
-sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')
-
-raw_input()
-
-filename = sys.argv[0]
-
-UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
-VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
-JSON_URL = UPDATE_URL + 'versions.json'
-UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
-
-if not os.access(filename, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % filename)
-
-exe = os.path.abspath(filename)
-directory = os.path.dirname(exe)
-if not os.access(directory, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % directory)
-
-try:
-    versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
-    versions_info = json.loads(versions_info)
-except:
-    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
-if not 'signature' in versions_info:
-    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
-signature = versions_info['signature']
-del versions_info['signature']
-if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
-    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
-
-version = versions_info['versions'][versions_info['latest']]
-
-try:
-    urlh = urllib2.urlopen(version['exe'][0])
-    newcontent = urlh.read()
-    urlh.close()
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to download latest version')
-
-newcontent_hash = hashlib.sha256(newcontent).hexdigest()
-if newcontent_hash != version['exe'][1]:
-    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
-
-try:
-    with open(exe + '.new', 'wb') as outf:
-        outf.write(newcontent)
-except (IOError, OSError) as err:
-    sys.exit(u'ERROR: unable to write the new version')
-
-try:
-    bat = os.path.join(directory, 'youtube-dl-updater.bat')
-    b = open(bat, 'w')
-    b.write("""
-echo Updating youtube-dl...
-ping 127.0.0.1 -n 5 -w 1000 > NUL
-move /Y "%s.new" "%s"
-del "%s"
-    \n""" %(exe, exe, bat))
-    b.close()
-
-    os.startfile(bat)
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to overwrite current version')
-
-sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
diff --git a/devscripts/zsh-completion.in b/devscripts/zsh-completion.in
new file mode 100644 (file)
index 0000000..b394a1a
--- /dev/null
@@ -0,0 +1,28 @@
+#compdef youtube-dl
+
+__youtube_dl() {
+    local curcontext="$curcontext" fileopts diropts cur prev
+    typeset -A opt_args
+    fileopts="{{fileopts}}"
+    diropts="{{diropts}}"
+    cur=$words[CURRENT]
+    case $cur in
+        :)
+            _arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)'
+        ;;
+        *)
+            prev=$words[CURRENT-1]
+            if [[ ${prev} =~ ${fileopts} ]]; then
+                _path_files
+            elif [[ ${prev} =~ ${diropts} ]]; then
+                _path_files -/
+            elif [[ ${prev} == "--recode-video" ]]; then
+                _arguments '*: :(mp4 flv ogg webm mkv)'
+            else
+                _arguments '*: :({{flags}})'
+            fi
+        ;;
+    esac
+}
+
+__youtube_dl
\ No newline at end of file
diff --git a/devscripts/zsh-completion.py b/devscripts/zsh-completion.py
new file mode 100755 (executable)
index 0000000..f200f2c
--- /dev/null
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+import os
+from os.path import dirname as dirn
+import sys
+
+sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+import youtube_dl
+
+ZSH_COMPLETION_FILE = "youtube-dl.zsh"
+ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
+
+
+def build_completion(opt_parser):
+    opts = [opt for group in opt_parser.option_groups
+            for opt in group.option_list]
+    opts_file = [opt for opt in opts if opt.metavar == "FILE"]
+    opts_dir = [opt for opt in opts if opt.metavar == "DIR"]
+
+    fileopts = []
+    for opt in opts_file:
+        if opt._short_opts:
+            fileopts.extend(opt._short_opts)
+        if opt._long_opts:
+            fileopts.extend(opt._long_opts)
+
+    diropts = []
+    for opt in opts_dir:
+        if opt._short_opts:
+            diropts.extend(opt._short_opts)
+        if opt._long_opts:
+            diropts.extend(opt._long_opts)
+
+    flags = [opt.get_opt_string() for opt in opts]
+
+    with open(ZSH_COMPLETION_TEMPLATE) as f:
+        template = f.read()
+
+    template = template.replace("{{fileopts}}", "|".join(fileopts))
+    template = template.replace("{{diropts}}", "|".join(diropts))
+    template = template.replace("{{flags}}", " ".join(flags))
+
+    with open(ZSH_COMPLETION_FILE, "w") as f:
+        f.write(template)
+
+parser = youtube_dl.parseOpts()[0]
+build_completion(parser)
index 4a04ad779722f191085cf5bd3b927d89e4afa385..594ca61a6bf984d173620a3e95eaca28b22cda5a 100644 (file)
@@ -44,8 +44,8 @@ copyright = u'2014, Ricardo Garcia Gonzalez'
 # built documents.
 #
 # The short X.Y version.
-import youtube_dl
-version = youtube_dl.__version__
+from youtube_dl.version import __version__
+version = __version__
 # The full version, including alpha/beta/rc tags.
 release = version
 
index 03e7b358e4ec1b4800e06f6796e386a808b67891..4686260e0bbd25adf39e683bdae1b475e267b0b8 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,6 @@
 from __future__ import print_function
 
 import os.path
-import pkg_resources
 import warnings
 import sys
 
@@ -48,6 +47,7 @@ if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
 else:
     files_spec = [
         ('etc/bash_completion.d', ['youtube-dl.bash-completion']),
+        ('etc/fish/completions', ['youtube-dl.fish']),
         ('share/doc/youtube_dl', ['README.txt']),
         ('share/man/man1', ['youtube-dl.1'])
     ]
@@ -102,7 +102,9 @@ setup(
         "Programming Language :: Python :: 2.6",
         "Programming Language :: Python :: 2.7",
         "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.3"
+        "Programming Language :: Python :: 3.2",
+        "Programming Language :: Python :: 3.3",
+        "Programming Language :: Python :: 3.4",
     ],
 
     **params
index b7299fb82c2e541fc520ba11c5c52d9edcc972e3..8a820526abfe5dbae31a1921312c60d075667c32 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import errno
 import io
 import hashlib
@@ -12,6 +14,7 @@ from youtube_dl import YoutubeDL
 from youtube_dl.utils import (
     compat_str,
     preferredencoding,
+    write_string,
 )
 
 
@@ -40,10 +43,10 @@ def report_warning(message):
     If stderr is a tty file the 'WARNING:' will be colored
     '''
     if sys.stderr.isatty() and os.name != 'nt':
-        _msg_header = u'\033[0;33mWARNING:\033[0m'
+        _msg_header = '\033[0;33mWARNING:\033[0m'
     else:
-        _msg_header = u'WARNING:'
-    output = u'%s %s\n' % (_msg_header, message)
+        _msg_header = 'WARNING:'
+    output = '%s %s\n' % (_msg_header, message)
     if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
         output = output.encode(preferredencoding())
     sys.stderr.write(output)
@@ -54,9 +57,9 @@ class FakeYDL(YoutubeDL):
         # Different instances of the downloader can't share the same dictionary
         # some tests set the "sublang" parameter, which would break the md5 checks.
         params = get_params(override=override)
-        super(FakeYDL, self).__init__(params)
+        super(FakeYDL, self).__init__(params, auto_init=False)
         self.result = []
-        
+
     def to_screen(self, s, skip_eol=None):
         print(s)
 
@@ -69,8 +72,10 @@ class FakeYDL(YoutubeDL):
     def expect_warning(self, regex):
         # Silence an expected warning matching a regex
         old_report_warning = self.report_warning
+
         def report_warning(self, message):
-            if re.match(regex, message): return
+            if re.match(regex, message):
+                return
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)
 
@@ -102,34 +107,48 @@ def expect_info_dict(self, expected_dict, got_dict):
             match_rex = re.compile(match_str)
 
             self.assertTrue(
-                isinstance(got, compat_str) and match_rex.match(got),
-                u'field %s (value: %r) should match %r' % (info_field, got, match_str))
+                isinstance(got, compat_str),
+                'Expected a %s object, but got %s for field %s' % (
+                    compat_str.__name__, type(got).__name__, info_field))
+            self.assertTrue(
+                match_rex.match(got),
+                'field %s (value: %r) should match %r' % (info_field, got, match_str))
         elif isinstance(expected, type):
             got = got_dict.get(info_field)
             self.assertTrue(isinstance(got, expected),
-                u'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
+                            'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
         else:
             if isinstance(expected, compat_str) and expected.startswith('md5:'):
                 got = 'md5:' + md5(got_dict.get(info_field))
             else:
                 got = got_dict.get(info_field)
             self.assertEqual(expected, got,
-                u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+                             'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
 
     # Check for the presence of mandatory fields
-    for key in ('id', 'url', 'title', 'ext'):
-        self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
+    if got_dict.get('_type') != 'playlist':
+        for key in ('id', 'url', 'title', 'ext'):
+            self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
     # Check for mandatory fields that are automatically set by YoutubeDL
     for key in ['webpage_url', 'extractor', 'extractor_key']:
-        self.assertTrue(got_dict.get(key), u'Missing field: %s' % key)
+        self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
 
     # Are checkable fields missing from the test case definition?
     test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
-        for key, value in got_dict.items()
-        if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
+                          for key, value in got_dict.items()
+                          if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
     missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
     if missing_keys:
-        sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
+        def _repr(v):
+            if isinstance(v, compat_str):
+                return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
+            else:
+                return repr(v)
+        info_dict_str = ''.join(
+            '    %s: %s,\n' % (_repr(k), _repr(v))
+            for k, v in test_info_dict.items())
+        write_string(
+            '\n\'info_dict\': {\n' + info_dict_str + '}\n', out=sys.stderr)
         self.assertFalse(
             missing_keys,
             'Missing keys in test definition: %s' % (
@@ -142,7 +161,9 @@ def assertRegexpMatches(self, text, regexp, msg=None):
     else:
         m = re.match(regexp, text)
         if not m:
-            note = 'Regexp didn\'t match: %r not found in %r' % (regexp, text)
+            note = 'Regexp didn\'t match: %r not found' % (regexp)
+            if len(text) < 1000:
+                note += ' in %r' % text
             if msg is None:
                 msg = note
             else:
@@ -155,3 +176,13 @@ def assertGreaterEqual(self, got, expected, msg=None):
         if msg is None:
             msg = '%r not greater than or equal to %r' % (got, expected)
         self.assertTrue(got >= expected, msg)
+
+
+def expect_warnings(ydl, warnings_re):
+    real_warning = ydl.report_warning
+
+    def _report_warning(w):
+        if not any(re.search(w_re, w) for w_re in warnings_re):
+            real_warning(w)
+
+    ydl.report_warning = _report_warning
index 487a46d56670c1ded91cd71ed35055e54232187b..098cd0cd0c4d17ded161fffbcdc327a7bbb30ee3 100644 (file)
@@ -27,7 +27,6 @@
     "rejecttitle": null, 
     "retries": 10, 
     "simulate": false, 
-    "skip_download": false, 
     "subtitleslang": null, 
     "subtitlesformat": "srt",
     "test": true, 
diff --git a/test/swftests/ConstArrayAccess.as b/test/swftests/ConstArrayAccess.as
new file mode 100644 (file)
index 0000000..07dc3f4
--- /dev/null
@@ -0,0 +1,18 @@
+// input: []
+// output: 4
+
+package {
+public class ConstArrayAccess {
+       private static const x:int = 2;
+       private static const ar:Array = ["42", "3411"];
+
+    public static function main():int{
+        var c:ConstArrayAccess = new ConstArrayAccess();
+        return c.f();
+    }
+
+    public function f(): int {
+       return ar[1].length;
+    }
+}
+}
diff --git a/test/swftests/ConstantInt.as b/test/swftests/ConstantInt.as
new file mode 100644 (file)
index 0000000..e0bbb61
--- /dev/null
@@ -0,0 +1,12 @@
+// input: []
+// output: 2
+
+package {
+public class ConstantInt {
+       private static const x:int = 2;
+
+    public static function main():int{
+        return x;
+    }
+}
+}
diff --git a/test/swftests/DictCall.as b/test/swftests/DictCall.as
new file mode 100644 (file)
index 0000000..c2d174c
--- /dev/null
@@ -0,0 +1,10 @@
+// input: [{"x": 1, "y": 2}]
+// output: 3
+
+package {
+public class DictCall {
+    public static function main(d:Object):int{
+        return d.x + d.y;
+    }
+}
+}
diff --git a/test/swftests/EqualsOperator.as b/test/swftests/EqualsOperator.as
new file mode 100644 (file)
index 0000000..837a69a
--- /dev/null
@@ -0,0 +1,10 @@
+// input: []
+// output: false
+
+package {
+public class EqualsOperator {
+    public static function main():Boolean{
+        return 1 == 2;
+    }
+}
+}
diff --git a/test/swftests/MemberAssignment.as b/test/swftests/MemberAssignment.as
new file mode 100644 (file)
index 0000000..dcba5e3
--- /dev/null
@@ -0,0 +1,22 @@
+// input: [1]
+// output: 2
+
+package {
+public class MemberAssignment {
+    public var v:int;
+
+    public function g():int {
+        return this.v;
+    }
+
+    public function f(a:int):int{
+        this.v = a;
+        return this.v + this.g();
+    }
+
+    public static function main(a:int): int {
+        var v:MemberAssignment = new MemberAssignment();
+        return v.f(a);
+    }
+}
+}
diff --git a/test/swftests/NeOperator.as b/test/swftests/NeOperator.as
new file mode 100644 (file)
index 0000000..61dcbc4
--- /dev/null
@@ -0,0 +1,24 @@
+// input: []
+// output: 123
+
+package {
+public class NeOperator {
+    public static function main(): int {
+        var res:int = 0;
+        if (1 != 2) {
+            res += 3;
+        } else {
+            res += 4;
+        }
+        if (2 != 2) {
+            res += 10;
+        } else {
+            res += 20;
+        }
+        if (9 == 9) {
+            res += 100;
+        }
+        return res;
+    }
+}
+}
diff --git a/test/swftests/PrivateVoidCall.as b/test/swftests/PrivateVoidCall.as
new file mode 100644 (file)
index 0000000..2cc0167
--- /dev/null
@@ -0,0 +1,22 @@
+// input: []
+// output: 9
+
+package {
+public class PrivateVoidCall {
+    public static function main():int{
+        var f:OtherClass = new OtherClass();
+        f.func();
+        return 9;
+    }
+}
+}
+
+class OtherClass {
+    private function pf():void {
+        ;
+    }
+
+    public function func():void {
+        this.pf();
+    }
+}
diff --git a/test/swftests/StringBasics.as b/test/swftests/StringBasics.as
new file mode 100644 (file)
index 0000000..d27430b
--- /dev/null
@@ -0,0 +1,11 @@
+// input: []
+// output: 3
+
+package {
+public class StringBasics {
+    public static function main():int{
+        var s:String = "abc";
+        return s.length;
+    }
+}
+}
diff --git a/test/swftests/StringCharCodeAt.as b/test/swftests/StringCharCodeAt.as
new file mode 100644 (file)
index 0000000..c20d74d
--- /dev/null
@@ -0,0 +1,11 @@
+// input: []
+// output: 9897
+
+package {
+public class StringCharCodeAt {
+    public static function main():int{
+        var s:String = "abc";
+        return s.charCodeAt(1) * 100 + s.charCodeAt();
+    }
+}
+}
diff --git a/test/swftests/StringConversion.as b/test/swftests/StringConversion.as
new file mode 100644 (file)
index 0000000..c976f50
--- /dev/null
@@ -0,0 +1,11 @@
+// input: []
+// output: 2
+
+package {
+public class StringConversion {
+    public static function main():int{
+        var s:String = String(99);
+        return s.length;
+    }
+}
+}
index e794cc97f0e643c5f05539fd3d0313d30dc98f8d..f8e4f930ebe6d5aefdbf128909b1be720ec046f3 100644 (file)
@@ -221,7 +221,7 @@ class TestFormatSelection(unittest.TestCase):
             '138', '137', '248', '136', '247', '135', '246',
             '245', '244', '134', '243', '133', '242', '160',
             # Dash audio
-            '141', '172', '140', '139', '171',
+            '141', '172', '140', '171', '139',
         ]
 
         for f1id, f2id in zip(order, order[1:]):
@@ -266,6 +266,7 @@ class TestFormatSelection(unittest.TestCase):
             'ext': 'mp4',
             'width': None,
         }
+
         def fname(templ):
             ydl = YoutubeDL({'outtmpl': templ})
             return ydl.prepare_filename(info)
index 71e80b037a5cc99fd0cb1a6711d20cfb59e01b34..5be065c437ae978d8c4ccc1a67bea87a599a77ed 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -19,7 +20,7 @@ def _download_restricted(url, filename, age):
         'age_limit': age,
         'skip_download': True,
         'writeinfojson': True,
-        "outtmpl": "%(id)s.%(ext)s",
+        'outtmpl': '%(id)s.%(ext)s',
     }
     ydl = YoutubeDL(params)
     ydl.add_default_info_extractors()
index 0ff47cf1ead4a2c89aa24a83023a1d29cde31717..bd4fe17bf2c0f37b4f9ac2b291a8f7d664f74534 100644 (file)
@@ -14,7 +14,7 @@ from test.helper import gettestcases
 from youtube_dl.extractor import (
     FacebookIE,
     gen_extractors,
-    JustinTVIE,
+    TwitchIE,
     YoutubeIE,
 )
 
@@ -32,19 +32,19 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
 
     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
-        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668
         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
@@ -72,21 +72,17 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
         self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
 
-    def test_justin_tv_channelid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('www.justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('www.twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv/'))
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/'))
+    def test_twitch_channelid_matching(self):
+        self.assertTrue(TwitchIE.suitable('twitch.tv/vanillatv'))
+        self.assertTrue(TwitchIE.suitable('www.twitch.tv/vanillatv'))
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv'))
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/'))
 
-    def test_justintv_videoid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
+    def test_twitch_videoid_matching(self):
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
 
-    def test_justin_tv_chapterid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
+    def test_twitch_chapterid_matching(self):
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
 
     def test_youtube_extract(self):
         assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
@@ -99,6 +95,7 @@ class TestAllURLsMatching(unittest.TestCase):
 
     def test_facebook_matching(self):
         self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))
+        self.assertTrue(FacebookIE.suitable('https://www.facebook.com/cindyweather?fref=ts#!/photo.php?v=10152183998945793'))
 
     def test_no_duplicates(self):
         ies = gen_extractors()
@@ -108,7 +105,9 @@ class TestAllURLsMatching(unittest.TestCase):
                 if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'):
                     self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
                 else:
-                    self.assertFalse(ie.suitable(url), '%s should not match URL %r' % (type(ie).__name__, url))
+                    self.assertFalse(
+                        ie.suitable(url),
+                        '%s should not match URL %r . That URL belongs to %s.' % (type(ie).__name__, url, tc['name']))
 
     def test_keywords(self):
         self.assertMatch(':ytsubs', ['youtube:subscriptions'])
@@ -140,32 +139,6 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS'])
         self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['PBS'])
 
-    def test_ComedyCentralShows(self):
-        self.assertMatch(
-            'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
-            ['ComedyCentralShows'])
-
     def test_yahoo_https(self):
         # https://github.com/rg3/youtube-dl/issues/2701
         self.assertMatch(
diff --git a/test/test_cache.py b/test/test_cache.py
new file mode 100644 (file)
index 0000000..a161601
--- /dev/null
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+import shutil
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from test.helper import FakeYDL
+from youtube_dl.cache import Cache
+
+
+def _is_empty(d):
+    return not bool(os.listdir(d))
+
+
+def _mkdir(d):
+    if not os.path.exists(d):
+        os.mkdir(d)
+
+
+class TestCache(unittest.TestCase):
+    def setUp(self):
+        TEST_DIR = os.path.dirname(os.path.abspath(__file__))
+        TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
+        _mkdir(TESTDATA_DIR)
+        self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
+        self.tearDown()
+
+    def tearDown(self):
+        if os.path.exists(self.test_dir):
+            shutil.rmtree(self.test_dir)
+
+    def test_cache(self):
+        ydl = FakeYDL({
+            'cachedir': self.test_dir,
+        })
+        c = Cache(ydl)
+        obj = {'x': 1, 'y': ['ä', '\\a', True]}
+        self.assertEqual(c.load('test_cache', 'k.'), None)
+        c.store('test_cache', 'k.', obj)
+        self.assertEqual(c.load('test_cache', 'k2'), None)
+        self.assertFalse(_is_empty(self.test_dir))
+        self.assertEqual(c.load('test_cache', 'k.'), obj)
+        self.assertEqual(c.load('test_cache', 'y'), None)
+        self.assertEqual(c.load('test_cache2', 'k.'), None)
+        c.remove()
+        self.assertFalse(os.path.exists(self.test_dir))
+        self.assertEqual(c.load('test_cache', 'k.'), None)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/test_compat.py b/test/test_compat.py
new file mode 100644 (file)
index 0000000..1eb454e
--- /dev/null
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from youtube_dl.utils import get_filesystem_encoding
+from youtube_dl.compat import (
+    compat_getenv,
+    compat_expanduser,
+)
+
+
+class TestCompat(unittest.TestCase):
+    def test_compat_getenv(self):
+        test_str = 'тест'
+        os.environ['YOUTUBE-DL-TEST'] = (
+            test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
+        self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
+
+    def test_compat_expanduser(self):
+        old_home = os.environ.get('HOME')
+        test_str = r'C:\Documents and Settings\тест\Application Data'
+        os.environ['HOME'] = (
+            test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
+        self.assertEqual(compat_expanduser('~'), test_str)
+        os.environ['HOME'] = old_home
+
+    def test_all_present(self):
+        import youtube_dl.compat
+        all_names = youtube_dl.compat.__all__
+        present_names = set(filter(
+            lambda c: '_' in c and not c.startswith('_'),
+            dir(youtube_dl.compat))) - set(['unicode_literals'])
+        self.assertEqual(all_names, sorted(present_names))
+
+if __name__ == '__main__':
+    unittest.main()
index d6540588c130f6bafacd4ef7d077e6debf8d911d..a009aa475442ae588405a99f432af6feade92836 100644 (file)
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys
@@ -7,6 +9,8 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import (
+    assertGreaterEqual,
+    expect_warnings,
     get_params,
     gettestcases,
     expect_info_dict,
@@ -21,30 +25,37 @@ import json
 import socket
 
 import youtube_dl.YoutubeDL
-from youtube_dl.utils import (
+from youtube_dl.compat import (
     compat_http_client,
     compat_urllib_error,
     compat_HTTPError,
+)
+from youtube_dl.utils import (
     DownloadError,
     ExtractorError,
+    format_bytes,
     UnavailableVideoError,
 )
 from youtube_dl.extractor import get_info_extractor
 
 RETRIES = 3
 
+
 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
         super(YoutubeDL, self).__init__(*args, **kwargs)
+
     def report_warning(self, message):
         # Don't accept warnings during tests
         raise ExtractorError(message)
+
     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict)
         return super(YoutubeDL, self).process_info(info_dict)
 
+
 def _file_md5(fn):
     with open(fn, 'rb') as f:
         return hashlib.md5(f.read()).hexdigest()
@@ -54,48 +65,65 @@ defs = gettestcases()
 
 class TestDownload(unittest.TestCase):
     maxDiff = None
+
     def setUp(self):
         self.defs = defs
 
-### Dynamically generate tests
+# Dynamically generate tests
+
+
 def generator(test_case):
 
     def test_template(self):
         ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
         other_ies = [get_info_extractor(ie_key) for ie_key in test_case.get('add_ie', [])]
+        is_playlist = any(k.startswith('playlist') for k in test_case)
+        test_cases = test_case.get(
+            'playlist', [] if is_playlist else [test_case])
+
         def print_skipping(reason):
             print('Skipping %s: %s' % (test_case['name'], reason))
         if not ie.working():
             print_skipping('IE marked as not _WORKING')
             return
-        if 'playlist' not in test_case:
-            info_dict = test_case.get('info_dict', {})
-            if not test_case.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
+
+        for tc in test_cases:
+            info_dict = tc.get('info_dict', {})
+            if not tc.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
                 raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')
+
         if 'skip' in test_case:
             print_skipping(test_case['skip'])
             return
         for other_ie in other_ies:
             if not other_ie.working():
-                print_skipping(u'test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
+                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
                 return
 
         params = get_params(test_case.get('params', {}))
+        if is_playlist and 'playlist' not in test_case:
+            params.setdefault('extract_flat', True)
+            params.setdefault('skip_download', True)
 
-        ydl = YoutubeDL(params)
+        ydl = YoutubeDL(params, auto_init=False)
         ydl.add_default_info_extractors()
         finished_hook_called = set()
+
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
         ydl.add_progress_hook(_hook)
+        expect_warnings(ydl, test_case.get('expected_warnings', []))
 
         def get_tc_filename(tc):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
 
-        test_cases = test_case.get('playlist', [test_case])
-        def try_rm_tcs_files():
-            for tc in test_cases:
+        res_dict = None
+
+        def try_rm_tcs_files(tcs=None):
+            if tcs is None:
+                tcs = test_cases
+            for tc in tcs:
                 tc_filename = get_tc_filename(tc)
                 try_rm(tc_filename)
                 try_rm(tc_filename + '.part')
@@ -105,14 +133,17 @@ def generator(test_case):
             try_num = 1
             while True:
                 try:
-                    ydl.download([test_case['url']])
+                    # We're not using .download here since that is just a shim
+                    # for outside error handling, and returns the exit code
+                    # instead of the result dict.
+                    res_dict = ydl.extract_info(test_case['url'])
                 except (DownloadError, ExtractorError) as err:
                     # Check if the exception is not a network related one
                     if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                         raise
 
                     if try_num == RETRIES:
-                        report_warning(u'Failed due to network errors, skipping...')
+                        report_warning('Failed due to network errors, skipping...')
                         return
 
                     print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))
@@ -121,34 +152,78 @@ def generator(test_case):
                 else:
                     break
 
+            if is_playlist:
+                self.assertEqual(res_dict['_type'], 'playlist')
+                self.assertTrue('entries' in res_dict)
+                expect_info_dict(self, test_case.get('info_dict', {}), res_dict)
+
+            if 'playlist_mincount' in test_case:
+                assertGreaterEqual(
+                    self,
+                    len(res_dict['entries']),
+                    test_case['playlist_mincount'],
+                    'Expected at least %d in playlist %s, but got only %d' % (
+                        test_case['playlist_mincount'], test_case['url'],
+                        len(res_dict['entries'])))
+            if 'playlist_count' in test_case:
+                self.assertEqual(
+                    len(res_dict['entries']),
+                    test_case['playlist_count'],
+                    'Expected %d entries in playlist %s, but got %d.' % (
+                        test_case['playlist_count'],
+                        test_case['url'],
+                        len(res_dict['entries']),
+                    ))
+            if 'playlist_duration_sum' in test_case:
+                got_duration = sum(e['duration'] for e in res_dict['entries'])
+                self.assertEqual(
+                    test_case['playlist_duration_sum'], got_duration)
+
             for tc in test_cases:
                 tc_filename = get_tc_filename(tc)
                 if not test_case.get('params', {}).get('skip_download', False):
                     self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
                     self.assertTrue(tc_filename in finished_hook_called)
+                    expected_minsize = tc.get('file_minsize', 10000)
+                    if expected_minsize is not None:
+                        if params.get('test'):
+                            expected_minsize = max(expected_minsize, 10000)
+                        got_fsize = os.path.getsize(tc_filename)
+                        assertGreaterEqual(
+                            self, got_fsize, expected_minsize,
+                            'Expected %s to be at least %s, but it\'s only %s ' %
+                            (tc_filename, format_bytes(expected_minsize),
+                                format_bytes(got_fsize)))
+                    if 'md5' in tc:
+                        md5_for_file = _file_md5(tc_filename)
+                        self.assertEqual(md5_for_file, tc['md5'])
                 info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
-                self.assertTrue(os.path.exists(info_json_fn))
-                if 'md5' in tc:
-                    md5_for_file = _file_md5(tc_filename)
-                    self.assertEqual(md5_for_file, tc['md5'])
+                self.assertTrue(
+                    os.path.exists(info_json_fn),
+                    'Missing info file %s' % info_json_fn)
                 with io.open(info_json_fn, encoding='utf-8') as infof:
                     info_dict = json.load(infof)
 
                 expect_info_dict(self, tc.get('info_dict', {}), info_dict)
         finally:
             try_rm_tcs_files()
+            if is_playlist and res_dict is not None and res_dict.get('entries'):
+                # Remove all other files that may have been extracted if the
+                # extractor returns full results even with extract_flat
+                res_tcs = [{'info_dict': e} for e in res_dict['entries']]
+                try_rm_tcs_files(res_tcs)
 
     return test_template
 
-### And add them to TestDownload
+# And add them to TestDownload
 for n, test_case in enumerate(defs):
     test_method = generator(test_case)
     tname = 'test_' + str(test_case['name'])
     i = 1
     while hasattr(TestDownload, tname):
-        tname = 'test_'  + str(test_case['name']) + '_' + str(i)
+        tname = 'test_%s_%d' % (test_case['name'], i)
         i += 1
-    test_method.__name__ = tname
+    test_method.__name__ = str(tname)
     setattr(TestDownload, test_method.__name__, test_method)
     del test_method
 
index 2b115fb31a76fd1de7f6ff90733b4d33dd60d0a9..60df187de4921dfa7df808302f55f1ccd66bcb13 100644 (file)
@@ -1,3 +1,6 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
 import unittest
 
 import sys
@@ -6,17 +9,19 @@ import subprocess
 
 rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
+
 try:
     _DEV_NULL = subprocess.DEVNULL
 except AttributeError:
     _DEV_NULL = open(os.devnull, 'wb')
 
+
 class TestExecution(unittest.TestCase):
     def test_import(self):
         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
 
     def test_module_exec(self):
-        if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
+        if sys.version_info >= (2, 7):  # Python 2.6 doesn't support package execution
             subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
 
     def test_main_exec(self):
diff --git a/test/test_playlists.py b/test/test_playlists.py
deleted file mode 100644 (file)
index 4f18834..0000000
+++ /dev/null
@@ -1,400 +0,0 @@
-#!/usr/bin/env python
-# encoding: utf-8
-
-from __future__ import unicode_literals
-
-# Allow direct execution
-import os
-import sys
-import unittest
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-from test.helper import (
-    assertRegexpMatches,
-    assertGreaterEqual,
-    expect_info_dict,
-    FakeYDL,
-)
-
-from youtube_dl.extractor import (
-    AcademicEarthCourseIE,
-    DailymotionPlaylistIE,
-    DailymotionUserIE,
-    VimeoChannelIE,
-    VimeoUserIE,
-    VimeoAlbumIE,
-    VimeoGroupsIE,
-    VineUserIE,
-    UstreamChannelIE,
-    SoundcloudSetIE,
-    SoundcloudUserIE,
-    SoundcloudPlaylistIE,
-    TeacherTubeUserIE,
-    LivestreamIE,
-    LivestreamOriginalIE,
-    NHLVideocenterIE,
-    BambuserChannelIE,
-    BandcampAlbumIE,
-    SmotriCommunityIE,
-    SmotriUserIE,
-    IviCompilationIE,
-    ImdbListIE,
-    KhanAcademyIE,
-    EveryonesMixtapeIE,
-    RutubeChannelIE,
-    RutubePersonIE,
-    GoogleSearchIE,
-    GenericIE,
-    TEDIE,
-    ToypicsUserIE,
-    XTubeUserIE,
-    InstagramUserIE,
-    CSpanIE,
-    AolIE,
-)
-
-
-class TestPlaylists(unittest.TestCase):
-    def assertIsPlaylist(self, info):
-        """Make sure the info has '_type' set to 'playlist'"""
-        self.assertEqual(info['_type'], 'playlist')
-
-    def test_dailymotion_playlist(self):
-        dl = FakeYDL()
-        ie = DailymotionPlaylistIE(dl)
-        result = ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'SPORT')
-        self.assertTrue(len(result['entries']) > 20)
-
-    def test_dailymotion_user(self):
-        dl = FakeYDL()
-        ie = DailymotionUserIE(dl)
-        result = ie.extract('https://www.dailymotion.com/user/nqtv')
-        self.assertIsPlaylist(result)
-        assertGreaterEqual(self, len(result['entries']), 100)
-        self.assertEqual(result['title'], 'Rémi Gaillard')
-
-    def test_vimeo_channel(self):
-        dl = FakeYDL()
-        ie = VimeoChannelIE(dl)
-        result = ie.extract('http://vimeo.com/channels/tributes')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Vimeo Tributes')
-        self.assertTrue(len(result['entries']) > 24)
-
-    def test_vimeo_user(self):
-        dl = FakeYDL()
-        ie = VimeoUserIE(dl)
-        result = ie.extract('http://vimeo.com/nkistudio/videos')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Nki')
-        self.assertTrue(len(result['entries']) > 65)
-
-    def test_vimeo_album(self):
-        dl = FakeYDL()
-        ie = VimeoAlbumIE(dl)
-        result = ie.extract('http://vimeo.com/album/2632481')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Staff Favorites: November 2013')
-        self.assertTrue(len(result['entries']) > 12)
-
-    def test_vimeo_groups(self):
-        dl = FakeYDL()
-        ie = VimeoGroupsIE(dl)
-        result = ie.extract('http://vimeo.com/groups/rolexawards')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Rolex Awards for Enterprise')
-        self.assertTrue(len(result['entries']) > 72)
-
-    def test_vine_user(self):
-        dl = FakeYDL()
-        ie = VineUserIE(dl)
-        result = ie.extract('https://vine.co/Visa')
-        self.assertIsPlaylist(result)
-        assertGreaterEqual(self, len(result['entries']), 47)
-
-    def test_ustream_channel(self):
-        dl = FakeYDL()
-        ie = UstreamChannelIE(dl)
-        result = ie.extract('http://www.ustream.tv/channel/channeljapan')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '10874166')
-        assertGreaterEqual(self, len(result['entries']), 54)
-
-    def test_soundcloud_set(self):
-        dl = FakeYDL()
-        ie = SoundcloudSetIE(dl)
-        result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'The Royal Concept EP')
-        assertGreaterEqual(self, len(result['entries']), 6)
-
-    def test_soundcloud_user(self):
-        dl = FakeYDL()
-        ie = SoundcloudUserIE(dl)
-        result = ie.extract('https://soundcloud.com/the-concept-band')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '9615865')
-        assertGreaterEqual(self, len(result['entries']), 12)
-
-    def test_soundcloud_likes(self):
-        dl = FakeYDL()
-        ie = SoundcloudUserIE(dl)
-        result = ie.extract('https://soundcloud.com/the-concept-band/likes')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '9615865')
-        assertGreaterEqual(self, len(result['entries']), 1)
-
-    def test_soundcloud_playlist(self):
-        dl = FakeYDL()
-        ie = SoundcloudPlaylistIE(dl)
-        result = ie.extract('http://api.soundcloud.com/playlists/4110309')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '4110309')
-        self.assertEqual(result['title'], 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]')
-        assertRegexpMatches(
-            self, result['description'], r'.*?TILT Brass - Bowery Poetry Club')
-        self.assertEqual(len(result['entries']), 6)
-
-    def test_livestream_event(self):
-        dl = FakeYDL()
-        ie = LivestreamIE(dl)
-        result = ie.extract('http://new.livestream.com/tedx/cityenglish')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'TEDCity2.0 (English)')
-        assertGreaterEqual(self, len(result['entries']), 4)
-
-    def test_livestreamoriginal_folder(self):
-        dl = FakeYDL()
-        ie = LivestreamOriginalIE(dl)
-        result = ie.extract('https://www.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'a07bf706-d0e4-4e75-a747-b021d84f2fd3')
-        assertGreaterEqual(self, len(result['entries']), 28)
-
-    def test_nhl_videocenter(self):
-        dl = FakeYDL()
-        ie = NHLVideocenterIE(dl)
-        result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '999')
-        self.assertEqual(result['title'], 'Highlights')
-        self.assertEqual(len(result['entries']), 12)
-
-    def test_bambuser_channel(self):
-        dl = FakeYDL()
-        ie = BambuserChannelIE(dl)
-        result = ie.extract('http://bambuser.com/channel/pixelversity')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'pixelversity')
-        assertGreaterEqual(self, len(result['entries']), 60)
-
-    def test_bandcamp_album(self):
-        dl = FakeYDL()
-        ie = BandcampAlbumIE(dl)
-        result = ie.extract('http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Hierophany of the Open Grave')
-        assertGreaterEqual(self, len(result['entries']), 9)
-        
-    def test_smotri_community(self):
-        dl = FakeYDL()
-        ie = SmotriCommunityIE(dl)
-        result = ie.extract('http://smotri.com/community/video/kommuna')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'kommuna')
-        self.assertEqual(result['title'], 'КПРФ')
-        assertGreaterEqual(self, len(result['entries']), 4)
-        
-    def test_smotri_user(self):
-        dl = FakeYDL()
-        ie = SmotriUserIE(dl)
-        result = ie.extract('http://smotri.com/user/inspector')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'inspector')
-        self.assertEqual(result['title'], 'Inspector')
-        assertGreaterEqual(self, len(result['entries']), 9)
-
-    def test_AcademicEarthCourse(self):
-        dl = FakeYDL()
-        ie = AcademicEarthCourseIE(dl)
-        result = ie.extract('http://academicearth.org/playlists/laws-of-nature/')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'laws-of-nature')
-        self.assertEqual(result['title'], 'Laws of Nature')
-        self.assertEqual(result['description'],u'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.')# u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.")
-        self.assertEqual(len(result['entries']), 4)
-        
-    def test_ivi_compilation(self):
-        dl = FakeYDL()
-        ie = IviCompilationIE(dl)
-        result = ie.extract('http://www.ivi.ru/watch/dvoe_iz_lartsa')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'dvoe_iz_lartsa')
-        self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008)')
-        assertGreaterEqual(self, len(result['entries']), 24)
-
-    def test_ivi_compilation_season(self):
-        dl = FakeYDL()
-        ie = IviCompilationIE(dl)
-        result = ie.extract('http://www.ivi.ru/watch/dvoe_iz_lartsa/season1')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'dvoe_iz_lartsa/season1')
-        self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008) 1 сезон')
-        assertGreaterEqual(self, len(result['entries']), 12)
-        
-    def test_imdb_list(self):
-        dl = FakeYDL()
-        ie = ImdbListIE(dl)
-        result = ie.extract('http://www.imdb.com/list/JFs9NWw6XI0')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'JFs9NWw6XI0')
-        self.assertEqual(result['title'], 'March 23, 2012 Releases')
-        self.assertEqual(len(result['entries']), 7)
-
-    def test_khanacademy_topic(self):
-        dl = FakeYDL()
-        ie = KhanAcademyIE(dl)
-        result = ie.extract('https://www.khanacademy.org/math/applied-math/cryptography')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'cryptography')
-        self.assertEqual(result['title'], 'Journey into cryptography')
-        self.assertEqual(result['description'], 'How have humans protected their secret messages through history? What has changed today?')
-        assertGreaterEqual(self, len(result['entries']), 3)
-
-    def test_EveryonesMixtape(self):
-        dl = FakeYDL()
-        ie = EveryonesMixtapeIE(dl)
-        result = ie.extract('http://everyonesmixtape.com/#/mix/m7m0jJAbMQi')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'm7m0jJAbMQi')
-        self.assertEqual(result['title'], 'Driving')
-        self.assertEqual(len(result['entries']), 24)
-        
-    def test_rutube_channel(self):
-        dl = FakeYDL()
-        ie = RutubeChannelIE(dl)
-        result = ie.extract('http://rutube.ru/tags/video/1800/')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '1800')
-        assertGreaterEqual(self, len(result['entries']), 68)
-
-    def test_rutube_person(self):
-        dl = FakeYDL()
-        ie = RutubePersonIE(dl)
-        result = ie.extract('http://rutube.ru/video/person/313878/')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '313878')
-        assertGreaterEqual(self, len(result['entries']), 37)
-
-    def test_multiple_brightcove_videos(self):
-        # https://github.com/rg3/youtube-dl/issues/2283
-        dl = FakeYDL()
-        ie = GenericIE(dl)
-        result = ie.extract('http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'always-never-nuclear-command-and-control')
-        self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker')
-        self.assertEqual(len(result['entries']), 3)
-
-    def test_GoogleSearch(self):
-        dl = FakeYDL()
-        ie = GoogleSearchIE(dl)
-        result = ie.extract('gvsearch15:python language')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'python language')
-        self.assertEqual(result['title'], 'python language')
-        self.assertEqual(len(result['entries']), 15)
-
-    def test_generic_rss_feed(self):
-        dl = FakeYDL()
-        ie = GenericIE(dl)
-        result = ie.extract('http://phihag.de/2014/youtube-dl/rss.xml')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'http://phihag.de/2014/youtube-dl/rss.xml')
-        self.assertEqual(result['title'], 'Zero Punctuation')
-        self.assertTrue(len(result['entries']) > 10)
-
-    def test_ted_playlist(self):
-        dl = FakeYDL()
-        ie = TEDIE(dl)
-        result = ie.extract('http://www.ted.com/playlists/who_are_the_hackers')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '10')
-        self.assertEqual(result['title'], 'Who are the hackers?')
-        assertGreaterEqual(self, len(result['entries']), 6)
-
-    def test_toypics_user(self):
-        dl = FakeYDL()
-        ie = ToypicsUserIE(dl)
-        result = ie.extract('http://videos.toypics.net/Mikey')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'Mikey')
-        assertGreaterEqual(self, len(result['entries']), 17)
-
-    def test_xtube_user(self):
-        dl = FakeYDL()
-        ie = XTubeUserIE(dl)
-        result = ie.extract('http://www.xtube.com/community/profile.php?user=greenshowers')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'greenshowers')
-        assertGreaterEqual(self, len(result['entries']), 155)
-
-    def test_InstagramUser(self):
-        dl = FakeYDL()
-        ie = InstagramUserIE(dl)
-        result = ie.extract('http://instagram.com/porsche')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'porsche')
-        assertGreaterEqual(self, len(result['entries']), 2)
-        test_video = next(
-            e for e in result['entries']
-            if e['id'] == '614605558512799803_462752227')
-        dl.add_default_extra_info(test_video, ie, '(irrelevant URL)')
-        dl.process_video_result(test_video, download=False)
-        EXPECTED = {
-            'id': '614605558512799803_462752227',
-            'ext': 'mp4',
-            'title': '#Porsche Intelligent Performance.',
-            'thumbnail': 're:^https?://.*\.jpg',
-            'uploader': 'Porsche',
-            'uploader_id': 'porsche',
-            'timestamp': 1387486713,
-            'upload_date': '20131219',
-        }
-        expect_info_dict(self, EXPECTED, test_video)
-
-    def test_CSpan_playlist(self):
-        dl = FakeYDL()
-        ie = CSpanIE(dl)
-        result = ie.extract(
-            'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '342759')
-        self.assertEqual(
-            result['title'], 'General Motors Ignition Switch Recall')
-        whole_duration = sum(e['duration'] for e in result['entries'])
-        self.assertEqual(whole_duration, 14855)
-
-    def test_aol_playlist(self):
-        dl = FakeYDL()
-        ie = AolIE(dl)
-        result = ie.extract(
-            'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '152147')
-        self.assertEqual(
-            result['title'], 'Brace Yourself - Today\'s Weirdest News')
-        assertGreaterEqual(self, len(result['entries']), 10)
-
-    def test_TeacherTubeUser(self):
-        dl = FakeYDL()
-        ie = TeacherTubeUserIE(dl)
-        result = ie.extract('http://www.teachertube.com/user/profile/rbhagwati2')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'rbhagwati2')
-        assertGreaterEqual(self, len(result['entries']), 179)
-
-if __name__ == '__main__':
-    unittest.main()
index 48c30219868b1975a8aa19b6c6fd6f2f80da6e98..7c4cd8218e218a6d3319747705c2893c4fbf4cd2 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -15,12 +16,14 @@ from youtube_dl.extractor import (
     DailymotionIE,
     TEDIE,
     VimeoIE,
+    WallaIE,
 )
 
 
 class BaseTestSubtitles(unittest.TestCase):
     url = None
     IE = None
+
     def setUp(self):
         self.DL = FakeYDL()
         self.ie = self.IE(self.DL)
@@ -73,7 +76,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
 
     def test_youtube_list_subtitles(self):
-        self.DL.expect_warning(u'Video doesn\'t have automatic captions')
+        self.DL.expect_warning('Video doesn\'t have automatic captions')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
@@ -86,7 +89,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.assertTrue(subtitles['it'] is not None)
 
     def test_youtube_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'n5BB19UTcdA'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
@@ -100,7 +103,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestDailymotionSubtitles(BaseTestSubtitles):
@@ -129,20 +132,20 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.assertEqual(len(subtitles.keys()), 5)
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) == 0)
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
@@ -155,7 +158,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestTedSubtitles(BaseTestSubtitles):
@@ -184,13 +187,13 @@ class TestTedSubtitles(BaseTestSubtitles):
         self.assertTrue(len(subtitles.keys()) >= 28)
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
@@ -202,7 +205,7 @@ class TestTedSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestBlipTVSubtitles(BaseTestSubtitles):
@@ -210,13 +213,13 @@ class TestBlipTVSubtitles(BaseTestSubtitles):
     IE = BlipTVIE
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_allsubtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
@@ -235,7 +238,7 @@ class TestVimeoSubtitles(BaseTestSubtitles):
     def test_subtitles(self):
         self.DL.params['writesubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
+        self.assertEqual(md5(subtitles['en']), '26399116d23ae3cf2c087cea94bc43b4')
 
     def test_subtitles_lang(self):
         self.DL.params['writesubtitles'] = True
@@ -250,20 +253,20 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) == 0)
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://vimeo.com/56015672'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
@@ -276,7 +279,34 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
+
+
+class TestWallaSubtitles(BaseTestSubtitles):
+    url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
+    IE = WallaIE
+
+    def test_list_subtitles(self):
+        self.DL.expect_warning('Automatic Captions not supported by this server')
+        self.DL.params['listsubtitles'] = True
+        info_dict = self.getInfoDict()
+        self.assertEqual(info_dict, None)
+
+    def test_allsubtitles(self):
+        self.DL.expect_warning('Automatic Captions not supported by this server')
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['heb']))
+        self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
+
+    def test_nosubtitles(self):
+        self.DL.expect_warning('video doesn\'t have subtitles')
+        self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(len(subtitles), 0)
 
 
 if __name__ == '__main__':
index b42cd74c738b70704e1eb9ff9d70fefc89cf6f15..9f18055e629d3c21826ad8159bdf0ae55409bca2 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -37,7 +38,9 @@ def _make_testfunc(testfile):
                 or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
             # Recompile
             try:
-                subprocess.check_call(['mxmlc', '-output', swf_file, as_file])
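+                # Link the runtime shared libraries statically so the compiled SWF is self-contained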
+                subprocess.check_call([
+                    'mxmlc', '-output', swf_file,
+                    '-static-link-runtime-shared-libraries', as_file])
             except OSError as ose:
                 if ose.errno == errno.ENOENT:
                     print('mxmlc not found! Skipping test.')
index a4ba7bad03b85f7c7e69ded7b435898c7b17e11e..d3cba869bc9382f0fea0c798c36cf2ca168dcc67 100644 (file)
@@ -1,5 +1,11 @@
 from __future__ import unicode_literals
 
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
 import io
 import os
 import re
@@ -9,14 +15,16 @@ rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
 IGNORED_FILES = [
     'setup.py',  # http://bugs.python.org/issue13943
+    'conf.py',
+    'buildserver.py',
 ]
 
 
+from test.helper import assertRegexpMatches
+
+
 class TestUnicodeLiterals(unittest.TestCase):
     def test_all_files(self):
-        print('Skipping this test (not yet fully implemented)')
-        return
-
         for dirpath, _, filenames in os.walk(rootDir):
             for basename in filenames:
                 if not basename.endswith('.py'):
@@ -30,10 +38,11 @@ class TestUnicodeLiterals(unittest.TestCase):
 
                 if "'" not in code and '"' not in code:
                     continue
-                imps = 'from __future__ import unicode_literals'
-                self.assertTrue(
-                    imps in code,
-                    ' %s  missing in %s' % (imps, fn))
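+                # The regexp permits only comments or blank lines before the __future__ import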
+                assertRegexpMatches(
+                    self,
+                    code,
+                    r'(?:(?:#.*?|\s*)\n)*from __future__ import (?:[a-z_]+,\s*)*unicode_literals',
+                    'unicode_literals import missing in %s' % fn)
 
                 m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code)
                 if m is not None:
index 51eb0b6b936c7ea5d21cfef9bdc0b70f2ee7663a..d42df6d96d92f7cde133e2bddfc769793920c37d 100644 (file)
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 # coding: utf-8
 
+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys
@@ -13,15 +15,15 @@ import io
 import json
 import xml.etree.ElementTree
 
-#from youtube_dl.utils import htmlentity_transform
 from youtube_dl.utils import (
+    clean_html,
     DateRange,
     encodeFilename,
     find_xpath_attr,
     fix_xml_ampersands,
-    get_meta_content,
     orderedSet,
-    PagedList,
+    OnDemandPagedList,
+    InAdvancePagedList,
     parse_duration,
     read_batch_urls,
     sanitize_filename,
@@ -39,13 +41,16 @@ from youtube_dl.utils import (
     parse_iso8601,
     strip_jsonp,
     uppercase_escape,
+    limit_length,
+    escape_rfc3986,
+    escape_url,
+    js_to_json,
+    intlist_to_bytes,
+    args_to_str,
+    parse_filesize,
+    version_tuple,
 )
 
-if sys.version_info < (3, 0):
-    _compat_str = lambda b: b.decode('unicode-escape')
-else:
-    _compat_str = lambda s: s
-
 
 class TestUtil(unittest.TestCase):
     def test_timeconvert(self):
@@ -67,9 +72,9 @@ class TestUtil(unittest.TestCase):
         self.assertEqual('this - that', sanitize_filename('this: that'))
 
         self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
-        aumlaut = _compat_str('\xe4')
+        aumlaut = 'ä'
         self.assertEqual(sanitize_filename(aumlaut), aumlaut)
-        tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
+        tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
         self.assertEqual(sanitize_filename(tests), tests)
 
         forbidden = '"\0\\/'
@@ -91,9 +96,9 @@ class TestUtil(unittest.TestCase):
         self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
         self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
 
-        tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
+        tests = 'a\xe4b\u4e2d\u56fd\u7684c'
         self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
-        self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '')  # No empty filename
+        self.assertTrue(sanitize_filename('\xf6', restricted=True) != '')  # No empty filename
 
         forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
         for fc in forbidden:
@@ -101,8 +106,8 @@ class TestUtil(unittest.TestCase):
                 self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
 
         # Handle a common case more neatly
-        self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
-        self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
+        self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
+        self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
         # .. but make sure the file name is never empty
         self.assertTrue(sanitize_filename('-', restricted=True) != '')
         self.assertTrue(sanitize_filename(':', restricted=True) != '')
@@ -116,14 +121,16 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
         self.assertEqual(orderedSet([]), [])
         self.assertEqual(orderedSet([1]), [1])
-        #keep the list ordered
+        # keep the list ordered
         self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
 
     def test_unescape_html(self):
-        self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))
-        
+        self.assertEqual(unescapeHTML('%20;'), '%20;')
+        self.assertEqual(
+            unescapeHTML('&eacute;'), 'é')
+
     def test_daterange(self):
-        _20century = DateRange("19000101","20000101")
+        _20century = DateRange("19000101", "20000101")
         self.assertFalse("17890714" in _20century)
         _ac = DateRange("00010101")
         self.assertTrue("19690721" in _ac)
@@ -136,9 +143,13 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
         self.assertEqual(unified_strdate('1968-12-10'), '19681210')
+        self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
+        self.assertEqual(
+            unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
+            '20141126')
 
     def test_find_xpath_attr(self):
-        testxml = u'''<root>
+        testxml = '''<root>
             <node/>
             <node x="a"/>
             <node x="a" y="c" />
@@ -150,19 +161,8 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
         self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
 
-    def test_meta_parser(self):
-        testhtml = u'''
-        <head>
-            <meta name="description" content="foo &amp; bar">
-            <meta content='Plato' name='author'/>
-        </head>
-        '''
-        get_meta = lambda name: get_meta_content(name, testhtml)
-        self.assertEqual(get_meta('description'), u'foo & bar')
-        self.assertEqual(get_meta('author'), 'Plato')
-
     def test_xpath_with_ns(self):
-        testxml = u'''<root xmlns:media="http://example.com/">
+        testxml = '''<root xmlns:media="http://example.com/">
             <media:song>
                 <media:author>The Author</media:author>
                 <url>http://server.com/download.mp3</url>
@@ -171,11 +171,11 @@ class TestUtil(unittest.TestCase):
         doc = xml.etree.ElementTree.fromstring(testxml)
         find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
         self.assertTrue(find('media:song') is not None)
-        self.assertEqual(find('media:song/media:author').text, u'The Author')
-        self.assertEqual(find('media:song/url').text, u'http://server.com/download.mp3')
+        self.assertEqual(find('media:song/media:author').text, 'The Author')
+        self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
 
     def test_smuggle_url(self):
-        data = {u"ö": u"ö", u"abc": [3]}
+        data = {"ö": "ö", "abc": [3]}
         url = 'https://foo.bar/baz?x=y#a'
         smug_url = smuggle_url(url, data)
         unsmug_url, unsmug_data = unsmuggle_url(smug_url)
@@ -187,22 +187,22 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(res_data, None)
 
     def test_shell_quote(self):
-        args = ['ffmpeg', '-i', encodeFilename(u'ñ€ß\'.mp4')]
-        self.assertEqual(shell_quote(args), u"""ffmpeg -i 'ñ€ß'"'"'.mp4'""")
+        args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
+        self.assertEqual(shell_quote(args), """ffmpeg -i 'ñ€ß'"'"'.mp4'""")
 
     def test_str_to_int(self):
         self.assertEqual(str_to_int('123,456'), 123456)
         self.assertEqual(str_to_int('123.456'), 123456)
 
     def test_url_basename(self):
-        self.assertEqual(url_basename(u'http://foo.de/'), u'')
-        self.assertEqual(url_basename(u'http://foo.de/bar/baz'), u'baz')
-        self.assertEqual(url_basename(u'http://foo.de/bar/baz?x=y'), u'baz')
-        self.assertEqual(url_basename(u'http://foo.de/bar/baz#x=y'), u'baz')
-        self.assertEqual(url_basename(u'http://foo.de/bar/baz/'), u'baz')
+        self.assertEqual(url_basename('http://foo.de/'), '')
+        self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
+        self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
+        self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
+        self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
         self.assertEqual(
-            url_basename(u'http://media.w3.org/2010/05/sintel/trailer.mp4'),
-            u'trailer.mp4')
+            url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
+            'trailer.mp4')
 
     def test_parse_duration(self):
         self.assertEqual(parse_duration(None), None)
@@ -213,12 +213,20 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_duration('00:01:01'), 61)
         self.assertEqual(parse_duration('x:y'), None)
         self.assertEqual(parse_duration('3h11m53s'), 11513)
+        self.assertEqual(parse_duration('3h 11m 53s'), 11513)
+        self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
+        self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
         self.assertEqual(parse_duration('62m45s'), 3765)
         self.assertEqual(parse_duration('6m59s'), 419)
         self.assertEqual(parse_duration('49s'), 49)
         self.assertEqual(parse_duration('0h0m0s'), 0)
         self.assertEqual(parse_duration('0m0s'), 0)
         self.assertEqual(parse_duration('0s'), 0)
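+        # Fractional seconds, bare ISO 8601 time parts and spelled-out units should parse too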
+        self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
+        self.assertEqual(parse_duration('T30M38S'), 1838)
+        self.assertEqual(parse_duration('5 s'), 5)
+        self.assertEqual(parse_duration('3 min'), 180)
+        self.assertEqual(parse_duration('2.5 hours'), 9000)
 
     def test_fix_xml_ampersands(self):
         self.assertEqual(
@@ -241,10 +249,14 @@ class TestUtil(unittest.TestCase):
                 for i in range(firstid, upto):
                     yield i
 
-            pl = PagedList(get_page, pagesize)
+            pl = OnDemandPagedList(get_page, pagesize)
             got = pl.getslice(*sliceargs)
             self.assertEqual(got, expected)
 
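+            # InAdvancePagedList additionally takes the total page count up front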
+            iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
+            got = iapl.getslice(*sliceargs)
+            self.assertEqual(got, expected)
+
         testPL(5, 2, (), [0, 1, 2, 3, 4])
         testPL(5, 2, (1,), [1, 2, 3, 4])
         testPL(5, 2, (2,), [2, 3, 4])
@@ -255,16 +267,16 @@ class TestUtil(unittest.TestCase):
         testPL(5, 2, (20, 99), [])
 
     def test_struct_unpack(self):
-        self.assertEqual(struct_unpack(u'!B', b'\x00'), (0,))
+        self.assertEqual(struct_unpack('!B', b'\x00'), (0,))
 
     def test_read_batch_urls(self):
-        f = io.StringIO(u'''\xef\xbb\xbf foo
+        f = io.StringIO('''\xef\xbb\xbf foo
             bar\r
             baz
             # More after this line\r
             ; or after this
             bam''')
-        self.assertEqual(read_batch_urls(f), [u'foo', u'bar', u'baz', u'bam'])
+        self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
 
     def test_urlencode_postdata(self):
         data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
@@ -274,15 +286,109 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
+        self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
 
     def test_strip_jsonp(self):
         stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
         d = json.loads(stripped)
         self.assertEqual(d, [{"id": "532cb", "x": 3}])
 
-    def test_uppercase_escpae(self):
-        self.assertEqual(uppercase_escape(u'aä'), u'aä')
-        self.assertEqual(uppercase_escape(u'\\U0001d550'), u'𝕐')
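+        # A trailing comment after the JSONP wrapper should be stripped as well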
+        stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
+        d = json.loads(stripped)
+        self.assertEqual(d, {'STATUS': 'OK'})
+
+    def test_uppercase_escape(self):
+        self.assertEqual(uppercase_escape('aä'), 'aä')
+        self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
+
+    def test_limit_length(self):
+        self.assertEqual(limit_length(None, 12), None)
+        self.assertEqual(limit_length('foo', 12), 'foo')
+        self.assertTrue(
+            limit_length('foo bar baz asd', 12).startswith('foo bar'))
+        self.assertTrue('...' in limit_length('foo bar baz asd', 12))
+
+    def test_escape_rfc3986(self):
+        reserved = "!*'();:@&=+$,/?#[]"
+        unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
+        self.assertEqual(escape_rfc3986(reserved), reserved)
+        self.assertEqual(escape_rfc3986(unreserved), unreserved)
+        self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
+        self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
+        self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
+        self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
+
+    def test_escape_url(self):
+        self.assertEqual(
+            escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
+            'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
+        )
+        self.assertEqual(
+            escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
+            'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
+        )
+        self.assertEqual(
+            escape_url('http://тест.рф/фрагмент'),
+            'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
+        )
+        self.assertEqual(
+            escape_url('http://тест.рф/абв?абв=абв#абв'),
+            'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
+        )
+        self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
+
+    def test_js_to_json_realworld(self):
+        inp = '''{
+            'clip':{'provider':'pseudo'}
+        }'''
+        self.assertEqual(js_to_json(inp), '''{
+            "clip":{"provider":"pseudo"}
+        }''')
+        json.loads(js_to_json(inp))
+
+        inp = '''{
+            'playlist':[{'controls':{'all':null}}]
+        }'''
+        self.assertEqual(js_to_json(inp), '''{
+            "playlist":[{"controls":{"all":null}}]
+        }''')
+
+    def test_js_to_json_edgecases(self):
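+        # Unquoted keys and escaped quotes/backslashes inside single-quoted strings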
+        on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
+        self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
+
+        on = js_to_json('{"abc": true}')
+        self.assertEqual(json.loads(on), {'abc': True})
+
+    def test_clean_html(self):
+        self.assertEqual(clean_html('a:\nb'), 'a: b')
+        self.assertEqual(clean_html('a:\n   "b"'), 'a:    "b"')
+
+    def test_intlist_to_bytes(self):
+        self.assertEqual(
+            intlist_to_bytes([0, 1, 127, 128, 255]),
+            b'\x00\x01\x7f\x80\xff')
+
+    def test_args_to_str(self):
+        self.assertEqual(
+            args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
+            'foo ba/r -baz \'2 be\' \'\''
+        )
+
+    def test_parse_filesize(self):
+        self.assertEqual(parse_filesize(None), None)
+        self.assertEqual(parse_filesize(''), None)
+        self.assertEqual(parse_filesize('91 B'), 91)
+        self.assertEqual(parse_filesize('foobar'), None)
+        self.assertEqual(parse_filesize('2 MiB'), 2097152)
+        self.assertEqual(parse_filesize('5 GB'), 5000000000)
+        self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
+        self.assertEqual(parse_filesize('1,24 KB'), 1240)
+
+    def test_version_tuple(self):
+        self.assertEqual(version_tuple('1'), (1,))
+        self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
+        self.assertEqual(version_tuple('10.1-6'), (10, 1, 6))  # avconv style
 
 if __name__ == '__main__':
     unittest.main()
index eac53b285ab6740b368f278784aced9625abb9a6..780636c7730d396c381fd45fd6d4e8126d1c9fe2 100644 (file)
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 # coding: utf-8
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -31,19 +32,18 @@ params = get_params({
 })
 
 
-
 TEST_ID = 'gr51aVj-mLg'
 ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
 EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
 
+
 class TestAnnotations(unittest.TestCase):
     def setUp(self):
         # Clear old files
         self.tearDown()
 
-
     def test_info_json(self):
-        expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
+        expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text.
         ie = youtube_dl.extractor.YoutubeIE()
         ydl = YoutubeDL(params)
         ydl.add_info_extractor(ie)
@@ -51,7 +51,7 @@ class TestAnnotations(unittest.TestCase):
         self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
         annoxml = None
         with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
-                annoxml = xml.etree.ElementTree.parse(annof)
+            annoxml = xml.etree.ElementTree.parse(annof)
         self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
         root = annoxml.getroot()
         self.assertEqual(root.tag, 'document')
@@ -59,18 +59,17 @@ class TestAnnotations(unittest.TestCase):
         self.assertEqual(annotationsTag.tag, 'annotations')
         annotations = annotationsTag.findall('annotation')
 
-        #Not all the annotations have TEXT children and the annotations are returned unsorted.
+        # Not all the annotations have TEXT children and the annotations are returned unsorted.
         for a in annotations:
-                self.assertEqual(a.tag, 'annotation')
-                if a.get('type') == 'text':
-                        textTag = a.find('TEXT')
-                        text = textTag.text
-                        self.assertTrue(text in expected) #assertIn only added in python 2.7
-                        #remove the first occurance, there could be more than one annotation with the same text
-                        expected.remove(text)
-        #We should have seen (and removed) all the expected annotation texts.
+            self.assertEqual(a.tag, 'annotation')
+            if a.get('type') == 'text':
+                textTag = a.find('TEXT')
+                text = textTag.text
+                self.assertTrue(text in expected)  # assertIn only added in Python 2.7
+                # remove the first occurrence; there could be more than one annotation with the same text
+                expected.remove(text)
+        # We should have seen (and removed) all the expected annotation texts.
         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
-        
 
     def tearDown(self):
         try_rm(ANNOTATIONS_FILE)
index 90426a559551571a7769c6137a76270a4ad47b1c..0396ef26283f58c05ee261b0cf23773d462ad103 100644 (file)
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 # coding: utf-8
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -32,7 +33,7 @@ params = get_params({
 TEST_ID = 'BaW_jenozKc'
 INFO_JSON_FILE = TEST_ID + '.info.json'
 DESCRIPTION_FILE = TEST_ID + '.mp4.description'
-EXPECTED_DESCRIPTION = u'''test chars:  "'/\ä↭𝕐
+EXPECTED_DESCRIPTION = '''test chars:  "'/\ä↭𝕐
 test URL: https://github.com/rg3/youtube-dl/issues/1892
 
 This is a test video for youtube-dl.
@@ -53,11 +54,11 @@ class TestInfoJSON(unittest.TestCase):
         self.assertTrue(os.path.exists(INFO_JSON_FILE))
         with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
             jd = json.load(jsonf)
-        self.assertEqual(jd['upload_date'], u'20121002')
+        self.assertEqual(jd['upload_date'], '20121002')
         self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
         self.assertEqual(jd['id'], TEST_ID)
         self.assertEqual(jd['extractor'], 'youtube')
-        self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
+        self.assertEqual(jd['title'], '''youtube-dl test video "'/\ä↭𝕐''')
         self.assertEqual(jd['uploader'], 'Philipp Hagemeister')
 
         self.assertTrue(os.path.exists(DESCRIPTION_FILE))
index 3aadedd64cf5af38ab1d18b640b10301c2073de2..c889b6f15c40f5ea91a4dd4ea5a86bb8c62c830d 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -10,13 +11,8 @@ from test.helper import FakeYDL
 
 
 from youtube_dl.extractor import (
-    YoutubeUserIE,
     YoutubePlaylistIE,
     YoutubeIE,
-    YoutubeChannelIE,
-    YoutubeShowIE,
-    YoutubeTopListIE,
-    YoutubeSearchURLIE,
 )
 
 
@@ -25,15 +21,6 @@ class TestYoutubeLists(unittest.TestCase):
         """Make sure the info has '_type' set to 'playlist'"""
         self.assertEqual(info['_type'], 'playlist')
 
-    def test_youtube_playlist(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'ytdl test PL')
-        ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
-        self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
-
     def test_youtube_playlist_noplaylist(self):
         dl = FakeYDL()
         dl.params['noplaylist'] = True
@@ -42,35 +29,6 @@ class TestYoutubeLists(unittest.TestCase):
         self.assertEqual(result['_type'], 'url')
         self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')
 
-    def test_issue_673(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('PLBB231211A4F62143')
-        self.assertTrue(len(result['entries']) > 25)
-
-    def test_youtube_playlist_long(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
-        self.assertIsPlaylist(result)
-        self.assertTrue(len(result['entries']) >= 799)
-
-    def test_youtube_playlist_with_deleted(self):
-        #651
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
-        self.assertFalse('pElCt5oNDuI' in ytie_results)
-        self.assertFalse('KdPEApIVdWM' in ytie_results)
-        
-    def test_youtube_playlist_empty(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx')
-        self.assertIsPlaylist(result)
-        self.assertEqual(len(result['entries']), 0)
-
     def test_youtube_course(self):
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
@@ -81,34 +39,6 @@ class TestYoutubeLists(unittest.TestCase):
         self.assertEqual(len(entries), 25)
         self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
 
-    def test_youtube_channel(self):
-        dl = FakeYDL()
-        ie = YoutubeChannelIE(dl)
-        #test paginated channel
-        result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')
-        self.assertTrue(len(result['entries']) > 90)
-        #test autogenerated channel
-        result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
-        self.assertTrue(len(result['entries']) >= 18)
-
-    def test_youtube_user(self):
-        dl = FakeYDL()
-        ie = YoutubeUserIE(dl)
-        result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')
-        self.assertTrue(len(result['entries']) >= 320)
-
-    def test_youtube_safe_search(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl')
-        self.assertEqual(len(result['entries']), 2)
-
-    def test_youtube_show(self):
-        dl = FakeYDL()
-        ie = YoutubeShowIE(dl)
-        result = ie.extract('http://www.youtube.com/show/airdisasters')
-        self.assertTrue(len(result) >= 3)
-
     def test_youtube_mix(self):
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
@@ -127,21 +57,5 @@ class TestYoutubeLists(unittest.TestCase):
         entries = result['entries']
         self.assertEqual(len(entries), 100)
 
-    def test_youtube_toplist(self):
-        dl = FakeYDL()
-        ie = YoutubeTopListIE(dl)
-        result = ie.extract('yttoplist:music:Trending')
-        entries = result['entries']
-        self.assertTrue(len(entries) >= 5)
-
-    def test_youtube_search_url(self):
-        dl = FakeYDL()
-        ie = YoutubeSearchURLIE(dl)
-        result = ie.extract('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video')
-        entries = result['entries']
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'youtube-dl test video')
-        self.assertTrue(len(entries) >= 5)
-
 if __name__ == '__main__':
     unittest.main()
index 604e76ab60ba42081c3b4779e77e2963038f43e5..13d228cd85e9e260942635a36652c4cdc010dc8d 100644 (file)
@@ -14,7 +14,7 @@ import re
 import string
 
 from youtube_dl.extractor import YoutubeIE
-from youtube_dl.utils import compat_str, compat_urlretrieve
+from youtube_dl.compat import compat_str, compat_urlretrieve
 
 _TESTS = [
     (
@@ -47,18 +47,6 @@ _TESTS = [
         '2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
         'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
     ),
-    (
-        'http://s.ytimg.com/yts/swfbin/player-vfl5vIhK2/watch_as3.swf',
-        'swf',
-        86,
-        'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVWXY\\!"#$%&\'()*+,-./:;<=>?'
-    ),
-    (
-        'http://s.ytimg.com/yts/swfbin/player-vflmDyk47/watch_as3.swf',
-        'swf',
-        'F375F75BF2AFDAAF2666E43868D46816F83F13E81C46.3725A8218E446A0DECD33F79DC282994D6AA92C92C9',
-        '9C29AA6D499282CD97F33DCED0A644E8128A5273.64C18E31F38361864D86834E6662FAADFA2FB57F'
-    ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
         'js',
diff --git a/youtube-dl b/youtube-dl
deleted file mode 100755 (executable)
index e3eb877..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os
-import json, hashlib
-
-try:
-    import urllib.request as compat_urllib_request
-except ImportError: # Python 2
-    import urllib2 as compat_urllib_request
-
-def rsa_verify(message, signature, key):
-    from struct import pack
-    from hashlib import sha256
-    from sys import version_info
-    def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
-    block_size = 0
-    n = key[0]
-    while n:
-        block_size += 1
-        n >>= 8
-    signature = pow(int(signature, 16), key[1], key[0])
-    raw_bytes = []
-    while signature:
-        raw_bytes.insert(0, pack("B", signature & 0xFF))
-        signature >>= 8
-    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
-    signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
-    signature = signature[19:]
-    if signature != sha256(message).digest(): return False
-    return True
-
-sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
-sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
-sys.stderr.write(u'From now on, get the binaries from http://rg3.github.io/youtube-dl/download.html, not from the git repository.\n\n')
-
-try:
-       raw_input()
-except NameError: # Python 3
-       input()
-
-filename = sys.argv[0]
-
-UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
-VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
-JSON_URL = UPDATE_URL + 'versions.json'
-UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
-
-if not os.access(filename, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % filename)
-
-try:
-    versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
-    versions_info = json.loads(versions_info)
-except:
-    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
-if not 'signature' in versions_info:
-    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
-signature = versions_info['signature']
-del versions_info['signature']
-if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
-    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
-
-version = versions_info['versions'][versions_info['latest']]
-
-try:
-    urlh = compat_urllib_request.urlopen(version['bin'][0])
-    newcontent = urlh.read()
-    urlh.close()
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to download latest version')
-
-newcontent_hash = hashlib.sha256(newcontent).hexdigest()
-if newcontent_hash != version['bin'][1]:
-    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
-
-try:
-    with open(filename, 'wb') as outf:
-        outf.write(newcontent)
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to overwrite current version')
-
-sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
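The rsa_verify() routine deleted above is a hand-rolled PKCS#1 v1.5 check: after the RSA exponentiation it expects a block of the form 00 01 FF..FF 00, followed by the fixed ASN.1 DigestInfo header for SHA-256 and the 32-byte digest itself. A compact restatement of that final check (a sketch only, and slightly stricter than the original in that it verifies the padding bytes; assumes Python 3):

    import hashlib

    # Fixed ASN.1 DigestInfo prefix identifying SHA-256 in PKCS#1 v1.5
    DIGESTINFO_SHA256 = bytes.fromhex('3031300d060960864801650304020105000420')

    def looks_like_valid_block(block, message):
        # block: big-endian bytes recovered from pow(signature, e, n)
        if block[0:2] != b'\x00\x01':           # EMSA-PKCS1-v1_5 header
            return False
        padding, sep, rest = block[2:].partition(b'\x00')
        if not sep or padding.strip(b'\xff'):   # padding must be all 0xFF
            return False
        return rest == DIGESTINFO_SHA256 + hashlib.sha256(message).digest()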
diff --git a/youtube-dl.exe b/youtube-dl.exe
deleted file mode 100644 (file)
index 45eee04..0000000
Binary files a/youtube-dl.exe and /dev/null differ
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index 14a1d06ab1ed3350547822cac71501745a14842a..578c8daf255048b90caaf6caa5f09825e1d2bfe6 100755 (executable)
@@ -7,6 +7,7 @@ import collections
 import datetime
 import errno
 import io
+import itertools
 import json
 import locale
 import os
@@ -22,12 +23,16 @@ import traceback
 if os.name == 'nt':
     import ctypes
 
-from .utils import (
+from .compat import (
     compat_cookiejar,
+    compat_expanduser,
     compat_http_client,
     compat_str,
     compat_urllib_error,
     compat_urllib_request,
+)
+from .utils import (
+    escape_url,
     ContentTooShortError,
     date_from_str,
     DateRange,
@@ -56,10 +61,13 @@ from .utils import (
     write_string,
     YoutubeDLHandler,
     prepend_extension,
+    args_to_str,
 )
+from .cache import Cache
 from .extractor import get_info_extractor, gen_extractors
 from .downloader import get_suitable_downloader
-from .postprocessor import FFmpegMergerPP
+from .downloader.rtmp import rtmpdump_version
+from .postprocessor import FFmpegMergerPP, FFmpegPostProcessor
 from .version import __version__
 
 
@@ -105,6 +113,8 @@ class YoutubeDL(object):
     forcefilename:     Force printing final filename.
     forceduration:     Force printing duration.
     forcejson:         Force printing info_dict as JSON.
+    dump_single_json:  Force printing the info_dict of the whole playlist
+                       (or video) as a single JSON line.
     simulate:          Do not download the video files.
     format:            Video format code.
     format_limit:      Highest quality format to try.
@@ -114,6 +124,7 @@ class YoutubeDL(object):
     nooverwrites:      Prevent overwriting files.
     playliststart:     Playlist item to start at.
     playlistend:       Playlist item to end at.
+    playlistreverse:   Download playlist items in reverse order.
     matchtitle:        Download only matching titles.
     rejecttitle:       Reject downloads for matching titles.
     logger:            Log messages to a logging.Logger instance.
@@ -133,7 +144,7 @@ class YoutubeDL(object):
     daterange:         A DateRange object, download only if the upload_date is in the range.
     skip_download:     Skip the actual download of the video file
     cachedir:          Location of the cache files in the filesystem.
-                       None to disable filesystem cache.
+                       False to disable filesystem cache.
     noplaylist:        Download single video instead of a playlist if in doubt.
     age_limit:         An integer representing the user's age in years.
                        Unsuitable videos for the given age are skipped.
@@ -162,6 +173,9 @@ class YoutubeDL(object):
     default_search:    Prepend this string if an input url is not valid.
                        'auto' for elaborate guessing
     encoding:          Use this encoding instead of the system-specified.
+    extract_flat:      Do not resolve URLs, return the immediate result.
+                       Pass in 'in_playlist' to only show this behavior for
+                       playlist items.
 
     The following parameters are not used by YoutubeDL itself, they are used by
     the FileDownloader:
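The new extract_flat parameter documented above short-circuits URL resolution; a minimal usage sketch (the playlist URL is a placeholder):

    from youtube_dl import YoutubeDL

    # With 'in_playlist', playlist entries come back as unresolved
    # 'url' results instead of each being extracted in full.
    ydl = YoutubeDL({'extract_flat': 'in_playlist', 'skip_download': True})
    info = ydl.extract_info('https://example.com/some-playlist', download=False)
    for entry in info.get('entries', []):
        print(entry.get('url'))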
@@ -171,6 +185,7 @@ class YoutubeDL(object):
     The following options are used by the post processors:
     prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
                        otherwise prefer avconv.
+    exec_cmd:          Arbitrary command to run after downloading
     """
 
     params = None
@@ -180,7 +195,7 @@ class YoutubeDL(object):
     _num_downloads = None
     _screen_file = None
 
-    def __init__(self, params=None):
+    def __init__(self, params=None, auto_init=True):
         """Create a FileDownloader object with the given options."""
         if params is None:
             params = {}
@@ -193,6 +208,7 @@ class YoutubeDL(object):
         self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
         self._err_file = sys.stderr
         self.params = params
+        self.cache = Cache(self)
 
         if params.get('bidi_workaround', False):
             try:
@@ -223,11 +239,11 @@ class YoutubeDL(object):
 
         if (sys.version_info >= (3,) and sys.platform != 'win32' and
                 sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
-                and not params['restrictfilenames']):
+                and not params.get('restrictfilenames', False)):
             # On Python 3, the Unicode filesystem API will throw errors (#1474)
             self.report_warning(
                 'Assuming --restrict-filenames since file system encoding '
-                'cannot encode all charactes. '
+                'cannot encode all characters. '
                 'Set the LC_ALL environment variable to fix this.')
             self.params['restrictfilenames'] = True
 
@@ -236,6 +252,26 @@ class YoutubeDL(object):
 
         self._setup_opener()
 
+        if auto_init:
+            self.print_debug_header()
+            self.add_default_info_extractors()
+
+    def warn_if_short_id(self, argv):
+        # short YouTube ID starting with dash?
+        idxs = [
+            i for i, a in enumerate(argv)
+            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
+        if idxs:
+            correct_argv = (
+                ['youtube-dl'] +
+                [a for i, a in enumerate(argv) if i not in idxs] +
+                ['--'] + [argv[i] for i in idxs]
+            )
+            self.report_warning(
+                'Long argument string detected. '
+                'Use -- to separate parameters and URLs, like this:\n%s\n' %
+                args_to_str(correct_argv))
+
     def add_info_extractor(self, ie):
         """Add an InfoExtractor object to the end of the list."""
         self._ies.append(ie)
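warn_if_short_id() above catches the classic case of an 11-character YouTube ID that starts with '-' being parsed as an option; the suggested fix is the standard '--' end-of-options separator. A hypothetical trigger (the video ID is made up):

    ydl.warn_if_short_id(['-f', 'best', '-abcdefghij'])
    # warns: Use -- to separate parameters and URLs, like this:
    #        youtube-dl -f best -- -abcdefghij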
@@ -280,7 +316,7 @@ class YoutubeDL(object):
         self._output_process.stdin.write((message + '\n').encode('utf-8'))
         self._output_process.stdin.flush()
         res = ''.join(self._output_channel.readline().decode('utf-8')
-                       for _ in range(line_count))
+                      for _ in range(line_count))
         return res[:-len('\n')]
 
     def to_screen(self, message, skip_eol=False):
@@ -423,7 +459,7 @@ class YoutubeDL(object):
             autonumber_templ = '%0' + str(autonumber_size) + 'd'
             template_dict['autonumber'] = autonumber_templ % self._num_downloads
             if template_dict.get('playlist_index') is not None:
-                template_dict['playlist_index'] = '%05d' % template_dict['playlist_index']
+                template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index'])
             if template_dict.get('resolution') is None:
                 if template_dict.get('width') and template_dict.get('height'):
                     template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
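The '%0*d' change above pads playlist_index to the width of the playlist length instead of a fixed five digits, e.g. (hypothetical values):

    n_entries, playlist_index = 120, 7
    print('%0*d' % (len(str(n_entries)), playlist_index))  # -> 007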
@@ -442,7 +478,7 @@ class YoutubeDL(object):
             template_dict = collections.defaultdict(lambda: 'NA', template_dict)
 
             outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
-            tmpl = os.path.expanduser(outtmpl)
+            tmpl = compat_expanduser(outtmpl)
             filename = tmpl % template_dict
             return filename
         except ValueError as err:
@@ -479,7 +515,10 @@ class YoutubeDL(object):
                 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
         age_limit = self.params.get('age_limit')
         if age_limit is not None:
-            if age_limit < info_dict.get('age_limit', 0):
+            actual_age_limit = info_dict.get('age_limit')
+            if actual_age_limit is None:
+                actual_age_limit = 0
+            if age_limit < actual_age_limit:
                 return 'Skipping "' + title + '" because it is age restricted'
         if self.in_download_archive(info_dict):
             return '%s has already been recorded in archive' % video_title
@@ -514,7 +553,7 @@ class YoutubeDL(object):
 
             try:
                 ie_result = ie.extract(url)
-                if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
+                if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                     break
                 if isinstance(ie_result, list):
                     # Backwards compatibility: old IE result format
@@ -527,7 +566,7 @@ class YoutubeDL(object):
                     return self.process_ie_result(ie_result, download, extra_info)
                 else:
                     return ie_result
-            except ExtractorError as de: # An error we somewhat expected
+            except ExtractorError as de:  # An error we somewhat expected
                 self.report_error(compat_str(de), de.format_traceback())
                 break
             except MaxDownloadsReached:
@@ -558,7 +597,16 @@ class YoutubeDL(object):
         Returns the resolved ie_result.
         """
 
-        result_type = ie_result.get('_type', 'video') # If not given we suppose it's a video, support the default old system
+        result_type = ie_result.get('_type', 'video')
+
+        if result_type in ('url', 'url_transparent'):
+            extract_flat = self.params.get('extract_flat', False)
+            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
+                    extract_flat is True):
+                if self.params.get('forcejson', False):
+                    self.to_stdout(json.dumps(ie_result))
+                return ie_result
+
         if result_type == 'video':
             self.add_extra_info(ie_result, extra_info)
             return self.process_video_result(ie_result, download=download)
@@ -575,27 +623,19 @@ class YoutubeDL(object):
                 ie_result['url'], ie_key=ie_result.get('ie_key'),
                 extra_info=extra_info, download=False, process=False)
 
-            def make_result(embedded_info):
-                new_result = ie_result.copy()
-                for f in ('_type', 'url', 'ext', 'player_url', 'formats',
-                          'entries', 'ie_key', 'duration',
-                          'subtitles', 'annotations', 'format',
-                          'thumbnail', 'thumbnails'):
-                    if f in new_result:
-                        del new_result[f]
-                    if f in embedded_info:
-                        new_result[f] = embedded_info[f]
-                return new_result
-            new_result = make_result(info)
+            force_properties = dict(
+                (k, v) for k, v in ie_result.items() if v is not None)
+            for f in ('_type', 'url'):
+                if f in force_properties:
+                    del force_properties[f]
+            new_result = info.copy()
+            new_result.update(force_properties)
 
             assert new_result.get('_type') != 'url_transparent'
-            if new_result.get('_type') == 'compat_list':
-                new_result['entries'] = [
-                    make_result(e) for e in new_result['entries']]
 
             return self.process_ie_result(
                 new_result, download=download, extra_info=extra_info)
-        elif result_type == 'playlist':
+        elif result_type == 'playlist' or result_type == 'multi_video':
             # We process each entry in the playlist
             playlist = ie_result.get('title', None) or ie_result.get('id', None)
             self.to_screen('[download] Downloading playlist: %s' % playlist)
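The merge logic above lets a url_transparent result force any of its non-None fields onto the resolved target; a toy illustration of the dict semantics (all values hypothetical):

    ie_result = {'_type': 'url_transparent', 'url': 'http://example.com/x',
                 'title': 'Overridden title', 'id': None}
    info = {'id': 'abc123', 'title': 'Original title', 'ext': 'mp4'}

    force_properties = dict(
        (k, v) for k, v in ie_result.items() if v is not None)
    for f in ('_type', 'url'):
        force_properties.pop(f, None)
    new_result = info.copy()
    new_result.update(force_properties)
    # -> {'id': 'abc123', 'title': 'Overridden title', 'ext': 'mp4'}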
@@ -608,26 +648,39 @@ class YoutubeDL(object):
             if playlistend == -1:
                 playlistend = None
 
-            if isinstance(ie_result['entries'], list):
-                n_all_entries = len(ie_result['entries'])
-                entries = ie_result['entries'][playliststart:playlistend]
+            ie_entries = ie_result['entries']
+            if isinstance(ie_entries, list):
+                n_all_entries = len(ie_entries)
+                entries = ie_entries[playliststart:playlistend]
                 n_entries = len(entries)
                 self.to_screen(
                     "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
                     (ie_result['extractor'], playlist, n_all_entries, n_entries))
-            else:
-                assert isinstance(ie_result['entries'], PagedList)
-                entries = ie_result['entries'].getslice(
+            elif isinstance(ie_entries, PagedList):
+                entries = ie_entries.getslice(
                     playliststart, playlistend)
                 n_entries = len(entries)
                 self.to_screen(
                     "[%s] playlist %s: Downloading %d videos" %
                     (ie_result['extractor'], playlist, n_entries))
+            else:  # iterable
+                entries = list(itertools.islice(
+                    ie_entries, playliststart, playlistend))
+                n_entries = len(entries)
+                self.to_screen(
+                    "[%s] playlist %s: Downloading %d videos" %
+                    (ie_result['extractor'], playlist, n_entries))
+
+            if self.params.get('playlistreverse', False):
+                entries = entries[::-1]
 
             for i, entry in enumerate(entries, 1):
                 self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries))
                 extra = {
+                    'n_entries': n_entries,
                     'playlist': playlist,
+                    'playlist_id': ie_result.get('id'),
+                    'playlist_title': ie_result.get('title'),
                     'playlist_index': i + playliststart,
                     'extractor': ie_result['extractor'],
                     'webpage_url': ie_result['webpage_url'],
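With the third branch above, 'entries' may now be any iterable: itertools.islice applies the playliststart/playlistend window lazily, and playlistreverse then flips the materialized slice. A self-contained sketch:

    import itertools

    def entries_gen():  # hypothetical stand-in for a lazy extractor result
        for i in itertools.count(1):
            yield {'id': 'video%d' % i}

    entries = list(itertools.islice(entries_gen(), 0, 3))  # window: first three
    entries = entries[::-1]                                # what playlistreverse does
    # -> ids video3, video2, video1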
@@ -647,14 +700,20 @@ class YoutubeDL(object):
             ie_result['entries'] = playlist_results
             return ie_result
         elif result_type == 'compat_list':
+            self.report_warning(
+                'Extractor %s returned a compat_list result. '
+                'It needs to be updated.' % ie_result.get('extractor'))
+
             def _fixup(r):
-                self.add_extra_info(r,
+                self.add_extra_info(
+                    r,
                     {
                         'extractor': ie_result['extractor'],
                         'webpage_url': ie_result['webpage_url'],
                         'webpage_url_basename': url_basename(ie_result['webpage_url']),
                         'extractor_key': ie_result['extractor_key'],
-                    })
+                    }
+                )
                 return r
             ie_result['entries'] = [
                 self.process_ie_result(_fixup(r), download, extra_info)
@@ -694,7 +753,7 @@ class YoutubeDL(object):
             if video_formats:
                 return video_formats[0]
         else:
-            extensions = ['mp4', 'flv', 'webm', '3gp']
+            extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a']
             if format_spec in extensions:
                 filter_f = lambda f: f['ext'] == format_spec
             else:
@@ -732,6 +791,10 @@ class YoutubeDL(object):
             info_dict['display_id'] = info_dict['id']
 
         if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
+            # Working around negative timestamps in Windows
+            # (see http://bugs.python.org/issue1646728)
+            if info_dict['timestamp'] < 0 and os.name == 'nt':
+                info_dict['timestamp'] = 0
             upload_date = datetime.datetime.utcfromtimestamp(
                 info_dict['timestamp'])
             info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
@@ -795,28 +858,36 @@ class YoutubeDL(object):
         if req_format in ('-1', 'all'):
             formats_to_download = formats
         else:
-            # We can accept formats requested in the format: 34/5/best, we pick
-            # the first that is available, starting from left
-            req_formats = req_format.split('/')
-            for rf in req_formats:
-                if re.match(r'.+?\+.+?', rf) is not None:
-                    # Two formats have been requested like '137+139'
-                    format_1, format_2 = rf.split('+')
-                    formats_info = (self.select_format(format_1, formats),
-                        self.select_format(format_2, formats))
-                    if all(formats_info):
-                        selected_format = {
-                            'requested_formats': formats_info,
-                            'format': rf,
-                            'ext': formats_info[0]['ext'],
-                        }
+            for rfstr in req_format.split(','):
+                # We can accept formats requested in the format: 34/5/best, we pick
+                # the first that is available, starting from left
+                req_formats = rfstr.split('/')
+                for rf in req_formats:
+                    if re.match(r'.+?\+.+?', rf) is not None:
+                        # Two formats have been requested like '137+139'
+                        format_1, format_2 = rf.split('+')
+                        formats_info = (self.select_format(format_1, formats),
+                                        self.select_format(format_2, formats))
+                        if all(formats_info):
+                            # The first format must contain the video and the
+                            # second the audio
+                            if formats_info[0].get('vcodec') == 'none':
+                                self.report_error('The first format must '
+                                                  'contain the video, try using '
+                                                  '"-f %s+%s"' % (format_2, format_1))
+                                return
+                            selected_format = {
+                                'requested_formats': formats_info,
+                                'format': rf,
+                                'ext': formats_info[0]['ext'],
+                            }
+                        else:
+                            selected_format = None
                     else:
-                        selected_format = None
-                else:
-                    selected_format = self.select_format(rf, formats)
-                if selected_format is not None:
-                    formats_to_download = [selected_format]
-                    break
+                        selected_format = self.select_format(rf, formats)
+                    if selected_format is not None:
+                        formats_to_download.append(selected_format)
+                        break
         if not formats_to_download:
             raise ExtractorError('requested format not available',
                                  expected=True)
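The reworked selector above accepts comma-separated format groups, each with the old slash fallback, and each group contributes one entry to formats_to_download; '+' still requests a merge, now with the video format required first. Parsing a hypothetical spec:

    req_format = '137+140/22,bestaudio'  # format codes are illustrative
    groups = [rfstr.split('/') for rfstr in req_format.split(',')]
    # -> [['137+140', '22'], ['bestaudio']]
    # group 1: merged 137(video)+140(audio), falling back to plain 22
    # group 2: bestaudio, downloaded in addition to group 1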
@@ -867,8 +938,12 @@ class YoutubeDL(object):
         if self.params.get('forceid', False):
             self.to_stdout(info_dict['id'])
         if self.params.get('forceurl', False):
-            # For RTMP URLs, also include the playpath
-            self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
+            if info_dict.get('requested_formats') is not None:
+                for f in info_dict['requested_formats']:
+                    self.to_stdout(f['url'] + f.get('play_path', ''))
+            else:
+                # For RTMP URLs, also include the playpath
+                self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
         if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
             self.to_stdout(info_dict['thumbnail'])
         if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
@@ -882,6 +957,8 @@ class YoutubeDL(object):
         if self.params.get('forcejson', False):
             info_dict['_filename'] = filename
             self.to_stdout(json.dumps(info_dict))
+        if self.params.get('dump_single_json', False):
+            info_dict['_filename'] = filename
 
         # Do nothing else if in simulate mode
         if self.params.get('simulate', False):
@@ -947,7 +1024,7 @@ class YoutubeDL(object):
                     else:
                         self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                         with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                                subfile.write(sub)
+                            subfile.write(sub)
                 except (OSError, IOError):
                     self.report_error('Cannot write subtitles file ' + sub_filename)
                     return
@@ -959,7 +1036,7 @@ class YoutubeDL(object):
             else:
                 self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                 try:
-                    write_json_file(info_dict, encodeFilename(infofn))
+                    write_json_file(info_dict, infofn)
                 except (OSError, IOError):
                     self.report_error('Cannot write metadata to JSON file ' + infofn)
                     return
@@ -979,10 +1056,10 @@ class YoutubeDL(object):
                         with open(thumb_filename, 'wb') as thumbf:
                             shutil.copyfileobj(uf, thumbf)
                         self.to_screen('[%s] %s: Writing thumbnail to: %s' %
-                            (info_dict['extractor'], info_dict['id'], thumb_filename))
+                                       (info_dict['extractor'], info_dict['id'], thumb_filename))
                     except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                         self.report_warning('Unable to download thumbnail "%s": %s' %
-                            (info_dict['thumbnail'], compat_str(err)))
+                                            (info_dict['thumbnail'], compat_str(err)))
 
         if not self.params.get('skip_download', False):
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
@@ -1000,11 +1077,11 @@ class YoutubeDL(object):
                         downloaded = []
                         success = True
                         merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
-                        if not merger._get_executable():
+                        if not merger._executable:
                             postprocessors = []
                             self.report_warning('You have requested multiple '
-                                'formats but ffmpeg or avconv are not installed.'
-                                ' The formats won\'t be merged')
+                                                'formats but ffmpeg or avconv are not installed.'
+                                                ' The formats won\'t be merged')
                         else:
                             postprocessors = [merger]
                         for f in info_dict['requested_formats']:
@@ -1048,13 +1125,16 @@ class YoutubeDL(object):
 
         for url in url_list:
             try:
-                #It also downloads the videos
-                self.extract_info(url)
+                # It also downloads the videos
+                res = self.extract_info(url)
             except UnavailableVideoError:
                 self.report_error('unable to download video')
             except MaxDownloadsReached:
                 self.to_screen('[info] Maximum number of downloaded files reached.')
                 raise
+            else:
+                if self.params.get('dump_single_json', False):
+                    self.to_stdout(json.dumps(res))
 
         return self._download_retcode
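dump_single_json defers output until extract_info() returns, so a whole playlist is emitted as one JSON document instead of one line per video. A minimal sketch (the URL is a placeholder):

    from youtube_dl import YoutubeDL

    ydl = YoutubeDL({'dump_single_json': True, 'simulate': True})
    # download() itself prints the aggregated info dict as a single JSON line
    ydl.download(['https://example.com/some-playlist'])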
 
@@ -1178,6 +1258,8 @@ class YoutubeDL(object):
             res += 'video@'
         if fdict.get('vbr') is not None:
             res += '%4dk' % fdict['vbr']
+        if fdict.get('fps') is not None:
+            res += ', %sfps' % fdict['fps']
         if fdict.get('acodec') is not None:
             if res:
                 res += ', '
@@ -1228,6 +1310,26 @@ class YoutubeDL(object):
 
     def urlopen(self, req):
         """ Start an HTTP download """
+
+        # According to RFC 3986, URLs cannot contain non-ASCII characters; however, this
+        # is not always respected by websites: some hand out URLs with non-percent-encoded
+        # non-ASCII characters (see telemb.py, ard.py [#3412]).
+        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991),
+        # so to work around the issue we replace the request's original URL with a
+        # percent-encoded one.
+        req_is_string = isinstance(req, basestring if sys.version_info < (3, 0) else compat_str)
+        url = req if req_is_string else req.get_full_url()
+        url_escaped = escape_url(url)
+
+        # Substitute URL if any change after escaping
+        if url != url_escaped:
+            if req_is_string:
+                req = url_escaped
+            else:
+                req = compat_urllib_request.Request(
+                    url_escaped, data=req.data, headers=req.headers,
+                    origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
+
         return self._opener.open(req, timeout=self._socket_timeout)
 
     def print_debug_header(self):
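The escaping added above rewrites a request only when escape_url() actually changes something; assuming escape_url percent-encodes non-ASCII characters in the URL (as the comment describes):

    from youtube_dl.utils import escape_url

    url = 'http://example.com/vidéo.mp4'  # hypothetical URL with a non-ASCII path
    print(escape_url(url))                # -> http://example.com/vid%C3%A9o.mp4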
@@ -1239,11 +1341,13 @@ class YoutubeDL(object):
             self.report_warning(
                 'Your Python is broken! Update to a newer and supported version')
 
+        stdout_encoding = getattr(
+            sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
         encoding_str = (
             '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
                 locale.getpreferredencoding(),
                 sys.getfilesystemencoding(),
-                sys.stdout.encoding,
+                stdout_encoding,
                 self.get_encoding()))
         write_string(encoding_str, encoding=None)
 
@@ -1262,8 +1366,19 @@ class YoutubeDL(object):
                 sys.exc_clear()
             except:
                 pass
-        self._write_string('[debug] Python version %s - %s' %
-                     (platform.python_version(), platform_name()) + '\n')
+        self._write_string('[debug] Python version %s - %s\n' % (
+            platform.python_version(), platform_name()))
+
+        exe_versions = FFmpegPostProcessor.get_versions()
+        exe_versions['rtmpdump'] = rtmpdump_version()
+        exe_str = ', '.join(
+            '%s %s' % (exe, v)
+            for exe, v in sorted(exe_versions.items())
+            if v
+        )
+        if not exe_str:
+            exe_str = 'none'
+        self._write_string('[debug] exe versions: %s\n' % exe_str)
 
         proxy_map = {}
         for handler in self._opener.handlers:
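In the exe-version block above, entries with a falsy version are dropped and the rest are sorted by executable name; e.g. with hypothetical probe results:

    exe_versions = {'ffmpeg': '2.4.2', 'avprobe': None, 'rtmpdump': '2.4'}
    exe_str = ', '.join(
        '%s %s' % (exe, v)
        for exe, v in sorted(exe_versions.items())
        if v
    ) or 'none'
    # -> 'ffmpeg 2.4.2, rtmpdump 2.4'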
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 429630ce5c61289140b6b0188bba32cbf7a153a0..70c4f25b161d5d7b158da52a64b22dedc4b1a9f8 100644 (file)
@@ -1,94 +1,31 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-__authors__  = (
-    'Ricardo Garcia Gonzalez',
-    'Danny Colligan',
-    'Benjamin Johnson',
-    'Vasyl\' Vavrychuk',
-    'Witold Baryluk',
-    'Paweł Paprota',
-    'Gergely Imreh',
-    'Rogério Brito',
-    'Philipp Hagemeister',
-    'Sören Schulze',
-    'Kevin Ngo',
-    'Ori Avtalion',
-    'shizeeg',
-    'Filippo Valsorda',
-    'Christian Albrecht',
-    'Dave Vasilevsky',
-    'Jaime Marquínez Ferrándiz',
-    'Jeff Crouse',
-    'Osama Khalid',
-    'Michael Walter',
-    'M. Yasoob Ullah Khalid',
-    'Julien Fraichard',
-    'Johny Mo Swag',
-    'Axel Noack',
-    'Albert Kim',
-    'Pierre Rudloff',
-    'Huarong Huo',
-    'Ismael Mejía',
-    'Steffan \'Ruirize\' James',
-    'Andras Elso',
-    'Jelle van der Waa',
-    'Marcin Cieślak',
-    'Anton Larionov',
-    'Takuya Tsuchida',
-    'Sergey M.',
-    'Michael Orlitzky',
-    'Chris Gahan',
-    'Saimadhav Heblikar',
-    'Mike Col',
-    'Oleg Prutz',
-    'pulpe',
-    'Andreas Schmitz',
-    'Michael Kaiser',
-    'Niklas Laxström',
-    'David Triendl',
-    'Anthony Weems',
-    'David Wagner',
-    'Juan C. Olivares',
-    'Mattias Harrysson',
-    'phaer',
-    'Sainyam Kapoor',
-    'Nicolas Évrard',
-    'Jason Normore',
-    'Hoje Lee',
-    'Adam Thalhammer',
-    'Georg Jähnig',
-    'Ralf Haring',
-    'Koki Takahashi',
-    'Ariset Llerena',
-    'Adam Malcontenti-Wilson',
-    'Tobias Bell',
-    'Naglis Jonaitis',
-    'Charles Chen',
-    'Hassaan Ali',
-)
+from __future__ import unicode_literals
 
 __license__ = 'Public Domain'
 
 import codecs
 import io
-import optparse
 import os
 import random
-import shlex
-import shutil
 import sys
 
 
-from .utils import (
+from .options import (
+    parseOpts,
+)
+from .compat import (
+    compat_expanduser,
     compat_getpass,
     compat_print,
+    workaround_optparse_bug9161,
+)
+from .utils import (
     DateRange,
     DEFAULT_OUTTMPL,
     decodeOption,
-    get_term_width,
     DownloadError,
-    get_cachedir,
     MaxDownloadsReached,
     preferredencoding,
     read_batch_urls,
@@ -102,7 +39,6 @@ from .downloader import (
     FileDownloader,
 )
 from .extractor import gen_extractors
-from .version import __version__
 from .YoutubeDL import YoutubeDL
 from .postprocessor import (
     AtomicParsleyPP,
@@ -112,481 +48,19 @@ from .postprocessor import (
     FFmpegExtractAudioPP,
     FFmpegEmbedSubtitlePP,
     XAttrMetadataPP,
+    ExecAfterDownloadPP,
 )
 
 
-def parseOpts(overrideArguments=None):
-    def _readOptions(filename_bytes, default=[]):
-        try:
-            optionf = open(filename_bytes)
-        except IOError:
-            return default  # silently skip if file is not present
-        try:
-            res = []
-            for l in optionf:
-                res += shlex.split(l, comments=True)
-        finally:
-            optionf.close()
-        return res
-
-    def _readUserConf():
-        xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
-        if xdg_config_home:
-            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
-            if not os.path.isfile(userConfFile):
-                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
-        else:
-            userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
-            if not os.path.isfile(userConfFile):
-                userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
-        userConf = _readOptions(userConfFile, None)
-
-        if userConf is None:
-            appdata_dir = os.environ.get('appdata')
-            if appdata_dir:
-                userConf = _readOptions(
-                    os.path.join(appdata_dir, 'youtube-dl', 'config'),
-                    default=None)
-                if userConf is None:
-                    userConf = _readOptions(
-                        os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
-                        default=None)
-
-        if userConf is None:
-            userConf = _readOptions(
-                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'),
-                default=None)
-        if userConf is None:
-            userConf = _readOptions(
-                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'),
-                default=None)
-
-        if userConf is None:
-            userConf = []
-
-        return userConf
-
-    def _format_option_string(option):
-        ''' ('-o', '--option') -> -o, --format METAVAR'''
-
-        opts = []
-
-        if option._short_opts:
-            opts.append(option._short_opts[0])
-        if option._long_opts:
-            opts.append(option._long_opts[0])
-        if len(opts) > 1:
-            opts.insert(1, ', ')
-
-        if option.takes_value(): opts.append(' %s' % option.metavar)
-
-        return "".join(opts)
-
-    def _comma_separated_values_options_callback(option, opt_str, value, parser):
-        setattr(parser.values, option.dest, value.split(','))
-
-    def _hide_login_info(opts):
-        opts = list(opts)
-        for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
-            try:
-                i = opts.index(private_opt)
-                opts[i+1] = '<PRIVATE>'
-            except ValueError:
-                pass
-        return opts
-
-    max_width = 80
-    max_help_position = 80
-
-    # No need to wrap help messages if we're on a wide console
-    columns = get_term_width()
-    if columns: max_width = columns
-
-    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
-    fmt.format_option_strings = _format_option_string
-
-    kw = {
-        'version'   : __version__,
-        'formatter' : fmt,
-        'usage' : '%prog [options] url [url...]',
-        'conflict_handler' : 'resolve',
-    }
-
-    parser = optparse.OptionParser(**kw)
-
-    # option groups
-    general        = optparse.OptionGroup(parser, 'General Options')
-    selection      = optparse.OptionGroup(parser, 'Video Selection')
-    authentication = optparse.OptionGroup(parser, 'Authentication Options')
-    video_format   = optparse.OptionGroup(parser, 'Video Format Options')
-    subtitles      = optparse.OptionGroup(parser, 'Subtitle Options')
-    downloader     = optparse.OptionGroup(parser, 'Download Options')
-    postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
-    filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
-    workarounds    = optparse.OptionGroup(parser, 'Workarounds')
-    verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
-
-    general.add_option('-h', '--help',
-            action='help', help='print this help text and exit')
-    general.add_option('-v', '--version',
-            action='version', help='print program version and exit')
-    general.add_option('-U', '--update',
-            action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
-    general.add_option('-i', '--ignore-errors',
-            action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
-    general.add_option('--abort-on-error',
-            action='store_false', dest='ignoreerrors',
-            help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
-    general.add_option('--dump-user-agent',
-            action='store_true', dest='dump_user_agent',
-            help='display the current browser identification', default=False)
-    general.add_option('--list-extractors',
-            action='store_true', dest='list_extractors',
-            help='List all supported extractors and the URLs they would handle', default=False)
-    general.add_option('--extractor-descriptions',
-            action='store_true', dest='list_extractor_descriptions',
-            help='Output descriptions of all supported extractors', default=False)
-    general.add_option(
-        '--proxy', dest='proxy', default=None, metavar='URL',
-        help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
-    general.add_option(
-        '--socket-timeout', dest='socket_timeout',
-        type=float, default=None, help=u'Time to wait before giving up, in seconds')
-    general.add_option(
-        '--default-search',
-        dest='default_search', metavar='PREFIX',
-        help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for  youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
-    general.add_option(
-        '--ignore-config',
-        action='store_true',
-        help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
-
-    selection.add_option(
-        '--playlist-start',
-        dest='playliststart', metavar='NUMBER', default=1, type=int,
-        help='playlist video to start at (default is %default)')
-    selection.add_option(
-        '--playlist-end',
-        dest='playlistend', metavar='NUMBER', default=None, type=int,
-        help='playlist video to end at (default is last)')
-    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
-    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
-    selection.add_option('--max-downloads', metavar='NUMBER',
-                         dest='max_downloads', type=int, default=None,
-                         help='Abort after downloading NUMBER files')
-    selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
-    selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
-    selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
-    selection.add_option(
-        '--datebefore', metavar='DATE', dest='datebefore', default=None,
-        help='download only videos uploaded on or before this date (i.e. inclusive)')
-    selection.add_option(
-        '--dateafter', metavar='DATE', dest='dateafter', default=None,
-        help='download only videos uploaded on or after this date (i.e. inclusive)')
-    selection.add_option(
-        '--min-views', metavar='COUNT', dest='min_views',
-        default=None, type=int,
-        help="Do not download any videos with less than COUNT views",)
-    selection.add_option(
-        '--max-views', metavar='COUNT', dest='max_views',
-        default=None, type=int,
-        help="Do not download any videos with more than COUNT views",)
-    selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
-    selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
-                         help='download only videos suitable for the given age',
-                         default=None, type=int)
-    selection.add_option('--download-archive', metavar='FILE',
-                         dest='download_archive',
-                         help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
-    selection.add_option(
-        '--include-ads', dest='include_ads',
-        action='store_true',
-        help='Download advertisements as well (experimental)')
-    selection.add_option(
-        '--youtube-include-dash-manifest', action='store_true',
-        dest='youtube_include_dash_manifest', default=False,
-        help='Try to download the DASH manifest on YouTube videos (experimental)')
-
-    authentication.add_option('-u', '--username',
-            dest='username', metavar='USERNAME', help='account username')
-    authentication.add_option('-p', '--password',
-            dest='password', metavar='PASSWORD', help='account password')
-    authentication.add_option('-n', '--netrc',
-            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
-    authentication.add_option('--video-password',
-            dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
-
-
-    video_format.add_option('-f', '--format',
-            action='store', dest='format', metavar='FORMAT', default=None,
-            help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality.')
-    video_format.add_option('--all-formats',
-            action='store_const', dest='format', help='download all available video formats', const='all')
-    video_format.add_option('--prefer-free-formats',
-            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
-    video_format.add_option('--max-quality',
-            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
-    video_format.add_option('-F', '--list-formats',
-            action='store_true', dest='listformats', help='list all available formats')
-
-    subtitles.add_option('--write-sub', '--write-srt',
-            action='store_true', dest='writesubtitles',
-            help='write subtitle file', default=False)
-    subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
-            action='store_true', dest='writeautomaticsub',
-            help='write automatic subtitle file (youtube only)', default=False)
-    subtitles.add_option('--all-subs',
-            action='store_true', dest='allsubtitles',
-            help='downloads all the available subtitles of the video', default=False)
-    subtitles.add_option('--list-subs',
-            action='store_true', dest='listsubtitles',
-            help='lists all available subtitles for the video', default=False)
-    subtitles.add_option('--sub-format',
-            action='store', dest='subtitlesformat', metavar='FORMAT',
-            help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
-    subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
-            action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
-            default=[], callback=_comma_separated_values_options_callback,
-            help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
-
-    downloader.add_option('-r', '--rate-limit',
-            dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
-    downloader.add_option('-R', '--retries',
-            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
-    downloader.add_option('--buffer-size',
-            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
-    downloader.add_option('--no-resize-buffer',
-            action='store_true', dest='noresizebuffer',
-            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
-    downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
-
-    workarounds.add_option(
-        '--encoding', dest='encoding', metavar='ENCODING',
-        help='Force the specified encoding (experimental)')
-    workarounds.add_option(
-        '--no-check-certificate', action='store_true',
-        dest='no_check_certificate', default=False,
-        help='Suppress HTTPS certificate validation.')
-    workarounds.add_option(
-        '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
-        help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
-    workarounds.add_option(
-        '--user-agent', metavar='UA',
-        dest='user_agent', help='specify a custom user agent')
-    workarounds.add_option(
-        '--referer', metavar='REF',
-        dest='referer', default=None,
-        help='specify a custom referer, use if the video access is restricted to one domain',
-    )
-    workarounds.add_option(
-        '--add-header', metavar='FIELD:VALUE',
-        dest='headers', action='append',
-        help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
-    )
-    workarounds.add_option(
-        '--bidi-workaround', dest='bidi_workaround', action='store_true',
-        help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
-
-    verbosity.add_option('-q', '--quiet',
-            action='store_true', dest='quiet', help='activates quiet mode', default=False)
-    verbosity.add_option(
-        '--no-warnings',
-        dest='no_warnings', action='store_true', default=False,
-        help='Ignore warnings')
-    verbosity.add_option('-s', '--simulate',
-            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
-    verbosity.add_option('--skip-download',
-            action='store_true', dest='skip_download', help='do not download the video', default=False)
-    verbosity.add_option('-g', '--get-url',
-            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
-    verbosity.add_option('-e', '--get-title',
-            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
-    verbosity.add_option('--get-id',
-            action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
-    verbosity.add_option('--get-thumbnail',
-            action='store_true', dest='getthumbnail',
-            help='simulate, quiet but print thumbnail URL', default=False)
-    verbosity.add_option('--get-description',
-            action='store_true', dest='getdescription',
-            help='simulate, quiet but print video description', default=False)
-    verbosity.add_option('--get-duration',
-            action='store_true', dest='getduration',
-            help='simulate, quiet but print video length', default=False)
-    verbosity.add_option('--get-filename',
-            action='store_true', dest='getfilename',
-            help='simulate, quiet but print output filename', default=False)
-    verbosity.add_option('--get-format',
-            action='store_true', dest='getformat',
-            help='simulate, quiet but print output format', default=False)
-    verbosity.add_option('-j', '--dump-json',
-            action='store_true', dest='dumpjson',
-            help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
-    verbosity.add_option('--newline',
-            action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
-    verbosity.add_option('--no-progress',
-            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
-    verbosity.add_option('--console-title',
-            action='store_true', dest='consoletitle',
-            help='display progress in console titlebar', default=False)
-    verbosity.add_option('-v', '--verbose',
-            action='store_true', dest='verbose', help='print various debugging information', default=False)
-    verbosity.add_option('--dump-intermediate-pages',
-            action='store_true', dest='dump_intermediate_pages', default=False,
-            help='print downloaded pages to debug problems (very verbose)')
-    verbosity.add_option('--write-pages',
-            action='store_true', dest='write_pages', default=False,
-            help='Write downloaded intermediary pages to files in the current directory to debug problems')
-    verbosity.add_option('--youtube-print-sig-code',
-            action='store_true', dest='youtube_print_sig_code', default=False,
-            help=optparse.SUPPRESS_HELP)
-    verbosity.add_option('--print-traffic',
-            dest='debug_printtraffic', action='store_true', default=False,
-            help='Display sent and read HTTP traffic')
-
-
-    filesystem.add_option('-a', '--batch-file',
-            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
-    filesystem.add_option('--id',
-            action='store_true', dest='useid', help='use only video ID in file name', default=False)
-    filesystem.add_option('-A', '--auto-number',
-            action='store_true', dest='autonumber',
-            help='number downloaded files starting from 00000', default=False)
-    filesystem.add_option('-o', '--output',
-            dest='outtmpl', metavar='TEMPLATE',
-            help=('output filename template. Use %(title)s to get the title, '
-                  '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
-                  '%(autonumber)s to get an automatically incremented number, '
-                  '%(ext)s for the filename extension, '
-                  '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
-                  '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
-                  '%(upload_date)s for the upload date (YYYYMMDD), '
-                  '%(extractor)s for the provider (youtube, metacafe, etc), '
-                  '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
-                  '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
-                  '%(height)s and %(width)s for the width and height of the video format. '
-                  '%(resolution)s for a textual description of the resolution of the video format. '
-                  'Use - to output to stdout. Can also be used to download to a different directory, '
-                  'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
-    filesystem.add_option('--autonumber-size',
-            dest='autonumber_size', metavar='NUMBER',
-            help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
-    filesystem.add_option('--restrict-filenames',
-            action='store_true', dest='restrictfilenames',
-            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
-    filesystem.add_option('-t', '--title',
-            action='store_true', dest='usetitle', help='[deprecated] use title in file name (default)', default=False)
-    filesystem.add_option('-l', '--literal',
-            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
-    filesystem.add_option('-w', '--no-overwrites',
-            action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
-    filesystem.add_option('-c', '--continue',
-            action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
-    filesystem.add_option('--no-continue',
-            action='store_false', dest='continue_dl',
-            help='do not resume partially downloaded files (restart from beginning)')
-    filesystem.add_option('--no-part',
-            action='store_true', dest='nopart', help='do not use .part files', default=False)
-    filesystem.add_option('--no-mtime',
-            action='store_false', dest='updatetime',
-            help='do not use the Last-modified header to set the file modification time', default=True)
-    filesystem.add_option('--write-description',
-            action='store_true', dest='writedescription',
-            help='write video description to a .description file', default=False)
-    filesystem.add_option('--write-info-json',
-            action='store_true', dest='writeinfojson',
-            help='write video metadata to a .info.json file', default=False)
-    filesystem.add_option('--write-annotations',
-            action='store_true', dest='writeannotations',
-            help='write video annotations to a .annotation file', default=False)
-    filesystem.add_option('--write-thumbnail',
-            action='store_true', dest='writethumbnail',
-            help='write thumbnail image to disk', default=False)
-    filesystem.add_option('--load-info',
-            dest='load_info_filename', metavar='FILE',
-            help='json file containing the video information (created with the "--write-json" option)')
-    filesystem.add_option('--cookies',
-            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
-    filesystem.add_option(
-        '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
-        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
-    filesystem.add_option(
-        '--no-cache-dir', action='store_const', const=None, dest='cachedir',
-        help='Disable filesystem caching')
-    filesystem.add_option(
-        '--rm-cache-dir', action='store_true', dest='rm_cachedir',
-        help='Delete all filesystem cache files')
-
-
-    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
-            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
-    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
-            help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
-    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
-            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
-    postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
-            help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
-    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
-            help='keeps the video file on disk after the post-processing; the video is erased by default')
-    postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
-            help='do not overwrite post-processed files; the post-processed files are overwritten by default')
-    postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
-            help='embed subtitles in the video (only for mp4 videos)')
-    postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
-            help='embed thumbnail in the audio as cover art')
-    postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
-            help='write metadata to the video file')
-    postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
-            help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
-    postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
-        help='Prefer avconv over ffmpeg for running the postprocessors (default)')
-    postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
-        help='Prefer ffmpeg over avconv for running the postprocessors')
-
-
-    parser.add_option_group(general)
-    parser.add_option_group(selection)
-    parser.add_option_group(downloader)
-    parser.add_option_group(filesystem)
-    parser.add_option_group(verbosity)
-    parser.add_option_group(workarounds)
-    parser.add_option_group(video_format)
-    parser.add_option_group(subtitles)
-    parser.add_option_group(authentication)
-    parser.add_option_group(postproc)
-
-    if overrideArguments is not None:
-        opts, args = parser.parse_args(overrideArguments)
-        if opts.verbose:
-            write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
-    else:
-        commandLineConf = sys.argv[1:]
-        if '--ignore-config' in commandLineConf:
-            systemConf = []
-            userConf = []
-        else:
-            systemConf = _readOptions('/etc/youtube-dl.conf')
-            if '--ignore-config' in systemConf:
-                userConf = []
-            else:
-                userConf = _readUserConf()
-        argv = systemConf + userConf + commandLineConf
-
-        opts, args = parser.parse_args(argv)
-        if opts.verbose:
-            write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
-            write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
-            write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
-
-    return parser, opts, args
-
-
 def _real_main(argv=None):
     # Compatibility fixes for Windows
     if sys.platform == 'win32':
         # https://github.com/rg3/youtube-dl/issues/820
         codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
 
-    setproctitle(u'youtube-dl')
+    workaround_optparse_bug9161()
+
+    setproctitle('youtube-dl')
 
     parser, opts, args = parseOpts(argv)
 
@@ -602,10 +76,10 @@ def _real_main(argv=None):
     if opts.headers is not None:
         for h in opts.headers:
             if h.find(':', 1) < 0:
-                parser.error(u'wrong header formatting, it should be key:value, not "%s"'%h)
+                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
             key, value = h.split(':', 2)
             if opts.verbose:
-                write_string(u'[debug] Adding header from command line option %s:%s\n'%(key, value))
+                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
             std_headers[key] = value
 
     # Dump user agent
@@ -623,9 +97,9 @@ def _real_main(argv=None):
                 batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
             batch_urls = read_batch_urls(batchfd)
             if opts.verbose:
-                write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n')
+                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
         except IOError:
-            sys.exit(u'ERROR: batch file could not be read')
+            sys.exit('ERROR: batch file could not be read')
     all_urls = batch_urls + args
     all_urls = [url.strip() for url in all_urls]
     _enc = preferredencoding()
@@ -638,7 +112,7 @@ def _real_main(argv=None):
             compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
             matchedUrls = [url for url in all_urls if ie.suitable(url)]
             for mu in matchedUrls:
-                compat_print(u'  ' + mu)
+                compat_print('  ' + mu)
         sys.exit(0)
     if opts.list_extractor_descriptions:
         for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
@@ -648,69 +122,66 @@ def _real_main(argv=None):
             if desc is False:
                 continue
             if hasattr(ie, 'SEARCH_KEY'):
-                _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise', u'sleeping bunny')
-                _COUNTS = (u'', u'5', u'10', u'all')
-                desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
+                _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny')
+                _COUNTS = ('', '5', '10', 'all')
+                desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
             compat_print(desc)
         sys.exit(0)
 
-
     # Conflicting, missing and erroneous options
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
-        parser.error(u'using .netrc conflicts with giving username/password')
+        parser.error('using .netrc conflicts with giving username/password')
     if opts.password is not None and opts.username is None:
-        parser.error(u'account username missing\n')
+        parser.error('account username missing\n')
     if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
-        parser.error(u'using output template conflicts with using title, video ID or auto number')
+        parser.error('using output template conflicts with using title, video ID or auto number')
     if opts.usetitle and opts.useid:
-        parser.error(u'using title conflicts with using video ID')
+        parser.error('using title conflicts with using video ID')
     if opts.username is not None and opts.password is None:
-        opts.password = compat_getpass(u'Type account password and press [Return]: ')
+        opts.password = compat_getpass('Type account password and press [Return]: ')
     if opts.ratelimit is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
         if numeric_limit is None:
-            parser.error(u'invalid rate limit specified')
+            parser.error('invalid rate limit specified')
         opts.ratelimit = numeric_limit
     if opts.min_filesize is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
         if numeric_limit is None:
-            parser.error(u'invalid min_filesize specified')
+            parser.error('invalid min_filesize specified')
         opts.min_filesize = numeric_limit
     if opts.max_filesize is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
         if numeric_limit is None:
-            parser.error(u'invalid max_filesize specified')
+            parser.error('invalid max_filesize specified')
         opts.max_filesize = numeric_limit
     if opts.retries is not None:
         try:
             opts.retries = int(opts.retries)
         except (TypeError, ValueError):
-            parser.error(u'invalid retry count specified')
+            parser.error('invalid retry count specified')
     if opts.buffersize is not None:
         numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
         if numeric_buffersize is None:
-            parser.error(u'invalid buffer size specified')
+            parser.error('invalid buffer size specified')
         opts.buffersize = numeric_buffersize
     if opts.playliststart <= 0:
-        raise ValueError(u'Playlist start must be positive')
+        raise ValueError('Playlist start must be positive')
     if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
-        raise ValueError(u'Playlist end must be greater than playlist start')
+        raise ValueError('Playlist end must be greater than playlist start')
     if opts.extractaudio:
         if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
-            parser.error(u'invalid audio format specified')
+            parser.error('invalid audio format specified')
     if opts.audioquality:
         opts.audioquality = opts.audioquality.strip('k').strip('K')
         if not opts.audioquality.isdigit():
-            parser.error(u'invalid audio quality specified')
+            parser.error('invalid audio quality specified')
     if opts.recodevideo is not None:
         if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
-            parser.error(u'invalid video recode format specified')
+            parser.error('invalid video recode format specified')
     if opts.date is not None:
         date = DateRange.day(opts.date)
     else:
         date = DateRange(opts.dateafter, opts.datebefore)
-    if opts.default_search not in ('auto', 'auto_warning', 'error', 'fixup_error', None) and ':' not in opts.default_search:
-        parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')
 
     # Do not download videos when there are audio-only formats
     if opts.extractaudio and not opts.keepvideo and opts.format is None:
@@ -718,33 +189,34 @@ def _real_main(argv=None):
 
     # --all-sub automatically sets --write-sub if --write-auto-sub is not given
     # this was the old behaviour if only --all-sub was given.
-    if opts.allsubtitles and (opts.writeautomaticsub == False):
+    if opts.allsubtitles and not opts.writeautomaticsub:
         opts.writesubtitles = True
 
     if sys.version_info < (3,):
         # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
         if opts.outtmpl is not None:
             opts.outtmpl = opts.outtmpl.decode(preferredencoding())
-    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
-            or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
-            or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
-            or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
-            or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
-            or (opts.useid and u'%(id)s.%(ext)s')
-            or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
-            or DEFAULT_OUTTMPL)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
+               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
+               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
+               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
+               or (opts.useid and '%(id)s.%(ext)s')
+               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
+               or DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
-        parser.error(u'Cannot download a video and extract audio into the same'
-                     u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
-                     u' template'.format(outtmpl))
+        parser.error('Cannot download a video and extract audio into the same'
+                     ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
+                     ' template'.format(outtmpl))
 
-    any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
-    download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
+    any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
+    download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
 
     ydl_opts = {
         'usenetrc': opts.usenetrc,
         'username': opts.username,
         'password': opts.password,
+        'twofactor': opts.twofactor,
         'videopassword': opts.videopassword,
         'quiet': (opts.quiet or any_printing),
         'no_warnings': opts.no_warnings,
@@ -757,8 +229,9 @@ def _real_main(argv=None):
         'forcefilename': opts.getfilename,
         'forceformat': opts.getformat,
         'forcejson': opts.dumpjson,
-        'simulate': opts.simulate,
-        'skip_download': (opts.skip_download or opts.simulate or any_printing),
+        'dump_single_json': opts.dump_single_json,
+        'simulate': opts.simulate or any_printing,
+        'skip_download': opts.skip_download,
         'format': opts.format,
         'format_limit': opts.format_limit,
         'listformats': opts.listformats,
@@ -776,6 +249,7 @@ def _real_main(argv=None):
         'progress_with_newline': opts.progress_with_newline,
         'playliststart': opts.playliststart,
         'playlistend': opts.playlistend,
+        'playlistreverse': opts.playlist_reverse,
         'noplaylist': opts.noplaylist,
         'logtostderr': opts.outtmpl == '-',
         'consoletitle': opts.consoletitle,
@@ -821,12 +295,11 @@ def _real_main(argv=None):
         'default_search': opts.default_search,
         'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
         'encoding': opts.encoding,
+        'exec_cmd': opts.exec_cmd,
+        'extract_flat': opts.extract_flat,
     }
 
     with YoutubeDL(ydl_opts) as ydl:
-        ydl.print_debug_header()
-        ydl.add_default_info_extractors()
-
         # PostProcessors
         # Add the metadata pp first, the other pps will copy it
         if opts.addmetadata:
@@ -844,41 +317,35 @@ def _real_main(argv=None):
                 ydl.add_post_processor(FFmpegAudioFixPP())
             ydl.add_post_processor(AtomicParsleyPP())
 
+        # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
+        # If the user's command removes or renames the file, any postprocessor running after it will fail.
+        if opts.exec_cmd:
+            ydl.add_post_processor(ExecAfterDownloadPP(
+                verboseOutput=opts.verbose, exec_cmd=opts.exec_cmd))
+
         # Update version
         if opts.update_self:
             update_self(ydl.to_screen, opts.verbose)
 
         # Remove cache dir
         if opts.rm_cachedir:
-            if opts.cachedir is None:
-                ydl.to_screen(u'No cache dir specified (Did you combine --no-cache-dir and --rm-cache-dir?)')
-            else:
-                if ('.cache' not in opts.cachedir) or ('youtube-dl' not in opts.cachedir):
-                    ydl.to_screen(u'Not removing directory %s - this does not look like a cache dir')
-                    retcode = 141
-                else:
-                    ydl.to_screen(
-                        u'Removing cache dir %s .' % opts.cachedir,
-                        skip_eol=True)
-                    if os.path.exists(opts.cachedir):
-                        ydl.to_screen(u'.', skip_eol=True)
-                        shutil.rmtree(opts.cachedir)
-                    ydl.to_screen(u'.')
+            ydl.cache.remove()
 
         # Maybe do nothing
         if (len(all_urls) < 1) and (opts.load_info_filename is None):
-            if not (opts.update_self or opts.rm_cachedir):
-                parser.error(u'you must provide at least one URL')
-            else:
+            if opts.update_self or opts.rm_cachedir:
                 sys.exit()
 
+            ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
+            parser.error('you must provide at least one URL')
+
         try:
             if opts.load_info_filename is not None:
                 retcode = ydl.download_with_info_file(opts.load_info_filename)
             else:
                 retcode = ydl.download(all_urls)
         except MaxDownloadsReached:
-            ydl.to_screen(u'--max-download limit reached, aborting.')
+            ydl.to_screen('--max-download limit reached, aborting.')
             retcode = 101
 
     sys.exit(retcode)
@@ -890,6 +357,6 @@ def main(argv=None):
     except DownloadError:
         sys.exit(1)
     except SameFileError:
-        sys.exit(u'ERROR: fixed output name but more than one file to download')
+        sys.exit('ERROR: fixed output name but more than one file to download')
     except KeyboardInterrupt:
-        sys.exit(u'\nERROR: Interrupted by user')
+        sys.exit('\nERROR: Interrupted by user')
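
For reference, the removed parseOpts assembled argv from three sources in a fixed order, so later sources override earlier ones. A simplified sketch of that precedence (build_argv is a hypothetical helper, not part of the codebase):

    import sys

    def build_argv(system_conf, user_conf, command_line=None):
        # optparse keeps the last occurrence of an option, so the command
        # line overrides the user config, which overrides the system config.
        if command_line is None:
            command_line = sys.argv[1:]
        if '--ignore-config' in command_line:
            return command_line
        return system_conf + user_conf + command_line

    # build_argv(['--no-mtime'], ['-f', 'best'], ['-f', 'worst'])
    # -> ['--no-mtime', '-f', 'best', '-f', 'worst']; the last '-f' wins
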
index 3fe29c91f416e0d6c957ed750d3f0f69950dc9c0..65a0f891c5998cd49c7e1a98dbca75b6c926ccb8 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Execute with
 # $ python youtube_dl/__main__.py (2.6+)
index e9c5e21521d66baa177986e8ca878e3fc1a75461..5efd0f836bcf2375b065be008a36e5b8a54d2e69 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 __all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
 
 import base64
@@ -7,10 +9,11 @@ from .utils import bytes_to_intlist, intlist_to_bytes
 
 BLOCK_SIZE_BYTES = 16
 
+
 def aes_ctr_decrypt(data, key, counter):
     """
     Decrypt with aes in counter mode
-    
+
     @param {int[]} data        cipher
     @param {int[]} key         16/24/32-Byte cipher key
     @param {instance} counter  Instance whose next_value function (@returns {int[]}  16-Byte block)
@@ -19,23 +22,24 @@ def aes_ctr_decrypt(data, key, counter):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
-    
-    decrypted_data=[]
+
+    decrypted_data = []
     for i in range(block_count):
         counter_block = counter.next_value()
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
-        
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
+
         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         decrypted_data += xor(block, cipher_counter_block)
     decrypted_data = decrypted_data[:len(data)]
-    
+
     return decrypted_data
 
+
 def aes_cbc_decrypt(data, key, iv):
     """
     Decrypt with aes in CBC mode
-    
+
     @param {int[]} data        cipher
     @param {int[]} key         16/24/32-Byte cipher key
     @param {int[]} iv          16-Byte IV
@@ -43,94 +47,98 @@ def aes_cbc_decrypt(data, key, iv):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
-    
-    decrypted_data=[]
+
+    decrypted_data = []
     previous_cipher_block = iv
     for i in range(block_count):
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
-        
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
+
         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
         previous_cipher_block = block
     decrypted_data = decrypted_data[:len(data)]
-    
+
     return decrypted_data
 
+
 def key_expansion(data):
     """
     Generate key schedule
-    
+
     @param {int[]} data  16/24/32-Byte cipher key
-    @returns {int[]}     176/208/240-Byte expanded key 
+    @returns {int[]}     176/208/240-Byte expanded key
     """
-    data = data[:] # copy
+    data = data[:]  # copy
     rcon_iteration = 1
     key_size_bytes = len(data)
     expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
-    
+
     while len(data) < expanded_key_size_bytes:
         temp = data[-4:]
         temp = key_schedule_core(temp, rcon_iteration)
         rcon_iteration += 1
-        data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-        
+        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
+
         for _ in range(3):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-        
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
+
         if key_size_bytes == 32:
             temp = data[-4:]
             temp = sub_bytes(temp)
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-        
-        for _ in range(3 if key_size_bytes == 32  else 2 if key_size_bytes == 24 else 0):
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
+
+        for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
     data = data[:expanded_key_size_bytes]
-    
+
     return data
 
+
 def aes_encrypt(data, expanded_key):
     """
     Encrypt one block with aes
-    
+
     @param {int[]} data          16-Byte state
-    @param {int[]} expanded_key  176/208/240-Byte expanded key 
+    @param {int[]} expanded_key  176/208/240-Byte expanded key
     @returns {int[]}             16-Byte cipher
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    for i in range(1, rounds+1):
+    for i in range(1, rounds + 1):
         data = sub_bytes(data)
         data = shift_rows(data)
         if i != rounds:
             data = mix_columns(data)
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
 
     return data
 
+
 def aes_decrypt(data, expanded_key):
     """
     Decrypt one block with aes
-    
+
     @param {int[]} data          16-Byte cipher
     @param {int[]} expanded_key  176/208/240-Byte expanded key
     @returns {int[]}             16-Byte state
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
-    
+
     for i in range(rounds, 0, -1):
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
         if i != rounds:
             data = mix_columns_inv(data)
         data = shift_rows_inv(data)
         data = sub_bytes_inv(data)
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    
+
     return data
 
+
 def aes_decrypt_text(data, password, key_size_bytes):
     """
     Decrypt text
@@ -138,33 +146,34 @@ def aes_decrypt_text(data, password, key_size_bytes):
     - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
       with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
     - Mode of operation is 'counter'
-    
+
     @param {str} data                    Base64 encoded string
     @param {str,unicode} password        Password (will be encoded with utf-8)
     @param {int} key_size_bytes          Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
     @returns {str}                       Decrypted data
     """
     NONCE_LENGTH_BYTES = 8
-    
+
     data = bytes_to_intlist(base64.b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))
-    
-    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
+
+    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
-    
+
     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]
-    
+
     class Counter:
-        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+
         def next_value(self):
             temp = self.__value
             self.__value = inc(self.__value)
             return temp
-    
+
     decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
     plaintext = intlist_to_bytes(decrypted_data)
-    
+
     return plaintext
 
 RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
@@ -200,14 +209,14 @@ SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x
             0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
             0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
             0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
-MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
-                     (0x1,0x2,0x3,0x1),
-                     (0x1,0x1,0x2,0x3),
-                     (0x3,0x1,0x1,0x2))
-MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
-                         (0x9,0xE,0xB,0xD),
-                         (0xD,0x9,0xE,0xB),
-                         (0xB,0xD,0x9,0xE))
+MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
+                     (0x1, 0x2, 0x3, 0x1),
+                     (0x1, 0x1, 0x2, 0x3),
+                     (0x3, 0x1, 0x1, 0x2))
+MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
+                         (0x9, 0xE, 0xB, 0xD),
+                         (0xD, 0x9, 0xE, 0xB),
+                         (0xB, 0xD, 0x9, 0xE))
 RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
                       0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
                       0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
@@ -241,30 +250,37 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
                       0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
                       0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
 
+
 def sub_bytes(data):
     return [SBOX[x] for x in data]
 
+
 def sub_bytes_inv(data):
     return [SBOX_INV[x] for x in data]
 
+
 def rotate(data):
     return data[1:] + [data[0]]
 
+
 def key_schedule_core(data, rcon_iteration):
     data = rotate(data)
     data = sub_bytes(data)
     data[0] = data[0] ^ RCON[rcon_iteration]
-    
+
     return data
 
+
 def xor(data1, data2):
-    return [x^y for x, y in zip(data1, data2)]
+    return [x ^ y for x, y in zip(data1, data2)]
+
 
 def rijndael_mul(a, b):
-    if(a==0 or b==0):
+    if(a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
 
+
 def mix_column(data, matrix):
     data_mixed = []
     for row in range(4):
@@ -275,33 +291,38 @@ def mix_column(data, matrix):
         data_mixed.append(mixed)
     return data_mixed
 
+
 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
-        column = data[i*4 : (i+1)*4]
+        column = data[i * 4: (i + 1) * 4]
         data_mixed += mix_column(column, matrix)
     return data_mixed
 
+
 def mix_columns_inv(data):
     return mix_columns(data, MIX_COLUMN_MATRIX_INV)
 
+
 def shift_rows(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column + row) & 0b11) * 4 + row])
     return data_shifted
 
+
 def shift_rows_inv(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column - row) & 0b11) * 4 + row])
     return data_shifted
 
+
 def inc(data):
-    data = data[:] # copy
-    for i in range(len(data)-1,-1,-1):
+    data = data[:]  # copy
+    for i in range(len(data) - 1, -1, -1):
         if data[i] == 255:
             data[i] = 0
         else:
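
The reformatted aes.py is a pure-Python AES implementation. A quick round-trip sketch, using only functions defined in this file, shows how key_expansion, aes_encrypt and aes_decrypt fit together:

    from youtube_dl.aes import aes_decrypt, aes_encrypt, key_expansion

    key = list(range(16))              # 16-byte key, i.e. AES-128
    expanded_key = key_expansion(key)  # (16 // 4 + 7) * 16 = 176 bytes
    block = [0x41] * 16                # one 16-byte state as an int list

    assert len(expanded_key) == 176
    assert aes_decrypt(aes_encrypt(block, expanded_key), expanded_key) == block
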
diff --git a/youtube_dl/cache.py b/youtube_dl/cache.py
new file mode 100644 (file)
index 0000000..5fe839e
--- /dev/null
@@ -0,0 +1,93 @@
+from __future__ import unicode_literals
+
+import errno
+import io
+import json
+import os
+import re
+import shutil
+import traceback
+
+from .compat import compat_expanduser, compat_getenv
+from .utils import write_json_file
+
+
+class Cache(object):
+    def __init__(self, ydl):
+        self._ydl = ydl
+
+    def _get_root_dir(self):
+        res = self._ydl.params.get('cachedir')
+        if res is None:
+            cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
+            res = os.path.join(cache_root, 'youtube-dl')
+        return compat_expanduser(res)
+
+    def _get_cache_fn(self, section, key, dtype):
+        assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
+            'invalid section %r' % section
+        assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
+        return os.path.join(
+            self._get_root_dir(), section, '%s.%s' % (key, dtype))
+
+    @property
+    def enabled(self):
+        return self._ydl.params.get('cachedir') is not False
+
+    def store(self, section, key, data, dtype='json'):
+        assert dtype in ('json',)
+
+        if not self.enabled:
+            return
+
+        fn = self._get_cache_fn(section, key, dtype)
+        try:
+            try:
+                os.makedirs(os.path.dirname(fn))
+            except OSError as ose:
+                if ose.errno != errno.EEXIST:
+                    raise
+            write_json_file(data, fn)
+        except Exception:
+            tb = traceback.format_exc()
+            self._ydl.report_warning(
+                'Writing cache to %r failed: %s' % (fn, tb))
+
+    def load(self, section, key, dtype='json', default=None):
+        assert dtype in ('json',)
+
+        if not self.enabled:
+            return default
+
+        cache_fn = self._get_cache_fn(section, key, dtype)
+        try:
+            try:
+                with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
+                    return json.load(cachef)
+            except ValueError:
+                try:
+                    file_size = os.path.getsize(cache_fn)
+                except (OSError, IOError) as oe:
+                    file_size = str(oe)
+                self._ydl.report_warning(
+                    'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
+        except IOError:
+            pass  # No cache available
+
+        return default
+
+    def remove(self):
+        if not self.enabled:
+            self._ydl.to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)')
+            return
+
+        cachedir = self._get_root_dir()
+        if not any((term in cachedir) for term in ('cache', 'tmp')):
+            raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
+
+        self._ydl.to_screen(
+            'Removing cache dir %s .' % cachedir, skip_eol=True)
+        if os.path.exists(cachedir):
+            self._ydl.to_screen('.', skip_eol=True)
+            shutil.rmtree(cachedir)
+        self._ydl.to_screen('.')
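
The new Cache object hangs off a YoutubeDL instance (note the ydl.cache.remove() call in __init__.py above). A sketch of the intended use, with a hypothetical cache location and key:

    from youtube_dl import YoutubeDL

    ydl = YoutubeDL({'cachedir': '/tmp/ydl-cache'})  # hypothetical location
    # section and key must both match [a-zA-Z0-9_.-]+
    ydl.cache.store('youtube-sigfuncs', 'example-key', {'n': 42})
    data = ydl.cache.load('youtube-sigfuncs', 'example-key', default={})
    # Passing 'cachedir': False disables the cache entirely; load() then
    # returns the default and store() is a no-op.
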
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
new file mode 100644 (file)
index 0000000..46d4388
--- /dev/null
@@ -0,0 +1,358 @@
+from __future__ import unicode_literals
+
+import getpass
+import optparse
+import os
+import re
+import subprocess
+import sys
+
+
+try:
+    import urllib.request as compat_urllib_request
+except ImportError:  # Python 2
+    import urllib2 as compat_urllib_request
+
+try:
+    import urllib.error as compat_urllib_error
+except ImportError:  # Python 2
+    import urllib2 as compat_urllib_error
+
+try:
+    import urllib.parse as compat_urllib_parse
+except ImportError:  # Python 2
+    import urllib as compat_urllib_parse
+
+try:
+    from urllib.parse import urlparse as compat_urllib_parse_urlparse
+except ImportError:  # Python 2
+    from urlparse import urlparse as compat_urllib_parse_urlparse
+
+try:
+    import urllib.parse as compat_urlparse
+except ImportError:  # Python 2
+    import urlparse as compat_urlparse
+
+try:
+    import http.cookiejar as compat_cookiejar
+except ImportError:  # Python 2
+    import cookielib as compat_cookiejar
+
+try:
+    import html.entities as compat_html_entities
+except ImportError:  # Python 2
+    import htmlentitydefs as compat_html_entities
+
+try:
+    import html.parser as compat_html_parser
+except ImportError:  # Python 2
+    import HTMLParser as compat_html_parser
+
+try:
+    import http.client as compat_http_client
+except ImportError:  # Python 2
+    import httplib as compat_http_client
+
+try:
+    from urllib.error import HTTPError as compat_HTTPError
+except ImportError:  # Python 2
+    from urllib2 import HTTPError as compat_HTTPError
+
+try:
+    from urllib.request import urlretrieve as compat_urlretrieve
+except ImportError:  # Python 2
+    from urllib import urlretrieve as compat_urlretrieve
+
+
+try:
+    from subprocess import DEVNULL
+    compat_subprocess_get_DEVNULL = lambda: DEVNULL
+except ImportError:
+    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
+
+try:
+    from urllib.parse import unquote as compat_urllib_parse_unquote
+except ImportError:
+    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
+        if string == '':
+            return string
+        res = string.split('%')
+        if len(res) == 1:
+            return string
+        if encoding is None:
+            encoding = 'utf-8'
+        if errors is None:
+            errors = 'replace'
+        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
+        pct_sequence = b''
+        string = res[0]
+        for item in res[1:]:
+            try:
+                if not item:
+                    raise ValueError
+                pct_sequence += item[:2].decode('hex')
+                rest = item[2:]
+                if not rest:
+                    # This segment was just a single percent-encoded character.
+                    # May be part of a sequence of code units, so delay decoding.
+                    # (Stored in pct_sequence).
+                    continue
+            except ValueError:
+                rest = '%' + item
+            # Encountered non-percent-encoded characters. Flush the current
+            # pct_sequence.
+            string += pct_sequence.decode(encoding, errors) + rest
+            pct_sequence = b''
+        if pct_sequence:
+            # Flush the final pct_sequence
+            string += pct_sequence.decode(encoding, errors)
+        return string
+
+
+try:
+    from urllib.parse import parse_qs as compat_parse_qs
+except ImportError:  # Python 2
+    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
+    # Python 2's version is apparently totally broken
+
+    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
+                   encoding='utf-8', errors='replace'):
+        qs, _coerce_result = qs, unicode
+        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+        r = []
+        for name_value in pairs:
+            if not name_value and not strict_parsing:
+                continue
+            nv = name_value.split('=', 1)
+            if len(nv) != 2:
+                if strict_parsing:
+                    raise ValueError("bad query field: %r" % (name_value,))
+                # Handle case of a control-name with no equal sign
+                if keep_blank_values:
+                    nv.append('')
+                else:
+                    continue
+            if len(nv[1]) or keep_blank_values:
+                name = nv[0].replace('+', ' ')
+                name = compat_urllib_parse_unquote(
+                    name, encoding=encoding, errors=errors)
+                name = _coerce_result(name)
+                value = nv[1].replace('+', ' ')
+                value = compat_urllib_parse_unquote(
+                    value, encoding=encoding, errors=errors)
+                value = _coerce_result(value)
+                r.append((name, value))
+        return r
+
+    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
+                        encoding='utf-8', errors='replace'):
+        parsed_result = {}
+        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
+                           encoding=encoding, errors=errors)
+        for name, value in pairs:
+            if name in parsed_result:
+                parsed_result[name].append(value)
+            else:
+                parsed_result[name] = [value]
+        return parsed_result
+
+try:
+    compat_str = unicode  # Python 2
+except NameError:
+    compat_str = str
+
+try:
+    compat_chr = unichr  # Python 2
+except NameError:
+    compat_chr = chr
+
+try:
+    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
+except ImportError:  # Python 2.6
+    from xml.parsers.expat import ExpatError as compat_xml_parse_error
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:  # Python < 3.3
+    def shlex_quote(s):
+        if re.match(r'^[-_\w./]+$', s):
+            return s
+        else:
+            return "'" + s.replace("'", "'\"'\"'") + "'"
+
+
+def compat_ord(c):
+    if type(c) is int:
+        return c
+    else:
+        return ord(c)
+
+
+if sys.version_info >= (3, 0):
+    compat_getenv = os.getenv
+    compat_expanduser = os.path.expanduser
+else:
+    # Environment variables should be decoded with filesystem encoding.
+    # Otherwise it will fail if any non-ASCII characters are present (see #3854 #3217 #2918)
+
+    def compat_getenv(key, default=None):
+        from .utils import get_filesystem_encoding
+        env = os.getenv(key, default)
+        if env:
+            env = env.decode(get_filesystem_encoding())
+        return env
+
+    # HACK: The default implementations of os.path.expanduser from cpython do not decode
+    # environment variables with filesystem encoding. We will work around this by
+    # providing adjusted implementations.
+    # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
+    # for different platforms with correct environment variables decoding.
+
+    if os.name == 'posix':
+        def compat_expanduser(path):
+            """Expand ~ and ~user constructions.  If user or $HOME is unknown,
+            do nothing."""
+            if not path.startswith('~'):
+                return path
+            i = path.find('/', 1)
+            if i < 0:
+                i = len(path)
+            if i == 1:
+                if 'HOME' not in os.environ:
+                    import pwd
+                    userhome = pwd.getpwuid(os.getuid()).pw_dir
+                else:
+                    userhome = compat_getenv('HOME')
+            else:
+                import pwd
+                try:
+                    pwent = pwd.getpwnam(path[1:i])
+                except KeyError:
+                    return path
+                userhome = pwent.pw_dir
+            userhome = userhome.rstrip('/')
+            return (userhome + path[i:]) or '/'
+    elif os.name == 'nt' or os.name == 'ce':
+        def compat_expanduser(path):
+            """Expand ~ and ~user constructs.
+
+            If user or $HOME is unknown, do nothing."""
+            if path[:1] != '~':
+                return path
+            i, n = 1, len(path)
+            while i < n and path[i] not in '/\\':
+                i = i + 1
+
+            if 'HOME' in os.environ:
+                userhome = compat_getenv('HOME')
+            elif 'USERPROFILE' in os.environ:
+                userhome = compat_getenv('USERPROFILE')
+            elif 'HOMEPATH' not in os.environ:
+                return path
+            else:
+                try:
+                    drive = compat_getenv('HOMEDRIVE')
+                except KeyError:
+                    drive = ''
+                userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
+
+            if i != 1:  # ~user
+                userhome = os.path.join(os.path.dirname(userhome), path[1:i])
+
+            return userhome + path[i:]
+    else:
+        compat_expanduser = os.path.expanduser
+
+
+if sys.version_info < (3, 0):
+    def compat_print(s):
+        from .utils import preferredencoding
+        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
+else:
+    def compat_print(s):
+        assert isinstance(s, compat_str)
+        print(s)
+
+
+try:
+    subprocess_check_output = subprocess.check_output
+except AttributeError:
+    def subprocess_check_output(*args, **kwargs):
+        assert 'input' not in kwargs
+        p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
+        output, _ = p.communicate()
+        ret = p.poll()
+        if ret:
+            raise subprocess.CalledProcessError(ret, p.args, output=output)
+        return output
+
+if sys.version_info < (3, 0) and sys.platform == 'win32':
+    def compat_getpass(prompt, *args, **kwargs):
+        if isinstance(prompt, compat_str):
+            from .utils import preferredencoding
+            prompt = prompt.encode(preferredencoding())
+        return getpass.getpass(prompt, *args, **kwargs)
+else:
+    compat_getpass = getpass.getpass
+
+# Old 2.6 and 2.7 releases require kwargs to be bytes
+try:
+    def _testfunc(x):
+        pass
+    _testfunc(**{'x': 0})
+except TypeError:
+    def compat_kwargs(kwargs):
+        return dict((bytes(k), v) for k, v in kwargs.items())
+else:
+    compat_kwargs = lambda kwargs: kwargs
+
+
+# Fix https://github.com/rg3/youtube-dl/issues/4223
+# See http://bugs.python.org/issue9161 for what is broken
+def workaround_optparse_bug9161():
+    op = optparse.OptionParser()
+    og = optparse.OptionGroup(op, 'foo')
+    try:
+        og.add_option('-t')
+    except TypeError:
+        real_add_option = optparse.OptionGroup.add_option
+
+        def _compat_add_option(self, *args, **kwargs):
+            enc = lambda v: (
+                v.encode('ascii', 'replace') if isinstance(v, compat_str)
+                else v)
+            bargs = [enc(a) for a in args]
+            bkwargs = dict(
+                (k, enc(v)) for k, v in kwargs.items())
+            return real_add_option(self, *bargs, **bkwargs)
+        optparse.OptionGroup.add_option = _compat_add_option
+
+
+__all__ = [
+    'compat_HTTPError',
+    'compat_chr',
+    'compat_cookiejar',
+    'compat_expanduser',
+    'compat_getenv',
+    'compat_getpass',
+    'compat_html_entities',
+    'compat_html_parser',
+    'compat_http_client',
+    'compat_kwargs',
+    'compat_ord',
+    'compat_parse_qs',
+    'compat_print',
+    'compat_str',
+    'compat_subprocess_get_DEVNULL',
+    'compat_urllib_error',
+    'compat_urllib_parse',
+    'compat_urllib_parse_unquote',
+    'compat_urllib_parse_urlparse',
+    'compat_urllib_request',
+    'compat_urlparse',
+    'compat_urlretrieve',
+    'compat_xml_parse_error',
+    'shlex_quote',
+    'subprocess_check_output',
+    'workaround_optparse_bug9161',
+]
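
A short sketch of the compat layer in use; the same names resolve to the stdlib on Python 3 and to the fallbacks above on Python 2:

    from youtube_dl.compat import (
        compat_expanduser,
        compat_getenv,
        compat_parse_qs,
        compat_str,
    )

    home = compat_expanduser('~')  # decoded with the filesystem encoding on Python 2
    assert isinstance(home, compat_str)

    cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
    # compat_parse_qs behaves like cpython 3's parse_qs on both versions;
    # blank values are dropped unless keep_blank_values is set
    assert compat_parse_qs('a=1&a=2&b=') == {'a': ['1', '2']}
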
index 4ea5811a56f77180afea16b2e5fbc30056850b49..31e28df58e828f8c5434390ff18a63358215924a 100644 (file)
@@ -2,6 +2,7 @@ from __future__ import unicode_literals
 
 from .common import FileDownloader
 from .hls import HlsFD
+from .hls import NativeHlsFD
 from .http import HttpFD
 from .mplayer import MplayerFD
 from .rtmp import RtmpFD
@@ -19,6 +20,8 @@ def get_suitable_downloader(info_dict):
 
     if url.startswith('rtmp'):
         return RtmpFD
+    if protocol == 'm3u8_native':
+        return NativeHlsFD
     if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'):
         return HlsFD
     if url.startswith('mms') or url.startswith('rtsp'):
@@ -27,3 +30,8 @@ def get_suitable_downloader(info_dict):
         return F4mFD
     else:
         return HttpFD
+
+__all__ = [
+    'get_suitable_downloader',
+    'FileDownloader',
+]
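
With 'm3u8_native' wired in above, downloader selection can be exercised directly. A sketch with hypothetical info dicts (only 'url' and 'protocol' matter here):

    from youtube_dl.downloader import get_suitable_downloader
    from youtube_dl.downloader.hls import HlsFD, NativeHlsFD

    assert get_suitable_downloader(
        {'url': 'http://example.com/index.m3u8', 'protocol': 'm3u8_native'}) is NativeHlsFD
    # With no explicit protocol, the .m3u8 extension still selects the ffmpeg-based HlsFD
    assert get_suitable_downloader(
        {'url': 'http://example.com/index.m3u8', 'protocol': None}) is HlsFD
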
index 33ebbf6b44e8de601b9c7ddb01f7c2d59cae5be0..6e44c88c9f980472c54d25efadb33de7264927f6 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import re
 import sys
@@ -42,6 +44,7 @@ class FileDownloader(object):
     Subclasses of this one must re-define the real_download method.
     """
 
+    _TEST_FILE_SIZE = 10241
     params = None
 
     def __init__(self, ydl, params):
@@ -161,14 +164,14 @@ class FileDownloader(object):
 
     def temp_name(self, filename):
         """Returns a temporary filename for the given filename."""
-        if self.params.get('nopart', False) or filename == u'-' or \
+        if self.params.get('nopart', False) or filename == '-' or \
                 (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
             return filename
-        return filename + u'.part'
+        return filename + '.part'
 
     def undo_temp_name(self, filename):
-        if filename.endswith(u'.part'):
-            return filename[:-len(u'.part')]
+        if filename.endswith('.part'):
+            return filename[:-len('.part')]
         return filename
 
     def try_rename(self, old_filename, new_filename):
@@ -177,7 +180,7 @@ class FileDownloader(object):
                 return
             os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
         except (IOError, OSError) as err:
-            self.report_error(u'unable to rename file: %s' % compat_str(err))
+            self.report_error('unable to rename file: %s' % compat_str(err))
 
     def try_utime(self, filename, last_modified_hdr):
         """Try to set the last-modified time of the given file."""
@@ -202,10 +205,10 @@ class FileDownloader(object):
 
     def report_destination(self, filename):
         """Report destination filename."""
-        self.to_screen(u'[download] Destination: ' + filename)
+        self.to_screen('[download] Destination: ' + filename)
 
     def _report_progress_status(self, msg, is_last_line=False):
-        fullmsg = u'[download] ' + msg
+        fullmsg = '[download] ' + msg
         if self.params.get('progress_with_newline', False):
             self.to_screen(fullmsg)
         else:
@@ -213,13 +216,13 @@ class FileDownloader(object):
                 prev_len = getattr(self, '_report_progress_prev_line_length',
                                    0)
                 if prev_len > len(fullmsg):
-                    fullmsg += u' ' * (prev_len - len(fullmsg))
+                    fullmsg += ' ' * (prev_len - len(fullmsg))
                 self._report_progress_prev_line_length = len(fullmsg)
-                clear_line = u'\r'
+                clear_line = '\r'
             else:
-                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
+                clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
             self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
-        self.to_console_title(u'youtube-dl ' + msg)
+        self.to_console_title('youtube-dl ' + msg)
 
     def report_progress(self, percent, data_len_str, speed, eta):
         """Report download progress."""
@@ -235,7 +238,7 @@ class FileDownloader(object):
             percent_str = 'Unknown %'
         speed_str = self.format_speed(speed)
 
-        msg = (u'%s of %s at %s ETA %s' %
+        msg = ('%s of %s at %s ETA %s' %
                (percent_str, data_len_str, speed_str, eta_str))
         self._report_progress_status(msg)
 
@@ -245,37 +248,37 @@ class FileDownloader(object):
         downloaded_str = format_bytes(downloaded_data_len)
         speed_str = self.format_speed(speed)
         elapsed_str = FileDownloader.format_seconds(elapsed)
-        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
+        msg = '%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
         self._report_progress_status(msg)
 
     def report_finish(self, data_len_str, tot_time):
         """Report download finished."""
         if self.params.get('noprogress', False):
-            self.to_screen(u'[download] Download completed')
+            self.to_screen('[download] Download completed')
         else:
             self._report_progress_status(
-                (u'100%% of %s in %s' %
+                ('100%% of %s in %s' %
                  (data_len_str, self.format_seconds(tot_time))),
                 is_last_line=True)
 
     def report_resuming_byte(self, resume_len):
         """Report attempt to resume at given byte."""
-        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
+        self.to_screen('[download] Resuming download at byte %s' % resume_len)
 
     def report_retry(self, count, retries):
         """Report retry in case of HTTP error 5xx"""
-        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+        self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
 
     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
-            self.to_screen(u'[download] %s has already been downloaded' % file_name)
+            self.to_screen('[download] %s has already been downloaded' % file_name)
         except UnicodeEncodeError:
-            self.to_screen(u'[download] The file has already been downloaded')
+            self.to_screen('[download] The file has already been downloaded')
 
     def report_unable_to_resume(self):
         """Report it was impossible to resume download."""
-        self.to_screen(u'[download] Unable to resume')
+        self.to_screen('[download] Unable to resume')
 
     def download(self, filename, info_dict):
         """Download to a filename using the info from info_dict
@@ -295,7 +298,7 @@ class FileDownloader(object):
 
     def real_download(self, filename, info_dict):
         """Real download process. Redefine in subclasses."""
-        raise NotImplementedError(u'This method must be implemented by sublcasses')
+        raise NotImplementedError('This method must be implemented by subclasses')
 
     def _hook_progress(self, status):
         for ph in self._progress_hooks:
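
The _hook_progress plumbing seen here is the same hook API the f4m downloader uses below. A sketch of a hook; the status keys shown are the ones reported by the downloaders in this diff:

    def log_progress(status):
        # status['status'] is 'downloading' or 'finished'; byte counts and
        # 'filename' are filled in by _hook_progress callers as shown above.
        if status['status'] == 'finished':
            print('done: %s (%s bytes)'
                  % (status['filename'], status.get('total_bytes', 'unknown')))

    # Attach it to any FileDownloader subclass instance, as the f4m code
    # below does with its HttpQuietDownloader:
    #     fd.add_progress_hook(log_progress)
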
index 71353f607daead364acbdad83b18b79e61a5bffa..7cd22c504e463ad2551692728bd3933e8bcf20ab 100644 (file)
@@ -16,6 +16,7 @@ from ..utils import (
     format_bytes,
     encodeFilename,
     sanitize_open,
+    xpath_text,
 )
 
 
@@ -54,7 +55,7 @@ class FlvReader(io.BytesIO):
         if size == 1:
             real_size = self.read_unsigned_long_long()
             header_end = 16
-        return real_size, box_type, self.read(real_size-header_end)
+        return real_size, box_type, self.read(real_size - header_end)
 
     def read_asrt(self):
         # version
@@ -179,7 +180,7 @@ def build_fragments_list(boot_info):
     n_frags = segment_run_entry[1]
     fragment_run_entry_table = boot_info['fragments'][0]['fragments']
     first_frag_number = fragment_run_entry_table[0]['first']
-    for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
+    for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
         res.append((1, frag_number))
     return res
 
@@ -224,13 +225,15 @@ class F4mFD(FileDownloader):
         self.to_screen('[download] Downloading f4m manifest')
         manifest = self.ydl.urlopen(man_url).read()
         self.report_destination(filename)
-        http_dl = HttpQuietDownloader(self.ydl,
+        http_dl = HttpQuietDownloader(
+            self.ydl,
             {
                 'continuedl': True,
                 'quiet': True,
                 'noprogress': True,
                 'test': self.params.get('test', False),
-            })
+            }
+        )
 
         doc = etree.fromstring(manifest)
         formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
@@ -243,14 +246,23 @@ class F4mFD(FileDownloader):
                 lambda f: int(f[0]) == requested_bitrate, formats))[0]
 
         base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
-        bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
+        bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
+        if bootstrap_node.text is None:
+            bootstrap_url = compat_urlparse.urljoin(
+                base_url, bootstrap_node.attrib['url'])
+            bootstrap = self.ydl.urlopen(bootstrap_url).read()
+        else:
+            bootstrap = base64.b64decode(bootstrap_node.text)
         metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
         boot_info = read_bootstrap_info(bootstrap)
+
         fragments_list = build_fragments_list(boot_info)
         if self.params.get('test', False):
             # We only download the first fragment
             fragments_list = fragments_list[:1]
         total_frags = len(fragments_list)
+        # For some Akamai manifests we'll need to add a query to the fragment URL
+        akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
 
         tmpfilename = self.temp_name(filename)
         (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
@@ -267,7 +279,7 @@ class F4mFD(FileDownloader):
         def frag_progress_hook(status):
             frag_total_bytes = status.get('total_bytes', 0)
             estimated_size = (state['downloaded_bytes'] +
-                (total_frags - state['frag_counter']) * frag_total_bytes)
+                              (total_frags - state['frag_counter']) * frag_total_bytes)
             if status['status'] == 'finished':
                 state['downloaded_bytes'] += frag_total_bytes
                 state['frag_counter'] += 1
@@ -277,19 +289,21 @@ class F4mFD(FileDownloader):
                 frag_downloaded_bytes = status['downloaded_bytes']
                 byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
                 frag_progress = self.calc_percent(frag_downloaded_bytes,
-                    frag_total_bytes)
+                                                  frag_total_bytes)
                 progress = self.calc_percent(state['frag_counter'], total_frags)
                 progress += frag_progress / float(total_frags)
 
             eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
             self.report_progress(progress, format_bytes(estimated_size),
-                status.get('speed'), eta)
+                                 status.get('speed'), eta)
         http_dl.add_progress_hook(frag_progress_hook)
 
         frags_filenames = []
         for (seg_i, frag_i) in fragments_list:
             name = 'Seg%d-Frag%d' % (seg_i, frag_i)
             url = base_url + name
+            if akamai_pv:
+                url += '?' + akamai_pv.strip(';')
             frag_filename = '%s-%s' % (tmpfilename, name)
             success = http_dl.download(frag_filename, {'url': url})
             if not success:
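
The pv-2.0 handling added above appends an Akamai token as a query string to every fragment URL. A sketch of the resulting URL assembly (the token value is hypothetical):

    # akamai_pv is the text of the manifest's <pv-2.0> element, read via
    # xpath_text above; the trailing ';' is stripped before use.
    akamai_pv = 'hdntl=exp=9999~hmac=deadbeef;'
    base_url = 'http://example.com/stream/'

    seg_i, frag_i = 1, 1
    name = 'Seg%d-Frag%d' % (seg_i, frag_i)
    url = base_url + name
    if akamai_pv:
        url += '?' + akamai_pv.strip(';')
    # -> 'http://example.com/stream/Seg1-Frag1?hdntl=exp=9999~hmac=deadbeef'
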
index 9f29e2f8110ef09d8bba4c1d57e38acb8da8a52e..ad26cfa4085bbb028c7252aa9db2a8de3f7bd1e4 100644 (file)
@@ -1,8 +1,15 @@
+from __future__ import unicode_literals
+
 import os
+import re
 import subprocess
 
+from ..postprocessor.ffmpeg import FFmpegPostProcessor
 from .common import FileDownloader
 from ..utils import (
+    compat_urlparse,
+    compat_urllib_request,
+    check_executable,
     encodeFilename,
 )
 
@@ -19,19 +26,20 @@ class HlsFD(FileDownloader):
             encodeFilename(tmpfilename, for_subprocess=True)]
 
         for program in ['avconv', 'ffmpeg']:
-            try:
-                subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
+            if check_executable(program, ['-version']):
                 break
-            except (OSError, IOError):
-                pass
         else:
-            self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
+            self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
+            return False
         cmd = [program] + args
 
+        ffpp = FFmpegPostProcessor(downloader=self)
+        ffpp.check_version()
+
         retval = subprocess.call(cmd)
         if retval == 0:
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize))
+            self.to_screen('\r[%s] %s bytes' % (cmd[0], fsize))
             self.try_rename(tmpfilename, filename)
             self._hook_progress({
                 'downloaded_bytes': fsize,
@@ -41,6 +49,59 @@ class HlsFD(FileDownloader):
             })
             return True
         else:
-            self.to_stderr(u"\n")
-            self.report_error(u'ffmpeg exited with code %d' % retval)
+            self.to_stderr('\n')
+            self.report_error('%s exited with code %d' % (program, retval))
             return False
+
+
+class NativeHlsFD(FileDownloader):
+    """ A more limited implementation that does not require ffmpeg """
+
+    def real_download(self, filename, info_dict):
+        url = info_dict['url']
+        self.report_destination(filename)
+        tmpfilename = self.temp_name(filename)
+
+        self.to_screen(
+            '[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
+        data = self.ydl.urlopen(url).read()
+        s = data.decode('utf-8', 'ignore')
+        segment_urls = []
+        for line in s.splitlines():
+            line = line.strip()
+            if line and not line.startswith('#'):
+                segment_url = (
+                    line
+                    if re.match(r'^https?://', line)
+                    else compat_urlparse.urljoin(url, line))
+                segment_urls.append(segment_url)
+
+        is_test = self.params.get('test', False)
+        remaining_bytes = self._TEST_FILE_SIZE if is_test else None
+        byte_counter = 0
+        with open(tmpfilename, 'wb') as outf:
+            for i, segurl in enumerate(segment_urls):
+                self.to_screen(
+                    '[hlsnative] %s: Downloading segment %d / %d' %
+                    (info_dict['id'], i + 1, len(segment_urls)))
+                seg_req = compat_urllib_request.Request(segurl)
+                if remaining_bytes is not None:
+                    seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
+
+                segment = self.ydl.urlopen(seg_req).read()
+                if remaining_bytes is not None:
+                    segment = segment[:remaining_bytes]
+                    remaining_bytes -= len(segment)
+                outf.write(segment)
+                byte_counter += len(segment)
+                if remaining_bytes is not None and remaining_bytes <= 0:
+                    break
+
+        self._hook_progress({
+            'downloaded_bytes': byte_counter,
+            'total_bytes': byte_counter,
+            'filename': filename,
+            'status': 'finished',
+        })
+        self.try_rename(tmpfilename, filename)
+        return True
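
NativeHlsFD reads the m3u8 manifest as a plain line-oriented playlist: lines
starting with '#' are tags or comments, anything else is a segment URI, and
relative URIs resolve against the manifest URL. That parse in isolation (a
sketch that, like the downloader itself, ignores encryption keys, byte
ranges and the rest of the HLS tag vocabulary):

    try:
        from urllib.parse import urljoin  # Python 3
    except ImportError:
        from urlparse import urljoin      # Python 2

    def m3u8_segment_urls(manifest, manifest_url):
        urls = []
        for line in manifest.splitlines():
            line = line.strip()
            if line and not line.startswith('#'):
                # urljoin leaves absolute URLs untouched and resolves
                # relative ones against the manifest location.
                urls.append(urljoin(manifest_url, line))
        return urls

    manifest = '#EXTM3U\n#EXTINF:10,\nseg0.ts\n#EXTINF:10,\nhttp://cdn.example.com/seg1.ts'
    print(m3u8_segment_urls(manifest, 'http://example.com/v/index.m3u8'))
    # -> ['http://example.com/v/seg0.ts', 'http://cdn.example.com/seg1.ts']
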
index 462be2739143e19ea27406731c069ca461787b41..224962e86827956b2145378b7a6cf920a02029e6 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import time
 
@@ -14,8 +16,6 @@ from ..utils import (
 
 
 class HttpFD(FileDownloader):
-    _TEST_FILE_SIZE = 10241
-
     def real_download(self, filename, info_dict):
         url = info_dict['url']
         tmpfilename = self.temp_name(filename)
@@ -27,8 +27,16 @@ class HttpFD(FileDownloader):
             headers['Youtubedl-user-agent'] = info_dict['user_agent']
         if 'http_referer' in info_dict:
             headers['Referer'] = info_dict['http_referer']
-        basic_request = compat_urllib_request.Request(url, None, headers)
-        request = compat_urllib_request.Request(url, None, headers)
+        add_headers = info_dict.get('http_headers')
+        if add_headers:
+            headers.update(add_headers)
+        data = info_dict.get('http_post_data')
+        http_method = info_dict.get('http_method')
+        basic_request = compat_urllib_request.Request(url, data, headers)
+        request = compat_urllib_request.Request(url, data, headers)
+        if http_method is not None:
+            basic_request.get_method = lambda: http_method
+            request.get_method = lambda: http_method
 
         is_test = self.params.get('test', False)
 
@@ -100,7 +108,7 @@ class HttpFD(FileDownloader):
                 self.report_retry(count, retries)
 
         if count > retries:
-            self.report_error(u'giving up after %s retries' % retries)
+            self.report_error('giving up after %s retries' % retries)
             return False
 
         data_len = data.info().get('Content-length', None)
@@ -118,10 +126,10 @@ class HttpFD(FileDownloader):
             min_data_len = self.params.get("min_filesize", None)
             max_data_len = self.params.get("max_filesize", None)
             if min_data_len is not None and data_len < min_data_len:
-                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
+                self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                 return False
             if max_data_len is not None and data_len > max_data_len:
-                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
+                self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                 return False
 
         data_len_str = format_bytes(data_len)
@@ -150,13 +158,13 @@ class HttpFD(FileDownloader):
                     filename = self.undo_temp_name(tmpfilename)
                     self.report_destination(filename)
                 except (OSError, IOError) as err:
-                    self.report_error(u'unable to open for writing: %s' % str(err))
+                    self.report_error('unable to open for writing: %s' % str(err))
                     return False
             try:
                 stream.write(data_block)
             except (IOError, OSError) as err:
-                self.to_stderr(u"\n")
-                self.report_error(u'unable to write data: %s' % str(err))
+                self.to_stderr('\n')
+                self.report_error('unable to write data: %s' % str(err))
                 return False
 
             # Apply rate limit
@@ -195,10 +203,11 @@ class HttpFD(FileDownloader):
                 break
 
         if stream is None:
-            self.to_stderr(u"\n")
-            self.report_error(u'Did not get any data blocks')
+            self.to_stderr('\n')
+            self.report_error('Did not get any data blocks')
             return False
-        stream.close()
+        if tmpfilename != '-':
+            stream.close()
         self.report_finish(data_len_str, (time.time() - start))
         if data_len is not None and byte_counter != data_len:
             raise ContentTooShortError(byte_counter, int(data_len))
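
The notable piece of the first http.py hunk is the get_method override: on
Python 2, urllib2.Request picks GET or POST purely from whether body data is
present, so replacing get_method with a lambda is the usual way to force any
other verb. A standalone sketch (URL and payload invented):

    import json
    try:
        from urllib.request import Request  # Python 3
    except ImportError:
        from urllib2 import Request         # Python 2

    data = json.dumps({'state': 'done'}).encode('utf-8')
    req = Request('http://example.com/api/items/1', data,
                  {'Content-Type': 'application/json'})
    # With a body this would default to POST; force PUT instead.
    req.get_method = lambda: 'PUT'
    # urlopen(req) would now issue: PUT /api/items/1 HTTP/1.1
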
index 4de7f15f4ee3dd11b51afbb9c0e1e221b12d0784..c53195da0c9471d55a61b53b1041e05ee209697e 100644 (file)
@@ -1,7 +1,10 @@
+from __future__ import unicode_literals
+
 import os
 import subprocess
 
 from .common import FileDownloader
+from ..compat import compat_subprocess_get_DEVNULL
 from ..utils import (
     encodeFilename,
 )
@@ -13,19 +16,23 @@ class MplayerFD(FileDownloader):
         self.report_destination(filename)
         tmpfilename = self.temp_name(filename)
 
-        args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
+        args = [
+            'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy',
+            '-dumpstream', '-dumpfile', tmpfilename, url]
         # Check for mplayer first
         try:
-            subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
+            subprocess.call(
+                ['mplayer', '-h'],
+                stdout=compat_subprocess_get_DEVNULL(), stderr=subprocess.STDOUT)
         except (OSError, IOError):
-            self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0])
+            self.report_error('MMS or RTSP download detected but "%s" could not be run' % args[0])
             return False
 
         # Download using mplayer.
         retval = subprocess.call(args)
         if retval == 0:
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
+            self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
             self.try_rename(tmpfilename, filename)
             self._hook_progress({
                 'downloaded_bytes': fsize,
@@ -35,6 +42,6 @@ class MplayerFD(FileDownloader):
             })
             return True
         else:
-            self.to_stderr(u"\n")
-            self.report_error(u'mplayer exited with code %d' % retval)
+            self.to_stderr('\n')
+            self.report_error('mplayer exited with code %d' % retval)
             return False
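
compat_subprocess_get_DEVNULL exists because subprocess.DEVNULL only appeared
in Python 3.3; older interpreters have to open os.devnull themselves. The
shim in the new youtube_dl/compat.py is presumably along these lines:

    import os
    import subprocess

    if hasattr(subprocess, 'DEVNULL'):
        def compat_subprocess_get_DEVNULL():
            return subprocess.DEVNULL
    else:
        def compat_subprocess_get_DEVNULL():
            # Python < 3.3: hand back a writable handle on the null device.
            return open(os.path.devnull, 'w')
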
index 68646709a16cf7c9dcec0ac1c5e09f5643a9a7a2..58ae2005c014eb4aafbd68e087d5018d26d02a21 100644 (file)
@@ -8,12 +8,19 @@ import time
 
 from .common import FileDownloader
 from ..utils import (
+    check_executable,
+    compat_str,
     encodeFilename,
     format_bytes,
-    compat_str,
+    get_exe_version,
 )
 
 
+def rtmpdump_version():
+    return get_exe_version(
+        'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)')
+
+
 class RtmpFD(FileDownloader):
     def real_download(self, filename, info_dict):
         def run_rtmpdump(args):
@@ -39,13 +46,13 @@ class RtmpFD(FileDownloader):
                     continue
                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                 if mobj:
-                    downloaded_data_len = int(float(mobj.group(1))*1024)
+                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                     percent = float(mobj.group(2))
                     if not resume_percent:
                         resume_percent = percent
                         resume_downloaded_data_len = downloaded_data_len
-                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
-                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
+                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
+                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                     data_len = None
                     if percent > 0:
                         data_len = int(downloaded_data_len * 100 / percent)
@@ -65,7 +72,7 @@ class RtmpFD(FileDownloader):
                     # no percent for live streams
                     mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                     if mobj:
-                        downloaded_data_len = int(float(mobj.group(1))*1024)
+                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                         time_now = time.time()
                         speed = self.calc_speed(start, time_now, downloaded_data_len)
                         self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
@@ -81,7 +88,7 @@ class RtmpFD(FileDownloader):
                         if not cursor_in_new_line:
                             self.to_screen('')
                         cursor_in_new_line = True
-                        self.to_screen('[rtmpdump] '+line)
+                        self.to_screen('[rtmpdump] ' + line)
             proc.wait()
             if not cursor_in_new_line:
                 self.to_screen('')
@@ -103,9 +110,7 @@ class RtmpFD(FileDownloader):
         test = self.params.get('test', False)
 
         # Check for rtmpdump first
-        try:
-            subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
-        except (OSError, IOError):
+        if not check_executable('rtmpdump', ['-h']):
             self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
             return False
 
@@ -175,7 +180,7 @@ class RtmpFD(FileDownloader):
         while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
             prevsize = os.path.getsize(encodeFilename(tmpfilename))
             self.to_screen('[rtmpdump] %s bytes' % prevsize)
-            time.sleep(5.0) # This seems to be needed
+            time.sleep(5.0)  # This seems to be needed
             retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED])
             cursize = os.path.getsize(encodeFilename(tmpfilename))
             if prevsize == cursize and retval == RD_FAILED:
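
rtmpdump_version() leans on the get_exe_version helper, which runs the binary
and fishes the version out of its banner with the regex at the top of this
hunk. What that pattern extracts from typical rtmpdump output (a
self-contained check of the regex, not the helper itself):

    import re

    PATTERN = r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)'
    for banner in ('RTMPDump v2.4', 'rtmpdump 2.4_p1 (GIT-2012-07-07)'):
        m = re.search(PATTERN, banner)
        print(m.group(1) if m else None)
    # -> 2.4
    # -> 2.4_p1
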
index 5ab89a1bf51ba6f6ad81e1a5a8b4c238b1a8c934..6866f06d4b93cb2875acdf7556f9b5499b104a33 100644 (file)
@@ -1,15 +1,18 @@
+from __future__ import unicode_literals
+
 from .abc import ABCIE
 from .academicearth import AcademicEarthCourseIE
 from .addanime import AddAnimeIE
 from .adultswim import AdultSwimIE
 from .aftonbladet import AftonbladetIE
 from .anitube import AnitubeIE
+from .anysex import AnySexIE
 from .aol import AolIE
 from .allocine import AllocineIE
 from .aparat import AparatIE
 from .appletrailers import AppleTrailersIE
 from .archiveorg import ArchiveOrgIE
-from .ard import ARDIE
+from .ard import ARDIE, ARDMediathekIE
 from .arte import (
     ArteTvIE,
     ArteTVPlus7IE,
@@ -19,17 +22,25 @@ from .arte import (
     ArteTVDDCIE,
     ArteTVEmbedIE,
 )
+from .audiomack import AudiomackIE
 from .auengine import AUEngineIE
+from .azubu import AzubuIE
 from .bambuser import BambuserIE, BambuserChannelIE
 from .bandcamp import BandcampIE, BandcampAlbumIE
 from .bbccouk import BBCCoUkIE
+from .beeg import BeegIE
+from .behindkink import BehindKinkIE
+from .bet import BetIE
+from .bild import BildIE
 from .bilibili import BiliBiliIE
 from .blinkx import BlinkxIE
 from .bliptv import BlipTVIE, BlipTVUserIE
 from .bloomberg import BloombergIE
+from .bpb import BpbIE
 from .br import BRIE
 from .breakcom import BreakIE
 from .brightcove import BrightcoveIE
+from .buzzfeed import BuzzFeedIE
 from .byutv import BYUtvIE
 from .c56 import C56IE
 from .canal13cl import Canal13clIE
@@ -40,10 +51,11 @@ from .cbsnews import CBSNewsIE
 from .ceskatelevize import CeskaTelevizeIE
 from .channel9 import Channel9IE
 from .chilloutzone import ChilloutzoneIE
-from .cinemassacre import CinemassacreIE
+from .cinchcast import CinchcastIE
 from .clipfish import ClipfishIE
 from .cliphunter import CliphunterIE
 from .clipsyndicate import ClipsyndicateIE
+from .cloudy import CloudyIE
 from .clubic import ClubicIE
 from .cmt import CMTIE
 from .cnet import CNETIE
@@ -56,19 +68,25 @@ from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
 from .condenast import CondeNastIE
 from .cracked import CrackedIE
 from .criterion import CriterionIE
-from .crunchyroll import CrunchyrollIE
+from .crunchyroll import (
+    CrunchyrollIE,
+    CrunchyrollShowPlaylistIE
+)
 from .cspan import CSpanIE
-from .d8 import D8IE
 from .dailymotion import (
     DailymotionIE,
     DailymotionPlaylistIE,
     DailymotionUserIE,
 )
 from .daum import DaumIE
+from .dbtv import DBTVIE
+from .deezer import DeezerPlaylistIE
 from .dfb import DFBIE
 from .dotsub import DotsubIE
 from .dreisat import DreiSatIE
+from .drtuber import DrTuberIE
 from .drtv import DRTVIE
+from .dump import DumpIE
 from .defense import DefenseGouvFrIE
 from .discovery import DiscoveryIE
 from .divxstage import DivxStageIE
@@ -76,13 +94,20 @@ from .dropbox import DropboxIE
 from .ebaumsworld import EbaumsWorldIE
 from .ehow import EHowIE
 from .eighttracks import EightTracksIE
+from .einthusan import EinthusanIE
 from .eitb import EitbIE
+from .ellentv import (
+    EllenTVIE,
+    EllenTVClipsIE,
+)
 from .elpais import ElPaisIE
-from .empflix import EmpflixIE
+from .empflix import EMPFlixIE
 from .engadget import EngadgetIE
+from .eporner import EpornerIE
 from .escapist import EscapistIE
 from .everyonesmixtape import EveryonesMixtapeIE
 from .exfm import ExfmIE
+from .expotv import ExpoTVIE
 from .extremetube import ExtremeTubeIE
 from .facebook import FacebookIE
 from .faz import FazIE
@@ -96,7 +121,10 @@ from .fktv import (
     FKTVPosteckeIE,
 )
 from .flickr import FlickrIE
+from .folketinget import FolketingetIE
 from .fourtube import FourTubeIE
+from .foxgay import FoxgayIE
+from .foxnews import FoxNewsIE
 from .franceculture import FranceCultureIE
 from .franceinter import FranceInterIE
 from .francetv import (
@@ -108,24 +136,38 @@ from .francetv import (
 )
 from .freesound import FreesoundIE
 from .freespeech import FreespeechIE
+from .freevideo import FreeVideoIE
 from .funnyordie import FunnyOrDieIE
 from .gamekings import GamekingsIE
-from .gameone import GameOneIE
+from .gameone import (
+    GameOneIE,
+    GameOnePlaylistIE,
+)
 from .gamespot import GameSpotIE
 from .gamestar import GameStarIE
 from .gametrailers import GametrailersIE
 from .gdcvault import GDCVaultIE
 from .generic import GenericIE
+from .giantbomb import GiantBombIE
+from .glide import GlideIE
+from .globo import GloboIE
 from .godtube import GodTubeIE
+from .goldenmoustache import GoldenMoustacheIE
+from .golem import GolemIE
 from .googleplus import GooglePlusIE
 from .googlesearch import GoogleSearchIE
 from .gorillavid import GorillaVidIE
 from .goshgay import GoshgayIE
+from .grooveshark import GroovesharkIE
 from .hark import HarkIE
+from .heise import HeiseIE
 from .helsinki import HelsinkiIE
 from .hentaistigma import HentaiStigmaIE
+from .hornbunny import HornBunnyIE
+from .hostingbulk import HostingBulkIE
 from .hotnewhiphop import HotNewHipHopIE
 from .howcast import HowcastIE
+from .howstuffworks import HowStuffWorksIE
 from .huffpost import HuffPostIE
 from .hypem import HypemIE
 from .iconosquare import IconosquareIE
@@ -143,10 +185,11 @@ from .ivi import (
     IviIE,
     IviCompilationIE
 )
+from .izlesene import IzleseneIE
 from .jadorecettepub import JadoreCettePubIE
 from .jeuxvideo import JeuxVideoIE
+from .jove import JoveIE
 from .jukebox import JukeboxIE
-from .justintv import JustinTVIE
 from .jpopsukitv import JpopsukiIE
 from .kankan import KankanIE
 from .keezmovies import KeezMoviesIE
@@ -157,6 +200,7 @@ from .kontrtube import KontrTubeIE
 from .krasview import KrasViewIE
 from .ku6 import Ku6IE
 from .la7 import LA7IE
+from .laola1tv import Laola1TvIE
 from .lifenews import LifeNewsIE
 from .liveleak import LiveLeakIE
 from .livestream import (
@@ -164,6 +208,7 @@ from .livestream import (
     LivestreamOriginalIE,
     LivestreamShortenerIE,
 )
+from .lrt import LRTIE
 from .lynda import (
     LyndaIE,
     LyndaCourseIE
@@ -175,15 +220,23 @@ from .malemotion import MalemotionIE
 from .mdr import MDRIE
 from .metacafe import MetacafeIE
 from .metacritic import MetacriticIE
+from .mgoon import MgoonIE
+from .minhateca import MinhatecaIE
+from .ministrygrid import MinistryGridIE
 from .mit import TechTVMITIE, MITIE, OCWMITIE
+from .mitele import MiTeleIE
 from .mixcloud import MixcloudIE
 from .mlb import MLBIE
 from .mpora import MporaIE
+from .moevideo import MoeVideoIE
 from .mofosex import MofosexIE
+from .mojvideo import MojvideoIE
+from .moniker import MonikerIE
 from .mooshare import MooshareIE
 from .morningstar import MorningstarIE
 from .motherless import MotherlessIE
 from .motorsport import MotorsportIE
+from .movieclips import MovieClipsIE
 from .moviezine import MoviezineIE
 from .movshare import MovShareIE
 from .mtv import (
@@ -191,11 +244,14 @@ from .mtv import (
     MTVServicesEmbeddedIE,
     MTVIggyIE,
 )
+from .muenchentv import MuenchenTVIE
 from .musicplayon import MusicPlayOnIE
+from .musicvault import MusicVaultIE
 from .muzu import MuzuTVIE
-from .myspace import MySpaceIE
+from .myspace import MySpaceIE, MySpaceAlbumIE
 from .myspass import MySpassIE
 from .myvideo import MyVideoIE
+from .myvidster import MyVidsterIE
 from .naver import NaverIE
 from .nba import NBAIE
 from .nbc import (
@@ -207,15 +263,20 @@ from .ndtv import NDTVIE
 from .newgrounds import NewgroundsIE
 from .newstube import NewstubeIE
 from .nfb import NFBIE
+from .nfl import NFLIE
 from .nhl import NHLIE, NHLVideocenterIE
-from .niconico import NiconicoIE
+from .niconico import NiconicoIE, NiconicoPlaylistIE
 from .ninegag import NineGagIE
 from .noco import NocoIE
 from .normalboots import NormalbootsIE
+from .nosvideo import NosVideoIE
 from .novamov import NovaMovIE
 from .nowness import NownessIE
 from .nowvideo import NowVideoIE
-from .npo import NPOIE
+from .npo import (
+    NPOIE,
+    TegenlichtVproIE,
+)
 from .nrk import (
     NRKIE,
     NRKTVIE,
@@ -223,19 +284,32 @@ from .nrk import (
 from .ntv import NTVIE
 from .nytimes import NYTimesIE
 from .nuvid import NuvidIE
-from .oe1 import OE1IE
+from .oktoberfesttv import OktoberfestTVIE
 from .ooyala import OoyalaIE
-from .orf import ORFIE
+from .orf import (
+    ORFTVthekIE,
+    ORFOE1IE,
+    ORFFM4IE,
+)
 from .parliamentliveuk import ParliamentLiveUKIE
+from .patreon import PatreonIE
 from .pbs import PBSIE
+from .phoenix import PhoenixIE
 from .photobucket import PhotobucketIE
+from .planetaplay import PlanetaPlayIE
+from .played import PlayedIE
+from .playfm import PlayFMIE
 from .playvid import PlayvidIE
 from .podomatic import PodomaticIE
 from .pornhd import PornHdIE
 from .pornhub import PornHubIE
 from .pornotube import PornotubeIE
+from .pornoxo import PornoXOIE
+from .promptfile import PromptFileIE
 from .prosiebensat1 import ProSiebenSat1IE
 from .pyvideo import PyvideoIE
+from .quickvid import QuickVidIE
+from .radiode import RadioDeIE
 from .radiofrance import RadioFranceIE
 from .rai import RaiIE
 from .rbmaradio import RBMARadioIE
@@ -246,9 +320,10 @@ from .ro220 import Ro220IE
 from .rottentomatoes import RottenTomatoesIE
 from .roxwel import RoxwelIE
 from .rtbf import RTBFIE
+from .rtlnl import RtlXlIE
 from .rtlnow import RTLnowIE
 from .rts import RTSIE
-from .rtve import RTVEALaCartaIE
+from .rtve import RTVEALaCartaIE, RTVELiveIE
 from .ruhd import RUHDIE
 from .rutube import (
     RutubeIE,
@@ -259,10 +334,15 @@ from .rutube import (
 from .rutv import RUTVIE
 from .sapo import SapoIE
 from .savefrom import SaveFromIE
+from .sbs import SBSIE
 from .scivee import SciVeeIE
 from .screencast import ScreencastIE
+from .screenwavemedia import CinemassacreIE, ScreenwaveMediaIE, TeamFourIE
 from .servingsys import ServingSysIE
+from .sexu import SexuIE
+from .sexykarma import SexyKarmaIE
 from .shared import SharedIE
+from .sharesix import ShareSixIE
 from .sina import SinaIE
 from .slideshare import SlideshareIE
 from .slutload import SlutloadIE
@@ -288,17 +368,24 @@ from .southpark import (
 )
 from .space import SpaceIE
 from .spankwire import SpankwireIE
-from .spiegel import SpiegelIE
+from .spiegel import SpiegelIE, SpiegelArticleIE
 from .spiegeltv import SpiegeltvIE
 from .spike import SpikeIE
+from .sport5 import Sport5IE
+from .sportbox import SportBoxIE
+from .sportdeutschland import SportDeutschlandIE
+from .srmediathek import SRMediathekIE
 from .stanfordoc import StanfordOpenClassroomIE
 from .steam import SteamIE
 from .streamcloud import StreamcloudIE
 from .streamcz import StreamCZIE
+from .sunporno import SunPornoIE
 from .swrmediathek import SWRMediathekIE
 from .syfy import SyfyIE
 from .sztvhu import SztvHuIE
 from .tagesschau import TagesschauIE
+from .tapely import TapelyIE
+from .tass import TassIE
 from .teachertube import (
     TeacherTubeIE,
     TeacherTubeUserIE,
@@ -307,13 +394,24 @@ from .teachingchannel import TeachingChannelIE
 from .teamcoco import TeamcocoIE
 from .techtalks import TechTalksIE
 from .ted import TEDIE
+from .telebruxelles import TeleBruxellesIE
+from .telecinco import TelecincoIE
+from .telemb import TeleMBIE
 from .tenplay import TenPlayIE
 from .testurl import TestURLIE
 from .tf1 import TF1IE
+from .theonion import TheOnionIE
 from .theplatform import ThePlatformIE
+from .thesixtyone import TheSixtyOneIE
 from .thisav import ThisAVIE
 from .tinypic import TinyPicIE
 from .tlc import TlcIE, TlcDeIE
+from .tmz import TMZIE
+from .tnaflix import TNAFlixIE
+from .thvideo import (
+    THVideoIE,
+    THVideoPlaylistIE
+)
 from .toutv import TouTvIE
 from .toypics import ToypicsUserIE, ToypicsIE
 from .traileraddict import TrailerAddictIE
@@ -322,10 +420,15 @@ from .trutube import TruTubeIE
 from .tube8 import Tube8IE
 from .tudou import TudouIE
 from .tumblr import TumblrIE
+from .tunein import TuneInIE
+from .turbo import TurboIE
 from .tutv import TutvIE
 from .tvigle import TvigleIE
 from .tvp import TvpIE
 from .tvplay import TVPlayIE
+from .twentyfourvideo import TwentyFourVideoIE
+from .twitch import TwitchIE
+from .ubu import UbuIE
 from .udemy import (
     UdemyIE,
     UdemyCourseIE
@@ -338,22 +441,28 @@ from .veehd import VeeHDIE
 from .veoh import VeohIE
 from .vesti import VestiIE
 from .vevo import VevoIE
+from .vgtv import VGTVIE
 from .vh1 import VH1IE
+from .vice import ViceIE
 from .viddler import ViddlerIE
 from .videobam import VideoBamIE
 from .videodetective import VideoDetectiveIE
 from .videolecturesnet import VideoLecturesNetIE
 from .videofyme import VideofyMeIE
+from .videomega import VideoMegaIE
 from .videopremium import VideoPremiumIE
 from .videott import VideoTtIE
 from .videoweed import VideoWeedIE
+from .vidme import VidmeIE
+from .vidzi import VidziIE
 from .vimeo import (
     VimeoIE,
-    VimeoChannelIE,
-    VimeoUserIE,
     VimeoAlbumIE,
+    VimeoChannelIE,
     VimeoGroupsIE,
+    VimeoLikesIE,
     VimeoReviewIE,
+    VimeoUserIE,
     VimeoWatchLaterIE,
 )
 from .vimple import VimpleIE
@@ -362,13 +471,20 @@ from .vine import (
     VineUserIE,
 )
 from .viki import VikiIE
-from .vk import VKIE
+from .vk import (
+    VKIE,
+    VKUserVideosIE,
+)
 from .vodlocker import VodlockerIE
+from .vporn import VpornIE
+from .vrt import VRTIE
 from .vube import VubeIE
 from .vuclip import VuClipIE
 from .vulture import VultureIE
+from .walla import WallaIE
 from .washingtonpost import WashingtonPostIE
 from .wat import WatIE
+from .wayofthemaster import WayOfTheMasterIE
 from .wdr import (
     WDRIE,
     WDRMobileIE,
@@ -380,18 +496,21 @@ from .wistia import WistiaIE
 from .worldstarhiphop import WorldStarHipHopIE
 from .wrzuta import WrzutaIE
 from .xbef import XBefIE
+from .xboxclips import XboxClipsIE
 from .xhamster import XHamsterIE
+from .xminus import XMinusIE
 from .xnxx import XNXXIE
 from .xvideos import XVideosIE
 from .xtube import XTubeUserIE, XTubeIE
 from .yahoo import (
     YahooIE,
-    YahooNewsIE,
     YahooSearchIE,
 )
+from .ynet import YnetIE
 from .youjizz import YouJizzIE
 from .youku import YoukuIE
 from .youporn import YouPornIE
+from .yourupload import YourUploadIE
 from .youtube import (
     YoutubeIE,
     YoutubeChannelIE,
@@ -409,9 +528,11 @@ from .youtube import (
     YoutubeUserIE,
     YoutubeWatchLaterIE,
 )
-
-from .zdf import ZDFIE
-
+from .zdf import ZDFIE, ZDFChannelIE
+from .zingmp3 import (
+    ZingMp3SongIE,
+    ZingMp3AlbumIE,
+)
 
 _ALL_CLASSES = [
     klass
@@ -430,4 +551,4 @@ def gen_extractors():
 
 def get_info_extractor(ie_name):
     """Returns the info extractor class with the given ie_name"""
-    return globals()[ie_name+'IE']
+    return globals()[ie_name + 'IE']
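
get_info_extractor resolves an extractor class by bare name at runtime: every
class imported above follows the <Name>IE convention, so the module's own
globals() doubles as the registry. Usage, assuming youtube_dl is importable:

    from youtube_dl.extractor import gen_extractors, get_info_extractor

    YoutubeIE = get_info_extractor('Youtube')  # the class, not an instance
    print(YoutubeIE().IE_NAME)                 # -> 'youtube'

    # gen_extractors() instantiates _ALL_CLASSES in definition order, which
    # is also the order in which URLs are matched against the extractors.
    extractors = gen_extractors()
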
index 7d89f44ee4e6c5a1607c0567aafa3e2172cebbb1..dc0fb85d6048962505d1d207ae590940d69f52e6 100644 (file)
@@ -11,19 +11,18 @@ class ABCIE(InfoExtractor):
     _VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
 
     _TEST = {
-        'url': 'http://www.abc.net.au/news/2014-07-25/bringing-asylum-seekers-to-australia-would-give/5624716',
-        'md5': 'dad6f8ad011a70d9ddf887ce6d5d0742',
+        'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
+        'md5': 'cb3dd03b18455a661071ee1e28344d9f',
         'info_dict': {
-            'id': '5624716',
+            'id': '5868334',
             'ext': 'mp4',
-            'title': 'Bringing asylum seekers to Australia would give them right to asylum claims: professor',
-            'description': 'md5:ba36fa5e27e5c9251fd929d339aea4af',
+            'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
+            'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
         },
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         urls_info_json = self._search_regex(
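
This hunk, like the academicearth, addanime and aparat ones below, swaps the
two-line re.match boilerplate for _match_id. The helper presumably just
applies _VALID_URL and returns the named id group, which is also why
addanime's group is renamed from video_id to id further down. Roughly:

    import re

    class InfoExtractor(object):
        # Sketch only; the real method lives in extractor/common.py.
        @classmethod
        def _match_id(cls, url):
            m = re.match(cls._VALID_URL, url)
            assert m, 'URL did not match _VALID_URL'
            return m.group('id')  # the pattern must define (?P<id>...)
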
index 59d3bbba413c3c256a3f77917708fb171e337b14..47313fba8735902f964c0cd39992f9962e0f47fb 100644 (file)
@@ -1,4 +1,5 @@
 from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -7,17 +8,25 @@ from .common import InfoExtractor
 class AcademicEarthCourseIE(InfoExtractor):
     _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)'
     IE_NAME = 'AcademicEarth:Course'
+    _TEST = {
+        'url': 'http://academicearth.org/playlists/laws-of-nature/',
+        'info_dict': {
+            'id': 'laws-of-nature',
+            'title': 'Laws of Nature',
+            'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
+        },
+        'playlist_count': 4,
+    }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        playlist_id = m.group('id')
+        playlist_id = self._match_id(url)
 
         webpage = self._download_webpage(url, playlist_id)
         title = self._html_search_regex(
-            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, u'title')
+            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title')
         description = self._html_search_regex(
             r'<p class="excerpt"[^>]*?>(.*?)</p>',
-            webpage, u'description', fatal=False)
+            webpage, 'description', fatal=False)
         urls = re.findall(
             r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">',
             webpage)
index fcf296057cc807edbdca5ca1effbc9ad50153400..203936e54a3797ae37535022ad02757a925f24d7 100644 (file)
@@ -3,19 +3,19 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_HTTPError,
     compat_str,
     compat_urllib_parse,
     compat_urllib_parse_urlparse,
-
+)
+from ..utils import (
     ExtractorError,
 )
 
 
 class AddAnimeIE(InfoExtractor):
-
-    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<video_id>[\w_]+)(?:.*)'
+    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<id>[\w_]+)(?:.*)'
     _TEST = {
         'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
         'md5': '72954ea10bc979ab5e2eb288b21425a0',
@@ -28,9 +28,9 @@ class AddAnimeIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
+        video_id = self._match_id(url)
+
         try:
-            mobj = re.match(self._VALID_URL, url)
-            video_id = mobj.group('video_id')
             webpage = self._download_webpage(url, video_id)
         except ExtractorError as ee:
             if not isinstance(ee.cause, compat_HTTPError) or \
@@ -48,7 +48,7 @@ class AddAnimeIE(InfoExtractor):
                 r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
                 redir_webpage)
             if av is None:
-                raise ExtractorError(u'Cannot find redirect math task')
+                raise ExtractorError('Cannot find redirect math task')
             av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
 
             parsed_url = compat_urllib_parse_urlparse(url)
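
The anti-bot redirect page embeds a toy challenge of the form
"a.value = X+Y*Z;", which the extractor evaluates with plain integer
arithmetic, multiplication binding tighter than addition just as in the
page's JavaScript. A worked example on an invented snippet:

    import re

    redir_webpage = 'a.value = 3+4*2;'  # hypothetical page content
    av = re.search(
        r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);', redir_webpage)
    av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
    print(av_res)  # -> 11
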
index a00bfcb35fc8f61b2192592a57776921d0dee9f6..39e4ca296f97a8fe20e65ab7160f9931fee89a67 100644 (file)
 from __future__ import unicode_literals
 
 import re
+import json
 
 from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
 
 class AdultSwimIE(InfoExtractor):
-    _VALID_URL = r'https?://video\.adultswim\.com/(?P<path>.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$'
-    _TEST = {
-        'url': 'http://video.adultswim.com/rick-and-morty/close-rick-counters-of-the-rick-kind.html?x=y#title',
+    _VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?P<is_playlist>playlists/)?(?P<show_path>[^/]+)/(?P<episode_path>[^/?#]+)/?'
+
+    _TESTS = [{
+        'url': 'http://adultswim.com/videos/rick-and-morty/pilot',
         'playlist': [
             {
-                'md5': '4da359ec73b58df4575cd01a610ba5dc',
+                'md5': '247572debc75c7652f253c8daa51a14d',
                 'info_dict': {
-                    'id': '8a250ba1450996e901453d7f02ca02f5',
+                    'id': 'rQxZvXQ4ROaSOqq-or2Mow-0',
                     'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 1',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
+                    'title': 'Rick and Morty - Pilot Part 1',
+                    'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
+                },
             },
             {
-                'md5': 'ffbdf55af9331c509d95350bd0cc1819',
+                'md5': '77b0e037a4b20ec6b98671c4c379f48d',
                 'info_dict': {
-                    'id': '8a250ba1450996e901453d7f4bd102f6',
+                    'id': 'rQxZvXQ4ROaSOqq-or2Mow-3',
                     'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 2',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
-            },
-            {
-                'md5': 'b92409635540304280b4b6c36bd14a0a',
-                'info_dict': {
-                    'id': '8a250ba1450996e901453d7fa73c02f7',
-                    'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 3',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
+                    'title': 'Rick and Morty - Pilot Part 4',
+                    'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
+                },
             },
+        ],
+        'info_dict': {
+            'title': 'Rick and Morty - Pilot',
+            'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
+        }
+    }, {
+        'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/',
+        'playlist': [
             {
-                'md5': 'e8818891d60e47b29cd89d7b0278156d',
+                'md5': '2eb5c06d0f9a1539da3718d897f13ec5',
                 'info_dict': {
-                    'id': '8a250ba1450996e901453d7fc8ba02f8',
+                    'id': '-t8CamQlQ2aYZ49ItZCFog-0',
                     'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 4',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
+                    'title': 'American Dad - Putting Francine Out of Business',
+                    'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
+                },
             }
-        ]
-    }
-
-    _video_extensions = {
-        '3500': 'flv',
-        '640': 'mp4',
-        '150': 'mp4',
-        'ipad': 'm3u8',
-        'iphone': 'm3u8'
-    }
-    _video_dimensions = {
-        '3500': (1280, 720),
-        '640': (480, 270),
-        '150': (320, 180)
-    }
+        ],
+        'info_dict': {
+            'title': 'American Dad - Putting Francine Out of Business',
+            'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
+        },
+    }]
+
+    @staticmethod
+    def find_video_info(collection, slug):
+        for video in collection.get('videos'):
+            if video.get('slug') == slug:
+                return video
+
+    @staticmethod
+    def find_collection_by_linkURL(collections, linkURL):
+        for collection in collections:
+            if collection.get('linkURL') == linkURL:
+                return collection
+
+    @staticmethod
+    def find_collection_containing_video(collections, slug):
+        for collection in collections:
+            for video in collection.get('videos'):
+                if video.get('slug') == slug:
+                    return collection, video
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_path = mobj.group('path')
-
-        webpage = self._download_webpage(url, video_path)
-        episode_id = self._html_search_regex(r'<link rel="video_src" href="http://i\.adultswim\.com/adultswim/adultswimtv/tools/swf/viralplayer.swf\?id=([0-9a-f]+?)"\s*/?\s*>', webpage, 'episode_id')
-        title = self._og_search_title(webpage)
-
-        index_url = 'http://asfix.adultswim.com/asfix-svc/episodeSearch/getEpisodesByIDs?networkName=AS&ids=%s' % episode_id
-        idoc = self._download_xml(index_url, title, 'Downloading episode index', 'Unable to download episode index')
-
-        episode_el = idoc.find('.//episode')
-        show_title = episode_el.attrib.get('collectionTitle')
-        episode_title = episode_el.attrib.get('title')
-        thumbnail = episode_el.attrib.get('thumbnailUrl')
-        description = episode_el.find('./description').text.strip()
+        show_path = mobj.group('show_path')
+        episode_path = mobj.group('episode_path')
+        is_playlist = True if mobj.group('is_playlist') else False
+
+        webpage = self._download_webpage(url, episode_path)
+
+        # Extract the value of `bootstrappedData` from the JavaScript in the page.
+        bootstrappedDataJS = self._search_regex(r'var bootstrappedData = ({.*});', webpage, episode_path)
+
+        try:
+            bootstrappedData = json.loads(bootstrappedDataJS)
+        except ValueError as ve:
+            errmsg = '%s: Failed to parse JSON' % episode_path
+            raise ExtractorError(errmsg, cause=ve)
+
+        # Downloading videos from a /videos/playlist/ URL needs to be handled differently.
+        # NOTE: We only download one video (the current one), not the whole playlist.
+        if is_playlist:
+            collections = bootstrappedData['playlists']['collections']
+            collection = self.find_collection_by_linkURL(collections, show_path)
+            video_info = self.find_video_info(collection, episode_path)
+
+            show_title = video_info['showTitle']
+            segment_ids = [video_info['videoPlaybackID']]
+        else:
+            collections = bootstrappedData['show']['collections']
+            collection, video_info = self.find_collection_containing_video(collections, episode_path)
+
+            show = bootstrappedData['show']
+            show_title = show['title']
+            segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
+
+        episode_id = video_info['id']
+        episode_title = video_info['title']
+        episode_description = video_info['description']
+        episode_duration = video_info.get('duration')
 
         entries = []
-        segment_els = episode_el.findall('./segments/segment')
+        for part_num, segment_id in enumerate(segment_ids):
+            segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=mobile' % segment_id
+
+            segment_title = '%s - %s' % (show_title, episode_title)
+            if len(segment_ids) > 1:
+                segment_title += ' Part %d' % (part_num + 1)
 
-        for part_num, segment_el in enumerate(segment_els):
-            segment_id = segment_el.attrib.get('id')
-            segment_title = '%s %s part %d' % (show_title, episode_title, part_num + 1)
-            thumbnail = segment_el.attrib.get('thumbnailUrl')
-            duration = segment_el.attrib.get('duration')
+            idoc = self._download_xml(
+                segment_url, segment_title,
+                'Downloading segment information', 'Unable to download segment information')
 
-            segment_url = 'http://asfix.adultswim.com/asfix-svc/episodeservices/getCvpPlaylist?networkName=AS&id=%s' % segment_id
-            idoc = self._download_xml(segment_url, segment_title, 'Downloading segment information', 'Unable to download segment information')
+            segment_duration = idoc.find('.//trt').text.strip()
 
             formats = []
             file_els = idoc.findall('.//files/file')
 
             for file_el in file_els:
                 bitrate = file_el.attrib.get('bitrate')
-                type = file_el.attrib.get('type')
-                width, height = self._video_dimensions.get(bitrate, (None, None))
+                ftype = file_el.attrib.get('type')
+
                 formats.append({
-                    'format_id': '%s-%s' % (bitrate, type),
-                    'url': file_el.text,
-                    'ext': self._video_extensions.get(bitrate, 'mp4'),
+                    'format_id': '%s_%s' % (bitrate, ftype),
+                    'url': file_el.text.strip(),
                     # The bitrate may not be a number (for example: 'iphone')
                     'tbr': int(bitrate) if bitrate.isdigit() else None,
-                    'height': height,
-                    'width': width
+                    'quality': 1 if ftype == 'hd' else -1
                 })
 
             self._sort_formats(formats)
@@ -122,18 +151,16 @@ class AdultSwimIE(InfoExtractor):
                 'id': segment_id,
                 'title': segment_title,
                 'formats': formats,
-                'uploader': show_title,
-                'thumbnail': thumbnail,
-                'duration': duration,
-                'description': description
+                'duration': segment_duration,
+                'description': episode_description
             })
 
         return {
             '_type': 'playlist',
             'id': episode_id,
-            'display_id': video_path,
+            'display_id': episode_path,
             'entries': entries,
-            'title': '%s %s' % (show_title, episode_title),
-            'description': description,
-            'thumbnail': thumbnail
+            'title': '%s %s' % (show_title, episode_title),
+            'description': episode_description,
+            'duration': episode_duration
         }
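
The shape the new static helpers expect from bootstrappedData: each
collection pairs a linkURL with a list of videos, and each video carries a
slug plus its playback IDs. A toy document (IDs echo the test above, the
rest is invented) walked with find_collection_containing_video:

    from youtube_dl.extractor.adultswim import AdultSwimIE

    data = {
        'show': {
            'title': 'Rick and Morty',
            'collections': [{
                'linkURL': 'rick-and-morty',
                'videos': [{
                    'slug': 'pilot',
                    'id': 'rQxZvXQ4ROaSOqq-or2Mow',
                    'title': 'Pilot',
                    'clips': [{'videoPlaybackID': 'rQxZvXQ4ROaSOqq-or2Mow-0'}],
                }],
            }],
        },
    }

    collections = data['show']['collections']
    collection, video_info = AdultSwimIE.find_collection_containing_video(
        collections, 'pilot')
    segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
    # -> ['rQxZvXQ4ROaSOqq-or2Mow-0']; one assets API request per segment
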
index 7bd7978841d06747145feeda56624de84747fcc1..398e93bfb4f8472a23d2b8669e7f83f867933244 100644 (file)
@@ -22,7 +22,7 @@ class AllocineIE(InfoExtractor):
             'id': '19546517',
             'ext': 'mp4',
             'title': 'Astérix - Le Domaine des Dieux Teaser VF',
-            'description': 'md5:4a754271d9c6f16c72629a8a993ee884',
+            'description': 'md5:abcd09ce503c6560512c14ebfdb720d2',
             'thumbnail': 're:http://.*\.jpg',
         },
     }, {
diff --git a/youtube_dl/extractor/anysex.py b/youtube_dl/extractor/anysex.py
new file mode 100644 (file)
index 0000000..ad86d6e
--- /dev/null
@@ -0,0 +1,61 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    int_or_none,
+)
+
+
+class AnySexIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?anysex\.com/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://anysex.com/156592/',
+        'md5': '023e9fbb7f7987f5529a394c34ad3d3d',
+        'info_dict': {
+            'id': '156592',
+            'ext': 'mp4',
+            'title': 'Busty and sexy blondie in her bikini strips for you',
+            'description': 'md5:de9e418178e2931c10b62966474e1383',
+            'categories': ['Erotic'],
+            'duration': 270,
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(r"video_url\s*:\s*'([^']+)'", webpage, 'video URL')
+
+        title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
+        description = self._html_search_regex(
+            r'<div class="description"[^>]*>([^<]+)</div>', webpage, 'description', fatal=False)
+        thumbnail = self._html_search_regex(
+            r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)
+
+        categories = re.findall(
+            r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)
+
+        duration = parse_duration(self._search_regex(
+            r'<b>Duration:</b> (?:<q itemprop="duration">)?(\d+:\d+)', webpage, 'duration', fatal=False))
+        view_count = int_or_none(self._html_search_regex(
+            r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'categories': categories,
+            'duration': duration,
+            'view_count': view_count,
+            'age_limit': 18,
+        }
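
The duration of 270 in the test above is parse_duration applied to the
page's "4:30" string (matched by the (\d+:\d+) group). A minimal parser in
the same spirit, noting that the real helper in utils.py accepts many more
spellings:

    def parse_duration_simple(s):
        """'4:30' -> 270, '1:02:03' -> 3723, garbage -> None."""
        if not s:
            return None
        try:
            parts = [int(p) for p in s.strip().split(':')]
        except ValueError:
            return None
        seconds = 0
        for p in parts:
            seconds = seconds * 60 + p
        return seconds

    print(parse_duration_simple('4:30'))  # -> 270
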
index a7bfe5a5c8d8911f6d9c8e3b65762de1a822df57..47f8e415777ee21bfa5e001921077f3c9aaa16af 100644 (file)
@@ -21,7 +21,7 @@ class AolIE(InfoExtractor):
         (?:$|\?)
     '''
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-518167793?icid=OnHomepageC2Wide_MustSee_Img',
         'md5': '18ef68f48740e86ae94b98da815eec42',
         'info_dict': {
@@ -30,7 +30,14 @@ class AolIE(InfoExtractor):
             'title': 'U.S. Official Warns Of \'Largest Ever\' IRS Phone Scam',
         },
         'add_ie': ['FiveMin'],
-    }
+    }, {
+        'url': 'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316',
+        'info_dict': {
+            'id': '152147',
+            'title': 'Brace Yourself - Today\'s Weirdest News',
+        },
+        'playlist_mincount': 10,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
index 7e93bc4df286e71554f272fb69ba43348f3c5da4..15006336faacb0c7f6ab9c24263726776866dbb6 100644 (file)
@@ -1,4 +1,5 @@
-#coding: utf-8
+# coding: utf-8
+from __future__ import unicode_literals
 
 import re
 
@@ -13,39 +14,39 @@ class AparatIE(InfoExtractor):
     _VALID_URL = r'^https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'
 
     _TEST = {
-        u'url': u'http://www.aparat.com/v/wP8On',
-        u'file': u'wP8On.mp4',
-        u'md5': u'6714e0af7e0d875c5a39c4dc4ab46ad1',
-        u'info_dict': {
-            u"title": u"تیم گلکسی 11 - زومیت",
+        'url': 'http://www.aparat.com/v/wP8On',
+        'md5': '6714e0af7e0d875c5a39c4dc4ab46ad1',
+        'info_dict': {
+            'id': 'wP8On',
+            'ext': 'mp4',
+            'title': 'تیم گلکسی 11 - زومیت',
         },
-        #u'skip': u'Extremely unreliable',
+        # 'skip': 'Extremely unreliable',
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = self._match_id(url)
 
         # Note: There is an easier-to-parse configuration at
         # http://www.aparat.com/video/video/config/videohash/%video_id
         # but the URL in there does not work
-        embed_url = (u'http://www.aparat.com/video/video/embed/videohash/' +
-                     video_id + u'/vt/frame')
+        embed_url = ('http://www.aparat.com/video/video/embed/videohash/' +
+                     video_id + '/vt/frame')
         webpage = self._download_webpage(embed_url, video_id)
 
         video_urls = re.findall(r'fileList\[[0-9]+\]\s*=\s*"([^"]+)"', webpage)
         for i, video_url in enumerate(video_urls):
             req = HEADRequest(video_url)
             res = self._request_webpage(
-                req, video_id, note=u'Testing video URL %d' % i, errnote=False)
+                req, video_id, note='Testing video URL %d' % i, errnote=False)
             if res:
                 break
         else:
-            raise ExtractorError(u'No working video URLs found')
+            raise ExtractorError('No working video URLs found')
 
-        title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, u'title')
+        title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title')
         thumbnail = self._search_regex(
-            r'\s+image:\s*"([^"]+)"', webpage, u'thumbnail', fatal=False)
+            r'\s+image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
index dc8657b67c9850c1676af737f319cb4c06bad6d6..0c01fa1a13ffa6fbfbfe7b7fb2283d5ed4f8b70f 100644 (file)
@@ -6,6 +6,7 @@ import json
 from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
+    int_or_none,
 )
 
 
@@ -69,15 +70,17 @@ class AppleTrailersIE(InfoExtractor):
         uploader_id = mobj.group('company')
 
         playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
+
         def fix_html(s):
             s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
             s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
             # The ' in the onClick attributes are not escaped, it couldn't be parsed
             # like: http://trailers.apple.com/trailers/wb/gravity/
+
             def _clean_json(m):
                 return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
             s = re.sub(self._JSON_RE, _clean_json, s)
-            s = '<html>' + s + u'</html>'
+            s = '<html>%s</html>' % s
             return s
         doc = self._download_xml(playlist_url, movie, transform_source=fix_html)
 
@@ -85,7 +88,7 @@ class AppleTrailersIE(InfoExtractor):
         for li in doc.findall('./div/ul/li'):
             on_click = li.find('.//a').attrib['onClick']
             trailer_info_json = self._search_regex(self._JSON_RE,
-                on_click, 'trailer info')
+                                                   on_click, 'trailer info')
             trailer_info = json.loads(trailer_info_json)
             title = trailer_info['title']
             video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
@@ -110,8 +113,8 @@ class AppleTrailersIE(InfoExtractor):
                 formats.append({
                     'url': format_url,
                     'format': format['type'],
-                    'width': format['width'],
-                    'height': int(format['height']),
+                    'width': int_or_none(format['width']),
+                    'height': int_or_none(format['height']),
                 })
 
             self._sort_formats(formats)
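
int_or_none replaces the bare int() casts because the trailer metadata
sometimes omits width or height; the helper hands back None instead of
raising, and the format sorter tolerates None. A minimal version consistent
with its use here (the real helper also takes scaling arguments):

    def int_or_none(v, default=None):
        # ints and numeric strings pass through; None or junk -> default.
        try:
            return int(v)
        except (TypeError, ValueError):
            return default

    print(int_or_none('480'))  # -> 480
    print(int_or_none(None))   # -> None
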
index 30a85c8c1c8d1b3a10a40ac55a577e3402cb487a..967bd865c53229e7ff38997ea9a7f4a6ab19f92d 100644 (file)
@@ -4,14 +4,21 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from .generic import GenericIE
 from ..utils import (
     determine_ext,
     ExtractorError,
     qualities,
+    int_or_none,
+    parse_duration,
+    unified_strdate,
+    xpath_text,
+    parse_xml,
 )
 
 
-class ARDIE(InfoExtractor):
+class ARDMediathekIE(InfoExtractor):
+    IE_NAME = 'ARD:mediathek'
     _VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
 
     _TESTS = [{
@@ -46,6 +53,14 @@ class ARDIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
 
+        if '>Der gewünschte Beitrag ist nicht mehr verfügbar.<' in webpage:
+            raise ExtractorError('Video %s is no longer available' % video_id, expected=True)
+
+        if re.search(r'[\?&]rss($|[=&])', url):
+            doc = parse_xml(webpage)
+            if doc.tag == 'rss':
+                return GenericIE()._extract_rss(url, video_id, doc)
+
         title = self._html_search_regex(
             [r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
              r'<meta name="dcterms.title" content="(.*?)"/>',
@@ -120,3 +135,60 @@ class ARDIE(InfoExtractor):
             'formats': formats,
             'thumbnail': thumbnail,
         }
+
+
+class ARDIE(InfoExtractor):
+    _VALID_URL = '(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
+    _TEST = {
+        'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
+        'md5': 'd216c3a86493f9322545e045ddc3eb35',
+        'info_dict': {
+            'display_id': 'die-story-im-ersten-mission-unter-falscher-flagge',
+            'id': '100',
+            'ext': 'mp4',
+            'duration': 2600,
+            'title': 'Die Story im Ersten: Mission unter falscher Flagge',
+            'upload_date': '20140804',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('display_id')
+
+        player_url = mobj.group('mainurl') + '~playerXml.xml'
+        doc = self._download_xml(player_url, display_id)
+        video_node = doc.find('./video')
+        upload_date = unified_strdate(xpath_text(
+            video_node, './broadcastDate'))
+        thumbnail = xpath_text(video_node, './/teaserImage//variant/url')
+
+        formats = []
+        for a in video_node.findall('.//asset'):
+            f = {
+                'format_id': a.attrib['type'],
+                'width': int_or_none(a.find('./frameWidth').text),
+                'height': int_or_none(a.find('./frameHeight').text),
+                'vbr': int_or_none(a.find('./bitrateVideo').text),
+                'abr': int_or_none(a.find('./bitrateAudio').text),
+                'vcodec': a.find('./codecVideo').text,
+                'tbr': int_or_none(a.find('./totalBitrate').text),
+            }
+            if a.find('./serverPrefix').text:
+                f['url'] = a.find('./serverPrefix').text
+                f['playpath'] = a.find('./fileName').text
+            else:
+                f['url'] = a.find('./fileName').text
+            formats.append(f)
+        self._sort_formats(formats)
+
+        return {
+            'id': mobj.group('id'),
+            'formats': formats,
+            'display_id': display_id,
+            'title': video_node.find('./title').text,
+            'duration': parse_duration(video_node.find('./duration').text),
+            'upload_date': upload_date,
+            'thumbnail': thumbnail,
+        }
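
xpath_text, used above for the broadcast date and the thumbnail, is a
null-safe wrapper around Element.find(...).text: a missing node yields None,
which unified_strdate then passes through. Roughly (the real helper in
utils.py raises the project's own error type when fatal=True):

    def xpath_text(node, xpath, name=None, fatal=False):
        n = node.find(xpath)
        if n is None or n.text is None:
            if fatal:
                raise ValueError('Could not find %s' % (name or xpath))
            return None
        return n.text
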
index 9591bad8a66254e90247a204b43b10b6db4f6406..219631b9b0dfa690a37e09a7b3473566543370e2 100644 (file)
@@ -5,16 +5,15 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    ExtractorError,
     find_xpath_attr,
     unified_strdate,
-    determine_ext,
     get_element_by_id,
-    compat_str,
     get_element_by_attribute,
+    int_or_none,
+    qualities,
 )
 
-# There are different sources of video in arte.tv, the extraction process 
+# There are different sources of video in arte.tv, the extraction process
 # is different for each one. The videos usually expire in 7 days, so we can't
 # add tests.
 
@@ -78,111 +77,104 @@ class ArteTVPlus7IE(InfoExtractor):
 
     def _extract_from_webpage(self, webpage, video_id, lang):
         json_url = self._html_search_regex(
-            r'arte_vp_url="(.*?)"', webpage, 'json vp url')
+            [r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
+            webpage, 'json vp url')
         return self._extract_from_json_url(json_url, video_id, lang)
 
     def _extract_from_json_url(self, json_url, video_id, lang):
         info = self._download_json(json_url, video_id)
         player_info = info['videoJsonPlayer']
 
+        upload_date_str = player_info.get('shootingDate')
+        if not upload_date_str:
+            upload_date_str = player_info.get('VDA', '').split(' ')[0]
+
+        title = player_info['VTI'].strip()
+        subtitle = player_info.get('VSU', '').strip()
+        if subtitle:
+            title += ' - %s' % subtitle
+
         info_dict = {
             'id': player_info['VID'],
-            'title': player_info['VTI'],
+            'title': title,
             'description': player_info.get('VDE'),
-            'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
+            'upload_date': unified_strdate(upload_date_str),
             'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
         }
-
-        all_formats = player_info['VSR'].values()
-        # Some formats use the m3u8 protocol
-        all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
-        def _match_lang(f):
-            if f.get('versionCode') is None:
-                return True
-            # Return true if that format is in the language of the url
-            if lang == 'fr':
-                l = 'F'
-            elif lang == 'de':
-                l = 'A'
-            else:
-                l = lang
-            regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
-            return any(re.match(r, f['versionCode']) for r in regexes)
-        # Some formats may not be in the same language as the url
-        formats = filter(_match_lang, all_formats)
-        formats = list(formats) # in python3 filter returns an iterator
-        if not formats:
-            # Some videos are only available in the 'Originalversion'
-            # they aren't tagged as being in French or German
-            if all(f['versionCode'] == 'VO' or f['versionCode'] == 'VA' for f in all_formats):
-                formats = all_formats
-            else:
-                raise ExtractorError(u'The formats list is empty')
-
-        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
-            def sort_key(f):
-                return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
-        else:
-            def sort_key(f):
-                versionCode = f.get('versionCode')
-                if versionCode is None:
-                    versionCode = ''
-                return (
-                    # Sort first by quality
-                    int(f.get('height', -1)),
-                    int(f.get('bitrate', -1)),
-                    # The original version with subtitles has lower relevance
-                    re.match(r'VO-ST(F|A)', versionCode) is None,
-                    # The version with sourds/mal subtitles has also lower relevance
-                    re.match(r'VO?(F|A)-STM\1', versionCode) is None,
-                    # Prefer http downloads over m3u8
-                    0 if f['url'].endswith('m3u8') else 1,
-                )
-        formats = sorted(formats, key=sort_key)
-        def _format(format_info):
-            quality = ''
-            height = format_info.get('height')
-            if height is not None:
-                quality = compat_str(height)
-            bitrate = format_info.get('bitrate')
-            if bitrate is not None:
-                quality += '-%d' % bitrate
-            if format_info.get('versionCode') is not None:
-                format_id = '%s-%s' % (quality, format_info['versionCode'])
-            else:
-                format_id = quality
-            info = {
+        qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
+
+        formats = []
+        for format_id, format_dict in player_info['VSR'].items():
+            f = dict(format_dict)
+            versionCode = f.get('versionCode')
+
+            langcode = {
+                'fr': 'F',
+                'de': 'A',
+            }.get(lang, lang)
+            lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
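+            # Prefer formats whose version code matches the language of the
+            # requested page; formats without a version code get no
+            # language preference at all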
+            lang_pref = (
+                None if versionCode is None else (
+                    10 if any(re.match(r, versionCode) for r in lang_rexs)
+                    else -10))
+            source_pref = 0
+            if versionCode is not None:
+                # The original version with subtitles has lower relevance
+                if re.match(r'VO-ST(F|A)', versionCode):
+                    source_pref -= 10
+                # The version with subtitles for the deaf and hard of hearing
+                # (sourds/malentendants) also has lower relevance
+                elif re.match(r'VO?(F|A)-STM\1', versionCode):
+                    source_pref -= 9
+            format = {
                 'format_id': format_id,
-                'format_note': format_info.get('versionLibelle'),
-                'width': format_info.get('width'),
-                'height': height,
+                'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
+                'language_preference': lang_pref,
+                'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
+                'width': int_or_none(f.get('width')),
+                'height': int_or_none(f.get('height')),
+                'tbr': int_or_none(f.get('bitrate')),
+                'quality': qfunc(f['quality']),
+                'source_preference': source_pref,
             }
-            if format_info['mediaType'] == 'rtmp':
-                info['url'] = format_info['streamer']
-                info['play_path'] = 'mp4:' + format_info['url']
-                info['ext'] = 'flv'
+
+            if f.get('mediaType') == 'rtmp':
+                format['url'] = f['streamer']
+                format['play_path'] = 'mp4:' + f['url']
+                format['ext'] = 'flv'
             else:
-                info['url'] = format_info['url']
-                info['ext'] = determine_ext(info['url'])
-            return info
-        info_dict['formats'] = [_format(f) for f in formats]
+                format['url'] = f['url']
 
+            formats.append(format)
+
+        self._sort_formats(formats)
+
+        info_dict['formats'] = formats
         return info_dict
 
 
 # It also uses the arte_vp_url url from the webpage to extract the information
 class ArteTVCreativeIE(ArteTVPlus7IE):
     IE_NAME = 'arte.tv:creative'
-    _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)'
+    _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/(?:magazine?/)?(?P<id>[^?#]+)'
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
         'info_dict': {
-            'id': '050489-002',
+            'id': '72176',
             'ext': 'mp4',
-            'title': 'Agentur Amateur / Agence Amateur #2 : Corporate Design',
+            'title': 'Folge 2 - Corporate Design',
+            'upload_date': '20131004',
         },
-    }
+    }, {
+        'url': 'http://creative.arte.tv/fr/Monty-Python-Reunion',
+        'info_dict': {
+            'id': '160676',
+            'ext': 'mp4',
+            'title': 'Monty Python live (mostly)',
+            'description': 'Événement ! Quarante-cinq ans après leurs premiers succès, les légendaires Monty Python remontent sur scène.\n',
+            'upload_date': '20140805',
+        }
+    }]
 
 
 class ArteTVFutureIE(ArteTVPlus7IE):
diff --git a/youtube_dl/extractor/audiomack.py b/youtube_dl/extractor/audiomack.py
new file mode 100644
index 0000000..622b209
--- /dev/null
@@ -0,0 +1,69 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import time
+
+from .common import InfoExtractor
+from .soundcloud import SoundcloudIE
+from ..utils import ExtractorError
+
+
+class AudiomackIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
+    IE_NAME = 'audiomack'
+    _TESTS = [
+        # hosted on audiomack
+        {
+            'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
+            'info_dict': {
+                'id': 'roosh-williams/extraordinary',
+                'ext': 'mp3',
+                'title': 'Roosh Williams - Extraordinary'
+            }
+        },
+        # hosted on soundcloud via audiomack
+        {
+            'add_ie': ['Soundcloud'],
+            'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
+            'info_dict': {
+                'id': '172419696',
+                'ext': 'mp3',
+                'description': 'md5:1fc3272ed7a635cce5be1568c2822997',
+                'title': 'Young Thug ft Lil Wayne - Take Kare',
+                'uploader': 'Young Thug World',
+                'upload_date': '20141016',
+            }
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
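+        # The _=<timestamp> query parameter serves as a cache buster for
+        # the URL-resolving API call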
+        api_response = self._download_json(
+            'http://www.audiomack.com/api/music/url/song/%s?_=%d' % (
+                video_id, time.time()),
+            video_id)
+
+        if "url" not in api_response:
+            raise ExtractorError("Unable to deduce api url of song")
+        realurl = api_response["url"]
+
+        # Audiomack wraps a lot of soundcloud tracks in their branded wrapper
+        # - if so, pass the work off to the soundcloud extractor
+        if SoundcloudIE.suitable(realurl):
+            return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}
+
+        webpage = self._download_webpage(url, video_id)
+        artist = self._html_search_regex(
+            r'<span class="artist">(.*?)</span>', webpage, 'artist')
+        songtitle = self._html_search_regex(
+            r'<h1 class="profile-title song-title"><span class="artist">.*?</span>(.*?)</h1>',
+            webpage, 'title')
+        title = artist + ' - ' + songtitle
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': realurl,
+        }
diff --git a/youtube_dl/extractor/auengine.py b/youtube_dl/extractor/auengine.py
index 20bf12550d4b4493982ca3ea6f31578368e31aba..1c765532a00d9274c2531277cc1ad81b75053dfd 100644
@@ -24,8 +24,7 @@ class AUEngineIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_regex(r'<title>(?P<title>.+?)</title>', webpage, 'title')
diff --git a/youtube_dl/extractor/azubu.py b/youtube_dl/extractor/azubu.py
new file mode 100644
index 0000000..0961d33
--- /dev/null
@@ -0,0 +1,93 @@
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import float_or_none
+
+
+class AzubuIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?azubu\.tv/[^/]+#!/play/(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1',
+            'md5': 'a88b42fcf844f29ad6035054bd9ecaf4',
+            'info_dict': {
+                'id': '15575',
+                'ext': 'mp4',
+                'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1',
+                'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01',
+                'thumbnail': 're:^https?://.*\.jpe?g',
+                'timestamp': 1417523507.334,
+                'upload_date': '20141202',
+                'duration': 9988.7,
+                'uploader': 'GSL',
+                'uploader_id': 414310,
+                'view_count': int,
+            },
+        },
+        {
+            'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-',
+            'md5': 'b72a871fe1d9f70bd7673769cdb3b925',
+            'info_dict': {
+                'id': '9344',
+                'ext': 'mp4',
+                'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"',
+                'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af',
+                'thumbnail': 're:^https?://.*\.jpe?g',
+                'timestamp': 1410530893.320,
+                'upload_date': '20140912',
+                'duration': 172.385,
+                'uploader': 'FnaticTV',
+                'uploader_id': 272749,
+                'view_count': int,
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        data = self._download_json(
+            'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data']
+
+        title = data['title'].strip()
+        description = data['description']
+        thumbnail = data['thumbnail']
+        view_count = data['view_count']
+        uploader = data['user']['username']
+        uploader_id = data['user']['id']
+
+        stream_params = json.loads(data['stream_params'])
+
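+        # creationDate and length are given in milliseconds, hence scale=1000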
+        timestamp = float_or_none(stream_params['creationDate'], 1000)
+        duration = float_or_none(stream_params['length'], 1000)
+
+        renditions = stream_params.get('renditions') or []
+        video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength')
+        if video:
+            renditions.append(video)
+
+        formats = [{
+            'url': fmt['url'],
+            'width': fmt['frameWidth'],
+            'height': fmt['frameHeight'],
+            'vbr': float_or_none(fmt['encodingRate'], 1000),
+            'filesize': fmt['size'],
+            'vcodec': fmt['videoCodec'],
+            'container': fmt['videoContainer'],
+        } for fmt in renditions if fmt['url']]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'duration': duration,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'view_count': view_count,
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py
index ccd31c4c7093d54e86df50a42600d08e12e55005..1ca0b7cf2bf78717fd45a11d17e3cce7e5191b9b 100644
@@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor):
     _TEST = {
         'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
         'info_dict': {
             'id': '4050584',
             'ext': 'flv',
@@ -38,7 +38,7 @@ class BambuserIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
-            '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
+                    '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
         info_json = self._download_webpage(info_url, video_id)
         info = json.loads(info_json)['result']
 
@@ -59,6 +59,13 @@ class BambuserChannelIE(InfoExtractor):
     _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
     # The maximum number we can get with each request
     _STEP = 50
+    _TEST = {
+        'url': 'http://bambuser.com/channel/pixelversity',
+        'info_dict': {
+            'title': 'pixelversity',
+        },
+        'playlist_mincount': 60,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -66,17 +73,18 @@ class BambuserChannelIE(InfoExtractor):
         urls = []
         last_id = ''
         for i in itertools.count(1):
-            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
+            req_url = (
+                'http://bambuser.com/xhr-api/index.php?username={user}'
                 '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                 '&method=broadcast&format=json&vid_older_than={last}'
-                ).format(user=user, count=self._STEP, last=last_id)
+            ).format(user=user, count=self._STEP, last=last_id)
             req = compat_urllib_request.Request(req_url)
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
-            info_json = self._download_webpage(req, user,
-                'Downloading page %d' % i)
-            results = json.loads(info_json)['result']
-            if len(results) == 0:
+            data = self._download_json(
+                req, user, 'Downloading page %d' % i)
+            results = data['result']
+            if not results:
                 break
             last_id = results[-1]['vid']
             urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)
diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py
index dcbbdef4346c36c789e49531df1dc602bc35255b..acddbc8f1d19ebc48b721b3867b98fc30af3c133 100644
@@ -15,13 +15,23 @@ class BandcampIE(InfoExtractor):
     _VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
     _TESTS = [{
         'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
-        'file': '1812978515.mp3',
         'md5': 'c557841d5e50261777a6585648adf439',
         'info_dict': {
-            "title": "youtube-dl  \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
-            "duration": 9.8485,
+            'id': '1812978515',
+            'ext': 'mp3',
+            'title': "youtube-dl  \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
+            'duration': 9.8485,
         },
         '_skip': 'There is a limit of 200 free downloads / month for the test song'
+    }, {
+        'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
+        'md5': '2b68e5851514c20efdff2afc5603b8b4',
+        'info_dict': {
+            'id': '2650410135',
+            'ext': 'mp3',
+            'title': 'Lanius (Battle)',
+            'uploader': 'Ben Prunty Music',
+        },
     }]
 
     def _real_extract(self, url):
@@ -59,9 +69,9 @@ class BandcampIE(InfoExtractor):
                 raise ExtractorError('No free songs found')
 
         download_link = m_download.group(1)
-        video_id = re.search(
-            r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
-            webpage, re.MULTILINE | re.DOTALL).group('id')
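+        # The track id is embedded in the TralbumData JavaScript object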
+        video_id = self._search_regex(
+            r'var TralbumData = {.*?id: (?P<id>\d+),?$',
+            webpage, 'video id', flags=re.MULTILINE | re.DOTALL)
 
         download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
         # We get the dictionary of the track from some javascript code
@@ -73,12 +83,12 @@ class BandcampIE(InfoExtractor):
         initial_url = mp3_info['url']
         re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
         m_url = re.match(re_url, initial_url)
-        #We build the url we will use to get the final track url
+        # We build the url we will use to get the final track url
         # This url is built by Bandcamp in the script download_bunde_*.js
         request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
         final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
         # If we could correctly generate the .rand field the url would be
-        #in the "download_url" key
+        # in the "download_url" key
         final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
 
         return {
@@ -96,29 +106,40 @@ class BandcampAlbumIE(InfoExtractor):
     IE_NAME = 'Bandcamp:album'
     _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<title>[^?#]+))'
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
         'playlist': [
             {
-                'file': '1353101989.mp3',
                 'md5': '39bc1eded3476e927c724321ddf116cf',
                 'info_dict': {
+                    'id': '1353101989',
+                    'ext': 'mp3',
                     'title': 'Intro',
                 }
             },
             {
-                'file': '38097443.mp3',
                 'md5': '1a2c32e2691474643e912cc6cd4bffaa',
                 'info_dict': {
+                    'id': '38097443',
+                    'ext': 'mp3',
                     'title': 'Kero One - Keep It Alive (Blazo remix)',
                 }
             },
         ],
+        'info_dict': {
+            'title': 'Jazz Format Mixtape vol.1',
+        },
         'params': {
             'playlistend': 2
         },
         'skip': 'Bandcamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
-    }
+    }, {
+        'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
+        'info_dict': {
+            'title': 'Hierophany of the Open Grave',
+        },
+        'playlist_mincount': 9,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
diff --git a/youtube_dl/extractor/bbccouk.py b/youtube_dl/extractor/bbccouk.py
index 75e608f99de4ff3cc14234ab370f370b1ae83940..01c02d360cd7255b14aa7aa8259de52e44701884 100644
@@ -1,9 +1,10 @@
 from __future__ import unicode_literals
 
-import re
+import xml.etree.ElementTree
 
 from .subtitles import SubtitlesInfoExtractor
 from ..utils import ExtractorError
+from ..compat import compat_HTTPError
 
 
 class BBCCoUkIE(SubtitlesInfoExtractor):
@@ -55,7 +56,22 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
                 'skip_download': True,
             },
             'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
-        }
+        },
+        {
+            'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
+            'info_dict': {
+                'id': 'b03k3pb7',
+                'ext': 'flv',
+                'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
+                'description': '2. Invasion',
+                'duration': 3600,
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
+        },
     ]
 
     def _extract_asx_playlist(self, connection, programme_id):
@@ -102,6 +118,10 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
         return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')
 
     def _extract_medias(self, media_selection):
+        error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
+        if error is not None:
+            raise ExtractorError(
+                '%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True)
         return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')
 
     def _extract_connections(self, media):
@@ -158,54 +178,73 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
             subtitles[lang] = srt
         return subtitles
 
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        group_id = mobj.group('id')
-
-        webpage = self._download_webpage(url, group_id, 'Downloading video page')
-        if re.search(r'id="emp-error" class="notinuk">', webpage):
-            raise ExtractorError('Currently BBC iPlayer TV programmes are available to play in the UK only',
-                expected=True)
-
-        playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id,
-            'Downloading playlist XML')
-
-        no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
-        if no_items is not None:
-            reason = no_items.get('reason')
-            if reason == 'preAvailability':
-                msg = 'Episode %s is not yet available' % group_id
-            elif reason == 'postAvailability':
-                msg = 'Episode %s is no longer available' % group_id
+    def _download_media_selector(self, programme_id):
+        try:
+            media_selection = self._download_xml(
+                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
+                programme_id, 'Downloading media selection XML')
+        except ExtractorError as ee:
+            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
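+                # The body of a 403 response is itself a media selection
+                # document; its error element is reported by _extract_medias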
+                media_selection = xml.etree.ElementTree.fromstring(ee.cause.read())
             else:
-                msg = 'Episode %s is not available: %s' % (group_id, reason)
-            raise ExtractorError(msg, expected=True)
+                raise
 
         formats = []
         subtitles = None
 
-        for item in self._extract_items(playlist):
-            kind = item.get('kind')
-            if kind != 'programme' and kind != 'radioProgramme':
-                continue
-            title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
-            description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
+        for media in self._extract_medias(media_selection):
+            kind = media.get('kind')
+            if kind == 'audio':
+                formats.extend(self._extract_audio(media, programme_id))
+            elif kind == 'video':
+                formats.extend(self._extract_video(media, programme_id))
+            elif kind == 'captions':
+                subtitles = self._extract_captions(media, programme_id)
 
-            programme_id = item.get('identifier')
-            duration = int(item.get('duration'))
+        return formats, subtitles
 
-            media_selection = self._download_xml(
-                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s'  % programme_id,
-                programme_id, 'Downloading media selection XML')
+    def _real_extract(self, url):
+        group_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, group_id, 'Downloading video page')
 
-            for media in self._extract_medias(media_selection):
-                kind = media.get('kind')
-                if kind == 'audio':
-                    formats.extend(self._extract_audio(media, programme_id))
-                elif kind == 'video':
-                    formats.extend(self._extract_video(media, programme_id))
-                elif kind == 'captions':
-                    subtitles = self._extract_captions(media, programme_id)
+        programme_id = self._search_regex(
+            r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False)
+        if programme_id:
+            player = self._download_json(
+                'http://www.bbc.co.uk/iplayer/episode/%s.json' % group_id,
+                group_id)['jsConf']['player']
+            title = player['title']
+            description = player['subtitle']
+            duration = player['duration']
+            formats, subtitles = self._download_media_selector(programme_id)
+        else:
+            playlist = self._download_xml(
+                'http://www.bbc.co.uk/iplayer/playlist/%s' % group_id,
+                group_id, 'Downloading playlist XML')
+
+            no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
+            if no_items is not None:
+                reason = no_items.get('reason')
+                if reason == 'preAvailability':
+                    msg = 'Episode %s is not yet available' % group_id
+                elif reason == 'postAvailability':
+                    msg = 'Episode %s is no longer available' % group_id
+                elif reason == 'noMedia':
+                    msg = 'Episode %s is not currently available' % group_id
+                else:
+                    msg = 'Episode %s is not available: %s' % (group_id, reason)
+                raise ExtractorError(msg, expected=True)
+
+            for item in self._extract_items(playlist):
+                kind = item.get('kind')
+                if kind != 'programme' and kind != 'radioProgramme':
+                    continue
+                title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
+                description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
+                programme_id = item.get('identifier')
+                duration = int(item.get('duration'))
+                formats, subtitles = self._download_media_selector(programme_id)
 
         if self._downloader.params.get('listsubtitles', False):
             self._list_available_subtitles(programme_id, subtitles)
@@ -220,4 +259,4 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
             'duration': duration,
             'formats': formats,
             'subtitles': subtitles,
-        }
\ No newline at end of file
+        }
diff --git a/youtube_dl/extractor/beeg.py b/youtube_dl/extractor/beeg.py
new file mode 100644
index 0000000..4e79fea
--- /dev/null
@@ -0,0 +1,65 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class BeegIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://beeg.com/5416503',
+        'md5': '634526ae978711f6b748fe0dd6c11f57',
+        'info_dict': {
+            'id': '5416503',
+            'ext': 'mp4',
+            'title': 'Sultry Striptease',
+            'description': 'md5:6db3c6177972822aaba18652ff59c773',
+            'categories': list,  # NSFW
+            'thumbnail': 're:https?://.*\.jpg$',
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        quality_arr = self._search_regex(
+            r'(?s)var\s+qualityArr\s*=\s*{\s*(.+?)\s*}', webpage, 'quality formats')
+
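+        # qualityArr maps labels like '480p' to URLs; strip the trailing
+        # "p" to obtain the height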
+        formats = [{
+            'url': fmt[1],
+            'format_id': fmt[0],
+            'height': int(fmt[0][:-1]),
+        } for fmt in re.findall(r"'([^']+)'\s*:\s*'([^']+)'", quality_arr)]
+
+        self._sort_formats(formats)
+
+        title = self._html_search_regex(
+            r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
+
+        description = self._html_search_regex(
+            r'<meta name="description" content="([^"]*)"',
+            webpage, 'description', fatal=False)
+        thumbnail = self._html_search_regex(
+            r'\'previewer.url\'\s*:\s*"([^"]*)"',
+            webpage, 'thumbnail', fatal=False)
+
+        categories_str = self._html_search_regex(
+            r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False)
+        categories = (
+            None if categories_str is None
+            else categories_str.split(','))
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'categories': categories,
+            'formats': formats,
+            'age_limit': 18,
+        }
diff --git a/youtube_dl/extractor/behindkink.py b/youtube_dl/extractor/behindkink.py
new file mode 100644
index 0000000..1bdc258
--- /dev/null
@@ -0,0 +1,46 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import url_basename
+
+
+class BehindKinkIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
+    _TEST = {
+        'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/',
+        'md5': '507b57d8fdcd75a41a9a7bdb7989c762',
+        'info_dict': {
+            'id': '37127',
+            'ext': 'mp4',
+            'title': 'What are you passionate about – Marley Blaze',
+            'description': 'md5:aee8e9611b4ff70186f752975d9b94b4',
+            'upload_date': '20141205',
+            'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/12/blaze-1.jpg',
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, display_id)
+
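+        # The numeric id comes from the video file name and the upload date
+        # from the URL path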
+        video_url = self._search_regex(
+            r'<source src="([^"]+)"', webpage, 'video URL')
+        video_id = url_basename(video_url).split('_')[0]
+        upload_date = mobj.group('year') + mobj.group('month') + mobj.group('day')
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'url': video_url,
+            'title': self._og_search_title(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'description': self._og_search_description(webpage),
+            'upload_date': upload_date,
+            'age_limit': 18,
+        }
diff --git a/youtube_dl/extractor/bet.py b/youtube_dl/extractor/bet.py
new file mode 100644
index 0000000..c1fc433
--- /dev/null
@@ -0,0 +1,108 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    xpath_text,
+    xpath_with_ns,
+    int_or_none,
+    parse_iso8601,
+)
+
+
+class BetIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?bet\.com/(?:[^/]+/)+(?P<id>.+?)\.html'
+    _TESTS = [
+        {
+            'url': 'http://www.bet.com/news/politics/2014/12/08/in-bet-exclusive-obama-talks-race-and-racism.html',
+            'info_dict': {
+                'id': '417cd61c-c793-4e8e-b006-e445ecc45add',
+                'display_id': 'in-bet-exclusive-obama-talks-race-and-racism',
+                'ext': 'flv',
+                'title': 'BET News Presents: A Conversation With President Obama',
+                'description': 'md5:5a88d8ae912c1b33e090290af7ec33c6',
+                'duration': 1534,
+                'timestamp': 1418075340,
+                'upload_date': '20141208',
+                'uploader': 'admin',
+                'thumbnail': 're:(?i)^https?://.*\.jpg$',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://www.bet.com/video/news/national/2014/justice-for-ferguson-a-community-reacts.html',
+            'info_dict': {
+                'id': '4160e53b-ad41-43b1-980f-8d85f63121f4',
+                'display_id': 'justice-for-ferguson-a-community-reacts',
+                'ext': 'flv',
+                'title': 'Justice for Ferguson: A Community Reacts',
+                'description': 'A BET News special.',
+                'duration': 1696,
+                'timestamp': 1416942360,
+                'upload_date': '20141125',
+                'uploader': 'admin',
+                'thumbnail': 're:(?i)^https?://.*\.jpg$',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        }
+    ]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, display_id)
+
+        media_url = compat_urllib_parse.unquote(self._search_regex(
+            [r'mediaURL\s*:\s*"([^"]+)"', r"var\s+mrssMediaUrl\s*=\s*'([^']+)'"],
+            webpage, 'media URL'))
+
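+        # The media URL points to an MRSS document describing the video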
+        mrss = self._download_xml(media_url, display_id)
+
+        item = mrss.find('./channel/item')
+
+        NS_MAP = {
+            'dc': 'http://purl.org/dc/elements/1.1/',
+            'media': 'http://search.yahoo.com/mrss/',
+            'ka': 'http://kickapps.com/karss',
+        }
+
+        title = xpath_text(item, './title', 'title')
+        description = xpath_text(
+            item, './description', 'description', fatal=False)
+
+        video_id = xpath_text(item, './guid', 'video id', fatal=False)
+
+        timestamp = parse_iso8601(xpath_text(
+            item, xpath_with_ns('./dc:date', NS_MAP),
+            'upload date', fatal=False))
+        uploader = xpath_text(
+            item, xpath_with_ns('./dc:creator', NS_MAP),
+            'uploader', fatal=False)
+
+        media_content = item.find(
+            xpath_with_ns('./media:content', NS_MAP))
+        duration = int_or_none(media_content.get('duration'))
+        smil_url = media_content.get('url')
+
+        thumbnail = media_content.find(
+            xpath_with_ns('./media:thumbnail', NS_MAP)).get('url')
+
+        formats = self._extract_smil_formats(smil_url, display_id)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'uploader': uploader,
+            'duration': duration,
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/bild.py b/youtube_dl/extractor/bild.py
new file mode 100644
index 0000000..77b562d
--- /dev/null
@@ -0,0 +1,39 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class BildIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P<display_id>[^/]+)-(?P<id>\d+)(?:,auto=true)?\.bild\.html'
+    IE_DESC = 'Bild.de'
+    _TEST = {
+        'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html',
+        'md5': 'dd495cbd99f2413502a1713a1156ac8a',
+        'info_dict': {
+            'id': '38184146',
+            'ext': 'mp4',
+            'title': 'BILD hat sie getestet',
+            'thumbnail': 'http://bilder.bild.de/fotos/stand-das-koennen-die-neuen-ipads-38184138/Bild/1.bild.jpg',
+            'duration': 196,
+            'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
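+        # Replacing the ".bild.html" suffix with ",view=xml.bild.xml" yields
+        # the video metadata as XML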
+        xml_url = url.split('.bild.html')[0] + ',view=xml.bild.xml'
+        doc = self._download_xml(xml_url, video_id)
+
+        duration = int_or_none(doc.attrib.get('duration'), scale=1000)
+
+        return {
+            'id': video_id,
+            'title': doc.attrib['ueberschrift'],
+            'description': doc.attrib.get('text'),
+            'url': doc.attrib['src'],
+            'thumbnail': doc.attrib.get('img'),
+            'duration': duration,
+        }
diff --git a/youtube_dl/extractor/bliptv.py b/youtube_dl/extractor/bliptv.py
index acfc4ad736d9deecf1ed9cdadd4063e2fc8e7243..14b814120be3b8215a28fc00a95f87bd22e0c062 100644
@@ -4,18 +4,22 @@ import re
 
 from .common import InfoExtractor
 from .subtitles import SubtitlesInfoExtractor
-from ..utils import (
+
+from ..compat import (
+    compat_str,
     compat_urllib_request,
-    unescapeHTML,
-    parse_iso8601,
     compat_urlparse,
+)
+from ..utils import (
     clean_html,
-    compat_str,
+    int_or_none,
+    parse_iso8601,
+    unescapeHTML,
 )
 
 
 class BlipTVIE(SubtitlesInfoExtractor):
-    _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+]+)))'
+    _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+_]+)))'
 
     _TESTS = [
         {
@@ -49,20 +53,70 @@ class BlipTVIE(SubtitlesInfoExtractor):
                 'uploader_id': '792887',
                 'duration': 279,
             }
-        }
+        },
+        {
+            # https://bugzilla.redhat.com/show_bug.cgi?id=967465
+            'url': 'http://a.blip.tv/api.swf#h6Uag5KbVwI',
+            'md5': '314e87b1ebe7a48fcbfdd51b791ce5a6',
+            'info_dict': {
+                'id': '6573122',
+                'ext': 'mov',
+                'upload_date': '20130520',
+                'description': 'Two hapless space marines argue over what to do when they realize they have an astronomically huge problem on their hands.',
+                'title': 'Red vs. Blue Season 11 Trailer',
+                'timestamp': 1369029609,
+                'uploader': 'redvsblue',
+                'uploader_id': '792887',
+            }
+        },
+        {
+            'url': 'http://blip.tv/play/gbk766dkj4Yn',
+            'md5': 'fe0a33f022d49399a241e84a8ea8b8e3',
+            'info_dict': {
+                'id': '1749452',
+                'ext': 'mp4',
+                'upload_date': '20090208',
+                'description': 'Witness the first appearance of the Nostalgia Critic character, as Doug reviews the movie Transformers.',
+                'title': 'Nostalgia Critic: Transformers',
+                'timestamp': 1234068723,
+                'uploader': 'NostalgiaCritic',
+                'uploader_id': '246467',
+            }
+        },
+        {
+            # https://github.com/rg3/youtube-dl/pull/4404
+            'note': 'Audio only',
+            'url': 'http://blip.tv/hilarios-productions/weekly-manga-recap-kingdom-7119982',
+            'md5': '76c0a56f24e769ceaab21fbb6416a351',
+            'info_dict': {
+                'id': '7103299',
+                'ext': 'flv',
+                'title': 'Weekly Manga Recap: Kingdom',
+                'description': 'And then Shin breaks the enemy line, and he&apos;s all like HWAH! And then he slices a guy and it&apos;s all like FWASHING! And... it&apos;s really hard to describe the best parts of this series without breaking down into sound effects, okay?',
+                'timestamp': 1417660321,
+                'upload_date': '20141204',
+                'uploader': 'The Rollo T',
+                'uploader_id': '407429',
+                'duration': 7251,
+                'vcodec': 'none',
+            }
+        },
     ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         lookup_id = mobj.group('lookup_id')
 
-        # See https://github.com/rg3/youtube-dl/issues/857
+        # See https://github.com/rg3/youtube-dl/issues/857 and
+        # https://github.com/rg3/youtube-dl/issues/4197
         if lookup_id:
-            info_page = self._download_webpage(
-                'http://blip.tv/play/%s.x?p=1' % lookup_id, lookup_id, 'Resolving lookup id')
-            video_id = self._search_regex(r'data-episode-id="([0-9]+)', info_page, 'video_id')
-        else:
-            video_id = mobj.group('id')
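+            # /play/ and api.swf URLs redirect to a URL whose "file" query
+            # parameter contains the canonical video URL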
+            urlh = self._request_webpage(
+                'http://blip.tv/play/%s' % lookup_id, lookup_id, 'Resolving lookup id')
+            url = compat_urlparse.urlparse(urlh.geturl())
+            qs = compat_urlparse.parse_qs(url.query)
+            mobj = re.match(self._VALID_URL, qs['file'][0])
+
+        video_id = mobj.group('id')
 
         rss = self._download_xml('http://blip.tv/rss/flash/%s' % video_id, video_id, 'Downloading video RSS')
 
@@ -98,7 +152,7 @@ class BlipTVIE(SubtitlesInfoExtractor):
             msg = self._download_webpage(
                 url + '?showplayer=20140425131715&referrer=http://blip.tv&mask=7&skin=flashvars&view=url',
                 video_id, 'Resolving URL for %s' % role)
-            real_url = compat_urlparse.parse_qs(msg)['message'][0]
+            real_url = compat_urlparse.parse_qs(msg.strip())['message'][0]
 
             media_type = media_content.get('type')
             if media_type == 'text/srt' or url.endswith('.srt'):
@@ -113,11 +167,11 @@ class BlipTVIE(SubtitlesInfoExtractor):
                     'url': real_url,
                     'format_id': role,
                     'format_note': media_type,
-                    'vcodec': media_content.get(blip('vcodec')),
+                    'vcodec': media_content.get(blip('vcodec')) or 'none',
                     'acodec': media_content.get(blip('acodec')),
                     'filesize': media_content.get('filesize'),
-                    'width': int(media_content.get('width')),
-                    'height': int(media_content.get('height')),
+                    'width': int_or_none(media_content.get('width')),
+                    'height': int_or_none(media_content.get('height')),
                 })
         self._sort_formats(formats)
 
@@ -150,9 +204,17 @@ class BlipTVIE(SubtitlesInfoExtractor):
 
 
 class BlipTVUserIE(InfoExtractor):
-    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
+    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
     _PAGE_SIZE = 12
     IE_NAME = 'blip.tv:user'
+    _TEST = {
+        'url': 'http://blip.tv/actone',
+        'info_dict': {
+            'id': 'actone',
+            'title': 'Act One: The Series',
+        },
+        'playlist_count': 5,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -163,6 +225,7 @@ class BlipTVUserIE(InfoExtractor):
         page = self._download_webpage(url, username, 'Downloading user page')
         mobj = re.search(r'data-users-id="([^"]+)"', page)
         page_base = page_base % mobj.group(1)
+        title = self._og_search_title(page)
 
         # Download video ids using BlipTV Ajax calls. Result size per
         # query is limited (currently to 12 videos) so we need to query
@@ -199,4 +262,5 @@ class BlipTVUserIE(InfoExtractor):
 
         urls = ['http://blip.tv/%s' % video_id for video_id in video_ids]
         url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
-        return [self.playlist_result(url_entries, playlist_title=username)]
+        return self.playlist_result(
+            url_entries, playlist_title=title, playlist_id=username)
diff --git a/youtube_dl/extractor/bpb.py b/youtube_dl/extractor/bpb.py
new file mode 100644
index 0000000..510813f
--- /dev/null
@@ -0,0 +1,37 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class BpbIE(InfoExtractor):
+    IE_DESC = 'Bundeszentrale für politische Bildung'
+    _VALID_URL = r'http://www\.bpb\.de/mediathek/(?P<id>[0-9]+)/'
+
+    _TEST = {
+        'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr',
+        'md5': '0792086e8e2bfbac9cdf27835d5f2093',
+        'info_dict': {
+            'id': '297',
+            'ext': 'mp4',
+            'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR',
+            'description': 'Joachim Gauck, erster Beauftragter für die Stasi-Unterlagen, spricht auf dem Geschichtsforum über die friedliche Revolution 1989 und eine "gewisse Traurigkeit" im Umgang mit der DDR-Vergangenheit.'
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(
+            r'<h2 class="white">(.*?)</h2>', webpage, 'title')
+        video_url = self._html_search_regex(
+            r'(http://film\.bpb\.de/player/dokument_[0-9]+\.mp4)',
+            webpage, 'video URL')
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': self._og_search_description(webpage),
+        }
diff --git a/youtube_dl/extractor/br.py b/youtube_dl/extractor/br.py
index 86f0c2861e35f296f594a4ac45bbfe74b799d9e0..45ba5173246575ab617dbab911280b75d61d61e8 100644
@@ -1,8 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
@@ -26,17 +24,8 @@ class BRIE(InfoExtractor):
                 'title': 'Wenn das Traditions-Theater wackelt',
                 'description': 'Heimatsound-Festival 2014: Wenn das Traditions-Theater wackelt',
                 'duration': 34,
-            }
-        },
-        {
-            'url': 'http://www.br.de/mediathek/video/sendungen/unter-unserem-himmel/unter-unserem-himmel-alpen-ueber-den-pass-100.html',
-            'md5': 'ab451b09d861dbed7d7cc9ab0be19ebe',
-            'info_dict': {
-                'id': '2c060e69-3a27-4e13-b0f0-668fac17d812',
-                'ext': 'mp4',
-                'title': 'Über den Pass',
-                'description': 'Die Eroberung der Alpen: Über den Pass',
-                'duration': 2588,
+                'uploader': 'BR',
+                'upload_date': '20140802',
             }
         },
         {
@@ -77,8 +66,7 @@ class BRIE(InfoExtractor):
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('id')
+        display_id = self._match_id(url)
         page = self._download_webpage(url, display_id)
         xml_url = self._search_regex(
             r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')
diff --git a/youtube_dl/extractor/breakcom.py b/youtube_dl/extractor/breakcom.py
index 1bfc9f35bbd5c7c929c8f21a20f7b9642d00bcb2..4bcc897c95229ea0ee509fe53443d355309a66aa 100644
@@ -4,37 +4,60 @@ import re
 import json
 
 from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    parse_age_limit,
+)
 
 
 class BreakIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)'
-    _TEST = {
+    _VALID_URL = r'http://(?:www\.)?break\.com/video/(?:[^/]+/)*.+-(?P<id>\d+)'
+    _TESTS = [{
         'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
-        'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b',
         'info_dict': {
             'id': '2468056',
             'ext': 'mp4',
             'title': 'When Girls Act Like D-Bags',
         }
-    }
+    }, {
+        'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1).split("-")[-1]
-        embed_url = 'http://www.break.com/embed/%s' % video_id
-        webpage = self._download_webpage(embed_url, video_id)
-        info_json = self._search_regex(r'var embedVars = ({.*})\s*?</script>',
-            webpage, 'info json', flags=re.DOTALL)
-        info = json.loads(info_json)
-        video_url = info['videoUri']
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(
+            'http://www.break.com/embed/%s' % video_id, video_id)
+        info = json.loads(self._search_regex(
+            r'var embedVars = ({.*})\s*?</script>',
+            webpage, 'info json', flags=re.DOTALL))
+
         youtube_id = info.get('youtubeId')
         if youtube_id:
             return self.url_result(youtube_id, 'Youtube')
 
-        final_url = video_url + '?' + info['AuthToken']
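+        # Every media URL needs the AuthToken appended as a query string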
+        formats = [{
+            'url': media['uri'] + '?' + info['AuthToken'],
+            'tbr': media['bitRate'],
+            'width': media['width'],
+            'height': media['height'],
+        } for media in info['media']]
+
+        if not formats:
+            formats.append({
+                'url': info['videoUri']
+            })
+
+        self._sort_formats(formats)
+
+        duration = int_or_none(info.get('videoLengthInSeconds'))
+        age_limit = parse_age_limit(info.get('audienceRating'))
+
         return {
             'id': video_id,
-            'url': final_url,
             'title': info['contentName'],
             'thumbnail': info['thumbUri'],
+            'duration': duration,
+            'age_limit': age_limit,
+            'formats': formats,
         }
diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 419951b6279ae87fb8f0dab1c4f5249ce221a268..bf18a97e04e59894a08f500244ec00a05b6a5306 100644
@@ -14,6 +14,7 @@ from ..utils import (
     compat_str,
     compat_urllib_request,
     compat_parse_qs,
+    compat_urllib_parse_urlparse,
 
     determine_ext,
     ExtractorError,
@@ -23,7 +24,7 @@ from ..utils import (
 
 
 class BrightcoveIE(InfoExtractor):
-    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
+    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*?\?(?P<query>.*)'
     _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
 
     _TESTS = [
@@ -87,6 +88,15 @@ class BrightcoveIE(InfoExtractor):
                 'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
             },
         },
+        {
+            # playlist test
+            # from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
+            'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
+            'info_dict': {
+                'title': 'Sealife',
+            },
+            'playlist_mincount': 7,
+        },
     ]
 
     @classmethod
@@ -101,6 +111,8 @@ class BrightcoveIE(InfoExtractor):
                             lambda m: m.group(1) + '/>', object_str)
         # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
         object_str = object_str.replace('<--', '<!--')
+        # remove namespace to simplify extraction
+        object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
         object_str = fix_xml_ampersands(object_str)
 
         object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
@@ -154,12 +166,14 @@ class BrightcoveIE(InfoExtractor):
     def _extract_brightcove_urls(cls, webpage):
         """Return a list of all Brightcove URLs from the webpage """
 
-        url_m = re.search(r'<meta\s+property="og:video"\s+content="(http://c.brightcove.com/[^"]+)"', webpage)
+        url_m = re.search(
+            r'<meta\s+property="og:video"\s+content="(https?://(?:secure|c)\.brightcove\.com/[^"]+)"',
+            webpage)
         if url_m:
             url = unescapeHTML(url_m.group(1))
             # Some sites don't add it, we can't download with this url, for example:
             # http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
-            if 'playerKey' in url:
+            if 'playerKey' in url or 'videoId' in url:
                 return [url]
 
         matches = re.findall(
@@ -188,9 +202,13 @@ class BrightcoveIE(InfoExtractor):
             referer = smuggled_data.get('Referer', url)
             return self._get_video_info(
                 videoPlayer[0], query_str, query, referer=referer)
-        else:
+        elif 'playerKey' in query:
             player_key = query['playerKey']
             return self._get_playlist_info(player_key[0])
+        else:
+            raise ExtractorError(
+                'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
+                expected=True)
 
     def _get_video_info(self, video_id, query_str, query, referer=None):
         request_url = self._FEDERATED_URL_TEMPLATE % query_str
@@ -202,6 +220,13 @@ class BrightcoveIE(InfoExtractor):
             req.add_header('Referer', referer)
         webpage = self._download_webpage(req, video_id)
 
+        error_msg = self._html_search_regex(
+            r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
+            'error message', default=None)
+        if error_msg is not None:
+            raise ExtractorError(
+                'brightcove said: %s' % error_msg, expected=True)
+
         self.report_extraction(video_id)
         info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
         info = json.loads(info)['data']
@@ -238,12 +263,21 @@ class BrightcoveIE(InfoExtractor):
             formats = []
             for rend in renditions:
                 url = rend['defaultURL']
+                if not url:
+                    continue
+                ext = None
                 if rend['remote']:
-                    # This type of renditions are served through akamaihd.net,
-                    # but they don't use f4m manifests
-                    url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
-                    ext = 'flv'
-                else:
+                    url_comp = compat_urllib_parse_urlparse(url)
+                    if url_comp.path.endswith('.m3u8'):
+                        formats.extend(
+                            self._extract_m3u8_formats(url, info['id'], 'mp4'))
+                        continue
+                    elif 'akamaihd.net' in url_comp.netloc:
+                        # These renditions are served through akamaihd.net,
+                        # but they don't use f4m manifests
+                        url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
+                        ext = 'flv'
+                if ext is None:
                     ext = determine_ext(url)
                 size = rend.get('size')
                 formats.append({
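
The rewritten rendition loop above dispatches on the parsed URL instead of
trusting the 'remote' flag alone. A minimal standalone sketch of that
dispatch, using only the standard library (the handler labels are
illustrative, not youtube-dl API):

    try:
        from urllib.parse import urlparse  # Python 3
    except ImportError:
        from urlparse import urlparse  # Python 2

    def classify_rendition(url):
        # HLS manifests go to the m3u8 extractor; Akamai-hosted files get
        # the extra query arguments; everything else is progressive.
        parts = urlparse(url)
        if parts.path.endswith('.m3u8'):
            return 'hls'
        if 'akamaihd.net' in parts.netloc:
            return 'akamai-flv'
        return 'progressive'

    assert classify_rendition('http://cdn.example.com/a/master.m3u8') == 'hls'
    assert classify_rendition('http://foo.akamaihd.net/control/a.flv') == 'akamai-flv'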
diff --git a/youtube_dl/extractor/buzzfeed.py b/youtube_dl/extractor/buzzfeed.py
new file mode 100644 (file)
index 0000000..a40a1bb
--- /dev/null
@@ -0,0 +1,74 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+
+
+class BuzzFeedIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?buzzfeed\.com/[^?#]*?/(?P<id>[^?#]+)'
+    _TESTS = [{
+        'url': 'http://www.buzzfeed.com/abagg/this-angry-ram-destroys-a-punching-bag-like-a-boss?utm_term=4ldqpia',
+        'info_dict': {
+            'id': 'this-angry-ram-destroys-a-punching-bag-like-a-boss',
+            'title': 'This Angry Ram Destroys A Punching Bag Like A Boss',
+            'description': 'Rambro!',
+        },
+        'playlist': [{
+            'info_dict': {
+                'id': 'aVCR29aE_OQ',
+                'ext': 'mp4',
+                'upload_date': '20141024',
+                'uploader_id': 'Buddhanz1',
+                'description': 'He likes to stay in shape with his heavy bag, he wont stop until its on the ground\n\nFollow Angry Ram on Facebook for regular updates -\nhttps://www.facebook.com/pages/Angry-Ram/1436897249899558?ref=hl',
+                'uploader': 'Buddhanz',
+                'title': 'Angry Ram destroys a punching bag',
+            }
+        }]
+    }, {
+        'url': 'http://www.buzzfeed.com/sheridanwatson/look-at-this-cute-dog-omg?utm_term=4ldqpia',
+        'params': {
+            'skip_download': True,  # Got enough YouTube download tests
+        },
+        'info_dict': {
+            'description': 'Munchkin the Teddy Bear is back !',
+            'title': 'You Need To Stop What You\'re Doing And Watching This Dog Walk On A Treadmill',
+        },
+        'playlist': [{
+            'info_dict': {
+                'id': 'mVmBL8B-In0',
+                'ext': 'mp4',
+                'upload_date': '20141124',
+                'uploader_id': 'CindysMunchkin',
+                'description': '© 2014 Munchkin the Shih Tzu\nAll rights reserved\nFacebook: http://facebook.com/MunchkintheShihTzu',
+                'uploader': 'Munchkin the Shih Tzu',
+                'title': 'Munchkin the Teddy Bear gets her exercise',
+            },
+        }]
+    }]
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+        webpage = self._download_webpage(url, playlist_id)
+
+        all_buckets = re.findall(
+            r'(?s)<div class="video-embed[^"]*"..*?rel:bf_bucket_data=\'([^\']+)\'',
+            webpage)
+
+        entries = []
+        for bd_json in all_buckets:
+            bd = json.loads(bd_json)
+            video = bd.get('video') or bd.get('progload_video')
+            if not video:
+                continue
+            entries.append(self.url_result(video['url']))
+
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'title': self._og_search_title(webpage),
+            'description': self._og_search_description(webpage),
+            'entries': entries,
+        }
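
The extractor above recovers its entries from JSON stashed in an HTML
attribute. A toy version of the same pattern (markup and key names invented
for illustration):

    import json
    import re

    page = '<div class="video-embed" rel:bf_bucket_data=\'{"video": {"url": "https://youtu.be/xyz"}}\'></div>'
    buckets = re.findall(r"rel:bf_bucket_data='([^']+)'", page)
    urls = [json.loads(b)['video']['url'] for b in buckets]
    assert urls == ['https://youtu.be/xyz']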
diff --git a/youtube_dl/extractor/byutv.py b/youtube_dl/extractor/byutv.py
index cf19b7b0cf952c3b14d9ef5b91f541332d3e5e69..6252be05b7f4b57787152b4edae5378675a96847 100644 (file)
@@ -10,12 +10,12 @@ from ..utils import ExtractorError
 class BYUtvIE(InfoExtractor):
     _VALID_URL = r'^https?://(?:www\.)?byutv.org/watch/[0-9a-f-]+/(?P<video_id>[^/?#]+)'
     _TEST = {
-        'url': 'http://www.byutv.org/watch/44e80f7b-e3ba-43ba-8c51-b1fd96c94a79/granite-flats-talking',
+        'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
         'info_dict': {
-            'id': 'granite-flats-talking',
+            'id': 'studio-c-season-5-episode-5',
             'ext': 'mp4',
-            'description': 'md5:4e9a7ce60f209a33eca0ac65b4918e1c',
-            'title': 'Talking',
+            'description': 'md5:5438d33774b6bdc662f9485a340401cc',
+            'title': 'Season 5 Episode 5',
             'thumbnail': 're:^https?://.*promo.*'
         },
         'params': {
diff --git a/youtube_dl/extractor/canalplus.py b/youtube_dl/extractor/canalplus.py
index 0202078b0cdcef31f300e1419eed6fc38fa7b424..9873728df6f3bb1adbfddc1959aa5e7e70241f5b 100644 (file)
@@ -7,15 +7,21 @@ from .common import InfoExtractor
 from ..utils import (
     unified_strdate,
     url_basename,
+    qualities,
 )
 
 
 class CanalplusIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
-    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s'
-    IE_NAME = 'canalplus.fr'
+    IE_DESC = 'canalplus.fr, piwiplus.fr and d8.tv'
+    _VALID_URL = r'https?://(?:www\.(?P<site>canalplus\.fr|piwiplus\.fr|d8\.tv)/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
+    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s'
+    _SITE_ID_MAP = {
+        'canalplus.fr': 'cplus',
+        'piwiplus.fr': 'teletoon',
+        'd8.tv': 'd8',
+    }
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
         'md5': '3db39fb48b9685438ecf33a1078023e4',
         'info_dict': {
@@ -25,36 +31,73 @@ class CanalplusIE(InfoExtractor):
             'description': 'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
             'upload_date': '20130826',
         },
-    }
+    }, {
+        'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
+        'info_dict': {
+            'id': '1108190',
+            'ext': 'flv',
+            'title': 'Le labyrinthe - Boing super ranger',
+            'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff',
+            'upload_date': '20140724',
+        },
+        'skip': 'Only works from France',
+    }, {
+        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
+        'info_dict': {
+            'id': '966289',
+            'ext': 'flv',
+            'title': 'Campagne intime - Documentaire exceptionnel',
+            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
+            'upload_date': '20131108',
+        },
+        'skip': 'videos get deleted after a while',
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.groupdict().get('id')
 
+        site_id = self._SITE_ID_MAP[mobj.group('site') or 'canalplus.fr']
+
         # Beware, some subclasses do not define an id group
         display_id = url_basename(mobj.group('path'))
 
         if video_id is None:
             webpage = self._download_webpage(url, display_id)
-            video_id = self._search_regex(r'<canal:player videoId="(\d+)"', webpage, 'video id')
+            video_id = self._search_regex(
+                r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id')
 
-        info_url = self._VIDEO_INFO_TEMPLATE % video_id
+        info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
         doc = self._download_xml(info_url, video_id, 'Downloading video XML')
 
         video_info = [video for video in doc if video.find('ID').text == video_id][0]
         media = video_info.find('MEDIA')
         infos = video_info.find('INFOS')
 
-        preferences = ['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS']
+        preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
 
-        formats = [
-            {
-                'url': fmt.text + '?hdcore=2.11.3' if fmt.tag == 'HDS' else fmt.text,
-                'format_id': fmt.tag,
-                'ext': 'mp4' if fmt.tag == 'HLS' else 'flv',
-                'preference': preferences.index(fmt.tag) if fmt.tag in preferences else -1,
-            } for fmt in media.find('VIDEOS') if fmt.text
-        ]
+        formats = []
+        for fmt in media.find('VIDEOS'):
+            format_url = fmt.text
+            if not format_url:
+                continue
+            format_id = fmt.tag
+            if format_id == 'HLS':
+                hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv')
+                for fmt in hls_formats:
+                    fmt['preference'] = preference(format_id)
+                formats.extend(hls_formats)
+            elif format_id == 'HDS':
+                hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id)
+                for fmt in hds_formats:
+                    fmt['preference'] = preference(format_id)
+                formats.extend(hds_formats)
+            else:
+                formats.append({
+                    'url': format_url,
+                    'format_id': format_id,
+                    'preference': preference(format_id),
+                })
         self._sort_formats(formats)
 
         return {
@@ -69,4 +112,4 @@ class CanalplusIE(InfoExtractor):
             'like_count': int(infos.find('NB_LIKES').text),
             'comment_count': int(infos.find('NB_COMMENTS').text),
             'formats': formats,
-        }
\ No newline at end of file
+        }
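
The switch from a bare preferences.index(...) to qualities() is a
readability refactor: qualities() (from youtube_dl/utils.py) packages the
same index-or--1 lookup the old list comprehension did by hand into a
reusable function, which the new loop can also apply to the expanded
HLS/HDS formats. Roughly:

    from youtube_dl.utils import qualities

    preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
    preference('MOBILE')   # 0  (lowest)
    preference('HDS')      # 5  (highest)
    preference('UNKNOWN')  # -1 (unlisted ids sort below everything listed)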
diff --git a/youtube_dl/extractor/cbs.py b/youtube_dl/extractor/cbs.py
index 822f9a7be1e1c9df23ca0e8fc164a883f706cba1..e43756ec69b1d7f1872e45cc6901b41752ec6ef6 100644 (file)
@@ -25,7 +25,7 @@ class CBSIE(InfoExtractor):
     }, {
         'url': 'http://www.cbs.com/shows/liveonletterman/artist/221752/st-vincent/',
         'info_dict': {
-            'id': 'P9gjWjelt6iP',
+            'id': 'WWF_5KqY3PK1',
             'ext': 'flv',
             'title': 'Live on Letterman - St. Vincent',
             'description': 'Live On Letterman: St. Vincent in concert from New York\'s Ed Sullivan Theater on Tuesday, July 16, 2014.',
@@ -45,4 +45,4 @@ class CBSIE(InfoExtractor):
         real_id = self._search_regex(
             r"video\.settings\.pid\s*=\s*'([^']+)';",
             webpage, 'real video ID')
-        return self.url_result(u'theplatform:%s' % real_id)
+        return self.url_result('theplatform:%s' % real_id)
diff --git a/youtube_dl/extractor/cbsnews.py b/youtube_dl/extractor/cbsnews.py
index 0bce7937f830c1bc8178b88f95d619693edf7ae3..7e47960ab08f3ffba7a1e596b34c2fbfc7fd6c59 100644 (file)
@@ -84,4 +84,4 @@ class CBSNewsIE(InfoExtractor):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py
index 90a3dddb9b79a875c57942a8dcb58951aa973090..97feb6704075831fb8b5ef95a547428ecc57ec3f 100644 (file)
@@ -92,7 +92,7 @@ class CeskaTelevizeIE(InfoExtractor):
         req.add_header('Referer', url)
 
         playlist = self._download_xml(req, video_id)
-        
+
         formats = []
         for i in playlist.find('smilRoot/body'):
             if 'AD' not in i.attrib['id']:
diff --git a/youtube_dl/extractor/channel9.py b/youtube_dl/extractor/channel9.py
index 4f000292b7c4273c40df11252852986df08f5e01..3dfc24f5ba447ea92858e89868ad3684caf3a6d2 100644 (file)
@@ -5,6 +5,7 @@ import re
 from .common import InfoExtractor
 from ..utils import ExtractorError
 
+
 class Channel9IE(InfoExtractor):
     '''
     Common extractor for channel9.msdn.com.
@@ -27,11 +28,11 @@ class Channel9IE(InfoExtractor):
                 'title': 'Developer Kick-Off Session: Stuff We Love',
                 'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
                 'duration': 4576,
-                'thumbnail': 'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
+                'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
                 'session_code': 'KOS002',
                 'session_day': 'Day 1',
                 'session_room': 'Arena 1A',
-                'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
+                'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
             },
         },
         {
@@ -43,8 +44,8 @@ class Channel9IE(InfoExtractor):
                 'title': 'Self-service BI with Power BI - nuclear testing',
                 'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
                 'duration': 1540,
-                'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
-                'authors': [ 'Mike Wilmot' ],
+                'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
+                'authors': ['Mike Wilmot'],
             },
         }
     ]
@@ -83,7 +84,7 @@ class Channel9IE(InfoExtractor):
             'format_id': x.group('quality'),
             'format_note': x.group('note'),
             'format': '%s (%s)' % (x.group('quality'), x.group('note')),
-            'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
+            'filesize': self._restore_bytes(x.group('filesize')),  # File size is approximate
             'preference': self._known_formats.index(x.group('quality')),
             'vcodec': 'none' if x.group('note') == 'Audio only' else None,
         } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
@@ -94,7 +95,7 @@ class Channel9IE(InfoExtractor):
 
     def _extract_title(self, html):
         title = self._html_search_meta('title', html, 'title')
-        if title is None:           
+        if title is None:
             title = self._og_search_title(html)
             TITLE_SUFFIX = ' (Channel 9)'
             if title is not None and title.endswith(TITLE_SUFFIX):
@@ -115,7 +116,7 @@ class Channel9IE(InfoExtractor):
         return self._html_search_meta('description', html, 'description')
 
     def _extract_duration(self, html):
-        m = re.search(r'data-video_duration="(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
+        m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
         return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None
 
     def _extract_slides(self, html):
@@ -167,7 +168,7 @@ class Channel9IE(InfoExtractor):
         return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html)
 
     def _extract_content(self, html, content_path):
-        # Look for downloadable content        
+        # Look for downloadable content
         formats = self._formats_from_html(html)
         slides = self._extract_slides(html)
         zip_ = self._extract_zip(html)
@@ -187,32 +188,33 @@ class Channel9IE(InfoExtractor):
         view_count = self._extract_view_count(html)
         comment_count = self._extract_comment_count(html)
 
-        common = {'_type': 'video',
-                  'id': content_path,
-                  'description': description,
-                  'thumbnail': thumbnail,
-                  'duration': duration,
-                  'avg_rating': avg_rating,
-                  'rating_count': rating_count,
-                  'view_count': view_count,
-                  'comment_count': comment_count,
-                }
+        common = {
+            '_type': 'video',
+            'id': content_path,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'avg_rating': avg_rating,
+            'rating_count': rating_count,
+            'view_count': view_count,
+            'comment_count': comment_count,
+        }
 
         result = []
 
         if slides is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Slides', 'url': slides })
+            d.update({'title': title + '-Slides', 'url': slides})
             result.append(d)
 
         if zip_ is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Zip', 'url': zip_ })
+            d.update({'title': title + '-Zip', 'url': zip_})
             result.append(d)
 
         if len(formats) > 0:
             d = common.copy()
-            d.update({ 'title': title, 'formats': formats })
+            d.update({'title': title, 'formats': formats})
             result.append(d)
 
         return result
@@ -234,16 +236,17 @@ class Channel9IE(InfoExtractor):
         if contents is None:
             return contents
 
-        session_meta = {'session_code': self._extract_session_code(html),
-                        'session_day': self._extract_session_day(html),
-                        'session_room': self._extract_session_room(html),
-                        'session_speakers': self._extract_session_speakers(html),
-                        }
+        session_meta = {
+            'session_code': self._extract_session_code(html),
+            'session_day': self._extract_session_day(html),
+            'session_room': self._extract_session_room(html),
+            'session_speakers': self._extract_session_speakers(html),
+        }
 
         for content in contents:
             content.update(session_meta)
 
-        return contents
+        return self.playlist_result(contents)
 
     def _extract_list(self, content_path):
         rss = self._download_xml(self._RSS_URL % content_path, content_path, 'Downloading RSS')
@@ -258,16 +261,17 @@ class Channel9IE(InfoExtractor):
 
         webpage = self._download_webpage(url, content_path, 'Downloading web page')
 
-        page_type_m = re.search(r'<meta name="Search.PageType" content="(?P<pagetype>[^"]+)"/>', webpage)
-        if page_type_m is None:
-            raise ExtractorError('Search.PageType not found, don\'t know how to process this page', expected=True)
-
-        page_type = page_type_m.group('pagetype')
-        if page_type == 'List':         # List page, may contain list of 'item'-like objects
+        page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
+        if page_type_m is not None:
+            page_type = page_type_m.group('pagetype')
+            if page_type == 'Entry':      # Any 'item'-like page, may contain downloadable content
+                return self._extract_entry_item(webpage, content_path)
+            elif page_type == 'Session':  # Event session page, may contain downloadable content
+                return self._extract_session(webpage, content_path)
+            elif page_type == 'Event':
+                return self._extract_list(content_path)
+            else:
+                raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
+
+        else:  # Assuming list
             return self._extract_list(content_path)
-        elif page_type == 'Entry.Item': # Any 'item'-like page, may contain downloadable content
-            return self._extract_entry_item(webpage, content_path)
-        elif page_type == 'Session':    # Event session page, may contain downloadable content
-            return self._extract_session(webpage, content_path)
-        else:
-            raise ExtractorError('Unexpected Search.PageType %s' % page_type, expected=True)
\ No newline at end of file
diff --git a/youtube_dl/extractor/chilloutzone.py b/youtube_dl/extractor/chilloutzone.py
index a62395d4b727ce917f1ea946b63940b3f52b6bdd..c922f695905d70e4052ddfa5c8f336c01221413b 100644 (file)
@@ -42,7 +42,7 @@ class ChilloutzoneIE(InfoExtractor):
             'id': '85523671',
             'ext': 'mp4',
             'title': 'The Sunday Times - Icons',
-            'description': 'md5:a5f7ff82e2f7a9ed77473fe666954e84',
+            'description': 're:(?s)^Watch the making of - makingoficons.com.{300,}',
             'uploader': 'Us',
             'uploader_id': 'usfilms',
             'upload_date': '20140131'
diff --git a/youtube_dl/extractor/cinchcast.py b/youtube_dl/extractor/cinchcast.py
new file mode 100644 (file)
index 0000000..0c9a24b
--- /dev/null
@@ -0,0 +1,52 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    unified_strdate,
+    xpath_text,
+)
+
+
+class CinchcastIE(InfoExtractor):
+    _VALID_URL = r'https?://player\.cinchcast\.com/.*?assetId=(?P<id>[0-9]+)'
+    _TEST = {
+        # Actual test is run in generic, look for undergroundwellness
+        'url': 'http://player.cinchcast.com/?platformId=1&#038;assetType=single&#038;assetId=7141703',
+        'only_matching': True,
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        doc = self._download_xml(
+            'http://www.blogtalkradio.com/playerasset/mrss?assetType=single&assetId=%s' % video_id,
+            video_id)
+
+        item = doc.find('.//item')
+        title = xpath_text(item, './title', fatal=True)
+        date_str = xpath_text(
+            item, './{http://developer.longtailvideo.com/trac/}date')
+        upload_date = unified_strdate(date_str, day_first=False)
+        # duration is present but wrong
+        formats = []
+        formats.append({
+            'format_id': 'main',
+            'url': item.find(
+                './{http://search.yahoo.com/mrss/}content').attrib['url'],
+        })
+        backup_url = xpath_text(
+            item, './{http://developer.longtailvideo.com/trac/}backupContent')
+        if backup_url:
+            formats.append({
+                'preference': 2,  # seems to be more reliable
+                'format_id': 'backup',
+                'url': backup_url,
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'upload_date': upload_date,
+            'formats': formats,
+        }
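
The feed parsed above mixes Media RSS and Longtail (JW Player) namespaces,
which is why the lookups use fully qualified {namespace}tag names. The same
mechanism with the standard library alone, against an invented feed snippet:

    import xml.etree.ElementTree as ET

    feed = '''<rss xmlns:media="http://search.yahoo.com/mrss/">
      <channel><item>
        <title>Episode 1</title>
        <media:content url="http://example.com/a.mp3"/>
      </item></channel>
    </rss>'''
    item = ET.fromstring(feed).find('.//item')
    # ElementTree expands prefixes to {uri}tag, so lookups spell out the URI:
    url = item.find('./{http://search.yahoo.com/mrss/}content').attrib['url']
    assert url == 'http://example.com/a.mp3'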
diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py
deleted file mode 100644 (file)
index 496271b..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-# encoding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    int_or_none,
-)
-
-
-class CinemassacreIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?cinemassacre\.com/(?P<date_Y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/(?P<display_id>[^?#/]+)'
-    _TESTS = [
-        {
-            'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
-            'md5': 'fde81fbafaee331785f58cd6c0d46190',
-            'info_dict': {
-                'id': '19911',
-                'ext': 'mp4',
-                'upload_date': '20121110',
-                'title': '“Angry Video Game Nerd: The Movie” – Trailer',
-                'description': 'md5:fb87405fcb42a331742a0dce2708560b',
-            },
-        },
-        {
-            'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
-            'md5': 'd72f10cd39eac4215048f62ab477a511',
-            'info_dict': {
-                'id': '521be8ef82b16',
-                'ext': 'mp4',
-                'upload_date': '20131002',
-                'title': 'The Mummy’s Hand (1940)',
-            },
-        }
-    ]
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('display_id')
-
-        webpage = self._download_webpage(url, display_id)
-        video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
-        mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
-        if not mobj:
-            raise ExtractorError('Can\'t extract embed url and video id')
-        playerdata_url = mobj.group('embed_url')
-        video_id = mobj.group('video_id')
-
-        video_title = self._html_search_regex(
-            r'<title>(?P<title>.+?)\|', webpage, 'title')
-        video_description = self._html_search_regex(
-            r'<div class="entry-content">(?P<description>.+?)</div>',
-            webpage, 'description', flags=re.DOTALL, fatal=False)
-
-        playerdata = self._download_webpage(playerdata_url, video_id, 'Downloading player webpage')
-        video_thumbnail = self._search_regex(
-            r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False)
-        sd_url = self._search_regex(r'file: \'([^\']+)\', label: \'SD\'', playerdata, 'sd_file')
-        videolist_url = self._search_regex(r'file: \'([^\']+\.smil)\'}', playerdata, 'videolist_url')
-
-        videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
-
-        formats = []
-        baseurl = sd_url[:sd_url.rfind('/')+1]
-        for video in videolist.findall('.//video'):
-            src = video.get('src')
-            if not src:
-                continue
-            file_ = src.partition(':')[-1]
-            width = int_or_none(video.get('width'))
-            height = int_or_none(video.get('height'))
-            bitrate = int_or_none(video.get('system-bitrate'))
-            format = {
-                'url': baseurl + file_,
-                'format_id': src.rpartition('.')[0].rpartition('_')[-1],
-            }
-            if width or height:
-                format.update({
-                    'tbr': bitrate // 1000 if bitrate else None,
-                    'width': width,
-                    'height': height,
-                })
-            else:
-                format.update({
-                    'abr': bitrate // 1000 if bitrate else None,
-                    'vcodec': 'none',
-                })
-            formats.append(format)
-        self._sort_formats(formats)
-
-        return {
-            'id': video_id,
-            'title': video_title,
-            'formats': formats,
-            'description': video_description,
-            'upload_date': video_date,
-            'thumbnail': video_thumbnail,
-        }
diff --git a/youtube_dl/extractor/clipfish.py b/youtube_dl/extractor/clipfish.py
index 669919a2cc9039ffb91ae052b96d5531665341e0..a5c3cb7c6253776062fb5834d0fe4c121dfb9c99 100644 (file)
@@ -24,7 +24,7 @@ class ClipfishIE(InfoExtractor):
             'title': 'FIFA 14 - E3 2013 Trailer',
             'duration': 82,
         },
-        u'skip': 'Blocked in the US'
+        'skip': 'Blocked in the US'
     }
 
     def _real_extract(self, url):
@@ -34,7 +34,7 @@ class ClipfishIE(InfoExtractor):
         info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
                     (video_id, int(time.time())))
         doc = self._download_xml(
-            info_url, video_id, note=u'Downloading info page')
+            info_url, video_id, note='Downloading info page')
         title = doc.find('title').text
         video_url = doc.find('filename').text
         if video_url is None:
diff --git a/youtube_dl/extractor/cliphunter.py b/youtube_dl/extractor/cliphunter.py
index 58846e8e7cfacc631015ce32ace23cbd9b8a2576..2edab90a33d553225b8c790b8d391f0e40b55cf8 100644 (file)
@@ -1,11 +1,12 @@
 from __future__ import unicode_literals
 
+import json
 import re
 
 from .common import InfoExtractor
 
 
-translation_table = {
+_translation_table = {
     'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n',
     'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r',
     'y': 'l', 'z': 'i',
@@ -13,6 +14,10 @@ translation_table = {
 }
 
 
+def _decode(s):
+    return ''.join(_translation_table.get(c, c) for c in s)
+
+
 class CliphunterIE(InfoExtractor):
     IE_NAME = 'cliphunter'
 
@@ -22,35 +27,66 @@ class CliphunterIE(InfoExtractor):
     '''
     _TEST = {
         'url': 'http://www.cliphunter.com/w/1012420/Fun_Jynx_Maze_solo',
-        'file': '1012420.flv',
-        'md5': '15e7740f30428abf70f4223478dc1225',
+        'md5': 'a2ba71eebf523859fe527a61018f723e',
         'info_dict': {
+            'id': '1012420',
+            'ext': 'mp4',
             'title': 'Fun Jynx Maze solo',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'age_limit': 18,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
+        video_title = self._search_regex(
+            r'mediaTitle = "([^"]+)"', webpage, 'title')
+
         pl_fiji = self._search_regex(
             r'pl_fiji = \'([^\']+)\'', webpage, 'video data')
         pl_c_qual = self._search_regex(
             r'pl_c_qual = "(.)"', webpage, 'video quality')
-        video_title = self._search_regex(
-            r'mediaTitle = "([^"]+)"', webpage, 'title')
-
-        video_url = ''.join(translation_table.get(c, c) for c in pl_fiji)
-
+        video_url = _decode(pl_fiji)
         formats = [{
             'url': video_url,
-            'format_id': pl_c_qual,
+            'format_id': 'default-%s' % pl_c_qual,
         }]
 
+        qualities_json = self._search_regex(
+            r'var pl_qualities\s*=\s*(.*?);\n', webpage, 'quality info')
+        qualities_data = json.loads(qualities_json)
+
+        for i, t in enumerate(
+                re.findall(r"pl_fiji_([a-z0-9]+)\s*=\s*'([^']+')", webpage)):
+            quality_id, crypted_url = t
+            video_url = _decode(crypted_url)
+            f = {
+                'format_id': quality_id,
+                'url': video_url,
+                'quality': i,
+            }
+            if quality_id in qualities_data:
+                qd = qualities_data[quality_id]
+                m = re.match(
+                    r'''(?x)<b>(?P<width>[0-9]+)x(?P<height>[0-9]+)<\\/b>
+                        \s*\(\s*(?P<tbr>[0-9]+)\s*kb\\/s''', qd)
+                if m:
+                    f['width'] = int(m.group('width'))
+                    f['height'] = int(m.group('height'))
+                    f['tbr'] = int(m.group('tbr'))
+            formats.append(f)
+        self._sort_formats(formats)
+
+        thumbnail = self._search_regex(
+            r"var\s+mov_thumb\s*=\s*'([^']+)';",
+            webpage, 'thumbnail', fatal=False)
+
         return {
             'id': video_id,
             'title': video_title,
             'formats': formats,
+            'age_limit': self._rta_search(webpage),
+            'thumbnail': thumbnail,
         }
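
_decode above undoes a fixed letter-substitution cipher; characters missing
from _translation_table pass through unchanged. A self-contained worked
example using a trimmed copy of the table (the three mappings shown match
the full table above):

    _translation_table = {'a': 'h', 'q': 't', 'v': 'p'}

    def _decode(s):
        return ''.join(_translation_table.get(c, c) for c in s)

    assert _decode('aqqv://') == 'http://'  # ':' and '/' pass through as-is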
diff --git a/youtube_dl/extractor/clipsyndicate.py b/youtube_dl/extractor/clipsyndicate.py
index 02a1667fa3fbf7cbe1a822db7b82f9c087864249..d07d544eaf7742bb782a8b367a09561f14c44a4e 100644 (file)
@@ -39,6 +39,7 @@ class ClipsyndicateIE(InfoExtractor):
             transform_source=fix_xml_ampersands)
 
         track_doc = pdoc.find('trackList/track')
+
         def find_param(name):
             node = find_xpath_attr(track_doc, './/param', 'name', name)
             if node is not None:
diff --git a/youtube_dl/extractor/cloudy.py b/youtube_dl/extractor/cloudy.py
new file mode 100644 (file)
index 0000000..abf8cc2
--- /dev/null
@@ -0,0 +1,110 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_parse_qs,
+    compat_urllib_parse,
+    compat_HTTPError,
+)
+from ..utils import (
+    ExtractorError,
+    HEADRequest,
+    remove_end,
+)
+
+
+class CloudyIE(InfoExtractor):
+    IE_DESC = 'cloudy.ec and videoraj.ch'
+    _VALID_URL = r'''(?x)
+        https?://(?:www\.)?(?P<host>cloudy\.ec|videoraj\.ch)/
+        (?:v/|embed\.php\?id=)
+        (?P<id>[A-Za-z0-9]+)
+        '''
+    _EMBED_URL = 'http://www.%s/embed.php?id=%s'
+    _API_URL = 'http://www.%s/api/player.api.php?%s'
+    _MAX_TRIES = 2
+    _TESTS = [
+        {
+            'url': 'https://www.cloudy.ec/v/af511e2527aac',
+            'md5': '5cb253ace826a42f35b4740539bedf07',
+            'info_dict': {
+                'id': 'af511e2527aac',
+                'ext': 'flv',
+                'title': 'Funny Cats and Animals Compilation june 2013',
+            }
+        },
+        {
+            'url': 'http://www.videoraj.ch/v/47f399fd8bb60',
+            'md5': '7d0f8799d91efd4eda26587421c3c3b0',
+            'info_dict': {
+                'id': '47f399fd8bb60',
+                'ext': 'flv',
+                'title': 'Burning a New iPhone 5 with Gasoline - Will it Survive?',
+            }
+        }
+    ]
+
+    def _extract_video(self, video_host, video_id, file_key, error_url=None, try_num=0):
+
+        if try_num > self._MAX_TRIES - 1:
+            raise ExtractorError('Unable to extract video URL', expected=True)
+
+        form = {
+            'file': video_id,
+            'key': file_key,
+        }
+
+        if error_url:
+            form.update({
+                'numOfErrors': try_num,
+                'errorCode': '404',
+                'errorUrl': error_url,
+            })
+
+        data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form))
+        player_data = self._download_webpage(
+            data_url, video_id, 'Downloading player data')
+        data = compat_parse_qs(player_data)
+
+        try_num += 1
+
+        if 'error' in data:
+            raise ExtractorError(
+                '%s error: %s' % (self.IE_NAME, ' '.join(data['error_msg'])),
+                expected=True)
+
+        title = data.get('title', [None])[0]
+        if title:
+            title = remove_end(title, '&asdasdas').strip()
+
+        video_url = data.get('url', [None])[0]
+
+        if video_url:
+            try:
+                self._request_webpage(HEADRequest(video_url), video_id, 'Checking video URL')
+            except ExtractorError as e:
+                if isinstance(e.cause, compat_HTTPError) and e.cause.code in [404, 410]:
+                    self.report_warning('Invalid video URL, requesting another', video_id)
+                    return self._extract_video(video_host, video_id, file_key, video_url, try_num)
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+        }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_host = mobj.group('host')
+        video_id = mobj.group('id')
+
+        url = self._EMBED_URL % (video_host, video_id)
+        webpage = self._download_webpage(url, video_id)
+
+        file_key = self._search_regex(
+            r'filekey\s*=\s*"([^"]+)"', webpage, 'file_key')
+
+        return self._extract_video(video_host, video_id, file_key)
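
_extract_video above retries by recursing with the failing URL and a bumped
counter, capped by _MAX_TRIES. The bare skeleton of that pattern, with the
site specifics replaced by caller-supplied callables (a sketch, not
youtube-dl API):

    MAX_TRIES = 2

    def fetch(get_url, is_alive, error_url=None, try_num=0):
        # Ask the API for a URL; if it fails the liveness check, report
        # the dead URL back and try again, up to MAX_TRIES attempts.
        if try_num > MAX_TRIES - 1:
            raise RuntimeError('giving up after %d attempts' % try_num)
        url = get_url(error_url, try_num)
        if not is_alive(url):
            return fetch(get_url, is_alive, url, try_num + 1)
        return url

    # Toy run: the first URL is dead, the retry succeeds.
    urls = iter(['http://cdn/dead.flv', 'http://cdn/live.flv'])
    print(fetch(lambda err, n: next(urls), lambda u: u.endswith('live.flv')))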
diff --git a/youtube_dl/extractor/cnet.py b/youtube_dl/extractor/cnet.py
index 710d5009b71aafe0da901771048b8c0ba68def04..3145b30514ea2a075f92077b9f87b64c9e8820a7 100644 (file)
@@ -2,12 +2,10 @@
 from __future__ import unicode_literals
 
 import json
-import re
 
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
-    int_or_none,
 )
 
 
@@ -15,23 +13,24 @@ class CNETIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?cnet\.com/videos/(?P<id>[^/]+)/'
     _TEST = {
         'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/',
-        'md5': '041233212a0d06b179c87cbcca1577b8',
         'info_dict': {
             'id': '56f4ea68-bd21-4852-b08c-4de5b8354c60',
-            'ext': 'mp4',
+            'ext': 'flv',
             'title': 'Hands-on with Microsoft Windows 8.1 Update',
             'description': 'The new update to the Windows 8 OS brings improved performance for mouse and keyboard users.',
             'thumbnail': 're:^http://.*/flmswindows8.jpg$',
-            'uploader_id': 'sarah.mitroff@cbsinteractive.com',
+            'uploader_id': '6085384d-619e-11e3-b231-14feb5ca9861',
             'uploader': 'Sarah Mitroff',
+        },
+        'params': {
+            'skip_download': 'requires rtmpdump',
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('id')
-
+        display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
+
         data_json = self._html_search_regex(
             r"<div class=\"cnetVideoPlayer\"\s+.*?data-cnet-video-options='([^']+)'",
             webpage, 'data json')
@@ -42,37 +41,31 @@ class CNETIE(InfoExtractor):
         if not vdata:
             raise ExtractorError('Cannot find video data')
 
+        mpx_account = data['config']['players']['default']['mpx_account']
+        vid = vdata['files']['rtmp']
+        tp_link = 'http://link.theplatform.com/s/%s/%s' % (mpx_account, vid)
+
         video_id = vdata['id']
         title = vdata.get('headline')
         if title is None:
             title = vdata.get('title')
         if title is None:
             raise ExtractorError('Cannot find title!')
-        description = vdata.get('dek')
         thumbnail = vdata.get('image', {}).get('path')
         author = vdata.get('author')
         if author:
             uploader = '%s %s' % (author['firstName'], author['lastName'])
-            uploader_id = author.get('email')
+            uploader_id = author.get('id')
         else:
             uploader = None
             uploader_id = None
 
-        formats = [{
-            'format_id': '%s-%s-%s' % (
-                f['type'], f['format'],
-                int_or_none(f.get('bitrate'), 1000, default='')),
-            'url': f['uri'],
-            'tbr': int_or_none(f.get('bitrate'), 1000),
-        } for f in vdata['files']['data']]
-        self._sort_formats(formats)
-
         return {
+            '_type': 'url_transparent',
+            'url': tp_link,
             'id': video_id,
             'display_id': display_id,
             'title': title,
-            'formats': formats,
-            'description': description,
             'uploader': uploader,
             'uploader_id': uploader_id,
             'thumbnail': thumbnail,
diff --git a/youtube_dl/extractor/cnn.py b/youtube_dl/extractor/cnn.py
index dae40c136bae20fd54cae401e711b9233c750e14..81142ee419d45b9df9f75bdc152ab87e1317650f 100644 (file)
@@ -12,24 +12,25 @@ from ..utils import (
 
 class CNNIE(InfoExtractor):
     _VALID_URL = r'''(?x)https?://((edition|www)\.)?cnn\.com/video/(data/.+?|\?)/
-        (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''
+        (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn(-ap)?|(?=&)))'''
 
     _TESTS = [{
         'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
-        'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
         'md5': '3e6121ea48df7e2259fe73a0628605c4',
         'info_dict': {
+            'id': 'sports_2013_06_09_nadal-1-on-1.cnn',
+            'ext': 'mp4',
             'title': 'Nadal wins 8th French Open title',
             'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
             'duration': 135,
             'upload_date': '20130609',
         },
-    },
-    {
+    }, {
         "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
-        "file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
         "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
         "info_dict": {
+            'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
+            'ext': 'mp4',
             "title": "Student's epic speech stuns new freshmen",
             "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
             "upload_date": "20130821",
diff --git a/youtube_dl/extractor/collegehumor.py b/youtube_dl/extractor/collegehumor.py
index 6f866e7fcee7f401362b24d69db2285e22cfa6a4..002b240378299bf18000b2ee92c76ae062115126 100644 (file)
@@ -10,47 +10,46 @@ from ..utils import int_or_none
 class CollegeHumorIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
 
-    _TESTS = [{
-        'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
-        'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
-        'info_dict': {
-            'id': '6902724',
-            'ext': 'mp4',
-            'title': 'Comic-Con Cosplay Catastrophe',
-            'description': "Fans get creative this year at San Diego.  Too creative.  And yes, that's really Joss Whedon.",
-            'age_limit': 13,
-            'duration': 187,
+    _TESTS = [
+        {
+            'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
+            'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
+            'info_dict': {
+                'id': '6902724',
+                'ext': 'mp4',
+                'title': 'Comic-Con Cosplay Catastrophe',
+                'description': "Fans get creative this year at San Diego.  Too creative.  And yes, that's really Joss Whedon.",
+                'age_limit': 13,
+                'duration': 187,
+            },
+        }, {
+            'url': 'http://www.collegehumor.com/video/3505939/font-conference',
+            'md5': '72fa701d8ef38664a4dbb9e2ab721816',
+            'info_dict': {
+                'id': '3505939',
+                'ext': 'mp4',
+                'title': 'Font Conference',
+                'description': "This video wasn't long enough, so we made it double-spaced.",
+                'age_limit': 10,
+                'duration': 179,
+            },
+        }, {
+            # embedded youtube video
+            'url': 'http://www.collegehumor.com/embed/6950306',
+            'info_dict': {
+                'id': 'Z-bao9fg6Yc',
+                'ext': 'mp4',
+                'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
+                'uploader': 'Mark Dice',
+                'uploader_id': 'MarkDice',
+                'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
+                'upload_date': '20140127',
+            },
+            'params': {
+                'skip_download': True,
+            },
+            'add_ie': ['Youtube'],
         },
-    },
-    {
-        'url': 'http://www.collegehumor.com/video/3505939/font-conference',
-        'md5': '72fa701d8ef38664a4dbb9e2ab721816',
-        'info_dict': {
-            'id': '3505939',
-            'ext': 'mp4',
-            'title': 'Font Conference',
-            'description': "This video wasn't long enough, so we made it double-spaced.",
-            'age_limit': 10,
-            'duration': 179,
-        },
-    },
-    # embedded youtube video
-    {
-        'url': 'http://www.collegehumor.com/embed/6950306',
-        'info_dict': {
-            'id': 'Z-bao9fg6Yc',
-            'ext': 'mp4',
-            'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
-            'uploader': 'Mark Dice',
-            'uploader_id': 'MarkDice',
-            'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
-            'upload_date': '20140127',
-        },
-        'params': {
-            'skip_download': True,
-        },
-        'add_ie': ['Youtube'],
-    },
     ]
 
     def _real_extract(self, url):
diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py
index c81ce5a96f03b539d2f5e98975218fcdd0ed861d..2e3ef3fdab4c25f2818f46a820702056ceb3f294 100644 (file)
@@ -2,7 +2,6 @@ from __future__ import unicode_literals
 
 import re
 
-from .common import InfoExtractor
 from .mtv import MTVServicesInfoExtractor
 from ..utils import (
     compat_str,
@@ -31,7 +30,7 @@ class ComedyCentralIE(MTVServicesInfoExtractor):
     }
 
 
-class ComedyCentralShowsIE(InfoExtractor):
+class ComedyCentralShowsIE(MTVServicesInfoExtractor):
     IE_DESC = 'The Daily Show / The Colbert Report'
     # urls can be abbreviations like :thedailyshow or :colbert
     # urls for episodes like:
@@ -43,14 +42,14 @@ class ComedyCentralShowsIE(InfoExtractor):
                           (?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
                          ((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
                           (?P<clip>
-                              (?:(?:guests/[^/]+|videos|video-playlists|special-editions)/[^/]+/(?P<videotitle>[^/?#]+))
+                              (?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
                               |(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
                               |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
                           )|
                           (?P<interview>
                               extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
                      (?:[?#].*|$)'''
-    _TEST = {
+    _TESTS = [{
         'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
         'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
         'info_dict': {
@@ -61,7 +60,34 @@ class ComedyCentralShowsIE(InfoExtractor):
             'uploader': 'thedailyshow',
             'title': 'thedailyshow kristen-stewart part 1',
         }
-    }
+    }, {
+        'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
+        'only_matching': True,
+    }, {
+        'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
+        'only_matching': True,
+    }, {
+        'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
+        'only_matching': True,
+    }, {
+        'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
+        'only_matching': True,
+    }, {
+        'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
+        'only_matching': True,
+    }, {
+        'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
+        'only_matching': True,
+    }, {
+        'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
+        'only_matching': True,
+    }, {
+        'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
+        'only_matching': True,
+    }, {
+        'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
+        'only_matching': True,
+    }]
 
     _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
 
@@ -82,18 +108,8 @@ class ComedyCentralShowsIE(InfoExtractor):
         '400': (384, 216),
     }
 
-    @staticmethod
-    def _transform_rtmp_url(rtmp_video_url):
-        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
-        if not m:
-            raise ExtractorError('Cannot transform RTMP url')
-        base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
-        return base + m.group('finalid')
-
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-        if mobj is None:
-            raise ExtractorError('Invalid URL: %s' % url)
+        mobj = re.match(self._VALID_URL, url)
 
         if mobj.group('shortname'):
             if mobj.group('shortname') in ('tds', 'thedailyshow'):
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 342bfb8b3b53bcb76951613002090be8737bbe29..d302fe45fdea0bc7556fdbda4f321d64d86c2c7c 100644 (file)
@@ -1,4 +1,7 @@
+from __future__ import unicode_literals
+
 import base64
+import datetime
 import hashlib
 import json
 import netrc
@@ -9,15 +12,19 @@ import sys
 import time
 import xml.etree.ElementTree
 
-from ..utils import (
+from ..compat import (
+    compat_cookiejar,
     compat_http_client,
     compat_urllib_error,
     compat_urllib_parse_urlparse,
+    compat_urlparse,
     compat_str,
-
+)
+from ..utils import (
     clean_html,
     compiled_regex_type,
     ExtractorError,
+    float_or_none,
     int_or_none,
     RegexNotFoundError,
     sanitize_filename,
@@ -37,7 +44,11 @@ class InfoExtractor(object):
     information possibly downloading the video to the file system, among
     other possible outcomes.
 
-    The dictionaries must include the following fields:
+    The type field determines the type of the result.
+    By far the most common value (and the default if _type is missing) is
+    "video", which indicates a single video.
+
+    For a video, the dictionaries must include the following fields:
 
     id:             Video identifier.
     title:          Video title, unescaped.
@@ -67,6 +78,7 @@ class InfoExtractor(object):
                     * acodec     Name of the audio codec in use
                     * asr        Audio sampling rate in Hertz
                     * vbr        Average video bitrate in KBit/s
+                    * fps        Frame rate
                     * vcodec     Name of the video codec in use
                     * container  Name of the container format
                     * filesize   The number of bytes, if known in advance
@@ -80,10 +92,25 @@ class InfoExtractor(object):
                                  by this field, regardless of all other values.
                                  -1 for default (order by other properties),
                                  -2 or smaller for less than default.
+                    * language_preference  Is this in the correct requested
+                                 language?
+                                 10 if it's what the URL is about,
+                                 -1 for default (don't know),
+                                 -10 otherwise, other values reserved for now.
                     * quality    Order number of the video quality of this
                                  format, irrespective of the file format.
                                  -1 for default (order by other properties),
                                  -2 or smaller for less than default.
+                    * source_preference  Order number for this video source
+                                  (quality takes higher priority)
+                                 -1 for default (order by other properties),
+                                 -2 or smaller for less than default.
+                    * http_referer  HTTP Referer header value to set.
+                    * http_method  HTTP method to use for the download.
+                    * http_headers  A dictionary of additional HTTP headers
+                                 to add to the request.
+                    * http_post_data  Additional data to send with a POST
+                                 request.
     url:            Final video URL.
     ext:            Video filename extension.
     format:         The video format, defaults to ext (used for --get-format)
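
Taken together, the new per-format keys let an extractor describe a single
entry of the formats list like this (values invented for illustration):

    fmt = {
        'format_id': 'hls-1080',
        'url': 'http://example.com/master.m3u8',
        'ext': 'mp4',
        'fps': 25,
        'quality': 3,
        'language_preference': 10,   # the language the page is about
        'source_preference': -1,     # default source ordering
        'http_headers': {'Referer': 'http://example.com/watch'},
    }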
@@ -91,6 +118,7 @@ class InfoExtractor(object):
 
     The following fields are optional:
 
+    alt_title:      A secondary title of the video.
     display_id      An alternative identifier for the video, not necessarily
                     unique, but available before title. Typically, id is
                     something like "4234987", title "Dancing naked mole rats",
@@ -102,13 +130,13 @@ class InfoExtractor(object):
                         * "resolution" (optional, string "{width}x{height"},
                                         deprecated)
     thumbnail:      Full URL to a video thumbnail image.
-    description:    One-line video description.
+    description:    Full video description.
     uploader:       Full name of the video uploader.
     timestamp:      UNIX timestamp of the moment the video became available.
     upload_date:    Video upload date (YYYYMMDD).
                     If not explicitly set, calculated from timestamp.
     uploader_id:    Nickname or id of the video uploader.
-    location:       Physical location of the video.
+    location:       Physical location where the video was filmed.
     subtitles:      The subtitle file contents as a dictionary in the format
                     {language: subtitles}.
     duration:       Length of the video in seconds, as an integer.
@@ -122,9 +150,46 @@ class InfoExtractor(object):
                     by YoutubeDL if it's missing)
     categories:     A list of categories that the video falls in, for example
                     ["Sports", "Berlin"]
+    is_live:        True, False, or None (=unknown). Whether this video is a
+                    live stream that goes on instead of a fixed-length video.
 
     Unless mentioned otherwise, the fields should be Unicode strings.
 
+    Unless mentioned otherwise, None is equivalent to absence of information.
+
+
+    _type "playlist" indicates multiple videos.
+    There must be a key "entries", which is a list, an iterable, or a PagedList
+    object, each element of which is a valid dictionary by this specification.
+
+    Additionally, playlists can have "title" and "id" attributes with the same
+    semantics as videos (see above).
+
+
+    _type "multi_video" indicates that there are multiple videos that
+    form a single show, for example multiple acts of an opera or TV episode.
+    It must have an entries key like a playlist and contain all the keys
+    required for a video at the same time.
+
+
+    _type "url" indicates that the video must be extracted from another
+    location, possibly by a different extractor. Its only required key is:
+    "url" - the next URL to extract.
+    The key "ie_key" can be set to the class name (minus the trailing "IE",
+    e.g. "Youtube") if the extractor class is known in advance.
+    Additionally, the dictionary may have any properties of the resolved entity
+    known in advance, for example "title" if the title of the referred video is
+    known ahead of time.
+
+
+    _type "url_transparent" entities have the same specification as "url", but
+    indicate that the given additional information is more precise than the one
+    associated with the resolved URL.
+    This is useful when a site employs a video service that hosts the video and
+    its technical metadata, but that video service does not embed a useful
+    title, description etc.
+
+
     Subclasses of this one should re-define the _real_initialize() and
     _real_extract() methods and define a _VALID_URL regexp.
     Probably, they should also be added to the list of extractors.
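
For a concrete "url_transparent" result, see the rewritten CNET extractor
earlier in this diff: the URL is handed off to ThePlatform, but the metadata
supplied alongside it wins over whatever the delegate extracts. In outline:

    return {
        '_type': 'url_transparent',
        'url': tp_link,        # resolved by ThePlatformIE
        'id': video_id,
        'title': title,        # kept even if ThePlatform reports another title
        'uploader': uploader,
    }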
@@ -153,6 +218,14 @@ class InfoExtractor(object):
             cls._VALID_URL_RE = re.compile(cls._VALID_URL)
         return cls._VALID_URL_RE.match(url) is not None
 
+    @classmethod
+    def _match_id(cls, url):
+        if '_VALID_URL_RE' not in cls.__dict__:
+            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
+        m = cls._VALID_URL_RE.match(url)
+        assert m
+        return m.group('id')
+
     @classmethod
     def working(cls):
         """Getter method for _WORKING."""
@@ -196,17 +269,17 @@ class InfoExtractor(object):
             self.report_download_webpage(video_id)
         elif note is not False:
             if video_id is None:
-                self.to_screen(u'%s' % (note,))
+                self.to_screen('%s' % (note,))
             else:
-                self.to_screen(u'%s: %s' % (video_id, note))
+                self.to_screen('%s: %s' % (video_id, note))
         try:
             return self._downloader.urlopen(url_or_request)
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
             if errnote is False:
                 return False
             if errnote is None:
-                errnote = u'Unable to download webpage'
-            errmsg = u'%s: %s' % (errnote, compat_str(err))
+                errnote = 'Unable to download webpage'
+            errmsg = '%s: %s' % (errnote, compat_str(err))
             if fatal:
                 raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
             else:
@@ -215,7 +288,6 @@ class InfoExtractor(object):
 
     def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
         """ Returns a tuple (page content as string, URL handle) """
-
         # Strip hashes from the URL (#1038)
         if isinstance(url_or_request, (compat_str, str)):
             url_or_request = url_or_request.partition('#')[0]
@@ -224,8 +296,14 @@ class InfoExtractor(object):
         if urlh is False:
             assert not fatal
             return False
+        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal)
+        return (content, urlh)
+
+    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None):
         content_type = urlh.headers.get('Content-Type', '')
         webpage_bytes = urlh.read()
+        if prefix is not None:
+            webpage_bytes = prefix + webpage_bytes
         m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
         if m:
             encoding = m.group(1)
@@ -243,7 +321,7 @@ class InfoExtractor(object):
                 url = url_or_request.get_full_url()
             except AttributeError:
                 url = url_or_request
-            self.to_screen(u'Dumping request to ' + url)
+            self.to_screen('Dumping request to ' + url)
             dump = base64.b64encode(webpage_bytes).decode('ascii')
             self._downloader.to_screen(dump)
         if self._downloader.params.get('write_pages', False):
@@ -253,11 +331,17 @@ class InfoExtractor(object):
                 url = url_or_request
             basen = '%s_%s' % (video_id, url)
             if len(basen) > 240:
-                h = u'___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
+                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                 basen = basen[:240 - len(h)] + h
             raw_filename = basen + '.dump'
             filename = sanitize_filename(raw_filename, restricted=True)
-            self.to_screen(u'Saving request to ' + filename)
+            self.to_screen('Saving request to ' + filename)
+            # Working around MAX_PATH limitation on Windows (see
+            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
+            if os.name == 'nt':
+                absfilepath = os.path.abspath(filename)
+                if len(absfilepath) > 259:
+                    filename = '\\\\?\\' + absfilepath
             with open(filename, 'wb') as outf:
                 outf.write(webpage_bytes)
 
@@ -266,17 +350,17 @@ class InfoExtractor(object):
         except LookupError:
             content = webpage_bytes.decode('utf-8', 'replace')
 
-        if (u'<title>Access to this site is blocked</title>' in content and
-                u'Websense' in content[:512]):
-            msg = u'Access to this webpage has been blocked by Websense filtering software in your network.'
+        if ('<title>Access to this site is blocked</title>' in content and
+                'Websense' in content[:512]):
+            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
             blocked_iframe = self._html_search_regex(
                 r'<iframe src="([^"]+)"', content,
-                u'Websense information URL', default=None)
+                'Websense information URL', default=None)
             if blocked_iframe:
-                msg += u' Visit %s for more details' % blocked_iframe
+                msg += ' Visit %s for more details' % blocked_iframe
             raise ExtractorError(msg, expected=True)
 
-        return (content, urlh)
+        return content
 
     def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
         """ Returns the data of the page as a string """
@@ -288,7 +372,7 @@ class InfoExtractor(object):
             return content
 
     def _download_xml(self, url_or_request, video_id,
-                      note=u'Downloading XML', errnote=u'Unable to download XML',
+                      note='Downloading XML', errnote='Unable to download XML',
                       transform_source=None, fatal=True):
         """Return the xml as an xml.etree.ElementTree.Element"""
         xml_string = self._download_webpage(
@@ -300,59 +384,68 @@ class InfoExtractor(object):
         return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
 
     def _download_json(self, url_or_request, video_id,
-                       note=u'Downloading JSON metadata',
-                       errnote=u'Unable to download JSON metadata',
+                       note='Downloading JSON metadata',
+                       errnote='Unable to download JSON metadata',
                        transform_source=None,
                        fatal=True):
         json_string = self._download_webpage(
             url_or_request, video_id, note, errnote, fatal=fatal)
         if (not fatal) and json_string is False:
             return None
+        return self._parse_json(
+            json_string, video_id, transform_source=transform_source, fatal=fatal)
+
+    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
         if transform_source:
             json_string = transform_source(json_string)
         try:
             return json.loads(json_string)
         except ValueError as ve:
-            raise ExtractorError('Failed to download JSON', cause=ve)
+            errmsg = '%s: Failed to parse JSON ' % video_id
+            if fatal:
+                raise ExtractorError(errmsg, cause=ve)
+            else:
+                self.report_warning(errmsg + str(ve))
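Splitting _parse_json out of _download_json also allows parsing JSON obtained by other means, e.g. scraped out of a page; a sketch (the regex and variable names are illustrative):

    config_json = self._search_regex(
        r'var\s+config\s*=\s*({.+?});', webpage, 'config')
    config = self._parse_json(config_json, video_id)

    # With fatal=False, malformed JSON produces a warning and None is
    # returned instead of raising ExtractorError.
    config = self._parse_json(config_json, video_id, fatal=False)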
 
     def report_warning(self, msg, video_id=None):
-        idstr = u'' if video_id is None else u'%s: ' % video_id
+        idstr = '' if video_id is None else '%s: ' % video_id
         self._downloader.report_warning(
-            u'[%s] %s%s' % (self.IE_NAME, idstr, msg))
+            '[%s] %s%s' % (self.IE_NAME, idstr, msg))
 
     def to_screen(self, msg):
         """Print msg to screen, prefixing it with '[ie_name]'"""
-        self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
+        self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
 
     def report_extraction(self, id_or_name):
         """Report information extraction."""
-        self.to_screen(u'%s: Extracting information' % id_or_name)
+        self.to_screen('%s: Extracting information' % id_or_name)
 
     def report_download_webpage(self, video_id):
         """Report webpage download."""
-        self.to_screen(u'%s: Downloading webpage' % video_id)
+        self.to_screen('%s: Downloading webpage' % video_id)
 
     def report_age_confirmation(self):
         """Report attempt to confirm age."""
-        self.to_screen(u'Confirming age')
+        self.to_screen('Confirming age')
 
     def report_login(self):
         """Report attempt to log in."""
-        self.to_screen(u'Logging in')
+        self.to_screen('Logging in')
 
-    #Methods for following #608
+    # Methods for following #608
     @staticmethod
     def url_result(url, ie=None, video_id=None):
         """Returns a url that points to a page that should be processed"""
-        #TODO: ie should be the class used for getting the info
+        # TODO: ie should be the class used for getting the info
         video_info = {'_type': 'url',
                       'url': url,
                       'ie_key': ie}
         if video_id is not None:
             video_info['id'] = video_id
         return video_info
+
     @staticmethod
-    def playlist_result(entries, playlist_id=None, playlist_title=None):
+    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
         """Returns a playlist"""
         video_info = {'_type': 'playlist',
                       'entries': entries}
@@ -360,9 +453,11 @@ class InfoExtractor(object):
             video_info['id'] = playlist_id
         if playlist_title:
             video_info['title'] = playlist_title
+        if playlist_description:
+            video_info['description'] = playlist_description
         return video_info
 
-    def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
+    def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
         """
         Perform a regex search on the given string, using a single or a list of
         patterns returning the first matching group.
@@ -378,27 +473,30 @@ class InfoExtractor(object):
                     break
 
         if os.name != 'nt' and sys.stderr.isatty():
-            _name = u'\033[0;34m%s\033[0m' % name
+            _name = '\033[0;34m%s\033[0m' % name
         else:
             _name = name
 
         if mobj:
-            # return the first matching group
-            return next(g for g in mobj.groups() if g is not None)
+            if group is None:
+                # return the first matching group
+                return next(g for g in mobj.groups() if g is not None)
+            else:
+                return mobj.group(group)
         elif default is not _NO_DEFAULT:
             return default
         elif fatal:
-            raise RegexNotFoundError(u'Unable to extract %s' % _name)
+            raise RegexNotFoundError('Unable to extract %s' % _name)
         else:
-            self._downloader.report_warning(u'unable to extract %s; '
-                u'please report this issue on http://yt-dl.org/bug' % _name)
+            self._downloader.report_warning('unable to extract %s; '
+                                            'please report this issue on http://yt-dl.org/bug' % _name)
             return None
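The new group argument lets a pattern carry helper groups, such as the quote backreference used by _html_search_meta further down, while still returning one specific group. A small illustrative example:

    # Without group=..., the first non-None group would be returned, so the
    # quoting group here would shadow the value we actually want.
    title = self._search_regex(
        r'title=(["\'])(?P<title>.+?)\1', webpage, 'title', group='title')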
 
-    def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
+    def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
         """
         Like _search_regex, but strips HTML tags and unescapes entities.
         """
-        res = self._search_regex(pattern, string, name, default, fatal, flags)
+        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
         if res:
             return clean_html(res).strip()
         else:
@@ -430,10 +528,26 @@ class InfoExtractor(object):
                 else:
                     raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
             except (IOError, netrc.NetrcParseError) as err:
-                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
-        
+                self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
+
         return (username, password)
 
+    def _get_tfa_info(self):
+        """
+        Get the two-factor authentication info.
+        TODO: asking the user will be required for SMS/phone verification;
+        currently this just uses the command-line option.
+        If no info is available, return None.
+        """
+        if self._downloader is None:
+            return None
+        downloader_params = self._downloader.params
+
+        if downloader_params.get('twofactor', None) is not None:
+            return downloader_params['twofactor']
+
+        return None
+
     # Helper functions for extracting OpenGraph info
     @staticmethod
     def _og_regexes(prop):
@@ -454,7 +568,7 @@ class InfoExtractor(object):
         return unescapeHTML(escaped)
 
     def _og_search_thumbnail(self, html, **kargs):
-        return self._og_search_property('image', html, u'thumbnail url', fatal=False, **kargs)
+        return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs)
 
     def _og_search_description(self, html, **kargs):
         return self._og_search_property('description', html, fatal=False, **kargs)
@@ -463,8 +577,9 @@ class InfoExtractor(object):
         return self._og_search_property('title', html, **kargs)
 
     def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
-        regexes = self._og_regexes('video')
-        if secure: regexes = self._og_regexes('video:secure_url') + regexes
+        regexes = self._og_regexes('video') + self._og_regexes('video:url')
+        if secure:
+            regexes = self._og_regexes('video:secure_url') + regexes
         return self._html_search_regex(regexes, html, name, **kargs)
 
     def _og_search_url(self, html, **kargs):
@@ -475,9 +590,9 @@ class InfoExtractor(object):
             display_name = name
         return self._html_search_regex(
             r'''(?ix)<meta
-                    (?=[^>]+(?:itemprop|name|property)=["\']?%s["\']?)
-                    [^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
-            html, display_name, fatal=fatal, **kwargs)
+                    (?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
+                    [^>]+content=(["\'])(?P<content>.*?)\1''' % re.escape(name),
+            html, display_name, fatal=fatal, group='content', **kwargs)
 
     def _dc_search_uploader(self, html):
         return self._html_search_meta('dc.creator', html, 'uploader')
@@ -508,11 +623,11 @@ class InfoExtractor(object):
 
     def _twitter_search_player(self, html):
         return self._html_search_meta('twitter:player', html,
-            'twitter card player')
+                                      'twitter card player')
 
     def _sort_formats(self, formats):
         if not formats:
-            raise ExtractorError(u'No video formats found')
+            raise ExtractorError('No video formats found')
 
         def _formats_key(f):
             # TODO remove the following workaround
@@ -532,9 +647,9 @@ class InfoExtractor(object):
 
             if f.get('vcodec') == 'none':  # audio only
                 if self._downloader.params.get('prefer_free_formats'):
-                    ORDER = [u'aac', u'mp3', u'm4a', u'webm', u'ogg', u'opus']
+                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                 else:
-                    ORDER = [u'webm', u'opus', u'ogg', u'mp3', u'aac', u'm4a']
+                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                 ext_preference = 0
                 try:
                     audio_ext_preference = ORDER.index(f['ext'])
@@ -542,9 +657,9 @@ class InfoExtractor(object):
                     audio_ext_preference = -1
             else:
                 if self._downloader.params.get('prefer_free_formats'):
-                    ORDER = [u'flv', u'mp4', u'webm']
+                    ORDER = ['flv', 'mp4', 'webm']
                 else:
-                    ORDER = [u'webm', u'flv', u'mp4']
+                    ORDER = ['webm', 'flv', 'mp4']
                 try:
                     ext_preference = ORDER.index(f['ext'])
                 except ValueError:
@@ -553,6 +668,7 @@ class InfoExtractor(object):
 
             return (
                 preference,
+                f.get('language_preference') if f.get('language_preference') is not None else -1,
                 f.get('quality') if f.get('quality') is not None else -1,
                 f.get('height') if f.get('height') is not None else -1,
                 f.get('width') if f.get('width') is not None else -1,
@@ -561,14 +677,16 @@ class InfoExtractor(object):
                 f.get('vbr') if f.get('vbr') is not None else -1,
                 f.get('abr') if f.get('abr') is not None else -1,
                 audio_ext_preference,
+                f.get('fps') if f.get('fps') is not None else -1,
                 f.get('filesize') if f.get('filesize') is not None else -1,
                 f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
+                f.get('source_preference') if f.get('source_preference') is not None else -1,
                 f.get('format_id'),
             )
         formats.sort(key=_formats_key)
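The effect of the extended sort key is easiest to see on a toy pair of formats that tie on every earlier criterion (all values invented):

    formats = [
        {'format_id': 'a', 'ext': 'mp4', 'height': 720, 'fps': 30, 'url': '...'},
        {'format_id': 'b', 'ext': 'mp4', 'height': 720, 'fps': 60, 'url': '...'},
    ]
    self._sort_formats(formats)
    # 'b' now sorts last, i.e. as the better format: identical preference,
    # language_preference, quality, height, width and ext, but a higher fps.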
 
     def http_scheme(self):
-        """ Either "https:" or "https:", depending on the user's preferences """
+        """ Either "http:" or "https:", depending on the user's preferences """
         return (
             'http:'
             if self._downloader.params.get('prefer_insecure', False)
@@ -586,7 +704,7 @@ class InfoExtractor(object):
 
     def _sleep(self, timeout, video_id, msg_template=None):
         if msg_template is None:
-            msg_template = u'%(video_id)s: Waiting for %(timeout)s seconds'
+            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
         msg = msg_template % {'video_id': video_id, 'timeout': timeout}
         self.to_screen(msg)
         time.sleep(timeout)
@@ -597,11 +715,15 @@ class InfoExtractor(object):
             'Unable to download f4m manifest')
 
         formats = []
-        for media_el in manifest.findall('{http://ns.adobe.com/f4m/1.0}media'):
+        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
+        for i, media_el in enumerate(media_nodes):
+            tbr = int_or_none(media_el.attrib.get('bitrate'))
+            format_id = 'f4m-%d' % (i if tbr is None else tbr)
             formats.append({
+                'format_id': format_id,
                 'url': manifest_url,
                 'ext': 'flv',
-                'tbr': int_or_none(media_el.attrib.get('bitrate')),
+                'tbr': tbr,
                 'width': int_or_none(media_el.attrib.get('width')),
                 'height': int_or_none(media_el.attrib.get('height')),
             })
@@ -609,6 +731,150 @@ class InfoExtractor(object):
 
         return formats
 
+    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
+                              entry_protocol='m3u8', preference=None):
+
+        formats = [{
+            'format_id': 'm3u8-meta',
+            'url': m3u8_url,
+            'ext': ext,
+            'protocol': 'm3u8',
+            'preference': -1,
+            'resolution': 'multiple',
+            'format_note': 'Quality selection URL',
+        }]
+
+        format_url = lambda u: (
+            u
+            if re.match(r'^https?://', u)
+            else compat_urlparse.urljoin(m3u8_url, u))
+
+        m3u8_doc = self._download_webpage(
+            m3u8_url, video_id,
+            note='Downloading m3u8 information',
+            errnote='Failed to download m3u8 information')
+        last_info = None
+        kv_rex = re.compile(
+            r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
+        for line in m3u8_doc.splitlines():
+            if line.startswith('#EXT-X-STREAM-INF:'):
+                last_info = {}
+                for m in kv_rex.finditer(line):
+                    v = m.group('val')
+                    if v.startswith('"'):
+                        v = v[1:-1]
+                    last_info[m.group('key')] = v
+            elif line.startswith('#') or not line.strip():
+                continue
+            else:
+                if last_info is None:
+                    formats.append({'url': format_url(line)})
+                    continue
+                tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
+
+                f = {
+                    'format_id': 'm3u8-%d' % (tbr if tbr else len(formats)),
+                    'url': format_url(line.strip()),
+                    'tbr': tbr,
+                    'ext': ext,
+                    'protocol': entry_protocol,
+                    'preference': preference,
+                }
+                codecs = last_info.get('CODECS')
+                if codecs:
+                    # TODO: it looks like the video codec does not always go first
+                    va_codecs = codecs.split(',')
+                    if va_codecs[0]:
+                        f['vcodec'] = va_codecs[0].partition('.')[0]
+                    if len(va_codecs) > 1 and va_codecs[1]:
+                        f['acodec'] = va_codecs[1].partition('.')[0]
+                resolution = last_info.get('RESOLUTION')
+                if resolution:
+                    width_str, height_str = resolution.split('x')
+                    f['width'] = int(width_str)
+                    f['height'] = int(height_str)
+                formats.append(f)
+                last_info = {}
+        self._sort_formats(formats)
+        return formats
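For orientation, given a made-up master playlist such as

    #EXTM3U
    #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=640x360,CODECS="avc1.42001e,mp4a.40.2"
    low/index.m3u8
    #EXT-X-STREAM-INF:BANDWIDTH=2560000,RESOLUTION=1280x720
    hi/index.m3u8

the loop above yields, besides the 'm3u8-meta' entry, a format 'm3u8-1280' (tbr 1280, 640x360, vcodec 'avc1', acodec 'mp4a') and a format 'm3u8-2560' (tbr 2560, 1280x720); relative URIs are resolved against m3u8_url.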
+
+    # TODO: improve extraction
+    def _extract_smil_formats(self, smil_url, video_id):
+        smil = self._download_xml(
+            smil_url, video_id, 'Downloading SMIL file',
+            'Unable to download SMIL file')
+
+        base = smil.find('./head/meta').get('base')
+
+        formats = []
+        rtmp_count = 0
+        for video in smil.findall('./body/switch/video'):
+            src = video.get('src')
+            if not src:
+                continue
+            bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
+            width = int_or_none(video.get('width'))
+            height = int_or_none(video.get('height'))
+            proto = video.get('proto')
+            if not proto:
+                if base:
+                    if base.startswith('rtmp'):
+                        proto = 'rtmp'
+                    elif base.startswith('http'):
+                        proto = 'http'
+            ext = video.get('ext')
+            if proto == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(src, video_id, ext))
+            elif proto == 'rtmp':
+                rtmp_count += 1
+                streamer = video.get('streamer') or base
+                formats.append({
+                    'url': streamer,
+                    'play_path': src,
+                    'ext': 'flv',
+                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
+                    'tbr': bitrate,
+                    'width': width,
+                    'height': height,
+                })
+        self._sort_formats(formats)
+
+        return formats
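Likewise for SMIL, a minimal invented document such as

    <smil>
      <head><meta base="rtmp://cdn.example.com/app"/></head>
      <body><switch>
        <video src="mp4:clip-720" system-bitrate="1500000" width="1280" height="720"/>
      </switch></body>
    </smil>

would produce a single format with url 'rtmp://cdn.example.com/app' (the base acts as the streamer), play_path 'mp4:clip-720', format_id 'rtmp-1500', tbr 1500, 1280x720 and ext 'flv'.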
+
+    def _live_title(self, name):
+        """ Generate the title for a live video """
+        now = datetime.datetime.now()
+        now_str = now.strftime("%Y-%m-%d %H:%M")
+        return name + ' ' + now_str
+
+    def _int(self, v, name, fatal=False, **kwargs):
+        res = int_or_none(v, **kwargs)
+        if res is None:
+            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
+            if fatal:
+                raise ExtractorError(msg)
+            else:
+                self._downloader.report_warning(msg)
+        return res
+
+    def _float(self, v, name, fatal=False, **kwargs):
+        res = float_or_none(v, **kwargs)
+        if res is None:
+            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
+            if fatal:
+                raise ExtractorError(msg)
+            else:
+                self._downloader.report_warning(msg)
+        return res
+
+    def _set_cookie(self, domain, name, value, expire_time=None):
+        cookie = compat_cookiejar.Cookie(
+            0, name, value, None, None, domain, None,
+            None, '/', True, False, expire_time, '', None, None, None)
+        self._downloader.cookiejar.set_cookie(cookie)
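Since compat_cookiejar.Cookie takes a long run of positional arguments, an equivalent keyword spelling of the call above may help when reading it (a sketch based on the stdlib cookiejar signature):

    cookie = compat_cookiejar.Cookie(
        version=0, name=name, value=value,
        port=None, port_specified=None,
        domain=domain, domain_specified=None, domain_initial_dot=None,
        path='/', path_specified=True, secure=False,
        expires=expire_time, discard='',
        comment=None, comment_url=None, rest=None)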
+
 
 class SearchInfoExtractor(InfoExtractor):
     """
@@ -628,7 +894,7 @@ class SearchInfoExtractor(InfoExtractor):
     def _real_extract(self, query):
         mobj = re.match(self._make_valid_url(), query)
         if mobj is None:
-            raise ExtractorError(u'Invalid search query "%s"' % query)
+            raise ExtractorError('Invalid search query "%s"' % query)
 
         prefix = mobj.group('prefix')
         query = mobj.group('query')
@@ -639,9 +905,9 @@ class SearchInfoExtractor(InfoExtractor):
         else:
             n = int(prefix)
             if n <= 0:
-                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
+                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
             elif n > self._MAX_RESULTS:
-                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
+                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                 n = self._MAX_RESULTS
             return self._get_n_results(query, n)
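To illustrate the prefix handling (assuming a hypothetical subclass with _SEARCH_KEY = 'examplesearch'):

    # 'examplesearch:cats'     -> the first result only
    # 'examplesearch5:cats'    -> the first five results
    # 'examplesearchall:cats'  -> up to _MAX_RESULTS results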
 
index ffbe4903b807faf0442057ebbee27bc9ed838c12..7a7e79360423ec39e341cf651b2aef4ca762d244 100644 (file)
@@ -34,6 +34,8 @@ class CondeNastIE(InfoExtractor):
     _VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
     IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
 
+    EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed)/.+?' % '|'.join(_SITES.keys())
+
     _TEST = {
         'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
         'md5': '1921f713ed48aabd715691f774c451f7',
index 74b880ffce9b0a8dbf7ab273063253a2743dbbdb..cf763ee7e03019adc5f957060b0f45e52e532084 100644 (file)
@@ -54,7 +54,7 @@ class CrackedIE(InfoExtractor):
 
         return {
             'id': video_id,
-            'url':video_url,
+            'url': video_url,
             'title': title,
             'description': description,
             'timestamp': timestamp,
@@ -62,4 +62,4 @@ class CrackedIE(InfoExtractor):
             'comment_count': comment_count,
             'height': height,
             'width': width,
-        }
\ No newline at end of file
+        }
index 026a9177e754de7d606961e6e4793af86da49fe2..d7e2b841e10856cadf0526fe8ff6d4c280dc0dae 100644 (file)
@@ -5,10 +5,11 @@ import re
 import json
 import base64
 import zlib
+import xml.etree.ElementTree
 
 from hashlib import sha1
 from math import pow, sqrt, floor
-from .common import InfoExtractor
+from .subtitles import SubtitlesInfoExtractor
 from ..utils import (
     ExtractorError,
     compat_urllib_parse,
@@ -16,15 +17,16 @@ from ..utils import (
     bytes_to_intlist,
     intlist_to_bytes,
     unified_strdate,
-    clean_html,
+    urlencode_postdata,
 )
 from ..aes import (
     aes_cbc_decrypt,
     inc,
 )
+from .common import InfoExtractor
 
 
-class CrunchyrollIE(InfoExtractor):
+class CrunchyrollIE(SubtitlesInfoExtractor):
     _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
     _TEST = {
         'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
@@ -37,6 +39,7 @@ class CrunchyrollIE(InfoExtractor):
             'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
             'uploader': 'Yomiuri Telecasting Corporation (YTV)',
             'upload_date': '20131013',
+            'url': 're:(?!.*&amp)',
         },
         'params': {
             # rtmp
@@ -51,6 +54,24 @@ class CrunchyrollIE(InfoExtractor):
         '1080': ('80', '108'),
     }
 
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+        self.report_login()
+        login_url = 'https://www.crunchyroll.com/?a=formhandler'
+        data = urlencode_postdata({
+            'formname': 'RpcApiUser_Login',
+            'name': username,
+            'password': password,
+        })
+        login_request = compat_urllib_request.Request(login_url, data)
+        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        self._download_webpage(login_request, None, False, 'Wrong login info')
+
+    def _real_initialize(self):
+        self._login()
+
     def _decrypt_subtitles(self, data, iv, id):
         data = bytes_to_intlist(data)
         iv = bytes_to_intlist(iv)
@@ -76,8 +97,10 @@ class CrunchyrollIE(InfoExtractor):
             return shaHash + [0] * 12
 
         key = obfuscate_key(id)
+
         class Counter:
             __value = iv
+
             def next_value(self):
                 temp = self.__value
                 self.__value = inc(self.__value)
@@ -85,19 +108,82 @@ class CrunchyrollIE(InfoExtractor):
         decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
         return zlib.decompress(decrypted_data)
 
-    def _convert_subtitles_to_srt(self, subtitles):
+    def _convert_subtitles_to_srt(self, sub_root):
         output = ''
-        for i, (start, end, text) in enumerate(re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles), 1):
-            start = start.replace('.', ',')
-            end = end.replace('.', ',')
-            text = clean_html(text)
-            text = text.replace('\\N', '\n')
-            if not text:
-                continue
+
+        for i, event in enumerate(sub_root.findall('./events/event'), 1):
+            start = event.attrib['start'].replace('.', ',')
+            end = event.attrib['end'].replace('.', ',')
+            text = event.attrib['text'].replace('\\N', '\n')
             output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
         return output
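For example, an invented event node

    <event start="0:00:01.18" end="0:00:03.62" text="Hello\NWorld" ... />

is rendered (note the '.' to ',' substitution in the timestamps) as the SRT block:

    1
    0:00:01,18 --> 0:00:03,62
    Hello
    World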
 
-    def _real_extract(self,url):
+    def _convert_subtitles_to_ass(self, sub_root):
+        output = ''
+
+        def ass_bool(strvalue):
+            assvalue = '0'
+            if strvalue == '1':
+                assvalue = '-1'
+            return assvalue
+
+        output = '[Script Info]\n'
+        output += 'Title: %s\n' % sub_root.attrib["title"]
+        output += 'ScriptType: v4.00+\n'
+        output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
+        output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
+        output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
+        output += """ScaledBorderAndShadow: yes
+
+[V4+ Styles]
+Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
+"""
+        for style in sub_root.findall('./styles/style'):
+            output += 'Style: ' + style.attrib["name"]
+            output += ',' + style.attrib["font_name"]
+            output += ',' + style.attrib["font_size"]
+            output += ',' + style.attrib["primary_colour"]
+            output += ',' + style.attrib["secondary_colour"]
+            output += ',' + style.attrib["outline_colour"]
+            output += ',' + style.attrib["back_colour"]
+            output += ',' + ass_bool(style.attrib["bold"])
+            output += ',' + ass_bool(style.attrib["italic"])
+            output += ',' + ass_bool(style.attrib["underline"])
+            output += ',' + ass_bool(style.attrib["strikeout"])
+            output += ',' + style.attrib["scale_x"]
+            output += ',' + style.attrib["scale_y"]
+            output += ',' + style.attrib["spacing"]
+            output += ',' + style.attrib["angle"]
+            output += ',' + style.attrib["border_style"]
+            output += ',' + style.attrib["outline"]
+            output += ',' + style.attrib["shadow"]
+            output += ',' + style.attrib["alignment"]
+            output += ',' + style.attrib["margin_l"]
+            output += ',' + style.attrib["margin_r"]
+            output += ',' + style.attrib["margin_v"]
+            output += ',' + style.attrib["encoding"]
+            output += '\n'
+
+        output += """
+[Events]
+Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
+"""
+        for event in sub_root.findall('./events/event'):
+            output += 'Dialogue: 0'
+            output += ',' + event.attrib["start"]
+            output += ',' + event.attrib["end"]
+            output += ',' + event.attrib["style"]
+            output += ',' + event.attrib["name"]
+            output += ',' + event.attrib["margin_l"]
+            output += ',' + event.attrib["margin_r"]
+            output += ',' + event.attrib["margin_v"]
+            output += ',' + event.attrib["effect"]
+            output += ',' + event.attrib["text"]
+            output += '\n'
+
+        return output
+
+    def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('video_id')
 
@@ -140,27 +226,31 @@ class CrunchyrollIE(InfoExtractor):
         formats = []
         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
             stream_quality, stream_format = self._FORMAT_IDS[fmt]
-            video_format = fmt+'p'
+            video_format = fmt + 'p'
             streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
             # urlencode doesn't work!
-            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
-            streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for '+video_format)
-            video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url')
-            video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path')
+            streamdata = self._download_xml(
+                streamdata_req, video_id,
+                note='Downloading media info for %s' % video_format)
+            video_url = streamdata.find('.//host').text
+            video_play_path = streamdata.find('.//file').text
             formats.append({
                 'url': video_url,
-                'play_path':   video_play_path,
+                'play_path': video_play_path,
                 'ext': 'flv',
                 'format': video_format,
                 'format_id': video_format,
             })
 
         subtitles = {}
+        sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
-                                              video_id, note='Downloading subtitles for '+sub_name)
+            sub_page = self._download_webpage(
+                'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
+                video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
@@ -174,15 +264,60 @@ class CrunchyrollIE(InfoExtractor):
             lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
             if not lang_code:
                 continue
-            subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
+            sub_root = xml.etree.ElementTree.fromstring(subtitle)
+            if sub_format == 'ass':
+                subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root)
+            else:
+                subtitles[lang_code] = self._convert_subtitles_to_srt(sub_root)
+
+        if self._downloader.params.get('listsubtitles', False):
+            self._list_available_subtitles(video_id, subtitles)
+            return
 
         return {
-            'id':          video_id,
-            'title':       video_title,
+            'id': video_id,
+            'title': video_title,
             'description': video_description,
-            'thumbnail':   video_thumbnail,
-            'uploader':    video_uploader,
+            'thumbnail': video_thumbnail,
+            'uploader': video_uploader,
             'upload_date': video_upload_date,
-            'subtitles':   subtitles,
-            'formats':     formats,
+            'subtitles': subtitles,
+            'formats': formats,
+        }
+
+
+class CrunchyrollShowPlaylistIE(InfoExtractor):
+    IE_NAME = "crunchyroll:playlist"
+    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'
+
+    _TESTS = [{
+        'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
+        'info_dict': {
+            'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
+            'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
+        },
+        'playlist_count': 13,
+    }]
+
+    def _real_extract(self, url):
+        show_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, show_id)
+        title = self._html_search_regex(
+            r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
+            webpage, 'title')
+        episode_paths = re.findall(
+            r'(?s)<li id="showview_videos_media_[0-9]+"[^>]+>.*?<a href="([^"]+)"',
+            webpage)
+        entries = [
+            self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll')
+            for ep in episode_paths
+        ]
+        entries.reverse()
+
+        return {
+            '_type': 'playlist',
+            'id': show_id,
+            'title': title,
+            'entries': entries,
         }
index b6552c542411c2abf639e71c955c66c34db2b007..5411066846eb94b9c9295bae4f8860e07112b2d1 100644 (file)
@@ -34,6 +34,13 @@ class CSpanIE(InfoExtractor):
             'title': 'International Health Care Models',
             'description': 'md5:7a985a2d595dba00af3d9c9f0783c967',
         }
+    }, {
+        'url': 'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall',
+        'info_dict': {
+            'id': '342759',
+            'title': 'General Motors Ignition Switch Recall',
+        },
+        'playlist_duration_sum': 14855,
     }]
 
     def _real_extract(self, url):
diff --git a/youtube_dl/extractor/d8.py b/youtube_dl/extractor/d8.py
deleted file mode 100644 (file)
index 6b26ff2..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-# encoding: utf-8
-from __future__ import unicode_literals
-
-from .canalplus import CanalplusIE
-
-
-class D8IE(CanalplusIE):
-    _VALID_URL = r'https?://www\.d8\.tv/.*?/(?P<path>.*)'
-    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/d8/%s'
-    IE_NAME = 'd8.tv'
-
-    _TEST = {
-        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
-        'file': '966289.flv',
-        'info_dict': {
-            'title': 'Campagne intime - Documentaire exceptionnel',
-            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
-            'upload_date': '20131108',
-        },
-        'params': {
-            # rtmp
-            'skip_download': True,
-        },
-        'skip': 'videos get deleted after a while',
-    }
index 5d0bfe454c9bfe1a3e16d1273b18ed3be2f436b8..936c13cd60b0ec44f376818a7e17cb9ccf4d1384 100644 (file)
@@ -1,3 +1,6 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 import re
 import json
 import itertools
@@ -15,6 +18,7 @@ from ..utils import (
     unescapeHTML,
 )
 
+
 class DailymotionBaseInfoExtractor(InfoExtractor):
     @staticmethod
     def _build_request(url):
@@ -24,64 +28,63 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
         request.add_header('Cookie', 'ff=off')
         return request
 
+
 class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
     """Information Extractor for Dailymotion"""
 
     _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
-    IE_NAME = u'dailymotion'
+    IE_NAME = 'dailymotion'
 
     _FORMATS = [
-        (u'stream_h264_ld_url', u'ld'),
-        (u'stream_h264_url', u'standard'),
-        (u'stream_h264_hq_url', u'hq'),
-        (u'stream_h264_hd_url', u'hd'),
-        (u'stream_h264_hd1080_url', u'hd180'),
+        ('stream_h264_ld_url', 'ld'),
+        ('stream_h264_url', 'standard'),
+        ('stream_h264_hq_url', 'hq'),
+        ('stream_h264_hd_url', 'hd'),
+        ('stream_h264_hd1080_url', 'hd180'),
     ]
 
     _TESTS = [
         {
-            u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
-            u'file': u'x33vw9.mp4',
-            u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
-            u'info_dict': {
-                u"uploader": u"Amphora Alex and Van .", 
-                u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
+            'url': 'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
+            'md5': '392c4b85a60a90dc4792da41ce3144eb',
+            'info_dict': {
+                'id': 'x33vw9',
+                'ext': 'mp4',
+                'uploader': 'Amphora Alex and Van .',
+                'title': 'Tutoriel de Youtubeur"DL DES VIDEO DE YOUTUBE"',
             }
         },
         # Vevo video
         {
-            u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
-            u'file': u'USUV71301934.mp4',
-            u'info_dict': {
-                u'title': u'Roar (Official)',
-                u'uploader': u'Katy Perry',
-                u'upload_date': u'20130905',
+            'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
+            'info_dict': {
+                'title': 'Roar (Official)',
+                'id': 'USUV71301934',
+                'ext': 'mp4',
+                'uploader': 'Katy Perry',
+                'upload_date': '20130905',
             },
-            u'params': {
-                u'skip_download': True,
+            'params': {
+                'skip_download': True,
             },
-            u'skip': u'VEVO is only available in some countries',
+            'skip': 'VEVO is only available in some countries',
         },
         # age-restricted video
         {
-            u'url': u'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
-            u'file': u'xyh2zz.mp4',
-            u'md5': u'0d667a7b9cebecc3c89ee93099c4159d',
-            u'info_dict': {
-                u'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
-                u'uploader': 'HotWaves1012',
-                u'age_limit': 18,
+            'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
+            'md5': '0d667a7b9cebecc3c89ee93099c4159d',
+            'info_dict': {
+                'id': 'xyh2zz',
+                'ext': 'mp4',
+                'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
+                'uploader': 'HotWaves1012',
+                'age_limit': 18,
             }
-
         }
     ]
 
     def _real_extract(self, url):
-        # Extract id and simplified title from URL
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         url = 'http://www.dailymotion.com/video/%s' % video_id
 
         # Retrieve video webpage to extract further information
@@ -93,12 +96,12 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
         # It may just embed a vevo video:
         m_vevo = re.search(
-            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
+            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
             webpage)
         if m_vevo is not None:
             vevo_id = m_vevo.group('id')
-            self.to_screen(u'Vevo video detected: %s' % vevo_id)
-            return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo')
+            self.to_screen('Vevo video detected: %s' % vevo_id)
+            return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
 
         age_limit = self._rta_search(webpage)
 
@@ -109,9 +112,9 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
         embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
         embed_page = self._download_webpage(embed_url, video_id,
-                                            u'Downloading embed page')
+                                            'Downloading embed page')
         info = self._search_regex(r'var info = ({.*?}),$', embed_page,
-            'video info', flags=re.MULTILINE)
+                                  'video info', flags=re.MULTILINE)
         info = json.loads(info)
         if info.get('error') is not None:
             msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
@@ -134,7 +137,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
                     'height': height,
                 })
         if not formats:
-            raise ExtractorError(u'Unable to extract video URL')
+            raise ExtractorError('Unable to extract video URL')
 
         # subtitles
         video_subtitles = self.extract_subtitles(video_id, webpage)
@@ -142,18 +145,23 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
             self._list_available_subtitles(video_id, webpage)
             return
 
-        view_count = self._search_regex(
-            r'video_views_count[^>]+>\s+([\d\.,]+)', webpage, u'view count', fatal=False)
-        if view_count is not None:
-            view_count = str_to_int(view_count)
+        view_count = str_to_int(self._search_regex(
+            r'video_views_count[^>]+>\s+([\d\.,]+)',
+            webpage, 'view count', fatal=False))
+
+        title = self._og_search_title(webpage, default=None)
+        if title is None:
+            title = self._html_search_regex(
+                r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
+                'title')
 
         return {
-            'id':       video_id,
+            'id': video_id,
             'formats': formats,
             'uploader': info['owner.screenname'],
-            'upload_date':  video_upload_date,
-            'title':    self._og_search_title(webpage),
-            'subtitles':    video_subtitles,
+            'upload_date': video_upload_date,
+            'title': title,
+            'subtitles': video_subtitles,
             'thumbnail': info['thumbnail_url'],
             'age_limit': age_limit,
             'view_count': view_count,
@@ -165,35 +173,42 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
                 'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                 video_id, note=False)
         except ExtractorError as err:
-            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
+            self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
             return {}
         info = json.loads(sub_list)
         if (info['total'] > 0):
             sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
             return sub_lang_list
-        self._downloader.report_warning(u'video doesn\'t have subtitles')
+        self._downloader.report_warning('video doesn\'t have subtitles')
         return {}
 
 
 class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
-    IE_NAME = u'dailymotion:playlist'
+    IE_NAME = 'dailymotion:playlist'
     _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
     _MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
     _PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
+    _TESTS = [{
+        'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
+        'info_dict': {
+            'title': 'SPORT',
+        },
+        'playlist_mincount': 20,
+    }]
 
     def _extract_entries(self, id):
         video_ids = []
         for pagenum in itertools.count(1):
             request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
             webpage = self._download_webpage(request,
-                                             id, u'Downloading page %s' % pagenum)
+                                             id, 'Downloading page %s' % pagenum)
 
             video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
 
             if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                 break
         return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
-                   for video_id in orderedSet(video_ids)]
+                for video_id in orderedSet(video_ids)]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -209,9 +224,17 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
 
 
 class DailymotionUserIE(DailymotionPlaylistIE):
-    IE_NAME = u'dailymotion:user'
+    IE_NAME = 'dailymotion:user'
     _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
     _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
+    _TESTS = [{
+        'url': 'https://www.dailymotion.com/user/nqtv',
+        'info_dict': {
+            'id': 'nqtv',
+            'title': 'Rémi Gaillard',
+        },
+        'playlist_mincount': 100,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -219,7 +242,7 @@ class DailymotionUserIE(DailymotionPlaylistIE):
         webpage = self._download_webpage(url, user)
         full_user = unescapeHTML(self._html_search_regex(
             r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
-            webpage, u'user', flags=re.DOTALL))
+            webpage, 'user'))
 
         return {
             '_type': 'playlist',
index 6033cd94a1b251d66e7a3f80034bc58b79fa4b55..45d66e2e663fa376cec8f4fc7931e84006ee30b9 100644 (file)
@@ -11,10 +11,10 @@ from ..utils import (
 
 
 class DaumIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/.*?clipid=(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'
     IE_NAME = 'daum.net'
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
         'info_dict': {
             'id': '52554690',
@@ -24,11 +24,17 @@ class DaumIE(InfoExtractor):
             'upload_date': '20130831',
             'duration': 3868,
         },
-    }
+    }, {
+        'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
+        'only_matching': True,
+    }, {
+        'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
+        video_id = mobj.group('id')
         canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
         webpage = self._download_webpage(canonical_url, video_id)
         full_id = self._search_regex(
@@ -42,7 +48,6 @@ class DaumIE(InfoExtractor):
             'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
             video_id, 'Downloading video formats info')
 
-        self.to_screen(u'%s: Getting video urls' % video_id)
         formats = []
         for format_el in urls.findall('result/output_list/output_list'):
             profile = format_el.attrib['profile']
@@ -52,7 +57,7 @@ class DaumIE(InfoExtractor):
             })
             url_doc = self._download_xml(
                 'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
-                video_id, note=False)
+                video_id, note='Downloading video data for %s format' % profile)
             format_url = url_doc.find('result/url').text
             formats.append({
                 'url': format_url,
diff --git a/youtube_dl/extractor/dbtv.py b/youtube_dl/extractor/dbtv.py
new file mode 100644 (file)
index 0000000..1d3e2ff
--- /dev/null
@@ -0,0 +1,74 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    float_or_none,
+    int_or_none,
+    clean_html,
+)
+
+
+class DBTVIE(InfoExtractor):
+    _VALID_URL = r'http://dbtv\.no/(?P<id>[0-9]+)#(?P<display_id>.+)'
+    _TEST = {
+        'url': 'http://dbtv.no/3649835190001#Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen',
+        'md5': 'b89953ed25dacb6edb3ef6c6f430f8bc',
+        'info_dict': {
+            'id': '33100',
+            'display_id': 'Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen',
+            'ext': 'mp4',
+            'title': 'Skulle teste ut fornøyelsespark, men kollegaen var bare opptatt av bikinikroppen',
+            'description': 'md5:1504a54606c4dde3e4e61fc97aa857e0',
+            'thumbnail': 're:https?://.*\.jpg$',
+            'timestamp': 1404039863.438,
+            'upload_date': '20140629',
+            'duration': 69.544,
+            'view_count': int,
+            'categories': list,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        data = self._download_json(
+            'http://api.dbtv.no/discovery/%s' % video_id, display_id)
+
+        video = data['playlist'][0]
+
+        formats = [{
+            'url': f['URL'],
+            'vcodec': f.get('container'),
+            'width': int_or_none(f.get('width')),
+            'height': int_or_none(f.get('height')),
+            'vbr': float_or_none(f.get('rate'), 1000),
+            'filesize': int_or_none(f.get('size')),
+        } for f in video['renditions'] if 'URL' in f]
+
+        if not formats:
+            for url_key, format_id in [('URL', 'mp4'), ('HLSURL', 'hls')]:
+                if url_key in video:
+                    formats.append({
+                        'url': video[url_key],
+                        'format_id': format_id,
+                    })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video['id'],
+            'display_id': display_id,
+            'title': video['title'],
+            'description': clean_html(video['desc']),
+            'thumbnail': video.get('splash') or video.get('thumb'),
+            'timestamp': float_or_none(video.get('publishedAt'), 1000),
+            'duration': float_or_none(video.get('length'), 1000),
+            'view_count': int_or_none(video.get('views')),
+            'categories': video.get('tags'),
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/deezer.py b/youtube_dl/extractor/deezer.py
new file mode 100644 (file)
index 0000000..c3205ff
--- /dev/null
@@ -0,0 +1,89 @@
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    int_or_none,
+    orderedSet,
+)
+
+
+class DeezerPlaylistIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?deezer\.com/playlist/(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://www.deezer.com/playlist/176747451',
+        'info_dict': {
+            'id': '176747451',
+            'title': 'Best!',
+            'uploader': 'Anonymous',
+            'thumbnail': 're:^https?://cdn-images.deezer.com/images/cover/.*\.jpg$',
+        },
+        'playlist_count': 30,
+        'skip': 'Only available in .de',
+    }
+
+    def _real_extract(self, url):
+        if 'test' not in self._downloader.params:
+            self._downloader.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
+
+        mobj = re.match(self._VALID_URL, url)
+        playlist_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, playlist_id)
+        geoblocking_msg = self._html_search_regex(
+            r'<p class="soon-txt">(.*?)</p>', webpage, 'geoblocking message',
+            default=None)
+        if geoblocking_msg is not None:
+            raise ExtractorError(
+                'Deezer said: %s' % geoblocking_msg, expected=True)
+
+        data_json = self._search_regex(
+            r'naboo\.display\(\'[^\']+\',\s*(.*?)\);\n', webpage, 'data JSON')
+        data = json.loads(data_json)
+
+        playlist_title = data.get('DATA', {}).get('TITLE')
+        playlist_uploader = data.get('DATA', {}).get('PARENT_USERNAME')
+        playlist_thumbnail = self._search_regex(
+            r'<img id="naboo_playlist_image".*?src="([^"]+)"', webpage,
+            'playlist thumbnail')
+
+        preview_pattern = self._search_regex(
+            r"var SOUND_PREVIEW_GATEWAY\s*=\s*'([^']+)';", webpage,
+            'preview URL pattern', fatal=False)
+        entries = []
+        for s in data['SONGS']['data']:
+            puid = s['MD5_ORIGIN']
+            preview_video_url = (preview_pattern
+                .replace('{0}', puid[0])
+                .replace('{1}', puid)
+                .replace('{2}', s['MEDIA_VERSION']))
+            formats = [{
+                'format_id': 'preview',
+                'url': preview_video_url,
+                'preference': -100,  # Only the first 30 seconds
+                'ext': 'mp3',
+            }]
+            self._sort_formats(formats)
+            artists = ', '.join(
+                orderedSet(a['ART_NAME'] for a in s['ARTISTS']))
+            entries.append({
+                'id': s['SNG_ID'],
+                'duration': int_or_none(s.get('DURATION')),
+                'title': '%s - %s' % (artists, s['SNG_TITLE']),
+                'uploader': s['ART_NAME'],
+                'uploader_id': s['ART_ID'],
+                'age_limit': 16 if s.get('EXPLICIT_LYRICS') == '1' else 0,
+                'formats': formats,
+            })
+
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'title': playlist_title,
+            'uploader': playlist_uploader,
+            'thumbnail': playlist_thumbnail,
+            'entries': entries,
+        }
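
The preview URL built above comes from a `SOUND_PREVIEW_GATEWAY` template that the site's own JavaScript expands: `{0}` is the first hex digit of the track's MD5 (presumably selecting a CDN shard), `{1}` the full MD5, and `{2}` the media version. A standalone sketch with a hypothetical gateway URL (the real template is scraped from the page):

    # Hypothetical template in the shape the extractor expects.
    pattern = 'http://cdn-preview-{0}.example.com/stream/{1}-{2}.mp3'

    def build_preview_url(pattern, md5_origin, media_version):
        return (pattern
                .replace('{0}', md5_origin[0])
                .replace('{1}', md5_origin)
                .replace('{2}', media_version))

    print(build_preview_url(pattern, 'd41d8cd98f00b204e9800998ecf8427e', '3'))
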
index c5529f8d455963e7f7ff6ad798f27493a0579410..5e50c63d9aca7d2642239ccf32a5cedd91b05174 100644 (file)
@@ -9,7 +9,7 @@ from .common import InfoExtractor
 class DefenseGouvFrIE(InfoExtractor):
     IE_NAME = 'defense.gouv.fr'
     _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/'
-        r'ligthboxvideo/base-de-medias/webtv/(.*)')
+                  r'ligthboxvideo/base-de-medias/webtv/(.*)')
 
     _TEST = {
         'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
@@ -26,13 +26,13 @@ class DefenseGouvFrIE(InfoExtractor):
         video_id = self._search_regex(
             r"flashvars.pvg_id=\"(\d+)\";",
             webpage, 'ID')
-        
+
         json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
-            + video_id)
+                    + video_id)
         info = self._download_webpage(json_url, title,
-                                                  'Downloading JSON config')
+                                      'Downloading JSON config')
         video_url = json.loads(info)['renditions'][0]['url']
-        
+
         return {'id': video_id,
                 'ext': 'mp4',
                 'url': video_url,
index cb8e0682240bfed9a56a58490f18989f33fef71d..8049779b0a31049f704bae256a3752a9a22ad789 100644 (file)
@@ -30,7 +30,7 @@ class DFBIE(InfoExtractor):
             video_id)
         video_info = player_info.find('video')
 
-        f4m_info = self._download_xml(video_info.find('url').text, video_id)
+        f4m_info = self._download_xml(self._proto_relative_url(video_info.find('url').text.strip()), video_id)
         token_el = f4m_info.find('token')
         manifest_url = token_el.attrib['url'] + '?' + 'hdnea=' + token_el.attrib['auth'] + '&hdcore=3.2.0'
 
index 554df673506a88cada08b9db8300cd15d301087d..52c2d7ddf99873779b7f3223b0acfe4563e2b5d9 100644 (file)
@@ -16,9 +16,9 @@ class DiscoveryIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'MythBusters: Mission Impossible Outtakes',
             'description': ('Watch Jamie Hyneman and Adam Savage practice being'
-                ' each other -- to the point of confusing Jamie\'s dog -- and '
-                'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
-                ' back.'),
+                            ' each other -- to the point of confusing Jamie\'s dog -- and '
+                            'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
+                            ' back.'),
             'duration': 156,
         },
     }
@@ -29,7 +29,7 @@ class DiscoveryIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         video_list_json = self._search_regex(r'var videoListJSON = ({.*?});',
-            webpage, 'video list', flags=re.DOTALL)
+                                             webpage, 'video list', flags=re.DOTALL)
         video_list = json.loads(video_list_json)
         info = video_list['clips'][0]
         formats = []
index 4ca3f37a223a0ce4790a123a79233440ffcc2c24..b88379e066f08bf507a5a461d987b1f1eb27049a 100644 (file)
@@ -7,7 +7,7 @@ class DivxStageIE(NovaMovIE):
     IE_NAME = 'divxstage'
     IE_DESC = 'DivxStage'
 
-    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag)'}
+    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag|to)'}
 
     _HOST = 'www.divxstage.eu'
 
@@ -24,4 +24,4 @@ class DivxStageIE(NovaMovIE):
             'title': 'youtubedl test video',
             'description': 'This is a test video for youtubedl.',
         }
-    }
\ No newline at end of file
+    }
index 5ae0ad5b65cdf12a896a573db78e23494864fd23..638bb33cd81b53fdc2c4bbc31f300a098fb57652 100644 (file)
@@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor):
         video_id = mobj.group('id')
         info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
         info = self._download_json(info_url, video_id)
-        date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds
+        date = time.gmtime(info['dateCreated'] / 1000)  # The timestamp is in milliseconds
 
         return {
             'id': video_id,
index 9f569aa932967910e12b46c0d0269557437d0c79..14b6c00b0bd1c4d3a306b5513477e2bb6c4cd52d 100644 (file)
@@ -5,27 +5,33 @@ import os.path
 import re
 
 from .common import InfoExtractor
-from ..utils import compat_urllib_parse_unquote
+from ..compat import compat_urllib_parse_unquote
+from ..utils import url_basename
 
 
 class DropboxIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/s/(?P<id>[a-zA-Z0-9]{15})/(?P<title>[^?#]*)'
-    _TEST = {
-        'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4',
-        'md5': '8a3d905427a6951ccb9eb292f154530b',
-        'info_dict': {
-            'id': 'nelirfsxnmcfbfh',
-            'ext': 'mp4',
-            'title': 'youtube-dl test video \'ä"BaW_jenozKc'
-        }
-    }
+    _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P<id>[a-zA-Z0-9]{15})/.*'
+    _TESTS = [
+        {
+            'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
+            'info_dict': {
+                'id': 'nelirfsxnmcfbfh',
+                'ext': 'mp4',
+                'title': 'youtube-dl test video \'ä"BaW_jenozKc'
+            }
+        }, {
+            'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
+            'only_matching': True,
+        },
+    ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
-        fn = compat_urllib_parse_unquote(mobj.group('title'))
+        fn = compat_urllib_parse_unquote(url_basename(url))
         title = os.path.splitext(fn)[0]
-        video_url = url + '?dl=1'
+        video_url = re.sub(r'[?&]dl=0', '', url)
+        video_url += ('?' if '?' not in video_url else '&') + 'dl=1'
 
         return {
             'id': video_id,
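
The new Dropbox logic turns a share link into a direct download by stripping any `dl=0` flag and appending `dl=1` with whichever separator the URL still needs. The same transformation in isolation:

    import re

    def to_direct_url(url):
        # Remove an existing dl=0 flag, then append dl=1 with '?' or '&'
        # depending on whether a query string remains.
        url = re.sub(r'[?&]dl=0', '', url)
        return url + ('?' if '?' not in url else '&') + 'dl=1'

    assert (to_direct_url('https://www.dropbox.com/s/abc/v.mp4?dl=0')
            == 'https://www.dropbox.com/s/abc/v.mp4?dl=1')
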
diff --git a/youtube_dl/extractor/drtuber.py b/youtube_dl/extractor/drtuber.py
new file mode 100644 (file)
index 0000000..ca274df
--- /dev/null
@@ -0,0 +1,70 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import str_to_int
+
+
+class DrTuberIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?drtuber\.com/video/(?P<id>\d+)/(?P<display_id>[\w-]+)'
+    _TEST = {
+        'url': 'http://www.drtuber.com/video/1740434/hot-perky-blonde-naked-golf',
+        'md5': '93e680cf2536ad0dfb7e74d94a89facd',
+        'info_dict': {
+            'id': '1740434',
+            'display_id': 'hot-perky-blonde-naked-golf',
+            'ext': 'mp4',
+            'title': 'Hot Perky Blonde Naked Golf',
+            'like_count': int,
+            'dislike_count': int,
+            'comment_count': int,
+            'categories': ['Babe', 'Blonde', 'Erotic', 'Outdoor', 'Softcore', 'Solo'],
+            'thumbnail': 're:https?://.*\.jpg$',
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        video_url = self._html_search_regex(
+            r'<source src="([^"]+)"', webpage, 'video URL')
+
+        title = self._html_search_regex(
+            r'<title>([^<]+)\s*-\s*Free', webpage, 'title')
+
+        thumbnail = self._html_search_regex(
+            r'poster="([^"]+)"',
+            webpage, 'thumbnail', fatal=False)
+
+        like_count = str_to_int(self._html_search_regex(
+            r'<span id="rate_likes">\s*<img[^>]+>\s*<span>([\d,\.]+)</span>',
+            webpage, 'like count', fatal=False))
+        dislike_count = str_to_int(self._html_search_regex(
+            r'<span id="rate_dislikes">\s*<img[^>]+>\s*<span>([\d,\.]+)</span>',
+            webpage, 'dislike count', fatal=False))
+        comment_count = str_to_int(self._html_search_regex(
+            r'<span class="comments_count">([\d,\.]+)</span>',
+            webpage, 'comment count', fatal=False))
+
+        cats_str = self._search_regex(
+            r'<span>Categories:</span><div>(.+?)</div>', webpage, 'categories', fatal=False)
+        categories = [] if not cats_str else re.findall(r'<a title="([^"]+)"', cats_str)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'url': video_url,
+            'title': title,
+            'thumbnail': thumbnail,
+            'like_count': like_count,
+            'dislike_count': dislike_count,
+            'comment_count': comment_count,
+            'categories': categories,
+            'age_limit': self._rta_search(webpage),
+        }
index cdccfd376b80ee5ebc61c25ab4cd00e12dcfc458..93b3c9f36094724cd751cb340f9f925f2d04554c 100644 (file)
@@ -1,14 +1,12 @@
 from __future__ import unicode_literals
 
-import re
-
 from .subtitles import SubtitlesInfoExtractor
 from .common import ExtractorError
 from ..utils import parse_iso8601
 
 
 class DRTVIE(SubtitlesInfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/[^/]+/(?P<id>[\da-z-]+)'
+    _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)+(?P<id>[\da-z-]+)(?:[/#?]|$)'
 
     _TEST = {
         'url': 'http://www.dr.dk/tv/se/partiets-mand/partiets-mand-7-8',
@@ -25,8 +23,7 @@ class DRTVIE(SubtitlesInfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         programcard = self._download_json(
             'http://www.dr.dk/mu/programcard/expanded/%s' % video_id, video_id, 'Downloading video JSON')
@@ -35,7 +32,7 @@ class DRTVIE(SubtitlesInfoExtractor):
 
         title = data['Title']
         description = data['Description']
-        timestamp = parse_iso8601(data['CreatedTime'][:-5])
+        timestamp = parse_iso8601(data['CreatedTime'])
 
         thumbnail = None
         duration = None
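
Dropping the `[:-5]` slice from the DRTV timestamp works because `parse_iso8601` handles numeric timezone suffixes itself. A simplified stand-in showing the idea (the real helper in `youtube_dl/utils.py` supports more date variants):

    import calendar
    import datetime
    import re

    def parse_iso8601_sketch(date_str):
        # Accept 'Z' or '+HH:MM' / '+HHMM' suffixes and fold the offset
        # into the returned Unix timestamp.
        m = re.search(r'(Z|[+-]\d{2}:?\d{2})$', date_str)
        tz = 0
        if m and m.group(1) != 'Z':
            sign = 1 if m.group(1)[0] == '+' else -1
            tz = sign * (int(m.group(1)[1:3]) * 3600 + int(m.group(1)[-2:]) * 60)
        if m:
            date_str = date_str[:m.start()]
        dt = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S')
        return calendar.timegm(dt.timetuple()) - tz

    print(parse_iso8601_sketch('2014-04-07T13:22:30+02:00'))
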
diff --git a/youtube_dl/extractor/dump.py b/youtube_dl/extractor/dump.py
new file mode 100644 (file)
index 0000000..6b65177
--- /dev/null
@@ -0,0 +1,39 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class DumpIE(InfoExtractor):
+    _VALID_URL = r'^https?://(?:www\.)?dump\.com/(?P<id>[a-zA-Z0-9]+)/'
+
+    _TEST = {
+        'url': 'http://www.dump.com/oneus/',
+        'md5': 'ad71704d1e67dfd9e81e3e8b42d69d99',
+        'info_dict': {
+            'id': 'oneus',
+            'ext': 'flv',
+            'title': "He's one of us.",
+            'thumbnail': 're:^https?://.*\.jpg$',
+        },
+    }
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        video_url = self._search_regex(
+            r's1.addVariable\("file",\s*"([^"]+)"', webpage, 'video URL')
+
+        thumb = self._og_search_thumbnail(webpage)
+        title = self._search_regex(r'<b>([^"]+)</b>', webpage, 'title')
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'thumbnail': thumb,
+        }
index 877113d63a7261a284a628a06908c466d446613d..63c2549d37aa528cc79f83822c7a267d391b74cc 100644 (file)
@@ -1,19 +1,21 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
-from ..utils import determine_ext
 
 
 class EbaumsWorldIE(InfoExtractor):
     _VALID_URL = r'https?://www\.ebaumsworld\.com/video/watch/(?P<id>\d+)'
 
     _TEST = {
-        u'url': u'http://www.ebaumsworld.com/video/watch/83367677/',
-        u'file': u'83367677.mp4',
-        u'info_dict': {
-            u'title': u'A Giant Python Opens The Door',
-            u'description': u'This is how nightmares start...',
-            u'uploader': u'jihadpizza',
+        'url': 'http://www.ebaumsworld.com/video/watch/83367677/',
+        'info_dict': {
+            'id': '83367677',
+            'ext': 'mp4',
+            'title': 'A Giant Python Opens The Door',
+            'description': 'This is how nightmares start...',
+            'uploader': 'jihadpizza',
         },
     }
 
@@ -28,7 +30,6 @@ class EbaumsWorldIE(InfoExtractor):
             'id': video_id,
             'title': config.find('title').text,
             'url': video_url,
-            'ext': determine_ext(video_url),
             'description': config.find('description').text,
             'thumbnail': config.find('image').text,
             'uploader': config.find('username').text,
index f8f49a013503cc853c2bf79e345b360af3db7fee..b766e17f26a9e79d654d4b160fa8f98f5f21503f 100644 (file)
@@ -28,7 +28,7 @@ class EHowIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
         video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)',
-            webpage, 'video URL')
+                                       webpage, 'video URL')
         final_url = compat_urllib_parse.unquote(video_url)
         uploader = self._html_search_meta('uploader', webpage)
         title = self._og_search_title(webpage).replace(' | eHow', '')
index 88f5526b8a59491cc6cd40b48fe9451b3fc2d12b..f4c1e2a72bf74821afd476dee86b806c0dbb56c7 100644 (file)
@@ -1,10 +1,13 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 import json
 import random
 import re
 
 from .common import InfoExtractor
 from ..utils import (
-    ExtractorError,
+    compat_str,
 )
 
 
@@ -12,86 +15,98 @@ class EightTracksIE(InfoExtractor):
     IE_NAME = '8tracks'
     _VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
     _TEST = {
-        u"name": u"EightTracks",
-        u"url": u"http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
-        u"playlist": [
+        "name": "EightTracks",
+        "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
+        "info_dict": {
+            'id': '1336550',
+            'display_id': 'youtube-dl-test-tracks-a',
+            "description": "test chars:  \"'/\\ä↭",
+            "title": "youtube-dl test tracks \"'/\\ä↭<>",
+        },
+        "playlist": [
             {
-                u"file": u"11885610.m4a",
-                u"md5": u"96ce57f24389fc8734ce47f4c1abcc55",
-                u"info_dict": {
-                    u"title": u"youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
-                    u"uploader_id": u"ytdl"
+                "md5": "96ce57f24389fc8734ce47f4c1abcc55",
+                "info_dict": {
+                    "id": "11885610",
+                    "ext": "m4a",
+                    "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
+                    "uploader_id": "ytdl"
                 }
             },
             {
-                u"file": u"11885608.m4a",
-                u"md5": u"4ab26f05c1f7291ea460a3920be8021f",
-                u"info_dict": {
-                    u"title": u"youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
-                    u"uploader_id": u"ytdl"
+                "md5": "4ab26f05c1f7291ea460a3920be8021f",
+                "info_dict": {
+                    "id": "11885608",
+                    "ext": "m4a",
+                    "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
+                    "uploader_id": "ytdl"
                 }
             },
             {
-                u"file": u"11885679.m4a",
-                u"md5": u"d30b5b5f74217410f4689605c35d1fd7",
-                u"info_dict": {
-                    u"title": u"youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
-                    u"uploader_id": u"ytdl"
+                "md5": "d30b5b5f74217410f4689605c35d1fd7",
+                "info_dict": {
+                    "id": "11885679",
+                    "ext": "m4a",
+                    "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
+                    "uploader_id": "ytdl"
                 }
             },
             {
-                u"file": u"11885680.m4a",
-                u"md5": u"4eb0a669317cd725f6bbd336a29f923a",
-                u"info_dict": {
-                    u"title": u"youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
-                    u"uploader_id": u"ytdl"
+                "md5": "4eb0a669317cd725f6bbd336a29f923a",
+                "info_dict": {
+                    "id": "11885680",
+                    "ext": "m4a",
+                    "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
+                    "uploader_id": "ytdl"
                 }
             },
             {
-                u"file": u"11885682.m4a",
-                u"md5": u"1893e872e263a2705558d1d319ad19e8",
-                u"info_dict": {
-                    u"title": u"PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
-                    u"uploader_id": u"ytdl"
+                "md5": "1893e872e263a2705558d1d319ad19e8",
+                "info_dict": {
+                    "id": "11885682",
+                    "ext": "m4a",
+                    "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
+                    "uploader_id": "ytdl"
                 }
             },
             {
-                u"file": u"11885683.m4a",
-                u"md5": u"b673c46f47a216ab1741ae8836af5899",
-                u"info_dict": {
-                    u"title": u"PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
-                    u"uploader_id": u"ytdl"
+                "md5": "b673c46f47a216ab1741ae8836af5899",
+                "info_dict": {
+                    "id": "11885683",
+                    "ext": "m4a",
+                    "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
+                    "uploader_id": "ytdl"
                 }
             },
             {
-                u"file": u"11885684.m4a",
-                u"md5": u"1d74534e95df54986da7f5abf7d842b7",
-                u"info_dict": {
-                    u"title": u"phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
-                    u"uploader_id": u"ytdl"
+                "md5": "1d74534e95df54986da7f5abf7d842b7",
+                "info_dict": {
+                    "id": "11885684",
+                    "ext": "m4a",
+                    "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
+                    "uploader_id": "ytdl"
                 }
             },
             {
-                u"file": u"11885685.m4a",
-                u"md5": u"f081f47af8f6ae782ed131d38b9cd1c0",
-                u"info_dict": {
-                    u"title": u"phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
-                    u"uploader_id": u"ytdl"
+                "md5": "f081f47af8f6ae782ed131d38b9cd1c0",
+                "info_dict": {
+                    "id": "11885685",
+                    "ext": "m4a",
+                    "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
+                    "uploader_id": "ytdl"
                 }
             }
         ]
     }
 
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
         playlist_id = mobj.group('id')
 
         webpage = self._download_webpage(url, playlist_id)
 
-        json_like = self._search_regex(r"PAGE.mix = (.*?);\n", webpage, u'trax information', flags=re.DOTALL)
+        json_like = self._search_regex(
+            r"(?s)PAGE.mix = (.*?);\n", webpage, 'trax information')
         data = json.loads(json_like)
 
         session = str(random.randint(0, 1000000000))
@@ -99,21 +114,30 @@ class EightTracksIE(InfoExtractor):
         track_count = data['tracks_count']
         first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
         next_url = first_url
-        res = []
+        entries = []
         for i in range(track_count):
-            api_json = self._download_webpage(next_url, playlist_id,
-                note=u'Downloading song information %s/%s' % (str(i+1), track_count),
-                errnote=u'Failed to download song information')
+            api_json = self._download_webpage(
+                next_url, playlist_id,
+                note='Downloading song information %d/%d' % (i + 1, track_count),
+                errnote='Failed to download song information')
             api_data = json.loads(api_json)
-            track_data = api_data[u'set']['track']
+            track_data = api_data['set']['track']
             info = {
-                'id': track_data['id'],
+                'id': compat_str(track_data['id']),
                 'url': track_data['track_file_stream_url'],
-                'title': track_data['performer'] + u' - ' + track_data['name'],
+                'title': track_data['performer'] + ' - ' + track_data['name'],
                 'raw_title': track_data['name'],
                 'uploader_id': data['user']['login'],
                 'ext': 'm4a',
             }
-            res.append(info)
-            next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
-        return res
+            entries.append(info)
+            next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (
+                session, mix_id, track_data['id'])
+        return {
+            '_type': 'playlist',
+            'entries': entries,
+            'id': compat_str(mix_id),
+            'display_id': playlist_id,
+            'title': data.get('name'),
+            'description': data.get('description'),
+        }
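
The reworked 8tracks loop walks the play-session API one track at a time; each `next` request must echo the id of the track just returned. The pagination pattern on its own, with `download_json` standing in for any callable that fetches a URL and returns the decoded JSON:

    def fetch_tracks(download_json, session, mix_id, track_count):
        next_url = ('http://8tracks.com/sets/%s/play?player=sm'
                    '&mix_id=%s&format=jsonh' % (session, mix_id))
        for _ in range(track_count):
            track = download_json(next_url)['set']['track']
            yield track
            # The follow-up request has to name the track just played.
            next_url = ('http://8tracks.com/sets/%s/next?player=sm'
                        '&mix_id=%s&format=jsonh&track_id=%s'
                        % (session, mix_id, track['id']))
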
diff --git a/youtube_dl/extractor/einthusan.py b/youtube_dl/extractor/einthusan.py
new file mode 100644 (file)
index 0000000..5dfea0d
--- /dev/null
@@ -0,0 +1,61 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class EinthusanIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?einthusan\.com/movies/watch.php\?([^#]*?)id=(?P<id>[0-9]+)'
+    _TESTS = [
+        {
+            'url': 'http://www.einthusan.com/movies/watch.php?id=2447',
+            'md5': 'af244f4458cd667205e513d75da5b8b1',
+            'info_dict': {
+                'id': '2447',
+                'ext': 'mp4',
+                'title': 'Ek Villain',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'description': 'md5:9d29fc91a7abadd4591fb862fa560d93',
+            }
+        },
+        {
+            'url': 'http://www.einthusan.com/movies/watch.php?id=1671',
+            'md5': 'ef63c7a803e22315880ed182c10d1c5c',
+            'info_dict': {
+                'id': '1671',
+                'ext': 'mp4',
+                'title': 'Soodhu Kavvuum',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'description': 'md5:05d8a0c0281a4240d86d76e14f2f4d51',
+            }
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+
+        video_title = self._html_search_regex(
+            r'<h1><a class="movie-title".*?>(.*?)</a></h1>', webpage, 'title')
+
+        video_url = self._html_search_regex(
+            r'''(?s)jwplayer\("mediaplayer"\)\.setup\({.*?'file': '([^']+)'.*?}\);''',
+            webpage, 'video url')
+
+        description = self._html_search_meta('description', webpage)
+        thumbnail = self._html_search_regex(
+            r'''<a class="movie-cover-wrapper".*?><img src=["'](.*?)["'].*?/></a>''',
+            webpage, "thumbnail url", fatal=False)
+        if thumbnail is not None:
+            thumbnail = thumbnail.replace('..', 'http://www.einthusan.com')
+
+        return {
+            'id': video_id,
+            'title': video_title,
+            'url': video_url,
+            'thumbnail': thumbnail,
+            'description': description,
+        }
index 4ba323148cc9e81e1e77c9ce12fba65ac8030638..2cba825325ad46caf931c5382c54dab263b7c15a 100644 (file)
@@ -1,4 +1,6 @@
 # encoding: utf-8
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -7,20 +9,20 @@ from ..utils import ExtractorError
 
 
 class EitbIE(InfoExtractor):
-    IE_NAME = u'eitb.tv'
+    IE_NAME = 'eitb.tv'
     _VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'
 
     _TEST = {
-        u'add_ie': ['Brightcove'],
-        u'url': u'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
-        u'md5': u'edf4436247185adee3ea18ce64c47998',
-        u'info_dict': {
-            u'id': u'2743577154001',
-            u'ext': u'mp4',
-            u'title': u'60 minutos (Lasa y Zabala, 30 años)',
+        'add_ie': ['Brightcove'],
+        'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
+        'md5': 'edf4436247185adee3ea18ce64c47998',
+        'info_dict': {
+            'id': '2743577154001',
+            'ext': 'mp4',
+            'title': '60 minutos (Lasa y Zabala, 30 años)',
            # All videos from eitb have this description in the Brightcove info
-            u'description': u'.',
-            u'uploader': u'Euskal Telebista',
+            'description': '.',
+            'uploader': 'Euskal Telebista',
         },
     }
 
@@ -30,7 +32,7 @@ class EitbIE(InfoExtractor):
         webpage = self._download_webpage(url, chapter_id)
         bc_url = BrightcoveIE._extract_brightcove_url(webpage)
         if bc_url is None:
-            raise ExtractorError(u'Could not extract the Brightcove url')
+            raise ExtractorError('Could not extract the Brightcove url')
        # The BrightcoveExperience object doesn't contain the video id,
        # so we set it manually
         bc_url += '&%40videoPlayer={0}'.format(chapter_id)
diff --git a/youtube_dl/extractor/ellentv.py b/youtube_dl/extractor/ellentv.py
new file mode 100644 (file)
index 0000000..3e79236
--- /dev/null
@@ -0,0 +1,79 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    parse_iso8601,
+)
+
+
+class EllenTVIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?ellentv\.com/videos/(?P<id>[a-z0-9_-]+)'
+    _TEST = {
+        'url': 'http://www.ellentv.com/videos/0-7jqrsr18/',
+        'md5': 'e4af06f3bf0d5f471921a18db5764642',
+        'info_dict': {
+            'id': '0-7jqrsr18',
+            'ext': 'mp4',
+            'title': 'What\'s Wrong with These Photos? A Whole Lot',
+            'timestamp': 1406876400,
+            'upload_date': '20140801',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        timestamp = parse_iso8601(self._search_regex(
+            r'<span class="publish-date"><time datetime="([^"]+)">',
+            webpage, 'timestamp'))
+
+        return {
+            'id': video_id,
+            'title': self._og_search_title(webpage),
+            'url': self._html_search_meta('VideoURL', webpage, 'url'),
+            'timestamp': timestamp,
+        }
+
+
+class EllenTVClipsIE(InfoExtractor):
+    IE_NAME = 'EllenTV:clips'
+    _VALID_URL = r'https?://(?:www\.)?ellentv\.com/episodes/(?P<id>[a-z0-9_-]+)'
+    _TEST = {
+        'url': 'http://www.ellentv.com/episodes/meryl-streep-vanessa-hudgens/',
+        'info_dict': {
+            'id': 'meryl-streep-vanessa-hudgens',
+            'title': 'Meryl Streep, Vanessa Hudgens',
+        },
+        'playlist_mincount': 9,
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        playlist_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, playlist_id)
+        playlist = self._extract_playlist(webpage)
+
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'title': self._og_search_title(webpage),
+            'entries': self._extract_entries(playlist)
+        }
+
+    def _extract_playlist(self, webpage):
+        json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
+        try:
+            return json.loads("[{" + json_string + "}]")
+        except ValueError as ve:
+            raise ExtractorError('Failed to download JSON', cause=ve)
+
+    def _extract_entries(self, playlist):
+        return [self.url_result(item['url'], 'EllenTV') for item in playlist]
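
`_extract_playlist` above captures only the inside of `addClips([{ ... }])` so the regex body can stay non-greedy, then re-adds the brackets before handing the string to `json.loads`. The same trick against a toy page:

    import json
    import re

    page = 'playerView.addClips([{"url": "http://example.com/clip1"}]);'

    inner = re.search(r'playerView\.addClips\(\[\{(.*?)\}\]\);', page).group(1)
    clips = json.loads('[{' + inner + '}]')
    print(clips[0]['url'])
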
index e6952588fbdfa08167935fc2b1c0381804328943..70f8efe27578c4d43b27378a4a2c80d495a7488c 100644 (file)
@@ -1,54 +1,25 @@
 from __future__ import unicode_literals
 
-import re
+from .tnaflix import TNAFlixIE
 
-from .common import InfoExtractor
 
+class EMPFlixIE(TNAFlixIE):
+    _VALID_URL = r'^https?://www\.empflix\.com/videos/(?P<display_id>[0-9a-zA-Z-]+)-(?P<id>[0-9]+)\.html'
+
+    _TITLE_REGEX = r'name="title" value="(?P<title>[^"]*)"'
+    _DESCRIPTION_REGEX = r'name="description" value="([^"]*)"'
+    _CONFIG_REGEX = r'flashvars\.config\s*=\s*escape\("([^"]+)"'
 
-class EmpflixIE(InfoExtractor):
-    _VALID_URL = r'^https?://www\.empflix\.com/videos/.*?-(?P<id>[0-9]+)\.html'
     _TEST = {
         'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
         'md5': 'b1bc15b6412d33902d6e5952035fcabc',
         'info_dict': {
             'id': '33051',
+            'display_id': 'Amateur-Finger-Fuck',
             'ext': 'mp4',
             'title': 'Amateur Finger Fuck',
             'description': 'Amateur solo finger fucking.',
+            'thumbnail': 're:https?://.*\.jpg$',
             'age_limit': 18,
         }
     }
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        webpage = self._download_webpage(url, video_id)
-        age_limit = self._rta_search(webpage)
-
-        video_title = self._html_search_regex(
-            r'name="title" value="(?P<title>[^"]*)"', webpage, 'title')
-        video_description = self._html_search_regex(
-            r'name="description" value="([^"]*)"', webpage, 'description', fatal=False)
-
-        cfg_url = self._html_search_regex(
-            r'flashvars\.config = escape\("([^"]+)"',
-            webpage, 'flashvars.config')
-
-        cfg_xml = self._download_xml(
-            cfg_url, video_id, note='Downloading metadata')
-
-        formats = [
-            {
-                'url': item.find('videoLink').text,
-                'format_id': item.find('res').text,
-            } for item in cfg_xml.findall('./quality/item')
-        ]
-
-        return {
-            'id': video_id,
-            'title': video_title,
-            'description': video_description,
-            'formats': formats,
-            'age_limit': age_limit,
-        }
diff --git a/youtube_dl/extractor/eporner.py b/youtube_dl/extractor/eporner.py
new file mode 100644 (file)
index 0000000..4de8d4b
--- /dev/null
@@ -0,0 +1,73 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    str_to_int,
+)
+
+
+class EpornerIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<display_id>[\w-]+)'
+    _TEST = {
+        'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
+        'md5': '39d486f046212d8e1b911c52ab4691f8',
+        'info_dict': {
+            'id': '95008',
+            'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
+            'ext': 'mp4',
+            'title': 'Infamous Tiffany Teen Strip Tease Video',
+            'duration': 1838,
+            'view_count': int,
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+        title = self._html_search_regex(
+            r'<title>(.*?) - EPORNER', webpage, 'title')
+
+        redirect_code = self._html_search_regex(
+            r'<script type="text/javascript" src="/config5/%s/([a-f\d]+)/">' % video_id,
+            webpage, 'redirect_code')
+        redirect_url = 'http://www.eporner.com/config5/%s/%s' % (video_id, redirect_code)
+        player_code = self._download_webpage(
+            redirect_url, display_id, note='Downloading player config')
+
+        sources = self._search_regex(
+            r'(?s)sources\s*:\s*\[\s*({.+?})\s*\]', player_code, 'sources')
+
+        formats = []
+        for video_url, format_id in re.findall(r'file\s*:\s*"([^"]+)",\s*label\s*:\s*"([^"]+)"', sources):
+            fmt = {
+                'url': video_url,
+                'format_id': format_id,
+            }
+            m = re.search(r'^(\d+)', format_id)
+            if m:
+                fmt['height'] = int(m.group(1))
+            formats.append(fmt)
+        self._sort_formats(formats)
+
+        duration = parse_duration(self._html_search_meta('duration', webpage))
+        view_count = str_to_int(self._search_regex(
+            r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
+            webpage, 'view count', fatal=False))
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'duration': duration,
+            'view_count': view_count,
+            'formats': formats,
+            'age_limit': self._rta_search(webpage),
+        }
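
Labels in the eporner player config look like `480p` or `720p`, so a leading integer, when present, doubles as the frame height for `_sort_formats`. In isolation:

    import re

    def with_heights(pairs):
        formats = []
        for video_url, format_id in pairs:
            fmt = {'url': video_url, 'format_id': format_id}
            m = re.search(r'^(\d+)', format_id)
            if m:
                # '720p' -> height 720; purely textual labels get no height.
                fmt['height'] = int(m.group(1))
            formats.append(fmt)
        return formats

    print(with_heights([('http://example.com/v.mp4', '720p')]))
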
index 272dfe1f643208a31635dade0e561c8eb009aab7..476fc22b93424b13255d5eec3578eb985dbfbdfd 100644 (file)
@@ -36,7 +36,7 @@ class EscapistIE(InfoExtractor):
             r'<meta name="description" content="([^"]*)"',
             webpage, 'description', fatal=False)
 
-        playerUrl = self._og_search_video_url(webpage, name=u'player URL')
+        playerUrl = self._og_search_video_url(webpage, name='player URL')
 
         title = self._html_search_regex(
             r'<meta name="title" content="([^"]*)"',
index 12829cbcc0631de4eea1bc4d84d4702b6da54281..d237a82813ea2556175e32a882d87bd5d1831924 100644 (file)
@@ -12,10 +12,11 @@ from ..utils import (
 class EveryonesMixtapeIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$'
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
-        'file': '5bfseWNmlds.mp4',
         "info_dict": {
+            'id': '5bfseWNmlds',
+            'ext': 'mp4',
             "title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
             "uploader": "FKR.TV",
             "uploader_id": "frenchkissrecords",
@@ -25,7 +26,14 @@ class EveryonesMixtapeIE(InfoExtractor):
         'params': {
             'skip_download': True,  # This is simply YouTube
         }
-    }
+    }, {
+        'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi',
+        'info_dict': {
+            'id': 'm7m0jJAbMQi',
+            'title': 'Driving',
+        },
+        'playlist_count': 24
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
diff --git a/youtube_dl/extractor/expotv.py b/youtube_dl/extractor/expotv.py
new file mode 100644 (file)
index 0000000..a38b773
--- /dev/null
@@ -0,0 +1,73 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    unified_strdate,
+)
+
+
+class ExpoTVIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])'
+    _TEST = {
+        'url': 'http://www.expotv.com/videos/reviews/1/24/LinneCardscom/17561',
+        'md5': '2985e6d7a392b2f7a05e0ca350fe41d0',
+        'info_dict': {
+            'id': '17561',
+            'ext': 'mp4',
+            'upload_date': '20060212',
+            'title': 'My Favorite Online Scrapbook Store',
+            'view_count': int,
+            'description': 'You\'ll find most everything you need at this virtual store front.',
+            'uploader': 'Anna T.',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        player_key = self._search_regex(
+            r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
+        config_url = 'http://client.expotv.com/video/config/%s/%s' % (
+            video_id, player_key)
+        config = self._download_json(
+            config_url, video_id,
+            note='Downloading video configuration')
+
+        formats = [{
+            'url': fcfg['file'],
+            'height': int_or_none(fcfg.get('height')),
+            'format_note': fcfg.get('label'),
+            'ext': self._search_regex(
+                r'filename=.*\.([a-z0-9_A-Z]+)&', fcfg['file'],
+                'file extension', default=None),
+        } for fcfg in config['sources']]
+        self._sort_formats(formats)
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = config.get('image')
+        view_count = int_or_none(self._search_regex(
+            r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view count'))
+        uploader = self._search_regex(
+            r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader',
+            fatal=False)
+        upload_date = unified_strdate(self._search_regex(
+            r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 'upload date',
+            fatal=False))
+
+        return {
+            'id': video_id,
+            'formats': formats,
+            'title': title,
+            'description': description,
+            'view_count': view_count,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'upload_date': upload_date,
+        }
index 14a196ffc63336ae7d016b035cfb28cc7f7d28a0..aacbf14141f6d5109d265b8e4dfa37883cee81ab 100644 (file)
@@ -7,6 +7,7 @@ from ..utils import (
     compat_urllib_parse_urlparse,
     compat_urllib_request,
     compat_urllib_parse,
+    str_to_int,
 )
 
 
@@ -20,6 +21,7 @@ class ExtremeTubeIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Music Video 14 british euro brit european cumshots swallow',
             'uploader': 'unknown',
+            'view_count': int,
             'age_limit': 18,
         }
     }, {
@@ -39,8 +41,12 @@ class ExtremeTubeIE(InfoExtractor):
         video_title = self._html_search_regex(
             r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
         uploader = self._html_search_regex(
-            r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, 'uploader',
-            fatal=False)
+            r'Uploaded by:\s*</strong>\s*(.+?)\s*</div>',
+            webpage, 'uploader', fatal=False)
+        view_count = str_to_int(self._html_search_regex(
+            r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
+            webpage, 'view count', fatal=False))
+
         video_url = compat_urllib_parse.unquote(self._html_search_regex(
             r'video_url=(.+?)&amp;', webpage, 'video_url'))
         path = compat_urllib_parse_urlparse(video_url).path
@@ -51,6 +57,7 @@ class ExtremeTubeIE(InfoExtractor):
             'id': video_id,
             'title': video_title,
             'uploader': uploader,
+            'view_count': view_count,
             'url': video_url,
             'format': format,
             'format_id': format,
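
`str_to_int` exists because view counters on these pages carry thousands separators. A simplified version of the helper (the real one in `youtube_dl/utils.py` strips a few more characters):

    import re

    def str_to_int(int_str):
        if int_str is None:
            return None
        # '1,234,567' and '1.234.567' both become 1234567.
        return int(re.sub(r'[,\.]', '', int_str))

    assert str_to_int('1,234,567') == 1234567
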
index f0cd8f1565b7e7b1b5220ba0f68d0b9225d953e6..1ad4e77a8a334dc0bfec62a0fb4752676e2e1435 100644 (file)
@@ -5,39 +5,52 @@ import re
 import socket
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_http_client,
     compat_str,
     compat_urllib_error,
     compat_urllib_parse,
     compat_urllib_request,
-    urlencode_postdata,
-
+)
+from ..utils import (
     ExtractorError,
+    int_or_none,
+    limit_length,
+    urlencode_postdata,
 )
 
 
 class FacebookIE(InfoExtractor):
     _VALID_URL = r'''(?x)
         https?://(?:\w+\.)?facebook\.com/
-        (?:[^#?]*\#!/)?
-        (?:video/video\.php|photo\.php|video/embed)\?(?:.*?)
+        (?:[^#]*?\#!/)?
+        (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
         (?:v|video_id)=(?P<id>[0-9]+)
         (?:.*)'''
     _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
     _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
     _NETRC_MACHINE = 'facebook'
     IE_NAME = 'facebook'
-    _TEST = {
-        'url': 'https://www.facebook.com/photo.php?v=120708114770723',
-        'md5': '48975a41ccc4b7a581abd68651c1a5a8',
+    _TESTS = [{
+        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
+        'md5': '6a40d33c0eccbb1af76cf0485a052659',
         'info_dict': {
-            'id': '120708114770723',
+            'id': '637842556329505',
             'ext': 'mp4',
-            'duration': 279,
-            'title': 'PEOPLE ARE AWESOME 2013',
+            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
         }
-    }
+    }, {
+        'note': 'Video without discernible title',
+        'url': 'https://www.facebook.com/video.php?v=274175099429670',
+        'info_dict': {
+            'id': '274175099429670',
+            'ext': 'mp4',
+            'title': 'Facebook video #274175099429670',
+        }
+    }, {
+        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
+        'only_matching': True,
+    }]
 
     def _login(self):
         (useremail, password) = self._get_login_info()
@@ -47,8 +60,8 @@ class FacebookIE(InfoExtractor):
         login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
         login_page_req.add_header('Cookie', 'locale=en_US')
         login_page = self._download_webpage(login_page_req, None,
-            note='Downloading login page',
-            errnote='Unable to download login page')
+                                            note='Downloading login page',
+                                            errnote='Unable to download login page')
         lsd = self._search_regex(
             r'<input type="hidden" name="lsd" value="([^"]*)"',
             login_page, 'lsd')
@@ -64,25 +77,26 @@ class FacebookIE(InfoExtractor):
             'legacy_return': '1',
             'timezone': '-60',
             'trynum': '1',
-            }
+        }
         request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:
             login_results = self._download_webpage(request, None,
-                note='Logging in', errnote='unable to fetch login page')
+                                                   note='Logging in', errnote='unable to fetch login page')
             if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                 return
 
             check_form = {
                 'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
-                'h': self._search_regex(r'name="h" value="(\w*?)"', login_results, 'h'),
+                'h': self._search_regex(
+                    r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
                 'name_action_selected': 'dont_save',
             }
             check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
             check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             check_response = self._download_webpage(check_req, None,
-                note='Confirming login')
+                                                    note='Confirming login')
             if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to log in via your browser and authorize the login.')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -93,9 +107,7 @@ class FacebookIE(InfoExtractor):
         self._login()
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
         webpage = self._download_webpage(url, video_id)
 
@@ -121,12 +133,20 @@ class FacebookIE(InfoExtractor):
             raise ExtractorError('Cannot find video URL')
 
         video_title = self._html_search_regex(
-            r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title')
+            r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title',
+            fatal=False)
+        if not video_title:
+            video_title = self._html_search_regex(
+                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
+                webpage, 'alternative title', default=None)
+            video_title = limit_length(video_title, 80)
+        if not video_title:
+            video_title = 'Facebook video #%s' % video_id
 
         return {
             'id': video_id,
             'title': video_title,
             'url': video_url,
-            'duration': int(video_data['video_duration']),
-            'thumbnail': video_data['thumbnail_src'],
+            'duration': int_or_none(video_data.get('video_duration')),
+            'thumbnail': video_data.get('thumbnail_src'),
         }
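
The new Facebook title logic falls back from the page header to a caption trimmed with `limit_length`, and finally to a generated placeholder, so extraction no longer hard-fails on caption-only photo posts. A sketch of the chain, with a simplified `limit_length`:

    def limit_length(s, length):
        # Simplified stand-in for youtube_dl.utils.limit_length: append
        # an ellipsis when the string exceeds the limit.
        if s is not None and len(s) > length:
            return s[:length - 3] + '...'
        return s

    def pick_title(header_title, caption, video_id):
        return (header_title
                or limit_length(caption, 80)
                or 'Facebook video #%s' % video_id)

    assert pick_title(None, None, '123') == 'Facebook video #123'
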
index c6ab6952e84dc9074816f28ebb7fe6d8ce02cb47..3c39ca451a38e69a822968911e758847657380e9 100644 (file)
@@ -1,49 +1,48 @@
 # encoding: utf-8
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import (
-    determine_ext,
-)
 
 
 class FazIE(InfoExtractor):
-    IE_NAME = u'faz.net'
+    IE_NAME = 'faz.net'
     _VALID_URL = r'https?://www\.faz\.net/multimedia/videos/.*?-(?P<id>\d+)\.html'
 
     _TEST = {
-        u'url': u'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
-        u'file': u'12610585.mp4',
-        u'info_dict': {
-            u'title': u'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
-            u'description': u'md5:1453fbf9a0d041d985a47306192ea253',
+        'url': 'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
+        'info_dict': {
+            'id': '12610585',
+            'ext': 'mp4',
+            'title': 'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
+            'description': 'md5:1453fbf9a0d041d985a47306192ea253',
         },
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        self.to_screen(video_id)
+        video_id = self._match_id(url)
+
         webpage = self._download_webpage(url, video_id)
-        config_xml_url = self._search_regex(r'writeFLV\(\'(.+?)\',', webpage,
-            u'config xml url')
-        config = self._download_xml(config_xml_url, video_id,
-            u'Downloading config xml')
+        config_xml_url = self._search_regex(
+            r'writeFLV\(\'(.+?)\',', webpage, 'config xml url')
+        config = self._download_xml(
+            config_xml_url, video_id, 'Downloading config xml')
 
         encodings = config.find('ENCODINGS')
         formats = []
-        for code in ['LOW', 'HIGH', 'HQ']:
+        for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
             encoding = encodings.find(code)
             if encoding is None:
                 continue
             encoding_url = encoding.find('FILENAME').text
             formats.append({
                 'url': encoding_url,
-                'ext': determine_ext(encoding_url),
                 'format_id': code.lower(),
+                'quality': pref,
             })
+        self._sort_formats(formats)
 
-        descr = self._html_search_regex(r'<p class="Content Copy">(.*?)</p>', webpage, u'description')
+        descr = self._html_search_regex(
+            r'<p class="Content Copy">(.*?)</p>', webpage, 'description', fatal=False)
         return {
             'id': video_id,
             'title': self._og_search_title(webpage),
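
Enumerating `['LOW', 'HIGH', 'HQ']` in the Faz extractor hands `_sort_formats` an explicit ascending `quality` instead of leaving the order to guesswork. The same ranking with a plain dict standing in for the XML `ENCODINGS` node:

    def ranked_formats(encodings):
        formats = []
        for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
            encoding_url = encodings.get(code)
            if encoding_url is None:
                continue
            # Later list positions get higher 'quality' values.
            formats.append({
                'url': encoding_url,
                'format_id': code.lower(),
                'quality': pref,
            })
        return formats

    print(ranked_formats({'LOW': 'http://example.com/lo.mp4',
                          'HQ': 'http://example.com/hq.mp4'}))
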
index c663a0f81d08650b24616d2c3c2daef262c95aa2..6f5d23559b78dfb621bbf6d819612d20b96fd597 100644 (file)
@@ -40,7 +40,7 @@ class FC2IE(InfoExtractor):
 
         info_url = (
             "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
-            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.','%2E')))
+            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))
 
         info_webpage = self._download_webpage(
             info_url, video_id, note='Downloading info page')
index 6d73c8a4a32f83975025a0b1ed932fc291176f8a..af439ccfeefeade46f75b693627b09ba6ed830d6 100644 (file)
@@ -42,7 +42,6 @@ class FiredriveIE(InfoExtractor):
         fields = dict(re.findall(r'''(?x)<input\s+
             type="hidden"\s+
             name="([^"]+)"\s+
-            (?:id="[^"]+"\s+)?
             value="([^"]*)"
             ''', webpage))
 
@@ -66,7 +65,7 @@ class FiredriveIE(InfoExtractor):
         ext = self._search_regex(r'type:\s?\'([^\']+)\',',
                                  webpage, 'extension', fatal=False)
         video_url = self._search_regex(
-            r'file:\s?\'(http[^\']+)\',', webpage, 'file url')
+            r'file:\s?loadURL\(\'(http[^\']+)\'\),', webpage, 'file url')
 
         formats = [{
             'format_id': 'sd',
index c2e987ff72d7847d6c6ec955943763ea9f231e92..08ceee4ed7d5e8b96b81e7d8b9b823a5ea18e120 100644 (file)
@@ -44,9 +44,9 @@ class FirstTVIE(InfoExtractor):
         duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)
 
         like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]',
-            webpage, 'like count', fatal=False)
+                                             webpage, 'like count', fatal=False)
         dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]',
-            webpage, 'dislike count', fatal=False)
+                                                webpage, 'dislike count', fatal=False)
 
         return {
             'id': video_id,
@@ -57,4 +57,4 @@ class FirstTVIE(InfoExtractor):
             'duration': int_or_none(duration),
             'like_count': int_or_none(like_count),
             'dislike_count': int_or_none(dislike_count),
-        }
\ No newline at end of file
+        }
index 3a50bab5c9bd04c9d176a88be080033368fe46c7..f9c127ce67bd7edefd22e7e7953ecce57e888d15 100644 (file)
@@ -50,7 +50,7 @@ class FiveMinIE(InfoExtractor):
         video_id = mobj.group('id')
         embed_url = 'https://embed.5min.com/playerseed/?playList=%s' % video_id
         embed_page = self._download_webpage(embed_url, video_id,
-            'Downloading embed page')
+                                            'Downloading embed page')
         sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid')
         query = compat_urllib_parse.urlencode({
             'func': 'GetResults',
index d7048c8c1ae7e6ba149552a7b32ec2ab42c8a3f2..d09d1c13a70cffb725329f69368f37359d7f7a08 100644 (file)
@@ -1,25 +1,27 @@
+from __future__ import unicode_literals
+
 import re
 import random
 import json
 
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
     get_element_by_id,
     clean_html,
 )
 
 
 class FKTVIE(InfoExtractor):
-    IE_NAME = u'fernsehkritik.tv'
-    _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik\.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'
+    IE_NAME = 'fernsehkritik.tv'
+    _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'
 
     _TEST = {
-        u'url': u'http://fernsehkritik.tv/folge-1',
-        u'file': u'00011.flv',
-        u'info_dict': {
-            u'title': u'Folge 1 vom 10. April 2007',
-            u'description': u'md5:fb4818139c7cfe6907d4b83412a6864f',
+        'url': 'http://fernsehkritik.tv/folge-1',
+        'info_dict': {
+            'id': '00011',
+            'ext': 'flv',
+            'title': 'Folge 1 vom 10. April 2007',
+            'description': 'md5:fb4818139c7cfe6907d4b83412a6864f',
         },
     }
 
@@ -30,9 +32,9 @@ class FKTVIE(InfoExtractor):
         server = random.randint(2, 4)
         video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode
         start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
-            episode)
+                                               episode)
         playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
-            u'playlist', flags=re.DOTALL)
+                                      'playlist', flags=re.DOTALL)
         files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
         # TODO: return a single multipart video
         videos = []
@@ -42,7 +44,6 @@ class FKTVIE(InfoExtractor):
             videos.append({
                 'id': video_id,
                 'url': video_url,
-                'ext': determine_ext(video_url),
                 'title': clean_html(get_element_by_id('eptitle', start_webpage)),
                 'description': clean_html(get_element_by_id('contentlist', start_webpage)),
                 'thumbnail': video_thumbnail
@@ -51,14 +52,15 @@ class FKTVIE(InfoExtractor):
 
 
 class FKTVPosteckeIE(InfoExtractor):
-    IE_NAME = u'fernsehkritik.tv:postecke'
-    _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
+    IE_NAME = 'fernsehkritik.tv:postecke'
+    _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
     _TEST = {
-        u'url': u'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
-        u'file': u'0120.flv',
-        u'md5': u'262f0adbac80317412f7e57b4808e5c4',
-        u'info_dict': {
-            u"title": u"Postecke 120"
+        'url': 'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
+        'md5': '262f0adbac80317412f7e57b4808e5c4',
+        'info_dict': {
+            'id': '0120',
+            'ext': 'flv',
+            'title': 'Postecke 120',
         }
     }
 
@@ -71,8 +73,7 @@ class FKTVPosteckeIE(InfoExtractor):
         video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
         video_title = 'Postecke %d' % episode
         return {
-            'id':       video_id,
-            'url':      video_url,
-            'ext':      determine_ext(video_url),
-            'title':    video_title,
+            'id': video_id,
+            'url': video_url,
+            'title': video_title,
         }
diff --git a/youtube_dl/extractor/flickr.py b/youtube_dl/extractor/flickr.py
index 21ea5ec2bf1c499149809971b780daa9d10d0291..0c858b6544b919b1b569b4c4102447631298046e 100644
--- a/youtube_dl/extractor/flickr.py
+++ b/youtube_dl/extractor/flickr.py
@@ -10,15 +10,15 @@ from ..utils import (
 
 
 class FlickrIE(InfoExtractor):
-    """Information Extractor for Flickr videos"""
-    _VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
+    _VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
     _TEST = {
         'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
-        'file': '5645318632.mp4',
         'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b',
         'info_dict': {
-            "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.", 
-            "uploader_id": "forestwander-nature-pictures", 
+            'id': '5645318632',
+            'ext': 'mp4',
+            "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
+            "uploader_id": "forestwander-nature-pictures",
             "title": "Dark Hollow Waterfalls"
         }
     }
@@ -37,7 +37,7 @@ class FlickrIE(InfoExtractor):
         first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')
 
         node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
-            first_xml, 'node_id')
+                                          first_xml, 'node_id')
 
         second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
         second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')
@@ -49,12 +49,12 @@ class FlickrIE(InfoExtractor):
             raise ExtractorError('Unable to extract video url')
         video_url = mobj.group(1) + unescapeHTML(mobj.group(2))
 
-        return [{
-            'id':          video_id,
-            'url':         video_url,
-            'ext':         'mp4',
-            'title':       self._og_search_title(webpage),
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': self._og_search_title(webpage),
             'description': self._og_search_description(webpage),
-            'thumbnail':   self._og_search_thumbnail(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
             'uploader_id': video_uploader_id,
-        }]
+        }
diff --git a/youtube_dl/extractor/folketinget.py b/youtube_dl/extractor/folketinget.py
new file mode 100644
index 0000000..68e2db9
--- /dev/null
+++ b/youtube_dl/extractor/folketinget.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_parse_qs
+from ..utils import (
+    int_or_none,
+    parse_duration,
+    parse_iso8601,
+    xpath_text,
+)
+
+
+class FolketingetIE(InfoExtractor):
+    IE_DESC = 'Folketinget (ft.dk; Danish parliament)'
+    _VALID_URL = r'https?://(?:www\.)?ft\.dk/webtv/video/[^?#]*?\.(?P<id>[0-9]+)\.aspx'
+    _TEST = {
+        'url': 'http://www.ft.dk/webtv/video/20141/eru/td.1165642.aspx?as=1#player',
+        'info_dict': {
+            'id': '1165642',
+            'ext': 'mp4',
+            'title': 'Åbent samråd i Erhvervsudvalget',
+            'description': 'Åbent samråd med erhvervs- og vækstministeren om regeringens politik på teleområdet',
+            'view_count': int,
+            'width': 768,
+            'height': 432,
+            'tbr': 928000,
+            'timestamp': 1416493800,
+            'upload_date': '20141120',
+            'duration': 3960,
+        },
+        'params': {
+            'skip_download': 'rtmpdump required',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._og_search_title(webpage)
+        description = self._html_search_regex(
+            r'(?s)<div class="video-item-agenda"[^>]*>(.*?)<',
+            webpage, 'description', fatal=False)
+
+        player_params = compat_parse_qs(self._search_regex(
+            r'<embed src="http://ft\.arkena\.tv/flash/ftplayer\.swf\?([^"]+)"',
+            webpage, 'player params'))
+        xml_url = player_params['xml'][0]
+        doc = self._download_xml(xml_url, video_id)
+
+        timestamp = parse_iso8601(xpath_text(doc, './/date'))
+        duration = parse_duration(xpath_text(doc, './/duration'))
+        width = int_or_none(xpath_text(doc, './/width'))
+        height = int_or_none(xpath_text(doc, './/height'))
+        view_count = int_or_none(xpath_text(doc, './/views'))
+
+        formats = [{
+            'format_id': n.attrib['bitrate'],
+            'url': xpath_text(n, './url', fatal=True),
+            'tbr': int_or_none(n.attrib['bitrate']),
+        } for n in doc.findall('.//streams/stream')]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'description': description,
+            'timestamp': timestamp,
+            'width': width,
+            'height': height,
+            'duration': duration,
+            'view_count': view_count,
+        }
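
Note: `compat_parse_qs` above is youtube-dl's portability shim around the standard query-string parser (backported on Python 2); it percent-decodes values and maps every key to a *list*, which is why the extractor indexes `player_params['xml'][0]`. The same lookup in isolation (query string invented):

    try:
        from urllib.parse import parse_qs  # Python 3
    except ImportError:
        from urlparse import parse_qs  # Python 2

    query = 'xml=http%3A%2F%2Fft.arkena.tv%2Fclip.xml&autoplay=false'
    params = parse_qs(query)
    print(params['xml'][0])  # http://ft.arkena.tv/clip.xml
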
diff --git a/youtube_dl/extractor/fourtube.py b/youtube_dl/extractor/fourtube.py
index 7d56b9be93a0332e70381c3a46b748c6d39e5b6a..b22ce2acb5d1beae29d1b298b4ef63c8e7639e5f 100644
--- a/youtube_dl/extractor/fourtube.py
+++ b/youtube_dl/extractor/fourtube.py
@@ -55,7 +55,7 @@ class FourTubeIE(InfoExtractor):
         description = self._html_search_meta('description', webpage, 'description')
         if description:
             upload_date = self._search_regex(r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date',
-                fatal=False)
+                                             fatal=False)
             if upload_date:
                 upload_date = unified_strdate(upload_date)
             view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False)
@@ -65,9 +65,9 @@ class FourTubeIE(InfoExtractor):
 
         token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources))
         headers = {
-                b'Content-Type': b'application/x-www-form-urlencoded',
-                b'Origin': b'http://www.4tube.com',
-                }
+            b'Content-Type': b'application/x-www-form-urlencoded',
+            b'Origin': b'http://www.4tube.com',
+        }
         token_req = compat_urllib_request.Request(token_url, b'{}', headers)
         tokens = self._download_json(token_req, video_id)
 
@@ -76,7 +76,7 @@ class FourTubeIE(InfoExtractor):
             'format_id': format + 'p',
             'resolution': format + 'p',
             'quality': int(format),
-            } for format in sources]
+        } for format in sources]
 
         self._sort_formats(formats)
 
@@ -92,4 +92,4 @@ class FourTubeIE(InfoExtractor):
             'duration': duration,
             'age_limit': 18,
             'webpage_url': webpage_url,
-        }
\ No newline at end of file
+        }
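
Note: the reindented header dict keeps its `b''` literals deliberately: with `unicode_literals` in effect, bare string literals are text, and Python 2's urllib2/httplib stack is happiest with byte-string header names and values. A minimal sketch of the token POST (the media id and source qualities are invented):

    try:
        from urllib.request import Request  # Python 3
    except ImportError:
        from urllib2 import Request  # Python 2

    token_url = 'http://tkn.4tube.com/12345/desktop/240+360'
    headers = {
        b'Content-Type': b'application/x-www-form-urlencoded',
        b'Origin': b'http://www.4tube.com',
    }
    # Passing a (byte-string) body makes urllib issue a POST.
    token_req = Request(token_url, b'{}', headers)
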
diff --git a/youtube_dl/extractor/foxgay.py b/youtube_dl/extractor/foxgay.py
new file mode 100644
index 0000000..08b8ea3
--- /dev/null
+++ b/youtube_dl/extractor/foxgay.py
@@ -0,0 +1,48 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class FoxgayIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?foxgay\.com/videos/(?:\S+-)?(?P<id>\d+)\.shtml'
+    _TEST = {
+        'url': 'http://foxgay.com/videos/fuck-turkish-style-2582.shtml',
+        'md5': '80d72beab5d04e1655a56ad37afe6841',
+        'info_dict': {
+            'id': '2582',
+            'ext': 'mp4',
+            'title': 'md5:6122f7ae0fc6b21ebdf59c5e083ce25a',
+            'description': 'md5:5e51dc4405f1fd315f7927daed2ce5cf',
+            'age_limit': 18,
+            'thumbnail': 're:https?://.*\.jpg$',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(
+            r'<title>(?P<title>.*?)</title>',
+            webpage, 'title', fatal=False)
+        description = self._html_search_regex(
+            r'<div class="ico_desc"><h2>(?P<description>.*?)</h2>',
+            webpage, 'description', fatal=False)
+
+        # Find the URL for the iFrame which contains the actual video.
+        iframe = self._download_webpage(
+            self._html_search_regex(r'iframe src="(?P<frame>.*?)"', webpage, 'video frame'),
+            video_id)
+        video_url = self._html_search_regex(
+            r"v_path = '(?P<vid>http://.*?)'", iframe, 'url')
+        thumb_url = self._html_search_regex(
+            r"t_path = '(?P<thumb>http://.*?)'", iframe, 'thumbnail', fatal=False)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'description': description,
+            'thumbnail': thumb_url,
+            'age_limit': 18,
+        }
diff --git a/youtube_dl/extractor/foxnews.py b/youtube_dl/extractor/foxnews.py
new file mode 100644
index 0000000..917f76b
--- /dev/null
+++ b/youtube_dl/extractor/foxnews.py
@@ -0,0 +1,94 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_iso8601,
+    int_or_none,
+)
+
+
+class FoxNewsIE(InfoExtractor):
+    _VALID_URL = r'https?://video\.foxnews\.com/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips',
+            'md5': '32aaded6ba3ef0d1c04e238d01031e5e',
+            'info_dict': {
+                'id': '3937480',
+                'ext': 'flv',
+                'title': 'Frozen in Time',
+                'description': 'Doctors baffled by 16-year-old girl that is the size of a toddler',
+                'duration': 265,
+                'timestamp': 1304411491,
+                'upload_date': '20110503',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        {
+            'url': 'http://video.foxnews.com/v/3922535568001/rep-luis-gutierrez-on-if-obamas-immigration-plan-is-legal/#sp=show-clips',
+            'md5': '5846c64a1ea05ec78175421b8323e2df',
+            'info_dict': {
+                'id': '3922535568001',
+                'ext': 'mp4',
+                'title': "Rep. Luis Gutierrez on if Obama's immigration plan is legal",
+                'description': "Congressman discusses the president's executive action",
+                'duration': 292,
+                'timestamp': 1417662047,
+                'upload_date': '20141204',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        {
+            'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com',
+            'only_matching': True,
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        video = self._download_json(
+            'http://video.foxnews.com/v/feed/video/%s.js?template=fox' % video_id, video_id)
+
+        item = video['channel']['item']
+        title = item['title']
+        description = item['description']
+        timestamp = parse_iso8601(item['dc-date'])
+
+        media_group = item['media-group']
+        duration = None
+        formats = []
+        for media in media_group['media-content']:
+            attributes = media['@attributes']
+            video_url = attributes['url']
+            if video_url.endswith('.f4m'):
+                formats.extend(self._extract_f4m_formats(video_url + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124', video_id))
+            elif video_url.endswith('.m3u8'):
+                formats.extend(self._extract_m3u8_formats(video_url, video_id, 'flv'))
+            elif not video_url.endswith('.smil'):
+                duration = int_or_none(attributes.get('duration'))
+                formats.append({
+                    'url': video_url,
+                    'format_id': media['media-category']['@attributes']['label'],
+                    'preference': 1,
+                    'vbr': int_or_none(attributes.get('bitrate')),
+                    'filesize': int_or_none(attributes.get('fileSize'))
+                })
+        self._sort_formats(formats)
+
+        media_thumbnail = media_group['media-thumbnail']['@attributes']
+        thumbnails = [{
+            'url': media_thumbnail['url'],
+            'width': int_or_none(media_thumbnail.get('width')),
+            'height': int_or_none(media_thumbnail.get('height')),
+        }] if media_thumbnail else []
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'timestamp': timestamp,
+            'formats': formats,
+            'thumbnails': thumbnails,
+        }
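
Note: the per-media loop above dispatches purely on the URL suffix: manifest URLs (`.f4m`, `.m3u8`) fan out into several formats, `.smil` entries are skipped, and anything else is treated as a single progressive file. The shape of that dispatch, in isolation (sample URLs invented):

    def classify(video_url):
        if video_url.endswith('.f4m'):
            return 'hds manifest'
        elif video_url.endswith('.m3u8'):
            return 'hls manifest'
        elif not video_url.endswith('.smil'):
            return 'progressive file'
        # .smil playlists fall through and are ignored

    for u in ('http://example.com/a.f4m', 'http://example.com/a.m3u8',
              'http://example.com/a.smil', 'http://example.com/a.mp4'):
        print(u, '->', classify(u))
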
diff --git a/youtube_dl/extractor/franceinter.py b/youtube_dl/extractor/franceinter.py
index deb1b0b9dc55244874029af3c47412184c2c1e50..6613ee17acee4a3fade5470d17196864f4ccae29 100644
--- a/youtube_dl/extractor/franceinter.py
+++ b/youtube_dl/extractor/franceinter.py
@@ -4,16 +4,21 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..utils import int_or_none
 
 
 class FranceInterIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]{6})'
+    _VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]+)'
     _TEST = {
         'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
-        'file': '793962.mp3',
         'md5': '4764932e466e6f6c79c317d2e74f6884',
         "info_dict": {
-            "title": "L’Histoire dans les jeux vidéo",
+            'id': '793962',
+            'ext': 'mp3',
+            'title': 'L’Histoire dans les jeux vidéo',
+            'description': 'md5:7e93ddb4451e7530022792240a3049c7',
+            'timestamp': 1387369800,
+            'upload_date': '20131218',
         },
     }
 
@@ -22,17 +27,26 @@ class FranceInterIE(InfoExtractor):
         video_id = mobj.group('id')
 
         webpage = self._download_webpage(url, video_id)
-        title = self._html_search_regex(
-            r'<span class="roll_overflow">(.*?)</span></h1>', webpage, 'title')
+
         path = self._search_regex(
-            r'&urlAOD=(.*?)&startTime', webpage, 'video url')
+            r'<a id="player".+?href="([^"]+)"', webpage, 'video url')
         video_url = 'http://www.franceinter.fr/' + path
 
+        title = self._html_search_regex(
+            r'<span class="title">(.+?)</span>', webpage, 'title')
+        description = self._html_search_regex(
+            r'<span class="description">(.*?)</span>',
+            webpage, 'description', fatal=False)
+        timestamp = int_or_none(self._search_regex(
+            r'data-date="(\d+)"', webpage, 'upload date', fatal=False))
+
         return {
             'id': video_id,
+            'title': title,
+            'description': description,
+            'timestamp': timestamp,
             'formats': [{
                 'url': video_url,
                 'vcodec': 'none',
             }],
-            'title': title,
         }
diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py
index 1b0e8e5d59dc23d52d7fb15d7e46e0b1383a7435..e0420a48f8cacb5661b6882f797c09755de4df46 100644
--- a/youtube_dl/extractor/francetv.py
+++ b/youtube_dl/extractor/francetv.py
@@ -8,45 +8,85 @@ import json
 from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
+    ExtractorError,
+    clean_html,
+    parse_duration,
+    compat_urllib_parse_urlparse,
+    int_or_none,
 )
 
 
 class FranceTVBaseInfoExtractor(InfoExtractor):
-    def _extract_video(self, video_id):
-        info = self._download_xml(
-            'http://www.francetvinfo.fr/appftv/webservices/video/'
-            'getInfosOeuvre.php?id-diffusion='
-            + video_id, video_id, 'Downloading XML config')
-
-        manifest_url = info.find('videos/video/url').text
-        manifest_url = manifest_url.replace('/z/', '/i/')
-        
-        if manifest_url.startswith('rtmp'):
-            formats = [{'url': manifest_url, 'ext': 'flv'}]
+    def _extract_video(self, video_id, catalogue):
+        info = self._download_json(
+            'http://webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=%s&catalogue=%s'
+            % (video_id, catalogue),
+            video_id, 'Downloading video JSON')
+
+        if info.get('status') == 'NOK':
+            raise ExtractorError(
+                '%s returned error: %s' % (self.IE_NAME, info['message']), expected=True)
+        allowed_countries = info['videos'][0].get('geoblocage')
+        if allowed_countries:
+            georestricted = True
+            geo_info = self._download_json(
+                'http://geo.francetv.fr/ws/edgescape.json', video_id,
+                'Downloading geo restriction info')
+            country = geo_info['reponse']['geo_info']['country_code']
+            if country not in allowed_countries:
+                raise ExtractorError(
+                    'The video is not available from your location',
+                    expected=True)
         else:
-            formats = []
-            available_formats = self._search_regex(r'/[^,]*,(.*?),k\.mp4', manifest_url, 'available formats')
-            for index, format_descr in enumerate(available_formats.split(',')):
-                format_info = {
-                    'url': manifest_url.replace('manifest.f4m', 'index_%d_av.m3u8' % index),
-                    'ext': 'mp4',
-                }
-                m_resolution = re.search(r'(?P<width>\d+)x(?P<height>\d+)', format_descr)
-                if m_resolution is not None:
-                    format_info.update({
-                        'width': int(m_resolution.group('width')),
-                        'height': int(m_resolution.group('height')),
-                    })
-                formats.append(format_info)
-
-        thumbnail_path = info.find('image').text
+            georestricted = False
+
+        formats = []
+        for video in info['videos']:
+            if video['statut'] != 'ONLINE':
+                continue
+            video_url = video['url']
+            if not video_url:
+                continue
+            format_id = video['format']
+            if video_url.endswith('.f4m'):
+                if georestricted:
+                    # See https://github.com/rg3/youtube-dl/issues/3963
+                    # m3u8 urls work fine
+                    continue
+                video_url_parsed = compat_urllib_parse_urlparse(video_url)
+                f4m_url = self._download_webpage(
+                    'http://hdfauth.francetv.fr/esi/urltokengen2.html?url=%s' % video_url_parsed.path,
+                    video_id, 'Downloading f4m manifest token', fatal=False)
+                if f4m_url:
+                    f4m_formats = self._extract_f4m_formats(f4m_url, video_id)
+                    for f4m_format in f4m_formats:
+                        f4m_format['preference'] = 1
+                    formats.extend(f4m_formats)
+            elif video_url.endswith('.m3u8'):
+                formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4'))
+            elif video_url.startswith('rtmp'):
+                formats.append({
+                    'url': video_url,
+                    'format_id': 'rtmp-%s' % format_id,
+                    'ext': 'flv',
+                    'preference': 1,
+                })
+            else:
+                formats.append({
+                    'url': video_url,
+                    'format_id': format_id,
+                    'preference': -1,
+                })
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
-            'title': info.find('titre').text,
+            'title': info['titre'],
+            'description': clean_html(info['synopsis']),
+            'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
+            'duration': parse_duration(info['duree']),
+            'timestamp': int_or_none(info['diffusion']['timestamp']),
             'formats': formats,
-            'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', thumbnail_path),
-            'description': info.find('synopsis').text,
         }
 
 
@@ -61,7 +101,7 @@ class PluzzIE(FranceTVBaseInfoExtractor):
         webpage = self._download_webpage(url, title)
         video_id = self._search_regex(
             r'data-diffusion="(\d+)"', webpage, 'ID')
-        return self._extract_video(video_id)
+        return self._extract_video(video_id, 'Pluzz')
 
 
 class FranceTvInfoIE(FranceTVBaseInfoExtractor):
@@ -72,11 +112,10 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
         'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
         'info_dict': {
             'id': '84981923',
-            'ext': 'mp4',
+            'ext': 'flv',
             'title': 'Soir 3',
-        },
-        'params': {
-            'skip_download': True,
+            'upload_date': '20130826',
+            'timestamp': 1377548400,
         },
     }, {
         'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
@@ -88,15 +127,17 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
         },
         'params': {
             'skip_download': 'HLS (requires ffmpeg)'
-        }
+        },
+        'skip': 'Ce direct est terminé et sera disponible en rattrapage dans quelques minutes.',
     }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         page_title = mobj.group('title')
         webpage = self._download_webpage(url, page_title)
-        video_id = self._search_regex(r'id-video=((?:[^0-9]*?_)?[0-9]+)[@"]', webpage, 'video id')
-        return self._extract_video(video_id)
+        video_id, catalogue = self._search_regex(
+            r'id-video=([^@]+@[^"]+)', webpage, 'video id').split('@')
+        return self._extract_video(video_id, catalogue)
 
 
 class FranceTVIE(FranceTVBaseInfoExtractor):
@@ -112,91 +153,77 @@ class FranceTVIE(FranceTVBaseInfoExtractor):
         # france2
         {
             'url': 'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
-            'file': '75540104.mp4',
+            'md5': 'c03fc87cb85429ffd55df32b9fc05523',
             'info_dict': {
-                'title': '13h15, le samedi...',
-                'description': 'md5:2e5b58ba7a2d3692b35c792be081a03d',
-            },
-            'params': {
-                # m3u8 download
-                'skip_download': True,
+                'id': '109169362',
+                'ext': 'flv',
+                'title': '13h15, le dimanche...',
+                'description': 'md5:9a0932bb465f22d377a449be9d1a0ff7',
+                'upload_date': '20140914',
+                'timestamp': 1410693600,
             },
         },
         # france3
         {
             'url': 'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575',
+            'md5': '679bb8f8921f8623bd658fa2f8364da0',
             'info_dict': {
                 'id': '000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au',
-                'ext': 'flv',
+                'ext': 'mp4',
                 'title': 'Le scandale du prix des médicaments',
                 'description': 'md5:1384089fbee2f04fc6c9de025ee2e9ce',
-            },
-            'params': {
-                # rtmp download
-                'skip_download': True,
+                'upload_date': '20131113',
+                'timestamp': 1384380000,
             },
         },
         # france4
         {
             'url': 'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
+            'md5': 'a182bf8d2c43d88d46ec48fbdd260c1c',
             'info_dict': {
                 'id': 'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
-                'ext': 'flv',
+                'ext': 'mp4',
                 'title': 'Hero Corp Making of - Extrait 1',
                 'description': 'md5:c87d54871b1790679aec1197e73d650a',
-            },
-            'params': {
-                # rtmp download
-                'skip_download': True,
+                'upload_date': '20131106',
+                'timestamp': 1383766500,
             },
         },
         # france5
         {
             'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
+            'md5': '78f0f4064f9074438e660785bbf2c5d9',
             'info_dict': {
-                'id': '92837968',
-                'ext': 'mp4',
+                'id': '108961659',
+                'ext': 'flv',
                 'title': 'C à dire ?!',
-                'description': 'md5:fb1db1cbad784dcce7c7a7bd177c8e2f',
-            },
-            'params': {
-                # m3u8 download
-                'skip_download': True,
+                'description': 'md5:1a4aeab476eb657bf57c4ff122129f81',
+                'upload_date': '20140915',
+                'timestamp': 1410795000,
             },
         },
         # franceo
         {
             'url': 'http://www.franceo.fr/jt/info-afrique/04-12-2013',
+            'md5': '52f0bfe202848b15915a2f39aaa8981b',
             'info_dict': {
-                'id': '92327925',
-                'ext': 'mp4',
-                'title': 'Infô-Afrique',
+                'id': '108634970',
+                'ext': 'flv',
+                'title': 'Infô Afrique',
                 'description': 'md5:ebf346da789428841bee0fd2a935ea55',
+                'upload_date': '20140915',
+                'timestamp': 1410822000,
             },
-            'params': {
-                # m3u8 download
-                'skip_download': True,
-            },
-            'skip': 'The id changes frequently',
         },
     ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj.group('key'):
-            webpage = self._download_webpage(url, mobj.group('key'))
-            id_res = [
-                (r'''(?x)<div\s+class="video-player">\s*
-                    <a\s+href="http://videos.francetv.fr/video/([0-9]+)"\s+
-                    class="francetv-video-player">'''),
-                (r'<a id="player_direct" href="http://info\.francetelevisions'
-                 '\.fr/\?id-video=([^"/&]+)'),
-                (r'<a class="video" id="ftv_player_(.+?)"'),
-            ]
-            video_id = self._html_search_regex(id_res, webpage, 'video ID')
-        else:
-            video_id = mobj.group('id')
-        return self._extract_video(video_id)
+        webpage = self._download_webpage(url, mobj.group('key') or mobj.group('id'))
+        video_id, catalogue = self._html_search_regex(
+            r'href="http://videos\.francetv\.fr/video/([^@]+@[^"]+)"',
+            webpage, 'video ID').split('@')
+        return self._extract_video(video_id, catalogue)
 
 
 class GenerationQuoiIE(InfoExtractor):
@@ -224,7 +251,7 @@ class GenerationQuoiIE(InfoExtractor):
         info_json = self._download_webpage(info_url, name)
         info = json.loads(info_json)
         return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
-            ie='Dailymotion')
+                               ie='Dailymotion')
 
 
 class CultureboxIE(FranceTVBaseInfoExtractor):
@@ -232,16 +259,15 @@ class CultureboxIE(FranceTVBaseInfoExtractor):
     _VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'
 
     _TEST = {
-        'url': 'http://culturebox.francetvinfo.fr/einstein-on-the-beach-au-theatre-du-chatelet-146813',
+        'url': 'http://culturebox.francetvinfo.fr/festivals/dans-les-jardins-de-william-christie/dans-les-jardins-de-william-christie-le-camus-162553',
+        'md5': '5ad6dec1ffb2a3fbcb20cc4b744be8d6',
         'info_dict': {
-            'id': 'EV_6785',
-            'ext': 'mp4',
-            'title': 'Einstein on the beach au Théâtre du Châtelet',
-            'description': 'md5:9ce2888b1efefc617b5e58b3f6200eeb',
-        },
-        'params': {
-            # m3u8 download
-            'skip_download': True,
+            'id': 'EV_22853',
+            'ext': 'flv',
+            'title': 'Dans les jardins de William Christie - Le Camus',
+            'description': 'md5:4710c82315c40f0c865ca8b9a68b5299',
+            'upload_date': '20140829',
+            'timestamp': 1409317200,
         },
     }
 
@@ -249,5 +275,7 @@ class CultureboxIE(FranceTVBaseInfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         name = mobj.group('name')
         webpage = self._download_webpage(url, name)
-        video_id = self._search_regex(r'"http://videos\.francetv\.fr/video/(.*?)"', webpage, 'video id')
-        return self._extract_video(video_id)
+        video_id, catalogue = self._search_regex(
+            r'"http://videos\.francetv\.fr/video/([^@]+@[^"]+)"', webpage, 'video id').split('@')
+
+        return self._extract_video(video_id, catalogue)
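
Note: the geoblocking logic that replaced the old XML path boils down to two JSON lookups: the video document carries a country whitelist (`geoblocage`) and a second service reports the caller's country. Stripped of the HTTP plumbing, with invented sample payloads standing in for the two responses:

    info = {'videos': [{'geoblocage': ['FR', 'BE', 'CH']}]}
    geo_info = {'reponse': {'geo_info': {'country_code': 'DE'}}}

    allowed_countries = info['videos'][0].get('geoblocage')
    if allowed_countries:
        country = geo_info['reponse']['geo_info']['country_code']
        if country not in allowed_countries:
            raise Exception('The video is not available from your location')
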
diff --git a/youtube_dl/extractor/freevideo.py b/youtube_dl/extractor/freevideo.py
new file mode 100644
index 0000000..f755e3c
--- /dev/null
+++ b/youtube_dl/extractor/freevideo.py
@@ -0,0 +1,38 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class FreeVideoIE(InfoExtractor):
+    _VALID_URL = r'^http://www.freevideo.cz/vase-videa/(?P<id>[^.]+)\.html(?:$|[?#])'
+
+    _TEST = {
+        'url': 'http://www.freevideo.cz/vase-videa/vysukany-zadecek-22033.html',
+        'info_dict': {
+            'id': 'vysukany-zadecek-22033',
+            'ext': 'mp4',
+            "title": "vysukany-zadecek-22033",
+            "age_limit": 18,
+        },
+        'skip': 'Blocked outside .cz',
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage, handle = self._download_webpage_handle(url, video_id)
+        if '//www.czechav.com/' in handle.geturl():
+            raise ExtractorError(
+                'Access to freevideo is blocked from your location',
+                expected=True)
+
+        video_url = self._search_regex(
+            r'\s+url: "(http://[a-z0-9-]+.cdn.freevideo.cz/stream/.*?/video.mp4)"',
+            webpage, 'video URL')
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': video_id,
+            'age_limit': 18,
+        }
diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py
index 721e5fce011e113bf8c413543df496fc3eeca17d..a49fc1151cf324f5e4b61cbd4f1d586718410626 100644
--- a/youtube_dl/extractor/funnyordie.py
+++ b/youtube_dl/extractor/funnyordie.py
@@ -8,7 +8,7 @@ from ..utils import ExtractorError
 
 
 class FunnyOrDieIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
+    _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
     _TESTS = [{
         'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
         'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',
@@ -21,7 +21,6 @@ class FunnyOrDieIE(InfoExtractor):
         },
     }, {
         'url': 'http://www.funnyordie.com/embed/e402820827',
-        'md5': 'ff4d83318f89776ed0250634cfaa8d36',
         'info_dict': {
             'id': 'e402820827',
             'ext': 'mp4',
@@ -29,6 +28,9 @@ class FunnyOrDieIE(InfoExtractor):
             'description': 'Please use this to sell something.  www.jonlajoie.com',
             'thumbnail': 're:^http:.*\.jpg$',
         },
+    }, {
+        'url': 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -37,7 +39,7 @@ class FunnyOrDieIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
 
-        links = re.findall(r'<source src="([^"]+/v)\d+\.([^"]+)" type=\'video', webpage)
+        links = re.findall(r'<source src="([^"]+/v)[^"]+\.([^"]+)" type=\'video', webpage)
         if not links:
             raise ExtractorError('No media links available for %s' % video_id)
 
diff --git a/youtube_dl/extractor/gamekings.py b/youtube_dl/extractor/gamekings.py
index 11fee3d31e88833b8074a1b59cff885eeffa46d3..cf8e90d7dbe9483efffe7894bedbe437746c5da1 100644
--- a/youtube_dl/extractor/gamekings.py
+++ b/youtube_dl/extractor/gamekings.py
@@ -11,7 +11,7 @@ class GamekingsIE(InfoExtractor):
         'url': 'http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/',
         # MD5 is flaky, seems to change regularly
         # 'md5': '2f32b1f7b80fdc5cb616efb4f387f8a3',
-        u'info_dict': {
+        'info_dict': {
             'id': '20130811',
             'ext': 'mp4',
             'title': 'Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review',
diff --git a/youtube_dl/extractor/gameone.py b/youtube_dl/extractor/gameone.py
index b580f52fb42fd80f42dd78201b7a6db09425f77c..3022f539d2571f34ae87bc91dc4cf1f1e25ccdc0 100644
--- a/youtube_dl/extractor/gameone.py
+++ b/youtube_dl/extractor/gameone.py
@@ -88,3 +88,28 @@ class GameOneIE(InfoExtractor):
             'age_limit': age_limit,
             'timestamp': timestamp,
         }
+
+
+class GameOnePlaylistIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?gameone\.de(?:/tv)?/?$'
+    IE_NAME = 'gameone:playlist'
+    _TEST = {
+        'url': 'http://www.gameone.de/tv',
+        'info_dict': {
+            'title': 'GameOne',
+        },
+        'playlist_mincount': 294,
+    }
+
+    def _real_extract(self, url):
+        webpage = self._download_webpage('http://www.gameone.de/tv', 'TV')
+        max_id = max(map(int, re.findall(r'<a href="/tv/(\d+)"', webpage)))
+        entries = [
+            self.url_result('http://www.gameone.de/tv/%d' % video_id, 'GameOne')
+            for video_id in range(max_id, 0, -1)]
+
+        return {
+            '_type': 'playlist',
+            'title': 'GameOne',
+            'entries': entries,
+        }
diff --git a/youtube_dl/extractor/gamespot.py b/youtube_dl/extractor/gamespot.py
index 3d67b9d60242760ff3e32c9fbbbcab39542f01da..d570e3f6a85ca399d81328e3afedee4f98158e5f 100644
--- a/youtube_dl/extractor/gamespot.py
+++ b/youtube_dl/extractor/gamespot.py
@@ -8,12 +8,11 @@ from ..utils import (
     compat_urllib_parse,
     compat_urlparse,
     unescapeHTML,
-    get_meta_content,
 )
 
 
 class GameSpotIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
+    _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
     _TEST = {
         'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
         'md5': 'b2a30deaa8654fcccd43713a6b6a4825',
@@ -26,10 +25,10 @@ class GameSpotIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        page_id = mobj.group('page_id')
+        page_id = self._match_id(url)
         webpage = self._download_webpage(url, page_id)
-        data_video_json = self._search_regex(r'data-video=["\'](.*?)["\']', webpage, 'data video')
+        data_video_json = self._search_regex(
+            r'data-video=["\'](.*?)["\']', webpage, 'data video')
         data_video = json.loads(unescapeHTML(data_video_json))
 
         # Transform the manifest url to a link to the mp4 files
@@ -41,7 +40,8 @@ class GameSpotIE(InfoExtractor):
         http_path = f4m_path[1:].split('/', 1)[1]
         http_template = re.sub(QUALITIES_RE, r'%s', http_path)
         http_template = http_template.replace('.csmil/manifest.f4m', '')
-        http_template = compat_urlparse.urljoin('http://video.gamespotcdn.com/', http_template)
+        http_template = compat_urlparse.urljoin(
+            'http://video.gamespotcdn.com/', http_template)
         formats = []
         for q in qualities:
             formats.append({
@@ -52,8 +52,9 @@ class GameSpotIE(InfoExtractor):
 
         return {
             'id': data_video['guid'],
+            'display_id': page_id,
             'title': compat_urllib_parse.unquote(data_video['title']),
             'formats': formats,
-            'description': get_meta_content('description', webpage),
+            'description': self._html_search_meta('description', webpage),
             'thumbnail': self._og_search_thumbnail(webpage),
         }
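
Note: the "transform the manifest url" comment above hides a three-step rewrite: drop the host-side prefix, collapse the comma-separated quality run into a `%s` placeholder, and strip the HDS suffix, yielding a printf template for the progressive MP4s. A sketch under stated assumptions — `QUALITIES_RE` is defined outside this hunk, so the regex and the sample path below are both invented for illustration:

    import re
    try:
        from urllib.parse import urljoin  # Python 3
    except ImportError:
        from urlparse import urljoin  # Python 2

    QUALITIES_RE = r'((,\d+)+,?)'  # assumption: matches the quality run
    f4m_path = '/vr/2014/video_,700,1000,1800,.mp4.csmil/manifest.f4m'

    http_path = f4m_path[1:].split('/', 1)[1]
    http_template = re.sub(QUALITIES_RE, '%s', http_path)
    http_template = http_template.replace('.csmil/manifest.f4m', '')
    http_template = urljoin('http://video.gamespotcdn.com/', http_template)
    print(http_template % '1000')
    # -> http://video.gamespotcdn.com/2014/video_1000.mp4
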
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index bcb0765940df39656be9f78c1ab144976adb5e5e..2b4d8c62f5696fce9c144bac7c1867e552d3e625 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -7,25 +7,29 @@ import re
 
 from .common import InfoExtractor
 from .youtube import YoutubeIE
-from ..utils import (
-    compat_urllib_error,
+from ..compat import (
     compat_urllib_parse,
-    compat_urllib_request,
     compat_urlparse,
     compat_xml_parse_error,
-
+)
+from ..utils import (
+    determine_ext,
     ExtractorError,
+    float_or_none,
     HEADRequest,
+    orderedSet,
     parse_xml,
     smuggle_url,
     unescapeHTML,
     unified_strdate,
+    unsmuggle_url,
     url_basename,
 )
 from .brightcove import BrightcoveIE
 from .ooyala import OoyalaIE
 from .rutv import RUTVIE
 from .smotri import SmotriIE
+from .condenast import CondeNastIE
 
 
 class GenericIE(InfoExtractor):
@@ -96,6 +100,22 @@ class GenericIE(InfoExtractor):
                 'uploader': 'Championat',
             },
         },
+        {
+            # https://github.com/rg3/youtube-dl/issues/3541
+            'add_ie': ['Brightcove'],
+            'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
+            'info_dict': {
+                'id': '3866516442001',
+                'ext': 'mp4',
+                'title': 'Leer mij vrouwen kennen: Aflevering 1',
+                'description': 'Leer mij vrouwen kennen: Aflevering 1',
+                'uploader': 'SBS Broadcasting',
+            },
+            'skip': 'Restricted to Netherlands',
+            'params': {
+                'skip_download': True,  # m3u8 download
+            },
+        },
         # Direct link to a video
         {
             'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
@@ -153,7 +173,6 @@ class GenericIE(InfoExtractor):
         # funnyordie embed
         {
             'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
-            'md5': '7cf780be104d40fea7bae52eed4a470e',
             'info_dict': {
                 'id': '18e820ec3f',
                 'ext': 'mp4',
@@ -178,13 +197,13 @@ class GenericIE(InfoExtractor):
         # Embedded TED video
         {
             'url': 'http://en.support.wordpress.com/videos/ted-talks/',
-            'md5': 'deeeabcc1085eb2ba205474e7235a3d5',
+            'md5': '65fdff94098e4a607385a60c5177c638',
             'info_dict': {
-                'id': '981',
+                'id': '1969',
                 'ext': 'mp4',
-                'title': 'My web playroom',
-                'uploader': 'Ze Frank',
-                'description': 'md5:ddb2a40ecd6b6a147e400e535874947b',
+                'title': 'Hidden miracles of the natural world',
+                'uploader': 'Louie Schwartzberg',
+                'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
             }
         },
         # Embeded Ustream video
@@ -224,21 +243,6 @@ class GenericIE(InfoExtractor):
                 'skip_download': 'Requires rtmpdump'
             }
         },
-        # smotri embed
-        {
-            'url': 'http://rbctv.rbc.ru/archive/news/562949990879132.shtml',
-            'md5': 'ec40048448e9284c9a1de77bb188108b',
-            'info_dict': {
-                'id': 'v27008541fad',
-                'ext': 'mp4',
-                'title': 'Крым и Севастополь вошли в состав России',
-                'description': 'md5:fae01b61f68984c7bd2fa741e11c3175',
-                'duration': 900,
-                'upload_date': '20140318',
-                'uploader': 'rbctv_2012_4',
-                'uploader_id': 'rbctv_2012_4',
-            },
-        },
         # Condé Nast embed
         {
             'url': 'http://www.wired.com/2014/04/honda-asimo/',
@@ -289,70 +293,197 @@ class GenericIE(InfoExtractor):
                 'description': 'Mario\'s life in the fast lane has never looked so good.',
             },
         },
+        # YouTube embed via <data-embed-url="">
+        {
+            'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
+            'info_dict': {
+                'id': '4vAffPZIT44',
+                'ext': 'mp4',
+                'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
+                'uploader': 'Gameloft',
+                'uploader_id': 'gameloft',
+                'upload_date': '20140828',
+                'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
+            },
+            'params': {
+                'skip_download': True,
+            }
+        },
+        # Camtasia studio
+        {
+            'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
+            'playlist': [{
+                'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
+                'info_dict': {
+                    'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
+                    'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
+                    'ext': 'flv',
+                    'duration': 2235.90,
+                }
+            }, {
+                'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
+                'info_dict': {
+                    'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
+                    'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
+                    'ext': 'flv',
+                    'duration': 2235.93,
+                }
+            }],
+            'info_dict': {
+                'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
+            }
+        },
+        # Flowplayer
+        {
+            'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
+            'md5': '9d65602bf31c6e20014319c7d07fba27',
+            'info_dict': {
+                'id': '5123ea6d5e5a7',
+                'ext': 'mp4',
+                'age_limit': 18,
+                'uploader': 'www.handjobhub.com',
+                'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
+            }
+        },
+        # RSS feed
+        {
+            'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
+            'info_dict': {
+                'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
+                'title': 'Zero Punctuation',
+                'description': 're:'
+            },
+            'playlist_mincount': 11,
+        },
+        # Multiple brightcove videos
+        # https://github.com/rg3/youtube-dl/issues/2283
+        {
+            'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
+            'info_dict': {
+                'id': 'always-never',
+                'title': 'Always / Never - The New Yorker',
+            },
+            'playlist_count': 3,
+            'params': {
+                'extract_flat': False,
+                'skip_download': True,
+            }
+        },
+        # MLB embed
+        {
+            'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
+            'md5': '96f09a37e44da40dd083e12d9a683327',
+            'info_dict': {
+                'id': '33322633',
+                'ext': 'mp4',
+                'title': 'Ump changes call to ball',
+                'description': 'md5:71c11215384298a172a6dcb4c2e20685',
+                'duration': 48,
+                'timestamp': 1401537900,
+                'upload_date': '20140531',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        # Wistia embed
+        {
+            'url': 'http://education-portal.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
+            'md5': '8788b683c777a5cf25621eaf286d0c23',
+            'info_dict': {
+                'id': '1cfaf6b7ea',
+                'ext': 'mov',
+                'title': 'md5:51364a8d3d009997ba99656004b5e20d',
+                'duration': 643.0,
+                'filesize': 182808282,
+                'uploader': 'education-portal.com',
+            },
+        },
+        {
+            'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
+            'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
+            'info_dict': {
+                'id': 'uxjb0lwrcz',
+                'ext': 'mp4',
+                'title': 'Conversation about Hexagonal Rails Part 1 - ThoughtWorks',
+                'duration': 1715.0,
+                'uploader': 'thoughtworks.wistia.com',
+            },
+        },
+        # Direct download with broken HEAD
+        {
+            'url': 'http://ai-radio.org:8000/radio.opus',
+            'info_dict': {
+                'id': 'radio',
+                'ext': 'opus',
+                'title': 'radio',
+            },
+            'params': {
+                'skip_download': True,  # infinite live stream
+            },
+            'expected_warnings': [
+                r'501.*Not Implemented'
+            ],
+        },
+        # Soundcloud embed
+        {
+            'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
+            'info_dict': {
+                'id': '174391317',
+                'ext': 'mp3',
+                'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
+                'uploader': 'Sophos Security',
+                'title': 'Chet Chat 171 - Oct 29, 2014',
+                'upload_date': '20141029',
+            }
+        },
+        # Livestream embed
+        {
+            'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
+            'info_dict': {
+                'id': '67864563',
+                'ext': 'flv',
+                'upload_date': '20141112',
+                'title': 'Rosetta #CometLanding webcast HL 10',
+            }
+        },
+        # LazyYT
+        {
+            'url': 'http://discourse.ubuntu.com/t/unity-8-desktop-mode-windows-on-mir/1986',
+            'info_dict': {
+                'title': 'Unity 8 desktop-mode windows on Mir! - Ubuntu Discourse',
+            },
+            'playlist_mincount': 2,
+        },
+        # Direct link with incorrect MIME type
+        {
+            'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
+            'md5': '4ccbebe5f36706d85221f204d7eb5913',
+            'info_dict': {
+                'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
+                'id': '5_Lennart_Poettering_-_Systemd',
+                'ext': 'webm',
+                'title': '5_Lennart_Poettering_-_Systemd',
+                'upload_date': '20141120',
+            },
+            'expected_warnings': [
+                'URL could be a direct video link, returning it as such.'
+            ]
+        },
+        # Cinchcast embed
+        {
+            'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
+            'info_dict': {
+                'id': '7141703',
+                'ext': 'mp3',
+                'upload_date': '20141126',
+                'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
+            }
+        },
     ]
 
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        if not self._downloader.params.get('test', False):
-            self._downloader.report_warning('Falling back on generic information extractor.')
-        super(GenericIE, self).report_download_webpage(video_id)
-
     def report_following_redirect(self, new_url):
         """Report information extraction."""
         self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
 
-    def _send_head(self, url):
-        """Check if it is a redirect, like url shorteners, in case return the new url."""
-
-        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
-            """
-            Subclass the HTTPRedirectHandler to make it use our
-            HEADRequest also on the redirected URL
-            """
-            def redirect_request(self, req, fp, code, msg, headers, newurl):
-                if code in (301, 302, 303, 307):
-                    newurl = newurl.replace(' ', '%20')
-                    newheaders = dict((k,v) for k,v in req.headers.items()
-                                      if k.lower() not in ("content-length", "content-type"))
-                    try:
-                        # This function was deprecated in python 3.3 and removed in 3.4
-                        origin_req_host = req.get_origin_req_host()
-                    except AttributeError:
-                        origin_req_host = req.origin_req_host
-                    return HEADRequest(newurl,
-                                       headers=newheaders,
-                                       origin_req_host=origin_req_host,
-                                       unverifiable=True)
-                else:
-                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
-
-        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
-            """
-            Fallback to GET if HEAD is not allowed (405 HTTP error)
-            """
-            def http_error_405(self, req, fp, code, msg, headers):
-                fp.read()
-                fp.close()
-
-                newheaders = dict((k,v) for k,v in req.headers.items()
-                                  if k.lower() not in ("content-length", "content-type"))
-                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
-                                                 headers=newheaders,
-                                                 origin_req_host=req.get_origin_req_host(),
-                                                 unverifiable=True))
-
-        # Build our opener
-        opener = compat_urllib_request.OpenerDirector()
-        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
-                        HTTPMethodFallback, HEADRedirectHandler,
-                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
-            opener.add_handler(handler())
-
-        response = opener.open(HEADRequest(url))
-        if response is None:
-            raise ExtractorError('Invalid URL protocol')
-        return response
-
     def _extract_rss(self, url, video_id, doc):
         playlist_title = doc.find('./channel/title').text
         playlist_desc_el = doc.find('./channel/description')
@@ -372,6 +503,43 @@ class GenericIE(InfoExtractor):
             'entries': entries,
         }
 
+    def _extract_camtasia(self, url, video_id, webpage):
+        """ Returns None if no camtasia video can be found. """
+
+        camtasia_cfg = self._search_regex(
+            r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
+            webpage, 'camtasia configuration file', default=None)
+        if camtasia_cfg is None:
+            return None
+
+        title = self._html_search_meta('DC.title', webpage, fatal=True)
+
+        camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
+        camtasia_cfg = self._download_xml(
+            camtasia_url, video_id,
+            note='Downloading camtasia configuration',
+            errnote='Failed to download camtasia configuration')
+        fileset_node = camtasia_cfg.find('./playlist/array/fileset')
+
+        entries = []
+        for n in fileset_node.getchildren():
+            url_n = n.find('./uri')
+            if url_n is None:
+                continue
+
+            entries.append({
+                'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
+                'title': '%s - %s' % (title, n.tag),
+                'url': compat_urlparse.urljoin(url, url_n.text),
+                'duration': float_or_none(n.find('./duration').text),
+            })
+
+        return {
+            '_type': 'playlist',
+            'entries': entries,
+            'title': title,
+        }
+
     def _real_extract(self, url):
         if url.startswith('//'):
             return {
@@ -402,53 +570,88 @@ class GenericIE(InfoExtractor):
 
             if default_search in ('error', 'fixup_error'):
                 raise ExtractorError(
-                    ('%r is not a valid URL. '
-                     'Set --default-search "ytsearch" (or run  youtube-dl "ytsearch:%s" ) to search YouTube'
-                    % (url, url), expected=True)
+                    '%r is not a valid URL. '
+                    'Set --default-search "ytsearch" (or run  youtube-dl "ytsearch:%s" ) to search YouTube'
+                    % (url, url), expected=True)
             else:
-                assert ':' in default_search
+                if ':' not in default_search:
+                    default_search += ':'
                 return self.url_result(default_search + url)
-        video_id = os.path.splitext(url.rstrip('/').split('/')[-1])[0]
+
+        url, smuggled_data = unsmuggle_url(url)
+        force_videoid = None
+        is_intentional = smuggled_data and smuggled_data.get('to_generic')
+        if smuggled_data and 'force_videoid' in smuggled_data:
+            force_videoid = smuggled_data['force_videoid']
+            video_id = force_videoid
+        else:
+            video_id = os.path.splitext(url.rstrip('/').split('/')[-1])[0]
 
         self.to_screen('%s: Requesting header' % video_id)
 
-        try:
-            response = self._send_head(url)
+        head_req = HEADRequest(url)
+        head_response = self._request_webpage(
+            head_req, video_id,
+            note=False, errnote='Could not send HEAD request to %s' % url,
+            fatal=False)
 
+        if head_response is not False:
             # Check for redirect
-            new_url = response.geturl()
+            new_url = head_response.geturl()
             if url != new_url:
                 self.report_following_redirect(new_url)
+                if force_videoid:
+                    new_url = smuggle_url(
+                        new_url, {'force_videoid': force_videoid})
                 return self.url_result(new_url)
 
-            # Check for direct link to a video
-            content_type = response.headers.get('Content-Type', '')
-            m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
-            if m:
-                upload_date = response.headers.get('Last-Modified')
-                if upload_date:
-                    upload_date = unified_strdate(upload_date)
-                return {
-                    'id': video_id,
-                    'title': os.path.splitext(url_basename(url))[0],
-                    'formats': [{
-                        'format_id': m.group('format_id'),
-                        'url': url,
-                        'vcodec': 'none' if m.group('type') == 'audio' else None
-                    }],
-                    'upload_date': upload_date,
-                }
+        full_response = None
+        if head_response is False:
+            full_response = self._request_webpage(url, video_id)
+            head_response = full_response
+
+        # Check for direct link to a video
+        content_type = head_response.headers.get('Content-Type', '')
+        m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
+        if m:
+            upload_date = unified_strdate(
+                head_response.headers.get('Last-Modified'))
+            return {
+                'id': video_id,
+                'title': os.path.splitext(url_basename(url))[0],
+                'direct': True,
+                'formats': [{
+                    'format_id': m.group('format_id'),
+                    'url': url,
+                    'vcodec': 'none' if m.group('type') == 'audio' else None
+                }],
+                'upload_date': upload_date,
+            }
 
-        except compat_urllib_error.HTTPError:
-            # This may be a stupid server that doesn't like HEAD, our UA, or so
-            pass
+        if not self._downloader.params.get('test', False) and not is_intentional:
+            self._downloader.report_warning('Falling back on generic information extractor.')
 
-        try:
-            webpage = self._download_webpage(url, video_id)
-        except ValueError:
-            # since this is the last-resort InfoExtractor, if
-            # this error is thrown, it'll be thrown here
-            raise ExtractorError('Failed to download URL: %s' % url)
+        if not full_response:
+            full_response = self._request_webpage(url, video_id)
+
+        # Maybe it's a direct link to a video?
+        # Be careful not to download the whole thing!
+        first_bytes = full_response.read(512)
+        if not re.match(r'^\s*<', first_bytes.decode('utf-8', 'replace')):
+            self._downloader.report_warning(
+                'URL could be a direct video link, returning it as such.')
+            upload_date = unified_strdate(
+                head_response.headers.get('Last-Modified'))
+            return {
+                'id': video_id,
+                'title': os.path.splitext(url_basename(url))[0],
+                'direct': True,
+                'url': url,
+                'upload_date': upload_date,
+            }
+
+        webpage = self._webpage_read_content(
+            full_response, url, video_id, prefix=first_bytes)
 
         self.report_extraction(video_id)
 
@@ -460,6 +663,11 @@ class GenericIE(InfoExtractor):
         except compat_xml_parse_error:
             pass
 
+        # Is it a Camtasia project?
+        camtasia_res = self._extract_camtasia(url, video_id, webpage)
+        if camtasia_res is not None:
+            return camtasia_res
+
         # Sometimes embedded video player is hidden behind percent encoding
         # (e.g. https://github.com/rg3/youtube-dl/issues/2448)
         # Unescaping the whole page allows us to handle those cases in a generic way
@@ -475,10 +683,28 @@ class GenericIE(InfoExtractor):
             r'(?s)<title>(.*?)</title>', webpage, 'video title',
             default='video')
 
+        # Try to detect age limit automatically
+        age_limit = self._rta_search(webpage)
+        # And then there are the jokers who advertise that they use RTA,
+        # but actually don't.
+        AGE_LIMIT_MARKERS = [
+            r'Proudly Labeled <a href="http://www.rtalabel.org/" title="Restricted to Adults">RTA</a>',
+        ]
+        if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
+            age_limit = 18
+
         # video uploader is domain name
         video_uploader = self._search_regex(
             r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
 
+        # Helper method: wrap regex matches into a deduplicated playlist result
+        def _playlist_from_matches(matches, getter, ie=None):
+            urlrs = orderedSet(
+                self.url_result(self._proto_relative_url(getter(m)), ie)
+                for m in matches)
+            return self.playlist_result(
+                urlrs, playlist_id=video_id, playlist_title=video_title)
+
         # Look for BrightCove:
         bc_urls = BrightcoveIE._extract_brightcove_urls(webpage)
         if bc_urls:
@@ -502,58 +728,85 @@ class GenericIE(InfoExtractor):
         if mobj:
             player_url = unescapeHTML(mobj.group('url'))
             surl = smuggle_url(player_url, {'Referer': url})
-            return self.url_result(surl, 'Vimeo')
+            return self.url_result(surl)
 
         # Look for embedded (swf embed) Vimeo player
         mobj = re.search(
-            r'<embed[^>]+?src="(https?://(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
+            r'<embed[^>]+?src="((?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
         if mobj:
-            return self.url_result(mobj.group(1), 'Vimeo')
+            return self.url_result(mobj.group(1))
 
         # Look for embedded YouTube player
         matches = re.findall(r'''(?x)
             (?:
                 <iframe[^>]+?src=|
+                data-video-url=|
                 <embed[^>]+?src=|
-                embedSWF\(?:\s*
+                embedSWF\(?:\s*|
+                new\s+SWFObject\(
             )
             (["\'])
-                (?P<url>(?:https?:)?//(?:www\.)?youtube\.com/
-                (?:embed|v)/.+?)
+                (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
+                (?:embed|v|p)/.+?)
             \1''', webpage)
         if matches:
-            urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Youtube')
-                     for tuppl in matches]
-            return self.playlist_result(
-                urlrs, playlist_id=video_id, playlist_title=video_title)
+            return _playlist_from_matches(
+                matches, lambda m: unescapeHTML(m[1]))
+
+        # Look for lazyYT YouTube embed
+        matches = re.findall(
+            r'class="lazyYT" data-youtube-id="([^"]+)"', webpage)
+        if matches:
+            return _playlist_from_matches(matches, lambda m: unescapeHTML(m))
 
         # Look for embedded Dailymotion player
         matches = re.findall(
             r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/embed/video/.+?)\1', webpage)
         if matches:
-            urlrs = [self.url_result(unescapeHTML(tuppl[1]))
-                     for tuppl in matches]
-            return self.playlist_result(
-                urlrs, playlist_id=video_id, playlist_title=video_title)
+            return _playlist_from_matches(
+                matches, lambda m: unescapeHTML(m[1]))
+
+        # Look for embedded Dailymotion playlist player (#3822)
+        m = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
+        if m:
+            playlists = re.findall(
+                r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
+            if playlists:
+                return _playlist_from_matches(
+                    playlists, lambda p: '//dailymotion.com/playlist/%s' % p)
 
         # Look for embedded Wistia player
         match = re.search(
-            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
+            r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
         if match:
+            embed_url = self._proto_relative_url(
+                unescapeHTML(match.group('url')))
             return {
                 '_type': 'url_transparent',
-                'url': unescapeHTML(match.group('url')),
+                'url': embed_url,
                 'ie_key': 'Wistia',
                 'uploader': video_uploader,
                 'title': video_title,
                 'id': video_id,
             }
 
+        match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
+        if match:
+            return {
+                '_type': 'url_transparent',
+                'url': 'http://fast.wistia.net/embed/iframe/{0:}'.format(match.group('id')),
+                'ie_key': 'Wistia',
+                'uploader': video_uploader,
+                'title': video_title,
+                'id': match.group('id')
+            }
+
         # Look for embedded blip.tv player
         mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
         if mobj:
-            return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
-        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage)
+            return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
+        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
         if mobj:
             return self.url_result(mobj.group(1), 'BlipTV')
 
@@ -588,7 +841,7 @@ class GenericIE(InfoExtractor):
 
         # Look for Ooyala videos
         mobj = (re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
-             re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage))
+                re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage))
         if mobj is not None:
             return OoyalaIE._build_url_result(mobj.group('ec'))
 
@@ -648,10 +901,8 @@ class GenericIE(InfoExtractor):
         # Look for funnyordie embed
         matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
         if matches:
-            urlrs = [self.url_result(unescapeHTML(eurl), 'FunnyOrDie')
-                     for eurl in matches]
-            return self.playlist_result(
-                urlrs, playlist_id=video_id, playlist_title=video_title)
+            return _playlist_from_matches(
+                matches, getter=unescapeHTML, ie='FunnyOrDie')
 
         # Look for embedded RUTV player
         rutv_url = RUTVIE._extract_url(webpage)
@@ -684,7 +935,7 @@ class GenericIE(InfoExtractor):
 
         # Look for embedded soundcloud player
         mobj = re.search(
-            r'<iframe src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
+            r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
             webpage)
         if mobj is not None:
             url = unescapeHTML(mobj.group('url'))
@@ -706,40 +957,97 @@ class GenericIE(InfoExtractor):
             url = unescapeHTML(mobj.group('url'))
             return self.url_result(url, ie='MTVServicesEmbedded')
 
+        # Look for embedded yahoo player
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
+            webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'Yahoo')
+
+        # Look for embedded sbs.com.au player
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:www\.)sbs\.com\.au/ondemand/video/single/.+?)\1',
+            webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'SBS')
+
+        # Look for embedded Cinchcast player
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
+            webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'Cinchcast')
+
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
+            webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'MLB')
+
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
+            webpage)
+        if mobj is not None:
+            return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
+
+        mobj = re.search(
+            r'<iframe[^>]+src="(?P<url>https?://new\.livestream\.com/[^"]+/player[^"]+)"',
+            webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'Livestream')
+
+        def check_video(vurl):
+            vpath = compat_urlparse.urlparse(vurl).path
+            vext = determine_ext(vpath)
+            return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml')
+
+        def filter_video(urls):
+            return list(filter(check_video, urls))
+
         # Start with something easy: JW Player in SWFObject
-        found = re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
+        found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
         if not found:
             # Look for gorilla-vid style embedding
-            found = re.findall(r'''(?sx)
+            found = filter_video(re.findall(r'''(?sx)
                 (?:
                     jw_plugins|
                     JWPlayerOptions|
                     jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
                 )
-                .*?file\s*:\s*["\'](.*?)["\']''', webpage)
+                .*?file\s*:\s*["\'](.*?)["\']''', webpage))
         if not found:
             # Broaden the search a little bit
-            found = re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+            found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
         if not found:
             # Broaden the findall a little bit: JWPlayer JS loader
-            found = re.findall(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
+            found = filter_video(re.findall(
+                r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
+        if not found:
+            # Flow player
+            found = filter_video(re.findall(r'''(?xs)
+                flowplayer\("[^"]+",\s*
+                    \{[^}]+?\}\s*,
+                    \s*{[^}]+? ["']?clip["']?\s*:\s*\{\s*
+                        ["']?url["']?\s*:\s*["']([^"']+)["']
+            ''', webpage))
         if not found:
             # Try to find twitter cards info
-            found = re.findall(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
+            found = filter_video(re.findall(
+                r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
         if not found:
             # We look for Open Graph info:
             # We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
             m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
             # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
             if m_video_type is not None:
-                found = re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
+                found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
         if not found:
             # HTML5 video
-            found = re.findall(r'(?s)<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage)
+            found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage)
         if not found:
             found = re.search(
                 r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
-                r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
+                r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'?([^\'"]+)',
                 webpage)
             if found:
                 new_url = found.group(1)
@@ -769,6 +1077,7 @@ class GenericIE(InfoExtractor):
                 'url': video_url,
                 'uploader': video_uploader,
                 'title': video_title,
+                'age_limit': age_limit,
             })
 
         if len(entries) == 1:
@@ -780,4 +1089,3 @@ class GenericIE(InfoExtractor):
                 '_type': 'playlist',
                 'entries': entries,
             }
-
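
Throughout this GenericIE rewrite, smuggle_url()/unsmuggle_url() carry extra state ('force_videoid', 'to_generic') across extractor hops without needing a side channel. A sketch of the mechanism, assuming the fragment-plus-JSON encoding used by youtube-dl's utils module (treat the exact marker name and encoding as assumptions):

    import json
    from urllib.parse import urlencode, parse_qs

    SMUGGLE_KEY = '__youtubedl_smuggle'  # assumed marker name

    def smuggle_url(url, data):
        # Append the payload as a JSON blob inside the URL fragment.
        return url + '#' + urlencode({SMUGGLE_KEY: json.dumps(data)})

    def unsmuggle_url(smug_url, default=None):
        if '#' + SMUGGLE_KEY not in smug_url:
            return smug_url, default
        url, _, frag = smug_url.rpartition('#')
        return url, json.loads(parse_qs(frag)[SMUGGLE_KEY][0])

    u = smuggle_url('http://example.com/video', {'force_videoid': '1234'})
    clean, payload = unsmuggle_url(u)
    assert clean == 'http://example.com/video'
    assert payload == {'force_videoid': '1234'}
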
diff --git a/youtube_dl/extractor/giantbomb.py b/youtube_dl/extractor/giantbomb.py
new file mode 100644 (file)
index 0000000..87cd191
--- /dev/null
@@ -0,0 +1,81 @@
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    unescapeHTML,
+    qualities,
+    int_or_none,
+)
+
+
+class GiantBombIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?giantbomb\.com/videos/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)'
+    _TEST = {
+        'url': 'http://www.giantbomb.com/videos/quick-look-destiny-the-dark-below/2300-9782/',
+        'md5': '57badeface303ecf6b98b812de1b9018',
+        'info_dict': {
+            'id': '2300-9782',
+            'display_id': 'quick-look-destiny-the-dark-below',
+            'ext': 'mp4',
+            'title': 'Quick Look: Destiny: The Dark Below',
+            'description': 'md5:0aa3aaf2772a41b91d44c63f30dfad24',
+            'duration': 2399,
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        video = json.loads(unescapeHTML(self._search_regex(
+            r'data-video="([^"]+)"', webpage, 'data-video')))
+
+        duration = int_or_none(video.get('lengthSeconds'))
+
+        quality = qualities([
+            'f4m_low', 'progressive_low', 'f4m_high',
+            'progressive_high', 'f4m_hd', 'progressive_hd'])
+
+        formats = []
+        for format_id, video_url in video['videoStreams'].items():
+            if format_id == 'f4m_stream':
+                continue
+            if video_url.endswith('.f4m'):
+                f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.3.1', display_id)
+                if f4m_formats:
+                    f4m_formats[0]['quality'] = quality(format_id)
+                    formats.extend(f4m_formats)
+            else:
+                formats.append({
+                    'url': video_url,
+                    'format_id': format_id,
+                    'quality': quality(format_id),
+                })
+
+        if not formats:
+            youtube_id = video.get('youtubeID')
+            if youtube_id:
+                return self.url_result(youtube_id, 'Youtube')
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'formats': formats,
+        }
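
giantbomb.py orders its mixed f4m/progressive streams with the qualities() helper from youtube_dl.utils. A behavioral approximation of that helper (the real implementation may differ in detail):

    def qualities(quality_ids):
        # Map a format_id to its rank in a worst-to-best ordered list;
        # unknown ids rank below everything (-1).
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    quality = qualities(['f4m_low', 'progressive_low', 'f4m_high',
                         'progressive_high', 'f4m_hd', 'progressive_hd'])
    assert quality('progressive_hd') > quality('f4m_low')
    assert quality('f4m_stream') == -1  # the extractor skips this id anyway
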
diff --git a/youtube_dl/extractor/glide.py b/youtube_dl/extractor/glide.py
new file mode 100644 (file)
index 0000000..9561ed5
--- /dev/null
@@ -0,0 +1,40 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class GlideIE(InfoExtractor):
+    IE_DESC = 'Glide mobile video messages (glide.me)'
+    _VALID_URL = r'https?://share\.glide\.me/(?P<id>[A-Za-z0-9\-=_+]+)'
+    _TEST = {
+        'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==',
+        'md5': '4466372687352851af2d131cfaa8a4c7',
+        'info_dict': {
+            'id': 'UZF8zlmuQbe4mr+7dCiQ0w==',
+            'ext': 'mp4',
+            'title': 'Damon Timm\'s Glide message',
+            'thumbnail': 're:^https?://.*?\.cloudfront\.net/.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        title = self._html_search_regex(
+            r'<title>(.*?)</title>', webpage, 'title')
+        video_url = self.http_scheme() + self._search_regex(
+            r'<source src="(.*?)" type="video/mp4">', webpage, 'video URL')
+        thumbnail_url = self._search_regex(
+            r'<img id="video-thumbnail" src="(.*?)"',
+            webpage, 'thumbnail url', fatal=False)
+        thumbnail = (
+            None if thumbnail_url is None
+            else self.http_scheme() + thumbnail_url)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'thumbnail': thumbnail,
+        }
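
glide.py completes scheme-relative sources with http_scheme(). A sketch of that completion step, assuming the 'prefer_insecure' option name and a scheme-relative '//host/path' input (the helper shape is an approximation of InfoExtractor.http_scheme()):

    def http_scheme(params):
        # Insecure scheme only when the user asked for it.
        return 'http:' if params.get('prefer_insecure', False) else 'https:'

    def complete_url(src, params):
        # Prefix scheme-relative sources; leave absolute URLs alone.
        return http_scheme(params) + src if src.startswith('//') else src

    assert complete_url('//cdn.example.com/v.mp4', {}) == \
        'https://cdn.example.com/v.mp4'
    assert complete_url('//cdn.example.com/v.mp4',
                        {'prefer_insecure': True}) == \
        'http://cdn.example.com/v.mp4'
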
diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py
new file mode 100644 (file)
index 0000000..6949a57
--- /dev/null
@@ -0,0 +1,400 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import random
+import math
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_str,
+    compat_chr,
+    compat_ord,
+)
+from ..utils import (
+    ExtractorError,
+    float_or_none,
+)
+
+
+class GloboIE(InfoExtractor):
+    _VALID_URL = r'https?://.+?\.globo\.com/(?P<id>.+)'
+
+    _API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist'
+    _SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=2.9.9.50&resource_id=%s'
+
+    _VIDEOID_REGEXES = [
+        r'\bdata-video-id="(\d+)"',
+        r'\bdata-player-videosids="(\d+)"',
+        r'<div[^>]+\bid="(\d+)"',
+    ]
+
+    _RESIGN_EXPIRATION = 86400
+
+    _TESTS = [
+        {
+            'url': 'http://globotv.globo.com/sportv/futebol-nacional/v/os-gols-de-atletico-mg-3-x-2-santos-pela-24a-rodada-do-brasileirao/3654973/',
+            'md5': '03ebf41cb7ade43581608b7d9b71fab0',
+            'info_dict': {
+                'id': '3654973',
+                'ext': 'mp4',
+                'title': 'Os gols de Atlético-MG 3 x 2 Santos pela 24ª rodada do Brasileirão',
+                'duration': 251.585,
+                'uploader': 'SporTV',
+                'uploader_id': 698,
+                'like_count': int,
+            }
+        },
+        {
+            'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
+            'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
+            'info_dict': {
+                'id': '3607726',
+                'ext': 'mp4',
+                'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa',
+                'duration': 103.204,
+                'uploader': 'Globo.com',
+                'uploader_id': 265,
+                'like_count': int,
+            }
+        },
+        {
+            'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',
+            'md5': '307fdeae4390ccfe6ba1aa198cf6e72b',
+            'info_dict': {
+                'id': '3652183',
+                'ext': 'mp4',
+                'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião',
+                'duration': 110.711,
+                'uploader': 'Rede Globo',
+                'uploader_id': 196,
+                'like_count': int,
+            }
+        },
+    ]
+
+    class MD5(object):
+        HEX_FORMAT_LOWERCASE = 0
+        HEX_FORMAT_UPPERCASE = 1
+        BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
+        BASE64_PAD_CHARACTER_RFC_COMPLIANCE = '='
+        PADDING = '=0xFF01DD'
+        hexcase = 0
+        b64pad = ''
+
+        def __init__(self):
+            pass
+
+        class JSArray(list):
+            def __getitem__(self, y):
+                try:
+                    return list.__getitem__(self, y)
+                except IndexError:
+                    return 0
+
+            def __setitem__(self, i, y):
+                try:
+                    return list.__setitem__(self, i, y)
+                except IndexError:
+                    self.extend([0] * (i - len(self) + 1))
+                    self[-1] = y
+
+        @classmethod
+        def hex_md5(cls, param1):
+            return cls.rstr2hex(cls.rstr_md5(cls.str2rstr_utf8(param1)))
+
+        @classmethod
+        def b64_md5(cls, param1, param2=None):
+            return cls.rstr2b64(cls.rstr_md5(cls.str2rstr_utf8(param1, param2)))
+
+        @classmethod
+        def any_md5(cls, param1, param2):
+            return cls.rstr2any(cls.rstr_md5(cls.str2rstr_utf8(param1)), param2)
+
+        @classmethod
+        def rstr_md5(cls, param1):
+            return cls.binl2rstr(cls.binl_md5(cls.rstr2binl(param1), len(param1) * 8))
+
+        @classmethod
+        def rstr2hex(cls, param1):
+            _loc_2 = '0123456789ABCDEF' if cls.hexcase else '0123456789abcdef'
+            _loc_3 = ''
+            for _loc_5 in range(0, len(param1)):
+                _loc_4 = compat_ord(param1[_loc_5])
+                _loc_3 += _loc_2[_loc_4 >> 4 & 15] + _loc_2[_loc_4 & 15]
+            return _loc_3
+
+        @classmethod
+        def rstr2b64(cls, param1):
+            _loc_2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
+            _loc_3 = ''
+            _loc_4 = len(param1)
+            for _loc_5 in range(0, _loc_4, 3):
+                _loc_6_1 = compat_ord(param1[_loc_5]) << 16
+                _loc_6_2 = compat_ord(param1[_loc_5 + 1]) << 8 if _loc_5 + 1 < _loc_4 else 0
+                _loc_6_3 = compat_ord(param1[_loc_5 + 2]) if _loc_5 + 2 < _loc_4 else 0
+                _loc_6 = _loc_6_1 | _loc_6_2 | _loc_6_3
+                for _loc_7 in range(0, 4):
+                    if _loc_5 * 8 + _loc_7 * 6 > len(param1) * 8:
+                        _loc_3 += cls.b64pad
+                    else:
+                        _loc_3 += _loc_2[_loc_6 >> 6 * (3 - _loc_7) & 63]
+            return _loc_3
+
+        @staticmethod
+        def rstr2any(param1, param2):
+            _loc_3 = len(param2)
+            _loc_4 = []
+            _loc_9 = [0] * ((len(param1) >> 2) + 1)
+            for _loc_5 in range(0, len(_loc_9)):
+                _loc_9[_loc_5] = compat_ord(param1[_loc_5 * 2]) << 8 | compat_ord(param1[_loc_5 * 2 + 1])
+
+            while len(_loc_9) > 0:
+                _loc_8 = []
+                _loc_7 = 0
+                for _loc_5 in range(0, len(_loc_9)):
+                    _loc_7 = (_loc_7 << 16) + _loc_9[_loc_5]
+                    _loc_6 = math.floor(_loc_7 / _loc_3)
+                    _loc_7 -= _loc_6 * _loc_3
+                    if len(_loc_8) > 0 or _loc_6 > 0:
+                        _loc_8[len(_loc_8)] = _loc_6
+
+                _loc_4[len(_loc_4)] = _loc_7
+                _loc_9 = _loc_8
+
+            _loc_10 = ''
+            _loc_5 = len(_loc_4) - 1
+            while _loc_5 >= 0:
+                _loc_10 += param2[_loc_4[_loc_5]]
+                _loc_5 -= 1
+
+            return _loc_10
+
+        @classmethod
+        def str2rstr_utf8(cls, param1, param2=None):
+            _loc_3 = ''
+            _loc_4 = -1
+            if not param2:
+                param2 = cls.PADDING
+            param1 = param1 + param2[1:9]
+            while True:
+                _loc_4 += 1
+                if _loc_4 >= len(param1):
+                    break
+                _loc_5 = compat_ord(param1[_loc_4])
+                _loc_6 = compat_ord(param1[_loc_4 + 1]) if _loc_4 + 1 < len(param1) else 0
+                if 55296 <= _loc_5 <= 56319 and 56320 <= _loc_6 <= 57343:
+                    _loc_5 = 65536 + ((_loc_5 & 1023) << 10) + (_loc_6 & 1023)
+                    _loc_4 += 1
+                if _loc_5 <= 127:
+                    _loc_3 += compat_chr(_loc_5)
+                    continue
+                if _loc_5 <= 2047:
+                    _loc_3 += compat_chr(192 | _loc_5 >> 6 & 31) + compat_chr(128 | _loc_5 & 63)
+                    continue
+                if _loc_5 <= 65535:
+                    _loc_3 += compat_chr(224 | _loc_5 >> 12 & 15) + compat_chr(128 | _loc_5 >> 6 & 63) + compat_chr(
+                        128 | _loc_5 & 63)
+                    continue
+                if _loc_5 <= 2097151:
+                    _loc_3 += compat_chr(240 | _loc_5 >> 18 & 7) + compat_chr(128 | _loc_5 >> 12 & 63) + compat_chr(
+                        128 | _loc_5 >> 6 & 63) + compat_chr(128 | _loc_5 & 63)
+            return _loc_3
+
+        @staticmethod
+        def rstr2binl(param1):
+            _loc_2 = [0] * ((len(param1) >> 2) + 1)
+            for _loc_3 in range(0, len(_loc_2)):
+                _loc_2[_loc_3] = 0
+            for _loc_3 in range(0, len(param1) * 8, 8):
+                _loc_2[_loc_3 >> 5] |= (compat_ord(param1[_loc_3 // 8]) & 255) << _loc_3 % 32
+            return _loc_2
+
+        @staticmethod
+        def binl2rstr(param1):
+            _loc_2 = ''
+            for _loc_3 in range(0, len(param1) * 32, 8):
+                _loc_2 += compat_chr(param1[_loc_3 >> 5] >> _loc_3 % 32 & 255)
+            return _loc_2
+
+        @classmethod
+        def binl_md5(cls, param1, param2):
+            param1 = cls.JSArray(param1)
+            param1[param2 >> 5] |= 128 << param2 % 32
+            param1[(param2 + 64 >> 9 << 4) + 14] = param2
+            _loc_3 = 1732584193
+            _loc_4 = -271733879
+            _loc_5 = -1732584194
+            _loc_6 = 271733878
+            for _loc_7 in range(0, len(param1), 16):
+                _loc_8 = _loc_3
+                _loc_9 = _loc_4
+                _loc_10 = _loc_5
+                _loc_11 = _loc_6
+                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 7, -680876936)
+                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 1], 12, -389564586)
+                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 17, 606105819)
+                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 3], 22, -1044525330)
+                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 7, -176418897)
+                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 5], 12, 1200080426)
+                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 17, -1473231341)
+                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 7], 22, -45705983)
+                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 7, 1770035416)
+                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 9], 12, -1958414417)
+                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 17, -42063)
+                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 11], 22, -1990404162)
+                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 7, 1804603682)
+                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 13], 12, -40341101)
+                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 17, -1502002290)
+                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 15], 22, 1236535329)
+                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 5, -165796510)
+                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 6], 9, -1069501632)
+                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 14, 643717713)
+                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 0], 20, -373897302)
+                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 5, -701558691)
+                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 10], 9, 38016083)
+                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 14, -660478335)
+                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 4], 20, -405537848)
+                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 5, 568446438)
+                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 14], 9, -1019803690)
+                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 14, -187363961)
+                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 8], 20, 1163531501)
+                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 5, -1444681467)
+                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 2], 9, -51403784)
+                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 14, 1735328473)
+                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 12], 20, -1926607734)
+                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 4, -378558)
+                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 8], 11, -2022574463)
+                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 16, 1839030562)
+                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 14], 23, -35309556)
+                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 4, -1530992060)
+                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 4], 11, 1272893353)
+                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 16, -155497632)
+                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 10], 23, -1094730640)
+                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 4, 681279174)
+                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 0], 11, -358537222)
+                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 16, -722521979)
+                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 6], 23, 76029189)
+                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 4, -640364487)
+                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 12], 11, -421815835)
+                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 16, 530742520)
+                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 2], 23, -995338651)
+                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 6, -198630844)
+                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 7], 10, 1126891415)
+                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 15, -1416354905)
+                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 5], 21, -57434055)
+                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 6, 1700485571)
+                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 3], 10, -1894986606)
+                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 15, -1051523)
+                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 1], 21, -2054922799)
+                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 6, 1873313359)
+                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 15], 10, -30611744)
+                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 15, -1560198380)
+                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 13], 21, 1309151649)
+                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 6, -145523070)
+                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 11], 10, -1120210379)
+                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 15, 718787259)
+                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 9], 21, -343485551)
+                _loc_3 = cls.safe_add(_loc_3, _loc_8)
+                _loc_4 = cls.safe_add(_loc_4, _loc_9)
+                _loc_5 = cls.safe_add(_loc_5, _loc_10)
+                _loc_6 = cls.safe_add(_loc_6, _loc_11)
+            return [_loc_3, _loc_4, _loc_5, _loc_6]
+
+        @classmethod
+        def md5_cmn(cls, param1, param2, param3, param4, param5, param6):
+            return cls.safe_add(
+                cls.bit_rol(cls.safe_add(cls.safe_add(param2, param1), cls.safe_add(param4, param6)), param5), param3)
+
+        @classmethod
+        def md5_ff(cls, param1, param2, param3, param4, param5, param6, param7):
+            return cls.md5_cmn(param2 & param3 | ~param2 & param4, param1, param2, param5, param6, param7)
+
+        @classmethod
+        def md5_gg(cls, param1, param2, param3, param4, param5, param6, param7):
+            return cls.md5_cmn(param2 & param4 | param3 & ~param4, param1, param2, param5, param6, param7)
+
+        @classmethod
+        def md5_hh(cls, param1, param2, param3, param4, param5, param6, param7):
+            return cls.md5_cmn(param2 ^ param3 ^ param4, param1, param2, param5, param6, param7)
+
+        @classmethod
+        def md5_ii(cls, param1, param2, param3, param4, param5, param6, param7):
+            return cls.md5_cmn(param3 ^ (param2 | ~param4), param1, param2, param5, param6, param7)
+
+        @classmethod
+        def safe_add(cls, param1, param2):
+            _loc_3 = (param1 & 65535) + (param2 & 65535)
+            _loc_4 = (param1 >> 16) + (param2 >> 16) + (_loc_3 >> 16)
+            return cls.lshift(_loc_4, 16) | _loc_3 & 65535
+
+        @classmethod
+        def bit_rol(cls, param1, param2):
+            return cls.lshift(param1, param2) | (param1 & 0xFFFFFFFF) >> (32 - param2)
+
+        @staticmethod
+        def lshift(value, count):
+            r = (0xFFFFFFFF & value) << count
+            return -(~(r - 1) & 0xFFFFFFFF) if r > 0x7FFFFFFF else r
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+        video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id')
+
+        video = self._download_json(
+            self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]
+
+        title = video['title']
+        duration = float_or_none(video['duration'], 1000)
+        like_count = video['likes']
+        uploader = video['channel']
+        uploader_id = video['channel_id']
+
+        formats = []
+
+        for resource in video['resources']:
+            resource_id = resource.get('_id')
+            if not resource_id:
+                continue
+
+            security = self._download_json(
+                self._SECURITY_URL_TEMPLATE % (video_id, resource_id),
+                video_id, 'Downloading security hash for %s' % resource_id)
+
+            security_hash = security.get('hash')
+            if not security_hash:
+                message = security.get('message')
+                if message:
+                    raise ExtractorError(
+                        '%s returned error: %s' % (self.IE_NAME, message), expected=True)
+                continue
+
+            hash_code = security_hash[:2]
+            received_time = int(security_hash[2:12])
+            received_random = security_hash[12:22]
+            received_md5 = security_hash[22:]
+
+            sign_time = received_time + self._RESIGN_EXPIRATION
+            padding = '%010d' % random.randint(1, 10000000000)
+
+            signed_md5 = self.MD5.b64_md5(received_md5 + compat_str(sign_time) + padding)
+            signed_hash = hash_code + compat_str(received_time) + received_random + compat_str(sign_time) + padding + signed_md5
+
+            formats.append({
+                'url': '%s?h=%s&k=%s' % (resource['url'], signed_hash, 'flash'),
+                'format_id': resource_id,
+                'height': resource['height']
+            })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'duration': duration,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'like_count': like_count,
+            'formats': formats
+        }
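
The MD5 port above is a line-for-line translation of Globo's ActionScript hasher, so it needs JavaScript array semantics: reads past the end yield 0 and writes past the end grow the array. That is exactly what the JSArray subclass provides; isolated, its behavior looks like this:

    class JSArray(list):
        def __getitem__(self, y):
            try:
                return list.__getitem__(self, y)
            except IndexError:
                return 0  # out-of-range read, as in JS (undefined -> 0 here)

        def __setitem__(self, i, y):
            try:
                return list.__setitem__(self, i, y)
            except IndexError:
                self.extend([0] * (i - len(self) + 1))  # grow, zero-filled
                self[-1] = y

    a = JSArray([1, 2])
    assert a[10] == 0        # reading past the end
    a[4] = 7                 # writing past the end grows the list
    assert a == [1, 2, 0, 0, 7]
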
index 73bd6d8903018ce374488441e6fbc47dff318244..363dc66086e350af241959f2b547004ebd07d6db 100644 (file)
@@ -36,16 +36,16 @@ class GodTubeIE(InfoExtractor):
             'http://www.godtube.com/resource/mediaplayer/%s.xml' % video_id.lower(),
             video_id, 'Downloading player config XML')
 
-        video_url = config.find('.//file').text
-        uploader = config.find('.//author').text
-        timestamp = parse_iso8601(config.find('.//date').text)
-        duration = parse_duration(config.find('.//duration').text)
-        thumbnail = config.find('.//image').text
+        video_url = config.find('file').text
+        uploader = config.find('author').text
+        timestamp = parse_iso8601(config.find('date').text)
+        duration = parse_duration(config.find('duration').text)
+        thumbnail = config.find('image').text
 
         media = self._download_xml(
             'http://www.godtube.com/media/xml/?v=%s' % video_id, video_id, 'Downloading media XML')
 
-        title = media.find('.//title').text
+        title = media.find('title').text
 
         return {
             'id': video_id,
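
The GodTube fix swaps find('.//file') for find('file'): the config values are direct children of the root, and the './/' descendant search is both unnecessary and prone to over-matching if nested elements appear. The difference, demonstrated with the standard library:

    import xml.etree.ElementTree as ET

    config = ET.fromstring(
        '<config><file>http://a/v.flv</file>'
        '<nested><file>http://b/other.flv</file></nested></config>')

    assert config.find('file').text == 'http://a/v.flv'     # direct child only
    assert [e.text for e in config.findall('.//file')] == [
        'http://a/v.flv', 'http://b/other.flv']             # all descendants
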
diff --git a/youtube_dl/extractor/goldenmoustache.py b/youtube_dl/extractor/goldenmoustache.py
new file mode 100644 (file)
index 0000000..0fb5097
--- /dev/null
@@ -0,0 +1,48 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class GoldenMoustacheIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?goldenmoustache\.com/(?P<display_id>[\w-]+)-(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'http://www.goldenmoustache.com/suricate-le-poker-3700/',
+        'md5': '0f904432fa07da5054d6c8beb5efb51a',
+        'info_dict': {
+            'id': '3700',
+            'ext': 'mp4',
+            'title': 'Suricate - Le Poker',
+            'description': 'md5:3d1f242f44f8c8cb0a106f1fd08e5dc9',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }, {
+        'url': 'http://www.goldenmoustache.com/le-lab-tout-effacer-mc-fly-et-carlito-55249/',
+        'md5': '27f0c50fb4dd5f01dc9082fc67cd5700',
+        'info_dict': {
+            'id': '55249',
+            'ext': 'mp4',
+            'title': 'Le LAB - Tout Effacer (Mc Fly et Carlito)',
+            'description': 'md5:9b7fbf11023fb2250bd4b185e3de3b2a',
+            'thumbnail': 're:^https?://.*\.(?:png|jpg)$',
+        }
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(
+            r'data-src-type="mp4" data-src="([^"]+)"', webpage, 'video URL')
+        title = self._html_search_regex(
+            r'<title>(.*?)(?: - Golden Moustache)?</title>', webpage, 'title')
+        thumbnail = self._og_search_thumbnail(webpage)
+        description = self._og_search_description(webpage)
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+        }
diff --git a/youtube_dl/extractor/golem.py b/youtube_dl/extractor/golem.py
new file mode 100644 (file)
index 0000000..53714f4
--- /dev/null
@@ -0,0 +1,69 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urlparse,
+    determine_ext,
+)
+
+
+class GolemIE(InfoExtractor):
+    _VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/'
+    _TEST = {
+        'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
+        'md5': 'c1a2c0a3c863319651c7c992c5ee29bf',
+        'info_dict': {
+            'id': '14095',
+            'format_id': 'high',
+            'ext': 'mp4',
+            'title': 'iPhone 6 und 6 Plus - Test',
+            'duration': 300.44,
+            'filesize': 65309548,
+        }
+    }
+
+    _PREFIX = 'http://video.golem.de'
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        config = self._download_xml(
+            'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id)
+
+        info = {
+            'id': video_id,
+            'title': config.findtext('./title', 'golem'),
+            'duration': self._float(config.findtext('./playtime'), 'duration'),
+        }
+
+        formats = []
+        for e in config:
+            url = e.findtext('./url')
+            if not url:
+                continue
+
+            formats.append({
+                'format_id': e.tag,
+                'url': compat_urlparse.urljoin(self._PREFIX, url),
+                'height': self._int(e.get('height'), 'height'),
+                'width': self._int(e.get('width'), 'width'),
+                'filesize': self._int(e.findtext('filesize'), 'filesize'),
+                'ext': determine_ext(e.findtext('./filename')),
+            })
+        self._sort_formats(formats)
+        info['formats'] = formats
+
+        thumbnails = []
+        for e in config.findall('.//teaser'):
+            url = e.findtext('./url')
+            if not url:
+                continue
+            thumbnails.append({
+                'url': compat_urlparse.urljoin(self._PREFIX, url),
+                'width': self._int(e.get('width'), 'thumbnail width'),
+                'height': self._int(e.get('height'), 'thumbnail height'),
+            })
+        info['thumbnails'] = thumbnails
+
+        return info
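
golem.py derives one format per child element of the XML config, joining the fixed host prefix with each relative path. The core pattern, reduced to the standard library (the sample XML shape is an assumption based on the element names in the diff):

    import xml.etree.ElementTree as ET
    from urllib.parse import urljoin

    PREFIX = 'http://video.golem.de'
    config = ET.fromstring(
        '<video><title>Demo</title>'
        '<high width="1280" height="720">'
        '<url>/download/123/high.mp4</url></high></video>')

    formats = []
    for e in config:
        url = e.findtext('./url')
        if not url:
            continue  # skip elements (like <title>) without a stream URL
        formats.append({'format_id': e.tag, 'url': urljoin(PREFIX, url)})

    assert formats == [{'format_id': 'high',
                        'url': 'http://video.golem.de/download/123/high.mp4'}]
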
index 07d994b448040fb80912593b9cdae4ac66e63bbb..fcefe54cd1207f1a57000c04b7fb460590f2024e 100644 (file)
@@ -1,13 +1,11 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import datetime
 import re
+import codecs
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-)
+from ..utils import unified_strdate
 
 
 class GooglePlusIE(InfoExtractor):
@@ -19,74 +17,57 @@ class GooglePlusIE(InfoExtractor):
         'info_dict': {
             'id': 'ZButuJc6CtH',
             'ext': 'flv',
+            'title': '嘆きの天使 降臨',
             'upload_date': '20120613',
             'uploader': '井上ヨシマサ',
-            'title': '嘆きの天使 降臨',
         }
     }
 
     def _real_extract(self, url):
-        # Extract id from URL
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         # Step 1, Retrieve post webpage to extract further information
         webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
 
-        self.report_extraction(video_id)
-
-        # Extract update date
-        upload_date = self._html_search_regex(
+        title = self._og_search_description(webpage).splitlines()[0]
+        upload_date = unified_strdate(self._html_search_regex(
             r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                     ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
-            webpage, 'upload date', fatal=False, flags=re.VERBOSE)
-        if upload_date:
-            # Convert timestring to a format suitable for filename
-            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
-            upload_date = upload_date.strftime('%Y%m%d')
-
-        # Extract uploader
-        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
-            webpage, 'uploader', fatal=False)
-
-        # Extract title
-        # Get the first line for title
-        video_title = self._og_search_description(webpage).splitlines()[0]
+            webpage, 'upload date', fatal=False, flags=re.VERBOSE))
+        uploader = self._html_search_regex(
+            r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)
 
         # Step 2, Simulate clicking the image box to launch video
         DOMAIN = 'https://plus.google.com/'
-        video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
+        video_page = self._search_regex(
+            r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
             webpage, 'video page URL')
         if not video_page.startswith(DOMAIN):
             video_page = DOMAIN + video_page
 
         webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
 
-        # Extract video links all sizes
-        pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
-        mobj = re.findall(pattern, webpage)
-        if len(mobj) == 0:
-            raise ExtractorError('Unable to extract video links')
-
-        # Sort in resolution
-        links = sorted(mobj)
+        def unicode_escape(s):
+            decoder = codecs.getdecoder('unicode_escape')
+            return re.sub(
+                r'\\u[0-9a-fA-F]{4,}',
+                lambda m: decoder(m.group(0))[0],
+                s)
 
-        # Choose the lowest of the sort, i.e. highest resolution
-        video_url = links[-1]
-        # Only get the url. The resolution part in the tuple has no use anymore
-        video_url = video_url[-1]
-        # Treat escaped \u0026 style hex
-        try:
-            video_url = video_url.decode("unicode_escape")
-        except AttributeError: # Python 3
-            video_url = bytes(video_url, 'ascii').decode('unicode-escape')
+        # Extract video links all sizes
+        formats = [{
+            'url': unicode_escape(video_url),
+            'ext': 'flv',
+            'width': int(width),
+            'height': int(height),
+        } for width, height, video_url in re.findall(
+            r'\d+,(\d+),(\d+),"(https?://redirector\.googlevideo\.com.*?)"', webpage)]
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
-            'url': video_url,
+            'title': title,
             'uploader': uploader,
             'upload_date': upload_date,
-            'title': video_title,
-            'ext': 'flv',
+            'formats': formats,
         }
index 383032d81b1c9bd965be9caee8adefd79547b208..469e1f9357eaf66ef48b3bed6c3d464c48f2b5dc 100644 (file)
@@ -14,6 +14,14 @@ class GoogleSearchIE(SearchInfoExtractor):
     _MAX_RESULTS = 1000
     IE_NAME = 'video.google:search'
     _SEARCH_KEY = 'gvsearch'
+    _TEST = {
+        'url': 'gvsearch15:python language',
+        'info_dict': {
+            'id': 'python language',
+            'title': 'python language',
+        },
+        'playlist_count': 15,
+    }
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
index ca5f7c4178e23fb6e10e644807175a65f042f296..1ac1da8569e20b84c2baf38011488d6304a090cb 100644 (file)
@@ -5,27 +5,31 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
     determine_ext,
     compat_urllib_parse,
     compat_urllib_request,
+    int_or_none,
 )
 
 
 class GorillaVidIE(InfoExtractor):
-    IE_DESC = 'GorillaVid.in and daclips.in'
+    IE_DESC = 'GorillaVid.in, daclips.in, movpod.in and fastvideo.in'
     _VALID_URL = r'''(?x)
         https?://(?P<host>(?:www\.)?
-            (?:daclips\.in|gorillavid\.in))/
+            (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in))/
         (?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
     '''
 
+    _FILE_NOT_FOUND_REGEX = r'>(?:404 - )?File Not Found<'
+
     _TESTS = [{
         'url': 'http://gorillavid.in/06y9juieqpmi',
         'md5': '5ae4a3580620380619678ee4875893ba',
         'info_dict': {
             'id': '06y9juieqpmi',
             'ext': 'flv',
-            'title': 'Rebecca Black My Moment Official Music Video Reaction',
+            'title': 'Rebecca Black My Moment Official Music Video Reaction-6GK87Rc8bzQ',
             'thumbnail': 're:http://.*\.jpg',
         },
     }, {
@@ -43,9 +47,22 @@ class GorillaVidIE(InfoExtractor):
         'info_dict': {
             'id': '3rso4kdn6f9m',
             'ext': 'mp4',
-            'title': 'Micro Pig piglets ready on 16th July 2009',
+            'title': 'Micro Pig piglets ready on 16th July 2009-bG0PdrCdxUc',
+            'thumbnail': 're:http://.*\.jpg',
+        }
+    }, {
+        # video with countdown timeout
+        'url': 'http://fastvideo.in/1qmdn1lmsmbw',
+        'md5': '8b87ec3f6564a3108a0e8e66594842ba',
+        'info_dict': {
+            'id': '1qmdn1lmsmbw',
+            'ext': 'mp4',
+            'title': 'Man of Steel - Trailer',
             'thumbnail': 're:http://.*\.jpg',
         },
+    }, {
+        'url': 'http://movpod.in/0wguyyxi1yca',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -54,14 +71,23 @@ class GorillaVidIE(InfoExtractor):
 
         webpage = self._download_webpage('http://%s/%s' % (mobj.group('host'), video_id), video_id)
 
+        if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
+            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+
         fields = dict(re.findall(r'''(?x)<input\s+
             type="hidden"\s+
             name="([^"]+)"\s+
             (?:id="[^"]+"\s+)?
             value="([^"]*)"
             ''', webpage))
-        
+
         if fields['op'] == 'download1':
+            countdown = int_or_none(self._search_regex(
+                r'<span id="countdown_str">(?:[Ww]ait)?\s*<span id="cxc">(\d+)</span>\s*(?:seconds?)?</span>',
+                webpage, 'countdown', default=None))
+            if countdown:
+                self._sleep(countdown, video_id)
+
             post = compat_urllib_parse.urlencode(fields)
 
             req = compat_urllib_request.Request(url, post)
@@ -69,14 +95,18 @@ class GorillaVidIE(InfoExtractor):
 
             webpage = self._download_webpage(req, video_id, 'Downloading video page')
 
-        title = self._search_regex(r'style="z-index: [0-9]+;">([0-9a-zA-Z ]+)(?:-.+)?</span>', webpage, 'title')
-        thumbnail = self._search_regex(r'image:\'(http[^\']+)\',', webpage, 'thumbnail')
-        url = self._search_regex(r'file: \'(http[^\']+)\',', webpage, 'file url')
+        title = self._search_regex(
+            r'style="z-index: [0-9]+;">([^<]+)</span>',
+            webpage, 'title', default=None) or self._og_search_title(webpage)
+        video_url = self._search_regex(
+            r'file\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'file url')
+        thumbnail = self._search_regex(
+            r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', fatal=False)
 
         formats = [{
             'format_id': 'sd',
-            'url': url,
-            'ext': determine_ext(url),
+            'url': video_url,
+            'ext': determine_ext(video_url),
             'quality': 1,
         }]
 
index 7bca21ad0fe81c71444bfe64fb23ea3ad62c6c5b..b116d251d5d3f30c6affc852454e7e326d14f660 100644 (file)
@@ -1,73 +1,53 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
+from ..compat import (
+    compat_parse_qs,
+)
 from ..utils import (
-    compat_urlparse,
-    str_to_int,
-    ExtractorError,
+    parse_duration,
 )
-import json
 
 
 class GoshgayIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)www.goshgay.com/video(?P<id>\d+?)($|/)'
+    _VALID_URL = r'https?://www\.goshgay\.com/video(?P<id>\d+?)($|/)'
     _TEST = {
-        'url': 'http://www.goshgay.com/video4116282',
-        'md5': '268b9f3c3229105c57859e166dd72b03',
+        'url': 'http://www.goshgay.com/video299069/diesel_sfw_xxx_video',
+        'md5': '027fcc54459dff0feb0bc06a7aeda680',
         'info_dict': {
-            'id': '4116282',
+            'id': '299069',
             'ext': 'flv',
-            'title': 'md5:089833a4790b5e103285a07337f245bf',
-            'thumbnail': 're:http://.*\.jpg',
+            'title': 'DIESEL SFW XXX Video',
+            'thumbnail': 're:^http://.*\.jpg$',
+            'duration': 79,
             'age_limit': 18,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        title = self._search_regex(r'class="video-title"><h1>(.+?)<', webpage, 'title')
 
-        player_config = self._search_regex(
-            r'(?s)jwplayer\("player"\)\.setup\(({.+?})\)', webpage, 'config settings')
-        player_vars = json.loads(player_config.replace("'", '"'))
-        width = str_to_int(player_vars.get('width'))
-        height = str_to_int(player_vars.get('height'))
-        config_uri = player_vars.get('config')
+        title = self._html_search_regex(
+            r'<h2>(.*?)<', webpage, 'title')
+        duration = parse_duration(self._html_search_regex(
+            r'<span class="duration">\s*-?\s*(.*?)</span>',
+            webpage, 'duration', fatal=False))
+        family_friendly = self._html_search_meta(
+            'isFamilyFriendly', webpage, default='false')
 
-        if config_uri is None:
-            raise ExtractorError('Missing config URI')
-        node = self._download_xml(config_uri, video_id, 'Downloading player config XML',
-                                  errnote='Unable to download XML')
-        if node is None:
-            raise ExtractorError('Missing config XML')
-        if node.tag != 'config':
-            raise ExtractorError('Missing config attribute')
-        fns = node.findall('file')
-        imgs = node.findall('image')
-        if len(fns) != 1:
-            raise ExtractorError('Missing media URI')
-        video_url = fns[0].text
-        if len(imgs) < 1:
-            thumbnail = None
-        else:
-            thumbnail = imgs[0].text
-
-        url_comp = compat_urlparse.urlparse(url)
-        ref = "%s://%s%s" % (url_comp[0], url_comp[1], url_comp[2])
+        flashvars = compat_parse_qs(self._html_search_regex(
+            r'<embed.+?id="flash-player-embed".+?flashvars="([^"]+)"',
+            webpage, 'flashvars'))
+        thumbnail = flashvars.get('url_bigthumb', [None])[0]
+        video_url = flashvars['flv_url'][0]
 
         return {
             'id': video_id,
             'url': video_url,
             'title': title,
-            'width': width,
-            'height': height,
             'thumbnail': thumbnail,
-            'http_referer': ref,
-            'age_limit': 18,
+            'duration': duration,
+            'age_limit': 0 if family_friendly == 'true' else 18,
         }
diff --git a/youtube_dl/extractor/grooveshark.py b/youtube_dl/extractor/grooveshark.py
new file mode 100644 (file)
index 0000000..fff74a7
--- /dev/null
@@ -0,0 +1,191 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import time
+import math
+import os.path
+import re
+
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_html_parser,
+    compat_urllib_parse,
+    compat_urllib_request,
+    compat_urlparse,
+)
+from ..utils import ExtractorError
+
+
+class GroovesharkHtmlParser(compat_html_parser.HTMLParser):
+    def __init__(self):
+        self._current_object = None
+        self.objects = []
+        compat_html_parser.HTMLParser.__init__(self)
+
+    def handle_starttag(self, tag, attrs):
+        attrs = dict((k, v) for k, v in attrs)
+        if tag == 'object':
+            self._current_object = {'attrs': attrs, 'params': []}
+        elif tag == 'param':
+            self._current_object['params'].append(attrs)
+
+    def handle_endtag(self, tag):
+        if tag == 'object':
+            self.objects.append(self._current_object)
+            self._current_object = None
+
+    @classmethod
+    def extract_object_tags(cls, html):
+        p = cls()
+        p.feed(html)
+        p.close()
+        return p.objects
+
+
+class GroovesharkIE(InfoExtractor):
+    _VALID_URL = r'https?://(www\.)?grooveshark\.com/#!/s/([^/]+)/([^/]+)'
+    _TEST = {
+        'url': 'http://grooveshark.com/#!/s/Jolene+Tenth+Key+Remix+Ft+Will+Sessions/6SS1DW?src=5',
+        'md5': '7ecf8aefa59d6b2098517e1baa530023',
+        'info_dict': {
+            'id': '6SS1DW',
+            'title': 'Jolene (Tenth Key Remix ft. Will Sessions)',
+            'ext': 'mp3',
+            'duration': 227,
+        }
+    }
+
+    do_playerpage_request = True
+    do_bootstrap_request = True
+
+    def _parse_target(self, target):
+        uri = compat_urlparse.urlparse(target)
+        hash = uri.fragment[1:].split('?')[0]
+        token = os.path.basename(hash.rstrip('/'))
+        return (uri, hash, token)
+
+    def _build_bootstrap_url(self, target):
+        (uri, hash, token) = self._parse_target(target)
+        query = 'getCommunicationToken=1&hash=%s&%d' % (compat_urllib_parse.quote(hash, safe=''), self.ts)
+        return (compat_urlparse.urlunparse((uri.scheme, uri.netloc, '/preload.php', None, query, None)), token)
+
+    def _build_meta_url(self, target):
+        (uri, hash, token) = self._parse_target(target)
+        query = 'hash=%s&%d' % (compat_urllib_parse.quote(hash, safe=''), self.ts)
+        return (compat_urlparse.urlunparse((uri.scheme, uri.netloc, '/preload.php', None, query, None)), token)
+
+    def _build_stream_url(self, meta):
+        return compat_urlparse.urlunparse(('http', meta['streamKey']['ip'], '/stream.php', None, None, None))
+
+    def _build_swf_referer(self, target, obj):
+        (uri, _, _) = self._parse_target(target)
+        return compat_urlparse.urlunparse((uri.scheme, uri.netloc, obj['attrs']['data'], None, None, None))
+
+    def _transform_bootstrap(self, js):
+        return re.split('(?m)^\s*try\s*{', js)[0] \
+                 .split(' = ', 1)[1].strip().rstrip(';')
+
+    def _transform_meta(self, js):
+        return js.split('\n')[0].split('=')[1].rstrip(';')
+
+    def _get_meta(self, target):
+        (meta_url, token) = self._build_meta_url(target)
+        self.to_screen('Metadata URL: %s' % meta_url)
+
+        headers = {'Referer': compat_urlparse.urldefrag(target)[0]}
+        req = compat_urllib_request.Request(meta_url, headers=headers)
+        res = self._download_json(req, token,
+                                  transform_source=self._transform_meta)
+
+        if 'getStreamKeyWithSong' not in res:
+            raise ExtractorError(
+                'Metadata not found. URL may be malformed, or Grooveshark API may have changed.')
+
+        if res['getStreamKeyWithSong'] is None:
+            raise ExtractorError(
+                'Metadata download failed, probably due to Grooveshark anti-abuse throttling. Wait at least an hour before retrying from this IP.',
+                expected=True)
+
+        return res['getStreamKeyWithSong']
+
+    def _get_bootstrap(self, target):
+        (bootstrap_url, token) = self._build_bootstrap_url(target)
+
+        headers = {'Referer': compat_urlparse.urldefrag(target)[0]}
+        req = compat_urllib_request.Request(bootstrap_url, headers=headers)
+        res = self._download_json(req, token, fatal=False,
+                                  note='Downloading player bootstrap data',
+                                  errnote='Unable to download player bootstrap data',
+                                  transform_source=self._transform_bootstrap)
+        return res
+
+    def _get_playerpage(self, target):
+        (_, _, token) = self._parse_target(target)
+
+        webpage = self._download_webpage(
+            target, token,
+            note='Downloading player page',
+            errnote='Unable to download player page',
+            fatal=False)
+
+        if webpage is not None:
+            # Search for an error message (for example, in German)
+            error_msg = self._html_search_regex(
+                r'<div id="content">\s*<h2>(.*?)</h2>', webpage,
+                'error message', default=None)
+            if error_msg is not None:
+                error_msg = error_msg.replace('\n', ' ')
+                raise ExtractorError('Grooveshark said: %s' % error_msg)
+
+        if webpage is not None:
+            o = GroovesharkHtmlParser.extract_object_tags(webpage)
+            return (webpage, [x for x in o if x['attrs']['id'] == 'jsPlayerEmbed'])
+
+        return (webpage, None)
+
+    def _real_initialize(self):
+        self.ts = int(time.time() * 1000)  # timestamp in millis
+
+    def _real_extract(self, url):
+        (target_uri, _, token) = self._parse_target(url)
+
+        # 1. Fill cookiejar by making a request to the player page
+        swf_referer = None
+        if self.do_playerpage_request:
+            (_, player_objs) = self._get_playerpage(url)
+            if player_objs is not None:
+                swf_referer = self._build_swf_referer(url, player_objs[0])
+                self.to_screen('SWF Referer: %s' % swf_referer)
+
+        # 2. Ask preload.php for swf bootstrap data to better mimic webapp
+        if self.do_bootstrap_request:
+            bootstrap = self._get_bootstrap(url)
+            self.to_screen('CommunicationToken: %s' % bootstrap['getCommunicationToken'])
+
+        # 3. Ask preload.php for track metadata.
+        meta = self._get_meta(url)
+
+        # 4. Construct stream request for track.
+        stream_url = self._build_stream_url(meta)
+        duration = int(math.ceil(float(meta['streamKey']['uSecs']) / 1000000))
+        post_dict = {'streamKey': meta['streamKey']['streamKey']}
+        post_data = compat_urllib_parse.urlencode(post_dict).encode('utf-8')
+        headers = {
+            'Content-Length': len(post_data),
+            'Content-Type': 'application/x-www-form-urlencoded'
+        }
+        if swf_referer is not None:
+            headers['Referer'] = swf_referer
+
+        return {
+            'id': token,
+            'title': meta['song']['Name'],
+            'http_method': 'POST',
+            'url': stream_url,
+            'ext': 'mp3',
+            'format': 'mp3 audio',
+            'duration': duration,
+            'http_post_data': post_data,
+            'http_headers': headers,
+        }
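The preload.php URLs in the new Grooveshark extractor are assembled from the page's '#!/s/...' fragment plus a millisecond timestamp. A standalone sketch of _build_meta_url under the same assumptions, using the Python 3 stdlib names for the compat wrappers:

    import time
    from urllib.parse import quote, urlparse, urlunparse

    def build_meta_url(target):
        uri = urlparse(target)
        # Token/hash come from the '#!/s/...' fragment, as in _parse_target.
        hash_part = uri.fragment[1:].split('?')[0]
        ts = int(time.time() * 1000)  # millisecond timestamp, per _real_initialize
        query = 'hash=%s&%d' % (quote(hash_part, safe=''), ts)
        return urlunparse((uri.scheme, uri.netloc, '/preload.php', None, query, None))

    print(build_meta_url('http://grooveshark.com/#!/s/Jolene/6SS1DW?src=5'))
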
index 5bdd08afabd27474fc2f5b1ed2afb371efdf8d9d..b6cc15b6fbad25c43fe0699668bd3ec452ed944d 100644 (file)
@@ -1,37 +1,33 @@
 # -*- coding: utf-8 -*-
-
-import re
-import json
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import determine_ext
+
 
 class HarkIE(InfoExtractor):
-    _VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+'
+    _VALID_URL = r'https?://www\.hark\.com/clips/(?P<id>.+?)-.+'
     _TEST = {
-        u'url': u'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
-        u'file': u'mmbzyhkgny.mp3',
-        u'md5': u'6783a58491b47b92c7c1af5a77d4cbee',
-        u'info_dict': {
-            u'title': u"Obama: 'Beyond The Afghan Theater, We Only Target Al Qaeda' on May 23, 2013",
-            u'description': u'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
-            u'duration': 11,
+        'url': 'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
+        'md5': '6783a58491b47b92c7c1af5a77d4cbee',
+        'info_dict': {
+            'id': 'mmbzyhkgny',
+            'ext': 'mp3',
+            'title': 'Obama: \'Beyond The Afghan Theater, We Only Target Al Qaeda\' on May 23, 2013',
+            'description': 'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
+            'duration': 11,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
-        json_url = "http://www.hark.com/clips/%s.json" %(video_id)
-        info_json = self._download_webpage(json_url, video_id)
-        info = json.loads(info_json)
-        final_url = info['url']
+        video_id = self._match_id(url)
+        data = self._download_json(
+            'http://www.hark.com/clips/%s.json' % video_id, video_id)
 
-        return {'id': video_id,
-                'url' : final_url,
-                'title': info['name'],
-                'ext': determine_ext(final_url),
-                'description': info['description'],
-                'thumbnail': info['image_original'],
-                'duration': info['duration'],
-                }
+        return {
+            'id': video_id,
+            'url': data['url'],
+            'title': data['name'],
+            'description': data.get('description'),
+            'thumbnail': data.get('image_original'),
+            'duration': data.get('duration'),
+        }
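The HarkIE refactor replaces a hand-rolled _download_webpage plus json.loads pair with _download_json. A stdlib-only sketch of what that call boils down to (endpoint taken from the extractor; no error handling, and the site may no longer respond):

    import json
    from urllib.request import urlopen

    video_id = 'mmbzyhkgny'  # from the test URL above
    with urlopen('http://www.hark.com/clips/%s.json' % video_id) as resp:
        data = json.loads(resp.read().decode('utf-8'))
    print(data['name'], data.get('duration'))
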
diff --git a/youtube_dl/extractor/heise.py b/youtube_dl/extractor/heise.py
new file mode 100644 (file)
index 0000000..278d9f5
--- /dev/null
@@ -0,0 +1,79 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    int_or_none,
+    parse_iso8601,
+)
+
+
+class HeiseIE(InfoExtractor):
+    _VALID_URL = r'''(?x)
+        https?://(?:www\.)?heise\.de/video/artikel/
+        .+?(?P<id>[0-9]+)\.html(?:$|[?#])
+    '''
+    _TEST = {
+        'url': (
+            'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html'
+        ),
+        'md5': 'ffed432483e922e88545ad9f2f15d30e',
+        'info_dict': {
+            'id': '2404147',
+            'ext': 'mp4',
+            'title': (
+                "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone"
+            ),
+            'format_id': 'mp4_720p',
+            'timestamp': 1411812600,
+            'upload_date': '20140927',
+            'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
+            'thumbnail': 're:^https?://.*\.jpe?g$',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        container_id = self._search_regex(
+            r'<div class="videoplayerjw".*?data-container="([0-9]+)"',
+            webpage, 'container ID')
+        sequenz_id = self._search_regex(
+            r'<div class="videoplayerjw".*?data-sequenz="([0-9]+)"',
+            webpage, 'sequenz ID')
+        data_url = 'http://www.heise.de/videout/feed?container=%s&sequenz=%s' % (container_id, sequenz_id)
+        doc = self._download_xml(data_url, video_id)
+
+        info = {
+            'id': video_id,
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'timestamp': parse_iso8601(
+                self._html_search_meta('date', webpage)),
+            'description': self._og_search_description(webpage),
+        }
+
+        title = self._html_search_meta('fulltitle', webpage)
+        if title:
+            info['title'] = title
+        else:
+            info['title'] = self._og_search_title(webpage)
+
+        formats = []
+        for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'):
+            label = source_node.attrib['label']
+            height = int_or_none(self._search_regex(
+                r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))
+            video_url = source_node.attrib['file']
+            ext = determine_ext(video_url, '')
+            formats.append({
+                'url': video_url,
+                'format_note': label,
+                'format_id': '%s_%s' % (ext, label),
+                'height': height,
+            })
+        self._sort_formats(formats)
+        info['formats'] = formats
+
+        return info
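The formats loop in HeiseIE walks <source> nodes in the JWPlayer RSS namespace using ElementTree's Clark notation ({namespace}tag). A toy feed showing the same lookup (the feed structure here is assumed):

    import xml.etree.ElementTree as ET

    FEED = ('<rss xmlns:jw="http://rss.jwpcdn.com/"><channel><item>'
            '<jw:source file="http://example.com/video.mp4" label="720p"/>'
            '</item></channel></rss>')

    doc = ET.fromstring(FEED)
    for source in doc.findall('.//{http://rss.jwpcdn.com/}source'):
        print(source.attrib['file'], source.attrib['label'])
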
index 5268efa49433ca8c7cb0c2288df0705165e876a3..93107b3064ebfba513b3aa208556b5822f6cf979 100644 (file)
@@ -2,9 +2,8 @@
 
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
+from ..utils import js_to_json
 
 
 class HelsinkiIE(InfoExtractor):
@@ -24,39 +23,21 @@ class HelsinkiIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        formats = []
-
-        mobj = re.search(r'file=((\w+):[^&]+)', webpage)
-        if mobj:
-            formats.append({
-                'ext': mobj.group(2),
-                'play_path': mobj.group(1),
-                'url': 'rtmp://flashvideo.it.helsinki.fi/vod/',
-                'player_url': 'http://video.helsinki.fi/player.swf',
-                'format_note': 'sd',
-                'quality': 0,
-            })
-
-        mobj = re.search(r'hd\.file=((\w+):[^&]+)', webpage)
-        if mobj:
-            formats.append({
-                'ext': mobj.group(2),
-                'play_path': mobj.group(1),
-                'url': 'rtmp://flashvideo.it.helsinki.fi/vod/',
-                'player_url': 'http://video.helsinki.fi/player.swf',
-                'format_note': 'hd',
-                'quality': 1,
-            })
 
+        params = self._parse_json(self._html_search_regex(
+            r'(?s)jwplayer\("player"\).setup\((\{.*?\})\);',
+            webpage, 'player code'), video_id, transform_source=js_to_json)
+        formats = [{
+            'url': s['file'],
+            'ext': 'mp4',
+        } for s in params['sources']]
         self._sort_formats(formats)
 
         return {
             'id': video_id,
             'title': self._og_search_title(webpage).replace('Video: ', ''),
             'description': self._og_search_description(webpage),
-            'thumbnail': self._og_search_thumbnail(webpage),
             'formats': formats,
         }
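The HelsinkiIE rewrite feeds the scraped jwplayer setup object through js_to_json so that _parse_json can load it. A much-reduced stand-in that only handles the single-quote case, enough to show the step (setup snippet hypothetical; the real youtube_dl.utils.js_to_json handles far more):

    import json
    import re

    def toy_js_to_json(code):
        # Only swaps single-quoted strings for double-quoted ones.
        return re.sub(r"'([^']*)'", r'"\1"', code)

    setup = "{'sources': [{'file': 'http://example.com/a.mp4'}]}"
    params = json.loads(toy_js_to_json(setup))
    print(params['sources'][0]['file'])
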
diff --git a/youtube_dl/extractor/hornbunny.py b/youtube_dl/extractor/hornbunny.py
new file mode 100644 (file)
index 0000000..5b6efb2
--- /dev/null
@@ -0,0 +1,56 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    parse_duration,
+)
+
+
+class HornBunnyIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html'
+    _TEST = {
+        'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html',
+        'md5': '95e40865aedd08eff60272b704852ad7',
+        'info_dict': {
+            'id': '5227',
+            'ext': 'flv',
+            'title': 'panty slut jerk off instruction',
+            'duration': 550,
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(
+            url, video_id, note='Downloading initial webpage')
+        title = self._html_search_regex(
+            r'class="title">(.*?)</h2>', webpage, 'title')
+        redirect_url = self._html_search_regex(
+            r'pg&settings=(.*?)\|0"\);', webpage, 'redirect url')
+        webpage2 = self._download_webpage(redirect_url, video_id)
+        video_url = self._html_search_regex(
+            r'flvMask:(.*?);', webpage2, 'video url')
+
+        duration = parse_duration(self._search_regex(
+            r'<strong>Runtime:</strong>\s*([0-9:]+)</div>',
+            webpage, 'duration', fatal=False))
+        view_count = int_or_none(self._search_regex(
+            r'<strong>Views:</strong>\s*(\d+)</div>',
+            webpage, 'view count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'ext': 'flv',
+            'duration': duration,
+            'view_count': view_count,
+            'age_limit': 18,
+        }
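HornBunnyIE's parse_duration turns the scraped '<strong>Runtime:</strong> 9:10' style string into seconds (550 in the test above). A reduced sketch of that helper for the colon-separated case:

    def parse_duration(s):
        # Reduced form of youtube_dl.utils.parse_duration: mm:ss or hh:mm:ss.
        if s is None:
            return None
        secs = 0
        for part in s.split(':'):
            secs = secs * 60 + int(part)
        return secs

    print(parse_duration('9:10'))  # 550, matching the test's duration
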
diff --git a/youtube_dl/extractor/hostingbulk.py b/youtube_dl/extractor/hostingbulk.py
new file mode 100644 (file)
index 0000000..8e812b6
--- /dev/null
@@ -0,0 +1,84 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    compat_urllib_request,
+    int_or_none,
+    urlencode_postdata,
+)
+
+
+class HostingBulkIE(InfoExtractor):
+    _VALID_URL = r'''(?x)
+        https?://(?:www\.)?hostingbulk\.com/
+        (?:embed-)?(?P<id>[A-Za-z0-9]{12})(?:-\d+x\d+)?\.html'''
+    _FILE_DELETED_REGEX = r'<b>File Not Found</b>'
+    _TEST = {
+        'url': 'http://hostingbulk.com/n0ulw1hv20fm.html',
+        'md5': '6c8653c8ecf7ebfa83b76e24b7b2fe3f',
+        'info_dict': {
+            'id': 'n0ulw1hv20fm',
+            'ext': 'mp4',
+            'title': 'md5:5afeba33f48ec87219c269e054afd622',
+            'filesize': 6816081,
+            'thumbnail': 're:^http://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        url = 'http://hostingbulk.com/{0:}.html'.format(video_id)
+
+        # Custom request with a cookie that sets the language to English,
+        # so the file-deleted regex above matches.
+        request = compat_urllib_request.Request(
+            url, headers={'Cookie': 'lang=english'})
+        webpage = self._download_webpage(request, video_id)
+
+        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
+            raise ExtractorError('Video %s does not exist' % video_id,
+                                 expected=True)
+
+        title = self._html_search_regex(r'<h3>(.*?)</h3>', webpage, 'title')
+        filesize = int_or_none(
+            self._search_regex(
+                r'<small>\((\d+)\sbytes?\)</small>',
+                webpage,
+                'filesize',
+                fatal=False
+            )
+        )
+        thumbnail = self._search_regex(
+            r'<img src="([^"]+)".+?class="pic"',
+            webpage, 'thumbnail', fatal=False)
+
+        fields = dict(re.findall(r'''(?x)<input\s+
+            type="hidden"\s+
+            name="([^"]+)"\s+
+            value="([^"]*)"
+            ''', webpage))
+
+        request = compat_urllib_request.Request(url, urlencode_postdata(fields))
+        request.add_header('Content-type', 'application/x-www-form-urlencoded')
+        response = self._request_webpage(request, video_id,
+                                         'Submitting download request')
+        video_url = response.geturl()
+
+        formats = [{
+            'format_id': 'sd',
+            'filesize': filesize,
+            'url': video_url,
+        }]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'formats': formats,
+        }
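HostingBulkIE scrapes every hidden <input> into a dict and posts it straight back, which is how these one-click hosters gate the real file URL. A self-contained sketch of that round-trip body (markup hypothetical):

    import re
    from urllib.parse import urlencode

    PAGE = ('<input type="hidden" name="op" value="download1">'
            '<input type="hidden" name="id" value="n0ulw1hv20fm">')

    fields = dict(re.findall(
        r'<input\s+type="hidden"\s+name="([^"]+)"\s+value="([^"]*)"', PAGE))
    post_data = urlencode(fields).encode('utf-8')  # body for the POST request
    print(post_data)
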
index 80b48b1b3605a18fa11547ab890897b6b47ccaa5..651784b73940032fd65c9675043f045b0bcf4ff2 100644 (file)
@@ -1,12 +1,13 @@
 from __future__ import unicode_literals
 
-import re
 import base64
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
     HEADRequest,
 )
@@ -16,25 +17,24 @@ class HotNewHipHopIE(InfoExtractor):
     _VALID_URL = r'http://www\.hotnewhiphop\.com/.*\.(?P<id>.*)\.html'
     _TEST = {
         'url': 'http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html',
-        'file': '1435540.mp3',
         'md5': '2c2cd2f76ef11a9b3b581e8b232f3d96',
         'info_dict': {
+            'id': '1435540',
+            'ext': 'mp3',
             'title': 'Freddie Gibbs - Lay It Down'
         }
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
-
-        webpage_src = self._download_webpage(url, video_id)
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
 
         video_url_base64 = self._search_regex(
-            r'data-path="(.*?)"', webpage_src, u'video URL', fatal=False)
+            r'data-path="(.*?)"', webpage, 'video URL', default=None)
 
         if video_url_base64 is None:
             video_url = self._search_regex(
-                r'"contentUrl" content="(.*?)"', webpage_src, u'video URL')
+                r'"contentUrl" content="(.*?)"', webpage, 'content URL')
             return self.url_result(video_url, ie='Youtube')
 
         reqdata = compat_urllib_parse.urlencode([
@@ -59,11 +59,11 @@ class HotNewHipHopIE(InfoExtractor):
         if video_url.endswith('.html'):
             raise ExtractorError('Redirect failed')
 
-        video_title = self._og_search_title(webpage_src).strip()
+        video_title = self._og_search_title(webpage).strip()
 
         return {
             'id': video_id,
             'url': video_url,
             'title': video_title,
-            'thumbnail': self._og_search_thumbnail(webpage_src),
+            'thumbnail': self._og_search_thumbnail(webpage),
         }
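In HotNewHipHopIE the data-path attribute carries the media URL base64-encoded; outside this hunk the extractor presumably decodes it with base64.b64decode (the import is visible above) before resolving the redirect. A round-trip illustration with a hypothetical URL:

    import base64

    encoded = base64.b64encode(b'http://example.com/track.mp3').decode('ascii')
    # What the extractor does with the scraped data-path value:
    redirect_url = base64.b64decode(encoded).decode('utf-8')
    print(redirect_url)
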
index 6ae04782c1aabb27ee6973819810f1e6f763b8b0..3f7d6666c0810e545c0f285dcf70689806c4dac7 100644 (file)
@@ -13,7 +13,7 @@ class HowcastIE(InfoExtractor):
         'info_dict': {
             'id': '390161',
             'ext': 'mp4',
-            'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.', 
+            'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
             'title': 'How to Tie a Square Knot Properly',
         }
     }
@@ -27,10 +27,10 @@ class HowcastIE(InfoExtractor):
         self.report_extraction(video_id)
 
         video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
-            webpage, 'video URL')
+                                       webpage, 'video URL')
 
         video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
-            webpage, 'description', fatal=False)
+                                                    webpage, 'description', fatal=False)
 
         return {
             'id': video_id,
diff --git a/youtube_dl/extractor/howstuffworks.py b/youtube_dl/extractor/howstuffworks.py
new file mode 100644 (file)
index 0000000..e973391
--- /dev/null
@@ -0,0 +1,103 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    find_xpath_attr,
+    int_or_none,
+    js_to_json,
+    unescapeHTML,
+)
+
+
+class HowStuffWorksIE(InfoExtractor):
+    _VALID_URL = r'https?://[\da-z-]+\.howstuffworks\.com/(?:[^/]+/)*\d+-(?P<id>.+?)-video\.htm'
+    _TESTS = [
+        {
+            'url': 'http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm',
+            'info_dict': {
+                'id': '450221',
+                'ext': 'flv',
+                'title': 'Cool Jobs - Iditarod Musher',
+                'description': 'Cold sleds, freezing temps and warm dog breath... an Iditarod musher\'s dream. Kasey-Dee Gardner jumps on a sled to find out what the big deal is.',
+                'display_id': 'cool-jobs-iditarod-musher',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'duration': 161,
+            },
+        },
+        {
+            'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
+            'info_dict': {
+                'id': '453464',
+                'ext': 'mp4',
+                'title': 'Survival Zone: Food and Water In the Savanna',
+                'description': 'Learn how to find both food and water while trekking in the African savannah. In this video from the Discovery Channel.',
+                'display_id': 'survival-zone-food-and-water-in-the-savanna',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        {
+            'url': 'http://entertainment.howstuffworks.com/arts/2706-sword-swallowing-1-by-dan-meyer-video.htm',
+            'info_dict': {
+                'id': '440011',
+                'ext': 'flv',
+                'title': 'Sword Swallowing #1 by Dan Meyer',
+                'description': 'Video footage (1 of 3) used by permission of the owner Dan Meyer through Sword Swallowers Association International <www.swordswallow.org>',
+                'display_id': 'sword-swallowing-1-by-dan-meyer',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        clip_js = self._search_regex(
+            r'(?s)var clip = ({.*?});', webpage, 'clip info')
+        clip_info = self._parse_json(
+            clip_js, display_id, transform_source=js_to_json)
+
+        video_id = clip_info['content_id']
+        formats = []
+        m3u8_url = clip_info.get('m3u8')
+        if m3u8_url:
+            formats += self._extract_m3u8_formats(m3u8_url, video_id, 'mp4')
+        for video in clip_info.get('mp4', []):
+            formats.append({
+                'url': video['src'],
+                'format_id': video['bitrate'],
+                'vbr': int(video['bitrate'].rstrip('k')),
+            })
+
+        if not formats:
+            smil = self._download_xml(
+                'http://services.media.howstuffworks.com/videos/%s/smil-service.smil' % video_id,
+                video_id, 'Downloading video SMIL')
+
+            http_base = find_xpath_attr(
+                smil,
+                './{0}head/{0}meta'.format('{http://www.w3.org/2001/SMIL20/Language}'),
+                'name',
+                'httpBase').get('content')
+
+            URL_SUFFIX = '?v=2.11.3&fp=LNX 11,2,202,356&r=A&g=A'
+
+            for video in smil.findall(
+                    './{0}body/{0}switch/{0}video'.format('{http://www.w3.org/2001/SMIL20/Language}')):
+                vbr = int_or_none(video.attrib['system-bitrate'], scale=1000)
+                formats.append({
+                    'url': '%s/%s%s' % (http_base, video.attrib['src'], URL_SUFFIX),
+                    'format_id': '%dk' % vbr,
+                    'vbr': vbr,
+                })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': '%s' % video_id,
+            'display_id': display_id,
+            'title': unescapeHTML(clip_info['clip_title']),
+            'description': unescapeHTML(clip_info.get('caption')),
+            'thumbnail': clip_info.get('video_still_url'),
+            'duration': clip_info.get('duration'),
+            'formats': formats,
+        }
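The SMIL branch of HowStuffWorksIE converts system-bitrate (bits per second) into a kbit/s vbr via int_or_none's scale argument. A reduced sketch of that helper's behaviour:

    def int_or_none(v, scale=1):
        # Reduced youtube_dl.utils.int_or_none: tolerate None and rescale.
        return None if v is None else int(v) // scale

    print(int_or_none('1000000', scale=1000))  # 1000 -> format_id '1000k'
    print(int_or_none(None, scale=1000))       # None
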
index 94e7cf79008aa0b2426f70a26ba70218f916d731..4ccf6b9b8a82c3ef28c1d9d04dcc6f26ce2a8f8d 100644 (file)
@@ -33,8 +33,7 @@ class HuffPostIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id
         data = self._download_json(api_url, video_id)['data']
index 1d5a10a3b6349d95387aee00d4be5e6deac618b8..370e86e5ac7ce497c8b3c658805374246fb9690a 100644 (file)
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -20,13 +18,11 @@ class IconosquareIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        html_title = self._html_search_regex(
-            r'<title>(.+?)</title>',
+        title = self._html_search_regex(
+            r'<title>(.+?)(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)</title>',
             webpage, 'title')
-        title = re.sub(r'(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)$', '', html_title)
         uploader_id = self._html_search_regex(
             r'@([^ ]+)', title, 'uploader name', fatal=False)
 
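The IconosquareIE title cleanup that used to be a separate re.sub is folded into the extraction regex itself: the ' (Videos) | Iconosquare' suffix is matched but sits outside the capturing group. For example, with a hypothetical page title:

    import re

    html = '<title>Foo on the beach (Videos) | Iconosquare</title>'
    title = re.search(
        r'<title>(.+?)(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)</title>',
        html).group(1)
    print(title)  # 'Foo on the beach'
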
index 1f42c6d3a957674aa7bb2ee4ee3d56dac43cd2f8..3db668cd0297ea0ff3c0168c2b3f5db1491a0db4 100644 (file)
@@ -18,6 +18,7 @@ class IGNIE(InfoExtractor):
     _DESCRIPTION_RE = [
         r'<span class="page-object-description">(.+?)</span>',
         r'id="my_show_video">.*?<p>(.*?)</p>',
+        r'<meta name="description" content="(.*?)"',
     ]
 
     _TESTS = [
@@ -55,13 +56,28 @@ class IGNIE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        {
+            'url': 'http://www.ign.com/articles/2014/08/15/rewind-theater-wild-trailer-gamescom-2014?watch',
+            'md5': '4e9a0bda1e5eebd31ddcf86ec0b9b3c7',
+            'info_dict': {
+                'id': '078fdd005f6d3c02f63d795faa1b984f',
+                'ext': 'mp4',
+                'title': 'Rewind Theater - Wild Trailer Gamescom 2014',
+                'description': (
+                    'Giant skeletons, bloody hunts, and captivating'
+                    ' natural beauty take our breath away.'
+                ),
+            },
+        },
     ]
 
     def _find_video_id(self, webpage):
         res_id = [
+            r'"video_id"\s*:\s*"(.*?)"',
             r'data-video-id="(.+?)"',
             r'<object id="vid_(.+?)"',
             r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
+            r'class="hero-poster[^"]*?"[^>]*id="(.+?)"',
         ]
         return self._search_regex(res_id, webpage, 'video id')
 
@@ -70,20 +86,22 @@ class IGNIE(InfoExtractor):
         name_or_id = mobj.group('name_or_id')
         page_type = mobj.group('type')
         webpage = self._download_webpage(url, name_or_id)
-        if page_type == 'articles':
-            video_url = self._search_regex(r'var videoUrl = "(.+?)"', webpage, 'video url')
-            return self.url_result(video_url, ie='IGN')
-        elif page_type != 'video':
+        if page_type != 'video':
             multiple_urls = re.findall(
-                '<param name="flashvars" value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
+                '<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
                 webpage)
             if multiple_urls:
-                return [self.url_result(u, ie='IGN') for u in multiple_urls]
+                entries = [self.url_result(u, ie='IGN') for u in multiple_urls]
+                return {
+                    '_type': 'playlist',
+                    'id': name_or_id,
+                    'entries': entries,
+                }
 
         video_id = self._find_video_id(webpage)
         result = self._get_video_info(video_id)
         description = self._html_search_regex(self._DESCRIPTION_RE,
-            webpage, 'video description', flags=re.DOTALL)
+                                              webpage, 'video description', flags=re.DOTALL)
         result['description'] = description
         return result
 
@@ -101,13 +119,13 @@ class IGNIE(InfoExtractor):
 
 
 class OneUPIE(IGNIE):
-    _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)'
+    _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)\.html'
     IE_NAME = '1up.com'
 
     _DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'
 
     _TESTS = [{
-        'url': 'http://gamevideos.1up.com/video/id/34976',
+        'url': 'http://gamevideos.1up.com/video/id/34976.html',
         'md5': '68a54ce4ebc772e4b71e3123d413163d',
         'info_dict': {
             'id': '34976',
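Instead of returning a bare list of url_result dicts, IGNIE's multi-video branch now wraps them in an explicit playlist result, which lets the playlist keep its own id. The returned shape, sketched with hypothetical URLs:

    def url_result(url, ie=None):
        # Shape produced by InfoExtractor.url_result.
        return {'_type': 'url', 'url': url, 'ie_key': ie}

    entries = [url_result(u, ie='IGN') for u in
               ['http://www.ign.com/videos/a', 'http://www.ign.com/videos/b']]
    playlist = {'_type': 'playlist', 'id': 'some-article', 'entries': entries}
    print(playlist['id'], len(playlist['entries']))
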
index 7cee505c085cd1601e0b8ce3ab689795b4f94dfd..f2c1c10f5c1dec44129ea4cd7d4cff69c7c07206 100644 (file)
@@ -6,7 +6,6 @@ import json
 from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
-    get_element_by_attribute,
 )
 
 
@@ -27,10 +26,11 @@ class ImdbIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage('http://www.imdb.com/video/imdb/vi%s' % video_id, video_id)
-        descr = get_element_by_attribute('itemprop', 'description', webpage)
+        descr = self._html_search_regex(
+            r'(?s)<span itemprop="description">(.*?)</span>',
+            webpage, 'description', fatal=False)
         available_formats = re.findall(
             r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,
             flags=re.MULTILINE)
@@ -63,11 +63,17 @@ class ImdbListIE(InfoExtractor):
     IE_NAME = 'imdb:list'
     IE_DESC = 'Internet Movie Database lists'
     _VALID_URL = r'http://www\.imdb\.com/list/(?P<id>[\da-zA-Z_-]{11})'
-    
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        list_id = mobj.group('id')
+    _TEST = {
+        'url': 'http://www.imdb.com/list/JFs9NWw6XI0',
+        'info_dict': {
+            'id': 'JFs9NWw6XI0',
+            'title': 'March 23, 2012 Releases',
+        },
+        'playlist_count': 7,
+    }
 
+    def _real_extract(self, url):
+        list_id = self._match_id(url)
         webpage = self._download_webpage(url, list_id)
         entries = [
             self.url_result('http://www.imdb.com' + m, 'Imdb')
index b5372bf7a24e48a347127a1dc76c9dc672b32b64..b020e2621a5cc3c8d7ef6a1bc2cb6aaea989f779 100644 (file)
@@ -27,9 +27,9 @@ class InstagramIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
         uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
-            webpage, 'uploader id', fatal=False)
+                                         webpage, 'uploader id', fatal=False)
         desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
-            fatal=False)
+                                  fatal=False)
 
         return {
             'id': video_id,
@@ -46,6 +46,30 @@ class InstagramUserIE(InfoExtractor):
     _VALID_URL = r'http://instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
     IE_DESC = 'Instagram user profile'
     IE_NAME = 'instagram:user'
+    _TEST = {
+        'url': 'http://instagram.com/porsche',
+        'info_dict': {
+            'id': 'porsche',
+            'title': 'porsche',
+        },
+        'playlist_mincount': 2,
+        'playlist': [{
+            'info_dict': {
+                'id': '614605558512799803_462752227',
+                'ext': 'mp4',
+                'title': '#Porsche Intelligent Performance.',
+                'thumbnail': 're:^https?://.*\.jpg',
+                'uploader': 'Porsche',
+                'uploader_id': 'porsche',
+                'timestamp': 1387486713,
+                'upload_date': '20131219',
+            },
+        }],
+        'params': {
+            'extract_flat': True,
+            'skip_download': True,
+        }
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
index 4ddda2f1bb86dd534f623218b2acd74566d781e1..1e47991874ecf6afa75181f99b6bf98a8dd60916 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -12,12 +14,13 @@ class InternetVideoArchiveIE(InfoExtractor):
     _VALID_URL = r'https?://video\.internetvideoarchive\.net/flash/players/.*?\?.*?publishedid.*?'
 
     _TEST = {
-        u'url': u'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
-        u'file': u'452693.mp4',
-        u'info_dict': {
-            u'title': u'SKYFALL',
-            u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
-            u'duration': 153,
+        'url': 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
+        'info_dict': {
+            'id': '452693',
+            'ext': 'mp4',
+            'title': 'SKYFALL',
+            'description': 'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
+            'duration': 149,
         },
     }
 
@@ -29,7 +32,7 @@ class InternetVideoArchiveIE(InfoExtractor):
     def _clean_query(query):
         NEEDED_ARGS = ['publishedid', 'customerid']
         query_dic = compat_urlparse.parse_qs(query)
-        cleaned_dic = dict((k,v[0]) for (k,v) in query_dic.items() if k in NEEDED_ARGS)
+        cleaned_dic = dict((k, v[0]) for (k, v) in query_dic.items() if k in NEEDED_ARGS)
         # Other player ids return m3u8 urls
         cleaned_dic['playerid'] = '247'
         cleaned_dic['videokbrate'] = '100000'
@@ -42,22 +45,26 @@ class InternetVideoArchiveIE(InfoExtractor):
         url = self._build_url(query)
 
         flashconfiguration = self._download_xml(url, video_id,
-            u'Downloading flash configuration')
+                                                'Downloading flash configuration')
         file_url = flashconfiguration.find('file').text
         file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
         # Replace some of the parameters in the query to get the best quality
         # and http links (no m3u8 manifests)
         file_url = re.sub(r'(?<=\?)(.+)$',
-            lambda m: self._clean_query(m.group()),
-            file_url)
+                          lambda m: self._clean_query(m.group()),
+                          file_url)
         info = self._download_xml(file_url, video_id,
-            u'Downloading video info')
+                                  'Downloading video info')
         item = info.find('channel/item')
 
         def _bp(p):
-            return xpath_with_ns(p,
-                {'media': 'http://search.yahoo.com/mrss/',
-                'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'})
+            return xpath_with_ns(
+                p,
+                {
+                    'media': 'http://search.yahoo.com/mrss/',
+                    'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats',
+                }
+            )
         formats = []
         for content in item.findall(_bp('media:group/media:content')):
             attr = content.attrib
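The _bp helper in InternetVideoArchiveIE wraps xpath_with_ns, which expands prefix:tag components into ElementTree's Clark notation. A reduced re-implementation showing the expansion:

    def xpath_with_ns(path, ns_map):
        # Reduced sketch of youtube_dl.utils.xpath_with_ns.
        out = []
        for comp in path.split('/'):
            if ':' in comp:
                ns, tag = comp.split(':')
                out.append('{%s}%s' % (ns_map[ns], tag))
            else:
                out.append(comp)
        return '/'.join(out)

    print(xpath_with_ns('media:group/media:content',
                        {'media': 'http://search.yahoo.com/mrss/'}))
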
index d1defd363c5fe9c86330236f53b8aa21bfe65a38..4247d6391fa25f674449d9d8ac44b428c7c387e0 100644 (file)
@@ -54,7 +54,7 @@ class IPrimaIE(InfoExtractor):
 
         player_url = (
             'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
-            (floor(random()*1073741824), floor(random()*1073741824))
+            (floor(random() * 1073741824), floor(random() * 1073741824))
         )
 
         req = compat_urllib_request.Request(player_url)
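The two interpolated values in IPrimaIE's player URL are pseudo-random 30-bit integers (1073741824 is 2**30), presumably cache-busters for the player script. The expression in isolation:

    from math import floor
    from random import random

    tok = floor(random() * 1073741824)  # uniform integer in [0, 2**30)
    print('__tok%s__=%s' % (tok, tok))
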
index 4027deb7071806fcba313a80cebe694e9f96580e..f0fba1adb7dba9c4c09717132731d98ff0e5ffd3 100644 (file)
@@ -43,7 +43,7 @@ class IviIE(InfoExtractor):
                 'thumbnail': 'http://thumbs.ivi.ru/f15.vcp.digitalaccess.ru/contents/8/4/0068dc0677041f3336b7c2baad8fc0.jpg',
             },
             'skip': 'Only works from Russia',
-         }
+        }
     ]
 
     # Sorted by quality
@@ -102,7 +102,7 @@ class IviIE(InfoExtractor):
         compilation = result['compilation']
         title = result['title']
 
-        title = '%s - %s' % (compilation, title) if compilation is not None else title  
+        title = '%s - %s' % (compilation, title) if compilation is not None else title
 
         previews = result['preview']
         previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
@@ -127,6 +127,21 @@ class IviCompilationIE(InfoExtractor):
     IE_DESC = 'ivi.ru compilations'
     IE_NAME = 'ivi:compilation'
     _VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
+    _TESTS = [{
+        'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa',
+        'info_dict': {
+            'id': 'dvoe_iz_lartsa',
+            'title': 'Двое из ларца (2006 - 2008)',
+        },
+        'playlist_mincount': 24,
+    }, {
+        'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/season1',
+        'info_dict': {
+            'id': 'dvoe_iz_lartsa/season1',
+            'title': 'Двое из ларца (2006 - 2008) 1 сезон',
+        },
+        'playlist_mincount': 12,
+    }]
 
     def _extract_entries(self, html, compilation_id):
         return [self.url_result('http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), 'Ivi')
@@ -137,17 +152,17 @@ class IviCompilationIE(InfoExtractor):
         compilation_id = mobj.group('compilationid')
         season_id = mobj.group('seasonid')
 
-        if season_id is not None: # Season link
+        if season_id is not None:  # Season link
             season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
             playlist_id = '%s/season%s' % (compilation_id, season_id)
             playlist_title = self._html_search_meta('title', season_page, 'title')
             entries = self._extract_entries(season_page, compilation_id)
-        else: # Compilation link            
+        else:  # Compilation link
             compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
             playlist_id = compilation_id
             playlist_title = self._html_search_meta('title', compilation_page, 'title')
             seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
-            if len(seasons) == 0: # No seasons in this compilation
+            if len(seasons) == 0:  # No seasons in this compilation
                 entries = self._extract_entries(compilation_page, compilation_id)
             else:
                 entries = []
@@ -157,4 +172,4 @@ class IviCompilationIE(InfoExtractor):
                         compilation_id, 'Downloading season %s web page' % season_id)
                     entries.extend(self._extract_entries(season_page, compilation_id))
 
-        return self.playlist_result(entries, playlist_id, playlist_title)
\ No newline at end of file
+        return self.playlist_result(entries, playlist_id, playlist_title)
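IviCompilationIE's compilation branch discovers seasons by scanning the page for /watch/<id>/season<n> links and then walks each season page. The discovery step in isolation (markup assumed):

    import re

    compilation_id = 'dvoe_iz_lartsa'
    page = ('<a href="/watch/dvoe_iz_lartsa/season1">1 сезон</a>'
            '<a href="/watch/dvoe_iz_lartsa/season2">2 сезон</a>')
    seasons = re.findall(
        r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, page)
    print(seasons)  # ['1', '2'] -> one "Downloading season N web page" each
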
diff --git a/youtube_dl/extractor/izlesene.py b/youtube_dl/extractor/izlesene.py
new file mode 100644 (file)
index 0000000..d16d483
--- /dev/null
@@ -0,0 +1,125 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    float_or_none,
+    get_element_by_id,
+    int_or_none,
+    parse_iso8601,
+    str_to_int,
+)
+
+
+class IzleseneIE(InfoExtractor):
+    _VALID_URL = r'''(?x)
+        https?://(?:(?:www|m)\.)?izlesene\.com/
+        (?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)
+        '''
+    _TESTS = [
+        {
+            'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
+            'md5': '4384f9f0ea65086734b881085ee05ac2',
+            'info_dict': {
+                'id': '7599694',
+                'ext': 'mp4',
+                'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
+                'description': 'md5:253753e2655dde93f59f74b572454f6d',
+                'thumbnail': 're:^http://.*\.jpg',
+                'uploader_id': 'pelikzzle',
+                'timestamp': 1404302298,
+                'upload_date': '20140702',
+                'duration': 95.395,
+                'age_limit': 0,
+            }
+        },
+        {
+            'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997',
+            'md5': '97f09b6872bffa284cb7fa4f6910cb72',
+            'info_dict': {
+                'id': '17997',
+                'ext': 'mp4',
+                'title': 'Tarkan Dortmund 2006 Konseri',
+                'description': 'Tarkan Dortmund 2006 Konseri',
+                'thumbnail': 're:^http://.*\.jpg',
+                'uploader_id': 'parlayankiz',
+                'timestamp': 1163322193,
+                'upload_date': '20061112',
+                'duration': 253.666,
+                'age_limit': 0,
+            }
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        url = 'http://www.izlesene.com/video/%s' % video_id
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = self._proto_relative_url(
+            self._og_search_thumbnail(webpage), scheme='http:')
+
+        uploader = self._html_search_regex(
+            r"adduserUsername\s*=\s*'([^']+)';",
+            webpage, 'uploader', fatal=False, default='')
+        timestamp = parse_iso8601(self._html_search_meta(
+            'uploadDate', webpage, 'upload date', fatal=False))
+
+        duration = float_or_none(self._html_search_regex(
+            r'"videoduration"\s*:\s*"([^"]+)"',
+            webpage, 'duration', fatal=False), scale=1000)
+
+        view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
+        comment_count = self._html_search_regex(
+            r'comment_count\s*=\s*\'([^\']+)\';',
+            webpage, 'comment_count', fatal=False)
+
+        family_friendly = self._html_search_meta(
+            'isFamilyFriendly', webpage, 'age limit', fatal=False)
+
+        content_url = self._html_search_meta(
+            'contentURL', webpage, 'content URL', fatal=False)
+        ext = determine_ext(content_url, 'mp4')
+
+        # Might be empty for some videos.
+        streams = self._html_search_regex(
+            r'"qualitylevel"\s*:\s*"([^"]+)"',
+            webpage, 'streams', fatal=False, default='')
+
+        formats = []
+        if streams:
+            for stream in streams.split('|'):
+                quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
+                formats.append({
+                    'format_id': '%sp' % quality if quality else 'sd',
+                    'url': url,
+                    'ext': ext,
+                })
+        else:
+            stream_url = self._search_regex(
+                r'"streamurl"\s?:\s?"([^"]+)"', webpage, 'stream URL')
+            formats.append({
+                'format_id': 'sd',
+                'url': stream_url,
+                'ext': ext,
+            })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'uploader_id': uploader,
+            'timestamp': timestamp,
+            'duration': duration,
+            'view_count': int_or_none(view_count),
+            'comment_count': int_or_none(comment_count),
+            'age_limit': 18 if family_friendly == 'False' else 0,
+            'formats': formats,
+        }
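IzleseneIE's streams blob is a '|'-separated list of '[quality]url' entries; the loop above splits it and maps each entry to a format. The parsing step in isolation (blob hypothetical):

    import re

    streams = '[480]http://example.com/sd.mp4|[720]http://example.com/hd.mp4'
    for stream in streams.split('|'):
        quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
        print('%sp' % quality if quality else 'sd', url)
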
index ace08769bd7671619db448696a016d08fd453e67..063e86de46c896c94be505ae916fd6f3fbdedc02 100644 (file)
@@ -45,4 +45,3 @@ class JadoreCettePubIE(InfoExtractor):
             'title': title,
             'description': description,
         }
-
index 1881659665b415714d1315f6d52bc93dbe324a2c..8094cc2e487f2880a66178d0a07c97a2ef9432f5 100644 (file)
@@ -29,7 +29,7 @@ class JeuxVideoIE(InfoExtractor):
         xml_link = self._html_search_regex(
             r'<param name="flashvars" value="config=(.*?)" />',
             webpage, 'config URL')
-        
+
         video_id = self._search_regex(
             r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
             xml_link, 'video ID')
@@ -38,7 +38,7 @@ class JeuxVideoIE(InfoExtractor):
             xml_link, title, 'Downloading XML config')
         info_json = config.find('format.json').text
         info = json.loads(info_json)['versions'][0]
-        
+
         video_url = 'http://video720.jeuxvideo.com/' + info['file']
 
         return {
diff --git a/youtube_dl/extractor/jove.py b/youtube_dl/extractor/jove.py
new file mode 100644 (file)
index 0000000..cf73cd7
--- /dev/null
@@ -0,0 +1,80 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    unified_strdate
+)
+
+
+class JoveIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?jove\.com/video/(?P<id>[0-9]+)'
+    _CHAPTERS_URL = 'http://www.jove.com/video-chapters?videoid={video_id:}'
+    _TESTS = [
+        {
+            'url': 'http://www.jove.com/video/2744/electrode-positioning-montage-transcranial-direct-current',
+            'md5': '93723888d82dbd6ba8b3d7d0cd65dd2b',
+            'info_dict': {
+                'id': '2744',
+                'ext': 'mp4',
+                'title': 'Electrode Positioning and Montage in Transcranial Direct Current Stimulation',
+                'description': 'md5:015dd4509649c0908bc27f049e0262c6',
+                'thumbnail': 're:^https?://.*\.png$',
+                'upload_date': '20110523',
+            }
+        },
+        {
+            'url': 'http://www.jove.com/video/51796/culturing-caenorhabditis-elegans-axenic-liquid-media-creation',
+            'md5': '914aeb356f416811d911996434811beb',
+            'info_dict': {
+                'id': '51796',
+                'ext': 'mp4',
+                'title': 'Culturing Caenorhabditis elegans in Axenic Liquid Media and Creation of Transgenic Worms by Microparticle Bombardment',
+                'description': 'md5:35ff029261900583970c4023b70f1dc9',
+                'thumbnail': 're:^https?://.*\.png$',
+                'upload_date': '20140802',
+            }
+        },
+
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        chapters_id = self._html_search_regex(
+            r'/video-chapters\?videoid=([0-9]+)', webpage, 'chapters id')
+
+        chapters_xml = self._download_xml(
+            self._CHAPTERS_URL.format(video_id=chapters_id),
+            video_id, note='Downloading chapters XML',
+            errnote='Failed to download chapters XML')
+
+        video_url = chapters_xml.attrib.get('video')
+        if not video_url:
+            raise ExtractorError('Failed to get the video URL')
+
+        title = self._html_search_meta('citation_title', webpage, 'title')
+        thumbnail = self._og_search_thumbnail(webpage)
+        description = self._html_search_regex(
+            r'<div id="section_body_summary"><p class="jove_content">(.+?)</p>',
+            webpage, 'description', fatal=False)
+        publish_date = unified_strdate(self._html_search_meta(
+            'citation_publication_date', webpage, 'publish date', fatal=False))
+        comment_count = self._html_search_regex(
+            r'<meta name="num_comments" content="(\d+) Comments?"',
+            webpage, 'comment count', fatal=False)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'thumbnail': thumbnail,
+            'description': description,
+            'upload_date': publish_date,
+            'comment_count': comment_count,
+        }
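A side note on JoveIE's template: str.format treats '{video_id:}' (an empty format spec) exactly like '{video_id}', so the odd-looking _CHAPTERS_URL placeholder is harmless:

    _CHAPTERS_URL = 'http://www.jove.com/video-chapters?videoid={video_id:}'
    print(_CHAPTERS_URL.format(video_id='2744'))
    # http://www.jove.com/video-chapters?videoid=2744
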
index aad7825788a74bc8930d401a38fe41f7703803c6..122e2dd8cad8c9fba6d861a80d77752e1b508301 100644 (file)
@@ -1,8 +1,6 @@
 # coding=utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     int_or_none,
@@ -12,14 +10,14 @@ from ..utils import (
 
 class JpopsukiIE(InfoExtractor):
     IE_NAME = 'jpopsuki.tv'
-    _VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/video/(.*?)/(?P<id>\S+)'
+    _VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/(?:category/)?video/[^/]+/(?P<id>\S+)'
 
     _TEST = {
         'url': 'http://www.jpopsuki.tv/video/ayumi-hamasaki---evolution/00be659d23b0b40508169cdee4545771',
         'md5': '88018c0c1a9b1387940e90ec9e7e198e',
-        'file': '00be659d23b0b40508169cdee4545771.mp4',
         'info_dict': {
             'id': '00be659d23b0b40508169cdee4545771',
+            'ext': 'mp4',
             'title': 'ayumi hamasaki - evolution',
             'description': 'Release date: 2001.01.31\r\n浜崎あゆみ - evolution',
             'thumbnail': 'http://www.jpopsuki.tv/cache/89722c74d2a2ebe58bcac65321c115b2.jpg',
@@ -30,8 +28,7 @@ class JpopsukiIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
 
@@ -47,11 +44,9 @@ class JpopsukiIE(InfoExtractor):
         uploader_id = self._html_search_regex(
             r'<li>from: <a href="/user/view/user/\S*?/uid/(\d*)',
             webpage, 'video uploader_id', fatal=False)
-        upload_date = self._html_search_regex(
+        upload_date = unified_strdate(self._html_search_regex(
             r'<li>uploaded: (.*?)</li>', webpage, 'video upload_date',
-            fatal=False)
-        if upload_date is not None:
-            upload_date = unified_strdate(upload_date)
+            fatal=False))
         view_count_str = self._html_search_regex(
             r'<li>Hits: ([0-9]+?)</li>', webpage, 'video view_count',
             fatal=False)
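
Several hunks in this merge apply the same pair of refactors seen above: the re.match boilerplate collapses into the _match_id helper, and the explicit None guard before unified_strdate is dropped. A minimal sketch of why that is safe, assuming the helpers behave as in youtube_dl (_match_id returns the 'id' group of _VALID_URL, and unified_strdate passes None through):

    import re

    class SketchIE(object):
        # Hypothetical pattern, for illustration only.
        _VALID_URL = r'https?://example\.com/video/(?P<id>[0-9a-f]+)'

        def _match_id(self, url):
            # Equivalent of the removed boilerplate:
            #   mobj = re.match(self._VALID_URL, url)
            #   video_id = mobj.group('id')
            return re.match(self._VALID_URL, url).group('id')

    def unified_strdate(date_str):
        # Reduced stand-in for youtube_dl.utils.unified_strdate: the real
        # helper tries many date formats, but like this one it returns None
        # for None input, so the caller no longer needs its own guard.
        if date_str is None:
            return None
        return date_str.replace('-', '')

    assert SketchIE()._match_id('http://example.com/video/00be659d') == '00be659d'
    assert unified_strdate(None) is None
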
index 9b553b9fa52873739b0d4ecb4e3927e1beff929e..da8068efcba914a14788bc7b39872b8b5cd01c7c 100644 (file)
@@ -11,10 +11,9 @@ from ..utils import (
 
 
 class JukeboxIE(InfoExtractor):
-    _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+)\.html'
+    _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<id>[a-z0-9\-]+)\.html'
     _TEST = {
         'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html',
-        'md5': '1574e9b4d6438446d5b7dbcdf2786276',
         'info_dict': {
             'id': 'r303r',
             'ext': 'flv',
@@ -24,8 +23,7 @@ class JukeboxIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
+        video_id = self._match_id(url)
 
         html = self._download_webpage(url, video_id)
         iframe_url = unescapeHTML(self._search_regex(r'<iframe .*src="([^"]*)"', html, 'iframe url'))
@@ -38,7 +36,7 @@ class JukeboxIE(InfoExtractor):
 
         try:
             video_url = self._search_regex(r'"config":{"file":"(?P<video_url>http:[^"]+\?mdtk=[0-9]+)"',
-                iframe_html, 'video url')
+                                           iframe_html, 'video url')
             video_url = unescapeHTML(video_url).replace('\/', '/')
         except RegexNotFoundError:
             youtube_url = self._search_regex(
@@ -49,9 +47,9 @@ class JukeboxIE(InfoExtractor):
             return self.url_result(youtube_url, ie='Youtube')
 
         title = self._html_search_regex(r'<h1 class="inline">([^<]+)</h1>',
-            html, 'title')
+                                        html, 'title')
         artist = self._html_search_regex(r'<span id="infos_article_artist">([^<]+)</span>',
-            html, 'artist')
+                                         html, 'artist')
 
         return {
             'id': video_id,
diff --git a/youtube_dl/extractor/justintv.py b/youtube_dl/extractor/justintv.py
deleted file mode 100644 (file)
index 27017e8..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-from __future__ import unicode_literals
-
-import itertools
-import json
-import os
-import re
-
-from .common import InfoExtractor
-from ..utils import (
-    compat_str,
-    ExtractorError,
-    formatSeconds,
-)
-
-
-class JustinTVIE(InfoExtractor):
-    """Information extractor for justin.tv and twitch.tv"""
-    # TODO: One broadcast may be split into multiple videos. The key
-    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
-    # starts at 1 and increases. Can we treat all parts as one video?
-
-    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
-        (?:
-            (?P<channelid>[^/]+)|
-            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
-            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
-        )
-        /?(?:\#.*)?$
-        """
-    _JUSTIN_PAGE_LIMIT = 100
-    IE_NAME = 'justin.tv'
-    IE_DESC = 'justin.tv and twitch.tv'
-    _TEST = {
-        'url': 'http://www.twitch.tv/thegamedevhub/b/296128360',
-        'md5': 'ecaa8a790c22a40770901460af191c9a',
-        'info_dict': {
-            'id': '296128360',
-            'ext': 'flv',
-            'upload_date': '20110927',
-            'uploader_id': 25114803,
-            'uploader': 'thegamedevhub',
-            'title': 'Beginner Series - Scripting With Python Pt.1'
-        }
-    }
-
-    # Return count of items, list of *valid* items
-    def _parse_page(self, url, video_id, counter):
-        info_json = self._download_webpage(
-            url, video_id,
-            'Downloading video info JSON on page %d' % counter,
-            'Unable to download video info JSON %d' % counter)
-
-        response = json.loads(info_json)
-        if type(response) != list:
-            error_text = response.get('error', 'unknown error')
-            raise ExtractorError('Justin.tv API: %s' % error_text)
-        info = []
-        for clip in response:
-            video_url = clip['video_file_url']
-            if video_url:
-                video_extension = os.path.splitext(video_url)[1][1:]
-                video_date = re.sub('-', '', clip['start_time'][:10])
-                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
-                video_id = clip['id']
-                video_title = clip.get('title', video_id)
-                info.append({
-                    'id': compat_str(video_id),
-                    'url': video_url,
-                    'title': video_title,
-                    'uploader': clip.get('channel_name', video_uploader_id),
-                    'uploader_id': video_uploader_id,
-                    'upload_date': video_date,
-                    'ext': video_extension,
-                })
-        return (len(response), info)
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        api_base = 'http://api.justin.tv'
-        paged = False
-        if mobj.group('channelid'):
-            paged = True
-            video_id = mobj.group('channelid')
-            api = api_base + '/channel/archives/%s.json' % video_id
-        elif mobj.group('chapterid'):
-            chapter_id = mobj.group('chapterid')
-
-            webpage = self._download_webpage(url, chapter_id)
-            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
-            if not m:
-                raise ExtractorError('Cannot find archive of a chapter')
-            archive_id = m.group(1)
-
-            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
-            doc = self._download_xml(
-                api, chapter_id,
-                note='Downloading chapter information',
-                errnote='Chapter information download failed')
-            for a in doc.findall('.//archive'):
-                if archive_id == a.find('./id').text:
-                    break
-            else:
-                raise ExtractorError('Could not find chapter in chapter information')
-
-            video_url = a.find('./video_file_url').text
-            video_ext = video_url.rpartition('.')[2] or 'flv'
-
-            chapter_api_url = 'https://api.twitch.tv/kraken/videos/c' + chapter_id
-            chapter_info = self._download_json(
-                chapter_api_url, 'c' + chapter_id,
-                note='Downloading chapter metadata',
-                errnote='Download of chapter metadata failed')
-
-            bracket_start = int(doc.find('.//bracket_start').text)
-            bracket_end = int(doc.find('.//bracket_end').text)
-
-            # TODO determine start (and probably fix up file)
-            #  youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
-            #video_url += '?start=' + TODO:start_timestamp
-            # bracket_start is 13290, but we want 51670615
-            self._downloader.report_warning('Chapter detected, but we can just download the whole file. '
-                                            'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
-
-            info = {
-                'id': 'c' + chapter_id,
-                'url': video_url,
-                'ext': video_ext,
-                'title': chapter_info['title'],
-                'thumbnail': chapter_info['preview'],
-                'description': chapter_info['description'],
-                'uploader': chapter_info['channel']['display_name'],
-                'uploader_id': chapter_info['channel']['name'],
-            }
-            return info
-        else:
-            video_id = mobj.group('videoid')
-            api = api_base + '/broadcast/by_archive/%s.json' % video_id
-
-        entries = []
-        offset = 0
-        limit = self._JUSTIN_PAGE_LIMIT
-        for counter in itertools.count(1):
-            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
-            page_count, page_info = self._parse_page(
-                page_url, video_id, counter)
-            entries.extend(page_info)
-            if not paged or page_count != limit:
-                break
-            offset += limit
-        return {
-            '_type': 'playlist',
-            'id': video_id,
-            'entries': entries,
-        }
index 23103b163fea1ed6a27cb44dadcf231b478edcdb..dbfe4cc03fd8c569ed2f05d5ae9c86c36bb9e278 100644 (file)
@@ -10,7 +10,7 @@ _md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
 
 class KankanIE(InfoExtractor):
     _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'
-    
+
     _TEST = {
         'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
         'file': '48863.flv',
index 772bb5671e8f027b2723b54c00794217f6d94edb..408d00944ceb83e1551c12b5707031c961bb4f5d 100644 (file)
@@ -9,21 +9,30 @@ from ..utils import (
 
 
 class KhanAcademyIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:www\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
+    _VALID_URL = r'^https?://(?:(?:www|api)\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
     IE_NAME = 'KhanAcademy'
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.khanacademy.org/video/one-time-pad',
-        'file': 'one-time-pad.mp4',
         'md5': '7021db7f2d47d4fff89b13177cb1e8f4',
         'info_dict': {
+            'id': 'one-time-pad',
+            'ext': 'mp4',
             'title': 'The one-time pad',
             'description': 'The perfect cipher',
             'duration': 176,
             'uploader': 'Brit Cruise',
             'upload_date': '20120411',
         }
-    }
+    }, {
+        'url': 'https://www.khanacademy.org/math/applied-math/cryptography',
+        'info_dict': {
+            'id': 'cryptography',
+            'title': 'Journey into cryptography',
+            'description': 'How have humans protected their secret messages through history? What has changed today?',
+        },
+        'playlist_mincount': 3,
+    }]
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
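
The widened KhanAcademy pattern keeps the (?:[^/]+/){,2} quantifier, where Python's re module reads {,2} as {0,2} (zero to two repetitions); that is what lets one pattern cover both plain video pages and the deeper topic URLs in the new playlist test:

    import re

    path_re = re.compile(
        r'^khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)$')
    assert path_re.match('khanacademy.org/video/one-time-pad')               # no middle segment
    assert path_re.match('khanacademy.org/math/applied-math/cryptography')   # one middle segment
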
index 56a76380cad6f45cb4a0a33581803f1371b2543b..7d4b57056509383fdc082a68c1650f38dc258763 100644 (file)
@@ -1,8 +1,6 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -15,28 +13,25 @@ class KickStarterIE(InfoExtractor):
             'id': '1404461844',
             'ext': 'mp4',
             'title': 'Intersection: The Story of Josh Grant by Kyle Cowling',
-            'description': 'A unique motocross documentary that examines the '
-                'life and mind of one of sports most elite athletes: Josh Grant.',
+            'description': (
+                'A unique motocross documentary that examines the '
+                'life and mind of one of sports most elite athletes: Josh Grant.'
+            ),
         },
     }, {
         'note': 'Embedded video (not using the native kickstarter video service)',
         'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
-        'playlist': [
-            {
-                'info_dict': {
-                    'id': '78704821',
-                    'ext': 'mp4',
-                    'uploader_id': 'pebble',
-                    'uploader': 'Pebble Technology',
-                    'title': 'Pebble iOS Notifications',
-                }
-            }
-        ],
+        'info_dict': {
+            'id': '78704821',
+            'ext': 'mp4',
+            'uploader_id': 'pebble',
+            'uploader': 'Pebble Technology',
+            'title': 'Pebble iOS Notifications',
+        }
     }]
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         title = self._html_search_regex(
index 5341ac773f79fe237626bdfe3243bd1561d8003d..41fd62009ac16100c1e6bb776028020cd12e9ff2 100644 (file)
@@ -34,7 +34,7 @@ class KontrTubeIE(InfoExtractor):
         video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
         thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
         title = self._html_search_regex(
-            r'<title>(.+?) - Труба зовёт - Интересный видеохостинг</title>', webpage, 'video title')
+            r'<title>(.+?)</title>', webpage, 'video title')
         description = self._html_search_meta('description', webpage, 'video description')
 
         mobj = re.search(
@@ -63,4 +63,4 @@ class KontrTubeIE(InfoExtractor):
             'duration': duration,
             'view_count': int_or_none(view_count),
             'comment_count': int_or_none(comment_count),
-        }
\ No newline at end of file
+        }
index 484239b19e9b20436eaa40e15263d7dd72c11445..a602980a141f3f8ccce026eaddc8b383e7894352 100644 (file)
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -18,11 +16,11 @@ class Ku6IE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        title = self._search_regex(r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
+
+        title = self._html_search_regex(
+            r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
         dataUrl = 'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id
         jsonData = self._download_json(dataUrl, video_id)
         downloadUrl = jsonData['data']['f']
@@ -32,4 +30,3 @@ class Ku6IE(InfoExtractor):
             'title': title,
             'url': downloadUrl
         }
-
diff --git a/youtube_dl/extractor/laola1tv.py b/youtube_dl/extractor/laola1tv.py
new file mode 100644 (file)
index 0000000..2fd3b46
--- /dev/null
@@ -0,0 +1,77 @@
+from __future__ import unicode_literals
+
+import random
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class Laola1TvIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<id>[0-9]+)\.html'
+    _TEST = {
+        'url': 'http://www.laola1.tv/de-de/live/bwf-bitburger-open-grand-prix-gold-court-1/250019.html',
+        'info_dict': {
+            'id': '250019',
+            'ext': 'mp4',
+            'title': 'Bitburger Open Grand Prix Gold - Court 1',
+            'categories': ['Badminton'],
+            'uploader': 'BWF - Badminton World Federation',
+            'is_live': True,
+        },
+        'params': {
+            'skip_download': True,
+        }
+    }
+
+    _BROKEN = True  # Not really - extractor works fine, but f4m downloader does not support live streams yet.
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        lang = mobj.group('lang')
+        portal = mobj.group('portal')
+
+        webpage = self._download_webpage(url, video_id)
+        iframe_url = self._search_regex(
+            r'<iframe[^>]*?class="main_tv_player"[^>]*?src="([^"]+)"',
+            webpage, 'iframe URL')
+
+        iframe = self._download_webpage(
+            iframe_url, video_id, note='Downloading iframe')
+        flashvars_m = re.findall(
+            r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe)
+        flashvars = dict((m[0], m[1]) for m in flashvars_m)
+
+        xml_url = ('http://www.laola1.tv/server/hd_video.php?' +
+                   'play=%s&partner=1&portal=%s&v5ident=&lang=%s' % (
+                       video_id, portal, lang))
+        hd_doc = self._download_xml(xml_url, video_id)
+
+        title = hd_doc.find('.//video/title').text
+        flash_url = hd_doc.find('.//video/url').text
+        categories = hd_doc.find('.//video/meta_sports').text.split(',')
+        uploader = hd_doc.find('.//video/meta_organistation').text
+
+        ident = random.randint(10000000, 99999999)
+        token_url = '%s&ident=%s&klub=0&unikey=0&timestamp=%s&auth=%s' % (
+            flash_url, ident, flashvars['timestamp'], flashvars['auth'])
+
+        token_doc = self._download_xml(
+            token_url, video_id, note='Downloading token')
+        token_attrib = token_doc.find('.//token').attrib
+        if token_attrib.get('auth') == 'blocked':
+            raise ExtractorError('Token error: %s' % token_attrib.get('comment'))
+
+        video_url = '%s?hdnea=%s&hdcore=3.2.0' % (
+            token_attrib['url'], token_attrib['auth'])
+
+        return {
+            'id': video_id,
+            'is_live': True,
+            'title': title,
+            'url': video_url,
+            'uploader': uploader,
+            'categories': categories,
+            'ext': 'mp4',
+        }
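
Before building the token URL above, the extractor scrapes the player's flashvars assignments into a plain dict. That step in isolation (the iframe snippet here is invented):

    import re

    iframe_html = '''
    flashvars.timestamp = "1418400000";
    flashvars.auth = "0123456789abcdef";
    '''
    flashvars = dict(re.findall(
        r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe_html))
    assert flashvars == {'timestamp': '1418400000', 'auth': '0123456789abcdef'}
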
index 8d9491f233bf578bc9274a18fe022a1538effad6..1dfe7f77f4ccdefa0b076f71f6467644e465cb52 100644 (file)
@@ -52,7 +52,7 @@ class LifeNewsIE(InfoExtractor):
             r'<div class=\'comments\'>\s*<span class=\'counter\'>(\d+)</span>', webpage, 'comment count', fatal=False)
 
         upload_date = self._html_search_regex(
-            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
+            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date', fatal=False)
         if upload_date is not None:
             upload_date = unified_strdate(upload_date)
 
@@ -71,4 +71,4 @@ class LifeNewsIE(InfoExtractor):
         if len(videos) == 1:
             return make_entry(video_id, videos[0])
         else:
-            return [make_entry(video_id, media, video_number+1) for video_number, media in enumerate(videos)]
\ No newline at end of file
+            return [make_entry(video_id, media, video_number + 1) for video_number, media in enumerate(videos)]
index 8e50e8f79adee2e21c7801da13da60897fcb61ec..b04be1e8cfda94addca26a1d1e3731ce61519dc1 100644 (file)
@@ -19,8 +19,7 @@ class LiveLeakIE(InfoExtractor):
             'uploader': 'ljfriel2',
             'title': 'Most unlucky car accident'
         }
-    },
-    {
+    }, {
         'url': 'http://www.liveleak.com/view?i=f93_1390833151',
         'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf',
         'info_dict': {
@@ -30,8 +29,7 @@ class LiveLeakIE(InfoExtractor):
             'uploader': 'ARD_Stinkt',
             'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
         }
-    },
-    {
+    }, {
         'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
         'md5': '42c6d97d54f1db107958760788c5f48f',
         'info_dict': {
index c0c2d9b09d92c8ce79894112fd58834f0f1b92a1..03c4691c6abb6794557a7d83b30fce49a2a2e4e7 100644 (file)
@@ -18,8 +18,8 @@ from ..utils import (
 
 class LivestreamIE(InfoExtractor):
     IE_NAME = 'livestream'
-    _VALID_URL = r'http://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$'
-    _TEST = {
+    _VALID_URL = r'https?://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
+    _TESTS = [{
         'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
         'md5': '53274c76ba7754fb0e8d072716f2292b',
         'info_dict': {
@@ -31,7 +31,16 @@ class LivestreamIE(InfoExtractor):
             'view_count': int,
             'thumbnail': 're:^http://.*\.jpg$'
         }
-    }
+    }, {
+        'url': 'http://new.livestream.com/tedx/cityenglish',
+        'info_dict': {
+            'title': 'TEDCity2.0 (English)',
+        },
+        'playlist_mincount': 4,
+    }, {
+        'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640',
+        'only_matching': True,
+    }]
 
     def _parse_smil(self, video_id, smil_url):
         formats = []
@@ -111,23 +120,37 @@ class LivestreamIE(InfoExtractor):
         event_name = mobj.group('event_name')
         webpage = self._download_webpage(url, video_id or event_name)
 
+        og_video = self._og_search_video_url(
+            webpage, 'player url', fatal=False, default=None)
+        if og_video is not None:
+            query_str = compat_urllib_parse_urlparse(og_video).query
+            query = compat_urlparse.parse_qs(query_str)
+            if 'play_url' in query:
+                api_url = query['play_url'][0].replace('.smil', '')
+                info = json.loads(self._download_webpage(
+                    api_url, video_id, 'Downloading video info'))
+                return self._extract_video_info(info)
+
+        config_json = self._search_regex(
+            r'window.config = ({.*?});', webpage, 'window config')
+        info = json.loads(config_json)['event']
+
+        def is_relevant(vdata, vid):
+            result = vdata['type'] == 'video'
+            if video_id is not None:
+                result = result and compat_str(vdata['data']['id']) == vid
+            return result
+
+        videos = [self._extract_video_info(video_data['data'])
+                  for video_data in info['feed']['data']
+                  if is_relevant(video_data, video_id)]
         if video_id is None:
             # This is an event page:
-            config_json = self._search_regex(
-                r'window.config = ({.*?});', webpage, 'window config')
-            info = json.loads(config_json)['event']
-            videos = [self._extract_video_info(video_data['data'])
-                for video_data in info['feed']['data']
-                if video_data['type'] == 'video']
             return self.playlist_result(videos, info['id'], info['full_name'])
         else:
-            og_video = self._og_search_video_url(webpage, 'player url')
-            query_str = compat_urllib_parse_urlparse(og_video).query
-            query = compat_urlparse.parse_qs(query_str)
-            api_url = query['play_url'][0].replace('.smil', '')
-            info = json.loads(self._download_webpage(
-                api_url, video_id, 'Downloading video info'))
-            return self._extract_video_info(info)
+            if not videos:
+                raise ExtractorError('Cannot find video %s' % video_id)
+            return videos[0]
 
 
 # The original version of Livestream uses a different system
@@ -137,7 +160,7 @@ class LivestreamOriginalIE(InfoExtractor):
         (?P<user>[^/]+)/(?P<type>video|folder)
         (?:\?.*?Id=|/)(?P<id>.*?)(&|$)
         '''
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
         'info_dict': {
             'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
@@ -148,7 +171,13 @@ class LivestreamOriginalIE(InfoExtractor):
             # rtmp
             'skip_download': True,
         },
-    }
+    }, {
+        'url': 'https://www.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
+        'info_dict': {
+            'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3',
+        },
+        'playlist_mincount': 4,
+    }]
 
     def _extract_video(self, user, video_id):
         api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id)
@@ -164,22 +193,27 @@ class LivestreamOriginalIE(InfoExtractor):
             'id': video_id,
             'title': item.find('title').text,
             'url': 'rtmp://extondemand.livestream.com/ondemand',
-            'play_path': 'mp4:trans/dv15/mogulus-{0}.mp4'.format(path),
+            'play_path': 'trans/dv15/mogulus-{0}'.format(path),
+            'player_url': 'http://static.livestream.com/chromelessPlayer/v21/playerapi.swf?hash=5uetk&v=0803&classid=D27CDB6E-AE6D-11cf-96B8-444553540000&jsEnabled=false&wmode=opaque',
             'ext': 'flv',
             'thumbnail': thumbnail_url,
         }
 
     def _extract_folder(self, url, folder_id):
         webpage = self._download_webpage(url, folder_id)
-        urls = orderedSet(re.findall(r'<a href="(https?://livestre\.am/.*?)"', webpage))
+        paths = orderedSet(re.findall(
+            r'''(?x)(?:
+                <li\s+class="folder">\s*<a\s+href="|
+                <a\s+href="(?=https?://livestre\.am/)
+            )([^"]+)"''', webpage))
 
         return {
             '_type': 'playlist',
             'id': folder_id,
             'entries': [{
                 '_type': 'url',
-                'url': video_url,
-            } for video_url in urls],
+                'url': compat_urlparse.urljoin(url, p),
+            } for p in paths],
         }
 
     def _real_extract(self, url):
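
The reworked Livestream flow first inspects the og:video URL: if its query string carries a play_url parameter, the API endpoint is derived directly from it and the window.config fallback is never touched. The query-string step on its own, using the stdlib equivalents of the compat helpers (sample URL invented):

    try:
        from urllib.parse import urlparse, parse_qs  # Python 3
    except ImportError:
        from urlparse import urlparse, parse_qs      # Python 2

    og_video = ('http://new.livestream.com/player?'
                'play_url=http%3A%2F%2Fapi.example.com%2Fv%2F4719370.smil')
    query = parse_qs(urlparse(og_video).query)
    if 'play_url' in query:
        api_url = query['play_url'][0].replace('.smil', '')
        assert api_url == 'http://api.example.com/v/4719370'
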
diff --git a/youtube_dl/extractor/lrt.py b/youtube_dl/extractor/lrt.py
new file mode 100644 (file)
index 0000000..d72d470
--- /dev/null
@@ -0,0 +1,67 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    js_to_json,
+    parse_duration,
+    remove_end,
+)
+
+
+class LRTIE(InfoExtractor):
+    IE_NAME = 'lrt.lt'
+    _VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://www.lrt.lt/mediateka/irasas/54391/',
+        'info_dict': {
+            'id': '54391',
+            'ext': 'mp4',
+            'title': 'Septynios Kauno dienos',
+            'description': 'md5:24d84534c7dc76581e59f5689462411a',
+            'duration': 1783,
+        },
+        'params': {
+            'skip_download': True,  # HLS download
+        },
+
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = remove_end(self._og_search_title(webpage), ' - LRT')
+        thumbnail = self._og_search_thumbnail(webpage)
+        description = self._og_search_description(webpage)
+        duration = parse_duration(self._search_regex(
+            r"'duration':\s*'([^']+)',", webpage,
+            'duration', fatal=False, default=None))
+
+        formats = []
+        for js in re.findall(r'(?s)config:\s*(\{.*?\})', webpage):
+            data = json.loads(js_to_json(js))
+            if data['provider'] == 'rtmp':
+                formats.append({
+                    'format_id': 'rtmp',
+                    'ext': determine_ext(data['file']),
+                    'url': data['streamer'],
+                    'play_path': 'mp4:%s' % data['file'],
+                    'preference': -1,
+                })
+            else:
+                formats.extend(
+                    self._extract_m3u8_formats(data['file'], video_id, 'mp4'))
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': thumbnail,
+            'description': description,
+            'duration': duration,
+        }
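
The LRT page embeds its player config as a JavaScript object literal rather than strict JSON, hence the js_to_json pass before json.loads. A rough illustration, assuming js_to_json quotes bare keys and converts single-quoted strings as the youtube_dl.utils helper does (config snippet invented):

    import json
    from youtube_dl.utils import js_to_json

    config_js = "{provider: 'rtmp', file: 'mp4/video.mp4', streamer: 'rtmp://example.lt/app'}"
    config = json.loads(js_to_json(config_js))
    assert config['provider'] == 'rtmp'
    assert config['file'] == 'mp4/video.mp4'
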
index 33f34f4e9bdda2aa034dd4f46ef3299478f181ec..2160d6cb08ae5b71584ffdc0d76982e2a9bdf0c0 100644 (file)
@@ -45,7 +45,7 @@ class LyndaIE(SubtitlesInfoExtractor):
         video_id = mobj.group(1)
 
         page = self._download_webpage('http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id, video_id,
-            'Downloading video JSON')
+                                      'Downloading video JSON')
         video_json = json.loads(page)
 
         if 'Status' in video_json:
@@ -109,7 +109,7 @@ class LyndaIE(SubtitlesInfoExtractor):
             'password': password,
             'remember': 'false',
             'stayPut': 'false'
-        }        
+        }
         request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
         login_page = self._download_webpage(request, None, 'Logging in as %s' % username)
 
@@ -117,7 +117,7 @@ class LyndaIE(SubtitlesInfoExtractor):
         m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
         if m is not None:
             response = m.group('json')
-            response_json = json.loads(response)            
+            response_json = json.loads(response)
             state = response_json['state']
 
             if state == 'notlogged':
@@ -187,7 +187,7 @@ class LyndaCourseIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         course_path = mobj.group('coursepath')
         course_id = mobj.group('courseid')
-        
+
         page = self._download_webpage('http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
                                       course_id, 'Downloading course JSON')
         course_json = json.loads(page)
@@ -221,4 +221,4 @@ class LyndaCourseIE(InfoExtractor):
 
         course_title = course_json['Title']
 
-        return self.playlist_result(entries, course_id, course_title)
\ No newline at end of file
+        return self.playlist_result(entries, course_id, course_title)
index 1a26b5d572852eab85449af0a6ffc2fd7c98aeda..7e025831b51d611f00e248bda637b4ae8f35efb6 100644 (file)
@@ -27,7 +27,7 @@ class M6IE(InfoExtractor):
         video_id = mobj.group('id')
 
         rss = self._download_xml('http://ws.m6.fr/v1/video/info/m6/bonus/%s' % video_id, video_id,
-            'Downloading video RSS')
+                                 'Downloading video RSS')
 
         title = rss.find('./channel/item/title').text
         description = rss.find('./channel/item/description').text
@@ -53,4 +53,4 @@ class M6IE(InfoExtractor):
             'duration': duration,
             'view_count': view_count,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 7460d81cd501b8c52dcce3caae8313f6854b571a..54a14cb94c93dad587a83c58d58ec3d262f0eed8 100644 (file)
@@ -16,7 +16,7 @@ class MailRuIE(InfoExtractor):
             'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76',
             'md5': 'dea205f03120046894db4ebb6159879a',
             'info_dict': {
-                'id': '46301138',
+                'id': '46301138_76',
                 'ext': 'mp4',
                 'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
                 'timestamp': 1393232740,
@@ -30,7 +30,7 @@ class MailRuIE(InfoExtractor):
             'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html',
             'md5': '00a91a58c3402204dcced523777b475f',
             'info_dict': {
-                'id': '46843144',
+                'id': '46843144_1263',
                 'ext': 'mp4',
                 'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion',
                 'timestamp': 1397217632,
@@ -54,33 +54,36 @@ class MailRuIE(InfoExtractor):
 
         author = video_data['author']
         uploader = author['name']
-        uploader_id = author['id']
+        uploader_id = author.get('id') or author.get('email')
+        view_count = video_data.get('views_count')
 
-        movie = video_data['movie']
-        content_id = str(movie['contentId'])
-        title = movie['title']
+        meta_data = video_data['meta']
+        content_id = '%s_%s' % (
+            meta_data.get('accId', ''), meta_data['itemId'])
+        title = meta_data['title']
         if title.endswith('.mp4'):
             title = title[:-4]
-        thumbnail = movie['poster']
-        duration = movie['duration']
-
-        view_count = video_data['views_count']
+        thumbnail = meta_data['poster']
+        duration = meta_data['duration']
+        timestamp = meta_data['timestamp']
 
         formats = [
             {
                 'url': video['url'],
-                'format_id': video['name'],
+                'format_id': video['key'],
+                'height': int(video['key'].rstrip('p'))
             } for video in video_data['videos']
         ]
+        self._sort_formats(formats)
 
         return {
             'id': content_id,
             'title': title,
             'thumbnail': thumbnail,
-            'timestamp': video_data['timestamp'],
+            'timestamp': timestamp,
             'uploader': uploader,
             'uploader_id': uploader_id,
             'duration': duration,
             'view_count': view_count,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 8c1966ab25e5215ab96286822d50507aa25ea58d..1abf6e4f85d52cbe4d257280cf26277e715dbdb1 100644 (file)
@@ -7,6 +7,7 @@ from ..utils import (
     compat_urllib_parse,
 )
 
+
 class MalemotionIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
     _TEST = {
index 1b8c4a32edf5d269b1e9bb9db366487ef2fba981..5fdd19027db3ccad0265601b8d88452a0eaac525 100644 (file)
@@ -7,7 +7,7 @@ from .common import InfoExtractor
 
 class MDRIE(InfoExtractor):
     _VALID_URL = r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)'
-    
+
     # No tests, MDR regularly deletes its videos
     _TEST = {
         'url': 'http://www.mdr.de/fakt/video189002.html',
index 6436c05a3cd8e3f25499b9ff911de837a6c98207..858c1c0c31f4c08c3068a62983781129288dc3b8 100644 (file)
@@ -9,6 +9,7 @@ from ..utils import (
     compat_urllib_request,
     determine_ext,
     ExtractorError,
+    int_or_none,
 )
 
 
@@ -21,7 +22,7 @@ class MetacafeIE(InfoExtractor):
         # Youtube video
         {
             'add_ie': ['Youtube'],
-            'url':  'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/',
+            'url': 'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/',
             'info_dict': {
                 'id': '_aUehQsCQtM',
                 'ext': 'mp4',
@@ -83,6 +84,21 @@ class MetacafeIE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        # Movieclips.com video
+        {
+            'url': 'http://www.metacafe.com/watch/mv-Wy7ZU/my_week_with_marilyn_do_you_love_me/',
+            'info_dict': {
+                'id': 'mv-Wy7ZU',
+                'ext': 'mp4',
+                'title': 'My Week with Marilyn - Do You Love Me?',
+                'description': 'From the movie My Week with Marilyn - Colin (Eddie Redmayne) professes his love to Marilyn (Michelle Williams) and gets her to promise to return to set and finish the movie.',
+                'uploader': 'movie_trailers',
+                'duration': 176,
+            },
+            'params': {
+                'skip_download': 'requires rtmpdump',
+            }
+        }
     ]
 
     def report_disclaimer(self):
@@ -134,6 +150,7 @@ class MetacafeIE(InfoExtractor):
 
         # Extract URL, uploader and title from webpage
         self.report_extraction(video_id)
+        video_url = None
         mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
         if mobj is not None:
             mediaURL = compat_urllib_parse.unquote(mobj.group(1))
@@ -146,16 +163,17 @@ class MetacafeIE(InfoExtractor):
             else:
                 gdaKey = mobj.group(1)
                 video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
-        else:
+        if video_url is None:
             mobj = re.search(r'<video src="([^"]+)"', webpage)
             if mobj:
                 video_url = mobj.group(1)
                 video_ext = 'mp4'
-            else:
-                mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
-                if mobj is None:
-                    raise ExtractorError('Unable to extract media URL')
-                vardict = compat_parse_qs(mobj.group(1))
+        if video_url is None:
+            flashvars = self._search_regex(
+                r' name="flashvars" value="(.*?)"', webpage, 'flashvars',
+                default=None)
+            if flashvars:
+                vardict = compat_parse_qs(flashvars)
                 if 'mediaData' not in vardict:
                     raise ExtractorError('Unable to extract media URL')
                 mobj = re.search(
@@ -165,26 +183,68 @@ class MetacafeIE(InfoExtractor):
                 mediaURL = mobj.group('mediaURL').replace('\\/', '/')
                 video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
                 video_ext = determine_ext(video_url)
-
-        video_title = self._html_search_regex(r'(?im)<title>(.*) - Video</title>', webpage, 'title')
+        if video_url is None:
+            player_url = self._search_regex(
+                r"swfobject\.embedSWF\('([^']+)'",
+                webpage, 'config URL', default=None)
+            if player_url:
+                config_url = self._search_regex(
+                    r'config=(.+)$', player_url, 'config URL')
+                config_doc = self._download_xml(
+                    config_url, video_id,
+                    note='Downloading video config')
+                smil_url = config_doc.find('.//properties').attrib['smil_file']
+                smil_doc = self._download_xml(
+                    smil_url, video_id,
+                    note='Downloading SMIL document')
+                base_url = smil_doc.find('./head/meta').attrib['base']
+                video_url = []
+                for vn in smil_doc.findall('.//video'):
+                    br = int(vn.attrib['system-bitrate'])
+                    play_path = vn.attrib['src']
+                    video_url.append({
+                        'format_id': 'smil-%d' % br,
+                        'url': base_url,
+                        'play_path': play_path,
+                        'page_url': url,
+                        'player_url': player_url,
+                        'ext': play_path.partition(':')[0],
+                    })
+
+        if video_url is None:
+            raise ExtractorError('Unsupported video type')
+
+        video_title = self._html_search_regex(
+            r'(?im)<title>(.*) - Video</title>', webpage, 'title')
         description = self._og_search_description(webpage)
         thumbnail = self._og_search_thumbnail(webpage)
         video_uploader = self._html_search_regex(
-                r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
-                webpage, 'uploader nickname', fatal=False)
-
-        if re.search(r'"contentRating":"restricted"', webpage) is not None:
-            age_limit = 18
+            r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
+            webpage, 'uploader nickname', fatal=False)
+        duration = int_or_none(
+            self._html_search_meta('video:duration', webpage))
+
+        age_limit = (
+            18
+            if re.search(r'"contentRating":"restricted"', webpage)
+            else 0)
+
+        if isinstance(video_url, list):
+            formats = video_url
         else:
-            age_limit = 0
+            formats = [{
+                'url': video_url,
+                'ext': video_ext,
+            }]
 
+        self._sort_formats(formats)
         return {
             'id': video_id,
-            'url': video_url,
             'description': description,
             'uploader': video_uploader,
             'title': video_title,
-            'thumbnail':thumbnail,
-            'ext': video_ext,
+            'thumbnail': thumbnail,
             'age_limit': age_limit,
+            'formats': formats,
+            'duration': duration,
         }
index 07f072924a6dadb2838230fd29a6a830ff99bb64..e30320569805aedaa6694ae54f9086909593f7a4 100644 (file)
@@ -28,7 +28,7 @@ class MetacriticIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         # The xml is not well formatted, there are raw '&'
         info = self._download_xml('http://www.metacritic.com/video_data?video=' + video_id,
-            video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)
+                                  video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)
 
         clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id)
         formats = []
@@ -44,7 +44,7 @@ class MetacriticIE(InfoExtractor):
         self._sort_formats(formats)
 
         description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>',
-            webpage, 'description', flags=re.DOTALL)
+                                              webpage, 'description', flags=re.DOTALL)
 
         return {
             'id': video_id,
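
The comment in the hunk above explains the transform_source hook: the Metacritic feed contains raw '&' characters that a strict XML parser rejects, and fix_xml_ampersands escapes any '&' that does not already start an entity. A self-contained sketch of the same idea:

    import re
    import xml.etree.ElementTree as etree

    def fix_xml_ampersands(xml_str):
        # Escape bare ampersands while leaving existing entities alone
        # (a sketch of the youtube_dl.utils helper of the same name).
        return re.sub(
            r'&(?!amp;|lt;|gt;|apos;|quot;|#x?[0-9a-fA-F]+;)',
            '&amp;', xml_str)

    doc = etree.fromstring(fix_xml_ampersands('<clip title="AT&T &amp; friends"/>'))
    assert doc.attrib['title'] == 'AT&T & friends'
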
diff --git a/youtube_dl/extractor/mgoon.py b/youtube_dl/extractor/mgoon.py
new file mode 100644 (file)
index 0000000..94bc87b
--- /dev/null
@@ -0,0 +1,87 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    qualities,
+    unified_strdate,
+)
+
+
+class MgoonIE(InfoExtractor):
+    _VALID_URL = r'''(?x)https?://(?:www\.)?
+    (?:(?:m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)|
+        video\.mgoon\.com)/(?P<id>[0-9]+)'''
+    _API_URL = 'http://mpos.mgoon.com/player/video?id={0:}'
+    _TESTS = [
+        {
+            'url': 'http://m.mgoon.com/ch/hi6618/v/5582148',
+            'md5': 'dd46bb66ab35cf6d51cc812fd82da79d',
+            'info_dict': {
+                'id': '5582148',
+                'uploader_id': 'hi6618',
+                'duration': 240.419,
+                'upload_date': '20131220',
+                'ext': 'mp4',
+                'title': 'md5:543aa4c27a4931d371c3f433e8cebebc',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            }
+        },
+        {
+            'url': 'http://www.mgoon.com/play/view/5582148',
+            'only_matching': True,
+        },
+        {
+            'url': 'http://video.mgoon.com/5582148',
+            'only_matching': True,
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        data = self._download_json(self._API_URL.format(video_id), video_id)
+
+        if data.get('errorInfo', {}).get('code') != 'NONE':
+            raise ExtractorError('%s encountered an error: %s' % (
+                self.IE_NAME, data['errorInfo']['message']), expected=True)
+
+        v_info = data['videoInfo']
+        title = v_info.get('v_title')
+        thumbnail = v_info.get('v_thumbnail')
+        duration = v_info.get('v_duration')
+        upload_date = unified_strdate(v_info.get('v_reg_date'))
+        uploader_id = data.get('userInfo', {}).get('u_alias')
+        if duration:
+            duration /= 1000.0
+
+        age_limit = None
+        if data.get('accessInfo', {}).get('code') == 'VIDEO_STATUS_ADULT':
+            age_limit = 18
+
+        formats = []
+        get_quality = qualities(['360p', '480p', '720p', '1080p'])
+        for fmt in data['videoFiles']:
+            formats.append({
+                'format_id': fmt['label'],
+                'quality': get_quality(fmt['label']),
+                'url': fmt['url'],
+                'ext': fmt['format'],
+
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'upload_date': upload_date,
+            'uploader_id': uploader_id,
+            'age_limit': age_limit,
+        }
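
The Mgoon formats are ranked with the qualities helper, which turns a known-label list into a preference function for _sort_formats; labels later in the list rank higher and unknown labels fall to -1. Assuming the youtube_dl.utils behaviour:

    from youtube_dl.utils import qualities

    get_quality = qualities(['360p', '480p', '720p', '1080p'])
    assert get_quality('1080p') > get_quality('360p')  # later in the list = preferred
    assert get_quality('unknown') == -1                # unrecognized labels sort last
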
diff --git a/youtube_dl/extractor/minhateca.py b/youtube_dl/extractor/minhateca.py
new file mode 100644 (file)
index 0000000..14934b7
--- /dev/null
@@ -0,0 +1,72 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_parse,
+    compat_urllib_request,
+)
+from ..utils import (
+    int_or_none,
+    parse_duration,
+    parse_filesize,
+)
+
+
+class MinhatecaIE(InfoExtractor):
+    _VALID_URL = r'https?://minhateca\.com\.br/[^?#]+,(?P<id>[0-9]+)\.'
+    _TEST = {
+        'url': 'http://minhateca.com.br/pereba/misc/youtube-dl+test+video,125848331.mp4(video)',
+        'info_dict': {
+            'id': '125848331',
+            'ext': 'mp4',
+            'title': 'youtube-dl test video',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'filesize_approx': 1530000,
+            'duration': 9,
+            'view_count': int,
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        token = self._html_search_regex(
+            r'<input name="__RequestVerificationToken".*?value="([^"]+)"',
+            webpage, 'request token')
+        token_data = [
+            ('fileId', video_id),
+            ('__RequestVerificationToken', token),
+        ]
+        req = compat_urllib_request.Request(
+            'http://minhateca.com.br/action/License/Download',
+            data=compat_urllib_parse.urlencode(token_data))
+        req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        data = self._download_json(
+            req, video_id, note='Downloading metadata')
+
+        video_url = data['redirectUrl']
+        title_str = self._html_search_regex(
+            r'<h1.*?>(.*?)</h1>', webpage, 'title')
+        title, _, ext = title_str.rpartition('.')
+        filesize_approx = parse_filesize(self._html_search_regex(
+            r'<p class="fileSize">(.*?)</p>',
+            webpage, 'file size approximation', fatal=False))
+        duration = parse_duration(self._html_search_regex(
+            r'(?s)<p class="fileLeng[ht][th]">.*?class="bold">(.*?)<',
+            webpage, 'duration', fatal=False))
+        view_count = int_or_none(self._html_search_regex(
+            r'<p class="downloadsCounter">([0-9]+)</p>',
+            webpage, 'view count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'ext': ext,
+            'filesize_approx': filesize_approx,
+            'duration': duration,
+            'view_count': view_count,
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
diff --git a/youtube_dl/extractor/ministrygrid.py b/youtube_dl/extractor/ministrygrid.py
new file mode 100644 (file)
index 0000000..949ad11
--- /dev/null
@@ -0,0 +1,57 @@
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    smuggle_url,
+)
+
+
+class MinistryGridIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.ministrygrid.com/([^/?#]*/)*(?P<id>[^/#?]+)/?(?:$|[?#])'
+
+    _TEST = {
+        'url': 'http://www.ministrygrid.com/training-viewer/-/training/t4g-2014-conference/the-gospel-by-numbers-4/the-gospel-by-numbers',
+        'md5': '844be0d2a1340422759c2a9101bab017',
+        'info_dict': {
+            'id': '3453494717001',
+            'ext': 'mp4',
+            'title': 'The Gospel by Numbers',
+            'description': 'Coming soon from T4G 2014!',
+            'uploader': 'LifeWay Christian Resources (MG)',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        portlets_json = self._search_regex(
+            r'Liferay\.Portlet\.list=(\[.+?\])', webpage, 'portlet list')
+        portlets = json.loads(portlets_json)
+        pl_id = self._search_regex(
+            r'<!--\s*p_l_id - ([0-9]+)<br>', webpage, 'p_l_id')
+
+        for i, portlet in enumerate(portlets):
+            portlet_url = 'http://www.ministrygrid.com/c/portal/render_portlet?p_l_id=%s&p_p_id=%s' % (pl_id, portlet)
+            portlet_code = self._download_webpage(
+                portlet_url, video_id,
+                note='Looking in portlet %s (%d/%d)' % (portlet, i + 1, len(portlets)),
+                fatal=False)
+            video_iframe_url = self._search_regex(
+                r'<iframe.*?src="([^"]+)"', portlet_code, 'video iframe',
+                default=None)
+            if video_iframe_url:
+                surl = smuggle_url(
+                    video_iframe_url, {'force_videoid': video_id})
+                return {
+                    '_type': 'url',
+                    'id': video_id,
+                    'url': surl,
+                }
+
+        raise ExtractorError('Could not find video iframe in any portlets')
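
When a portlet yields an iframe, the extractor hands the URL back with a force_videoid hint smuggled into it, so whichever extractor picks it up can keep the original id. A minimal round trip, assuming the smuggle_url/unsmuggle_url pair from youtube_dl.utils:

    from youtube_dl.utils import smuggle_url, unsmuggle_url

    surl = smuggle_url('http://example.com/embed/abc', {'force_videoid': '3453494717001'})
    plain_url, data = unsmuggle_url(surl)
    assert plain_url == 'http://example.com/embed/abc'
    assert data == {'force_videoid': '3453494717001'}
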
diff --git a/youtube_dl/extractor/mitele.py b/youtube_dl/extractor/mitele.py
new file mode 100644 (file)
index 0000000..6691521
--- /dev/null
@@ -0,0 +1,69 @@
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urlparse,
+    get_element_by_attribute,
+    parse_duration,
+    strip_jsonp,
+)
+
+
+class MiTeleIE(InfoExtractor):
+    IE_NAME = 'mitele.es'
+    _VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<episode>[^/]+)/'
+
+    _TEST = {
+        'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
+        'md5': '6a75fe9d0d3275bead0cb683c616fddb',
+        'info_dict': {
+            'id': '0fce117d',
+            'ext': 'mp4',
+            'title': 'Programa 144 - Tor, la web invisible',
+            'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
+            'display_id': 'programa-144',
+            'duration': 2913,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        episode = mobj.group('episode')
+        webpage = self._download_webpage(url, episode)
+        embed_data_json = self._search_regex(
+            r'MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
+            flags=re.DOTALL
+        ).replace('\'', '"')
+        embed_data = json.loads(embed_data_json)
+
+        domain = embed_data['mediaUrl']
+        if not domain.startswith('http'):
+            # only happens in telecinco.es videos
+            domain = 'http://' + domain
+        info_url = compat_urlparse.urljoin(
+            domain,
+            compat_urllib_parse.unquote(embed_data['flashvars']['host'])
+        )
+        info_el = self._download_xml(info_url, episode).find('./video/info')
+
+        video_link = info_el.find('videoUrl/link').text
+        token_query = compat_urllib_parse.urlencode({'id': video_link})
+        token_info = self._download_json(
+            embed_data['flashvars']['ov_tk'] + '?' + token_query,
+            episode,
+            transform_source=strip_jsonp
+        )
+
+        return {
+            'id': embed_data['videoId'],
+            'display_id': episode,
+            'title': info_el.find('title').text,
+            'url': token_info['tokenizedUrl'],
+            'description': get_element_by_attribute('class', 'text', webpage),
+            'thumbnail': info_el.find('thumb').text,
+            'duration': parse_duration(info_el.find('duration').text),
+        }
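
The mitele token endpoint answers with JSONP, so the response goes through transform_source=strip_jsonp before being parsed as JSON. What that unwrapping amounts to, assuming the youtube_dl.utils helper (payload invented):

    import json
    from youtube_dl.utils import strip_jsonp

    payload = 'pf_cb({"tokenizedUrl": "http://example.com/v.mp4"});'
    token_info = json.loads(strip_jsonp(payload))
    assert token_info['tokenizedUrl'] == 'http://example.com/v.mp4'
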
index 5f64e7bd0d98b74aea2a4350a51f057b4d0280ba..55cc33a3e4b94014f9d4642eff1d1a3d6dc26d05 100644 (file)
@@ -6,6 +6,7 @@ from .common import InfoExtractor
 from ..utils import (
     compat_urllib_parse,
     ExtractorError,
+    HEADRequest,
     int_or_none,
     parse_iso8601,
 )
@@ -32,22 +33,22 @@ class MixcloudIE(InfoExtractor):
         },
     }
 
-    def check_urls(self, url_list):
-        """Returns 1st active url from list"""
-        for url in url_list:
+    def _get_url(self, track_id, template_url):
+        server_count = 30
+        for i in range(server_count):
+            url = template_url % i
             try:
                 # We only want to know if the request succeeds,
                 # don't download the whole file
-                self._request_webpage(url, None, False)
+                self._request_webpage(
+                    HEADRequest(url), track_id,
+                    'Checking URL %d/%d ...' % (i + 1, server_count))
                 return url
             except ExtractorError:
-                url = None
+                pass
 
         return None
 
-    def _get_url(self, template_url):
-        return self.check_urls(template_url % i for i in range(30))
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         uploader = mobj.group(1)
@@ -60,16 +61,16 @@ class MixcloudIE(InfoExtractor):
             r'\s(?:data-preview-url|m-preview)="(.+?)"', webpage, 'preview url')
         song_url = preview_url.replace('/previews/', '/c/originals/')
         template_url = re.sub(r'(stream\d*)', 'stream%d', song_url)
-        final_song_url = self._get_url(template_url)
+        final_song_url = self._get_url(track_id, template_url)
         if final_song_url is None:
             self.to_screen('Trying with m4a extension')
             template_url = template_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/')
-            final_song_url = self._get_url(template_url)
+            final_song_url = self._get_url(track_id, template_url)
         if final_song_url is None:
             raise ExtractorError('Unable to extract track url')
 
         PREFIX = (
-            r'<div class="cloudcast-play-button-container"'
+            r'<span class="play-button[^"]*?"'
             r'(?:\s+[a-zA-Z0-9-]+(?:="[^"]+")?)*?\s+')
         title = self._html_search_regex(
             PREFIX + r'm-title="([^"]+)"', webpage, 'title')
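
The rewritten Mixcloud helper probes up to 30 numbered mirror servers with HEAD requests, so only headers are transferred while hunting for a live URL; HEADRequest in youtube_dl.utils is essentially a urllib Request with its method forced to HEAD. A standalone sketch of the same probing loop:

    try:
        from urllib.request import Request, urlopen
        from urllib.error import URLError
    except ImportError:  # Python 2
        from urllib2 import Request, urlopen, URLError

    class HEADRequest(Request):
        def get_method(self):
            return 'HEAD'

    def first_working_url(template_url, server_count=30):
        # Return the first mirror whose HEAD probe succeeds, else None.
        for i in range(server_count):
            url = template_url % i
            try:
                urlopen(HEADRequest(url), timeout=10)
                return url
            except URLError:
                pass
        return None
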
index 37c72bc5357e3766819b0f86d5bc379fdf7406c4..1a241aca77983ac9626a53e59bf85ad4394cc8fd 100644 (file)
@@ -6,12 +6,11 @@ from .common import InfoExtractor
 from ..utils import (
     parse_duration,
     parse_iso8601,
-    find_xpath_attr,
 )
 
 
 class MLBIE(InfoExtractor):
-    _VALID_URL = r'https?://m\.mlb\.com/(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v(?P<id>n?\d+)'
+    _VALID_URL = r'https?://m(?:lb)?\.mlb\.com/(?:(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v|(?:shared/video/embed/embed\.html|[^/]+/video/play\.jsp)\?.*?\bcontent_id=)(?P<id>n?\d+)'
     _TESTS = [
         {
             'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea',
@@ -69,6 +68,18 @@ class MLBIE(InfoExtractor):
                 'thumbnail': 're:^https?://.*\.jpg$',
             },
         },
+        {
+            'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',
+            'only_matching': True,
+        },
+        {
+            'url': 'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553',
+            'only_matching': True,
+        },
+        {
+            'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553',
+            'only_matching': True,
+        },
     ]
 
     def _real_extract(self, url):
@@ -84,8 +95,9 @@ class MLBIE(InfoExtractor):
         duration = parse_duration(detail.find('./duration').text)
         timestamp = parse_iso8601(detail.attrib['date'][:-5])
 
-        thumbnail = find_xpath_attr(
-            detail, './thumbnailScenarios/thumbnailScenario', 'type', '45').text
+        thumbnails = [{
+            'url': thumbnail.text,
+        } for thumbnail in detail.findall('./thumbnailScenarios/thumbnailScenario')]
 
         formats = []
         for media_url in detail.findall('./url'):
@@ -112,5 +124,5 @@ class MLBIE(InfoExtractor):
             'duration': duration,
             'timestamp': timestamp,
             'formats': formats,
-            'thumbnail': thumbnail,
+            'thumbnails': thumbnails,
         }
diff --git a/youtube_dl/extractor/moevideo.py b/youtube_dl/extractor/moevideo.py
new file mode 100644 (file)
index 0000000..2ff79b9
--- /dev/null
@@ -0,0 +1,112 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    compat_urllib_parse,
+    compat_urllib_request,
+    int_or_none,
+)
+
+
+class MoeVideoIE(InfoExtractor):
+    IE_DESC = 'LetitBit video services: moevideo.net, playreplay.net and videochart.net'
+    _VALID_URL = r'''(?x)
+        https?://(?P<host>(?:www\.)?
+        (?:(?:moevideo|playreplay|videochart)\.net))/
+        (?:video|framevideo)/(?P<id>[0-9]+\.[0-9A-Za-z]+)'''
+    _API_URL = 'http://api.letitbit.net/'
+    _API_KEY = 'tVL0gjqo5'
+    _TESTS = [
+        {
+            'url': 'http://moevideo.net/video/00297.0036103fe3d513ef27915216fd29',
+            'md5': '129f5ae1f6585d0e9bb4f38e774ffb3a',
+            'info_dict': {
+                'id': '00297.0036103fe3d513ef27915216fd29',
+                'ext': 'flv',
+                'title': 'Sink cut out machine',
+                'description': 'md5:f29ff97b663aefa760bf7ca63c8ca8a8',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'width': 540,
+                'height': 360,
+                'duration': 179,
+                'filesize': 17822500,
+            }
+        },
+        {
+            'url': 'http://playreplay.net/video/77107.7f325710a627383d40540d8e991a',
+            'md5': '74f0a014d5b661f0f0e2361300d1620e',
+            'info_dict': {
+                'id': '77107.7f325710a627383d40540d8e991a',
+                'ext': 'flv',
+                'title': 'Operacion Condor.',
+                'description': 'md5:7e68cb2fcda66833d5081c542491a9a3',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'width': 480,
+                'height': 296,
+                'duration': 6027,
+                'filesize': 588257923,
+            }
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(
+            'http://%s/video/%s' % (mobj.group('host'), video_id),
+            video_id, 'Downloading webpage')
+
+        title = self._og_search_title(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+        description = self._og_search_description(webpage)
+
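+        # LetitBit-style API call: a JSON payload [api_key, [method, {params}]] posted as the form field 'r'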
+        r = [
+            self._API_KEY,
+            [
+                'preview/flv_link',
+                {
+                    'uid': video_id,
+                },
+            ],
+        ]
+        r_json = json.dumps(r)
+        post = compat_urllib_parse.urlencode({'r': r_json})
+        req = compat_urllib_request.Request(self._API_URL, post)
+        req.add_header('Content-type', 'application/x-www-form-urlencoded')
+
+        response = self._download_json(req, video_id)
+        if response['status'] != 'OK':
+            raise ExtractorError(
+                '%s returned error: %s' % (self.IE_NAME, response['data']),
+                expected=True
+            )
+        item = response['data'][0]
+        video_url = item['link']
+        duration = int_or_none(item['length'])
+        width = int_or_none(item['width'])
+        height = int_or_none(item['height'])
+        filesize = int_or_none(item['convert_size'])
+
+        formats = [{
+            'format_id': 'sd',
+            'http_headers': {'Range': 'bytes=0-'},  # Required to download
+            'url': video_url,
+            'width': width,
+            'height': height,
+            'filesize': filesize,
+        }]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'description': description,
+            'duration': duration,
+            'formats': formats,
+        }
index b9430b09b749ad8c1dba28e3db794ef435b5aac8..d658647e6ca6d9b7675dd76ea55c58f52887374d 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import re
 
@@ -8,15 +10,17 @@ from ..utils import (
     compat_urllib_parse,
 )
 
+
 class MofosexIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>mofosex\.com/videos/(?P<videoid>[0-9]+)/.*?\.html)'
+    _VALID_URL = r'^https?://(?:www\.)?(?P<url>mofosex\.com/videos/(?P<videoid>[0-9]+)/.*?\.html)'
     _TEST = {
-        u'url': u'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
-        u'file': u'5018.mp4',
-        u'md5': u'1b2eb47ac33cc75d4a80e3026b613c5a',
-        u'info_dict': {
-            u"title": u"Japanese Teen Music Video",
-            u"age_limit": 18,
+        'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
+        'md5': '1b2eb47ac33cc75d4a80e3026b613c5a',
+        'info_dict': {
+            'id': '5018',
+            'ext': 'mp4',
+            'title': 'Japanese Teen Music Video',
+            'age_limit': 18,
         }
     }
 
@@ -29,8 +33,8 @@ class MofosexIE(InfoExtractor):
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 
-        video_title = self._html_search_regex(r'<h1>(.+?)<', webpage, u'title')
-        video_url = compat_urllib_parse.unquote(self._html_search_regex(r'flashvars.video_url = \'([^\']+)', webpage, u'video_url'))
+        video_title = self._html_search_regex(r'<h1>(.+?)<', webpage, 'title')
+        video_url = compat_urllib_parse.unquote(self._html_search_regex(r'flashvars.video_url = \'([^\']+)', webpage, 'video_url'))
         path = compat_urllib_parse_urlparse(video_url).path
         extension = os.path.splitext(path)[1][1:]
         format = path.split('/')[5].split('_')[:2]
diff --git a/youtube_dl/extractor/mojvideo.py b/youtube_dl/extractor/mojvideo.py
new file mode 100644 (file)
index 0000000..0ba435d
--- /dev/null
@@ -0,0 +1,58 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    parse_duration,
+)
+
+
+class MojvideoIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?mojvideo\.com/video-(?P<display_id>[^/]+)/(?P<id>[a-f0-9]+)'
+    _TEST = {
+        'url': 'http://www.mojvideo.com/video-v-avtu-pred-mano-rdecelaska-alfi-nipic/3d1ed4497707730b2906',
+        'md5': 'f7fd662cc8ce2be107b0d4f2c0483ae7',
+        'info_dict': {
+            'id': '3d1ed4497707730b2906',
+            'display_id': 'v-avtu-pred-mano-rdecelaska-alfi-nipic',
+            'ext': 'mp4',
+            'title': 'V avtu pred mano rdečelaska - Alfi Nipič',
+            'thumbnail': 're:^http://.*\.jpg$',
+            'duration': 242,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        # The playerapi response is malformed XML, so download it as plain text and parse it with regexes
+        playerapi = self._download_webpage(
+            'http://www.mojvideo.com/playerapi.php?v=%s&t=1' % video_id, display_id)
+
+        if '<error>true</error>' in playerapi:
+            error_desc = self._html_search_regex(
+                r'<errordesc>([^<]*)</errordesc>', playerapi, 'error description', fatal=False)
+            raise ExtractorError('%s said: %s' % (self.IE_NAME, error_desc), expected=True)
+
+        title = self._html_search_regex(
+            r'<title>([^<]+)</title>', playerapi, 'title')
+        video_url = self._html_search_regex(
+            r'<file>([^<]+)</file>', playerapi, 'video URL')
+        thumbnail = self._html_search_regex(
+            r'<preview>([^<]+)</preview>', playerapi, 'thumbnail', fatal=False)
+        duration = parse_duration(self._html_search_regex(
+            r'<duration>([^<]+)</duration>', playerapi, 'duration', fatal=False))
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'url': video_url,
+            'title': title,
+            'thumbnail': thumbnail,
+            'duration': duration,
+        }
diff --git a/youtube_dl/extractor/moniker.py b/youtube_dl/extractor/moniker.py
new file mode 100644 (file)
index 0000000..1c4f589
--- /dev/null
@@ -0,0 +1,70 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import os.path
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+)
+
+
+class MonikerIE(InfoExtractor):
+    IE_DESC = 'allmyvideos.net and vidspot.net'
+    _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?P<id>[a-zA-Z0-9_-]+)'
+
+    _TESTS = [{
+        'url': 'http://allmyvideos.net/jih3nce3x6wn',
+        'md5': '710883dee1bfc370ecf9fa6a89307c88',
+        'info_dict': {
+            'id': 'jih3nce3x6wn',
+            'ext': 'mp4',
+            'title': 'youtube-dl test video',
+        },
+    }, {
+        'url': 'http://vidspot.net/l2ngsmhs8ci5',
+        'md5': '710883dee1bfc370ecf9fa6a89307c88',
+        'info_dict': {
+            'id': 'l2ngsmhs8ci5',
+            'ext': 'mp4',
+            'title': 'youtube-dl test video',
+        },
+    }, {
+        'url': 'https://www.vidspot.net/l2ngsmhs8ci5',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        orig_webpage = self._download_webpage(url, video_id)
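+        # The first page is a confirmation form; re-submit its hidden fields to get the page with the actual video links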
+        fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
+        data = dict(fields)
+
+        post = compat_urllib_parse.urlencode(data)
+        headers = {
+            b'Content-Type': b'application/x-www-form-urlencoded',
+        }
+        req = compat_urllib_request.Request(url, post, headers)
+        webpage = self._download_webpage(
+            req, video_id, note='Downloading video page ...')
+
+        title = os.path.splitext(data['fname'])[0]
+
+        # Could be several links with different quality
+        links = re.findall(r'"file" : "?(.+?)",', webpage)
+        # Assume the links are ordered in quality
+        formats = [{
+            'url': l,
+            'quality': i,
+        } for i, l in enumerate(links)]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+        }
index 7d21ea18f1bec57a83a49478d43f000b8039041f..34a4bec3a0d1fb91208acce4684a4078188b847d 100644 (file)
@@ -49,7 +49,7 @@ class MooshareIE(InfoExtractor):
         page = self._download_webpage(url, video_id, 'Downloading page')
 
         if re.search(r'>Video Not Found or Deleted<', page) is not None:
-            raise ExtractorError(u'Video %s does not exist' % video_id, expected=True)
+            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
 
         hash_key = self._html_search_regex(r'<input type="hidden" name="hash" value="([^"]+)">', page, 'hash')
         title = self._html_search_regex(r'(?m)<div class="blockTitle">\s*<h2>Watch ([^<]+)</h2>', page, 'title')
@@ -111,4 +111,4 @@ class MooshareIE(InfoExtractor):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 6229b21732b70525b832ab2f3370594736cae8df..97d5da626a7a5d2555ac3107eb89d1a4fd11b510 100644 (file)
@@ -5,20 +5,20 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    int_or_none,
+    str_to_int,
     unified_strdate,
 )
 
 
 class MotherlessIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?motherless\.com/(?P<id>[A-Z0-9]+)'
+    _VALID_URL = r'http://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
     _TESTS = [
         {
             'url': 'http://motherless.com/AC3FFE1',
-            'md5': '5527fef81d2e529215dad3c2d744a7d9',
+            'md5': '310f62e325a9fafe64f68c0bccb6e75f',
             'info_dict': {
                 'id': 'AC3FFE1',
-                'ext': 'flv',
+                'ext': 'mp4',
                 'title': 'Fucked in the ass while playing PS3',
                 'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
                 'upload_date': '20100913',
@@ -40,33 +40,51 @@ class MotherlessIE(InfoExtractor):
                 'thumbnail': 're:http://.*\.jpg',
                 'age_limit': 18,
             }
+        },
+        {
+            'url': 'http://motherless.com/g/cosplay/633979F',
+            'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
+            'info_dict': {
+                'id': '633979F',
+                'ext': 'mp4',
+                'title': 'Turtlette',
+                'categories': ['superheroine heroine  superher'],
+                'upload_date': '20140827',
+                'uploader_id': 'shade0230',
+                'thumbnail': 're:http://.*\.jpg',
+                'age_limit': 18,
+            }
         }
     ]
 
-    def _real_extract(self,url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        title = self._html_search_regex(r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
-        
-        video_url = self._html_search_regex(r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video_url')
+        title = self._html_search_regex(
+            r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
+        video_url = self._html_search_regex(
+            r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video URL')
         age_limit = self._rta_search(webpage)
+        view_count = str_to_int(self._html_search_regex(
+            r'<strong>Views</strong>\s+([^<]+)<',
+            webpage, 'view count', fatal=False))
+        like_count = str_to_int(self._html_search_regex(
+            r'<strong>Favorited</strong>\s+([^<]+)<',
+            webpage, 'like count', fatal=False))
 
-        view_count = self._html_search_regex(r'<strong>Views</strong>\s+([^<]+)<', webpage, 'view_count')
-        upload_date = self._html_search_regex(r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload_date')
+        upload_date = self._html_search_regex(
+            r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
         if 'Ago' in upload_date:
             days = int(re.search(r'([0-9]+)', upload_date).group(1))
             upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')
         else:
             upload_date = unified_strdate(upload_date)
 
-        like_count = self._html_search_regex(r'<strong>Favorited</strong>\s+([^<]+)<', webpage, 'like_count')
-
         comment_count = webpage.count('class="media-comment-contents"')
-        uploader_id = self._html_search_regex(r'"thumb-member-username">\s+<a href="/m/([^"]+)"', webpage, 'uploader_id')
+        uploader_id = self._html_search_regex(
+            r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
+            webpage, 'uploader_id')
 
         categories = self._html_search_meta('keywords', webpage)
         if categories:
@@ -79,8 +97,8 @@ class MotherlessIE(InfoExtractor):
             'uploader_id': uploader_id,
             'thumbnail': self._og_search_thumbnail(webpage),
             'categories': categories,
-            'view_count': int_or_none(view_count.replace(',', '')),
-            'like_count': int_or_none(like_count.replace(',', '')),
+            'view_count': view_count,
+            'like_count': like_count,
             'comment_count': comment_count,
             'age_limit': age_limit,
             'url': video_url,
diff --git a/youtube_dl/extractor/movieclips.py b/youtube_dl/extractor/movieclips.py
new file mode 100644 (file)
index 0000000..456807d
--- /dev/null
@@ -0,0 +1,78 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    compat_str,
+    clean_html,
+)
+
+
+class MovieClipsIE(InfoExtractor):
+    _VALID_URL = r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)(?:-(?P<display_id>[\da-z-]+))?'
+    _TEST = {
+        'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/',
+        'info_dict': {
+            'id': 'Wy7ZU',
+            'display_id': 'my-week-with-marilyn-movie-do-you-love-me',
+            'ext': 'mp4',
+            'title': 'My Week with Marilyn - Do You Love Me?',
+            'description': 'md5:e86795bd332fe3cff461e7c8dc542acb',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        },
+        'params': {
+            # rtmp download
+            'skip_download': True,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+        show_id = display_id or video_id
+
+        config = self._download_xml(
+            'http://config.movieclips.com/player/config/%s' % video_id,
+            show_id, 'Downloading player config')
+
+        if config.find('./country-region').text == 'false':
+            raise ExtractorError(
+                '%s said: %s' % (self.IE_NAME, config.find('./region_alert').text), expected=True)
+
+        properties = config.find('./video/properties')
+        smil_file = properties.attrib['smil_file']
+
+        smil = self._download_xml(smil_file, show_id, 'Downloading SMIL')
+        base_url = smil.find('./head/meta').attrib['base']
+
+        formats = []
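+        # Each <video> node appears to be one RTMP rendition: the SMIL base is the server URL, src the play path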
+        for video in smil.findall('./body/switch/video'):
+            vbr = int(video.attrib['system-bitrate']) / 1000
+            src = video.attrib['src']
+            formats.append({
+                'url': base_url,
+                'play_path': src,
+                'ext': src.split(':')[0],
+                'vbr': vbr,
+                'format_id': '%dk' % vbr,
+            })
+
+        self._sort_formats(formats)
+
+        title = '%s - %s' % (properties.attrib['clip_movie_title'], properties.attrib['clip_title'])
+        description = clean_html(compat_str(properties.attrib['clip_description']))
+        thumbnail = properties.attrib['image']
+        categories = properties.attrib['clip_categories'].split(',')
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'categories': categories,
+            'formats': formats,
+        }
index 43146180ad2b272104cb2234de3f4482e7c330fb..f130b75c416ad3fe2e8d4ac3221799d1eb4aa1b9 100644 (file)
@@ -27,7 +27,7 @@ class MoviezineIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         jsplayer = self._download_webpage('http://www.moviezine.se/api/player.js?video=%s' % video_id, video_id, 'Downloading js api player')
 
-        formats =[{
+        formats = [{
             'format_id': 'sd',
             'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'),
             'quality': 0,
index 4191cf7a0c808034edf37aa07fa8338174a8d7e3..6101063f2ef9695dd86bbabedec134a4387a9332 100644 (file)
@@ -24,4 +24,4 @@ class MovShareIE(NovaMovIE):
             'title': 'dissapeared image',
             'description': 'optical illusion  dissapeared image  magic illusion',
         }
-    }
\ No newline at end of file
+    }
index 387935d4db784641377b72f5be9ec5e7649f5908..88c9501cd4e34492003a1fe67923d1d84a9e2d2b 100644 (file)
@@ -44,7 +44,7 @@ class MporaIE(InfoExtractor):
                     r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'],
                     False, default=None)
                 vcodec = src['type'].partition('/')[2]
-                
+
                 formats.append({
                     'format_id': encoding_id + '-' + vcodec,
                     'url': src['src'],
index 228b42d2b940d8eadd0fa3d5e61d0836fd19b7b7..b482d6d4dfb09416bc46ef43ac21dff8bf2d5744 100644 (file)
@@ -33,7 +33,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url)
         if not m:
             return rtmp_video_url
-        base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
+        base = 'http://viacommtvstrmfs.fplive.net/'
         return base + m.group('finalid')
 
     def _get_feed_url(self, uri):
@@ -53,23 +53,23 @@ class MTVServicesInfoExtractor(InfoExtractor):
         # Otherwise we get a webpage that would execute some javascript
         req.add_header('Youtubedl-user-agent', 'curl/7')
         webpage = self._download_webpage(req, mtvn_id,
-            'Downloading mobile page')
+                                         'Downloading mobile page')
         metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))
         req = HEADRequest(metrics_url)
         response = self._request_webpage(req, mtvn_id, 'Resolving url')
         url = response.geturl()
         # Transform the url to get the best quality:
         url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
-        return [{'url': url,'ext': 'mp4'}]
+        return [{'url': url, 'ext': 'mp4'}]
 
     def _extract_video_formats(self, mdoc, mtvn_id):
         if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None:
             if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
                 self.to_screen('The normal version is not available from your '
-                    'country, trying with the mobile version')
+                               'country, trying with the mobile version')
                 return self._extract_mobile_video_formats(mtvn_id)
             raise ExtractorError('This video is not available from your country.',
-                expected=True)
+                                 expected=True)
 
         formats = []
         for rendition in mdoc.findall('.//rendition'):
@@ -98,7 +98,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
             mediagen_url += '&acceptMethods=fms'
 
         mediagen_doc = self._download_xml(mediagen_url, video_id,
-            'Downloading video urls')
+                                          'Downloading video urls')
 
         description_node = itemdoc.find('description')
         if description_node is not None:
@@ -126,7 +126,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         # This a short id that's used in the webpage urls
         mtvn_id = None
         mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category',
-                'scheme', 'urn:mtvn:id')
+                                       'scheme', 'urn:mtvn:id')
         if mtvn_id_node is not None:
             mtvn_id = mtvn_id_node.text
 
@@ -145,7 +145,8 @@ class MTVServicesInfoExtractor(InfoExtractor):
         idoc = self._download_xml(
             feed_url + '?' + data, video_id,
             'Downloading info', transform_source=fix_xml_ampersands)
-        return [self._get_video_info(item) for item in idoc.findall('.//item')]
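+        # Wrap the items in a playlist result so feeds with multiple videos yield every entry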
+        return self.playlist_result(
+            [self._get_video_info(item) for item in idoc.findall('.//item')])
 
     def _real_extract(self, url):
         title = url_basename(url)
@@ -163,7 +164,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         if mgid is None or ':' not in mgid:
             mgid = self._search_regex(
                 [r'data-mgid="(.*?)"', r'swfobject.embedSWF\(".*?(mgid:.*?)"'],
-                webpage, u'mgid')
+                webpage, 'mgid')
         return self._get_videos_info(mgid)
 
 
@@ -186,7 +187,8 @@ class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
     def _get_feed_url(self, uri):
         video_id = self._id_from_uri(uri)
         site_id = uri.replace(video_id, '')
-        config_url = 'http://media.mtvnservices.com/pmt/e1/players/{0}/config.xml'.format(site_id)
+        config_url = ('http://media.mtvnservices.com/pmt/e1/players/{0}/'
+                      'context4/context5/config.xml'.format(site_id))
         config_doc = self._download_xml(config_url, video_id)
         feed_node = config_doc.find('.//feed')
         feed_url = feed_node.text.strip().split('?')[0]
@@ -238,15 +240,15 @@ class MTVIE(MTVServicesInfoExtractor):
         uri = mobj.groupdict().get('mgid')
         if uri is None:
             webpage = self._download_webpage(url, video_id)
-    
+
             # Some videos come from Vevo.com
             m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";',
                                webpage, re.DOTALL)
             if m_vevo:
-                vevo_id = m_vevo.group(1);
+                vevo_id = m_vevo.group(1)
                 self.to_screen('Vevo video detected: %s' % vevo_id)
                 return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
-    
+
             uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
         return self._get_videos_info(uri)
 
diff --git a/youtube_dl/extractor/muenchentv.py b/youtube_dl/extractor/muenchentv.py
new file mode 100644 (file)
index 0000000..b4e8ad1
--- /dev/null
@@ -0,0 +1,75 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    int_or_none,
+    js_to_json,
+)
+
+
+class MuenchenTVIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?muenchen\.tv/livestream'
+    IE_DESC = 'münchen.tv'
+    _TEST = {
+        'url': 'http://www.muenchen.tv/livestream/',
+        'info_dict': {
+            'id': '5334',
+            'display_id': 'live',
+            'ext': 'mp4',
+            'title': 're:^münchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+            'is_live': True,
+            'thumbnail': 're:^https?://.*\.jpg$'
+        },
+        'params': {
+            'skip_download': True,
+        }
+    }
+
+    def _real_extract(self, url):
+        display_id = 'live'
+        webpage = self._download_webpage(url, display_id)
+
+        title = self._live_title(self._og_search_title(webpage))
+
+        data_js = self._search_regex(
+            r'(?s)\nplaylist:\s*(\[.*?}\]),related:',
+            webpage, 'playlist configuration')
+        data_json = js_to_json(data_js)
+        data = json.loads(data_json)[0]
+
+        video_id = data['mediaid']
+        thumbnail = data.get('image')
+
+        formats = []
+        for format_num, s in enumerate(data['sources']):
+            ext = determine_ext(s['file'], None)
+            label_str = s.get('label')
+            if label_str is None:
+                label_str = '_%d' % format_num
+
+            if ext is None:
+                format_id = label_str
+            else:
+                format_id = '%s-%s' % (ext, label_str)
+
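+            # Deprioritize .smil entries in favour of direct streams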
+            formats.append({
+                'url': s['file'],
+                'tbr': int_or_none(s.get('label')),
+                'ext': 'mp4',
+                'format_id': format_id,
+                'preference': -100 if '.smil' in s['file'] else 0,
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'formats': formats,
+            'is_live': True,
+            'thumbnail': thumbnail,
+        }
index 42d7a82a5dcc50fe0f84c48cb96fdf4b4a794c37..50d92b50ae5ec2fa49e45cc64aea7f08cc21ccea 100644 (file)
@@ -72,4 +72,4 @@ class MusicPlayOnIE(InfoExtractor):
             'duration': int_or_none(duration),
             'view_count': int_or_none(view_count),
             'formats': formats,
-        }
\ No newline at end of file
+        }
diff --git a/youtube_dl/extractor/musicvault.py b/youtube_dl/extractor/musicvault.py
new file mode 100644 (file)
index 0000000..ebb1eb8
--- /dev/null
@@ -0,0 +1,76 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    unified_strdate,
+)
+
+
+class MusicVaultIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.musicvault\.com/(?P<uploader_id>[^/?#]*)/video/(?P<display_id>[^/?#]*)_(?P<id>[0-9]+)\.html'
+    _TEST = {
+        'url': 'http://www.musicvault.com/the-allman-brothers-band/video/straight-from-the-heart_1010863.html',
+        'md5': '2cdbb3ae75f7fb3519821507d2fb3c15',
+        'info_dict': {
+            'id': '1010863',
+            'ext': 'mp4',
+            'uploader_id': 'the-allman-brothers-band',
+            'title': 'Straight from the Heart',
+            'duration': 244,
+            'uploader': 'The Allman Brothers Band',
+            'thumbnail': 're:^https?://.*/thumbnail/.*',
+            'upload_date': '19811216',
+            'location': 'Capitol Theatre (Passaic, NJ)',
+            'description': 'Listen to The Allman Brothers Band perform Straight from the Heart at Capitol Theatre (Passaic, NJ) on Dec 16, 1981',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('display_id')
+        webpage = self._download_webpage(url, display_id)
+
+        thumbnail = self._search_regex(
+            r'<meta itemprop="thumbnail" content="([^"]+)"',
+            webpage, 'thumbnail', fatal=False)
+
+        data_div = self._search_regex(
+            r'(?s)<div class="data">(.*?)</div>', webpage, 'data fields')
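+        # The data block lists uploader, title, date and location in successive <h1>..<h4> tags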
+        uploader = self._html_search_regex(
+            r'<h1.*?>(.*?)</h1>', data_div, 'uploader', fatal=False)
+        title = self._html_search_regex(
+            r'<h2.*?>(.*?)</h2>', data_div, 'title')
+        upload_date = unified_strdate(self._html_search_regex(
+            r'<h3.*?>(.*?)</h3>', data_div, 'upload date', fatal=False))
+        location = self._html_search_regex(
+            r'<h4.*?>(.*?)</h4>', data_div, 'location', fatal=False)
+
+        duration = parse_duration(self._html_search_meta('duration', webpage))
+
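+        # Direct Kaltura playManifest URL; 'uid' and 'wid' below are the ids scraped from the player markup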
+        VIDEO_URL_TEMPLATE = 'http://cdnapi.kaltura.com/p/%(uid)s/sp/%(wid)s/playManifest/entryId/%(entry_id)s/format/url/protocol/http'
+        kaltura_id = self._search_regex(
+            r'<div id="video-detail-player" data-kaltura-id="([^"]+)"',
+            webpage, 'kaltura ID')
+        video_url = VIDEO_URL_TEMPLATE % {
+            'entry_id': kaltura_id,
+            'wid': self._search_regex(r'/wid/_([0-9]+)/', webpage, 'wid'),
+            'uid': self._search_regex(r'uiconf_id/([0-9]+)/', webpage, 'uid'),
+        }
+
+        return {
+            'id': mobj.group('id'),
+            'url': video_url,
+            'ext': 'mp4',
+            'display_id': display_id,
+            'uploader_id': mobj.group('uploader_id'),
+            'thumbnail': thumbnail,
+            'description': self._html_search_meta('description', webpage),
+            'upload_date': upload_date,
+            'location': location,
+            'title': title,
+            'uploader': uploader,
+            'duration': duration,
+        }
index 1772b7f9ae43c2eaef57a15a5b3df5d9e7244213..1e9cf8de9174e086dd7c19525a7dc94025075683 100644 (file)
@@ -1,64 +1,65 @@
-import re
-import json
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
-    determine_ext,
 )
 
 
 class MuzuTVIE(InfoExtractor):
     _VALID_URL = r'https?://www\.muzu\.tv/(.+?)/(.+?)/(?P<id>\d+)'
-    IE_NAME = u'muzu.tv'
+    IE_NAME = 'muzu.tv'
 
     _TEST = {
-        u'url': u'http://www.muzu.tv/defected/marcashken-featuring-sos-cat-walk-original-mix-music-video/1981454/',
-        u'file': u'1981454.mp4',
-        u'md5': u'98f8b2c7bc50578d6a0364fff2bfb000',
-        u'info_dict': {
-            u'title': u'Cat Walk (Original Mix)',
-            u'description': u'md5:90e868994de201b2570e4e5854e19420',
-            u'uploader': u'MarcAshken featuring SOS',
+        'url': 'http://www.muzu.tv/defected/marcashken-featuring-sos-cat-walk-original-mix-music-video/1981454/',
+        'md5': '98f8b2c7bc50578d6a0364fff2bfb000',
+        'info_dict': {
+            'id': '1981454',
+            'ext': 'mp4',
+            'title': 'Cat Walk (Original Mix)',
+            'description': 'md5:90e868994de201b2570e4e5854e19420',
+            'uploader': 'MarcAshken featuring SOS',
         },
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
-        info_data = compat_urllib_parse.urlencode({'format': 'json',
-                                                   'url': url,
-                                                   })
-        video_info_page = self._download_webpage('http://www.muzu.tv/api/oembed/?%s' % info_data,
-                                                 video_id, u'Downloading video info')
-        info = json.loads(video_info_page)
+        info_data = compat_urllib_parse.urlencode({
+            'format': 'json',
+            'url': url,
+        })
+        info = self._download_json(
+            'http://www.muzu.tv/api/oembed/?%s' % info_data,
+            video_id, 'Downloading video info')
 
-        player_info_page = self._download_webpage('http://player.muzu.tv/player/playerInit?ai=%s' % video_id,
-                                                  video_id, u'Downloading player info')
-        video_info = json.loads(player_info_page)['videos'][0]
-        for quality in ['1080' , '720', '480', '360']:
+        player_info = self._download_json(
+            'http://player.muzu.tv/player/playerInit?ai=%s' % video_id,
+            video_id, 'Downloading player info')
+        video_info = player_info['videos'][0]
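+        # Pick the best available quality; if no vNNNN flag is set, the loop falls through with '360'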
+        for quality in ['1080', '720', '480', '360']:
             if video_info.get('v%s' % quality):
                 break
 
-        data = compat_urllib_parse.urlencode({'ai': video_id,
-                                              # Even if each time you watch a video the hash changes,
-                                              # it seems to work for different videos, and it will work
-                                              # even if you use any non empty string as a hash
-                                              'viewhash': 'VBNff6djeV4HV5TRPW5kOHub2k',
-                                              'device': 'web',
-                                              'qv': quality,
-                                              })
-        video_url_page = self._download_webpage('http://player.muzu.tv/player/requestVideo?%s' % data,
-                                                video_id, u'Downloading video url')
-        video_url_info = json.loads(video_url_page)
+        data = compat_urllib_parse.urlencode({
+            'ai': video_id,
+            # The hash changes on every view, yet any non-empty
+            # string seems to be accepted, and the same value
+            # works across different videos
+            'viewhash': 'VBNff6djeV4HV5TRPW5kOHub2k',
+            'device': 'web',
+            'qv': quality,
+        })
+        video_url_info = self._download_json(
+            'http://player.muzu.tv/player/requestVideo?%s' % data,
+            video_id, 'Downloading video url')
         video_url = video_url_info['url']
 
-        return {'id': video_id,
-                'title': info['title'],
-                'url': video_url,
-                'ext': determine_ext(video_url),
-                'thumbnail': info['thumbnail_url'],
-                'description': info['description'],
-                'uploader': info['author_name'],
-                }
+        return {
+            'id': video_id,
+            'title': info['title'],
+            'url': video_url,
+            'thumbnail': info['thumbnail_url'],
+            'description': info['description'],
+            'uploader': info['author_name'],
+        }
index c16939f5437cf55b6d3e51e9d51ab45d1d12392f..83414a2325586d7319c06247fa037c42bb2b199a 100644 (file)
@@ -1,12 +1,14 @@
+# encoding: utf-8
 from __future__ import unicode_literals
 
 import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
 )
+from ..utils import ExtractorError
 
 
 class MySpaceIE(InfoExtractor):
@@ -14,33 +16,58 @@ class MySpaceIE(InfoExtractor):
 
     _TESTS = [
         {
-            'url': 'https://myspace.com/coldplay/video/viva-la-vida/100008689',
+            'url': 'https://myspace.com/fiveminutestothestage/video/little-big-town/109594919',
             'info_dict': {
-                'id': '100008689',
+                'id': '109594919',
                 'ext': 'flv',
-                'title': 'Viva La Vida',
-                'description': 'The official Viva La Vida video, directed by Hype Williams',
-                'uploader': 'Coldplay',
-                'uploader_id': 'coldplay',
+                'title': 'Little Big Town',
+                'description': 'This country quartet was all smiles while playing a sold out show at the Pacific Amphitheatre in Orange County, California.',
+                'uploader': 'Five Minutes to the Stage',
+                'uploader_id': 'fiveminutestothestage',
             },
             'params': {
                 # rtmp download
                 'skip_download': True,
             },
         },
-        # song
+        # songs
         {
-            'url': 'https://myspace.com/spiderbags/music/song/darkness-in-my-heart-39008454-27041242',
+            'url': 'https://myspace.com/killsorrow/music/song/of-weakened-soul...-93388656-103880681',
             'info_dict': {
-                'id': '39008454',
+                'id': '93388656',
                 'ext': 'flv',
-                'title': 'Darkness In My Heart',
-                'uploader_id': 'spiderbags',
+                'title': 'Of weakened soul...',
+                'uploader': 'Killsorrow',
+                'uploader_id': 'killsorrow',
             },
             'params': {
                 # rtmp download
                 'skip_download': True,
             },
+        }, {
+            'add_ie': ['Vevo'],
+            'url': 'https://myspace.com/threedaysgrace/music/song/animal-i-have-become-28400208-28218041',
+            'info_dict': {
+                'id': 'USZM20600099',
+                'ext': 'mp4',
+                'title': 'Animal I Have Become',
+                'uploader': 'Three Days Grace',
+                'timestamp': int,
+                'upload_date': '20060502',
+            },
+            'skip': 'VEVO is only available in some countries',
+        }, {
+            'add_ie': ['Youtube'],
+            'url': 'https://myspace.com/starset2/music/song/first-light-95799905-106964426',
+            'info_dict': {
+                'id': 'ypWvQgnJrSU',
+                'ext': 'mp4',
+                'title': 'Starset - First Light',
+                'description': 'md5:2d5db6c9d11d527683bcda818d332414',
+                'uploader': 'Jacob Soren',
+                'uploader_id': 'SorenPromotions',
+                'upload_date': '20140725',
+            }
         },
     ]
 
@@ -48,22 +75,47 @@ class MySpaceIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
+        player_url = self._search_regex(
+            r'playerSwf":"([^"?]*)', webpage, 'player URL')
 
         if mobj.group('mediatype').startswith('music/song'):
             # songs don't store any useful info in the 'context' variable
+            song_data = self._search_regex(
+                r'''<button.*data-song-id=(["\'])%s\1.*''' % video_id,
+                webpage, 'song_data', default=None, group=0)
+            if song_data is None:
+                # some songs in an album are not playable
+                self.report_warning(
+                    '%s: No downloadable song on this page' % video_id)
+                return
+
             def search_data(name):
-                return self._search_regex(r'data-%s="(.*?)"' % name, webpage,
-                    name)
+                return self._search_regex(
+                    r'''data-%s=([\'"])(?P<data>.*?)\1''' % name,
+                    song_data, name, default='', group='data')
             streamUrl = search_data('stream-url')
+            if not streamUrl:
+                vevo_id = search_data('vevo-id')
+                youtube_id = search_data('youtube-id')
+                if vevo_id:
+                    self.to_screen('Vevo video detected: %s' % vevo_id)
+                    return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
+                elif youtube_id:
+                    self.to_screen('Youtube video detected: %s' % youtube_id)
+                    return self.url_result(youtube_id, ie='Youtube')
+                else:
+                    raise ExtractorError(
+                        'Found song but don\'t know how to download it')
             info = {
                 'id': video_id,
                 'title': self._og_search_title(webpage),
+                'uploader': search_data('artist-name'),
                 'uploader_id': search_data('artist-username'),
                 'thumbnail': self._og_search_thumbnail(webpage),
             }
         else:
-            context = json.loads(self._search_regex(r'context = ({.*?});', webpage,
-                u'context'))
+            context = json.loads(self._search_regex(
+                r'context = ({.*?});', webpage, 'context'))
             video = context['video']
             streamUrl = video['streamUrl']
             info = {
@@ -79,6 +131,50 @@ class MySpaceIE(InfoExtractor):
         info.update({
             'url': rtmp_url,
             'play_path': play_path,
+            'player_url': player_url,
             'ext': 'flv',
         })
         return info
+
+
+class MySpaceAlbumIE(InfoExtractor):
+    IE_NAME = 'MySpace:album'
+    _VALID_URL = r'https?://myspace\.com/([^/]+)/music/album/(?P<title>.*-)(?P<id>\d+)'
+
+    _TESTS = [{
+        'url': 'https://myspace.com/starset2/music/album/transmissions-19455773',
+        'info_dict': {
+            'title': 'Transmissions',
+            'id': '19455773',
+        },
+        'playlist_count': 14,
+        'skip': 'this album is only available in some countries',
+    }, {
+        'url': 'https://myspace.com/killsorrow/music/album/the-demo-18596029',
+        'info_dict': {
+            'title': 'The Demo',
+            'id': '18596029',
+        },
+        'playlist_count': 5,
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        playlist_id = mobj.group('id')
+        display_id = mobj.group('title') + playlist_id
+        webpage = self._download_webpage(url, display_id)
+        tracks_paths = re.findall(r'"music:song" content="(.*?)"', webpage)
+        if not tracks_paths:
+            raise ExtractorError(
+                '%s: No songs found, try using a proxy' % display_id,
+                expected=True)
+        entries = [
+            self.url_result(t_path, ie=MySpaceIE.ie_key())
+            for t_path in tracks_paths]
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'display_id': display_id,
+            'title': self._og_search_title(webpage),
+            'entries': entries,
+        }
index 4fa0575f8a282aa6f8f561a7f18bc0129fceea8c..51e540814be209856a9a71f891e55eeb4ba559c2 100644 (file)
@@ -13,9 +13,10 @@ class MySpassIE(InfoExtractor):
     _VALID_URL = r'http://www\.myspass\.de/.*'
     _TEST = {
         'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
-        'file': '11741.mp4',
         'md5': '0b49f4844a068f8b33f4b7c88405862b',
         'info_dict': {
+            'id': '11741',
+            'ext': 'mp4',
             "description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
             "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
         },
index ccb5959c4046e73f1263ecee19e829b418c6d506..5e754fcffb6403cbd359b9b358ad3879ef279f53 100644 (file)
@@ -7,11 +7,12 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_ord,
     compat_urllib_parse,
     compat_urllib_request,
-
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -32,7 +33,7 @@ class MyVideoIE(InfoExtractor):
     # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
     # Released into the Public Domain by Tristan Fischer on 2013-05-19
     # https://github.com/rg3/youtube-dl/pull/842
-    def __rc4crypt(self,data, key):
+    def __rc4crypt(self, data, key):
         x = 0
         box = list(range(256))
         for i in list(range(256)):
@@ -48,17 +49,17 @@ class MyVideoIE(InfoExtractor):
             out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
         return out
 
-    def __md5(self,s):
+    def __md5(self, s):
         return hashlib.md5(s).hexdigest().encode()
 
-    def _real_extract(self,url):
+    def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
         GK = (
-          b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
-          b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
-          b'TnpsbA0KTVRkbU1tSTRNdz09'
+            b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
+            b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
+            b'TnpsbA0KTVRkbU1tSTRNdz09'
         )
 
         # Get video webpage
@@ -71,7 +72,7 @@ class MyVideoIE(InfoExtractor):
             video_url = mobj.group(1) + '.flv'
 
             video_title = self._html_search_regex('<title>([^<]+)</title>',
-                webpage, 'title')
+                                                  webpage, 'title')
 
             return {
                 'id': video_id,
@@ -161,7 +162,7 @@ class MyVideoIE(InfoExtractor):
         video_swfobj = compat_urllib_parse.unquote(video_swfobj)
 
         video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>",
-            webpage, 'title')
+                                              webpage, 'title')
 
         return {
             'id': video_id,
@@ -172,4 +173,3 @@ class MyVideoIE(InfoExtractor):
             'play_path': video_playpath,
             'player_url': video_swfobj,
         }
-
diff --git a/youtube_dl/extractor/myvidster.py b/youtube_dl/extractor/myvidster.py
new file mode 100644 (file)
index 0000000..a94ab83
--- /dev/null
@@ -0,0 +1,29 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class MyVidsterIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?myvidster\.com/video/(?P<id>\d+)/'
+
+    _TEST = {
+        'url': 'http://www.myvidster.com/video/32059805/Hot_chemistry_with_raw_love_making',
+        'md5': '95296d0231c1363222c3441af62dc4ca',
+        'info_dict': {
+            'id': '3685814',
+            'title': 'md5:7d8427d6d02c4fbcef50fe269980c749',
+            'upload_date': '20141027',
+            'uploader_id': 'utkualp',
+            'ext': 'mp4',
+            'age_limit': 18,
+        },
+        'add_ie': ['XHamster'],
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
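+        # MyVidster pages only bookmark videos hosted elsewhere; follow the 'videolink' and let the matching extractor (e.g. XHamster) handle it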
+        return self.url_result(self._html_search_regex(
+            r'rel="videolink" href="(?P<real_url>.*)">',
+            webpage, 'real video url'))
index c0231c197b12b86c669e9cff4b34a5c2ac1639bf..fbe34defd868694d44f4371825322a41b39019a3 100644 (file)
@@ -7,6 +7,7 @@ from .common import InfoExtractor
 from ..utils import (
     compat_urllib_parse,
     ExtractorError,
+    clean_html,
 )
 
 
@@ -29,12 +30,17 @@ class NaverIE(InfoExtractor):
         video_id = mobj.group(1)
         webpage = self._download_webpage(url, video_id)
         m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
-            webpage)
+                         webpage)
         if m_id is None:
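+            # No player config found; surface the page's own error message (e.g. a region block) when present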
+            m_error = re.search(
+                r'(?s)<div class="nation_error">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
+                webpage)
+            if m_error:
+                raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
             raise ExtractorError('couldn\'t extract vid and key')
         vid = m_id.group(1)
         key = m_id.group(2)
-        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key,})
+        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
         query_urls = compat_urllib_parse.urlencode({
             'masterVid': vid,
             'protocol': 'p2p',
@@ -59,7 +65,7 @@ class NaverIE(InfoExtractor):
             if domain.startswith('rtmp'):
                 f.update({
                     'ext': 'flv',
-                    'rtmp_protocol': '1', # rtmpt
+                    'rtmp_protocol': '1',  # rtmpt
                 })
             formats.append(f)
         self._sort_formats(formats)
index 633b42f728489c6e9f9c61a98b8b0b4d38e57be1..862b706bf96719aa071f1f89c73f2a4ef45a20b1 100644 (file)
@@ -1,39 +1,47 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
+from ..utils import (
+    remove_end,
+    parse_duration,
+)
 
 
 class NBAIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)(?:/index\.html)?(?:\?.*)?$'
-    _TEST = {
+    _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)/?(?:/index\.html)?(?:\?.*)?$'
+    _TESTS = [{
         'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
-        'md5': u'c0edcfc37607344e2ff8f13c378c88a4',
+        'md5': 'c0edcfc37607344e2ff8f13c378c88a4',
         'info_dict': {
             'id': '0021200253-okc-bkn-recap.nba',
             'ext': 'mp4',
-            'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
             'title': 'Thunder vs. Nets',
+            'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
+            'duration': 181,
         },
-    }
+    }, {
+        'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         video_url = 'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
 
         shortened_video_id = video_id.rpartition('/')[2]
-        title = self._og_search_title(webpage, default=shortened_video_id).replace('NBA.com: ', '')
+        title = remove_end(
+            self._og_search_title(webpage, default=shortened_video_id), ' : NBA.com')
 
-        description = self._html_search_regex(r'<meta name="description" (?:content|value)="(.*?)" />', webpage, 'description', fatal=False)
+        description = self._og_search_description(webpage)
+        duration = parse_duration(
+            self._html_search_meta('duration', webpage, 'duration'))
 
         return {
             'id': shortened_video_id,
             'url': video_url,
             'title': title,
             'description': description,
+            'duration': duration,
         }
index d2e4acbada5b99c5c3eac4fe6b966ce77dab1ef9..bf5132721ecfe03d5ced4e637c512fe23ff6791c 100644 (file)
@@ -12,24 +12,37 @@ from ..utils import (
 
 
 class NBCIE(InfoExtractor):
-    _VALID_URL = r'http://www\.nbc\.com/[^/]+/video/[^/]+/(?P<id>n?\d+)'
-
-    _TEST = {
-        'url': 'http://www.nbc.com/chicago-fire/video/i-am-a-firefighter/2734188',
-        'md5': '54d0fbc33e0b853a65d7b4de5c06d64e',
-        'info_dict': {
-            'id': 'u1RInQZRN7QJ',
-            'ext': 'flv',
-            'title': 'I Am a Firefighter',
-            'description': 'An emergency puts Dawson\'s firefighter skills to the ultimate test in this four-part digital series.',
+    _VALID_URL = r'http://www\.nbc\.com/(?:[^/]+/)+(?P<id>n?\d+)'
+
+    _TESTS = [
+        {
+            'url': 'http://www.nbc.com/chicago-fire/video/i-am-a-firefighter/2734188',
+            # md5 checksum is not stable
+            'info_dict': {
+                'id': 'bTmnLCvIbaaH',
+                'ext': 'flv',
+                'title': 'I Am a Firefighter',
+                'description': 'An emergency puts Dawson\'s firefighter skills to the ultimate test in this four-part digital series.',
+            },
+        },
+        {
+            'url': 'http://www.nbc.com/the-tonight-show/episodes/176',
+            'info_dict': {
+                'id': 'XwU9KZkp98TH',
+                'ext': 'flv',
+                'title': 'Ricky Gervais, Steven Van Zandt, ILoveMakonnen',
+                'description': 'A brand new episode of The Tonight Show welcomes Ricky Gervais, Steven Van Zandt and ILoveMakonnen.',
+            },
+            'skip': 'Only works from US',
         },
-    }
+    ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        theplatform_url = self._search_regex('class="video-player video-player-full" data-mpx-url="(.*?)"', webpage, 'theplatform url')
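+        # Match both player embed variants and drop the '_no_endcard' suffix from the ThePlatform URL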
+        theplatform_url = self._search_regex(
+            '(?:class="video-player video-player-full" data-mpx-url|class="player" src)="(.*?)"',
+            webpage, 'theplatform url').replace('_no_endcard', '')
         if theplatform_url.startswith('//'):
             theplatform_url = 'http:' + theplatform_url
         return self.url_result(theplatform_url)
@@ -57,7 +70,7 @@ class NBCNewsIE(InfoExtractor):
             'md5': 'b2421750c9f260783721d898f4c42063',
             'info_dict': {
                 'id': 'I1wpAI_zmhsQ',
-                'ext': 'flv',
+                'ext': 'mp4',
                 'title': 'How Twitter Reacted To The Snowden Interview',
                 'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64',
             },
@@ -97,6 +110,8 @@ class NBCNewsIE(InfoExtractor):
             ]
 
             for base_url in base_urls:
+                if not base_url:
+                    continue
                 playlist_url = base_url + '?form=MPXNBCNewsAPI'
                 all_videos = self._download_json(playlist_url, title)['videos']
 
index 94d5ba98289529ee3148fe1460fe67ca463cc6ab..f49c666909a270ad18e36a2d1177ef681adc3121 100644 (file)
@@ -18,16 +18,16 @@ class NDRIE(InfoExtractor):
 
     _TESTS = [
         {
-            'url': 'http://www.ndr.de/fernsehen/media/dienordreportage325.html',
-            'md5': '4a4eeafd17c3058b65f0c8f091355855',
+            'url': 'http://www.ndr.de/fernsehen/sendungen/nordmagazin/Kartoffeltage-in-der-Lewitz,nordmagazin25866.html',
+            'md5': '5bc5f5b92c82c0f8b26cddca34f8bb2c',
             'note': 'Video file',
             'info_dict': {
-                'id': '325',
+                'id': '25866',
                 'ext': 'mp4',
-                'title': 'Blaue Bohnen aus Blocken',
-                'description': 'md5:190d71ba2ccddc805ed01547718963bc',
-                'duration': 1715,
-            },
+                'title': 'Kartoffeltage in der Lewitz',
+                'description': 'md5:48c4c04dde604c8a9971b3d4e3b9eaa8',
+                'duration': 166,
+            }
         },
         {
             'url': 'http://www.ndr.de/info/audio51535.html',
@@ -67,7 +67,7 @@ class NDRIE(InfoExtractor):
 
         thumbnail = None
 
-        video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.hi\.mp4', type:"video/mp4"},''', page)
+        video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.(lo|hi|hq)\.mp4', type:"video/mp4"},''', page)
         if video_url:
             thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
             if thumbnails:
@@ -91,4 +91,4 @@ class NDRIE(InfoExtractor):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 2e72e8915aab601b6916fd18e4f64090a613986e..cd117b04edeff88d90842f2ed8e15a8c43bde714 100644 (file)
@@ -23,12 +23,12 @@ class NewgroundsIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         music_id = mobj.group('id')
         webpage = self._download_webpage(url, music_id)
-        
+
         title = self._html_search_regex(
             r',"name":"([^"]+)",', webpage, 'music title')
         uploader = self._html_search_regex(
             r',"artist":"([^"]+)",', webpage, 'music uploader')
-        
+
         music_url_json_string = self._html_search_regex(
             r'({"url":"[^"]+"),', webpage, 'music url') + '}'
         music_url_json = json.loads(music_url_json_string)
index 551bd4d7a511c51f3d5809cf488d7e454b6ed6b5..85fcad06b51dc9ce87bdd563043c92a126cc8eea 100644 (file)
@@ -89,4 +89,4 @@ class NewstubeIE(InfoExtractor):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index ba7b77a467e8e7ba969fb9d4555e47fad6ed7196..7ce1d481d0a73218598bf24fee964ca8ec65956d 100644 (file)
@@ -38,12 +38,12 @@ class NFBIE(InfoExtractor):
         page = self._download_webpage('https://www.nfb.ca/film/%s' % video_id, video_id, 'Downloading film page')
 
         uploader_id = self._html_search_regex(r'<a class="director-link" href="/explore-all-directors/([^/]+)/"',
-            page, 'director id', fatal=False)
+                                              page, 'director id', fatal=False)
         uploader = self._html_search_regex(r'<em class="director-name" itemprop="name">([^<]+)</em>',
-            page, 'director name', fatal=False)
+                                           page, 'director name', fatal=False)
 
         request = compat_urllib_request.Request('https://www.nfb.ca/film/%s/player_config' % video_id,
-            compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
+                                                compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')
 
@@ -93,4 +93,4 @@ class NFBIE(InfoExtractor):
             'uploader': uploader,
             'uploader_id': uploader_id,
             'formats': formats,
-        }
\ No newline at end of file
+        }
diff --git a/youtube_dl/extractor/nfl.py b/youtube_dl/extractor/nfl.py
new file mode 100644 (file)
index 0000000..cc7c921
--- /dev/null
@@ -0,0 +1,144 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    compat_urllib_parse_urlparse,
+    int_or_none,
+    remove_end,
+)
+
+
+class NFLIE(InfoExtractor):
+    IE_NAME = 'nfl.com'
+    _VALID_URL = r'''(?x)https?://
+        (?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/
+        (?:.+?/)*
+        (?P<id>(?:\d[a-z]{2}\d{13}|\w{8}\-(?:\w{4}\-){3}\w{12}))'''
+    _TESTS = [
+        {
+            'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
+            'md5': '394ef771ddcd1354f665b471d78ec4c6',
+            'info_dict': {
+                'id': '0ap3000000398478',
+                'ext': 'mp4',
+                'title': 'Week 3: Redskins vs. Eagles highlights',
+                'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
+                'upload_date': '20140921',
+                'timestamp': 1411337580,
+                'thumbnail': 're:^https?://.*\.jpg$',
+            }
+        },
+        {
+            'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
+            'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
+            'info_dict': {
+                'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
+                'ext': 'mp4',
+                'title': 'LIVE: Post Game vs. Browns',
+                'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
+                'upload_date': '20131229',
+                'timestamp': 1388354455,
+                'thumbnail': 're:^https?://.*\.jpg$',
+            }
+        }
+    ]
+
+    @staticmethod
+    def prepend_host(host, url):
+        if not url.startswith('http'):
+            if not url.startswith('/'):
+                url = '/%s' % url
+            url = 'http://{0:}{1:}'.format(host, url)
+        return url
+
+    @staticmethod
+    def format_from_stream(stream, protocol, host, path_prefix='',
+                           preference=0, note=None):
+        url = '{protocol:}://{host:}/{prefix:}{path:}'.format(
+            protocol=protocol,
+            host=host,
+            prefix=path_prefix,
+            path=stream.get('path'),
+        )
+        return {
+            'url': url,
+            'vbr': int_or_none(stream.get('rate', 0), 1000),
+            'preference': preference,
+            'format_note': note,
+        }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id, host = mobj.group('id'), mobj.group('host')
+
+        webpage = self._download_webpage(url, video_id)
+
+        config_url = NFLIE.prepend_host(host, self._search_regex(
+            r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL'))
+        config = self._download_json(config_url, video_id,
+                                     note='Downloading player config')
+        url_template = NFLIE.prepend_host(
+            host, '{contentURLTemplate:}'.format(**config))
+        video_data = self._download_json(
+            url_template.format(id=video_id), video_id)
+
+        formats = []
+        cdn_data = video_data.get('cdnData', {})
+        streams = cdn_data.get('bitrateInfo', [])
+        if cdn_data.get('format') == 'EXTERNAL_HTTP_STREAM':
+            parts = compat_urllib_parse_urlparse(cdn_data.get('uri'))
+            protocol, host = parts.scheme, parts.netloc
+            for stream in streams:
+                formats.append(
+                    NFLIE.format_from_stream(stream, protocol, host))
+        else:
+            cdns = config.get('cdns')
+            if not cdns:
+                raise ExtractorError('Failed to get CDN data', expected=True)
+
+            for name, cdn in cdns.items():
+                # LimeLight streams don't seem to work
+                if cdn.get('name') == 'LIMELIGHT':
+                    continue
+
+                protocol = cdn.get('protocol')
+                host = remove_end(cdn.get('host', ''), '/')
+                if not (protocol and host):
+                    continue
+
+                prefix = cdn.get('pathprefix', '')
+                if prefix and not prefix.endswith('/'):
+                    prefix = '%s/' % prefix
+
+                preference = 0
+                if protocol == 'rtmp':
+                    preference = -2
+                elif 'prog' in name.lower():
+                    preference = 1
+
+                for stream in streams:
+                    formats.append(
+                        NFLIE.format_from_stream(stream, protocol, host,
+                                                 prefix, preference, name))
+
+        self._sort_formats(formats)
+
+        thumbnail = None
+        for q in ('xl', 'l', 'm', 's', 'xs'):
+            thumbnail = video_data.get('imagePaths', {}).get(q)
+            if thumbnail:
+                break
+
+        return {
+            'id': video_id,
+            'title': video_data.get('headline'),
+            'formats': formats,
+            'description': video_data.get('caption'),
+            'duration': video_data.get('duration'),
+            'thumbnail': thumbnail,
+            'timestamp': int_or_none(video_data.get('posted'), 1000),
+        }
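
A note on the helpers above: format_from_stream only assembles a URL string and a format dict, so it is easy to exercise in isolation. A minimal sketch, assuming the extractor module is importable; the stream payload and host below are invented, not real NFL API output:

    from youtube_dl.extractor.nfl import NFLIE

    # 'rate' is in bit/s; int_or_none(rate, 1000) scales it down to kbit/s
    sample_stream = {'path': 'nfl/videos/week3.mp4', 'rate': 1200000}
    fmt = NFLIE.format_from_stream(
        sample_stream, 'http', 'cdn.example.invalid',
        path_prefix='mp4/', preference=1, note='prog')
    # fmt == {'url': 'http://cdn.example.invalid/mp4/nfl/videos/week3.mp4',
    #         'vbr': 1200, 'preference': 1, 'format_note': 'prog'}
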
index 2edd806a3f6aa12792f3c8d8065a57fd2e2e70a1..b2f40344f59d75caf94028167dcf5db7ce0f83fd 100644 (file)
@@ -1,11 +1,16 @@
+from __future__ import unicode_literals
+
 import re
 import json
+import os
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urlparse,
     compat_urllib_parse,
-    determine_ext,
+    compat_urllib_parse_urlparse
+)
+from ..utils import (
     unified_strdate,
 )
 
@@ -20,21 +25,26 @@ class NHLBaseInfoExtractor(InfoExtractor):
         self.report_extraction(video_id)
 
         initial_video_url = info['publishPoint']
-        data = compat_urllib_parse.urlencode({
-            'type': 'fvod',
-            'path': initial_video_url.replace('.mp4', '_sd.mp4'),
-        })
-        path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
-        path_doc = self._download_xml(path_url, video_id,
-            u'Downloading final video url')
-        video_url = path_doc.find('path').text
+        if info['formats'] == '1':
+            parsed_url = compat_urllib_parse_urlparse(initial_video_url)
+            filename, ext = os.path.splitext(parsed_url.path)
+            path = '%s_sd%s' % (filename, ext)
+            data = compat_urllib_parse.urlencode({
+                'type': 'fvod',
+                'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])
+            })
+            path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
+            path_doc = self._download_xml(
+                path_url, video_id, 'Downloading final video url')
+            video_url = path_doc.find('path').text
+        else:
+            video_url = initial_video_url
 
         join = compat_urlparse.urljoin
         return {
             'id': video_id,
             'title': info['name'],
             'url': video_url,
-            'ext': determine_ext(video_url),
             'description': info['description'],
             'duration': int(info['duration']),
             'thumbnail': join(join(video_url, '/u/'), info['bigImage']),
@@ -43,41 +53,68 @@ class NHLBaseInfoExtractor(InfoExtractor):
 
 
 class NHLIE(NHLBaseInfoExtractor):
-    IE_NAME = u'nhl.com'
-    _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/console\?.*?(?<=[?&])id=(?P<id>\d+)'
-
-    _TEST = {
-        u'url': u'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
-        u'file': u'453614.mp4',
-        u'info_dict': {
-            u'title': u'Quick clip: Weise 4-3 goal vs Flames',
-            u'description': u'Dale Weise scores his first of the season to put the Canucks up 4-3.',
-            u'duration': 18,
-            u'upload_date': u'20131006',
+    IE_NAME = 'nhl.com'
+    _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/console(?:\?(?:.*?[?&])?)id=(?P<id>[0-9a-z-]+)'
+
+    _TESTS = [{
+        'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
+        'md5': 'db704a4ea09e8d3988c85e36cc892d09',
+        'info_dict': {
+            'id': '453614',
+            'ext': 'mp4',
+            'title': 'Quick clip: Weise 4-3 goal vs Flames',
+            'description': 'Dale Weise scores his first of the season to put the Canucks up 4-3.',
+            'duration': 18,
+            'upload_date': '20131006',
         },
-    }
+    }, {
+        'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h',
+        'md5': 'd22e82bc592f52d37d24b03531ee9696',
+        'info_dict': {
+            'id': '2014020024-628-h',
+            'ext': 'mp4',
+            'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)',
+            'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014',
+            'duration': 0,
+            'upload_date': '20141011',
+        },
+    }, {
+        'url': 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802',
+        'md5': 'c78fc64ea01777e426cfc202b746c825',
+        'info_dict': {
+            'id': '58665',
+            'ext': 'flv',
+            'title': 'Classic Game In Six - April 22, 1979',
+            'description': 'It was the last playoff game for the Leafs in the decade, and the last time the Leafs and Habs played in the playoffs. Great game, not a great ending.',
+            'duration': 400,
+            'upload_date': '20100129'
+        },
+    }, {
+        'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id
-        info_json = self._download_webpage(json_url, video_id,
-            u'Downloading info json')
-        info_json = self._fix_json(info_json)
-        info = json.loads(info_json)[0]
-        return self._extract_video(info)
+        data = self._download_json(
+            json_url, video_id, transform_source=self._fix_json)
+        return self._extract_video(data[0])
 
 
 class NHLVideocenterIE(NHLBaseInfoExtractor):
-    IE_NAME = u'nhl.com:videocenter'
-    IE_DESC = u'NHL videocenter category'
-    _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?.*?catid=(?P<catid>[^&]+))?'
-
-    @classmethod
-    def suitable(cls, url):
-        if NHLIE.suitable(url):
-            return False
-        return super(NHLVideocenterIE, cls).suitable(url)
+    IE_NAME = 'nhl.com:videocenter'
+    IE_DESC = 'NHL videocenter category'
+    _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?[^(id=)]*catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'
+    _TEST = {
+        'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999',
+        'info_dict': {
+            'id': '999',
+            'title': 'Highlights',
+        },
+        'playlist_count': 12,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -86,10 +123,10 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
         cat_id = self._search_regex(
             [r'var defaultCatId = "(.+?)";',
              r'{statusIndex:0,index:0,.*?id:(.*?),'],
-            webpage, u'category id')
+            webpage, 'category id')
         playlist_title = self._html_search_regex(
             r'tab0"[^>]*?>(.*?)</td>',
-            webpage, u'playlist title', flags=re.DOTALL).lower().capitalize()
+            webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()
 
         data = compat_urllib_parse.urlencode({
             'cid': cat_id,
@@ -103,10 +140,10 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
         response = self._download_webpage(request_url, playlist_title)
         response = self._fix_json(response)
         if not response.strip():
-            self._downloader.report_warning(u'Got an empty reponse, trying '
-                                            u'adding the "newvideos" parameter')
+            self._downloader.report_warning('Got an empty response, trying '
+                                            'to add the "newvideos" parameter')
             response = self._download_webpage(request_url + '&newvideos=true',
-                playlist_title)
+                                              playlist_title)
             response = self._fix_json(response)
         videos = json.loads(response)
 
@@ -114,5 +151,5 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
             '_type': 'playlist',
             'title': playlist_title,
             'id': cat_id,
-            'entries': [self._extract_video(i) for i in videos],
+            'entries': [self._extract_video(v) for v in videos],
         }
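
For context, the splitext-based rewrite in the first hunk is safer than the old str.replace because it edits only the URL path, so query strings and repeated '.mp4' substrings survive. A sketch with an invented URL:

    import os
    from youtube_dl.compat import (
        compat_urlparse,
        compat_urllib_parse_urlparse,
    )

    url = 'http://video.nhl.com/a.mp4/clip.mp4?fmt=.mp4'
    parsed = compat_urllib_parse_urlparse(url)
    filename, ext = os.path.splitext(parsed.path)
    path = '%s_sd%s' % (filename, ext)
    print(compat_urlparse.urlunparse(parsed[:2] + (path,) + parsed[3:]))
    # -> http://video.nhl.com/a.mp4/clip_sd.mp4?fmt=.mp4
    # url.replace('.mp4', '_sd.mp4') would have mangled all three occurrences.
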
index c0c139b5df16ce900ba6920a1a004bc433eab4e9..1d9c1a096403e7e7b4f3835f31dbee31812594c9 100644 (file)
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 
 import re
+import json
 
 from .common import InfoExtractor
 from ..utils import (
@@ -11,6 +12,7 @@ from ..utils import (
     unified_strdate,
     parse_duration,
     int_or_none,
+    ExtractorError,
 )
 
 
@@ -39,18 +41,17 @@ class NiconicoIE(InfoExtractor):
 
     _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/((?:[a-z]{2})?[0-9]+)'
     _NETRC_MACHINE = 'niconico'
-    # Determine whether the downloader uses authentication to download video
-    _AUTHENTICATE = False
+    # Tracks whether the downloader used authentication to download video
+    _AUTHENTICATED = False
 
     def _real_initialize(self):
-        if self._downloader.params.get('username', None) is not None:
-            self._AUTHENTICATE = True
-
-        if self._AUTHENTICATE:
-            self._login()
+        self._login()
 
     def _login(self):
         (username, password) = self._get_login_info()
+        # No authentication to be performed
+        if not username:
+            return True
 
         # Log in
         login_form_strs = {
@@ -68,6 +69,8 @@ class NiconicoIE(InfoExtractor):
         if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
             self._downloader.report_warning('unable to log in: bad username or password')
             return False
+        # Successful login
+        self._AUTHENTICATED = True
         return True
 
     def _real_extract(self, url):
@@ -82,7 +85,7 @@ class NiconicoIE(InfoExtractor):
             'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
             note='Downloading video info page')
 
-        if self._AUTHENTICATE:
+        if self._AUTHENTICATED:
             # Get flv info
             flv_info_webpage = self._download_webpage(
                 'http://flapi.nicovideo.jp/api/getflv?v=' + video_id,
@@ -106,6 +109,9 @@ class NiconicoIE(InfoExtractor):
                 flv_info_request, video_id,
                 note='Downloading flv info', errnote='Unable to download flv info')
 
+        if 'deleted=' in flv_info_webpage:
+            raise ExtractorError('The video has been deleted.',
+                                 expected=True)
         video_real_url = compat_urlparse.parse_qs(flv_info_webpage)['url'][0]
 
         # Start extracting information
@@ -145,3 +151,37 @@ class NiconicoIE(InfoExtractor):
             'duration': duration,
             'webpage_url': webpage_url,
         }
+
+
+class NiconicoPlaylistIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://www.nicovideo.jp/mylist/27411728',
+        'info_dict': {
+            'id': '27411728',
+            'title': 'AKB48のオールナイトニッポン',
+        },
+        'playlist_mincount': 225,
+    }
+
+    def _real_extract(self, url):
+        list_id = self._match_id(url)
+        webpage = self._download_webpage(url, list_id)
+
+        entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
+                                          webpage, 'entries')
+        entries = json.loads(entries_json)
+        entries = [{
+            '_type': 'url',
+            'ie_key': NiconicoIE.ie_key(),
+            'url': ('http://www.nicovideo.jp/watch/%s' %
+                    entry['item_data']['video_id']),
+        } for entry in entries]
+
+        return {
+            '_type': 'playlist',
+            'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
+            'id': list_id,
+            'entries': entries,
+        }
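
For context on the deleted-video check above: getflv answers with a urlencoded blob, which is why the check is a plain substring test performed before parse_qs. A toy illustration with invented payloads:

    from youtube_dl.compat import compat_urlparse

    ok = 'thread_id=1234&url=http%3A%2F%2Fsmile.example%2Fsm9'
    gone = 'deleted=1&error=invalid_v1'

    print(compat_urlparse.parse_qs(ok)['url'][0])  # http://smile.example/sm9
    print('deleted=' in gone)  # True -> ExtractorError above
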
index 33daa0dec327dea3f691f5dccab5b6b312d79e4f..16a02ad7939082627ffb9edd41d7bf6e62fd1f6d 100644 (file)
@@ -27,8 +27,7 @@ class NineGagIE(InfoExtractor):
             "thumbnail": "re:^https?://",
         },
         'add_ie': ['Youtube']
-    },
-    {
+    }, {
         'url': 'http://9gag.tv/p/KklwM/alternate-banned-opening-scene-of-gravity?ref=fsidebar',
         'info_dict': {
             'id': 'KklwM',
index da203538dbea3781d0daf05edbbaacbd72be622f..7d2ff7b9a149d0284da1d52a26f4c0a85bf0f6ce 100644 (file)
@@ -2,10 +2,15 @@
 from __future__ import unicode_literals
 
 import re
+import time
+import hashlib
 
 from .common import InfoExtractor
 from ..utils import (
+    compat_urllib_request,
+    compat_urllib_parse,
     ExtractorError,
+    clean_html,
     unified_strdate,
     compat_str,
 )
@@ -13,6 +18,10 @@ from ..utils import (
 
 class NocoIE(InfoExtractor):
     _VALID_URL = r'http://(?:(?:www\.)?noco\.tv/emission/|player\.noco\.tv/\?idvideo=)(?P<id>\d+)'
+    _LOGIN_URL = 'http://noco.tv/do.php'
+    _API_URL_TEMPLATE = 'https://api.noco.tv/1.1/%s?ts=%s&tk=%s'
+    _SUB_LANG_TEMPLATE = '&sub_lang=%s'
+    _NETRC_MACHINE = 'noco'
 
     _TEST = {
         'url': 'http://noco.tv/emission/11538/nolife/ami-ami-idol-hello-france/',
@@ -30,54 +39,111 @@ class NocoIE(InfoExtractor):
         'skip': 'Requires noco account',
     }
 
+    def _real_initialize(self):
+        self._login()
+
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+
+        login_form = {
+            'a': 'login',
+            'cookie': '1',
+            'username': username,
+            'password': password,
+        }
+        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
+
+        login = self._download_json(request, None, 'Logging in as %s' % username)
+
+        if 'erreur' in login:
+            raise ExtractorError('Unable to login: %s' % clean_html(login['erreur']), expected=True)
+
+    def _call_api(self, path, video_id, note, sub_lang=None):
+        ts = compat_str(int(time.time() * 1000))
+        tk = hashlib.md5((hashlib.md5(ts.encode('ascii')).hexdigest() + '#8S?uCraTedap6a').encode('ascii')).hexdigest()
+        url = self._API_URL_TEMPLATE % (path, ts, tk)
+        if sub_lang:
+            url += self._SUB_LANG_TEMPLATE % sub_lang
+
+        resp = self._download_json(url, video_id, note)
+
+        if isinstance(resp, dict) and resp.get('error'):
+            self._raise_error(resp['error'], resp['description'])
+
+        return resp
+
+    def _raise_error(self, error, description):
+        raise ExtractorError(
+            '%s returned error: %s - %s' % (self.IE_NAME, error, description),
+            expected=True)
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
-        medias = self._download_json(
-            'https://api.noco.tv/1.0/video/medias/%s' % video_id, video_id, 'Downloading video JSON')
+        medias = self._call_api(
+            'shows/%s/medias' % video_id,
+            video_id, 'Downloading video JSON')
 
-        formats = []
+        qualities = self._call_api(
+            'qualities',
+            video_id, 'Downloading qualities JSON')
 
-        for fmt in medias['fr']['video_list']['default']['quality_list']:
-            format_id = fmt['quality_key']
+        formats = []
 
-            file = self._download_json(
-                'https://api.noco.tv/1.0/video/file/%s/fr/%s' % (format_id.lower(), video_id),
-                video_id, 'Downloading %s video JSON' % format_id)
-
-            file_url = file['file']
-            if not file_url:
-                continue
-
-            if file_url == 'forbidden':
-                raise ExtractorError(
-                    '%s returned error: %s - %s' % (
-                        self.IE_NAME, file['popmessage']['title'], file['popmessage']['message']),
-                    expected=True)
-
-            formats.append({
-                'url': file_url,
-                'format_id': format_id,
-                'width': fmt['res_width'],
-                'height': fmt['res_lines'],
-                'abr': fmt['audiobitrate'],
-                'vbr': fmt['videobitrate'],
-                'filesize': fmt['filesize'],
-                'format_note': fmt['quality_name'],
-                'preference': fmt['priority'],
-            })
+        for lang, lang_dict in medias['fr']['video_list'].items():
+            for format_id, fmt in lang_dict['quality_list'].items():
+                format_id_extended = '%s-%s' % (lang, format_id) if lang != 'none' else format_id
+
+                video = self._call_api(
+                    'shows/%s/video/%s/fr' % (video_id, format_id.lower()),
+                    video_id, 'Downloading %s video JSON' % format_id_extended,
+                    lang if lang != 'none' else None)
+
+                file_url = video['file']
+                if not file_url:
+                    continue
+
+                if file_url in ['forbidden', 'not found']:
+                    popmessage = video['popmessage']
+                    self._raise_error(popmessage['title'], popmessage['message'])
+
+                formats.append({
+                    'url': file_url,
+                    'format_id': format_id_extended,
+                    'width': fmt['res_width'],
+                    'height': fmt['res_lines'],
+                    'abr': fmt['audiobitrate'],
+                    'vbr': fmt['videobitrate'],
+                    'filesize': fmt['filesize'],
+                    'format_note': qualities[format_id]['quality_name'],
+                    'preference': qualities[format_id]['priority'],
+                })
 
         self._sort_formats(formats)
 
-        show = self._download_json(
-            'https://api.noco.tv/1.0/shows/show/%s' % video_id, video_id, 'Downloading show JSON')[0]
+        show = self._call_api(
+            'shows/by_id/%s' % video_id,
+            video_id, 'Downloading show JSON')[0]
 
-        upload_date = unified_strdate(show['indexed'])
+        upload_date = unified_strdate(show['online_date_start_utc'])
         uploader = show['partner_name']
         uploader_id = show['partner_key']
         duration = show['duration_ms'] / 1000.0
-        thumbnail = show['screenshot']
+
+        thumbnails = []
+        for thumbnail_key, thumbnail_url in show.items():
+            m = re.search(r'^screenshot_(?P<width>\d+)x(?P<height>\d+)$', thumbnail_key)
+            if not m:
+                continue
+            thumbnails.append({
+                'url': thumbnail_url,
+                'width': int(m.group('width')),
+                'height': int(m.group('height')),
+            })
 
         episode = show.get('show_TT') or show.get('show_OT')
         family = show.get('family_TT') or show.get('family_OT')
@@ -97,10 +163,10 @@ class NocoIE(InfoExtractor):
             'id': video_id,
             'title': title,
             'description': description,
-            'thumbnail': thumbnail,
+            'thumbnails': thumbnails,
             'upload_date': upload_date,
             'uploader': uploader,
             'uploader_id': uploader_id,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
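
The request signing in _call_api is self-contained: tk is md5(md5(ts) + secret) over a millisecond timestamp. A standalone sketch, with the secret copied from the code above and a fixed timestamp for reproducibility:

    import hashlib

    def noco_sign(path, ts_ms='1412000000000'):
        tk = hashlib.md5(
            (hashlib.md5(ts_ms.encode('ascii')).hexdigest() +
             '#8S?uCraTedap6a').encode('ascii')).hexdigest()
        return 'https://api.noco.tv/1.1/%s?ts=%s&tk=%s' % (path, ts_ms, tk)

    print(noco_sign('qualities'))
    # https://api.noco.tv/1.1/qualities?ts=1412000000000&tk=<32-hex md5>
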
index 25e71a56e196d9cf7f9d2423c47293b01e46cd24..3d35b11ac81286a359a06620d0e08e8fae18043b 100644 (file)
@@ -31,9 +31,9 @@ class NormalbootsIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
         video_uploader = self._html_search_regex(r'Posted\sby\s<a\shref="[A-Za-z0-9/]*">(?P<uploader>[A-Za-z]*)\s</a>',
-            webpage, 'uploader')
+                                                 webpage, 'uploader')
         raw_upload_date = self._html_search_regex('<span style="text-transform:uppercase; font-size:inherit;">[A-Za-z]+, (?P<date>.*)</span>',
-            webpage, 'date')
+                                                  webpage, 'date')
         video_upload_date = unified_strdate(raw_upload_date)
 
         player_url = self._html_search_regex(r'<iframe\swidth="[0-9]+"\sheight="[0-9]+"\ssrc="(?P<url>[\S]+)"', webpage, 'url')
diff --git a/youtube_dl/extractor/nosvideo.py b/youtube_dl/extractor/nosvideo.py
new file mode 100644 (file)
index 0000000..f3be8f5
--- /dev/null
@@ -0,0 +1,76 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    compat_urllib_request,
+    urlencode_postdata,
+    xpath_text,
+    xpath_with_ns,
+)
+
+_x = lambda p: xpath_with_ns(p, {'xspf': 'http://xspf.org/ns/0/'})
+
+
+class NosVideoIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?nosvideo\.com/' + \
+                 r'(?:embed/|\?v=)(?P<id>[A-Za-z0-9]{12})/?'
+    _PLAYLIST_URL = 'http://nosvideo.com/xml/{xml_id:s}.xml'
+    _FILE_DELETED_REGEX = r'<b>File Not Found</b>'
+    _TEST = {
+        'url': 'http://nosvideo.com/?v=mu8fle7g7rpq',
+        'md5': '6124ed47130d8be3eacae635b071e6b6',
+        'info_dict': {
+            'id': 'mu8fle7g7rpq',
+            'ext': 'mp4',
+            'title': 'big_buck_bunny_480p_surround-fix.avi.mp4',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        fields = {
+            'id': video_id,
+            'op': 'download1',
+            'method_free': 'Continue to Video',
+        }
+        req = compat_urllib_request.Request(url, urlencode_postdata(fields))
+        req.add_header('Content-type', 'application/x-www-form-urlencoded')
+        webpage = self._download_webpage(req, video_id,
+                                         'Downloading download page')
+        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
+            raise ExtractorError('Video %s does not exist' % video_id,
+                                 expected=True)
+
+        xml_id = self._search_regex(r'php\|([^\|]+)\|', webpage, 'XML ID')
+        playlist_url = self._PLAYLIST_URL.format(xml_id=xml_id)
+        playlist = self._download_xml(playlist_url, video_id)
+
+        track = playlist.find(_x('.//xspf:track'))
+        if track is None:
+            raise ExtractorError(
+                'XML playlist is missing the \'track\' element',
+                expected=True)
+        title = xpath_text(track, _x('./xspf:title'), 'title')
+        url = xpath_text(track, _x('./xspf:file'), 'URL', fatal=True)
+        thumbnail = xpath_text(track, _x('./xspf:image'), 'thumbnail')
+        if title is not None:
+            title = title.strip()
+
+        formats = [{
+            'format_id': 'sd',
+            'url': url,
+        }]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'formats': formats,
+        }
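
The _x helper above only expands the xspf: prefix into the Clark notation ElementTree expects. A minimal sketch against a hand-written XSPF document rather than a real nosvideo playlist:

    import xml.etree.ElementTree as ET
    from youtube_dl.utils import xpath_text, xpath_with_ns

    _x = lambda p: xpath_with_ns(p, {'xspf': 'http://xspf.org/ns/0/'})

    doc = ET.fromstring(
        '<playlist xmlns="http://xspf.org/ns/0/"><trackList><track>'
        '<title>big_buck_bunny.mp4</title>'
        '<file>http://cdn.example/bbb.mp4</file>'
        '</track></trackList></playlist>')

    track = doc.find(_x('.//xspf:track'))
    print(xpath_text(track, _x('./xspf:title'), 'title'))  # big_buck_bunny.mp4
    print(xpath_text(track, _x('./xspf:file'), 'URL'))  # http://cdn.example/bbb.mp4
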
index 2e7ab1e4f9ce23c422fddf478b23b1497aac02ae..38d05e46604a859247c0b155625ee41f5b556b36 100644 (file)
@@ -66,4 +66,4 @@ class NovaMovIE(InfoExtractor):
             'url': video_url,
             'title': title,
             'description': description
-        }
\ No newline at end of file
+        }
index 1c5e9401f36c72a73a701bdffc89529979a1eaaf..6b2f3f55a60d19ff3b4735027a399b6c38ad1310 100644 (file)
@@ -1,3 +1,4 @@
+# encoding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -8,19 +9,34 @@ from ..utils import ExtractorError
 
 
 class NownessIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?nowness\.com/[^?#]*?/(?P<id>[0-9]+)/(?P<slug>[^/]+?)(?:$|[?#])'
-
-    _TEST = {
-        'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation',
-        'md5': '068bc0202558c2e391924cb8cc470676',
-        'info_dict': {
-            'id': '2520295746001',
-            'ext': 'mp4',
-            'description': 'Candor: The Art of Gesticulation',
-            'uploader': 'Nowness',
-            'title': 'Candor: The Art of Gesticulation',
-        }
-    }
+    _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/[^?#]*?/(?P<id>[0-9]+)/(?P<slug>[^/]+?)(?:$|[?#])'
+
+    _TESTS = [
+        {
+            'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation',
+            'md5': '068bc0202558c2e391924cb8cc470676',
+            'info_dict': {
+                'id': '2520295746001',
+                'ext': 'mp4',
+                'title': 'Candor: The Art of Gesticulation',
+                'description': 'Candor: The Art of Gesticulation',
+                'thumbnail': 're:^https?://.*\.jpg',
+                'uploader': 'Nowness',
+            }
+        },
+        {
+            'url': 'http://cn.nowness.com/day/2014/8/7/4069/kasper-bj-rke-ft-jaakko-eino-kalevi--tnr',
+            'md5': 'e79cf125e387216f86b2e0a5b5c63aa3',
+            'info_dict': {
+                'id': '3716354522001',
+                'ext': 'mp4',
+                'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
+                'description': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
+                'thumbnail': 're:^https?://.*\.jpg',
+                'uploader': 'Nowness',
+            }
+        },
+    ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
index bfba184184c09bfd429698229efc1375c334e617..ecb38de2d3d969b96145b0b191054e2b34f1801e 100644 (file)
@@ -25,4 +25,4 @@ class NowVideoIE(NovaMovIE):
             'title': 'youtubedl test video _BaW_jenozKc.mp4',
             'description': 'Description',
         }
-    }
\ No newline at end of file
+    }
index 12e85a716fec900cf01d72157ab4159bc69ae8f8..ce31694a506a99d1ae21460f0c458ef3100c1850 100644 (file)
@@ -5,6 +5,10 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     unified_strdate,
+    parse_duration,
+    qualities,
+    strip_jsonp,
+    url_basename,
 )
 
 
@@ -12,51 +16,124 @@ class NPOIE(InfoExtractor):
     IE_NAME = 'npo.nl'
     _VALID_URL = r'https?://www\.npo\.nl/[^/]+/[^/]+/(?P<id>[^/?]+)'
 
-    _TEST = {
-        'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
-        'md5': '4b3f9c429157ec4775f2c9cb7b911016',
-        'info_dict': {
-            'id': 'VPWON_1220719',
-            'ext': 'mp4',
-            'title': 'Nieuwsuur',
-            'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
-            'upload_date': '20140622',
+    _TESTS = [
+        {
+            'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
+            'md5': '4b3f9c429157ec4775f2c9cb7b911016',
+            'info_dict': {
+                'id': 'VPWON_1220719',
+                'ext': 'm4v',
+                'title': 'Nieuwsuur',
+                'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
+                'upload_date': '20140622',
+            },
         },
-    }
+        {
+            'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
+            'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
+            'info_dict': {
+                'id': 'VARA_101191800',
+                'ext': 'm4v',
+                'title': 'De Mega Mike & Mega Thomas show',
+                'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
+                'upload_date': '20090227',
+                'duration': 2400,
+            },
+        },
+        {
+            'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
+            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
+            'info_dict': {
+                'id': 'VPWON_1169289',
+                'ext': 'm4v',
+                'title': 'Tegenlicht',
+                'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
+                'upload_date': '20130225',
+                'duration': 3000,
+            },
+        }
+    ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
+        return self._get_info(video_id)
 
+    def _get_info(self, video_id):
         metadata = self._download_json(
             'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
             video_id,
             # We have to remove the javascript callback
-            transform_source=lambda j: re.sub(r'parseMetadata\((.*?)\);\n//.*$', r'\1', j)
+            transform_source=strip_jsonp,
         )
         token_page = self._download_webpage(
             'http://ida.omroep.nl/npoplayer/i.js',
             video_id,
             note='Downloading token'
         )
-        token = self._search_regex(r'npoplayer.token = "(.+?)"', token_page, 'token')
-        streams_info = self._download_json(
-            'http://ida.omroep.nl/odi/?prid=%s&puboptions=h264_std&adaptive=yes&token=%s' % (video_id, token),
-            video_id
-        )
+        token = self._search_regex(r'npoplayer\.token = "(.+?)"', token_page, 'token')
 
-        stream_info = self._download_json(
-            streams_info['streams'][0] + '&type=json',
-            video_id,
-            'Downloading stream info'
-        )
+        formats = []
+        quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
+        for format_id in metadata['pubopties']:
+            format_info = self._download_json(
+                'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s' % (video_id, format_id, token),
+                video_id, 'Downloading %s JSON' % format_id)
+            if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
+                continue
+            streams = format_info.get('streams')
+            if streams:
+                video_info = self._download_json(
+                    streams[0] + '&type=json',
+                    video_id, 'Downloading %s stream JSON' % format_id)
+            else:
+                video_info = format_info
+            video_url = video_info.get('url')
+            if not video_url:
+                continue
+            if format_id == 'adaptive':
+                formats.extend(self._extract_m3u8_formats(video_url, video_id))
+            else:
+                formats.append({
+                    'url': video_url,
+                    'format_id': format_id,
+                    'quality': quality(format_id),
+                })
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
             'title': metadata['titel'],
-            'ext': 'mp4',
-            'url': stream_info['url'],
             'description': metadata['info'],
-            'thumbnail': metadata['images'][-1]['url'],
-            'upload_date': unified_strdate(metadata['gidsdatum']),
+            'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
+            'upload_date': unified_strdate(metadata.get('gidsdatum')),
+            'duration': parse_duration(metadata.get('tijdsduur')),
+            'formats': formats,
         }
+
+
+class TegenlichtVproIE(NPOIE):
+    IE_NAME = 'tegenlicht.vpro.nl'
+    _VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?'
+
+    _TESTS = [
+        {
+            'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
+            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
+            'info_dict': {
+                'id': 'VPWON_1169289',
+                'ext': 'm4v',
+                'title': 'Tegenlicht',
+                'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
+                'upload_date': '20130225',
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        name = url_basename(url)
+        webpage = self._download_webpage(url, name)
+        urn = self._html_search_meta('mediaurn', webpage)
+        info_page = self._download_json(
+            'http://rs.vpro.nl/v2/api/media/%s.json' % urn, name)
+        return self._get_info(info_page['mid'])
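
On the qualities() helper introduced above: it returns a callable mapping a format_id to its index in the given list, so ids later in the list sort higher in _sort_formats:

    from youtube_dl.utils import qualities

    quality = qualities(['adaptive', 'wmv_sb', 'h264_sb',
                         'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
    print(quality('h264_std'))  # 6 (best)
    print(quality('wmv_sb'))    # 1
    print(quality('other'))     # -1 (unknown ids sort last)
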
index ed60314eca4f918392da7fe2637e47ed3cdf5ee9..ee740cd9c0fe71a48b79aee00c40ea610e81ea99 100644 (file)
@@ -130,7 +130,7 @@ class NTVIE(InfoExtractor):
                 'rtmp_conn': 'B:1',
                 'player_url': 'http://www.ntv.ru/swf/vps1.swf?update=20131128',
                 'page_url': 'http://www.ntv.ru',
-                'flash_ver': 'LNX 11,2,202,341',
+                'flash_version': 'LNX 11,2,202,341',
                 'rtmp_live': True,
                 'ext': 'flv',
                 'filesize': int(size.text),
@@ -145,4 +145,4 @@ class NTVIE(InfoExtractor):
             'duration': duration,
             'view_count': view_count,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 280328b78306e5ab332cbb7111127f832c6c9aba..449c8a6a3e86c410daaafbf80878161693242e27 100644 (file)
@@ -38,7 +38,7 @@ class NuvidIE(InfoExtractor):
             webpage = self._download_webpage(
                 request, video_id, 'Downloading %s page' % format_id)
             video_url = self._html_search_regex(
-                r'<a href="([^"]+)"\s*>Continue to watch video', webpage, '%s video URL' % format_id, fatal=False)
+                r'<a\s+href="([^"]+)"\s+class="b_link">', webpage, '%s video URL' % format_id, fatal=False)
             if not video_url:
                 continue
             formats.append({
@@ -49,21 +49,26 @@ class NuvidIE(InfoExtractor):
         webpage = self._download_webpage(
             'http://m.nuvid.com/video/%s' % video_id, video_id, 'Downloading video page')
         title = self._html_search_regex(
-            r'<div class="title">\s+<h2[^>]*>([^<]+)</h2>', webpage, 'title').strip()
-        thumbnail = self._html_search_regex(
-            r'href="(/thumbs/[^"]+)"[^>]*data-link_type="thumbs"',
-            webpage, 'thumbnail URL', fatal=False)
+            [r'<span title="([^"]+)">',
+             r'<div class="thumb-holder video">\s*<h5[^>]*>([^<]+)</h5>'], webpage, 'title').strip()
+        thumbnails = [
+            {
+                'url': thumb_url,
+            } for thumb_url in re.findall(r'<img src="([^"]+)" alt="" />', webpage)
+        ]
+        thumbnail = thumbnails[0]['url'] if thumbnails else None
         duration = parse_duration(self._html_search_regex(
-            r'Length:\s*<span>(\d{2}:\d{2})</span>',webpage, 'duration', fatal=False))
+            r'<i class="fa fa-clock-o"></i>\s*(\d{2}:\d{2})', webpage, 'duration', fatal=False))
         upload_date = unified_strdate(self._html_search_regex(
-            r'Added:\s*<span>(\d{4}-\d{2}-\d{2})</span>', webpage, 'upload date', fatal=False))
+            r'<i class="fa fa-user"></i>\s*(\d{4}-\d{2}-\d{2})', webpage, 'upload date', fatal=False))
 
         return {
             'id': video_id,
             'title': title,
-            'thumbnail': 'http://m.nuvid.com%s' % thumbnail,
+            'thumbnails': thumbnails,
+            'thumbnail': thumbnail,
             'duration': duration,
             'upload_date': upload_date,
             'age_limit': 18,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 7bf105d38178a5d59339d81262649b40fa57a46e..56e1cad3b0021431721b59df2162feaf7e0c357b 100644 (file)
@@ -74,4 +74,4 @@ class NYTimesIE(InfoExtractor):
             'duration': duration,
             'formats': formats,
             'thumbnails': thumbnails,
-        }
\ No newline at end of file
+        }
diff --git a/youtube_dl/extractor/oe1.py b/youtube_dl/extractor/oe1.py
deleted file mode 100644 (file)
index 38971ab..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-import calendar
-import datetime
-import re
-
-from .common import InfoExtractor
-
-# audios on oe1.orf.at are only available for 7 days, so we can't
-# add tests.
-
-
-class OE1IE(InfoExtractor):
-    IE_DESC = 'oe1.orf.at'
-    _VALID_URL = r'http://oe1\.orf\.at/programm/(?P<id>[0-9]+)'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        show_id = mobj.group('id')
-
-        data = self._download_json(
-            'http://oe1.orf.at/programm/%s/konsole' % show_id,
-            show_id
-        )
-
-        timestamp = datetime.datetime.strptime('%s %s' % (
-            data['item']['day_label'],
-            data['item']['time']
-        ), '%d.%m.%Y %H:%M')
-        unix_timestamp = calendar.timegm(timestamp.utctimetuple())
-
-        return {
-            'id': show_id,
-            'title': data['item']['title'],
-            'url': data['item']['url_stream'],
-            'ext': 'mp3',
-            'description': data['item'].get('info'),
-            'timestamp': unix_timestamp
-        }
diff --git a/youtube_dl/extractor/oktoberfesttv.py b/youtube_dl/extractor/oktoberfesttv.py
new file mode 100644 (file)
index 0000000..4a41c05
--- /dev/null
@@ -0,0 +1,47 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class OktoberfestTVIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)'
+
+    _TEST = {
+        'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt',
+        'info_dict': {
+            'id': 'hb-zelt',
+            'ext': 'mp4',
+            'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'is_live': True,
+        },
+        'params': {
+            'skip_download': True,
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._live_title(self._html_search_regex(
+            r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title'))
+
+        clip = self._search_regex(
+            r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip')
+        ncurl = self._search_regex(
+            r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base')
+        video_url = ncurl + clip
+        thumbnail = self._search_regex(
+            r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage,
+            'thumbnail', fatal=False)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'ext': 'mp4',
+            'is_live': True,
+            'thumbnail': thumbnail,
+        }
index 13f12824c99aa71c357047ff62a866365bbc49fb..f17a528583bf7296906431bdc1a0e3a3cbd6c71d 100644 (file)
@@ -3,23 +3,38 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import unescapeHTML
+from ..utils import (
+    unescapeHTML,
+    ExtractorError,
+)
 
 
 class OoyalaIE(InfoExtractor):
     _VALID_URL = r'(?:ooyala:|https?://.+?\.ooyala\.com/.*?(?:embedCode|ec)=)(?P<id>.+?)(&|$)'
 
-    _TEST = {
-        # From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video
-        'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
-        'md5': '3f5cceb3a7bf461d6c29dc466cf8033c',
-        'info_dict': {
-            'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
-            'ext': 'mp4',
-            'title': 'Explaining Data Recovery from Hard Drives and SSDs',
-            'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
+    _TESTS = [
+        {
+            # From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video
+            'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
+            'md5': '3f5cceb3a7bf461d6c29dc466cf8033c',
+            'info_dict': {
+                'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
+                'ext': 'mp4',
+                'title': 'Explaining Data Recovery from Hard Drives and SSDs',
+                'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
+            },
+        }, {
+            # Only available for ipad
+            'url': 'http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
+            'md5': '4b9754921fddb68106e48c142e2a01e6',
+            'info_dict': {
+                'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
+                'ext': 'mp4',
+                'title': 'Simulation Overview - Levels of Simulation',
+                'description': '',
+            },
         },
-    }
+    ]
 
     @staticmethod
     def _url_for_embed_code(embed_code):
@@ -28,7 +43,7 @@ class OoyalaIE(InfoExtractor):
     @classmethod
     def _build_url_result(cls, embed_code):
         return cls.url_result(cls._url_for_embed_code(embed_code),
-            ie=cls.ie_key())
+                              ie=cls.ie_key())
 
     def _extract_result(self, info, more_info):
         return {
@@ -47,13 +62,30 @@ class OoyalaIE(InfoExtractor):
         player = self._download_webpage(player_url, embedCode)
         mobile_url = self._search_regex(r'mobile_player_url="(.+?)&device="',
                                         player, 'mobile player url')
-        mobile_player = self._download_webpage(mobile_url, embedCode)
-        videos_info = self._search_regex(
-            r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);',
-            mobile_player, 'info').replace('\\"','"')
-        videos_more_info = self._search_regex(r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, 'more info').replace('\\"','"')
+        # Looks like some videos are only available for particular devices
+        # (e.g. http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0
+        # is only available for ipad).
+        # Work around this by fetching the mobile player for each device found,
+        # starting with 'unknown', until extraction succeeds or every device fails.
+        devices = re.findall(r'device\s*=\s*"([^"]+)";', player)
+        devices.remove('unknown')
+        devices.insert(0, 'unknown')
+        for device in devices:
+            mobile_player = self._download_webpage(
+                '%s&device=%s' % (mobile_url, device), embedCode,
+                'Downloading mobile player JS for %s device' % device)
+            videos_info = self._search_regex(
+                r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);',
+                mobile_player, 'info', fatal=False, default=None)
+            if videos_info:
+                break
+        if not videos_info:
+            raise ExtractorError('Unable to extract info')
+        videos_info = videos_info.replace('\\"', '"')
+        videos_more_info = self._search_regex(
+            r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, 'more info').replace('\\"', '"')
         videos_info = json.loads(videos_info)
-        videos_more_info =json.loads(videos_more_info)
+        videos_more_info = json.loads(videos_more_info)
 
         if videos_more_info.get('lineup'):
             videos = [self._extract_result(info, more_info) for (info, more_info) in zip(videos_info, videos_more_info['lineup'])]
@@ -65,4 +97,3 @@ class OoyalaIE(InfoExtractor):
             }
         else:
             return self._extract_result(videos_info[0], videos_more_info)
-        
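
A note on the unescaping above: the mobile player ships its stream list as a JS string that the page would eval, so quotes arrive backslash-escaped and must be unescaped before json.loads. Tiny invented sample:

    import json

    raw = '[{\\"embedCode\\":\\"pxczE2Yj\\",\\"streams\\":[]}]'
    print(json.loads(raw.replace('\\"', '"')))
    # [{'embedCode': 'pxczE2Yj', 'streams': []}]
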
index 03421d1d5c78f2acd712e560ae17fb96d4a323be..572a234ad8c2514e5704d936fb98a19035662f40 100644 (file)
@@ -3,6 +3,8 @@ from __future__ import unicode_literals
 
 import json
 import re
+import calendar
+import datetime
 
 from .common import InfoExtractor
 from ..utils import (
@@ -12,7 +14,9 @@ from ..utils import (
 )
 
 
-class ORFIE(InfoExtractor):
+class ORFTVthekIE(InfoExtractor):
+    IE_NAME = 'orf:tvthek'
+    IE_DESC = 'ORF TVthek'
     _VALID_URL = r'https?://tvthek\.orf\.at/(?:programs/.+?/episodes|topics/.+?|program/[^/]+)/(?P<id>\d+)'
 
     _TEST = {
@@ -105,3 +109,73 @@ class ORFIE(InfoExtractor):
             'entries': entries,
             'id': playlist_id,
         }
+
+
+# Audios on ORF radio are only available for 7 days, so we can't add tests.
+
+
+class ORFOE1IE(InfoExtractor):
+    IE_NAME = 'orf:oe1'
+    IE_DESC = 'Radio Österreich 1'
+    _VALID_URL = r'http://oe1\.orf\.at/programm/(?P<id>[0-9]+)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        show_id = mobj.group('id')
+
+        data = self._download_json(
+            'http://oe1.orf.at/programm/%s/konsole' % show_id,
+            show_id
+        )
+
+        timestamp = datetime.datetime.strptime('%s %s' % (
+            data['item']['day_label'],
+            data['item']['time']
+        ), '%d.%m.%Y %H:%M')
+        unix_timestamp = calendar.timegm(timestamp.utctimetuple())
+
+        return {
+            'id': show_id,
+            'title': data['item']['title'],
+            'url': data['item']['url_stream'],
+            'ext': 'mp3',
+            'description': data['item'].get('info'),
+            'timestamp': unix_timestamp
+        }
+
+
+class ORFFM4IE(InfoExtractor):
+    IE_NAME = 'orf:fm4'
+    IE_DESC = 'radio FM4'
+    _VALID_URL = r'http://fm4\.orf\.at/7tage/?#(?P<date>[0-9]+)/(?P<show>\w+)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        show_date = mobj.group('date')
+        show_id = mobj.group('show')
+
+        data = self._download_json(
+            'http://audioapi.orf.at/fm4/json/2.0/broadcasts/%s/4%s' % (show_date, show_id),
+            show_id
+        )
+
+        def extract_entry_dict(info, title, subtitle):
+            return {
+                'id': info['loopStreamId'].replace('.mp3', ''),
+                'url': 'http://loopstream01.apa.at/?channel=fm4&id=%s' % info['loopStreamId'],
+                'title': title,
+                'description': subtitle,
+                'duration': (info['end'] - info['start']) / 1000,
+                'timestamp': info['start'] / 1000,
+                'ext': 'mp3'
+            }
+
+        entries = [extract_entry_dict(t, data['title'], data['subtitle']) for t in data['streams']]
+
+        return {
+            '_type': 'playlist',
+            'id': show_id,
+            'title': data['title'],
+            'description': data['subtitle'],
+            'entries': entries
+        }
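
A worked example of the broadcast-time conversion in ORFOE1IE above (the date string is invented; note the source values are naive local time, which the extractor feeds to timegm as if they were UTC):

    import calendar
    import datetime

    ts = datetime.datetime.strptime('12.12.2014 17:16', '%d.%m.%Y %H:%M')
    print(calendar.timegm(ts.utctimetuple()))  # 1418404560
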
diff --git a/youtube_dl/extractor/patreon.py b/youtube_dl/extractor/patreon.py
new file mode 100644 (file)
index 0000000..5429592
--- /dev/null
@@ -0,0 +1,100 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    js_to_json,
+)
+
+
+class PatreonIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?patreon\.com/creation\?hid=(.+)'
+    _TESTS = [
+        {
+            'url': 'http://www.patreon.com/creation?hid=743933',
+            'md5': 'e25505eec1053a6e6813b8ed369875cc',
+            'info_dict': {
+                'id': '743933',
+                'ext': 'mp3',
+                'title': 'Episode 166: David Smalley of Dogma Debate',
+                'uploader': 'Cognitive Dissonance Podcast',
+                'thumbnail': 're:^https?://.*$',
+            },
+        },
+        {
+            'url': 'http://www.patreon.com/creation?hid=754133',
+            'md5': '3eb09345bf44bf60451b8b0b81759d0a',
+            'info_dict': {
+                'id': '754133',
+                'ext': 'mp3',
+                'title': 'CD 167 Extra',
+                'uploader': 'Cognitive Dissonance Podcast',
+                'thumbnail': 're:^https?://.*$',
+            },
+        },
+    ]
+
+    # Currently Patreon exposes the download URL via hidden CSS, so login is
+    # not needed. Keeping this commented out for when this inevitably changes.
+    '''
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+
+        login_form = {
+            'redirectUrl': 'http://www.patreon.com/',
+            'email': username,
+            'password': password,
+        }
+
+        request = compat_urllib_request.Request(
+            'https://www.patreon.com/processLogin',
+            compat_urllib_parse.urlencode(login_form).encode('utf-8')
+        )
+        login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
+
+        if re.search(r'onLoginFailed', login_page):
+            raise ExtractorError('Unable to login, incorrect username and/or password', expected=True)
+
+    def _real_initialize(self):
+        self._login()
+    '''
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group(1)
+
+        webpage = self._download_webpage(url, video_id)
+        title = self._og_search_title(webpage).strip()
+
+        attach_fn = self._html_search_regex(
+            r'<div class="attach"><a target="_blank" href="([^"]+)">',
+            webpage, 'attachment URL', default=None)
+        if attach_fn is not None:
+            video_url = 'http://www.patreon.com' + attach_fn
+            thumbnail = self._og_search_thumbnail(webpage)
+            uploader = self._html_search_regex(
+                r'<strong>(.*?)</strong> is creating', webpage, 'uploader')
+        else:
+            playlist_js = self._search_regex(
+                r'(?s)new\s+jPlayerPlaylist\(\s*\{\s*[^}]*},\s*(\[.*?,?\s*\])',
+                webpage, 'playlist JSON')
+            playlist_json = js_to_json(playlist_js)
+            playlist = json.loads(playlist_json)
+            data = playlist[0]
+            video_url = self._proto_relative_url(data['mp3'])
+            thumbnail = self._proto_relative_url(data.get('cover'))
+            uploader = data.get('artist')
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp3',
+            'title': title,
+            'uploader': uploader,
+            'thumbnail': thumbnail,
+        }
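
A minimal sketch (not part of the patch) of why the new extractor routes the
scraped jPlayerPlaylist literal through js_to_json before json.loads: the
embedded playlist is a JavaScript object literal with single-quoted strings
and an optional trailing comma, both of which the json module rejects. The
sample literal below is invented.

import json

from youtube_dl.utils import js_to_json

playlist_js = "[{'mp3': '//cdn.example.com/track.mp3', 'cover': '//cdn.example.com/cover.jpg'},]"
playlist = json.loads(js_to_json(playlist_js))
print(playlist[0]['mp3'])  # //cdn.example.com/track.mp3
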
index 64cded70789249746a5e2b6604d86563a6ad499c..6118ed5c2021492ee91e22dccd642d564918604c 100644 (file)
@@ -4,6 +4,7 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
+    unified_strdate,
     US_RATINGS,
 )
 
@@ -11,36 +12,115 @@ from ..utils import (
 class PBSIE(InfoExtractor):
     _VALID_URL = r'''(?x)https?://
         (?:
-            # Direct video URL
-            video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
-            # Article with embedded player
-           (?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+)/?(?:$|[?\#]) |
+           # Direct video URL
+           video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
+           # Article with embedded player (or direct video)
+           (?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) |
            # Player
            video\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
         )
     '''
 
-    _TEST = {
-        'url': 'http://www.pbs.org/tpt/constitution-usa-peter-sagal/watch/a-more-perfect-union/',
-        'md5': 'ce1888486f0908d555a8093cac9a7362',
-        'info_dict': {
-            'id': '2365006249',
-            'ext': 'mp4',
-            'title': 'A More Perfect Union',
-            'description': 'md5:ba0c207295339c8d6eced00b7c363c6a',
-            'duration': 3190,
+    _TESTS = [
+        {
+            'url': 'http://www.pbs.org/tpt/constitution-usa-peter-sagal/watch/a-more-perfect-union/',
+            'md5': 'ce1888486f0908d555a8093cac9a7362',
+            'info_dict': {
+                'id': '2365006249',
+                'ext': 'mp4',
+                'title': 'A More Perfect Union',
+                'description': 'md5:ba0c207295339c8d6eced00b7c363c6a',
+                'duration': 3190,
+            },
+        },
+        {
+            'url': 'http://www.pbs.org/wgbh/pages/frontline/losing-iraq/',
+            'md5': '143c98aa54a346738a3d78f54c925321',
+            'info_dict': {
+                'id': '2365297690',
+                'ext': 'mp4',
+                'title': 'Losing Iraq',
+                'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
+                'duration': 5050,
+            },
+        },
+        {
+            'url': 'http://www.pbs.org/newshour/bb/education-jan-june12-cyberschools_02-23/',
+            'md5': 'b19856d7f5351b17a5ab1dc6a64be633',
+            'info_dict': {
+                'id': '2201174722',
+                'ext': 'mp4',
+                'title': 'Cyber Schools Gain Popularity, but Quality Questions Persist',
+                'description': 'md5:5871c15cba347c1b3d28ac47a73c7c28',
+                'duration': 801,
+            },
+        },
+        {
+            'url': 'http://www.pbs.org/wnet/gperf/dudamel-conducts-verdi-requiem-hollywood-bowl-full-episode/3374/',
+            'md5': 'c62859342be2a0358d6c9eb306595978',
+            'info_dict': {
+                'id': '2365297708',
+                'ext': 'mp4',
+                'description': 'md5:68d87ef760660eb564455eb30ca464fe',
+                'title': 'Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
+                'duration': 6559,
+                'thumbnail': 're:^https?://.*\.jpg$',
+            }
         },
-    }
+        {
+            'url': 'http://www.pbs.org/wgbh/nova/earth/killer-typhoon.html',
+            'md5': '908f3e5473a693b266b84e25e1cf9703',
+            'info_dict': {
+                'id': '2365160389',
+                'display_id': 'killer-typhoon',
+                'ext': 'mp4',
+                'description': 'md5:c741d14e979fc53228c575894094f157',
+                'title': 'Killer Typhoon',
+                'duration': 3172,
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'upload_date': '20140122',
+            }
+        },
+        {
+            'url': 'http://www.pbs.org/wgbh/pages/frontline/united-states-of-secrets/',
+            'info_dict': {
+                'id': 'united-states-of-secrets',
+            },
+            'playlist_count': 2,
+        }
+    ]
 
-    def _real_extract(self, url):
+    def _extract_webpage(self, url):
         mobj = re.match(self._VALID_URL, url)
 
         presumptive_id = mobj.group('presumptive_id')
         display_id = presumptive_id
         if presumptive_id:
             webpage = self._download_webpage(url, display_id)
+
+            upload_date = unified_strdate(self._search_regex(
+                r'<input type="hidden" id="air_date_[0-9]+" value="([^"]+)"',
+                webpage, 'upload date', default=None))
+
+            # Tabbed Frontline videos
+            tabbed_videos = re.findall(
+                r'<div[^>]+class="videotab[^"]*"[^>]+vid="(\d+)"', webpage)
+            if tabbed_videos:
+                return tabbed_videos, presumptive_id, upload_date
+
+            MEDIA_ID_REGEXES = [
+                r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'",  # frontline video embed
+                r'class="coveplayerid">([^<]+)<',                       # coveplayer
+                r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>',  # jwplayer
+            ]
+
+            media_id = self._search_regex(
+                MEDIA_ID_REGEXES, webpage, 'media ID', fatal=False, default=None)
+            if media_id:
+                return media_id, presumptive_id, upload_date
+
             url = self._search_regex(
-                r'<iframe\s+id=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>',
+                r'<iframe\s+(?:class|id)=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>',
                 webpage, 'player URL')
             mobj = re.match(self._VALID_URL, url)
 
@@ -57,6 +137,17 @@ class PBSIE(InfoExtractor):
             video_id = mobj.group('id')
             display_id = video_id
 
+        return video_id, display_id, None
+
+    def _real_extract(self, url):
+        video_id, display_id, upload_date = self._extract_webpage(url)
+
+        if isinstance(video_id, list):
+            entries = [self.url_result(
+                'http://video.pbs.org/video/%s' % vid_id, 'PBS', vid_id)
+                for vid_id in video_id]
+            return self.playlist_result(entries, display_id)
+
         info_url = 'http://video.pbs.org/videoInfo/%s?format=json' % video_id
         info = self._download_json(info_url, display_id)
 
@@ -67,6 +158,7 @@ class PBSIE(InfoExtractor):
 
         return {
             'id': video_id,
+            'display_id': display_id,
             'title': info['title'],
             'url': info['alternate_encoding']['url'],
             'ext': 'mp4',
@@ -74,4 +166,5 @@ class PBSIE(InfoExtractor):
             'thumbnail': info.get('image_url'),
             'duration': info.get('duration'),
             'age_limit': age_limit,
+            'upload_date': upload_date,
         }
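
A standalone sketch of the dispatch pattern the refactored _real_extract now
uses: _extract_webpage hands back either one media ID or a list of
tabbed-video IDs, and the caller branches on the type to decide between a
single video and a playlist of video.pbs.org URLs. The entries here are
simplified to plain URL strings; one of the IDs is borrowed from the tests
above, the other is invented.

def build_result(video_id, display_id):
    if isinstance(video_id, list):
        # Tabbed Frontline pages yield several player IDs -> emit a playlist.
        entries = ['http://video.pbs.org/video/%s' % vid for vid in video_id]
        return {'_type': 'playlist', 'id': display_id, 'entries': entries}
    # A single media ID -> resolve it directly.
    return {'_type': 'video', 'id': video_id}

print(build_result(['2365297690', '2365297691'], 'united-states-of-secrets'))
print(build_result('2365006249', 'a-more-perfect-union'))
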
diff --git a/youtube_dl/extractor/phoenix.py b/youtube_dl/extractor/phoenix.py
new file mode 100644 (file)
index 0000000..a20672c
--- /dev/null
@@ -0,0 +1,31 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from .zdf import extract_from_xml_url
+
+
+class PhoenixIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?phoenix\.de/content/(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://www.phoenix.de/content/884301',
+        'md5': 'ed249f045256150c92e72dbb70eadec6',
+        'info_dict': {
+            'id': '884301',
+            'ext': 'mp4',
+            'title': 'Michael Krons mit Hans-Werner Sinn',
+            'description': 'Im Dialog - Sa. 25.10.14, 00.00 - 00.35 Uhr',
+            'upload_date': '20141025',
+            'uploader': 'Im Dialog',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        internal_id = self._search_regex(
+            r'<div class="phx_vod" id="phx_vod_([0-9]+)"',
+            webpage, 'internal video ID')
+
+        api_url = 'http://www.phoenix.de/php/zdfplayer-v1.3/data/beitragsDetails.php?ak=web&id=%s' % internal_id
+        return extract_from_xml_url(self, video_id, api_url)
index 8aa69c46eb75e9ccfe6fab5b7bff2c9a5778009e..b4389e0b6feaf0726a4805bec674b77cd38e295b 100644 (file)
@@ -31,7 +31,7 @@ class PhotobucketIE(InfoExtractor):
         # Extract URL, uploader, and title from webpage
         self.report_extraction(video_id)
         info_json = self._search_regex(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (.*?)\);',
-            webpage, 'info json')
+                                       webpage, 'info json')
         info = json.loads(info_json)
         url = compat_urllib_parse.unquote(self._html_search_regex(r'file=(.+\.mp4)', info['linkcodes']['html'], 'url'))
         return {
diff --git a/youtube_dl/extractor/planetaplay.py b/youtube_dl/extractor/planetaplay.py
new file mode 100644 (file)
index 0000000..596c621
--- /dev/null
@@ -0,0 +1,60 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class PlanetaPlayIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?planetaplay\.com/\?sng=(?P<id>[0-9]+)'
+    _API_URL = 'http://planetaplay.com/action/playlist/?sng={0:}'
+    _THUMBNAIL_URL = 'http://planetaplay.com/img/thumb/{thumb:}'
+    _TEST = {
+        'url': 'http://planetaplay.com/?sng=3586',
+        'md5': '9d569dceb7251a4e01355d5aea60f9db',
+        'info_dict': {
+            'id': '3586',
+            'ext': 'flv',
+            'title': 'md5:e829428ee28b1deed00de90de49d1da1',
+        }
+    }
+
+    _SONG_FORMATS = {
+        'lq': (0, 'http://www.planetaplay.com/videoplayback/{med_hash:}'),
+        'hq': (1, 'http://www.planetaplay.com/videoplayback/hi/{med_hash:}'),
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        response = self._download_json(
+            self._API_URL.format(video_id), video_id)['response']
+        try:
+            data = response.get('data')[0]
+        except IndexError:
+            raise ExtractorError(
+                '%s: failed to get the playlist' % self.IE_NAME, expected=True)
+
+        title = '{song_artists:} - {sng_name:}'.format(**data)
+        thumbnail = self._THUMBNAIL_URL.format(**data)
+
+        formats = []
+        for format_id, (quality, url_template) in self._SONG_FORMATS.items():
+            formats.append({
+                'format_id': format_id,
+                'url': url_template.format(**data),
+                'quality': quality,
+                'ext': 'flv',
+            })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': thumbnail,
+        }
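
A tiny illustration (invented API payload) of the template expansion used
above: every field of the API's first playlist entry is splatted into the
title, thumbnail and stream-URL templates via str.format(**data).

data = {
    'song_artists': 'Some Artist',
    'sng_name': 'Some Song',
    'thumb': '3586.jpg',
    'med_hash': 'abc123',
}

title = '{song_artists:} - {sng_name:}'.format(**data)
lq_url = 'http://www.planetaplay.com/videoplayback/{med_hash:}'.format(**data)
thumbnail = 'http://planetaplay.com/img/thumb/{thumb:}'.format(**data)
print(title)      # Some Artist - Some Song
print(lq_url)     # http://www.planetaplay.com/videoplayback/abc123
print(thumbnail)  # http://planetaplay.com/img/thumb/3586.jpg
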
diff --git a/youtube_dl/extractor/played.py b/youtube_dl/extractor/played.py
new file mode 100644 (file)
index 0000000..1788047
--- /dev/null
@@ -0,0 +1,62 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import os.path
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    compat_urllib_parse,
+    compat_urllib_request,
+)
+
+
+class PlayedIE(InfoExtractor):
+    IE_NAME = 'played.to'
+    _VALID_URL = r'https?://(?:www\.)?played\.to/(?P<id>[a-zA-Z0-9_-]+)'
+
+    _TEST = {
+        'url': 'http://played.to/j2f2sfiiukgt',
+        'md5': 'c2bd75a368e82980e7257bf500c00637',
+        'info_dict': {
+            'id': 'j2f2sfiiukgt',
+            'ext': 'flv',
+            'title': 'youtube-dl_test_video.mp4',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        orig_webpage = self._download_webpage(url, video_id)
+
+        m_error = re.search(
+            r'(?s)Reason for deletion:.*?<b class="err"[^>]*>(?P<msg>[^<]+)</b>', orig_webpage)
+        if m_error:
+            raise ExtractorError(m_error.group('msg'), expected=True)
+
+        fields = re.findall(
+            r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', orig_webpage)
+        data = dict(fields)
+
+        self._sleep(2, video_id)
+
+        post = compat_urllib_parse.urlencode(data)
+        headers = {
+            b'Content-Type': b'application/x-www-form-urlencoded',
+        }
+        req = compat_urllib_request.Request(url, post, headers)
+        webpage = self._download_webpage(
+            req, video_id, note='Downloading video page ...')
+
+        title = os.path.splitext(data['fname'])[0]
+
+        video_url = self._search_regex(
+            r'file: "?(.+?)",', webpage, 'video URL')
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+        }
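
A rough sketch (made-up HTML) of the file-host handshake played.to requires:
scrape the hidden <input> fields from the landing page, wait out the
countdown, then POST the same fields back to the same URL to obtain the page
that embeds the actual video URL.

import re

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2

landing_page = '''
<input type="hidden" name="op" value="download1">
<input type="hidden" name="id" value="j2f2sfiiukgt">
<input type="hidden" name="fname" value="youtube-dl_test_video.mp4">
'''

fields = dict(re.findall(
    r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', landing_page))
print(urlencode(fields))  # e.g. op=download1&id=j2f2sfiiukgt&fname=...
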
diff --git a/youtube_dl/extractor/playfm.py b/youtube_dl/extractor/playfm.py
new file mode 100644 (file)
index 0000000..ebc0468
--- /dev/null
@@ -0,0 +1,86 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+    ExtractorError,
+    float_or_none,
+    int_or_none,
+    str_to_int,
+)
+
+
+class PlayFMIE(InfoExtractor):
+    IE_NAME = 'play.fm'
+    _VALID_URL = r'https?://(?:www\.)?play\.fm/[^?#]*(?P<upload_date>[0-9]{8})(?P<id>[0-9]{6})(?:$|[?#])'
+
+    _TEST = {
+        'url': 'http://www.play.fm/recording/leipzigelectronicmusicbatofarparis_fr20140712137220',
+        'md5': 'c505f8307825a245d0c7ad1850001f22',
+        'info_dict': {
+            'id': '137220',
+            'ext': 'mp3',
+            'title': 'LEIPZIG ELECTRONIC MUSIC @ Batofar (Paris,FR) - 2014-07-12',
+            'uploader': 'Sven Tasnadi',
+            'uploader_id': 'sventasnadi',
+            'duration': 5627.428,
+            'upload_date': '20140712',
+            'view_count': int,
+            'comment_count': int,
+            'thumbnail': 're:^https?://.*\.jpg$',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        upload_date = mobj.group('upload_date')
+
+        rec_data = compat_urllib_parse.urlencode({'rec_id': video_id})
+        req = compat_urllib_request.Request(
+            'http://www.play.fm/flexRead/recording', data=rec_data)
+        req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        rec_doc = self._download_xml(req, video_id)
+
+        error_node = rec_doc.find('./error')
+        if error_node is not None:
+            raise ExtractorError('An error occurred: %s (code %s)' % (
+                error_node.text, rec_doc.find('./status').text))
+
+        recording = rec_doc.find('./recording')
+        title = recording.find('./title').text
+        view_count = str_to_int(recording.find('./stats/playcount').text)
+        comment_count = str_to_int(recording.find('./stats/comments').text)
+        duration = float_or_none(recording.find('./duration').text, scale=1000)
+        thumbnail = recording.find('./image').text
+
+        artist = recording.find('./artists/artist')
+        uploader = artist.find('./name').text
+        uploader_id = artist.find('./slug').text
+
+        video_url = '%s//%s/%s/%s/offset/0/sh/%s/rec/%s/jingle/%s/loc/%s' % (
+            'http:', recording.find('./url').text,
+            recording.find('./_class').text, recording.find('./file_id').text,
+            rec_doc.find('./uuid').text, video_id,
+            rec_doc.find('./jingle/file_id').text,
+            'http%3A%2F%2Fwww.play.fm%2Fplayer',
+        )
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp3',
+            'filesize': int_or_none(recording.find('./size').text),
+            'title': title,
+            'upload_date': upload_date,
+            'view_count': view_count,
+            'comment_count': comment_count,
+            'duration': duration,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+        }
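
A quick sketch (synthetic XML) of the error handling in the play.fm
extractor: the flexRead endpoint answers with XML, and a present ./error
node signals failure, with ./status carrying the numeric code.

import xml.etree.ElementTree as ET

doc = ET.fromstring(
    '<response><status>404</status><error>Recording not found</error></response>')

error_node = doc.find('./error')
if error_node is not None:
    print('An error occurred: %s (code %s)' % (
        error_node.text, doc.find('./status').text))
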
index b1322f13f8b62a4618b1f857dadf0829b9752238..cd3905acb0fcd0ef9fe08beec2bda41fd2f94f70 100644 (file)
@@ -4,6 +4,8 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
+    clean_html,
     compat_urllib_parse,
 )
 
@@ -28,6 +30,11 @@ class PlayvidIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
 
+        m_error = re.search(
+            r'<div class="block-error">\s*<div class="heading">\s*<div>(?P<msg>.+?)</div>\s*</div>', webpage)
+        if m_error:
+            raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
+
         video_title = None
         duration = None
         video_thumbnail = None
index ffafd23800f49de2bc7987f3d63c9b5ab2104b58..f20946a2bd0616d8d90cebf360570a3f0bc40e94 100644 (file)
@@ -6,6 +6,7 @@ import re
 from .common import InfoExtractor
 from ..utils import int_or_none
 
+
 class PodomaticIE(InfoExtractor):
     IE_NAME = 'podomatic'
     _VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)'
index 718fe9aba5fc710ee5efc47c2bbae2b02fc3c117..954dfccb75954d50a9a46bc14bdb1d0dcbd5588c 100644 (file)
@@ -4,19 +4,26 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+    int_or_none,
+    js_to_json,
+    qualities,
+)
 
 
 class PornHdIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)'
+    _VALID_URL = r'http://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)(?:/(?P<display_id>.+))?'
     _TEST = {
         'url': 'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
         'md5': '956b8ca569f7f4d8ec563e2c41598441',
         'info_dict': {
             'id': '1962',
+            'display_id': 'sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
             'ext': 'mp4',
             'title': 'Sierra loves doing laundry',
             'description': 'md5:8ff0523848ac2b8f9b065ba781ccf294',
+            'thumbnail': 're:^https?://.*\.jpg',
+            'view_count': int,
             'age_limit': 18,
         }
     }
@@ -24,54 +31,36 @@ class PornHdIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
 
-        webpage = self._download_webpage(url, video_id)
-
-        title = self._og_search_title(webpage)
-        TITLE_SUFFIX = ' porn HD Video | PornHD.com '
-        if title.endswith(TITLE_SUFFIX):
-            title = title[:-len(TITLE_SUFFIX)]
+        webpage = self._download_webpage(url, display_id or video_id)
 
+        title = self._html_search_regex(
+            r'<title>(.+) porn HD.+?</title>', webpage, 'title')
         description = self._html_search_regex(
             r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
         view_count = int_or_none(self._html_search_regex(
-            r'(\d+) views      </span>', webpage, 'view count', fatal=False))
-
-        formats = [
-            {
-                'url': format_url,
-                'ext': format.lower(),
-                'format_id': '%s-%s' % (format.lower(), quality.lower()),
-                'quality': 1 if quality.lower() == 'high' else 0,
-            } for format, quality, format_url in re.findall(
-                r'var __video([\da-zA-Z]+?)(Low|High)StreamUrl = \'(http://.+?)\?noProxy=1\'', webpage)
-        ]
-
-        mobj = re.search(r'flashVars = (?P<flashvars>{.+?});', webpage)
-        if mobj:
-            flashvars = json.loads(mobj.group('flashvars'))
-            formats.extend([
-                {
-                    'url': flashvars['hashlink'].replace('?noProxy=1', ''),
-                    'ext': 'flv',
-                    'format_id': 'flv-low',
-                    'quality': 0,
-                },
-                {
-                    'url': flashvars['hd'].replace('?noProxy=1', ''),
-                    'ext': 'flv',
-                    'format_id': 'flv-high',
-                    'quality': 1,
-                }
-            ])
-            thumbnail = flashvars['urlWallpaper']
-        else:
-            thumbnail = self._og_search_thumbnail(webpage)
+            r'(\d+) views\s*</span>', webpage, 'view count', fatal=False))
+        thumbnail = self._search_regex(
+            r"'poster'\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)
 
+        quality = qualities(['sd', 'hd'])
+        sources = json.loads(js_to_json(self._search_regex(
+            r"(?s)'sources'\s*:\s*(\{.+?\})\s*\}\);", webpage, 'sources')))
+        formats = []
+        for container, s in sources.items():
+            for qname, video_url in s.items():
+                formats.append({
+                    'url': video_url,
+                    'container': container,
+                    'format_id': '%s-%s' % (container, qname),
+                    'quality': quality(qname),
+                })
         self._sort_formats(formats)
 
         return {
             'id': video_id,
+            'display_id': display_id,
             'title': title,
             'description': description,
             'thumbnail': thumbnail,
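
A short illustration (separate from the patch) of the qualities() helper the
rewrite leans on: it turns an ordered list of quality names into a scoring
function, so _sort_formats can rank 'hd' above 'sd' and push unknown labels
to the bottom.

from youtube_dl.utils import qualities

quality = qualities(['sd', 'hd'])
print(quality('sd'))       # 0
print(quality('hd'))       # 1
print(quality('unknown'))  # -1, unrecognized names sort lowest
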
index 4118ee9560e03d2fa1eea171766ef4893e274aa5..2ca15b717ec5dd9a36b6aa2bfc7e9019148c0493 100644 (file)
@@ -16,13 +16,14 @@ from ..aes import (
 
 
 class PornHubIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>pornhub\.com/view_video\.php\?viewkey=(?P<videoid>[0-9a-f]+))'
+    _VALID_URL = r'^https?://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=(?P<id>[0-9a-f]+)'
     _TEST = {
         'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
-        'file': '648719015.mp4',
         'md5': '882f488fa1f0026f023f33576004a2ed',
         'info_dict': {
-            "uploader": "BABES-COM",
+            'id': '648719015',
+            'ext': 'mp4',
+            "uploader": "Babes",
             "title": "Seductive Indian beauty strips down and fingers her pink pussy",
             "age_limit": 18
         }
@@ -35,9 +36,7 @@ class PornHubIE(InfoExtractor):
         return count
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
-        url = 'http://www.' + mobj.group('url')
+        video_id = self._match_id(url)
 
         req = compat_urllib_request.Request(url)
         req.add_header('Cookie', 'age_verified=1')
@@ -45,7 +44,7 @@ class PornHubIE(InfoExtractor):
 
         video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
         video_uploader = self._html_search_regex(
-            r'(?s)From:&nbsp;.+?<(?:a href="/users/|<span class="username)[^>]+>(.+?)<',
+            r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|<span class="username)[^>]+>(.+?)<',
             webpage, 'uploader', fatal=False)
         thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
         if thumbnail:
@@ -57,7 +56,7 @@ class PornHubIE(InfoExtractor):
         comment_count = self._extract_count(
             r'All comments \(<var class="videoCommentCount">([\d,\.]+)</var>', webpage, 'comment')
 
-        video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
+        video_urls = list(map(compat_urllib_parse.unquote, re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
         if webpage.find('"encrypted":true') != -1:
             password = compat_urllib_parse.unquote_plus(self._html_search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
             video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
index 35dc5a9ffafb32d36e30f51988291dded6a6d18c..5253aa3d30062ec7937c5e5f48d85998923c6e8f 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -9,15 +11,16 @@ from ..utils import (
 
 
 class PornotubeIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
+    _VALID_URL = r'https?://(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
     _TEST = {
-        u'url': u'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
-        u'file': u'1689755.flv',
-        u'md5': u'374dd6dcedd24234453b295209aa69b6',
-        u'info_dict': {
-            u"upload_date": u"20090708", 
-            u"title": u"Marilyn-Monroe-Bathing",
-            u"age_limit": 18
+        'url': 'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
+        'md5': '374dd6dcedd24234453b295209aa69b6',
+        'info_dict': {
+            'id': '1689755',
+            'ext': 'flv',
+            'upload_date': '20090708',
+            'title': 'Marilyn-Monroe-Bathing',
+            'age_limit': 18
         }
     }
 
@@ -32,22 +35,22 @@ class PornotubeIE(InfoExtractor):
 
         # Get the video URL
         VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
-        video_url = self._search_regex(VIDEO_URL_RE, webpage, u'video url')
+        video_url = self._search_regex(VIDEO_URL_RE, webpage, 'video url')
         video_url = compat_urllib_parse.unquote(video_url)
 
-        #Get the uploaded date
+        # Get the upload date
         VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
-        upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, u'upload date', fatal=False)
-        if upload_date: upload_date = unified_strdate(upload_date)
+        upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, 'upload date', fatal=False)
+        if upload_date:
+            upload_date = unified_strdate(upload_date)
         age_limit = self._rta_search(webpage)
 
-        info = {'id': video_id,
-                'url': video_url,
-                'uploader': None,
-                'upload_date': upload_date,
-                'title': video_title,
-                'ext': 'flv',
-                'format': 'flv',
-                'age_limit': age_limit}
-
-        return [info]
+        return {
+            'id': video_id,
+            'url': video_url,
+            'upload_date': upload_date,
+            'title': video_title,
+            'ext': 'flv',
+            'format': 'flv',
+            'age_limit': age_limit,
+        }
diff --git a/youtube_dl/extractor/pornoxo.py b/youtube_dl/extractor/pornoxo.py
new file mode 100644 (file)
index 0000000..202f586
--- /dev/null
@@ -0,0 +1,65 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    str_to_int,
+)
+
+
+class PornoXOIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?pornoxo\.com/videos/(?P<id>\d+)/(?P<display_id>[^/]+)\.html'
+    _TEST = {
+        'url': 'http://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary.html',
+        'md5': '582f28ecbaa9e6e24cb90f50f524ce87',
+        'info_dict': {
+            'id': '7564',
+            'ext': 'flv',
+            'title': 'Striptease From Sexy Secretary!',
+            'description': 'Striptease From Sexy Secretary!',
+            'categories': list,  # NSFW
+            'thumbnail': 're:https?://.*\.jpg$',
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(
+            r'\'file\'\s*:\s*"([^"]+)"', webpage, 'video_url')
+
+        title = self._html_search_regex(
+            r'<title>([^<]+)\s*-\s*PornoXO', webpage, 'title')
+
+        description = self._html_search_regex(
+            r'<meta name="description" content="([^"]+)\s*featuring',
+            webpage, 'description', fatal=False)
+
+        thumbnail = self._html_search_regex(
+            r'\'image\'\s*:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
+
+        view_count = str_to_int(self._html_search_regex(
+            r'[vV]iews:\s*([0-9,]+)', webpage, 'view count', fatal=False))
+
+        categories_str = self._html_search_regex(
+            r'<meta name="description" content=".*featuring\s*([^"]+)"',
+            webpage, 'categories', fatal=False)
+        categories = (
+            None if categories_str is None
+            else categories_str.split(','))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'categories': categories,
+            'view_count': view_count,
+            'age_limit': 18,
+        }
diff --git a/youtube_dl/extractor/promptfile.py b/youtube_dl/extractor/promptfile.py
new file mode 100644 (file)
index 0000000..7fcde08
--- /dev/null
@@ -0,0 +1,65 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    determine_ext,
+    compat_urllib_parse,
+    compat_urllib_request,
+)
+
+
+class PromptFileIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?promptfile\.com/l/(?P<id>[0-9A-Z\-]+)'
+    _TEST = {
+        'url': 'http://www.promptfile.com/l/D21B4746E9-F01462F0FF',
+        'md5': 'd1451b6302da7215485837aaea882c4c',
+        'info_dict': {
+            'id': 'D21B4746E9-F01462F0FF',
+            'ext': 'mp4',
+            'title': 'Birds.mp4',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        if re.search(r'<div.+id="not_found_msg".+>(?!We are).+</div>[^-]', webpage) is not None:
+            raise ExtractorError('Video %s does not exist' % video_id,
+                                 expected=True)
+
+        fields = dict(re.findall(r'''(?x)type="hidden"\s+
+            name="(.+?)"\s+
+            value="(.*?)"
+            ''', webpage))
+        post = compat_urllib_parse.urlencode(fields)
+        req = compat_urllib_request.Request(url, post)
+        req.add_header('Content-type', 'application/x-www-form-urlencoded')
+        webpage = self._download_webpage(
+            req, video_id, 'Downloading video page')
+
+        url = self._html_search_regex(r'url:\s*\'([^\']+)\'', webpage, 'URL')
+        title = self._html_search_regex(
+            r'<span.+title="([^"]+)">', webpage, 'title')
+        thumbnail = self._html_search_regex(
+            r'<div id="player_overlay">.*button>.*?<img src="([^"]+)"',
+            webpage, 'thumbnail', fatal=False, flags=re.DOTALL)
+
+        formats = [{
+            'format_id': 'sd',
+            'url': url,
+            'ext': determine_ext(title),
+        }]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'formats': formats,
+        }
index da64a1a7b4c0d8bceb89415894c84d651c7ac566..1262793c820f335dfe940eafb224495af168495e 100644 (file)
@@ -85,7 +85,7 @@ class ProSiebenSat1IE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Im Interview: Kai Wiesinger',
                 'description': 'md5:e4e5370652ec63b95023e914190b4eb9',
-                'upload_date': '20140225',
+                'upload_date': '20140203',
                 'duration': 522.56,
             },
             'params': {
@@ -100,7 +100,7 @@ class ProSiebenSat1IE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Jagd auf Fertigkost im Elsthal - Teil 2',
                 'description': 'md5:2669cde3febe9bce13904f701e774eb6',
-                'upload_date': '20140225',
+                'upload_date': '20141014',
                 'duration': 2410.44,
             },
             'params': {
@@ -144,8 +144,7 @@ class ProSiebenSat1IE(InfoExtractor):
                 'id': '2156342',
                 'ext': 'mp4',
                 'title': 'Kurztrips zum Valentinstag',
-                'description': 'md5:8ba6301e70351ae0bedf8da00f7ba528',
-                'upload_date': '20130206',
+                'description': 'Romantischer Kurztrip zum Valentinstag? Wir verraten, was sich hier wirklich lohnt.',
                 'duration': 307.24,
             },
             'params': {
@@ -153,12 +152,22 @@ class ProSiebenSat1IE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        {
+            'url': 'http://www.prosieben.de/tv/joko-gegen-klaas/videos/playlists/episode-8-ganze-folge-playlist',
+            'info_dict': {
+                'id': '439664',
+                'title': 'Episode 8 - Ganze Folge - Playlist',
+                'description': 'md5:63b8963e71f481782aeea877658dec84',
+            },
+            'playlist_count': 2,
+        },
     ]
 
     _CLIPID_REGEXES = [
         r'"clip_id"\s*:\s+"(\d+)"',
         r'clipid: "(\d+)"',
         r'clip[iI]d=(\d+)',
+        r"'itemImageUrl'\s*:\s*'/dynamic/thumbnails/full/\d+/(\d+)",
     ]
     _TITLE_REGEXES = [
         r'<h2 class="subtitle" itemprop="name">\s*(.+?)</h2>',
@@ -179,14 +188,20 @@ class ProSiebenSat1IE(InfoExtractor):
         r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
         r'(\d{2}\.\d{2}\.\d{4}) \| \d{2}:\d{2} Min<br/>',
     ]
+    _PAGE_TYPE_REGEXES = [
+        r'<meta name="page_type" content="([^"]+)">',
+        r"'itemType'\s*:\s*'([^']*)'",
+    ]
+    _PLAYLIST_ID_REGEXES = [
+        r'content[iI]d=(\d+)',
+        r"'itemId'\s*:\s*'([^']*)'",
+    ]
+    _PLAYLIST_CLIP_REGEXES = [
+        r'(?s)data-qvt=.+?<a href="([^"]+)"',
+    ]
 
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        page = self._download_webpage(url, video_id, 'Downloading page')
-
-        clip_id = self._html_search_regex(self._CLIPID_REGEXES, page, 'clip id')
+    def _extract_clip(self, url, webpage):
+        clip_id = self._html_search_regex(self._CLIPID_REGEXES, webpage, 'clip id')
 
         access_token = 'testclient'
         client_name = 'kolibri-1.2.5'
@@ -235,12 +250,12 @@ class ProSiebenSat1IE(InfoExtractor):
 
         urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')
 
-        title = self._html_search_regex(self._TITLE_REGEXES, page, 'title')
-        description = self._html_search_regex(self._DESCRIPTION_REGEXES, page, 'description', fatal=False)
-        thumbnail = self._og_search_thumbnail(page)
+        title = self._html_search_regex(self._TITLE_REGEXES, webpage, 'title')
+        description = self._html_search_regex(self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
+        thumbnail = self._og_search_thumbnail(webpage)
 
         upload_date = unified_strdate(self._html_search_regex(
-            self._UPLOAD_DATE_REGEXES, page, 'upload date', fatal=False))
+            self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))
 
         formats = []
 
@@ -249,7 +264,7 @@ class ProSiebenSat1IE(InfoExtractor):
             urls_sources = urls_sources.values()
 
         def fix_bitrate(bitrate):
-            return bitrate / 1000 if bitrate % 1000 == 0 else bitrate
+            return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate
 
         for source in urls_sources:
             protocol = source['protocol']
@@ -283,4 +298,32 @@ class ProSiebenSat1IE(InfoExtractor):
             'upload_date': upload_date,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
+
+    def _extract_playlist(self, url, webpage):
+        playlist_id = self._html_search_regex(
+            self._PLAYLIST_ID_REGEXES, webpage, 'playlist id')
+        for regex in self._PLAYLIST_CLIP_REGEXES:
+            playlist_clips = re.findall(regex, webpage)
+            if playlist_clips:
+                title = self._html_search_regex(
+                    self._TITLE_REGEXES, webpage, 'title')
+                description = self._html_search_regex(
+                    self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
+                entries = [
+                    self.url_result(
+                        re.match('(.+?//.+?)/', url).group(1) + clip_path,
+                        'ProSiebenSat1')
+                    for clip_path in playlist_clips]
+                return self.playlist_result(entries, playlist_id, title, description)
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        page_type = self._search_regex(
+            self._PAGE_TYPE_REGEXES, webpage,
+            'page type', default='clip').lower()
+        if page_type == 'clip':
+            return self._extract_clip(url, webpage)
+        elif page_type == 'playlist':
+            return self._extract_playlist(url, webpage)
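
A standalone sketch (invented clip paths) of the base-URL trick inside
_extract_playlist: each data-qvt anchor carries a site-relative clip path,
so the scheme and host are sliced off the playlist URL and the path is
appended to form each entry's absolute URL.

import re

playlist_url = 'http://www.prosieben.de/tv/joko-gegen-klaas/videos/playlists/episode-8-ganze-folge-playlist'
clip_paths = ['/tv/joko-gegen-klaas/videos/teil-1', '/tv/joko-gegen-klaas/videos/teil-2']

base = re.match('(.+?//.+?)/', playlist_url).group(1)
entries = [base + path for path in clip_paths]
print(base)     # http://www.prosieben.de
print(entries)
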
diff --git a/youtube_dl/extractor/quickvid.py b/youtube_dl/extractor/quickvid.py
new file mode 100644 (file)
index 0000000..3bc7806
--- /dev/null
@@ -0,0 +1,51 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urlparse,
+    determine_ext,
+    int_or_none,
+)
+
+
+class QuickVidIE(InfoExtractor):
+    _VALID_URL = r'https?://(www\.)?quickvid\.org/watch\.php\?v=(?P<id>[a-zA-Z_0-9-]+)'
+    _TEST = {
+        'url': 'http://quickvid.org/watch.php?v=sUQT3RCG8dx',
+        'md5': 'c0c72dd473f260c06c808a05d19acdc5',
+        'info_dict': {
+            'id': 'sUQT3RCG8dx',
+            'ext': 'mp4',
+            'title': 'Nick Offerman\'s Summer Reading Recap',
+            'thumbnail': 're:^https?://.*\.(?:png|jpg|gif)$',
+            'view_count': int,
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(r'<h2>(.*?)</h2>', webpage, 'title')
+        view_count = int_or_none(self._html_search_regex(
+            r'(?s)<div id="views">(.*?)</div>',
+            webpage, 'view count', fatal=False))
+        video_code = self._search_regex(
+            r'(?s)<video id="video"[^>]*>(.*?)</video>', webpage, 'video code')
+        formats = [
+            {
+                'url': compat_urlparse.urljoin(url, src),
+                'format_id': determine_ext(src, None),
+            } for src in re.findall('<source\s+src="([^"]+)"', video_code)
+        ]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'view_count': view_count,
+        }
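
A minimal sketch (invented markup) of how the QuickVid extractor resolves
<source src="..."> values: they can be relative, so each is joined against
the page URL before becoming a format entry.

import re

try:
    from urllib.parse import urljoin  # Python 3
except ImportError:
    from urlparse import urljoin  # Python 2

page_url = 'http://quickvid.org/watch.php?v=sUQT3RCG8dx'
video_code = '<source src="/media/sUQT3RCG8dx.mp4" type="video/mp4">'

sources = [urljoin(page_url, src)
           for src in re.findall(r'<source\s+src="([^"]+)"', video_code)]
print(sources)  # ['http://quickvid.org/media/sUQT3RCG8dx.mp4']
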
diff --git a/youtube_dl/extractor/radiode.py b/youtube_dl/extractor/radiode.py
new file mode 100644 (file)
index 0000000..f95bc94
--- /dev/null
@@ -0,0 +1,55 @@
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+
+
+class RadioDeIE(InfoExtractor):
+    IE_NAME = 'radio.de'
+    _VALID_URL = r'https?://(?P<id>.+?)\.(?:radio\.(?:de|at|fr|pt|es|pl|it)|rad\.io)'
+    _TEST = {
+        'url': 'http://ndr2.radio.de/',
+        'md5': '3b4cdd011bc59174596b6145cda474a4',
+        'info_dict': {
+            'id': 'ndr2',
+            'ext': 'mp3',
+            'title': 're:^NDR 2 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+            'description': 'md5:591c49c702db1a33751625ebfb67f273',
+            'thumbnail': 're:^https?://.*\.png',
+        },
+        'params': {
+            'skip_download': True,
+        }
+    }
+
+    def _real_extract(self, url):
+        radio_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, radio_id)
+
+        broadcast = json.loads(self._search_regex(
+            r'_getBroadcast\s*=\s*function\(\s*\)\s*{\s*return\s+({.+?})\s*;\s*}',
+            webpage, 'broadcast'))
+
+        title = self._live_title(broadcast['name'])
+        description = broadcast.get('description') or broadcast.get('shortDescription')
+        thumbnail = broadcast.get('picture4Url') or broadcast.get('picture4TransUrl')
+
+        formats = [{
+            'url': stream['streamUrl'],
+            'ext': stream['streamContentFormat'].lower(),
+            'acodec': stream['streamContentFormat'],
+            'abr': stream['bitRate'],
+            'asr': stream['sampleRate']
+        } for stream in broadcast['streamUrls']]
+        self._sort_formats(formats)
+
+        return {
+            'id': radio_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'is_live': True,
+            'formats': formats,
+        }
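
A plain-Python sketch of what InfoExtractor._live_title does for this
extractor: a live stream has no fixed title, so the station name is stamped
with the current time, which is why the test above matches the title with a
regex instead of a literal string.

from datetime import datetime

def live_title(name):
    # Mirrors the helper's behaviour: append 'YYYY-MM-DD HH:MM'.
    return name + ' ' + datetime.now().strftime('%Y-%m-%d %H:%M')

print(live_title('NDR 2'))  # e.g. 'NDR 2 2014-12-12 17:16'
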
index ba3dd707f8b5d38363dacce62e9fe684265c6a96..2d39ecfe4faa537cb86f156df930dc2b19241a0b 100644 (file)
@@ -119,4 +119,4 @@ class RaiIE(SubtitlesInfoExtractor):
             if captions.endswith(STL_EXT):
                 captions = captions[:-len(STL_EXT)] + SRT_EXT
             subtitles['it'] = 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions)
-        return subtitles
\ No newline at end of file
+        return subtitles
index 2c53ed2e1147a50248a4294c838ceef67688a356..0f8f3ebde0999e8599eaa86516dd2b52524c9b40 100644 (file)
@@ -33,7 +33,7 @@ class RBMARadioIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         json_data = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$',
-            webpage, 'json data', flags=re.MULTILINE)
+                                       webpage, 'json data', flags=re.MULTILINE)
 
         try:
             data = json.loads(json_data)
index d1e12dd8d5a6699ba3caa8d041c8bc039e996fc7..846b76c81528431c0faf8ea3fc9bbd6b017db099 100644 (file)
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -9,32 +7,23 @@ class RedTubeIE(InfoExtractor):
     _VALID_URL = r'http://(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
     _TEST = {
         'url': 'http://www.redtube.com/66418',
-        'file': '66418.mp4',
-        # md5 varies from time to time, as in
-        # https://travis-ci.org/rg3/youtube-dl/jobs/14052463#L295
-        #'md5': u'7b8c22b5e7098a3e1c09709df1126d2d',
         'info_dict': {
+            'id': '66418',
+            'ext': 'mp4',
             "title": "Sucked on a toilet",
             "age_limit": 18,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
-        video_extension = 'mp4'
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        self.report_extraction(video_id)
-
         video_url = self._html_search_regex(
-            r'<source src="(.+?)" type="video/mp4">', webpage, u'video URL')
-
+            r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
         video_title = self._html_search_regex(
             r'<h1 class="videoTitle[^"]*">(.+?)</h1>',
-            webpage, u'title')
-
+            webpage, 'title')
         video_thumbnail = self._og_search_thumbnail(webpage)
 
         # No self-labeling, but they describe themselves as
@@ -44,7 +33,7 @@ class RedTubeIE(InfoExtractor):
         return {
             'id': video_id,
             'url': video_url,
-            'ext': video_extension,
+            'ext': 'mp4',
             'title': video_title,
             'thumbnail': video_thumbnail,
             'age_limit': age_limit,
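
The boilerplate removed in this hunk is exactly what InfoExtractor._match_id
wraps: matching _VALID_URL against the URL and returning its 'id' group. A
plain-Python restatement under that assumption:

import re

_VALID_URL = r'http://(?:www\.)?redtube\.com/(?P<id>[0-9]+)'

def match_id(url):
    return re.match(_VALID_URL, url).group('id')

print(match_id('http://www.redtube.com/66418'))  # 66418
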
index 49cf427a1141ee6f8fd14aca490e088dedbe3d73..ec7e7df7bc1f7a6b8ffdb4fc46b24a9bf8cb5148 100644 (file)
@@ -1,23 +1,23 @@
 from __future__ import unicode_literals
 
 import re
-import time
 
 from .common import InfoExtractor
-from ..utils import strip_jsonp
+from ..utils import str_or_none
 
 
 class ReverbNationIE(InfoExtractor):
     _VALID_URL = r'^https?://(?:www\.)?reverbnation\.com/.*?/song/(?P<id>\d+).*?$'
     _TESTS = [{
         'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa',
-        'file': '16965047.mp3',
         'md5': '3da12ebca28c67c111a7f8b262d3f7a7',
         'info_dict': {
+            "id": "16965047",
+            "ext": "mp3",
             "title": "MONA LISA",
             "uploader": "ALKILADOS",
-            "uploader_id": 216429,
-            "thumbnail": "//gp1.wac.edgecastcdn.net/802892/production_public/Photo/13761700/image/1366002176_AVATAR_MONA_LISA.jpg"
+            "uploader_id": "216429",
+            "thumbnail": "re:^https://gp1\.wac\.edgecastcdn\.net/.*?\.jpg$"
         },
     }]
 
@@ -26,10 +26,8 @@ class ReverbNationIE(InfoExtractor):
         song_id = mobj.group('id')
 
         api_res = self._download_json(
-            'https://api.reverbnation.com/song/%s?callback=api_response_5&_=%d'
-                % (song_id, int(time.time() * 1000)),
+            'https://api.reverbnation.com/song/%s' % song_id,
             song_id,
-            transform_source=strip_jsonp,
             note='Downloading information of song %s' % song_id
         )
 
@@ -38,8 +36,9 @@ class ReverbNationIE(InfoExtractor):
             'title': api_res.get('name'),
             'url': api_res.get('url'),
             'uploader': api_res.get('artist', {}).get('name'),
-            'uploader_id': api_res.get('artist', {}).get('id'),
-            'thumbnail': api_res.get('image', api_res.get('thumbnail')),
+            'uploader_id': str_or_none(api_res.get('artist', {}).get('id')),
+            'thumbnail': self._proto_relative_url(
+                api_res.get('image', api_res.get('thumbnail'))),
             'ext': 'mp3',
             'vcodec': 'none',
         }
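
A small sketch (invented values) of the two normalizations introduced here:
the artist ID is coerced to a string with str_or_none, and the
protocol-relative thumbnail gets an explicit scheme in the way the
_proto_relative_url helper does.

from youtube_dl.utils import str_or_none

def proto_relative_url(url, scheme='http:'):
    # Mirrors the InfoExtractor helper: prefix scheme-less '//' URLs.
    if url.startswith('//'):
        return scheme + url
    return url

print(str_or_none(216429))  # '216429'
print(proto_relative_url('//gp1.wac.edgecastcdn.net/avatar.jpg'))
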
index 9fbdb9fcbbbc8fc5ef1847a2df8e8f7f929d3b75..59dc137cc225889feb9428dd70f42a91451a951d 100644 (file)
@@ -41,4 +41,3 @@ class RingTVIE(InfoExtractor):
             'thumbnail': thumbnail_url,
             'description': description,
         }
-
index a6ad594659250254b6cee9efd31cdcb8364da89b..962b524e94d2bddd12781b8dace06a1b28bc2c71 100644 (file)
@@ -1,43 +1,43 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
-    clean_html,
-    compat_parse_qs,
-)
+from ..compat import compat_urllib_parse_unquote
 
 
 class Ro220IE(InfoExtractor):
     IE_NAME = '220.ro'
-    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<video_id>[^/]+)'
+    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
     _TEST = {
-        "url": "http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/",
-        'file': 'LYV6doKo7f.mp4',
+        'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
         'md5': '03af18b73a07b4088753930db7a34add',
         'info_dict': {
-            "title": "Luati-le Banii sez 4 ep 1",
-            "description": "re:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$",
+            'id': 'LYV6doKo7f',
+            'ext': 'mp4',
+            'title': 'Luati-le Banii sez 4 ep 1',
+            'description': 're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
-        flashVars_str = self._search_regex(
-            r'<param name="flashVars" value="([^"]+)"',
-            webpage, 'flashVars')
-        flashVars = compat_parse_qs(flashVars_str)
+        url = compat_urllib_parse_unquote(self._search_regex(
+            r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        formats = [{
+            'format_id': 'sd',
+            'url': url,
+            'ext': 'mp4',
+        }]
 
         return {
-            '_type': 'video',
             'id': video_id,
-            'ext': 'mp4',
-            'url': flashVars['videoURL'][0],
-            'title': flashVars['title'][0],
-            'description': clean_html(flashVars['desc'][0]),
-            'thumbnail': flashVars['preview'][0],
+            'formats': formats,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
         }
diff --git a/youtube_dl/extractor/rtlnl.py b/youtube_dl/extractor/rtlnl.py
new file mode 100644 (file)
index 0000000..d029b0e
--- /dev/null
@@ -0,0 +1,71 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import parse_duration
+
+
+class RtlXlIE(InfoExtractor):
+    IE_NAME = 'rtlxl.nl'
+    _VALID_URL = r'https?://www\.rtlxl\.nl/#!/[^/]+/(?P<uuid>[^/?]+)'
+
+    _TEST = {
+        'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/6e4203a6-0a5e-3596-8424-c599a59e0677',
+        'md5': 'cc16baa36a6c169391f0764fa6b16654',
+        'info_dict': {
+            'id': '6e4203a6-0a5e-3596-8424-c599a59e0677',
+            'ext': 'mp4',
+            'title': 'RTL Nieuws - Laat',
+            'description': 'md5:6b61f66510c8889923b11f2778c72dc5',
+            'timestamp': 1408051800,
+            'upload_date': '20140814',
+            'duration': 576.880,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        uuid = mobj.group('uuid')
+
+        info = self._download_json(
+            'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=flash/' % uuid,
+            uuid)
+
+        material = info['material'][0]
+        episode_info = info['episodes'][0]
+
+        progname = info['abstracts'][0]['name']
+        subtitle = material['title'] or info['episodes'][0]['name']
+
+        # Use unencrypted m3u8 streams (See https://github.com/rg3/youtube-dl/issues/4118)
+        videopath = material['videopath'].replace('.f4m', '.m3u8')
+        m3u8_url = 'http://manifest.us.rtl.nl' + videopath
+
+        formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4')
+
+        video_urlpart = videopath.split('/flash/')[1][:-5]
+        PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4'
+
+        formats.extend([
+            {
+                'url': PG_URL_TEMPLATE % ('a2m', video_urlpart),
+                'format_id': 'pg-sd',
+            },
+            {
+                'url': PG_URL_TEMPLATE % ('a3m', video_urlpart),
+                'format_id': 'pg-hd',
+                'quality': 0,
+            }
+        ])
+
+        self._sort_formats(formats)
+
+        return {
+            'id': uuid,
+            'title': '%s - %s' % (progname, subtitle),
+            'formats': formats,
+            'timestamp': material['original_date'],
+            'description': episode_info['synopsis'],
+            'duration': parse_duration(material.get('duration')),
+        }
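
A small sketch (made-up video path) of the URL rewriting above: the RTL API
reports an encrypted HDS manifest (.f4m); swapping the extension for .m3u8
yields an unencrypted HLS manifest on the same CDN path, and the part after
/flash/ doubles as the slug for the progressive MP4 downloads.

videopath = '/autoplayer/components/flash/rtlnieuws/sample_clip.f4m'.replace(
    '.f4m', '.m3u8')

m3u8_url = 'http://manifest.us.rtl.nl' + videopath
slug = videopath.split('/flash/')[1][:-5]  # strip the '.m3u8' suffix

print(m3u8_url)
print('http://pg.us.rtl.nl/rtlxl/network/a2m/progressive/%s.mp4' % slug)
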
index a45884b251fa355e04c65de554f0e9cbfb5406bb..285c3c4bebf8ec7c2cd793d2b40739222c18a5ca 100644 (file)
@@ -81,7 +81,7 @@ class RTLnowIE(InfoExtractor):
                 'id': '99205',
                 'ext': 'flv',
                 'title': 'Medicopter 117 - Angst!',
-                'description': 'md5:895b1df01639b5f61a04fc305a5cb94d',
+                'description': 're:^Im Therapiezentrum \'Sonnalm\' kommen durch eine Unachtsamkeit die für die B.handlung mit Phobikern gehaltenen Voglespinnen frei\. Eine Ausreißerin',
                 'thumbnail': 'http://autoimg.static-fra.de/superrtlnow/287529/1500x1500/image2.jpg',
                 'upload_date': '20080928',
                 'duration': 2691,
@@ -122,7 +122,7 @@ class RTLnowIE(InfoExtractor):
         playerdata = self._download_xml(playerdata_url, video_id, 'Downloading player data XML')
 
         videoinfo = playerdata.find('./playlist/videoinfo')
-        
+
         formats = []
         for filename in videoinfo.findall('filename'):
             mobj = re.search(r'(?P<url>rtmpe://(?:[^/]+/){2})(?P<play_path>.+)', filename.text)
@@ -153,4 +153,4 @@ class RTLnowIE(InfoExtractor):
             'upload_date': upload_date,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index e8199b11446e503897500e0e423b3cbd5d52b441..dc59a5e5c0da8c6e2001b411a03451604ca190f3 100644 (file)
@@ -15,7 +15,7 @@ from ..utils import (
 
 class RTSIE(InfoExtractor):
     IE_DESC = 'RTS.ch'
-    _VALID_URL = r'^https?://(?:www\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-.*?\.html'
+    _VALID_URL = r'https?://(?:www\.)?rts\.ch/(?:(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+))'
 
     _TESTS = [
         {
@@ -23,6 +23,7 @@ class RTSIE(InfoExtractor):
             'md5': '753b877968ad8afaeddccc374d4256a5',
             'info_dict': {
                 'id': '3449373',
+                'display_id': 'les-enfants-terribles',
                 'ext': 'mp4',
                 'duration': 1488,
                 'title': 'Les Enfants Terribles',
@@ -30,7 +31,8 @@ class RTSIE(InfoExtractor):
                 'uploader': 'Divers',
                 'upload_date': '19680921',
                 'timestamp': -40280400,
-                'thumbnail': 're:^https?://.*\.image'
+                'thumbnail': 're:^https?://.*\.image',
+                'view_count': int,
             },
         },
         {
@@ -38,6 +40,7 @@ class RTSIE(InfoExtractor):
             'md5': 'c148457a27bdc9e5b1ffe081a7a8337b',
             'info_dict': {
                 'id': '5624067',
+                'display_id': 'entre-ciel-et-mer',
                 'ext': 'mp4',
                 'duration': 3720,
                 'title': 'Les yeux dans les cieux - Mon homard au Canada',
@@ -45,7 +48,8 @@ class RTSIE(InfoExtractor):
                 'uploader': 'Passe-moi les jumelles',
                 'upload_date': '20140404',
                 'timestamp': 1396635300,
-                'thumbnail': 're:^https?://.*\.image'
+                'thumbnail': 're:^https?://.*\.image',
+                'view_count': int,
             },
         },
         {
@@ -53,6 +57,7 @@ class RTSIE(InfoExtractor):
             'md5': 'b4326fecd3eb64a458ba73c73e91299d',
             'info_dict': {
                 'id': '5745975',
+                'display_id': '1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski',
                 'ext': 'mp4',
                 'duration': 48,
                 'title': '1/2, Kloten - Fribourg (5-2): second but pour Gottéron par Kwiatowski',
@@ -60,7 +65,8 @@ class RTSIE(InfoExtractor):
                 'uploader': 'Hockey',
                 'upload_date': '20140403',
                 'timestamp': 1396556882,
-                'thumbnail': 're:^https?://.*\.image'
+                'thumbnail': 're:^https?://.*\.image',
+                'view_count': int,
             },
             'skip': 'Blocked outside Switzerland',
         },
@@ -69,6 +75,7 @@ class RTSIE(InfoExtractor):
             'md5': '9bb06503773c07ce83d3cbd793cebb91',
             'info_dict': {
                 'id': '5745356',
+                'display_id': 'londres-cachee-par-un-epais-smog',
                 'ext': 'mp4',
                 'duration': 33,
                 'title': 'Londres cachée par un épais smog',
@@ -76,7 +83,8 @@ class RTSIE(InfoExtractor):
                 'uploader': 'Le Journal en continu',
                 'upload_date': '20140403',
                 'timestamp': 1396537322,
-                'thumbnail': 're:^https?://.*\.image'
+                'thumbnail': 're:^https?://.*\.image',
+                'view_count': int,
             },
         },
         {
@@ -84,6 +92,7 @@ class RTSIE(InfoExtractor):
             'md5': 'dd8ef6a22dff163d063e2a52bc8adcae',
             'info_dict': {
                 'id': '5706148',
+                'display_id': 'urban-hippie-de-damien-krisl-03-04-2014',
                 'ext': 'mp3',
                 'duration': 123,
                 'title': '"Urban Hippie", de Damien Krisl',
@@ -92,22 +101,44 @@ class RTSIE(InfoExtractor):
                 'timestamp': 1396551600,
             },
         },
+        {
+            'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
+            'md5': '968777c8779e5aa2434be96c54e19743',
+            'info_dict': {
+                'id': '6348260',
+                'display_id': 'le-19h30',
+                'ext': 'mp4',
+                'duration': 1796,
+                'title': 'Le 19h30',
+                'description': '',
+                'uploader': 'Le 19h30',
+                'upload_date': '20141201',
+                'timestamp': 1417458600,
+                'thumbnail': 're:^https?://.*\.image',
+                'view_count': int,
+            },
+        },
+        {
+            'url': 'http://www.rts.ch/play/tv/le-19h30/video/le-chantier-du-nouveau-parlement-vaudois-a-permis-une-trouvaille-historique?id=6348280',
+            'only_matching': True,
+        }
     ]
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = m.group('id') or m.group('id_new')
+        display_id = m.group('display_id') or m.group('display_id_new')
 
         def download_json(internal_id):
             return self._download_json(
                 'http://www.rts.ch/a/%s.html?f=json/article' % internal_id,
-                video_id)
+                display_id)
 
         all_info = download_json(video_id)
 
         # The video_id extracted from the URL is not always a real internal id
         if 'video' not in all_info and 'audio' not in all_info:
-            page = self._download_webpage(url, video_id)
+            page = self._download_webpage(url, display_id)
             internal_id = self._html_search_regex(
                 r'<(?:video|audio) data-id="([0-9]+)"', page,
                 'internal video id')
@@ -143,6 +174,7 @@ class RTSIE(InfoExtractor):
 
         return {
             'id': video_id,
+            'display_id': display_id,
             'formats': formats,
             'title': info['title'],
             'description': info.get('intro'),
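
A minimal sketch, not part of the diff, of how the reworked _VALID_URL above resolves ids for both URL shapes; the play/tv URL is taken from the new test, the archives-style URL is illustrative:

    import re

    _VALID_URL = r'https?://(?:www\.)?rts\.ch/(?:(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+))'

    m = re.match(_VALID_URL, 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260')
    assert (m.group('id') or m.group('id_new')) == '6348260'
    assert (m.group('display_id') or m.group('display_id_new')) == 'le-19h30'

    m = re.match(_VALID_URL, 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html')
    assert (m.group('id') or m.group('id_new')) == '3449373'
    assert (m.group('display_id') or m.group('display_id_new')) == 'les-enfants-terribles'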
index c2228b2f0f6a1fc9bba02cddcb5a1740cc85038d..0ce22d60c7fa995980e3f70159583f156672d76b 100644 (file)
@@ -1,21 +1,65 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import re
 import base64
+import re
+import time
 
 from .common import InfoExtractor
 from ..utils import (
     struct_unpack,
+    remove_end,
 )
 
 
+def _decrypt_url(png):
+    encrypted_data = base64.b64decode(png)
+    text_index = encrypted_data.find(b'tEXt')
+    text_chunk = encrypted_data[text_index - 4:]
+    length = struct_unpack('!I', text_chunk[:4])[0]
+    # Use bytearray to get integers when iterating in both python 2.x and 3.x
+    data = bytearray(text_chunk[8:8 + length])
+    data = [chr(b) for b in data if b != 0]
+    hash_index = data.index('#')
+    alphabet_data = data[:hash_index]
+    url_data = data[hash_index + 1:]
+
+    alphabet = []
+    e = 0
+    d = 0
+    for l in alphabet_data:
+        if d == 0:
+            alphabet.append(l)
+            d = e = (e + 1) % 4
+        else:
+            d -= 1
+    url = ''
+    f = 0
+    e = 3
+    b = 1
+    for letter in url_data:
+        if f == 0:
+            l = int(letter) * 10
+            f = 1
+        else:
+            if e == 0:
+                l += int(letter)
+                url += alphabet[l]
+                e = (b + 3) % 4
+                f = 0
+                b += 1
+            else:
+                e -= 1
+
+    return url
+
+
 class RTVEALaCartaIE(InfoExtractor):
     IE_NAME = 'rtve.es:alacarta'
     IE_DESC = 'RTVE a la carta'
     _VALID_URL = r'http://www\.rtve\.es/alacarta/videos/[^/]+/[^/]+/(?P<id>\d+)'
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.rtve.es/alacarta/videos/balonmano/o-swiss-cup-masculina-final-espana-suecia/2491869/',
         'md5': '1d49b7e1ca7a7502c56a4bf1b60f1b43',
         'info_dict': {
@@ -23,48 +67,15 @@ class RTVEALaCartaIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Balonmano - Swiss Cup masculina. Final: España-Suecia',
         },
-    }
-
-    def _decrypt_url(self, png):
-        encrypted_data = base64.b64decode(png)
-        text_index = encrypted_data.find(b'tEXt')
-        text_chunk = encrypted_data[text_index-4:]
-        length = struct_unpack('!I', text_chunk[:4])[0]
-        # Use bytearray to get integers when iterating in both python 2.x and 3.x
-        data = bytearray(text_chunk[8:8+length])
-        data = [chr(b) for b in data if b != 0]
-        hash_index = data.index('#')
-        alphabet_data = data[:hash_index]
-        url_data = data[hash_index+1:]
-
-        alphabet = []
-        e = 0
-        d = 0
-        for l in alphabet_data:
-            if d == 0:
-                alphabet.append(l)
-                d = e = (e + 1) % 4
-            else:
-                d -= 1
-        url = ''
-        f = 0
-        e = 3
-        b = 1
-        for letter in url_data:
-            if f == 0:
-                l = int(letter)*10
-                f = 1
-            else:
-                if e == 0:
-                    l += int(letter)
-                    url += alphabet[l]
-                    e = (b + 3) % 4
-                    f = 0
-                    b += 1
-                else:
-                    e -= 1
-
-        return url
+    }, {
+        'note': 'Live stream',
+        'url': 'http://www.rtve.es/alacarta/videos/television/24h-live/1694255/',
+        'info_dict': {
+            'id': '1694255',
+            'ext': 'flv',
+            'title': 'TODO',
+        }
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -74,11 +85,57 @@ class RTVEALaCartaIE(InfoExtractor):
             video_id)['page']['items'][0]
         png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/default/videos/%s.png' % video_id
         png = self._download_webpage(png_url, video_id, 'Downloading url information')
-        video_url = self._decrypt_url(png)
+        video_url = _decrypt_url(png)
 
         return {
             'id': video_id,
             'title': info['title'],
             'url': video_url,
-            'thumbnail': info['image'],
+            'thumbnail': info.get('image'),
+            'page_url': url,
+        }
+
+
+class RTVELiveIE(InfoExtractor):
+    IE_NAME = 'rtve.es:live'
+    IE_DESC = 'RTVE.es live streams'
+    _VALID_URL = r'http://www\.rtve\.es/(?:deportes/directo|noticias|television)/(?P<id>[a-zA-Z0-9-]+)'
+
+    _TESTS = [{
+        'url': 'http://www.rtve.es/noticias/directo-la-1/',
+        'info_dict': {
+            'id': 'directo-la-1',
+            'ext': 'flv',
+            'title': 're:^La 1 de TVE [0-9]{4}-[0-9]{2}-[0-9]{2}Z[0-9]{6}$',
+        },
+        'params': {
+            'skip_download': 'live stream',
+        }
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        start_time = time.gmtime()
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        player_url = self._search_regex(
+            r'<param name="movie" value="([^"]+)"/>', webpage, 'player URL')
+        title = remove_end(self._og_search_title(webpage), ' en directo')
+        title += ' ' + time.strftime('%Y-%m-%dZ%H%M%S', start_time)
+
+        vidplayer_id = self._search_regex(
+            r' id="vidplayer([0-9]+)"', webpage, 'internal video ID')
+        png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/default/videos/%s.png' % vidplayer_id
+        png = self._download_webpage(png_url, video_id, 'Downloading url information')
+        video_url = _decrypt_url(png)
+
+        return {
+            'id': video_id,
+            'ext': 'flv',
+            'title': title,
+            'url': video_url,
+            'app': 'rtve-live-live?ovpfv=2.1.2',
+            'player_url': player_url,
+            'rtmp_live': True,
         }
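
For reference, a sketch (assuming only the standard PNG chunk layout) of the tEXt framing that the module-level _decrypt_url, now shared by both RTVE extractors, relies on; it uses the stdlib struct module in place of youtube_dl.utils.struct_unpack:

    import base64
    import struct

    def text_chunk_payload(png_b64):
        # A PNG chunk is: 4-byte big-endian length, 4-byte type, <length>
        # bytes of data, 4-byte CRC. _decrypt_url locates the 'tEXt' type
        # marker and slices the payload out; the obfuscated alphabet and the
        # URL digits live in that payload, separated by '#'.
        raw = base64.b64decode(png_b64)
        type_index = raw.find(b'tEXt')
        length = struct.unpack('!I', raw[type_index - 4:type_index])[0]
        return raw[type_index + 4:type_index + 4 + length]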
index 55b58e5e6c09af2d211edaba256ad2ebce2a958d..0e470e73f538fd60d7ed34cbe515042f6abc078b 100644 (file)
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -21,19 +19,20 @@ class RUHDIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         video_url = self._html_search_regex(
             r'<param name="src" value="([^"]+)"', webpage, 'video url')
         title = self._html_search_regex(
-            r'<title>([^<]+)&nbsp;&nbsp; RUHD.ru - Видео Высокого качества №1 в России!</title>', webpage, 'title')
+            r'<title>([^<]+)&nbsp;&nbsp; RUHD.ru - Видео Высокого качества №1 в России!</title>',
+            webpage, 'title')
         description = self._html_search_regex(
-            r'(?s)<div id="longdesc">(.+?)<span id="showlink">', webpage, 'description', fatal=False)
+            r'(?s)<div id="longdesc">(.+?)<span id="showlink">',
+            webpage, 'description', fatal=False)
         thumbnail = self._html_search_regex(
-            r'<param name="previewImage" value="([^"]+)"', webpage, 'thumbnail', fatal=False)
+            r'<param name="previewImage" value="([^"]+)"',
+            webpage, 'thumbnail', fatal=False)
         if thumbnail:
             thumbnail = 'http://www.ruhd.ru' + thumbnail
 
index 357edbbdaf88c6c29395aa7878c18f305c79b216..6941d96fb52d23246656fbdd8afb892c32fcc7dd 100644 (file)
@@ -53,6 +53,7 @@ class RutubeIE(InfoExtractor):
         m3u8_url = options['video_balancer'].get('m3u8')
         if m3u8_url is None:
             raise ExtractorError('Couldn\'t find m3u8 manifest url')
+        formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
 
         return {
             'id': video['id'],
@@ -60,8 +61,7 @@ class RutubeIE(InfoExtractor):
             'description': video['description'],
             'duration': video['duration'],
             'view_count': video['hits'],
-            'url': m3u8_url,
-            'ext': 'mp4',
+            'formats': formats,
             'thumbnail': video['thumbnail_url'],
             'uploader': author.get('name'),
             'uploader_id': compat_str(author['id']) if author else None,
@@ -74,6 +74,13 @@ class RutubeChannelIE(InfoExtractor):
     IE_NAME = 'rutube:channel'
     IE_DESC = 'Rutube channels'
     _VALID_URL = r'http://rutube\.ru/tags/video/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'http://rutube.ru/tags/video/1800/',
+        'info_dict': {
+            'id': '1800',
+        },
+        'playlist_mincount': 68,
+    }]
 
     _PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json'
 
@@ -101,6 +108,7 @@ class RutubeMovieIE(RutubeChannelIE):
     IE_NAME = 'rutube:movie'
     IE_DESC = 'Rutube movies'
     _VALID_URL = r'http://rutube\.ru/metainfo/tv/(?P<id>\d+)'
+    _TESTS = []
 
     _MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json'
     _PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'
@@ -119,5 +127,12 @@ class RutubePersonIE(RutubeChannelIE):
     IE_NAME = 'rutube:person'
     IE_DESC = 'Rutube person videos'
     _VALID_URL = r'http://rutube\.ru/video/person/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'http://rutube.ru/video/person/313878/',
+        'info_dict': {
+            'id': '313878',
+        },
+        'playlist_mincount': 37,
+    }]
 
     _PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json'
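
A rough sketch, not part of the diff, of what switching to _extract_m3u8_formats buys over the old single-URL result (the real helper in common.py also carries resolution and codec info); manifest contents invented:

    import re

    def parse_master_m3u8(manifest, ext='mp4'):
        formats = []
        stream_inf = {}
        for line in manifest.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                stream_inf = dict(re.findall(r'([A-Z0-9-]+)=([^,]+)', line))
            elif line and not line.startswith('#'):
                fmt = {'url': line, 'ext': ext}
                if 'BANDWIDTH' in stream_inf:
                    fmt['tbr'] = int(stream_inf['BANDWIDTH']) // 1000
                formats.append(fmt)
                stream_inf = {}
        return formats

    formats = parse_master_m3u8(
        '#EXTM3U\n'
        '#EXT-X-STREAM-INF:BANDWIDTH=1280000\n'
        'http://example.invalid/variant-480.m3u8\n')
    # [{'url': 'http://example.invalid/variant-480.m3u8', 'ext': 'mp4', 'tbr': 1280}]

Each variant stream becomes a selectable format instead of handing the downloader one opaque m3u8 URL with a hardcoded ext.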
index 6c5f5a6804722bc43451f595dcdff56c3a7377b6..a73e6f331fc02a8977863a412227681b3838b91a 100644 (file)
@@ -100,7 +100,7 @@ class RUTVIE(InfoExtractor):
             return mobj.group('url')
 
         mobj = re.search(
-            r'<meta[^>]+?property=(["\'])og:video\1[^>]+?content=(["\'])(?P<url>http://player\.(?:rutv\.ru|vgtrk\.com)/flash2v/container\.swf\?id=.+?\2)',
+            r'<meta[^>]+?property=(["\'])og:video\1[^>]+?content=(["\'])(?P<url>https?://player\.(?:rutv\.ru|vgtrk\.com)/flash2v/container\.swf\?id=.+?\2)',
             webpage)
         if mobj:
             return mobj.group('url')
@@ -191,4 +191,4 @@ class RUTVIE(InfoExtractor):
             'view_count': view_count,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
diff --git a/youtube_dl/extractor/sbs.py b/youtube_dl/extractor/sbs.py
new file mode 100644 (file)
index 0000000..b8775c2
--- /dev/null
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import json
+import re
+from .common import InfoExtractor
+from ..utils import (
+    js_to_json,
+    remove_end,
+)
+
+
+class SBSIE(InfoExtractor):
+    IE_DESC = 'sbs.com.au'
+    _VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/ondemand/video/(?:single/)?(?P<id>[0-9]+)'
+
+    _TESTS = [{
+        # Original URL is handled by the generic IE which finds the iframe:
+        # http://www.sbs.com.au/thefeed/blog/2014/08/21/dingo-conservation
+        'url': 'http://www.sbs.com.au/ondemand/video/single/320403011771/?source=drupal&vertical=thefeed',
+        'md5': '3150cf278965eeabb5b4cea1c963fe0a',
+        'info_dict': {
+            'id': '320403011771',
+            'ext': 'mp4',
+            'title': 'Dingo Conservation',
+            'description': 'Dingoes are on the brink of extinction; most of the animals we think are dingoes are in fact crossbred with wild dogs. This family run a dingo conservation park to prevent their extinction',
+            'thumbnail': 're:http://.*\.jpg',
+        },
+        'add_ie': ['Generic'],
+    }, {
+        'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+
+        release_urls_json = js_to_json(self._search_regex(
+            r'(?s)playerParams\.releaseUrls\s*=\s*(\{.*?\n\});\n',
+            webpage, 'release urls'))
+        release_urls = json.loads(release_urls_json)
+        theplatform_url = (
+            release_urls.get('progressive') or release_urls.get('standard'))
+
+        title = remove_end(self._og_search_title(webpage), ' (The Feed)')
+        description = self._html_search_meta('description', webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        return {
+            '_type': 'url_transparent',
+            'id': video_id,
+            'url': theplatform_url,
+
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+        }
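
Not part of the diff: roughly the page fragment the releaseUrls regex targets, and why js_to_json is needed before json.loads (URLs invented):

    import json
    import re

    from youtube_dl.utils import js_to_json

    webpage = """<script>
    playerParams.releaseUrls = {
        'progressive': 'http://example.invalid/video.mp4',
        'standard': 'http://example.invalid/video.smil'
    };
    </script>"""

    raw = re.search(
        r'(?s)playerParams\.releaseUrls\s*=\s*(\{.*?\n\});\n', webpage).group(1)
    release_urls = json.loads(js_to_json(raw))  # single-quoted JS -> valid JSON
    theplatform_url = release_urls.get('progressive') or release_urls.get('standard')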
index 55a481cc0ed41fa0bef33f3af1bce941fa7659a1..3bf93c870b2bc30c3baf9567a64d06171558f06b 100644 (file)
@@ -53,4 +53,4 @@ class SciVeeIE(InfoExtractor):
             'description': description,
             'thumbnail': 'http://www.scivee.tv/assets/videothumb/%s' % video_id,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 306869e6af109cf6971a72df14f47fe99a7b6885..c145f6fc72f1b9eed8a5089dce48dfdd5f106a79 100644 (file)
@@ -96,7 +96,7 @@ class ScreencastIE(InfoExtractor):
         if title is None:
             title = self._html_search_regex(
                 [r'<b>Title:</b> ([^<]*)</div>',
-                r'class="tabSeperator">></span><span class="tabText">(.*?)<'],
+                 r'class="tabSeperator">></span><span class="tabText">(.*?)<'],
                 webpage, 'title')
         thumbnail = self._og_search_thumbnail(webpage)
         description = self._og_search_description(webpage, default=None)
diff --git a/youtube_dl/extractor/screenwavemedia.py b/youtube_dl/extractor/screenwavemedia.py
new file mode 100644 (file)
index 0000000..6c9fdb7
--- /dev/null
@@ -0,0 +1,178 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    unified_strdate,
+)
+
+
+class ScreenwaveMediaIE(InfoExtractor):
+    _VALID_URL = r'http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=(?P<id>.+)'
+
+    _TESTS = [{
+        'url': 'http://player.screenwavemedia.com/play/play.php?playerdiv=videoarea&companiondiv=squareAd&id=Cinemassacre-19911',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        playerdata = self._download_webpage(url, video_id, 'Downloading player webpage')
+
+        vidtitle = self._search_regex(
+            r'\'vidtitle\'\s*:\s*"([^"]+)"', playerdata, 'vidtitle').replace('\\/', '/')
+        vidurl = self._search_regex(
+            r'\'vidurl\'\s*:\s*"([^"]+)"', playerdata, 'vidurl').replace('\\/', '/')
+
+        videolist_url = None
+
+        mobj = re.search(r"'videoserver'\s*:\s*'(?P<videoserver>[^']+)'", playerdata)
+        if mobj:
+            videoserver = mobj.group('videoserver')
+            mobj = re.search(r'\'vidid\'\s*:\s*"(?P<vidid>[^\']+)"', playerdata)
+            vidid = mobj.group('vidid') if mobj else video_id
+            videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
+        else:
+            mobj = re.search(r"file\s*:\s*'(?P<smil>http.+?/jwplayer\.smil)'", playerdata)
+            if mobj:
+                videolist_url = mobj.group('smil')
+
+        if videolist_url:
+            videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
+            formats = []
+            baseurl = vidurl[:vidurl.rfind('/') + 1]
+            for video in videolist.findall('.//video'):
+                src = video.get('src')
+                if not src:
+                    continue
+                file_ = src.partition(':')[-1]
+                width = int_or_none(video.get('width'))
+                height = int_or_none(video.get('height'))
+                bitrate = int_or_none(video.get('system-bitrate'), scale=1000)
+                format = {
+                    'url': baseurl + file_,
+                    'format_id': src.rpartition('.')[0].rpartition('_')[-1],
+                }
+                if width or height:
+                    format.update({
+                        'tbr': bitrate,
+                        'width': width,
+                        'height': height,
+                    })
+                else:
+                    format.update({
+                        'abr': bitrate,
+                        'vcodec': 'none',
+                    })
+                formats.append(format)
+        else:
+            formats = [{
+                'url': vidurl,
+            }]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': vidtitle,
+            'formats': formats,
+        }
+
+
+class CinemassacreIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?cinemassacre\.com/(?P<date_y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/(?P<display_id>[^?#/]+)'
+    _TESTS = [
+        {
+            'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
+            'md5': 'fde81fbafaee331785f58cd6c0d46190',
+            'info_dict': {
+                'id': 'Cinemassacre-19911',
+                'ext': 'mp4',
+                'upload_date': '20121110',
+                'title': '“Angry Video Game Nerd: The Movie” – Trailer',
+                'description': 'md5:fb87405fcb42a331742a0dce2708560b',
+            },
+        },
+        {
+            'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
+            'md5': 'd72f10cd39eac4215048f62ab477a511',
+            'info_dict': {
+                'id': 'Cinemassacre-521be8ef82b16',
+                'ext': 'mp4',
+                'upload_date': '20131002',
+                'title': 'The Mummy’s Hand (1940)',
+            },
+        }
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('display_id')
+        video_date = mobj.group('date_y') + mobj.group('date_m') + mobj.group('date_d')
+
+        webpage = self._download_webpage(url, display_id)
+
+        playerdata_url = self._search_regex(
+            r'src="(http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=.+?)"',
+            webpage, 'player data URL')
+        video_title = self._html_search_regex(
+            r'<title>(?P<title>.+?)\|', webpage, 'title')
+        video_description = self._html_search_regex(
+            r'<div class="entry-content">(?P<description>.+?)</div>',
+            webpage, 'description', flags=re.DOTALL, fatal=False)
+        video_thumbnail = self._og_search_thumbnail(webpage)
+
+        return {
+            '_type': 'url_transparent',
+            'display_id': display_id,
+            'title': video_title,
+            'description': video_description,
+            'upload_date': video_date,
+            'thumbnail': video_thumbnail,
+            'url': playerdata_url,
+        }
+
+
+class TeamFourIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?teamfourstar\.com/video/(?P<id>[a-z0-9\-]+)/?'
+    _TEST = {
+        'url': 'http://teamfourstar.com/video/a-moment-with-tfs-episode-4/',
+        'info_dict': {
+            'id': 'TeamFourStar-5292a02f20bfa',
+            'ext': 'mp4',
+            'upload_date': '20130401',
+            'description': 'Check out this and more on our website: http://teamfourstar.com\nTFS Store: http://sharkrobot.com/team-four-star\nFollow on Twitter: http://twitter.com/teamfourstar\nLike on FB: http://facebook.com/teamfourstar',
+            'title': 'A Moment With TFS Episode 4',
+        }
+    }
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+
+        playerdata_url = self._search_regex(
+            r'src="(http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=.+?)"',
+            webpage, 'player data URL')
+
+        video_title = self._html_search_regex(
+            r'<div class="heroheadingtitle">(?P<title>.+?)</div>',
+            webpage, 'title')
+        video_date = unified_strdate(self._html_search_regex(
+            r'<div class="heroheadingdate">(?P<date>.+?)</div>',
+            webpage, 'date', fatal=False))
+        video_description = self._html_search_regex(
+            r'(?s)<div class="postcontent">(?P<description>.+?)</div>',
+            webpage, 'description', fatal=False)
+        video_thumbnail = self._og_search_thumbnail(webpage)
+
+        return {
+            '_type': 'url_transparent',
+            'display_id': display_id,
+            'title': video_title,
+            'description': video_description,
+            'upload_date': video_date,
+            'thumbnail': video_thumbnail,
+            'url': playerdata_url,
+        }
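
Not part of the diff: a sketch of the jwplayer.smil layout the videolist loop above consumes (paths and bitrates invented); width/height mark video renditions, and their absence marks audio-only ones:

    import xml.etree.ElementTree as ET

    videolist = ET.fromstring(
        '<smil><body><switch>'
        '<video src="mp4:Cinemassacre-19911_high.mp4" width="1280" height="720"'
        ' system-bitrate="3000000"/>'
        '<video src="mp4:Cinemassacre-19911_audio.mp4" system-bitrate="128000"/>'
        '</switch></body></smil>')

    for video in videolist.findall('.//video'):
        src = video.get('src')
        file_ = src.partition(':')[-1]                          # strips the 'mp4:' prefix
        format_id = src.rpartition('.')[0].rpartition('_')[-1]  # 'high' / 'audio'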
index 1dc551d5c7f274b717369952824050812c21fee3..16dc3736b48bfb15a94b98713beef4757446b642 100644 (file)
@@ -67,5 +67,3 @@ class ServingSysIE(InfoExtractor):
             'title': title,
             'entries': entries,
         }
-
\ No newline at end of file
diff --git a/youtube_dl/extractor/sexu.py b/youtube_dl/extractor/sexu.py
new file mode 100644 (file)
index 0000000..6365a87
--- /dev/null
@@ -0,0 +1,61 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class SexuIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?sexu\.com/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://sexu.com/961791/',
+        'md5': 'ff615aca9691053c94f8f10d96cd7884',
+        'info_dict': {
+            'id': '961791',
+            'ext': 'mp4',
+            'title': 'md5:4d05a19a5fc049a63dbbaf05fb71d91b',
+            'description': 'md5:c5ed8625eb386855d5a7967bd7b77a54',
+            'categories': list,  # NSFW
+            'thumbnail': 're:https?://.*\.jpg$',
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        quality_arr = self._search_regex(
+            r'sources:\s*\[([^\]]+)\]', webpage, 'format string')
+        formats = [{
+            'url': fmt[0].replace('\\', ''),
+            'format_id': fmt[1],
+            'height': int(fmt[1][:3]),
+        } for fmt in re.findall(r'"file":"([^"]+)","label":"([^"]+)"', quality_arr)]
+        self._sort_formats(formats)
+
+        title = self._html_search_regex(
+            r'<title>([^<]+)\s*-\s*Sexu\.Com</title>', webpage, 'title')
+
+        description = self._html_search_meta(
+            'description', webpage, 'description')
+
+        thumbnail = self._html_search_regex(
+            r'image:\s*"([^"]+)"',
+            webpage, 'thumbnail', fatal=False)
+
+        categories_str = self._html_search_meta(
+            'keywords', webpage, 'categories')
+        categories = (
+            None if categories_str is None
+            else categories_str.split(','))
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'categories': categories,
+            'formats': formats,
+            'age_limit': 18,
+        }
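
Not part of the diff: the jwplayer sources blob the two regexes above pick apart (URL invented); 'label' doubles as the format_id, and its first three characters as the height:

    import re

    webpage = r'sources: [{"file":"http:\/\/example.invalid\/961791-480p.mp4","label":"480p"}]'
    quality_arr = re.search(r'sources:\s*\[([^\]]+)\]', webpage).group(1)
    formats = [{
        'url': fmt[0].replace('\\', ''),   # undo the \/ escaping
        'format_id': fmt[1],
        'height': int(fmt[1][:3]),
    } for fmt in re.findall(r'"file":"([^"]+)","label":"([^"]+)"', quality_arr)]
    # [{'url': 'http://example.invalid/961791-480p.mp4', 'format_id': '480p', 'height': 480}]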
diff --git a/youtube_dl/extractor/sexykarma.py b/youtube_dl/extractor/sexykarma.py
new file mode 100644 (file)
index 0000000..c833fc8
--- /dev/null
@@ -0,0 +1,117 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    unified_strdate,
+    parse_duration,
+    int_or_none,
+)
+
+
+class SexyKarmaIE(InfoExtractor):
+    IE_DESC = 'Sexy Karma and Watch Indian Porn'
+    _VALID_URL = r'https?://(?:www\.)?(?:sexykarma\.com|watchindianporn\.net)/(?:[^/]+/)*video/(?P<display_id>[^/]+)-(?P<id>[a-zA-Z0-9]+)\.html'
+    _TESTS = [{
+        'url': 'http://www.sexykarma.com/gonewild/video/taking-a-quick-pee-yHI70cOyIHt.html',
+        'md5': 'b9798e7d1ef1765116a8f516c8091dbd',
+        'info_dict': {
+            'id': 'yHI70cOyIHt',
+            'display_id': 'taking-a-quick-pee',
+            'ext': 'mp4',
+            'title': 'Taking a quick pee.',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'uploader': 'wildginger7',
+            'upload_date': '20141007',
+            'duration': 22,
+            'view_count': int,
+            'comment_count': int,
+            'categories': list,
+        }
+    }, {
+        'url': 'http://www.sexykarma.com/gonewild/video/pot-pixie-tribute-8Id6EZPbuHf.html',
+        'md5': 'dd216c68d29b49b12842b9babe762a5d',
+        'info_dict': {
+            'id': '8Id6EZPbuHf',
+            'display_id': 'pot-pixie-tribute',
+            'ext': 'mp4',
+            'title': 'pot_pixie tribute',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'uploader': 'banffite',
+            'upload_date': '20141013',
+            'duration': 16,
+            'view_count': int,
+            'comment_count': int,
+            'categories': list,
+        }
+    }, {
+        'url': 'http://www.watchindianporn.net/video/desi-dancer-namrata-stripping-completely-nude-and-dancing-on-a-hot-number-dW2mtctxJfs.html',
+        'md5': '9afb80675550406ed9a63ac2819ef69d',
+        'info_dict': {
+            'id': 'dW2mtctxJfs',
+            'display_id': 'desi-dancer-namrata-stripping-completely-nude-and-dancing-on-a-hot-number',
+            'ext': 'mp4',
+            'title': 'Desi dancer namrata stripping completely nude and dancing on a hot number',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'uploader': 'Don',
+            'upload_date': '20140213',
+            'duration': 83,
+            'view_count': int,
+            'comment_count': int,
+            'categories': list,
+        }
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        video_url = self._html_search_regex(
+            r"url: escape\('([^']+)'\)", webpage, 'url')
+
+        title = self._html_search_regex(
+            r'<h2 class="he2"><span>(.*?)</span>',
+            webpage, 'title')
+        thumbnail = self._html_search_regex(
+            r'<span id="container"><img\s+src="([^"]+)"',
+            webpage, 'thumbnail', fatal=False)
+
+        uploader = self._html_search_regex(
+            r'class="aupa">\s*(.*?)</a>',
+            webpage, 'uploader')
+        upload_date = unified_strdate(self._html_search_regex(
+            r'Added: <strong>(.+?)</strong>', webpage, 'upload date', fatal=False))
+
+        duration = parse_duration(self._search_regex(
+            r'<td>Time:\s*</td>\s*<td align="right"><span>\s*(.+?)\s*</span>',
+            webpage, 'duration', fatal=False))
+
+        view_count = int_or_none(self._search_regex(
+            r'<td>Views:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
+            webpage, 'view count', fatal=False))
+        comment_count = int_or_none(self._search_regex(
+            r'<td>Comments:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
+            webpage, 'comment count', fatal=False))
+
+        categories = re.findall(
+            r'<a href="[^"]+/search/video/desi"><span>([^<]+)</span></a>',
+            webpage)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'url': video_url,
+            'title': title,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'upload_date': upload_date,
+            'duration': duration,
+            'view_count': view_count,
+            'comment_count': comment_count,
+            'categories': categories,
+        }
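
Not part of the diff: the player fragment the url regex above targets (address invented; the id is from the first test). Note the captured value is used verbatim, with no JS unescape() reversal applied:

    import re

    webpage = "url: escape('http://video.sexykarma.com/media/videos/yHI70cOyIHt.mp4')"
    video_url = re.search(r"url: escape\('([^']+)'\)", webpage).group(1)
    # 'http://video.sexykarma.com/media/videos/yHI70cOyIHt.mp4'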
index 8607482be8b318fa2772fcb3cd882c9359f50cf9..fdc31603a709676713f7ba87325f4b2ba6b47f3e 100644 (file)
@@ -17,11 +17,11 @@ class SharedIE(InfoExtractor):
 
     _TEST = {
         'url': 'http://shared.sx/0060718775',
-        'md5': '53e1c58fc3e777ae1dfe9e57ba2f9c72',
+        'md5': '106fefed92a8a2adb8c98e6a0652f49b',
         'info_dict': {
             'id': '0060718775',
             'ext': 'mp4',
-            'title': 'Big Buck Bunny Trailer',
+            'title': 'Bmp4',
         },
     }
 
@@ -54,4 +54,4 @@ class SharedIE(InfoExtractor):
             'filesize': filesize,
             'title': title,
             'thumbnail': thumbnail,
-        }
\ No newline at end of file
+        }
diff --git a/youtube_dl/extractor/sharesix.py b/youtube_dl/extractor/sharesix.py
new file mode 100644 (file)
index 0000000..7531e83
--- /dev/null
@@ -0,0 +1,91 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+    parse_duration,
+)
+
+
+class ShareSixIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?sharesix\.com/(?:f/)?(?P<id>[0-9a-zA-Z]+)'
+    _TESTS = [
+        {
+            'url': 'http://sharesix.com/f/OXjQ7Y6',
+            'md5': '9e8e95d8823942815a7d7c773110cc93',
+            'info_dict': {
+                'id': 'OXjQ7Y6',
+                'ext': 'mp4',
+                'title': 'big_buck_bunny_480p_surround-fix.avi',
+                'duration': 596,
+                'width': 854,
+                'height': 480,
+            },
+        },
+        {
+            'url': 'http://sharesix.com/lfrwoxp35zdd',
+            'md5': 'dd19f1435b7cec2d7912c64beeee8185',
+            'info_dict': {
+                'id': 'lfrwoxp35zdd',
+                'ext': 'flv',
+                'title': 'WhiteBoard___a_Mac_vs_PC_Parody_Cartoon.mp4.flv',
+                'duration': 65,
+                'width': 1280,
+                'height': 720,
+            },
+        }
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        fields = {
+            'method_free': 'Free'
+        }
+        post = compat_urllib_parse.urlencode(fields)
+        req = compat_urllib_request.Request(url, post)
+        req.add_header('Content-type', 'application/x-www-form-urlencoded')
+
+        webpage = self._download_webpage(req, video_id,
+                                         'Downloading video page')
+
+        video_url = self._search_regex(
+            r"var\slnk1\s=\s'([^']+)'", webpage, 'video URL')
+        title = self._html_search_regex(
+            r'(?s)<dt>Filename:</dt>.+?<dd>(.+?)</dd>', webpage, 'title')
+        duration = parse_duration(
+            self._search_regex(
+                r'(?s)<dt>Length:</dt>.+?<dd>(.+?)</dd>',
+                webpage,
+                'duration',
+                fatal=False
+            )
+        )
+
+        m = re.search(
+            r'''(?xs)<dt>Width\sx\sHeight</dt>.+?
+                     <dd>(?P<width>\d+)\sx\s(?P<height>\d+)</dd>''',
+            webpage
+        )
+        width = height = None
+        if m:
+            width, height = int(m.group('width')), int(m.group('height'))
+
+        formats = [{
+            'format_id': 'sd',
+            'url': video_url,
+            'width': width,
+            'height': height,
+        }]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'duration': duration,
+            'formats': formats,
+        }
index 2909ef18b51a5ac6dadc4eec33bd05522443da8c..5eadbb7eaea263b8a37307fbdcc01c3e54c5eaa2 100644 (file)
@@ -46,7 +46,7 @@ class SinaIE(InfoExtractor):
     def _extract_video(self, video_id):
         data = compat_urllib_parse.urlencode({'vid': video_id})
         url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data,
-            video_id, 'Downloading video url')
+                                     video_id, 'Downloading video url')
         image_page = self._download_webpage(
             'http://interface.video.sina.com.cn/interface/common/getVideoImage.php?%s' % data,
             video_id, 'Downloading thumbnail info')
index 53c3c9220374737b88dc516ec810ecb6865b74f7..5864b9936cca2e4d0ba3a0fa217884c21f897ed7 100644 (file)
@@ -39,7 +39,7 @@ class SlideshareIE(InfoExtractor):
         ext = info['jsplayer']['video_extension']
         video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext)
         description = self._html_search_regex(
-            r'<p\s+(?:style="[^"]*"\s+)?class="description.*?"[^>]*>(.*?)</p>', webpage,
+            r'<p\s+(?:style="[^"]*"\s+)?class=".*?description.*?"[^>]*>(.*?)</p>', webpage,
             'description', fatal=False)
 
         return {
index e6e7d086503a04a3fda862f601e109003169b9d7..3df71304dafc9c9e353923f6769c88e1fcf8c5ff 100644 (file)
@@ -26,7 +26,7 @@ class SlutloadIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         video_title = self._html_search_regex(r'<h1><strong>([^<]+)</strong>',
-            webpage, 'title').strip()
+                                              webpage, 'title').strip()
 
         video_url = self._html_search_regex(
             r'(?s)<div id="vidPlayer"\s+data-url="([^"]+)"',
index 13e7e71cb37b4d7b5ec2e5ab2c341551e7e05f28..646af3cc9c9686b7d09fdc87b305c0b7c6c0f8ce 100644 (file)
@@ -1,7 +1,6 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import os.path
 import re
 import json
 import hashlib
@@ -12,15 +11,15 @@ from ..utils import (
     compat_urllib_parse,
     compat_urllib_request,
     ExtractorError,
-    url_basename,
     int_or_none,
+    unified_strdate,
 )
 
 
 class SmotriIE(InfoExtractor):
     IE_DESC = 'Smotri.com'
     IE_NAME = 'smotri'
-    _VALID_URL = r'^https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<videoid>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})'
+    _VALID_URL = r'^https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<id>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})'
     _NETRC_MACHINE = 'smotri'
 
     _TESTS = [
@@ -35,7 +34,6 @@ class SmotriIE(InfoExtractor):
                 'uploader': 'rbc2008',
                 'uploader_id': 'rbc08',
                 'upload_date': '20131118',
-                'description': 'катастрофа с камер видеонаблюдения, видео катастрофа с камер видеонаблюдения',
                 'thumbnail': 'http://frame6.loadup.ru/8b/a9/2610366.3.3.jpg',
             },
         },
@@ -50,7 +48,6 @@ class SmotriIE(InfoExtractor):
                 'uploader': 'Support Photofile@photofile',
                 'uploader_id': 'support-photofile',
                 'upload_date': '20070704',
-                'description': 'test, видео test',
                 'thumbnail': 'http://frame4.loadup.ru/03/ed/57591.2.3.jpg',
             },
         },
@@ -66,7 +63,6 @@ class SmotriIE(InfoExtractor):
                 'uploader_id': 'timoxa40',
                 'upload_date': '20100404',
                 'thumbnail': 'http://frame7.loadup.ru/af/3f/1390466.3.3.jpg',
-                'description': 'TOCCA_A_NOI_-_LE_COSE_NON_VANNO_CAMBIAMOLE_ORA-1, видео TOCCA_A_NOI_-_LE_COSE_NON_VANNO_CAMBIAMOLE_ORA-1',
             },
             'params': {
                 'videopassword': 'qwerty',
@@ -85,7 +81,6 @@ class SmotriIE(InfoExtractor):
                 'upload_date': '20101001',
                 'thumbnail': 'http://frame3.loadup.ru/75/75/1540889.1.3.jpg',
                 'age_limit': 18,
-                'description': 'этот ролик не покажут по ТВ, видео этот ролик не покажут по ТВ',
             },
             'params': {
                 'videopassword': '333'
@@ -102,17 +97,11 @@ class SmotriIE(InfoExtractor):
                 'uploader': 'HannahL',
                 'uploader_id': 'lisaha95',
                 'upload_date': '20090331',
-                'description': 'Shakira - Don\'t Bother, видео Shakira - Don\'t Bother',
                 'thumbnail': 'http://frame8.loadup.ru/44/0b/918809.7.3.jpg',
             },
         },
     ]
 
-    _SUCCESS = 0
-    _PASSWORD_NOT_VERIFIED = 1
-    _PASSWORD_DETECTED = 2
-    _VIDEO_NOT_FOUND = 3
-
     @classmethod
     def _extract_url(cls, webpage):
         mobj = re.search(
@@ -137,44 +126,44 @@ class SmotriIE(InfoExtractor):
         return self._html_search_meta(name, html, display_name)
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
-        real_video_id = mobj.group('realvideoid')
+        video_id = self._match_id(url)
+
+        video_form = {
+            'ticket': video_id,
+            'video_url': '1',
+            'frame_url': '1',
+            'devid': 'LoadupFlashPlayer',
+            'getvideoinfo': '1',
+        }
+
+        request = compat_urllib_request.Request(
+            'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form))
+        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 
-        # Download video JSON data
-        video_json_url = 'http://smotri.com/vt.php?id=%s' % real_video_id
-        video_json_page = self._download_webpage(video_json_url, video_id, 'Downloading video JSON')
-        video_json = json.loads(video_json_page)
+        video = self._download_json(request, video_id, 'Downloading video JSON')
 
-        status = video_json['status']
-        if status == self._VIDEO_NOT_FOUND:
+        if video.get('_moderate_no') or not video.get('moderated'):
+            raise ExtractorError('Video %s has not been approved by moderator' % video_id, expected=True)
+
+        if video.get('error'):
             raise ExtractorError('Video %s does not exist' % video_id, expected=True)
-        elif status == self._PASSWORD_DETECTED: # The video is protected by a password, retry with
-                                                # video-password set
-            video_password = self._downloader.params.get('videopassword', None)
-            if not video_password:
-                raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
-            video_json_url += '&md5pass=%s' % hashlib.md5(video_password.encode('utf-8')).hexdigest()
-            video_json_page = self._download_webpage(video_json_url, video_id, 'Downloading video JSON (video-password set)')
-            video_json = json.loads(video_json_page)
-            status = video_json['status']
-            if status == self._PASSWORD_NOT_VERIFIED:
-                raise ExtractorError('Video password is invalid', expected=True)
-
-        if status != self._SUCCESS:
-            raise ExtractorError('Unexpected status value %s' % status)
-
-        # Extract the URL of the video
-        video_url = video_json['file_data']
+
+        video_url = video.get('_vidURL') or video.get('_vidURL_mp4')
+        title = video['title']
+        thumbnail = video['_imgURL']
+        upload_date = unified_strdate(video['added'])
+        uploader = video['userNick']
+        uploader_id = video['userLogin']
+        duration = int_or_none(video['duration'])
 
         # Video JSON does not provide enough meta data
         # We will extract some from the video web page instead
-        video_page_url = 'http://smotri.com/video/view/?id=%s' % video_id
-        video_page = self._download_webpage(video_page_url, video_id, 'Downloading video page')
+        webpage_url = 'http://smotri.com/video/view/?id=%s' % video_id
+        webpage = self._download_webpage(webpage_url, video_id, 'Downloading video page')
 
         # Warning if video is unavailable
         warning = self._html_search_regex(
-            r'<div class="videoUnModer">(.*?)</div>', video_page,
+            r'<div class="videoUnModer">(.*?)</div>', webpage,
             'warning message', default=None)
         if warning is not None:
             self._downloader.report_warning(
@@ -182,84 +171,32 @@ class SmotriIE(InfoExtractor):
                 (video_id, warning))
 
         # Adult content
-        if re.search('EroConfirmText">', video_page) is not None:
+        if re.search('EroConfirmText">', webpage) is not None:
             self.report_age_confirmation()
             confirm_string = self._html_search_regex(
                 r'<a href="/video/view/\?id=%s&confirm=([^"]+)" title="[^"]+">' % video_id,
-                video_page, 'confirm string')
-            confirm_url = video_page_url + '&confirm=%s' % confirm_string
-            video_page = self._download_webpage(confirm_url, video_id, 'Downloading video page (age confirmed)')
+                webpage, 'confirm string')
+            confirm_url = webpage_url + '&confirm=%s' % confirm_string
+            webpage = self._download_webpage(confirm_url, video_id, 'Downloading video page (age confirmed)')
             adult_content = True
         else:
             adult_content = False
 
-        # Extract the rest of meta data
-        video_title = self._search_meta('name', video_page, 'title')
-        if not video_title:
-            video_title = os.path.splitext(url_basename(video_url))[0]
-
-        video_description = self._search_meta('description', video_page)
-        END_TEXT = ' на сайте Smotri.com'
-        if video_description and video_description.endswith(END_TEXT):
-            video_description = video_description[:-len(END_TEXT)]
-        START_TEXT = 'Смотреть онлайн ролик '
-        if video_description and video_description.startswith(START_TEXT):
-            video_description = video_description[len(START_TEXT):]
-        video_thumbnail = self._search_meta('thumbnail', video_page)
-
-        upload_date_str = self._search_meta('uploadDate', video_page, 'upload date')
-        if upload_date_str:
-            upload_date_m = re.search(r'(?P<year>\d{4})\.(?P<month>\d{2})\.(?P<day>\d{2})T', upload_date_str)
-            video_upload_date = (
-                (
-                    upload_date_m.group('year') +
-                    upload_date_m.group('month') +
-                    upload_date_m.group('day')
-                )
-                if upload_date_m else None
-            )
-        else:
-            video_upload_date = None
-
-        duration_str = self._search_meta('duration', video_page)
-        if duration_str:
-            duration_m = re.search(r'T(?P<hours>[0-9]{2})H(?P<minutes>[0-9]{2})M(?P<seconds>[0-9]{2})S', duration_str)
-            video_duration = (
-                (
-                    (int(duration_m.group('hours')) * 60 * 60) +
-                    (int(duration_m.group('minutes')) * 60) +
-                    int(duration_m.group('seconds'))
-                )
-                if duration_m else None
-            )
-        else:
-            video_duration = None
-
-        video_uploader = self._html_search_regex(
-            '<div class="DescrUser"><div>Автор.*?onmouseover="popup_user_info[^"]+">(.*?)</a>',
-            video_page, 'uploader', fatal=False, flags=re.MULTILINE|re.DOTALL)
-
-        video_uploader_id = self._html_search_regex(
-            '<div class="DescrUser"><div>Автор.*?onmouseover="popup_user_info\\(.*?\'([^\']+)\'\\);">',
-            video_page, 'uploader id', fatal=False, flags=re.MULTILINE|re.DOTALL)
-
-        video_view_count = self._html_search_regex(
+        view_count = self._html_search_regex(
             'Общее количество просмотров.*?<span class="Number">(\\d+)</span>',
-            video_page, 'view count', fatal=False, flags=re.MULTILINE|re.DOTALL)
+            webpage, 'view count', fatal=False, flags=re.MULTILINE | re.DOTALL)
 
         return {
             'id': video_id,
             'url': video_url,
-            'title': video_title,
-            'thumbnail': video_thumbnail,
-            'description': video_description,
-            'uploader': video_uploader,
-            'upload_date': video_upload_date,
-            'uploader_id': video_uploader_id,
-            'duration': video_duration,
-            'view_count': int_or_none(video_view_count),
+            'title': title,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'upload_date': upload_date,
+            'uploader_id': uploader_id,
+            'duration': duration,
+            'view_count': int_or_none(view_count),
             'age_limit': 18 if adult_content else 0,
-            'video_page_url': video_page_url
         }
 
 
@@ -267,7 +204,15 @@ class SmotriCommunityIE(InfoExtractor):
     IE_DESC = 'Smotri.com community videos'
     IE_NAME = 'smotri:community'
     _VALID_URL = r'^https?://(?:www\.)?smotri\.com/community/video/(?P<communityid>[0-9A-Za-z_\'-]+)'
-    
+    _TEST = {
+        'url': 'http://smotri.com/community/video/kommuna',
+        'info_dict': {
+            'id': 'kommuna',
+            'title': 'КПРФ',
+        },
+        'playlist_mincount': 4,
+    }
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         community_id = mobj.group('communityid')
@@ -289,6 +234,14 @@ class SmotriUserIE(InfoExtractor):
     IE_DESC = 'Smotri.com user videos'
     IE_NAME = 'smotri:user'
     _VALID_URL = r'^https?://(?:www\.)?smotri\.com/user/(?P<userid>[0-9A-Za-z_\'-]+)'
+    _TESTS = [{
+        'url': 'http://smotri.com/user/inspector',
+        'info_dict': {
+            'id': 'inspector',
+            'title': 'Inspector',
+        },
+        'playlist_mincount': 9,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -321,15 +274,18 @@ class SmotriBroadcastIE(InfoExtractor):
         broadcast_page = self._download_webpage(broadcast_url, broadcast_id, 'Downloading broadcast page')
 
         if re.search('>Режиссер с логином <br/>"%s"<br/> <span>не существует<' % broadcast_id, broadcast_page) is not None:
-            raise ExtractorError('Broadcast %s does not exist' % broadcast_id, expected=True)
+            raise ExtractorError(
+                'Broadcast %s does not exist' % broadcast_id, expected=True)
 
         # Adult content
         if re.search('EroConfirmText">', broadcast_page) is not None:
 
             (username, password) = self._get_login_info()
             if username is None:
-                raise ExtractorError('Erotic broadcasts allowed only for registered users, '
-                    'use --username and --password options to provide account credentials.', expected=True)
+                raise ExtractorError(
+                    'Erotic broadcasts allowed only for registered users, '
+                    'use --username and --password options to provide account credentials.',
+                    expected=True)
 
             login_form = {
                 'login-hint53': '1',
@@ -338,9 +294,11 @@ class SmotriBroadcastIE(InfoExtractor):
                 'password': password,
             }
 
-            request = compat_urllib_request.Request(broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form))
+            request = compat_urllib_request.Request(
+                broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form))
             request.add_header('Content-Type', 'application/x-www-form-urlencoded')
-            broadcast_page = self._download_webpage(request, broadcast_id, 'Logging in and confirming age')
+            broadcast_page = self._download_webpage(
+                request, broadcast_id, 'Logging in and confirming age')
 
             if re.search('>Неверный логин или пароль<', broadcast_page) is not None:
                 raise ExtractorError('Unable to log in: bad username or password', expected=True)
@@ -350,7 +308,7 @@ class SmotriBroadcastIE(InfoExtractor):
             adult_content = False
 
         ticket = self._html_search_regex(
-            'window\.broadcast_control\.addFlashVar\\(\'file\', \'([^\']+)\'\\);',
+            r"window\.broadcast_control\.addFlashVar\('file'\s*,\s*'([^']+)'\)",
             broadcast_page, 'broadcast ticket')
 
         url = 'http://smotri.com/broadcast/view/url/?ticket=%s' % ticket
@@ -359,26 +317,31 @@ class SmotriBroadcastIE(InfoExtractor):
         if broadcast_password:
             url += '&pass=%s' % hashlib.md5(broadcast_password.encode('utf-8')).hexdigest()
 
-        broadcast_json_page = self._download_webpage(url, broadcast_id, 'Downloading broadcast JSON')
+        broadcast_json_page = self._download_webpage(
+            url, broadcast_id, 'Downloading broadcast JSON')
 
         try:
             broadcast_json = json.loads(broadcast_json_page)
 
             protected_broadcast = broadcast_json['_pass_protected'] == 1
             if protected_broadcast and not broadcast_password:
-                raise ExtractorError('This broadcast is protected by a password, use the --video-password option', expected=True)
+                raise ExtractorError(
+                    'This broadcast is protected by a password, use the --video-password option',
+                    expected=True)
 
             broadcast_offline = broadcast_json['is_play'] == 0
             if broadcast_offline:
                 raise ExtractorError('Broadcast %s is offline' % broadcast_id, expected=True)
 
             rtmp_url = broadcast_json['_server']
-            if not rtmp_url.startswith('rtmp://'):
+            mobj = re.search(r'^rtmp://[^/]+/(?P<app>.+)/?$', rtmp_url)
+            if not mobj:
                 raise ExtractorError('Unexpected broadcast rtmp URL')
 
             broadcast_playpath = broadcast_json['_streamName']
+            broadcast_app = '%s/%s' % (mobj.group('app'), broadcast_json['_vidURL'])
             broadcast_thumbnail = broadcast_json['_imgURL']
-            broadcast_title = broadcast_json['title']
+            broadcast_title = self._live_title(broadcast_json['title'])
             broadcast_description = broadcast_json['description']
             broadcaster_nick = broadcast_json['nick']
             broadcaster_login = broadcast_json['login']
@@ -399,6 +362,9 @@ class SmotriBroadcastIE(InfoExtractor):
             'age_limit': 18 if adult_content else 0,
             'ext': 'flv',
             'play_path': broadcast_playpath,
+            'player_url': 'http://pics.smotri.com/broadcast_play.swf',
+            'app': broadcast_app,
             'rtmp_live': True,
-            'rtmp_conn': rtmp_conn
+            'rtmp_conn': rtmp_conn,
+            'is_live': True,
         }
index dc9f8055013170a87a447ee6370f5cae4546174d..c663e56d42ed02645313637cd7866a9071d10ae7 100644 (file)
@@ -61,7 +61,10 @@ class SockshareIE(InfoExtractor):
             r'<a href="([^"]*)".+class="download_file_link"',
             webpage, 'file url')
         video_url = "http://www.sockshare.com" + video_url
-        title = self._html_search_regex(r'<h1>(.+)<strong>', webpage, 'title')
+        title = self._html_search_regex((
+            r'<h1>(.+)<strong>',
+            r'var name = "([^"]+)";'),
+            webpage, 'title', default=None)
         thumbnail = self._html_search_regex(
             r'<img\s+src="([^"]*)".+?name="bg"',
             webpage, 'thumbnail')
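
Not part of the diff: _html_search_regex with a tuple tries each pattern in turn, so the 'var name' fallback only fires when the <h1> layout is absent; an equivalent stdlib sketch (markup invented):

    import re

    webpage = '<script type="text/javascript">var name = "Big_Buck_Bunny.flv";</script>'
    title = None
    for pattern in (r'<h1>(.+)<strong>', r'var name = "([^"]+)";'):
        m = re.search(pattern, webpage)
        if m:
            title = m.group(1)
            break
    # title == 'Big_Buck_Bunny.flv'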
index bebcafb62bea3fe2e180818256475262177f902f..07f514a46246657206ae3fd31e19b1d2932e6f15 100644 (file)
@@ -1,4 +1,5 @@
 # encoding: utf-8
+from __future__ import unicode_literals
 
 import json
 import re
@@ -11,13 +12,14 @@ class SohuIE(InfoExtractor):
     _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'
 
     _TEST = {
-        u'url': u'http://tv.sohu.com/20130724/n382479172.shtml#super',
-        u'file': u'382479172.mp4',
-        u'md5': u'bde8d9a6ffd82c63a1eefaef4eeefec7',
-        u'info_dict': {
-            u'title': u'MV:Far East Movement《The Illest》',
+        'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
+        'md5': 'bde8d9a6ffd82c63a1eefaef4eeefec7',
+        'info_dict': {
+            'id': '382479172',
+            'ext': 'mp4',
+            'title': 'MV:Far East Movement《The Illest》',
         },
-        u'skip': u'Only available from China',
+        'skip': 'Only available from China',
     }
 
     def _real_extract(self, url):
@@ -26,11 +28,11 @@ class SohuIE(InfoExtractor):
             if mytv:
                 base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
             else:
-                base_data_url = u'http://hot.vrs.sohu.com/vrs_flash.action?vid='
+                base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
             data_url = base_data_url + str(vid_id)
             data_json = self._download_webpage(
                 data_url, video_id,
-                note=u'Downloading JSON data for ' + str(vid_id))
+                note='Downloading JSON data for ' + str(vid_id))
             return json.loads(data_json)
 
         mobj = re.match(self._VALID_URL, url)
@@ -39,11 +41,11 @@ class SohuIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
         raw_title = self._html_search_regex(r'(?s)<title>(.+?)</title>',
-                                            webpage, u'video title')
+                                            webpage, 'video title')
         title = raw_title.partition('-')[0].strip()
 
         vid = self._html_search_regex(r'var vid ?= ?["\'](\d+)["\']', webpage,
-                                      u'video path')
+                                      'video path')
         data = _fetch_data(vid, mytv)
 
         QUALITIES = ('ori', 'super', 'high', 'nor')
@@ -51,7 +53,7 @@ class SohuIE(InfoExtractor):
                    for q in QUALITIES
                    if data['data'][q + 'Vid'] != 0]
         if not vid_ids:
-            raise ExtractorError(u'No formats available for this video')
+            raise ExtractorError('No formats available for this video')
 
         # For now, we just pick the highest available quality
         vid_id = vid_ids[-1]
@@ -69,7 +71,7 @@ class SohuIE(InfoExtractor):
                         (allot, prot, clipsURL[i], su[i]))
             part_str = self._download_webpage(
                 part_url, video_id,
-                note=u'Downloading part %d of %d' % (i+1, part_count))
+                note='Downloading part %d of %d' % (i + 1, part_count))
 
             part_info = part_str.split('|')
             video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
index 097d0e418d452a968cdf0355419b02c4dd392081..ab9483d2dc0136258084a794fd6945680d2aae47 100644 (file)
@@ -28,9 +28,11 @@ class SoundcloudIE(InfoExtractor):
     _VALID_URL = r'''(?x)^(?:https?://)?
                     (?:(?:(?:www\.|m\.)?soundcloud\.com/
                             (?P<uploader>[\w\d-]+)/
-                            (?!sets/)(?P<title>[\w\d-]+)/?
+                            (?!sets/|likes/?(?:$|[?#]))
+                            (?P<title>[\w\d-]+)/?
                             (?P<token>[^?]+?)?(?:[?].*)?$)
-                       |(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+))
+                       |(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+)
+                          (?:/?\?secret_token=(?P<secret_token>[^&]+))?)
                        |(?P<player>(?:w|player|p.)\.soundcloud\.com/player/?.*?url=.*)
                     )
                     '''
@@ -38,14 +40,15 @@ class SoundcloudIE(InfoExtractor):
     _TESTS = [
         {
             'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
-            'file': '62986583.mp3',
             'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
             'info_dict': {
-                "upload_date": "20121011",
-                "description": "No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o'd",
-                "uploader": "E.T. ExTerrestrial Music",
-                "title": "Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1",
-                "duration": 143,
+                'id': '62986583',
+                'ext': 'mp3',
+                'upload_date': '20121011',
+                'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
+                'uploader': 'E.T. ExTerrestrial Music',
+                'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
+                'duration': 143,
             }
         },
         # not streamable song
@@ -79,6 +82,20 @@ class SoundcloudIE(InfoExtractor):
                 'duration': 9,
             },
         },
+        # private link (alt format)
+        {
+            'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp',
+            'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
+            'info_dict': {
+                'id': '123998367',
+                'ext': 'mp3',
+                'title': 'Youtube - Dl Test Video \'\' Ä↭',
+                'uploader': 'jaimeMF',
+                'description': 'test chars:  \"\'/\\ä↭',
+                'upload_date': '20131209',
+                'duration': 9,
+            },
+        },
         # downloadable song
         {
             'url': 'https://soundcloud.com/oddsamples/bus-brakes',
@@ -87,7 +104,7 @@ class SoundcloudIE(InfoExtractor):
                 'id': '128590877',
                 'ext': 'mp3',
                 'title': 'Bus Brakes',
-                'description': 'md5:0170be75dd395c96025d210d261c784e',
+                'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
                 'uploader': 'oddsamples',
                 'upload_date': '20140109',
                 'duration': 17,
@@ -124,6 +141,7 @@ class SoundcloudIE(InfoExtractor):
             'description': info['description'],
             'thumbnail': thumbnail,
             'duration': int_or_none(info.get('duration'), 1000),
+            'webpage_url': info.get('permalink_url'),
         }
         formats = []
         if info.get('downloadable', False):
@@ -141,7 +159,7 @@ class SoundcloudIE(InfoExtractor):
 
         # We have to retrieve the url
         streams_url = ('http://api.soundcloud.com/i1/tracks/{0}/streams?'
-            'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
+                       'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
         format_dict = self._download_json(
             streams_url,
             track_id, 'Downloading track url')
@@ -196,6 +214,9 @@ class SoundcloudIE(InfoExtractor):
         if track_id is not None:
             info_json_url = 'http://api.soundcloud.com/tracks/' + track_id + '.json?client_id=' + self._CLIENT_ID
             full_title = track_id
+            token = mobj.group('secret_token')
+            if token:
+                info_json_url += "&secret_token=" + token
         elif mobj.group('player'):
             query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
             return self.url_result(query['url'][0])
@@ -203,14 +224,14 @@ class SoundcloudIE(InfoExtractor):
             # extract uploader (which is in the url)
             uploader = mobj.group('uploader')
             # extract simple title (uploader + slug of song title)
-            slug_title =  mobj.group('title')
+            slug_title = mobj.group('title')
             token = mobj.group('token')
             full_title = resolve_title = '%s/%s' % (uploader, slug_title)
             if token:
                 resolve_title += '/%s' % token
-    
+
             self.report_resolve(full_title)
-    
+
             url = 'http://soundcloud.com/%s' % resolve_title
             info_json_url = self._resolv_url(url)
         info = self._download_json(info_json_url, full_title, 'Downloading info JSON')
@@ -219,25 +240,33 @@ class SoundcloudIE(InfoExtractor):
 
 
 class SoundcloudSetIE(SoundcloudIE):
-    _VALID_URL = r'https?://(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)'
+    _VALID_URL = r'https?://(?:www\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[\w\d-]+)(?:/(?P<token>[^?/]+))?'
     IE_NAME = 'soundcloud:set'
-    # it's in tests/test_playlists.py
-    _TESTS = []
+    _TESTS = [{
+        'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep',
+        'info_dict': {
+            'title': 'The Royal Concept EP',
+        },
+        'playlist_mincount': 6,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError('Invalid URL: %s' % url)
 
         # extract uploader (which is in the url)
-        uploader = mobj.group(1)
+        uploader = mobj.group('uploader')
         # extract simple title (uploader + slug of song title)
-        slug_title = mobj.group(2)
+        slug_title = mobj.group('slug_title')
         full_title = '%s/sets/%s' % (uploader, slug_title)
+        url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
+
+        token = mobj.group('token')
+        if token:
+            full_title += '/' + token
+            url += '/' + token
 
         self.report_resolve(full_title)
 
-        url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
         resolv_url = self._resolv_url(url)
         info = self._download_json(resolv_url, full_title)
 
@@ -246,20 +275,32 @@ class SoundcloudSetIE(SoundcloudIE):
                 self._downloader.report_error('unable to download video webpage: %s' % compat_str(err['error_message']))
             return
 
-        self.report_extraction(full_title)
-        return {'_type': 'playlist',
-                'entries': [self._extract_info_dict(track) for track in info['tracks']],
-                'id': info['id'],
-                'title': info['title'],
-                }
+        return {
+            '_type': 'playlist',
+            'entries': [self._extract_info_dict(track, secret_token=token) for track in info['tracks']],
+            'id': info['id'],
+            'title': info['title'],
+        }
 
 
 class SoundcloudUserIE(SoundcloudIE):
     _VALID_URL = r'https?://(www\.)?soundcloud\.com/(?P<user>[^/]+)/?((?P<rsrc>tracks|likes)/?)?(\?.*)?$'
     IE_NAME = 'soundcloud:user'
-
-    # it's in tests/test_playlists.py
-    _TESTS = []
+    _TESTS = [{
+        'url': 'https://soundcloud.com/the-concept-band',
+        'info_dict': {
+            'id': '9615865',
+            'title': 'The Royal Concept',
+        },
+        'playlist_mincount': 12
+    }, {
+        'url': 'https://soundcloud.com/the-concept-band/likes',
+        'info_dict': {
+            'id': '9615865',
+            'title': 'The Royal Concept',
+        },
+        'playlist_mincount': 1,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -299,25 +340,38 @@ class SoundcloudUserIE(SoundcloudIE):
 
 
 class SoundcloudPlaylistIE(SoundcloudIE):
-    _VALID_URL = r'https?://api\.soundcloud\.com/playlists/(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://api\.soundcloud\.com/playlists/(?P<id>[0-9]+)(?:/?\?secret_token=(?P<token>[^&]+?))?$'
     IE_NAME = 'soundcloud:playlist'
-
-     # it's in tests/test_playlists.py
-    _TESTS = []
+    _TESTS = [{
+        'url': 'http://api.soundcloud.com/playlists/4110309',
+        'info_dict': {
+            'id': '4110309',
+            'title': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]',
+            'description': 're:.*?TILT Brass - Bowery Poetry Club',
+        },
+        'playlist_count': 6,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         playlist_id = mobj.group('id')
         base_url = '%s//api.soundcloud.com/playlists/%s.json?' % (self.http_scheme(), playlist_id)
 
-        data = compat_urllib_parse.urlencode({
+        data_dict = {
             'client_id': self._CLIENT_ID,
-        })
+        }
+        token = mobj.group('token')
+
+        if token:
+            data_dict['secret_token'] = token
+
+        data = compat_urllib_parse.urlencode(data_dict)
         data = self._download_json(
             base_url + data, playlist_id, 'Downloading playlist')
 
         entries = [
-            self._extract_info_dict(t, quiet=True) for t in data['tracks']]
+            self._extract_info_dict(t, quiet=True, secret_token=token)
+            for t in data['tracks']]
 
         return {
             '_type': 'playlist',
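
The secret_token plumbing added above follows one pattern throughout: capture the token from the URL, append it to the API query, and thread it down to each track's info extraction. A hedged sketch of the query-building step (build_playlist_query is an illustrative name, not one from the extractor):

    # sketch, assuming a token captured from the URL that may be None
    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode  # Python 2

    def build_playlist_query(client_id, token=None):
        params = {'client_id': client_id}
        if token:
            params['secret_token'] = token
        return urlencode(params)

    print(build_playlist_query('abc123'))             # client_id only
    print(build_playlist_query('abc123', 's-8Pjrp'))  # adds secret_token
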
index d34aefeaa24a2b8b307005e9164ad9349b638e88..c2d0d36a6935c40553419621678ce8987c4f2dbd 100644 (file)
@@ -33,5 +33,6 @@ class SpaceIE(InfoExtractor):
             # Other videos work fine with the info from the object
             brightcove_url = BrightcoveIE._extract_brightcove_url(webpage)
         if brightcove_url is None:
-            raise ExtractorError(u'The webpage does not contain a video', expected=True)
+            raise ExtractorError(
+                'The webpage does not contain a video', expected=True)
         return self.url_result(brightcove_url, BrightcoveIE.ie_key())
index 2007a00134dfe73cd721dbee9e86e4f349a2e034..94602e89e56549243ed38ecb107ef842cd8ebd46 100644 (file)
@@ -9,7 +9,6 @@ from ..utils import (
     compat_urllib_parse,
     unified_strdate,
     str_to_int,
-    int_or_none,
 )
 from ..aes import aes_decrypt_text
 
@@ -40,31 +39,42 @@ class SpankwireIE(InfoExtractor):
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 
-        title = self._html_search_regex(r'<h1>([^<]+)', webpage, 'title')
+        title = self._html_search_regex(
+            r'<h1>([^<]+)', webpage, 'title')
         description = self._html_search_regex(
-            r'<div\s+id="descriptionContent">([^<]+)<', webpage, 'description', fatal=False)
+            r'<div\s+id="descriptionContent">([^<]+)<',
+            webpage, 'description', fatal=False)
         thumbnail = self._html_search_regex(
-            r'flashvars\.image_url = "([^"]+)', webpage, 'thumbnail', fatal=False)
+            r'playerData\.screenShot\s*=\s*["\']([^"\']+)["\']',
+            webpage, 'thumbnail', fatal=False)
 
         uploader = self._html_search_regex(
-            r'by:\s*<a [^>]*>(.+?)</a>', webpage, 'uploader', fatal=False)
+            r'by:\s*<a [^>]*>(.+?)</a>',
+            webpage, 'uploader', fatal=False)
         uploader_id = self._html_search_regex(
-            r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"', webpage, 'uploader id', fatal=False)
-        upload_date = self._html_search_regex(r'</a> on (.+?) at \d+:\d+', webpage, 'upload date', fatal=False)
-        if upload_date:
-            upload_date = unified_strdate(upload_date)
-        
-        view_count = self._html_search_regex(
-            r'<div id="viewsCounter"><span>([^<]+)</span> views</div>', webpage, 'view count', fatal=False)
-        if view_count:
-            view_count = str_to_int(view_count)
-        comment_count = int_or_none(self._html_search_regex(
-            r'<span id="spCommentCount">\s*(\d+)</span> Comments</div>', webpage, 'comment count', fatal=False))
+            r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"',
+            webpage, 'uploader id', fatal=False)
+        upload_date = unified_strdate(self._html_search_regex(
+            r'</a> on (.+?) at \d+:\d+',
+            webpage, 'upload date', fatal=False))
 
-        video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'flashvars\.quality_[0-9]{3}p = "([^"]+)', webpage)))
+        view_count = str_to_int(self._html_search_regex(
+            r'<div id="viewsCounter"><span>([\d,\.]+)</span> views</div>',
+            webpage, 'view count', fatal=False))
+        comment_count = str_to_int(self._html_search_regex(
+            r'Comments<span[^>]+>\s*\(([\d,\.]+)\)</span>',
+            webpage, 'comment count', fatal=False))
+
+        video_urls = list(map(
+            compat_urllib_parse.unquote,
+            re.findall(r'playerData\.cdnPath[0-9]{3,}\s*=\s*["\']([^"\']+)["\']', webpage)))
         if re.search(r'flashvars\.encrypted\s*=\s*"true"', webpage):
-            password = self._html_search_regex(r'flashvars\.video_title = "([^"]+)', webpage, 'password').replace('+', ' ')
-            video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
+            password = self._html_search_regex(
+                r'flashvars\.video_title = "([^"]+)',
+                webpage, 'password').replace('+', ' ')
+            video_urls = list(map(
+                lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'),
+                video_urls))
 
         formats = []
         for video_url in video_urls:
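
View and comment counts on the page arrive formatted for display ('1,234'), which is why both now go through str_to_int rather than int_or_none. A sketch of the contract, mirroring the helper in youtube_dl/utils.py:

    # strip grouping characters, then convert; None passes through
    import re

    def str_to_int(int_str):
        if int_str is None:
            return None
        return int(re.sub(r'[,\.\+]', '', int_str))

    print(str_to_int('1,234'))   # 1234
    print(str_to_int('12.345'))  # 12345
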
index 340a38440d02ad28b5eb6ab19916eee870818c35..1e55a9ffb5748b70969de11886c13720ff936be7 100644 (file)
@@ -4,10 +4,12 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_urlparse
+from .spiegeltv import SpiegeltvIE
 
 
 class SpiegelIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
+    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed)?(?:\.html)?(?:#.*)?$'
     _TESTS = [{
         'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
         'md5': '2c2754212136f35fb4b19767d242f66e',
@@ -29,25 +31,27 @@ class SpiegelIE(InfoExtractor):
             'duration': 983,
         },
     }, {
-        'url': 'http://www.spiegel.de/video/johann-westhauser-videobotschaft-des-hoehlenforschers-video-1502367.html',
-        'md5': '54f58ba0e752e3c07bc2a26222dd0acf',
+        'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html',
+        'md5': 'd8eeca6bfc8f1cd6f490eb1f44695d51',
         'info_dict': {
-            'id': '1502367',
+            'id': '1519126',
             'ext': 'mp4',
-            'title': 'Videobotschaft: Höhlenforscher Westhauser dankt seinen Rettern',
-            'description': 'md5:c6f1ec11413ebd1088b6813943e5fc91',
-            'duration': 42,
-        },
+            'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
+            'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
+        }
     }]
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('videoID')
+        video_id = self._match_id(url)
+        webpage, handle = self._download_webpage_handle(url, video_id)
 
-        webpage = self._download_webpage(url, video_id)
+        # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html
+        if SpiegeltvIE.suitable(handle.geturl()):
+            return self.url_result(handle.geturl(), 'Spiegeltv')
 
-        title = self._html_search_regex(
-            r'<div class="module-title">(.*?)</div>', webpage, 'title')
+        title = re.sub(r'\s+', ' ', self._html_search_regex(
+            r'(?s)<(?:h1|div) class="module-title"[^>]*>(.*?)</(?:h1|div)>',
+            webpage, 'title'))
         description = self._html_search_meta('description', webpage, 'description')
 
         base_url = self._search_regex(
@@ -82,3 +86,48 @@ class SpiegelIE(InfoExtractor):
             'duration': duration,
             'formats': formats,
         }
+
+
+class SpiegelArticleIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
+    IE_NAME = 'Spiegel:Article'
+    IE_DESC = 'Articles on spiegel.de'
+    _TESTS = [{
+        'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
+        'info_dict': {
+            'id': '1516455',
+            'ext': 'mp4',
+            'title': 'Faszination Badminton: Nennt es bloß nicht Federball',
+            'description': 're:^Patrick Kämnitz gehört.{100,}',
+        },
+    }, {
+        'url': 'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html',
+        'info_dict': {},
+        'playlist_count': 6,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        # Single video on top of the page
+        video_link = self._search_regex(
+            r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
+            'video page URL', default=None)
+        if video_link:
+            video_url = compat_urlparse.urljoin(
+                self.http_scheme() + '//spiegel.de/', video_link)
+            return self.url_result(video_url)
+
+        # Multiple embedded videos
+        embeds = re.findall(
+            r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"',
+            webpage)
+        entries = [
+            self.url_result(compat_urlparse.urljoin(
+                self.http_scheme() + '//spiegel.de/', embed_path))
+            for embed_path in embeds
+        ]
+        return self.playlist_result(entries)
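
Both branches above resolve site-relative video links against the site root before handing them to url_result; compat_urlparse is youtube-dl's Python 2/3 alias for the stdlib module used here:

    # sketch: resolving an embed path the way SpiegelArticleIE does
    try:
        import urllib.parse as compat_urlparse  # Python 3
    except ImportError:
        import urlparse as compat_urlparse  # Python 2

    base = 'http://spiegel.de/'
    print(compat_urlparse.urljoin(base, '/video/der-film-video-99003272.html'))
    # http://spiegel.de/video/der-film-video-99003272.html
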
index 7f388aced0800ebc1881de06b1ddded61afba926..98cf92d89a1151edfd11b8f15a86eeaa6a83178d 100644 (file)
@@ -1,13 +1,13 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
 from .common import InfoExtractor
+from ..utils import float_or_none
 
 
 class SpiegeltvIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/filme/(?P<id>[\-a-z0-9]+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/(?:#/)?filme/(?P<id>[\-a-z0-9]+)'
+    _TESTS = [{
         'url': 'http://www.spiegel.tv/filme/flug-mh370/',
         'info_dict': {
             'id': 'flug-mh370',
@@ -20,12 +20,15 @@ class SpiegeltvIE(InfoExtractor):
             # rtmp download
             'skip_download': True,
         }
-    }
+    }, {
+        'url': 'http://www.spiegel.tv/#/filme/alleskino-die-wahrheit-ueber-maenner/',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        if '/#/' in url:
+            url = url.replace('/#/', '/')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_regex(r'<h1.*?>(.*?)</h1>', webpage, 'title')
 
@@ -61,12 +64,8 @@ class SpiegeltvIE(InfoExtractor):
             })
 
         description = media_json['subtitle']
-        duration = media_json['duration_in_ms'] / 1000.
-
-        if is_wide:
-            format = '16x9'
-        else:
-            format = '4x3'
+        duration = float_or_none(media_json.get('duration_in_ms'), scale=1000)
+        format = '16x9' if is_wide else '4x3'
 
         url = server + 'mp4:' + uuid + '_spiegeltv_0500_' + format + '.m4v'
 
@@ -78,4 +77,4 @@ class SpiegeltvIE(InfoExtractor):
             'description': description,
             'duration': duration,
             'thumbnails': thumbnails
-        }
\ No newline at end of file
+        }
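
float_or_none replaces the bare millisecond division so a missing duration_in_ms yields None instead of a TypeError. The contract, roughly (the real helper lives in youtube_dl/utils.py; values below are illustrative):

    def float_or_none(v, scale=1, invscale=1, default=None):
        # None propagates instead of raising on arithmetic
        return default if v is None else float(v) * invscale / scale

    print(float_or_none(983000, scale=1000))  # 983.0
    print(float_or_none(None, scale=1000))    # None
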
diff --git a/youtube_dl/extractor/sport5.py b/youtube_dl/extractor/sport5.py
new file mode 100644 (file)
index 0000000..dfe50ed
--- /dev/null
@@ -0,0 +1,92 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class Sport5IE(InfoExtractor):
+    _VALID_URL = r'http://(?:(?:www|vod)\.)?sport5\.co\.il/.*\b(?:Vi|docID)=(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://vod.sport5.co.il/?Vc=147&Vi=176331&Page=1',
+            'info_dict': {
+                'id': 's5-Y59xx1-GUh2',
+                'ext': 'mp4',
+                'title': 'ולנסיה-קורדובה 0:3',
+                'description': 'אלקאסר, גאייה ופגולי סידרו לקבוצה של נונו ניצחון על קורדובה ואת המקום הראשון בליגה',
+                'duration': 228,
+                'categories': list,
+            },
+            'skip': 'Blocked outside of Israel',
+        }, {
+            'url': 'http://www.sport5.co.il/articles.aspx?FolderID=3075&docID=176372&lang=HE',
+            'info_dict': {
+                'id': 's5-SiXxx1-hKh2',
+                'ext': 'mp4',
+                'title': 'GOALS_CELTIC_270914.mp4',
+                'description': '',
+                'duration': 87,
+                'categories': list,
+            },
+            'skip': 'Blocked outside of Israel',
+        }
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        media_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, media_id)
+
+        video_id = self._html_search_regex(r'clipId=([\w-]+)', webpage, 'video id')
+
+        metadata = self._download_xml(
+            'http://sport5-metadata-rr-d.nsacdn.com/vod/vod/%s/HDS/metadata.xml' % video_id,
+            video_id)
+
+        error = metadata.find('./Error')
+        if error is not None:
+            raise ExtractorError(
+                '%s returned error: %s - %s' % (
+                    self.IE_NAME,
+                    error.find('./Name').text,
+                    error.find('./Description').text),
+                expected=True)
+
+        title = metadata.find('./Title').text
+        description = metadata.find('./Description').text
+        duration = int(metadata.find('./Duration').text)
+
+        posters_el = metadata.find('./PosterLinks')
+        thumbnails = [{
+            'url': thumbnail.text,
+            'width': int(thumbnail.get('width')),
+            'height': int(thumbnail.get('height')),
+        } for thumbnail in posters_el.findall('./PosterIMG')] if posters_el is not None else []
+
+        categories_el = metadata.find('./Categories')
+        categories = [
+            cat.get('name') for cat in categories_el.findall('./Category')
+        ] if categories_el is not None else []
+
+        formats = [{
+            'url': fmt.text,
+            'ext': 'mp4',
+            'vbr': int(fmt.get('bitrate')),
+            'width': int(fmt.get('width')),
+            'height': int(fmt.get('height')),
+        } for fmt in metadata.findall('./PlaybackLinks/FileURL')]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnails': thumbnails,
+            'duration': duration,
+            'categories': categories,
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/sportbox.py b/youtube_dl/extractor/sportbox.py
new file mode 100644 (file)
index 0000000..becdf65
--- /dev/null
@@ -0,0 +1,76 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    parse_iso8601,
+)
+
+
+class SportBoxIE(InfoExtractor):
+    _VALID_URL = r'https?://news\.sportbox\.ru/Vidy_sporta/(?:[^/]+/)+spbvideo_NI\d+_(?P<display_id>.+)'
+    _TESTS = [
+        {
+            'url': 'http://news.sportbox.ru/Vidy_sporta/Avtosport/Rossijskij/spbvideo_NI483529_Gonka-2-zaezd-Obyedinenniy-2000-klassi-Turing-i-S',
+            'md5': 'ff56a598c2cf411a9a38a69709e97079',
+            'info_dict': {
+                'id': '80822',
+                'ext': 'mp4',
+                'title': 'Гонка 2  заезд ««Объединенный 2000»: классы Туринг и Супер-продакшн',
+                'description': 'md5:81715fa9c4ea3d9e7915dc8180c778ed',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'timestamp': 1411896237,
+                'upload_date': '20140928',
+                'duration': 4846,
+            },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            },
+        }, {
+            'url': 'http://news.sportbox.ru/Vidy_sporta/billiard/spbvideo_NI486287_CHempionat-mira-po-dinamichnoy-piramide-4',
+            'only_matching': True,
+        }
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        video_id = self._search_regex(
+            r'src="/vdl/player/media/(\d+)"', webpage, 'video id')
+
+        player = self._download_webpage(
+            'http://news.sportbox.ru/vdl/player/media/%s' % video_id,
+            display_id, 'Downloading player webpage')
+
+        hls = self._search_regex(
+            r"var\s+original_hls_file\s*=\s*'([^']+)'", player, 'hls file')
+
+        formats = self._extract_m3u8_formats(hls, display_id, 'mp4')
+
+        title = self._html_search_regex(
+            r'<h1 itemprop="name">([^<]+)</h1>', webpage, 'title')
+        description = self._html_search_regex(
+            r'(?s)<div itemprop="description">(.+?)</div>', webpage, 'description', fatal=False)
+        thumbnail = self._og_search_thumbnail(webpage)
+        timestamp = parse_iso8601(self._search_regex(
+            r'<span itemprop="uploadDate">([^<]+)</span>', webpage, 'timestamp', fatal=False))
+        duration = parse_duration(self._html_search_regex(
+            r'<meta itemprop="duration" content="PT([^"]+)">', webpage, 'duration', fatal=False))
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'duration': duration,
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/sportdeutschland.py b/youtube_dl/extractor/sportdeutschland.py
new file mode 100644 (file)
index 0000000..057ef52
--- /dev/null
@@ -0,0 +1,95 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_request,
+    parse_iso8601,
+)
+
+
+class SportDeutschlandIE(InfoExtractor):
+    _VALID_URL = r'https?://sportdeutschland\.tv/(?P<sport>[^/?#]+)/(?P<id>[^?#/]+)(?:$|[?#])'
+    _TESTS = [{
+        'url': 'http://sportdeutschland.tv/badminton/live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
+        'info_dict': {
+            'id': 'live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
+            'ext': 'mp4',
+            'title': 're:Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen',
+            'categories': ['Badminton'],
+            'view_count': int,
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'description': 're:Die Badminton-WM 2014 aus Kopenhagen bei Sportdeutschland\.TV',
+            'timestamp': int,
+            'upload_date': 're:^201408[23][0-9]$',
+        },
+        'params': {
+            'skip_download': 'Live stream',
+        },
+    }, {
+        'url': 'http://sportdeutschland.tv/li-ning-badminton-wm-2014/lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs',
+        'info_dict': {
+            'id': 'lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs',
+            'ext': 'mp4',
+            'upload_date': '20140825',
+            'description': 'md5:60a20536b57cee7d9a4ec005e8687504',
+            'timestamp': 1408976060,
+            'title': 'Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen: Herren Einzel, Wei Lee vs. Keun Lee',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'view_count': int,
+            'categories': ['Li-Ning Badminton WM 2014'],
+        }
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        sport_id = mobj.group('sport')
+
+        api_url = 'http://splink.tv/api/permalinks/%s/%s' % (
+            sport_id, video_id)
+        req = compat_urllib_request.Request(api_url, headers={
+            'Accept': 'application/vnd.vidibus.v2.html+json',
+            'Referer': url,
+        })
+        data = self._download_json(req, video_id)
+
+        categories = list(data.get('section', {}).get('tags', {}).values())
+        asset = data['asset']
+
+        formats = []
+        smil_url = asset['video']
+        if '.smil' in smil_url:
+            m3u8_url = smil_url.replace('.smil', '.m3u8')
+            formats.extend(
+                self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4'))
+
+            smil_doc = self._download_xml(
+                smil_url, video_id, note='Downloading SMIL metadata')
+            base_url = smil_doc.find('./head/meta').attrib['base']
+            formats.extend([{
+                'format_id': 'rtmp',
+                'url': base_url,
+                'play_path': n.attrib['src'],
+                'ext': 'flv',
+                'preference': -100,
+                'format_note': 'Seems to fail at example stream',
+            } for n in smil_doc.findall('./body/video')])
+        else:
+            formats.append({'url': smil_url})
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'formats': formats,
+            'title': asset['title'],
+            'thumbnail': asset.get('image'),
+            'description': asset.get('teaser'),
+            'categories': categories,
+            'view_count': asset.get('views'),
+            'rtmp_live': asset.get('live'),
+            'timestamp': parse_iso8601(asset.get('date')),
+        }
diff --git a/youtube_dl/extractor/srmediathek.py b/youtube_dl/extractor/srmediathek.py
new file mode 100644 (file)
index 0000000..666a7dc
--- /dev/null
@@ -0,0 +1,43 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import js_to_json
+
+
+class SRMediathekIE(InfoExtractor):
+    IE_DESC = 'Saarländischer Rundfunk'
+    _VALID_URL = r'https?://sr-mediathek\.sr-online\.de/index\.php\?.*?&id=(?P<id>[0-9]+)'
+
+    _TEST = {
+        'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455',
+        'info_dict': {
+            'id': '28455',
+            'ext': 'mp4',
+            'title': 'sportarena (26.10.2014)',
+            'description': 'Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        murls = json.loads(js_to_json(self._search_regex(
+            r'var mediaURLs\s*=\s*(.*?);\n', webpage, 'video URLs')))
+        formats = [{'url': murl} for murl in murls]
+        self._sort_formats(formats)
+
+        title = json.loads(js_to_json(self._search_regex(
+            r'var mediaTitles\s*=\s*(.*?);\n', webpage, 'title')))[0]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
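
The arrays embedded in the page are JavaScript, not strict JSON (single quotes and the like), hence the js_to_json pass before json.loads. An illustrative use (the sample literal is hypothetical):

    import json
    from youtube_dl.utils import js_to_json

    media_urls_js = "['http://example.com/a.mp4', 'http://example.com/b.mp4']"
    murls = json.loads(js_to_json(media_urls_js))  # quotes normalised first
    print(murls[0])
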
index 44c52c718e2090cca8f28c6542d67ae4585332e6..4a3d8bb8f267b588c59e2f16b208955a70d362d9 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -9,24 +11,23 @@ from ..utils import (
 
 
 class StanfordOpenClassroomIE(InfoExtractor):
-    IE_NAME = u'stanfordoc'
-    IE_DESC = u'Stanford Open ClassRoom'
-    _VALID_URL = r'^(?:https?://)?openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
+    IE_NAME = 'stanfordoc'
+    IE_DESC = 'Stanford Open ClassRoom'
+    _VALID_URL = r'https?://openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
     _TEST = {
-        u'url': u'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
-        u'file': u'PracticalUnix_intro-environment.mp4',
-        u'md5': u'544a9468546059d4e80d76265b0443b8',
-        u'info_dict': {
-            u"title": u"Intro Environment"
+        'url': 'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
+        'md5': '544a9468546059d4e80d76265b0443b8',
+        'info_dict': {
+            'id': 'PracticalUnix_intro-environment',
+            'ext': 'mp4',
+            'title': 'Intro Environment',
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
 
-        if mobj.group('course') and mobj.group('video'): # A specific video
+        if mobj.group('course') and mobj.group('video'):  # A specific video
             course = mobj.group('course')
             video = mobj.group('video')
             info = {
@@ -35,7 +36,6 @@ class StanfordOpenClassroomIE(InfoExtractor):
                 'upload_date': None,
             }
 
-            self.report_extraction(info['id'])
             baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
             xmlUrl = baseUrl + video + '.xml'
             mdoc = self._download_xml(xmlUrl, info['id'])
@@ -43,63 +43,49 @@ class StanfordOpenClassroomIE(InfoExtractor):
                 info['title'] = mdoc.findall('./title')[0].text
                 info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
             except IndexError:
-                raise ExtractorError(u'Invalid metadata XML file')
-            info['ext'] = info['url'].rpartition('.')[2]
-            return [info]
-        elif mobj.group('course'): # A course page
+                raise ExtractorError('Invalid metadata XML file')
+            return info
+        elif mobj.group('course'):  # A course page
             course = mobj.group('course')
             info = {
                 'id': course,
-                'type': 'playlist',
+                '_type': 'playlist',
                 'uploader': None,
                 'upload_date': None,
             }
 
-            coursepage = self._download_webpage(url, info['id'],
-                                        note='Downloading course info page',
-                                        errnote='Unable to download course info page')
+            coursepage = self._download_webpage(
+                url, info['id'],
+                note='Downloading course info page',
+                errnote='Unable to download course info page')
 
-            info['title'] = self._html_search_regex('<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])
+            info['title'] = self._html_search_regex(
+                r'<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])
 
-            info['description'] = self._html_search_regex('<description>([^<]+)</description>',
-                coursepage, u'description', fatal=False)
+            info['description'] = self._html_search_regex(
+                r'(?s)<description>([^<]+)</description>',
+                coursepage, 'description', fatal=False)
 
             links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
-            info['list'] = [
-                {
-                    'type': 'reference',
-                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
-                }
-                    for vpage in links]
-            results = []
-            for entry in info['list']:
-                assert entry['type'] == 'reference'
-                results += self.extract(entry['url'])
-            return results
-        else: # Root page
+            info['entries'] = [self.url_result(
+                'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
+            ) for l in links]
+            return info
+        else:  # Root page
             info = {
                 'id': 'Stanford OpenClassroom',
-                'type': 'playlist',
+                '_type': 'playlist',
                 'uploader': None,
                 'upload_date': None,
             }
+            info['title'] = info['id']
 
             rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
             rootpage = self._download_webpage(rootURL, info['id'],
-                errnote=u'Unable to download course info page')
-
-            info['title'] = info['id']
+                                              errnote='Unable to download course info page')
 
             links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
-            info['list'] = [
-                {
-                    'type': 'reference',
-                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
-                }
-                    for cpage in links]
-
-            results = []
-            for entry in info['list']:
-                assert entry['type'] == 'reference'
-                results += self.extract(entry['url'])
-            return results
+            info['entries'] = [self.url_result(
+                'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
+            ) for l in links]
+            return info
index 172def221e1277298dc355a2cfdbea3ae4f9fdce..c1178f26de0b961ad68eb6d1ddb89550746f4dd7 100644 (file)
@@ -13,7 +13,7 @@ from ..utils import (
 
 class StreamcloudIE(InfoExtractor):
     IE_NAME = 'streamcloud.eu'
-    _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)/(?P<fname>[^#?]*)\.html'
+    _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)(?:/(?P<fname>[^#?]*)\.html)?'
 
     _TEST = {
         'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html',
@@ -27,8 +27,8 @@ class StreamcloudIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
+        url = 'http://streamcloud.eu/%s' % video_id
 
         orig_webpage = self._download_webpage(url, video_id)
 
index db33745c14472f7f3e7749978585f0b2b2c53af2..59a51268d25ed4b0718c67ae91a5e8f07fcfe42c 100644 (file)
@@ -1,7 +1,8 @@
+from __future__ import unicode_literals
 from .common import InfoExtractor
 
+from ..compat import compat_str
 from ..utils import (
-    compat_str,
     ExtractorError,
 )
 
@@ -17,10 +18,10 @@ class SubtitlesInfoExtractor(InfoExtractor):
         sub_lang_list = self._get_available_subtitles(video_id, webpage)
         auto_captions_list = self._get_available_automatic_caption(video_id, webpage)
         sub_lang = ",".join(list(sub_lang_list.keys()))
-        self.to_screen(u'%s: Available subtitles for video: %s' %
+        self.to_screen('%s: Available subtitles for video: %s' %
                        (video_id, sub_lang))
         auto_lang = ",".join(auto_captions_list.keys())
-        self.to_screen(u'%s: Available automatic captions for video: %s' %
+        self.to_screen('%s: Available automatic captions for video: %s' %
                        (video_id, auto_lang))
 
     def extract_subtitles(self, video_id, webpage):
@@ -50,8 +51,8 @@ class SubtitlesInfoExtractor(InfoExtractor):
 
             sub_lang_list = {}
             for sub_lang in requested_langs:
-                if not sub_lang in available_subs_list:
-                    self._downloader.report_warning(u'no closed captions found in the specified language "%s"' % sub_lang)
+                if sub_lang not in available_subs_list:
+                    self._downloader.report_warning('no closed captions found in the specified language "%s"' % sub_lang)
                     continue
                 sub_lang_list[sub_lang] = available_subs_list[sub_lang]
 
@@ -70,10 +71,10 @@ class SubtitlesInfoExtractor(InfoExtractor):
         try:
             sub = self._download_subtitle_url(sub_lang, url)
         except ExtractorError as err:
-            self._downloader.report_warning(u'unable to download video subtitles for %s: %s' % (sub_lang, compat_str(err)))
+            self._downloader.report_warning('unable to download video subtitles for %s: %s' % (sub_lang, compat_str(err)))
             return
         if not sub:
-            self._downloader.report_warning(u'Did not fetch video subtitles')
+            self._downloader.report_warning('Did not fetch video subtitles')
             return
         return sub
 
@@ -94,5 +95,5 @@ class SubtitlesInfoExtractor(InfoExtractor):
         Must be redefined by the subclasses that support automatic captions,
         otherwise it will return {}
         """
-        self._downloader.report_warning(u'Automatic Captions not supported by this server')
+        self._downloader.report_warning('Automatic Captions not supported by this server')
         return {}
diff --git a/youtube_dl/extractor/sunporno.py b/youtube_dl/extractor/sunporno.py
new file mode 100644 (file)
index 0000000..263f09b
--- /dev/null
@@ -0,0 +1,70 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    int_or_none,
+    qualities,
+    determine_ext,
+)
+
+
+class SunPornoIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?sunporno\.com/videos/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://www.sunporno.com/videos/807778/',
+        'md5': '6457d3c165fd6de062b99ef6c2ff4c86',
+        'info_dict': {
+            'id': '807778',
+            'ext': 'flv',
+            'title': 'md5:0a400058e8105d39e35c35e7c5184164',
+            'description': 'md5:a31241990e1bd3a64e72ae99afb325fb',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'duration': 302,
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, 'title')
+        description = self._html_search_meta('description', webpage, 'description')
+        thumbnail = self._html_search_regex(
+            r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False)
+
+        duration = parse_duration(self._search_regex(
+            r'Duration:\s*(\d+:\d+)\s*<', webpage, 'duration', fatal=False))
+
+        view_count = int_or_none(self._html_search_regex(
+            r'class="views">\s*(\d+)\s*<', webpage, 'view count', fatal=False))
+        comment_count = int_or_none(self._html_search_regex(
+            r'(\d+)</b> Comments?', webpage, 'comment count', fatal=False))
+
+        formats = []
+        quality = qualities(['mp4', 'flv'])
+        for video_url in re.findall(r'<source src="([^"]+)"', webpage):
+            video_ext = determine_ext(video_url)
+            formats.append({
+                'url': video_url,
+                'format_id': video_ext,
+                'quality': quality(video_ext),
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'view_count': view_count,
+            'comment_count': comment_count,
+            'formats': formats,
+            'age_limit': 18,
+        }
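
qualities() turns the ordered list of format identifiers into a scoring function, so _sort_formats can rank the <source> entries. A sketch of the contract as used above (later list entries score higher; unknown identifiers score -1):

    def qualities(quality_ids):
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    quality = qualities(['mp4', 'flv'])
    print(quality('mp4'))   # 0
    print(quality('flv'))   # 1
    print(quality('webm'))  # -1
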
index 5d9d703673265ca4a53a54f28e34494d570cb206..58073eefeffc0f3ebc244a6087cad36662940228 100644 (file)
@@ -52,20 +52,6 @@ class SWRMediathekIE(InfoExtractor):
             'uploader': 'SWR 2',
             'uploader_id': '284670',
         }
-    }, {
-        'url': 'http://swrmediathek.de/content/player.htm?show=52dc7e00-15c5-11e4-84bc-0026b975f2e6',
-        'md5': '881531487d0633080a8cc88d31ef896f',
-        'info_dict': {
-            'id': '52dc7e00-15c5-11e4-84bc-0026b975f2e6',
-            'ext': 'mp4',
-            'title': 'Familienspaß am Bodensee',
-            'description': 'md5:0b591225a32cfde7be1629ed49fe4315',
-            'thumbnail': 're:http://.*\.jpg',
-            'duration': 1784,
-            'upload_date': '20140727',
-            'uploader': 'SWR Fernsehen BW',
-            'uploader_id': '281130',
-        }
     }]
 
     def _real_extract(self, url):
@@ -94,7 +80,7 @@ class SWRMediathekIE(InfoExtractor):
 
             if media_type == 'Video':
                 fmt.update({
-                    'format_note': ['144p', '288p', '544p'][quality-1],
+                    'format_note': ['144p', '288p', '544p', '720p'][quality - 1],
                     'vcodec': codec,
                 })
             elif media_type == 'Audio':
@@ -115,4 +101,4 @@ class SWRMediathekIE(InfoExtractor):
             'uploader': attr['channel_title'],
             'uploader_id': attr['channel_idkey'],
             'formats': formats,
-        }
\ No newline at end of file
+        }
index f76b6e2b22c7fa391664218f9e59fa62908c4c08..5ca079f880717933a4216de6399046a44970d29b 100644 (file)
@@ -10,7 +10,6 @@ class SyfyIE(InfoExtractor):
 
     _TESTS = [{
         'url': 'http://www.syfy.com/videos/Robot%20Combat%20League/Behind%20the%20Scenes/vid:2631458',
-        'md5': 'e07de1d52c7278adbb9b9b1c93a66849',
         'info_dict': {
             'id': 'NmqMrGnXvmO1',
             'ext': 'flv',
index c9359fafb5c5989923c6320e3e684673b80057d6..aa5964acb6b3f40b0d663bd2169ac6aec0c210ae 100644 (file)
@@ -1,27 +1,24 @@
 # -*- coding: utf-8 -*-
-
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import determine_ext
 
 
 class SztvHuIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
+    _VALID_URL = r'http://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
     _TEST = {
-        u'url': u'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
-        u'file': u'20130909.mp4',
-        u'md5': u'a6df607b11fb07d0e9f2ad94613375cb',
-        u'info_dict': {
-            u"title": u"Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren",
-            u"description": u'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
+        'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
+        'md5': 'a6df607b11fb07d0e9f2ad94613375cb',
+        'info_dict': {
+            'id': '20130909',
+            'ext': 'mp4',
+            'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren',
+            'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
         },
-        u'skip': u'Service temporarily disabled as of 2013-11-20'
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         video_file = self._search_regex(
             r'file: "...:(.*?)",', webpage, 'video file')
@@ -39,7 +36,6 @@ class SztvHuIE(InfoExtractor):
             'id': video_id,
             'url': video_url,
             'title': title,
-            'ext': determine_ext(video_url),
             'description': description,
             'thumbnail': thumbnail,
         }
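
Several files in this commit collapse the re.match/mobj.group('id') boilerplate into self._match_id(url). The helper is essentially the following (a sketch; the real classmethod is defined on InfoExtractor in extractor/common.py):

    import re

    class SketchIE(object):
        _VALID_URL = r'http://example\.com/.+-(?P<id>[0-9]+)'

        @classmethod
        def _match_id(cls, url):
            m = re.match(cls._VALID_URL, url)
            assert m
            return m.group('id')

    print(SketchIE._match_id('http://example.com/some-title-20130909'))
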
index b870474515ba61ee33641c86554d53d68a6bf46d..bfe07b02417a2a44f23a09c10c25d48ec18b5535 100644 (file)
@@ -4,10 +4,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..utils import parse_filesize
 
 
 class TagesschauIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/video/video(?P<id>-?[0-9]+)\.html'
+    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?:sendung/ts|video/video)(?P<id>-?[0-9]+)\.html'
 
     _TESTS = [{
         'url': 'http://www.tagesschau.de/multimedia/video/video1399128.html',
@@ -19,6 +20,16 @@ class TagesschauIE(InfoExtractor):
             'description': 'md5:69da3c61275b426426d711bde96463ab',
             'thumbnail': 're:^http:.*\.jpg$',
         },
+    }, {
+        'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html',
+        'md5': '3c54c1f6243d279b706bde660ceec633',
+        'info_dict': {
+            'id': '5727',
+            'ext': 'mp4',
+            'description': 'md5:695c01bfd98b7e313c501386327aea59',
+            'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr',
+            'thumbnail': 're:^http:.*\.jpg$',
+        }
     }]
 
     _FORMATS = {
@@ -28,42 +39,82 @@ class TagesschauIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        if video_id.startswith('-'):
-            display_id = video_id.strip('-')
-        else:
-            display_id = video_id
-
+        video_id = self._match_id(url)
+        display_id = video_id.lstrip('-')
         webpage = self._download_webpage(url, display_id)
 
-        playerpage = self._download_webpage(
-            'http://www.tagesschau.de/multimedia/video/video%s~player_autoplay-true.html' % video_id,
-            display_id, 'Downloading player page')
-
-        medias = re.findall(
-            r'"(http://media.+?)", type:"video/(.+?)", quality:"(.+?)"',
-            playerpage)
+        player_url = self._html_search_meta(
+            'twitter:player', webpage, 'player URL', default=None)
+        if player_url:
+            playerpage = self._download_webpage(
+                player_url, display_id, 'Downloading player page')
 
-        formats = []
-        for url, ext, res in medias:
-            f = {
-                'format_id': res + '_' + ext,
-                'url': url,
-                'ext': ext,
-            }
-            f.update(self._FORMATS.get(res, {}))
-            formats.append(f)
+            medias = re.findall(
+                r'"(http://media.+?)", type:"video/(.+?)", quality:"(.+?)"',
+                playerpage)
+            formats = []
+            for url, ext, res in medias:
+                f = {
+                    'format_id': res + '_' + ext,
+                    'url': url,
+                    'ext': ext,
+                }
+                f.update(self._FORMATS.get(res, {}))
+                formats.append(f)
+            thumbnail_fn = re.findall(r'"(/multimedia/.+?\.jpg)"', playerpage)[-1]
+            title = self._og_search_title(webpage).strip()
+            description = self._og_search_description(webpage).strip()
+        else:
+            download_text = self._search_regex(
+                r'(?s)<p>Wir bieten dieses Video in folgenden Formaten zum Download an:</p>\s*<div class="controls">(.*?)</div>\s*<p>',
+                webpage, 'download links')
+            links = re.finditer(
+                r'<div class="button" title="(?P<title>[^"]*)"><a href="(?P<url>[^"]+)">(?P<name>.+?)</a></div>',
+                download_text)
+            formats = []
+            for l in links:
+                format_id = self._search_regex(
+                    r'.*/[^/.]+\.([^/]+)\.[^/.]+', l.group('url'), 'format ID')
+                format = {
+                    'format_id': format_id,
+                    'url': l.group('url'),
+                    'format_name': l.group('name'),
+                }
+                m = re.match(
+                    r'''(?x)
+                        Video:\s*(?P<vcodec>[a-zA-Z0-9/._-]+)\s*&\#10;
+                        (?P<width>[0-9]+)x(?P<height>[0-9]+)px&\#10;
+                        (?P<vbr>[0-9]+)kbps&\#10;
+                        Audio:\s*(?P<abr>[0-9]+)kbps,\s*(?P<audio_desc>[A-Za-z\.0-9]+)&\#10;
+                        Gr&ouml;&szlig;e:\s*(?P<filesize_approx>[0-9.,]+\s+[a-zA-Z]*B)''',
+                    l.group('title'))
+                if m:
+                    format.update({
+                        'format_note': m.group('audio_desc'),
+                        'vcodec': m.group('vcodec'),
+                        'width': int(m.group('width')),
+                        'height': int(m.group('height')),
+                        'abr': int(m.group('abr')),
+                        'vbr': int(m.group('vbr')),
+                        'filesize_approx': parse_filesize(m.group('filesize_approx')),
+                    })
+                formats.append(format)
+            thumbnail_fn = self._search_regex(
+                r'(?s)<img alt="Sendungsbild".*?src="([^"]+)"',
+                webpage, 'thumbnail', fatal=False)
+            description = self._html_search_regex(
+                r'(?s)<p class="teasertext">(.*?)</p>',
+                webpage, 'description', fatal=False)
+            title = self._html_search_regex(
+                r'<span class="headline".*?>(.*?)</span>', webpage, 'title')
 
         self._sort_formats(formats)
-
-        thumbnail = re.findall(r'"(/multimedia/.+?\.jpg)"', playerpage)[-1]
+        thumbnail = 'http://www.tagesschau.de' + thumbnail_fn if thumbnail_fn else None
 
         return {
             'id': display_id,
-            'title': self._og_search_title(webpage).strip(),
-            'thumbnail': 'http://www.tagesschau.de' + thumbnail,
+            'title': title,
+            'thumbnail': thumbnail,
             'formats': formats,
-            'description': self._og_search_description(webpage).strip(),
+            'description': description,
         }
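
In the download-links fallback above, the tooltip carries a human-readable size ('13,5 MB' style), which parse_filesize converts into a byte count for filesize_approx. A hedged usage sketch (the sample string is illustrative):

    from youtube_dl.utils import parse_filesize

    approx = parse_filesize('13,5 MB')  # comma decimal separators accepted
    print(approx)  # integer byte count, or None if the string does not parse
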
diff --git a/youtube_dl/extractor/tapely.py b/youtube_dl/extractor/tapely.py
new file mode 100644 (file)
index 0000000..283e113
--- /dev/null
@@ -0,0 +1,105 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    clean_html,
+    compat_urllib_request,
+    float_or_none,
+    parse_iso8601,
+)
+
+
+class TapelyIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?tape\.ly/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?'
+    _API_URL = 'http://tape.ly/showtape?id={0:}'
+    _S3_SONG_URL = 'http://mytape.s3.amazonaws.com/{0:}'
+    _SOUNDCLOUD_SONG_URL = 'http://api.soundcloud.com{0:}'
+    _TESTS = [
+        {
+            'url': 'http://tape.ly/my-grief-as-told-by-water',
+            'info_dict': {
+                'id': 23952,
+                'title': 'my grief as told by water',
+                'thumbnail': 're:^https?://.*\.png$',
+                'uploader_id': 16484,
+                'timestamp': 1411848286,
+                'description': 'For Robin and Ponkers, whom the tides of life have taken out to sea.',
+            },
+            'playlist_count': 13,
+        },
+        {
+            'url': 'http://tape.ly/my-grief-as-told-by-water/1',
+            'md5': '79031f459fdec6530663b854cbc5715c',
+            'info_dict': {
+                'id': 258464,
+                'title': 'Dreaming Awake  (My Brightest Diamond)',
+                'ext': 'm4a',
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('id')
+
+        playlist_url = self._API_URL.format(display_id)
+        request = compat_urllib_request.Request(playlist_url)
+        request.add_header('X-Requested-With', 'XMLHttpRequest')
+        request.add_header('Accept', 'application/json')
+        request.add_header('Referer', url)
+
+        playlist = self._download_json(request, display_id)
+
+        tape = playlist['tape']
+
+        entries = []
+        for s in tape['songs']:
+            song = s['song']
+            entry = {
+                'id': song['id'],
+                'duration': float_or_none(song.get('songduration'), 1000),
+                'title': song['title'],
+            }
+            if song['source'] == 'S3':
+                entry.update({
+                    'url': self._S3_SONG_URL.format(song['filename']),
+                })
+                entries.append(entry)
+            elif song['source'] == 'YT':
+                self.to_screen('YouTube video detected')
+                yt_id = song['filename'].replace('/youtube/', '')
+                entry.update(self.url_result(yt_id, 'Youtube', video_id=yt_id))
+                entries.append(entry)
+            elif song['source'] == 'SC':
+                self.to_screen('SoundCloud song detected')
+                sc_url = self._SOUNDCLOUD_SONG_URL.format(song['filename'])
+                entry.update(self.url_result(sc_url, 'Soundcloud'))
+                entries.append(entry)
+            else:
+                self.report_warning('Unknown song source: %s' % song['source'])
+
+        if mobj.group('songnr'):
+            songnr = int(mobj.group('songnr')) - 1
+            try:
+                return entries[songnr]
+            except IndexError:
+                raise ExtractorError(
+                    'No song with index: %s' % mobj.group('songnr'),
+                    expected=True)
+
+        return {
+            '_type': 'playlist',
+            'id': tape['id'],
+            'display_id': display_id,
+            'title': tape['name'],
+            'entries': entries,
+            'thumbnail': tape.get('image_url'),
+            'description': clean_html(tape.get('subtext')),
+            'like_count': tape.get('likescount'),
+            'uploader_id': tape.get('user_id'),
+            'timestamp': parse_iso8601(tape.get('published_at')),
+        }
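
The per-song branching above only yields a direct media URL for S3-hosted files;
YouTube and SoundCloud entries are handed off via url_result so the matching
extractor resolves them in a second pass. A condensed sketch of that dispatch
(the sample song dicts are invented):

# Hedged sketch; real entries carry full info dicts, not bare URLs.
def resolve(song):
    if song['source'] == 'S3':
        return 'http://mytape.s3.amazonaws.com/' + song['filename']
    if song['source'] == 'YT':
        return song['filename'].replace('/youtube/', '')  # bare YouTube video id
    if song['source'] == 'SC':
        return 'http://api.soundcloud.com' + song['filename']
    return None  # unknown source -> warning in the extractor

print(resolve({'source': 'YT', 'filename': '/youtube/dQw4w9WgXcQ'}))  # -> dQw4w9WgXcQ
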
diff --git a/youtube_dl/extractor/tass.py b/youtube_dl/extractor/tass.py
new file mode 100644 (file)
index 0000000..c4ef707
--- /dev/null
@@ -0,0 +1,62 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    js_to_json,
+    qualities,
+)
+
+
+class TassIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:tass\.ru|itar-tass\.com)/[^/]+/(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://tass.ru/obschestvo/1586870',
+            'md5': '3b4cdd011bc59174596b6145cda474a4',
+            'info_dict': {
+                'id': '1586870',
+                'ext': 'mp4',
+                'title': 'Посетителям московского зоопарка показали красную панду',
+                'description': 'Приехавшую из Дублина Зейну можно увидеть в павильоне "Кошки тропиков"',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        {
+            'url': 'http://itar-tass.com/obschestvo/1600009',
+            'only_matching': True,
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        sources = json.loads(js_to_json(self._search_regex(
+            r'(?s)sources\s*:\s*(\[.+?\])', webpage, 'sources')))
+
+        quality = qualities(['sd', 'hd'])
+
+        formats = []
+        for source in sources:
+            video_url = source.get('file')
+            if not video_url or not video_url.startswith('http') or not video_url.endswith('.mp4'):
+                continue
+            label = source.get('label')
+            formats.append({
+                'url': video_url,
+                'format_id': label,
+                'quality': quality(label),
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': self._og_search_title(webpage),
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'formats': formats,
+        }
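
The qualities(['sd', 'hd']) helper returns a ranking function: a known label is
scored by its position in the list (so 'hd' outranks 'sd') and an unknown label
scores -1. Behaviourally it boils down to this sketch:

# Hedged sketch of youtube_dl.utils.qualities (behaviour, not guaranteed verbatim).
def qualities(quality_ids):
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q

quality = qualities(['sd', 'hd'])
print(quality('hd'), quality('sd'), quality('4k'))  # -> 1 0 -1
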
index 46d727d1de6743edcb99109b77caa49ebc1bf0c6..6c3445d792206395b7a36d016b8a42ad255ea9cc 100644 (file)
@@ -106,6 +106,13 @@ class TeacherTubeUserIE(InfoExtractor):
         \s*
         <a\s+href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)"
     '''
+    _TEST = {
+        'url': 'http://www.teachertube.com/user/profile/rbhagwati2',
+        'info_dict': {
+            'id': 'rbhagwati2'
+        },
+        'playlist_mincount': 179,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -114,7 +121,7 @@ class TeacherTubeUserIE(InfoExtractor):
         urls = []
         webpage = self._download_webpage(url, user_id)
         urls.extend(re.findall(self._MEDIA_RE, webpage))
-        
+
         pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[:-1]
         for p in pages:
             more = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, p)
index f8dd7e955ada5ce58fd04d668027587eda1b6c00..5fa67eb8d4441d62c1591289551171cdbcbcf45b 100644 (file)
@@ -8,24 +8,23 @@ from .common import InfoExtractor
 class TeamcocoIE(InfoExtractor):
     _VALID_URL = r'http://teamcoco\.com/video/(?P<video_id>[0-9]+)?/?(?P<display_id>.*)'
     _TESTS = [
-    {
-        'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant',
-        'file': '80187.mp4',
-        'md5': '3f7746aa0dc86de18df7539903d399ea',
-        'info_dict': {
-            'title': 'Conan Becomes A Mary Kay Beauty Consultant',
-            'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.'
+        {
+            'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant',
+            'file': '80187.mp4',
+            'md5': '3f7746aa0dc86de18df7539903d399ea',
+            'info_dict': {
+                'title': 'Conan Becomes A Mary Kay Beauty Consultant',
+                'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.'
+            }
+        }, {
+            'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
+            'file': '19705.mp4',
+            'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
+            'info_dict': {
+                "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.",
+                "title": "Louis C.K. Interview Pt. 1 11/3/11"
+            }
         }
-    },
-    {
-        'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
-        'file': '19705.mp4',
-        'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
-        'info_dict': {
-            "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.",
-            "title": "Louis C.K. Interview Pt. 1 11/3/11"
-        }
-    }
     ]
 
     def _real_extract(self, url):
@@ -33,11 +32,11 @@ class TeamcocoIE(InfoExtractor):
 
         display_id = mobj.group('display_id')
         webpage = self._download_webpage(url, display_id)
-        
+
         video_id = mobj.group("video_id")
         if not video_id:
             video_id = self._html_search_regex(
-                r'<article class="video" data-id="(\d+?)"',
+                r'data-node-id="(\d+?)"',
                 webpage, 'video id')
 
         data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id
index a55f236cbbca0ac5ef70db2b22eb3c94a778b2c1..16e945d8e624adc51e6a68eab786bdece0a29960 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -11,24 +13,30 @@ class TechTalksIE(InfoExtractor):
     _VALID_URL = r'https?://techtalks\.tv/talks/[^/]*/(?P<id>\d+)/'
 
     _TEST = {
-        u'url': u'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/',
-        u'playlist': [
+        'url': 'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/',
+        'info_dict': {
+            'id': '57758',
+            'title': 'Learning Topic Models --- Going beyond SVD',
+        },
+        'playlist': [
             {
-                u'file': u'57758.flv',
-                u'info_dict': {
-                    u'title': u'Learning Topic Models --- Going beyond SVD',
+                'info_dict': {
+                    'id': '57758',
+                    'ext': 'flv',
+                    'title': 'Learning Topic Models --- Going beyond SVD',
                 },
             },
             {
-                u'file': u'57758-slides.flv',
-                u'info_dict': {
-                    u'title': u'Learning Topic Models --- Going beyond SVD',
+                'info_dict': {
+                    'id': '57758-slides',
+                    'ext': 'flv',
+                    'title': 'Learning Topic Models --- Going beyond SVD',
                 },
             },
         ],
-        u'params': {
+        'params': {
             # rtmp download
-            u'skip_download': True,
+            'skip_download': True,
         },
     }
 
@@ -36,30 +44,36 @@ class TechTalksIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         talk_id = mobj.group('id')
         webpage = self._download_webpage(url, talk_id)
-        rtmp_url = self._search_regex(r'netConnectionUrl: \'(.*?)\'', webpage,
-            u'rtmp url')
-        play_path = self._search_regex(r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"',
-            webpage, u'presenter play path')
+        rtmp_url = self._search_regex(
+            r'netConnectionUrl: \'(.*?)\'', webpage, 'rtmp url')
+        play_path = self._search_regex(
+            r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"',
+            webpage, 'presenter play path')
         title = clean_html(get_element_by_attribute('class', 'title', webpage))
         video_info = {
-                'id': talk_id,
-                'title': title,
-                'url': rtmp_url,
-                'play_path': play_path,
-                'ext': 'flv',
-            }
+            'id': talk_id,
+            'title': title,
+            'url': rtmp_url,
+            'play_path': play_path,
+            'ext': 'flv',
+        }
         m_slides = re.search(r'<a class="slides" href=\'(.*?)\'', webpage)
         if m_slides is None:
             return video_info
         else:
-            return [
-                video_info,
-                # The slides video
-                {
-                    'id': talk_id + '-slides',
-                    'title': title,
-                    'url': rtmp_url,
-                    'play_path': m_slides.group(1),
-                    'ext': 'flv',
-                },
-            ]
+            return {
+                '_type': 'playlist',
+                'id': talk_id,
+                'title': title,
+                'entries': [
+                    video_info,
+                    # The slides video
+                    {
+                        'id': talk_id + '-slides',
+                        'title': title,
+                        'url': rtmp_url,
+                        'play_path': m_slides.group(1),
+                        'ext': 'flv',
+                    },
+                ],
+            }
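
Returning a bare list of info dicts, as the old code did, is the legacy form;
the rewrite wraps the talk and its slides in an explicit playlist result that
carries its own id and title. The shape, with placeholder stream values:

# Hedged sketch of the returned structure; the rtmp URLs are placeholders.
playlist = {
    '_type': 'playlist',
    'id': '57758',
    'title': 'Learning Topic Models --- Going beyond SVD',
    'entries': [
        {'id': '57758', 'title': 'Learning Topic Models --- Going beyond SVD',
         'url': 'rtmp://example/talk', 'ext': 'flv'},
        {'id': '57758-slides', 'title': 'Learning Topic Models --- Going beyond SVD',
         'url': 'rtmp://example/talk', 'ext': 'flv'},
    ],
}
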
index bce32a87330731e229c17e267ca7f65342d22952..72160503ccd52e2f84e843184fbdf0e92080a19f 100644 (file)
@@ -33,11 +33,12 @@ class TEDIE(SubtitlesInfoExtractor):
             'ext': 'mp4',
             'title': 'The illusion of consciousness',
             'description': ('Philosopher Dan Dennett makes a compelling '
-                'argument that not only don\'t we understand our own '
-                'consciousness, but that half the time our brains are '
-                'actively fooling us.'),
+                            'argument that not only don\'t we understand our own '
+                            'consciousness, but that half the time our brains are '
+                            'actively fooling us.'),
             'uploader': 'Dan Dennett',
             'width': 854,
+            'duration': 1308,
         }
     }, {
         'url': 'http://www.ted.com/watch/ted-institute/ted-bcg/vishal-sikka-the-beauty-and-power-of-algorithms',
@@ -51,13 +52,36 @@ class TEDIE(SubtitlesInfoExtractor):
         }
     }, {
         'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best',
-        'md5': '49144e345a899b8cb34d315f3b9cfeeb',
         'info_dict': {
             'id': '1972',
             'ext': 'mp4',
             'title': 'Be passionate. Be courageous. Be your best.',
             'uploader': 'Gabby Giffords and Mark Kelly',
             'description': 'md5:5174aed4d0f16021b704120360f72b92',
+            'duration': 1128,
+        },
+    }, {
+        'url': 'http://www.ted.com/playlists/who_are_the_hackers',
+        'info_dict': {
+            'id': '10',
+            'title': 'Who are the hackers?',
+        },
+        'playlist_mincount': 6,
+    }, {
+        # contains a youtube video
+        'url': 'https://www.ted.com/talks/douglas_adams_parrots_the_universe_and_everything',
+        'add_ie': ['Youtube'],
+        'info_dict': {
+            'id': '_ZG8HBuDjgc',
+            'ext': 'mp4',
+            'title': 'Douglas Adams: Parrots the Universe and Everything',
+            'description': 'md5:01ad1e199c49ac640cb1196c0e9016af',
+            'uploader': 'University of California Television (UCTV)',
+            'uploader_id': 'UCtelevision',
+            'upload_date': '20080522',
+        },
+        'params': {
+            'skip_download': True,
         },
     }]
 
@@ -69,7 +93,7 @@ class TEDIE(SubtitlesInfoExtractor):
 
     def _extract_info(self, webpage):
         info_json = self._search_regex(r'q\("\w+.init",({.+})\)</script>',
-            webpage, 'info json')
+                                       webpage, 'info json')
         return json.loads(info_json)
 
     def _real_extract(self, url):
@@ -89,7 +113,7 @@ class TEDIE(SubtitlesInfoExtractor):
         '''Returns the videos of the playlist'''
 
         webpage = self._download_webpage(url, name,
-            'Downloading playlist webpage')
+                                         'Downloading playlist webpage')
         info = self._extract_info(webpage)
         playlist_info = info['playlist']
 
@@ -108,6 +132,13 @@ class TEDIE(SubtitlesInfoExtractor):
 
         talk_info = self._extract_info(webpage)['talks'][0]
 
+        if talk_info.get('external') is not None:
+            self.to_screen('Found video from %s' % talk_info['external']['service'])
+            return {
+                '_type': 'url',
+                'url': talk_info['external']['uri'],
+            }
+
         formats = [{
             'url': format_url,
             'format_id': format_id,
@@ -143,12 +174,13 @@ class TEDIE(SubtitlesInfoExtractor):
             thumbnail = 'http://' + thumbnail
         return {
             'id': video_id,
-            'title': talk_info['title'],
+            'title': talk_info['title'].strip(),
             'uploader': talk_info['speaker'],
             'thumbnail': thumbnail,
             'description': self._og_search_description(webpage),
             'subtitles': video_subtitles,
             'formats': formats,
+            'duration': talk_info.get('duration'),
         }
 
     def _get_available_subtitles(self, video_id, talk_info):
@@ -167,8 +199,9 @@ class TEDIE(SubtitlesInfoExtractor):
         webpage = self._download_webpage(url, name)
 
         config_json = self._html_search_regex(
-            r"data-config='([^']+)", webpage, 'config')
-        config = json.loads(config_json)
+            r'"pages\.jwplayer"\s*,\s*({.+?})\s*\)\s*</script>',
+            webpage, 'config')
+        config = json.loads(config_json)['config']
         video_url = config['video']['url']
         thumbnail = config.get('image', {}).get('url')
 
diff --git a/youtube_dl/extractor/telebruxelles.py b/youtube_dl/extractor/telebruxelles.py
new file mode 100644 (file)
index 0000000..a3d05f9
--- /dev/null
@@ -0,0 +1,60 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class TeleBruxellesIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?telebruxelles\.be/(news|sport|dernier-jt)/?(?P<id>[^/#?]+)'
+    _TESTS = [{
+        'url': 'http://www.telebruxelles.be/news/auditions-devant-parlement-francken-galant-tres-attendus/',
+        'md5': '59439e568c9ee42fb77588b2096b214f',
+        'info_dict': {
+            'id': '11942',
+            'display_id': 'auditions-devant-parlement-francken-galant-tres-attendus',
+            'ext': 'flv',
+            'title': 'Parlement : Francken et Galant répondent aux interpellations de l’opposition',
+            'description': 're:Les auditions des ministres se poursuivent*'
+        },
+        'params': {
+            'skip_download': 'requires rtmpdump'
+        },
+    }, {
+        'url': 'http://www.telebruxelles.be/sport/basket-brussels-bat-mons-80-74/',
+        'md5': '181d3fbdcf20b909309e5aef5c6c6047',
+        'info_dict': {
+            'id': '10091',
+            'display_id': 'basket-brussels-bat-mons-80-74',
+            'ext': 'flv',
+            'title': 'Basket : le Brussels bat Mons 80-74',
+            'description': 're:^Ils l\u2019on fait ! En basket, le B*',
+        },
+        'params': {
+            'skip_download': 'requires rtmpdump'
+        },
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+
+        article_id = self._html_search_regex(
+            r"<article id=\"post-(\d+)\"", webpage, 'article ID')
+        title = self._html_search_regex(
+            r'<h1 class=\"entry-title\">(.*?)</h1>', webpage, 'title')
+        description = self._og_search_description(webpage)
+
+        rtmp_url = self._html_search_regex(
+            r"file: \"(rtmp://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}/vod/mp4:\" \+ \"\w+\" \+ \".mp4)\"",
+            webpage, 'RTMP url')
+        rtmp_url = rtmp_url.replace("\" + \"", "")
+
+        return {
+            'id': article_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'url': rtmp_url,
+            'ext': 'flv',
+            'rtmp_live': True  # if rtmpdump is not called with "--live" argument, the download is blocked and cannot be completed
+        }
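
The page embeds the stream address as a JavaScript string concatenation, and the
captured group keeps the literal " + " glue, hence the replace above. With an
invented address:

# Hedged sketch; IP and file name are made up.
raw = 'rtmp://1.2.3.4:1935/vod/mp4:" + "somevideo" + ".mp4'
print(raw.replace('" + "', ''))
# -> rtmp://1.2.3.4:1935/vod/mp4:somevideo.mp4
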
diff --git a/youtube_dl/extractor/telecinco.py b/youtube_dl/extractor/telecinco.py
new file mode 100644 (file)
index 0000000..2a2fff5
--- /dev/null
@@ -0,0 +1,19 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .mitele import MiTeleIE
+
+
+class TelecincoIE(MiTeleIE):
+    IE_NAME = 'telecinco.es'
+    _VALID_URL = r'https?://www\.telecinco\.es/[^/]+/[^/]+/[^/]+/(?P<episode>.*?)\.html'
+
+    _TEST = {
+        'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html',
+        'info_dict': {
+            'id': 'MDSVID20141015_0058',
+            'ext': 'mp4',
+            'title': 'Con Martín Berasategui, hacer un bacalao al ...',
+            'duration': 662,
+        },
+    }
diff --git a/youtube_dl/extractor/telemb.py b/youtube_dl/extractor/telemb.py
new file mode 100644 (file)
index 0000000..1bbd0e7
--- /dev/null
@@ -0,0 +1,78 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import remove_start
+
+
+class TeleMBIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?telemb\.be/(?P<display_id>.+?)_d_(?P<id>\d+)\.html'
+    _TESTS = [
+        {
+            'url': 'http://www.telemb.be/mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-_d_13466.html',
+            'md5': 'f45ea69878516ba039835794e0f8f783',
+            'info_dict': {
+                'id': '13466',
+                'display_id': 'mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-',
+                'ext': 'mp4',
+                'title': 'Mons - Cook with Danielle : des cours de cuisine en anglais ! - Les reportages',
+                'description': 'md5:bc5225f47b17c309761c856ad4776265',
+                'thumbnail': 're:^http://.*\.(?:jpg|png)$',
+            }
+        },
+        {
+            # non-ASCII characters in download URL
+            'url': 'http://telemb.be/les-reportages-havre-incendie-mortel_d_13514.html',
+            'md5': '6e9682736e5ccd4eab7f21e855350733',
+            'info_dict': {
+                'id': '13514',
+                'display_id': 'les-reportages-havre-incendie-mortel',
+                'ext': 'mp4',
+                'title': 'Havré - Incendie mortel - Les reportages',
+                'description': 'md5:5e54cb449acb029c2b7734e2d946bd4a',
+                'thumbnail': 're:^http://.*\.(?:jpg|png)$',
+            }
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        formats = []
+        for video_url in re.findall(r'file\s*:\s*"([^"]+)"', webpage):
+            fmt = {
+                'url': video_url,
+                'format_id': video_url.split(':')[0]
+            }
+            rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url)
+            if rtmp:
+                fmt.update({
+                    'play_path': rtmp.group('playpath'),
+                    'app': rtmp.group('app'),
+                    'player_url': 'http://p.jwpcdn.com/6/10/jwplayer.flash.swf',
+                    'page_url': 'http://www.telemb.be',
+                    'preference': -1,
+                })
+            formats.append(fmt)
+        self._sort_formats(formats)
+
+        title = remove_start(self._og_search_title(webpage), 'TéléMB : ')
+        description = self._html_search_regex(
+            r'<meta property="og:description" content="(.+?)" />',
+            webpage, 'description', fatal=False)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'formats': formats,
+        }
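
For the RTMP variants, the single file URL has to be split into connection URL,
application and play path, which the named groups in the regex above do in one
pass (the greedy 'app' group backtracks until the remainder starts with 'mp4:').
A standalone check with a made-up address:

# Hedged sketch; the address is invented.
import re

video_url = 'rtmp://1.2.3.4/vod/mp4:tlmb/reportage.mp4'
rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url)
print(rtmp.group('url'))       # -> rtmp://1.2.3.4/vod
print(rtmp.group('app'))       # -> vod
print(rtmp.group('playpath'))  # -> mp4:tlmb/reportage.mp4
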
index fdae17b1b817efd2a7666d44cc2cc38de1ccfa22..6e61cc9e2ecf621b19fe13924456970b091bce1c 100644 (file)
@@ -30,7 +30,7 @@ class TF1IE(InfoExtractor):
         embed_url = self._html_search_regex(
             r'"(https://www.wat.tv/embedframe/.*?)"', webpage, 'embed url')
         embed_page = self._download_webpage(embed_url, video_id,
-            'Downloading embed player page')
+                                            'Downloading embed player page')
         wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id')
         wat_info = self._download_json(
             'http://www.wat.tv/interface/contentv3/%s' % wat_id, video_id)
diff --git a/youtube_dl/extractor/theonion.py b/youtube_dl/extractor/theonion.py
new file mode 100644 (file)
index 0000000..b65d8e0
--- /dev/null
@@ -0,0 +1,70 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class TheOnionIE(InfoExtractor):
+    _VALID_URL = r'(?x)https?://(?:www\.)?theonion\.com/video/[^,]+,(?P<article_id>[0-9]+)/?'
+    _TEST = {
+        'url': 'http://www.theonion.com/video/man-wearing-mm-jacket-gods-image,36918/',
+        'md5': '19eaa9a39cf9b9804d982e654dc791ee',
+        'info_dict': {
+            'id': '2133',
+            'ext': 'mp4',
+            'title': 'Man Wearing M&M Jacket Apparently Made In God\'s Image',
+            'description': 'md5:cc12448686b5600baae9261d3e180910',
+            'thumbnail': 're:^https?://.*\.jpg\?\d+$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        article_id = mobj.group('article_id')
+
+        webpage = self._download_webpage(url, article_id)
+
+        video_id = self._search_regex(
+            r'"videoId":\s(\d+),', webpage, 'video ID')
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        sources = re.findall(r'<source src="([^"]+)" type="([^"]+)"', webpage)
+        if not sources:
+            raise ExtractorError(
+                'No sources found for video %s' % video_id, expected=True)
+
+        formats = []
+        for src, type_ in sources:
+            if type_ == 'video/mp4':
+                formats.append({
+                    'format_id': 'mp4_sd',
+                    'preference': 1,
+                    'url': src,
+                })
+            elif type_ == 'video/webm':
+                formats.append({
+                    'format_id': 'webm_sd',
+                    'preference': 0,
+                    'url': src,
+                })
+            elif type_ == 'application/x-mpegURL':
+                formats.extend(
+                    self._extract_m3u8_formats(src, video_id, preference=-1))
+            else:
+                self.report_warning(
+                    'Encountered unexpected format: %s' % type_)
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': thumbnail,
+            'description': description,
+        }
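
The hard-coded preference values are what keeps the ranking deterministic here:
in youtube-dl's format sorting, preference ranks ahead of the other criteria, so
mp4 (1) beats webm (0), and the HLS variants (-1) come last. Schematically:

# Hedged sketch; real ranking happens inside _sort_formats, this only
# illustrates the ordering the preference values induce.
formats = [
    {'format_id': 'webm_sd', 'preference': 0},
    {'format_id': 'mp4_sd', 'preference': 1},
    {'format_id': 'hls', 'preference': -1},
]
best = max(formats, key=lambda f: f['preference'])
print(best['format_id'])  # -> mp4_sd
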
index b6b2dba9ca9e6ee02c7dc6b2cf01d3601874a6b2..e2653d62dc8c288ce8e58e5bfda52793aef7cfaf 100644 (file)
@@ -5,6 +5,8 @@ import json
 
 from .common import InfoExtractor
 from ..utils import (
+    compat_str,
+    determine_ext,
     ExtractorError,
     xpath_with_ns,
 )
@@ -34,9 +36,20 @@ class ThePlatformIE(InfoExtractor):
         },
     }
 
-    def _get_info(self, video_id, smil_url):
-        meta = self._download_xml(smil_url, video_id)
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        if mobj.group('config'):
+            config_url = url + '&form=json'
+            config_url = config_url.replace('swf/', 'config/')
+            config_url = config_url.replace('onsite/', 'onsite/config/')
+            config = self._download_json(config_url, video_id, 'Downloading config')
+            smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
+        else:
+            smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
+                        'format=smil&mbr=true'.format(video_id))
 
+        meta = self._download_xml(smil_url, video_id)
         try:
             error_msg = next(
                 n.attrib['abstract']
@@ -55,36 +68,48 @@ class ThePlatformIE(InfoExtractor):
         body = meta.find(_x('smil:body'))
 
         f4m_node = body.find(_x('smil:seq//smil:video'))
-        if f4m_node is not None:
+        if f4m_node is not None and '.f4m' in f4m_node.attrib['src']:
             f4m_url = f4m_node.attrib['src']
             if 'manifest.f4m?' not in f4m_url:
                 f4m_url += '?'
             # the parameters are from syfy.com, other sites may use others,
             # they also work for nbc.com
             f4m_url += '&g=UXWGVKRWHFSP&hdcore=3.0.3'
-            formats = [{
-                'ext': 'flv',
-                'url': f4m_url,
-            }]
+            formats = self._extract_f4m_formats(f4m_url, video_id)
         else:
-            base_url = head.find(_x('smil:meta')).attrib['base']
-            switch = body.find(_x('smil:switch'))
             formats = []
-            for f in switch.findall(_x('smil:video')):
-                attr = f.attrib
-                width = int(attr['width'])
-                height = int(attr['height'])
-                vbr = int(attr['system-bitrate']) // 1000
-                format_id = '%dx%d_%dk' % (width, height, vbr)
-                formats.append({
-                    'format_id': format_id,
-                    'url': base_url,
-                    'play_path': 'mp4:' + attr['src'],
-                    'ext': 'flv',
-                    'width': width,
-                    'height': height,
-                    'vbr': vbr,
-                })
+            switch = body.find(_x('smil:switch'))
+            if switch is not None:
+                base_url = head.find(_x('smil:meta')).attrib['base']
+                for f in switch.findall(_x('smil:video')):
+                    attr = f.attrib
+                    width = int(attr['width'])
+                    height = int(attr['height'])
+                    vbr = int(attr['system-bitrate']) // 1000
+                    format_id = '%dx%d_%dk' % (width, height, vbr)
+                    formats.append({
+                        'format_id': format_id,
+                        'url': base_url,
+                        'play_path': 'mp4:' + attr['src'],
+                        'ext': 'flv',
+                        'width': width,
+                        'height': height,
+                        'vbr': vbr,
+                    })
+            else:
+                switch = body.find(_x('smil:seq//smil:switch'))
+                for f in switch.findall(_x('smil:video')):
+                    attr = f.attrib
+                    vbr = int(attr['system-bitrate']) // 1000
+                    ext = determine_ext(attr['src'])
+                    if ext == 'once':
+                        ext = 'mp4'
+                    formats.append({
+                        'format_id': compat_str(vbr),
+                        'url': attr['src'],
+                        'vbr': vbr,
+                        'ext': ext,
+                    })
             self._sort_formats(formats)
 
         return {
@@ -93,19 +118,5 @@ class ThePlatformIE(InfoExtractor):
             'formats': formats,
             'description': info['description'],
             'thumbnail': info['defaultThumbnailUrl'],
-            'duration': info['duration']//1000,
+            'duration': info['duration'] // 1000,
         }
-        
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        if mobj.group('config'):
-            config_url = url+ '&form=json'
-            config_url = config_url.replace('swf/', 'config/')
-            config_url = config_url.replace('onsite/', 'onsite/config/')
-            config = self._download_json(config_url, video_id, 'Downloading config')
-            smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
-        else:
-            smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
-                'format=smil&mbr=true'.format(video_id))
-        return self._get_info(video_id, smil_url)
diff --git a/youtube_dl/extractor/thesixtyone.py b/youtube_dl/extractor/thesixtyone.py
new file mode 100644 (file)
index 0000000..a77c6a2
--- /dev/null
@@ -0,0 +1,100 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import unified_strdate
+
+
+class TheSixtyOneIE(InfoExtractor):
+    _VALID_URL = r'''(?x)https?://(?:www\.)?thesixtyone\.com/
+        (?:.*?/)*
+        (?:
+            s|
+            song/comments/list|
+            song
+        )/(?P<id>[A-Za-z0-9]+)/?$'''
+    _SONG_URL_TEMPLATE = 'http://thesixtyone.com/s/{0:}'
+    _SONG_FILE_URL_TEMPLATE = 'http://{audio_server:}.thesixtyone.com/thesixtyone_production/audio/{0:}_stream'
+    _THUMBNAIL_URL_TEMPLATE = '{photo_base_url:}_desktop'
+    _TESTS = [
+        {
+            'url': 'http://www.thesixtyone.com/s/SrE3zD7s1jt/',
+            'md5': '821cc43b0530d3222e3e2b70bb4622ea',
+            'info_dict': {
+                'id': 'SrE3zD7s1jt',
+                'ext': 'mp3',
+                'title': 'CASIO - Unicorn War Mixtape',
+                'thumbnail': 're:^https?://.*_desktop$',
+                'upload_date': '20071217',
+                'duration': 3208,
+            }
+        },
+        {
+            'url': 'http://www.thesixtyone.com/song/comments/list/SrE3zD7s1jt',
+            'only_matching': True,
+        },
+        {
+            'url': 'http://www.thesixtyone.com/s/ULoiyjuJWli#/s/SrE3zD7s1jt/',
+            'only_matching': True,
+        },
+        {
+            'url': 'http://www.thesixtyone.com/#/s/SrE3zD7s1jt/',
+            'only_matching': True,
+        },
+        {
+            'url': 'http://www.thesixtyone.com/song/SrE3zD7s1jt/',
+            'only_matching': True,
+        },
+    ]
+
+    _DECODE_MAP = {
+        "x": "a",
+        "m": "b",
+        "w": "c",
+        "q": "d",
+        "n": "e",
+        "p": "f",
+        "a": "0",
+        "h": "1",
+        "e": "2",
+        "u": "3",
+        "s": "4",
+        "i": "5",
+        "o": "6",
+        "y": "7",
+        "r": "8",
+        "c": "9"
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        song_id = mobj.group('id')
+
+        webpage = self._download_webpage(
+            self._SONG_URL_TEMPLATE.format(song_id), song_id)
+
+        song_data = json.loads(self._search_regex(
+            r'"%s":\s(\{.*?\})' % song_id, webpage, 'song_data'))
+        keys = [self._DECODE_MAP.get(s, s) for s in song_data['key']]
+        url = self._SONG_FILE_URL_TEMPLATE.format(
+            "".join(reversed(keys)), **song_data)
+
+        formats = [{
+            'format_id': 'sd',
+            'url': url,
+            'ext': 'mp3',
+        }]
+
+        return {
+            'id': song_id,
+            'title': '{artist:} - {name:}'.format(**song_data),
+            'formats': formats,
+            'comment_count': song_data.get('comments_count'),
+            'duration': song_data.get('play_time'),
+            'like_count': song_data.get('score'),
+            'thumbnail': self._THUMBNAIL_URL_TEMPLATE.format(**song_data),
+            'upload_date': unified_strdate(song_data.get('publish_date')),
+        }
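
The stream key arrives obfuscated: each character is substituted through
_DECODE_MAP (characters without a mapping pass through unchanged) and the
sequence is reversed before being formatted into _SONG_FILE_URL_TEMPLATE. A toy
round trip with an invented key:

# Hedged sketch; 'hxe' is a made-up song_data['key'] value.
_DECODE_MAP = {'h': '1', 'x': 'a', 'e': '2'}  # excerpt of the table above
key = 'hxe'
print(''.join(reversed([_DECODE_MAP.get(c, c) for c in key])))  # -> 2a1
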
index bfb9d2fc9f3cef23466806519c6ac0d226efa945..7f323c938762f6ec1337b6dbf6b9c64ec2993dd8 100644 (file)
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -36,12 +36,12 @@ class ThisAVIE(InfoExtractor):
             r': <a href="http://www.thisav.com/user/[0-9]+/([^"]+)">(?:[^<]+)</a>',
             webpage, 'uploader id', fatal=False)
         ext = determine_ext(video_url)
-        
+
         return {
-            'id':          video_id,
-            'url':         video_url,
-            'uploader':    uploader,
+            'id': video_id,
+            'url': video_url,
+            'uploader': uploader,
             'uploader_id': uploader_id,
-            'title':       title,
-            'ext':         ext,
+            'title': title,
+            'ext': ext,
         }
diff --git a/youtube_dl/extractor/thvideo.py b/youtube_dl/extractor/thvideo.py
new file mode 100644 (file)
index 0000000..496f15d
--- /dev/null
@@ -0,0 +1,84 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    unified_strdate
+)
+
+
+class THVideoIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?thvideo\.tv/(?:v/th|mobile\.php\?cid=)(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://thvideo.tv/v/th1987/',
+        'md5': 'fa107b1f73817e325e9433505a70db50',
+        'info_dict': {
+            'id': '1987',
+            'ext': 'mp4',
+            'title': '【动画】秘封活动记录 ~ The Sealed Esoteric History.分镜稿预览',
+            'display_id': 'th1987',
+            'thumbnail': 'http://thvideo.tv/uploadfile/2014/0722/20140722013459856.jpg',
+            'description': '社团京都幻想剧团的第一个东方二次同人动画作品「秘封活动记录 ~ The Sealed Esoteric History.」 本视频是该动画第一期的分镜草稿...',
+            'upload_date': '20140722'
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        # extract download link from mobile player page
+        webpage_player = self._download_webpage(
+            'http://thvideo.tv/mobile.php?cid=%s-0' % (video_id),
+            video_id, note='Downloading video source page')
+        video_url = self._html_search_regex(
+            r'<source src="(.*?)" type', webpage_player, 'video url')
+
+        # extract video info from main page
+        webpage = self._download_webpage(
+            'http://thvideo.tv/v/th%s' % (video_id), video_id)
+        title = self._og_search_title(webpage)
+        display_id = 'th%s' % video_id
+        thumbnail = self._og_search_thumbnail(webpage)
+        description = self._og_search_description(webpage)
+        upload_date = unified_strdate(self._html_search_regex(
+            r'span itemprop="datePublished" content="(.*?)">', webpage,
+            'upload date', fatal=False))
+
+        return {
+            'id': video_id,
+            'ext': 'mp4',
+            'url': video_url,
+            'title': title,
+            'display_id': display_id,
+            'thumbnail': thumbnail,
+            'description': description,
+            'upload_date': upload_date
+        }
+
+
+class THVideoPlaylistIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?thvideo\.tv/mylist(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://thvideo.tv/mylist2',
+        'info_dict': {
+            'id': '2',
+            'title': '幻想万華鏡',
+        },
+        'playlist_mincount': 23,
+    }
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, playlist_id)
+        list_title = self._html_search_regex(
+            r'<h1 class="show_title">(.*?)<b id', webpage, 'playlist title',
+            fatal=False)
+
+        entries = [
+            self.url_result('http://thvideo.tv/v/th' + id, 'THVideo')
+            for id in re.findall(r'<dd><a href="http://thvideo.tv/v/th(\d+)/" target=', webpage)]
+
+        return self.playlist_result(entries, playlist_id, list_title)
index a4aa25f661223301b9d16c7ac87b6c502aa0e0ff..4fe89dbe516f8e25eb1f84239bc9cbc9f26bd648 100644 (file)
@@ -26,9 +26,9 @@ class TinyPicIE(InfoExtractor):
         video_id = mobj.group('id')
 
         webpage = self._download_webpage(url, video_id, 'Downloading page')
-        
+
         mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n'
-            '\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
+                         r'\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
         if mobj is None:
             raise ExtractorError('Video %s does not exist' % video_id, expected=True)
 
@@ -47,4 +47,4 @@ class TinyPicIE(InfoExtractor):
             'url': video_url,
             'thumbnail': thumbnail,
             'title': title
-        }
\ No newline at end of file
+        }
index d848ee1863252dbc155652c997210476c42479e4..66d159e99f6b15c01d53017b24b0a70b57470bd3 100644 (file)
@@ -36,9 +36,10 @@ class TlcDeIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Breaking Amish: Die Welt da draußen',
             'uploader': 'Discovery Networks - Germany',
-            'description': 'Vier Amische und eine Mennonitin wagen in New York'
+            'description': (
+                'Vier Amische und eine Mennonitin wagen in New York'
                 '  den Sprung in ein komplett anderes Leben. Begleitet sie auf'
-                ' ihrem spannenden Weg.',
+                ' ihrem spannenden Weg.'),
         },
     }
 
diff --git a/youtube_dl/extractor/tmz.py b/youtube_dl/extractor/tmz.py
new file mode 100644 (file)
index 0000000..827aa08
--- /dev/null
@@ -0,0 +1,32 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class TMZIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?tmz\.com/videos/(?P<id>[^/]+)/?'
+    _TEST = {
+        'url': 'http://www.tmz.com/videos/0_okj015ty/',
+        'md5': '791204e3bf790b1426cb2db0706184c0',
+        'info_dict': {
+            'id': '0_okj015ty',
+            'url': 'http://tmz.vo.llnwd.net/o28/2014-03/13/0_okj015ty_0_rt8ro3si_2.mp4',
+            'ext': 'mp4',
+            'title': 'Kim Kardashian\'s Boobs Unlock a Mystery!',
+            'description': 'Did Kim Kardasain try to one-up Khloe by one-upping Kylie???  Or is she just showing off her amazing boobs?',
+            'thumbnail': 'http://cdnbakmi.kaltura.com/p/591531/sp/59153100/thumbnail/entry_id/0_okj015ty/version/100002/acv/182/width/640',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        return {
+            'id': video_id,
+            'url': self._html_search_meta('VideoURL', webpage, fatal=True),
+            'title': self._og_search_title(webpage),
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._html_search_meta('ThumbURL', webpage),
+        }
diff --git a/youtube_dl/extractor/tnaflix.py b/youtube_dl/extractor/tnaflix.py
new file mode 100644 (file)
index 0000000..0ecd695
--- /dev/null
@@ -0,0 +1,84 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    fix_xml_ampersands,
+)
+
+
+class TNAFlixIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?tnaflix\.com/(?P<cat_id>[\w-]+)/(?P<display_id>[\w-]+)/video(?P<id>\d+)'
+
+    _TITLE_REGEX = None
+    _DESCRIPTION_REGEX = r'<h3 itemprop="description">([^<]+)</h3>'
+    _CONFIG_REGEX = r'flashvars\.config\s*=\s*escape\("([^"]+)"'
+
+    _TEST = {
+        'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878',
+        'md5': 'ecf3498417d09216374fc5907f9c6ec0',
+        'info_dict': {
+            'id': '553878',
+            'display_id': 'Carmella-Decesare-striptease',
+            'ext': 'mp4',
+            'title': 'Carmella Decesare - striptease',
+            'description': '',
+            'thumbnail': 're:https?://.*\.jpg$',
+            'duration': 91,
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        title = self._html_search_regex(
+            self._TITLE_REGEX, webpage, 'title') if self._TITLE_REGEX else self._og_search_title(webpage)
+        description = self._html_search_regex(
+            self._DESCRIPTION_REGEX, webpage, 'description', fatal=False, default='')
+
+        age_limit = self._rta_search(webpage)
+
+        duration = self._html_search_meta('duration', webpage, 'duration', default=None)
+        if duration:
+            duration = parse_duration(duration[1:])
+
+        cfg_url = self._html_search_regex(
+            self._CONFIG_REGEX, webpage, 'flashvars.config')
+
+        cfg_xml = self._download_xml(
+            cfg_url, display_id, note='Downloading metadata',
+            transform_source=fix_xml_ampersands)
+
+        thumbnail = cfg_xml.find('./startThumb').text
+
+        formats = []
+        for item in cfg_xml.findall('./quality/item'):
+            video_url = re.sub(r'speed=\d+', 'speed=', item.find('videoLink').text)
+            format_id = item.find('res').text
+            fmt = {
+                'url': video_url,
+                'format_id': format_id,
+            }
+            m = re.search(r'^(\d+)', format_id)
+            if m:
+                fmt['height'] = int(m.group(1))
+            formats.append(fmt)
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'age_limit': age_limit,
+            'formats': formats,
+        }
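
The duration[1:] slice drops the leading 'P' of the ISO-8601-style value in the
page's duration meta tag, leaving a string parse_duration understands. With a
made-up value matching the expected shape:

# Hedged sketch; the meta value is invented.
from youtube_dl.utils import parse_duration

meta_value = 'P0H1M31S'
print(parse_duration(meta_value[1:]))  # '0H1M31S' -> 91 (seconds)
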
index 0f389bd93a1f35eb35346f7ee99b0b91a9c9b876..2756f56d3a94ae8f2bed64aa39acf4d45616366b 100644 (file)
@@ -42,6 +42,13 @@ class ToypicsIE(InfoExtractor):
 class ToypicsUserIE(InfoExtractor):
     IE_DESC = 'Toypics user profile'
     _VALID_URL = r'http://videos\.toypics\.net/(?P<username>[^/?]+)(?:$|[?#])'
+    _TEST = {
+        'url': 'http://videos.toypics.net/Mikey',
+        'info_dict': {
+            'id': 'Mikey',
+        },
+        'playlist_mincount': 19,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
index 11407428b56cb779ee9411ee1ccc7736fa2fb718..1c53a3fd09459f31fdf188dc852141ef000af4a6 100644 (file)
@@ -25,7 +25,7 @@ class TrailerAddictIE(InfoExtractor):
         webpage = self._download_webpage(url, name)
 
         title = self._search_regex(r'<title>(.+?)</title>',
-                webpage, 'video title').replace(' - Trailer Addict','')
+                                   webpage, 'video title').replace(' - Trailer Addict', '')
         view_count_str = self._search_regex(
             r'<span class="views_n">([0-9,.]+)</span>',
             webpage, 'view count', fatal=False)
@@ -43,12 +43,12 @@ class TrailerAddictIE(InfoExtractor):
             fvar = "fvar"
 
         info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id))
-        info_webpage = self._download_webpage(info_url, video_id , "Downloading the info webpage")
+        info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")
 
         final_url = self._search_regex(r'&fileurl=(.+)',
-                info_webpage, 'Download url').replace('%3F','?')
+                                       info_webpage, 'Download url').replace('%3F', '?')
         thumbnail_url = self._search_regex(r'&image=(.+?)&',
-                info_webpage, 'thumbnail url')
+                                           info_webpage, 'thumbnail url')
 
         description = self._html_search_regex(
             r'(?s)<div class="synopsis">.*?<div class="movie_label_info"[^>]*>(.*?)</div>',
index d64aaa41f690956b08211ed4fe07e1bc27267641..220a05b7b493fb728f3cd3c6dab74208a8f587eb 100644 (file)
@@ -1,28 +1,28 @@
+from __future__ import unicode_literals
+
 import json
-import re
 
 from .common import InfoExtractor
 
 
 class TriluliluIE(InfoExtractor):
-    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?trilulilu\.ro/video-(?P<category>[^/]+)/(?P<video_id>[^/]+)'
+    _VALID_URL = r'https?://(?:www\.)?trilulilu\.ro/video-[^/]+/(?P<id>[^/]+)'
     _TEST = {
-        u"url": u"http://www.trilulilu.ro/video-animatie/big-buck-bunny-1",
-        u'file': u"big-buck-bunny-1.mp4",
-        u'info_dict': {
-            u"title": u"Big Buck Bunny",
-            u"description": u":) pentru copilul din noi",
+        'url': 'http://www.trilulilu.ro/video-animatie/big-buck-bunny-1',
+        'info_dict': {
+            'id': 'big-buck-bunny-1',
+            'ext': 'mp4',
+            'title': 'Big Buck Bunny',
+            'description': ':) pentru copilul din noi',
         },
         # Server ignores Range headers (--test)
-        u"params": {
-            u"skip_download": True
+        'params': {
+            'skip_download': True
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         title = self._og_search_title(webpage)
@@ -30,20 +30,20 @@ class TriluliluIE(InfoExtractor):
         description = self._og_search_description(webpage)
 
         log_str = self._search_regex(
-            r'block_flash_vars[ ]=[ ]({[^}]+})', webpage, u'log info')
+            r'block_flash_vars[ ]=[ ]({[^}]+})', webpage, 'log info')
         log = json.loads(log_str)
 
-        format_url = (u'http://fs%(server)s.trilulilu.ro/%(hash)s/'
-                      u'video-formats2' % log)
+        format_url = ('http://fs%(server)s.trilulilu.ro/%(hash)s/'
+                      'video-formats2' % log)
         format_doc = self._download_xml(
             format_url, video_id,
-            note=u'Downloading formats',
-            errnote=u'Error while downloading formats')
+            note='Downloading formats',
+            errnote='Error while downloading formats')
+
         video_url_template = (
-            u'http://fs%(server)s.trilulilu.ro/stream.php?type=video'
-            u'&source=site&hash=%(hash)s&username=%(userid)s&'
-            u'key=ministhebest&format=%%s&sig=&exp=' %
+            'http://fs%(server)s.trilulilu.ro/stream.php?type=video'
+            '&source=site&hash=%(hash)s&username=%(userid)s&'
+            'key=ministhebest&format=%%s&sig=&exp=' %
             log)
         formats = [
             {
@@ -63,4 +63,3 @@ class TriluliluIE(InfoExtractor):
             'description': description,
             'thumbnail': thumbnail,
         }
-
index 57f9566832401ff8eb2d1a90aaf91d3e68cdf873..e7b79243a8fb9f091087f5c452c8192c49c81af2 100644 (file)
@@ -1,13 +1,12 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
+from ..utils import xpath_text
 
 
 class TruTubeIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?trutube\.tv/video/(?P<id>[0-9]+)/.*'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?trutube\.tv/(?:video/|nuevo/player/embed\.php\?v=)(?P<id>[0-9]+)'
+    _TESTS = [{
         'url': 'http://trutube.tv/video/14880/Ramses-II-Proven-To-Be-A-Red-Headed-Caucasoid-',
         'md5': 'c5b6e301b0a2040b074746cbeaa26ca1',
         'info_dict': {
@@ -16,29 +15,26 @@ class TruTubeIE(InfoExtractor):
             'title': 'Ramses II - Proven To Be A Red Headed Caucasoid',
             'thumbnail': 're:^http:.*\.jpg$',
         }
-    }
+    }, {
+        'url': 'https://trutube.tv/nuevo/player/embed.php?v=14880',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
-        webpage = self._download_webpage(url, video_id)
-        video_title = self._og_search_title(webpage).strip()
-        thumbnail = self._search_regex(
-            r"var splash_img = '([^']+)';", webpage, 'thumbnail', fatal=False)
+        config = self._download_xml(
+            'https://trutube.tv/nuevo/player/config.php?v=%s' % video_id,
+            video_id, transform_source=lambda s: s.strip())
 
-        all_formats = re.finditer(
-            r"var (?P<key>[a-z]+)_video_file\s*=\s*'(?P<url>[^']+)';", webpage)
-        formats = [{
-            'format_id': m.group('key'),
-            'quality': -i,
-            'url': m.group('url'),
-        } for i, m in enumerate(all_formats)]
-        self._sort_formats(formats)
+        # filehd is always 404
+        video_url = xpath_text(config, './file', 'video URL', fatal=True)
+        title = xpath_text(config, './title', 'title').strip()
+        thumbnail = xpath_text(config, './image', 'thumbnail')
 
         return {
             'id': video_id,
-            'title': video_title,
-            'formats': formats,
+            'url': video_url,
+            'title': title,
             'thumbnail': thumbnail,
         }
index 08a48c05acf34b6cd190ba52d58189935fe6b20f..64a1e903022a78fa3a2b15eeff5eed20afce568d 100644 (file)
@@ -14,27 +14,35 @@ from ..aes import aes_decrypt_text
 
 
 class Tube8IE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/){2}(?P<id>\d+)'
-    _TEST = {
-        'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
-        'md5': '44bf12b98313827dd52d35b8706a4ea0',
-        'info_dict': {
-            'id': '229795',
-            'ext': 'mp4',
-            'description': 'hot teen Kasia grinding',
-            'uploader': 'unknown',
-            'title': 'Kasia music video',
-            'age_limit': 18,
-        }
-    }
+    _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
+            'md5': '44bf12b98313827dd52d35b8706a4ea0',
+            'info_dict': {
+                'id': '229795',
+                'display_id': 'kasia-music-video',
+                'ext': 'mp4',
+                'description': 'hot teen Kasia grinding',
+                'uploader': 'unknown',
+                'title': 'Kasia music video',
+                'age_limit': 18,
+            }
+        },
+        {
+            'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/',
+            'only_matching': True,
+        },
+    ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
 
         req = compat_urllib_request.Request(url)
         req.add_header('Cookie', 'age_verified=1')
-        webpage = self._download_webpage(req, video_id)
+        webpage = self._download_webpage(req, display_id)
 
         flashvars = json.loads(self._html_search_regex(
             r'var flashvars\s*=\s*({.+?})', webpage, 'flashvars'))
@@ -70,6 +78,7 @@ class Tube8IE(InfoExtractor):
 
         return {
             'id': video_id,
+            'display_id': display_id,
             'url': video_url,
             'title': title,
             'description': description,
index 7a3891b89b736fb05f4c09d441d7eb56e68d8dcd..161e47624b383dfe76ea1e2ea6ec73395a97db53 100644 (file)
@@ -1,5 +1,7 @@
 # coding: utf-8
 
+from __future__ import unicode_literals
+
 import re
 import json
 
@@ -9,30 +11,37 @@ from .common import InfoExtractor
 class TudouIE(InfoExtractor):
     _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs|albumplay)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?'
     _TESTS = [{
-        u'url': u'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
-        u'file': u'159448201.f4v',
-        u'md5': u'140a49ed444bd22f93330985d8475fcb',
-        u'info_dict': {
-            u"title": u"卡马乔国足开大脚长传冲吊集锦"
+        'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
+        'md5': '140a49ed444bd22f93330985d8475fcb',
+        'info_dict': {
+            'id': '159448201',
+            'ext': 'f4v',
+            'title': '卡马乔国足开大脚长传冲吊集锦',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }, {
+        'url': 'http://www.tudou.com/programs/view/ajX3gyhL0pc/',
+        'info_dict': {
+            'id': '117049447',
+            'ext': 'f4v',
+            'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012',
+            'thumbnail': 're:^https?://.*\.jpg$',
         }
-    },
-    {
-        u'url': u'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html',
-        u'file': u'todo.mp4',
-        u'md5': u'todo.mp4',
-        u'info_dict': {
-            u'title': u'todo.mp4',
+    }, {
+        'url': 'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html',
+        'info_dict': {
+            'title': 'todo.mp4',
         },
-        u'add_ie': [u'Youku'],
-        u'skip': u'Only works from China'
+        'add_ie': ['Youku'],
+        'skip': 'Only works from China'
     }]
 
-    def _url_for_id(self, id, quality = None):
-        info_url = "http://v2.tudou.com/f?id="+str(id)
+    def _url_for_id(self, id, quality=None):
+        info_url = "http://v2.tudou.com/f?id=" + str(id)
         if quality:
             info_url += '&hd' + quality
         webpage = self._download_webpage(info_url, id, "Opening the info webpage")
-        final_url = self._html_search_regex('>(.+?)</f>',webpage, 'video url')
+        final_url = self._html_search_regex('>(.+?)</f>', webpage, 'video url')
         return final_url
 
     def _real_extract(self, url):
@@ -44,35 +53,38 @@ class TudouIE(InfoExtractor):
         if m and m.group(1):
             return {
                 '_type': 'url',
-                'url': u'youku:' + m.group(1),
+                'url': 'youku:' + m.group(1),
                 'ie_key': 'Youku'
             }
 
         title = self._search_regex(
-            r",kw:\s*['\"](.+?)[\"']", webpage, u'title')
+            r",kw:\s*['\"](.+?)[\"']", webpage, 'title')
         thumbnail_url = self._search_regex(
-            r",pic:\s*[\"'](.+?)[\"']", webpage, u'thumbnail URL', fatal=False)
+            r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False)
 
         segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
         segments = json.loads(segs_json)
         # It looks like the keys are the arguments that have to be passed as
         # the hd field in the request URL; we pick the highest one.
-        quality = sorted(segments.keys())[-1]
+        # Also, filter out non-numeric qualities (see issue #3643).
+        quality = sorted(filter(lambda k: k.isdigit(), segments.keys()),
+                         key=lambda k: int(k))[-1]
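+        # e.g. hypothetical keys ['2', '10', 'plugin'] filter down to
+        # ['2', '10'] and sort numerically, so '10' wins; a plain string
+        # sort would rank '9' above '10'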
         parts = segments[quality]
         result = []
         len_parts = len(parts)
         if len_parts > 1:
-            self.to_screen(u'%s: found %s parts' % (video_id, len_parts))
+            self.to_screen('%s: found %s parts' % (video_id, len_parts))
         for part in parts:
             part_id = part['k']
             final_url = self._url_for_id(part_id, quality)
             ext = (final_url.split('?')[0]).split('.')[-1]
-            part_info = {'id': part_id,
-                          'url': final_url,
-                          'ext': ext,
-                          'title': title,
-                          'thumbnail': thumbnail_url,
-                          }
+            part_info = {
+                'id': '%s' % part_id,
+                'url': final_url,
+                'ext': ext,
+                'title': title,
+                'thumbnail': thumbnail_url,
+            }
             result.append(part_info)
 
         return result
index 2882c1809e0bd55c1e6c8b441c19293aeb64d301..2a1ae5a717cf7b2af16bf5a1ce3ef7494e28a7a6 100644 (file)
@@ -4,13 +4,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-)
 
 
 class TumblrIE(InfoExtractor):
-    _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)($|/)'
+    _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
     _TESTS = [{
         'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
         'md5': '479bb068e5b16462f5176a6828829767',
@@ -18,7 +15,7 @@ class TumblrIE(InfoExtractor):
             'id': '54196191430',
             'ext': 'mp4',
             'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...',
-            'description': 'md5:dfac39636969fe6bf1caa2d50405f069',
+            'description': 'md5:37db8211e40b50c7c44e95da14f630b7',
             'thumbnail': 're:http://.*\.jpg',
         }
     }, {
@@ -27,7 +24,7 @@ class TumblrIE(InfoExtractor):
         'info_dict': {
             'id': '90208453769',
             'ext': 'mp4',
-            'title': '5SOS STRUM ;)',
+            'title': '5SOS STRUM ;]',
             'description': 'md5:dba62ac8639482759c8eb10ce474586a',
             'thumbnail': 're:http://.*\.jpg',
         }
@@ -41,28 +38,24 @@ class TumblrIE(InfoExtractor):
         url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
         webpage = self._download_webpage(url, video_id)
 
-        re_video = r'src=\\x22(?P<video_url>http://%s\.tumblr\.com/video_file/%s/(.*?))\\x22 type=\\x22video/(?P<ext>.*?)\\x22' % (blog, video_id)
-        video = re.search(re_video, webpage)
-        if video is None:
-            raise ExtractorError('Unable to extract video')
-        video_url = video.group('video_url')
-        ext = video.group('ext')
-
-        video_thumbnail = self._search_regex(
-            r'posters.*?\[\\x22(.*?)\\x22',
-            webpage, 'thumbnail', fatal=False)  # We pick the first poster
-        if video_thumbnail:
-            video_thumbnail = video_thumbnail.replace('\\\\/', '/')
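+        # the player now lives in a tumblr.com/video iframe; its <source>
+        # tag carries the actual video URL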
+        iframe_url = self._search_regex(
+            r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
+            webpage, 'iframe url')
+        iframe = self._download_webpage(iframe_url, video_id)
+        video_url = self._search_regex(r'<source src="([^"]+)"',
+                                       iframe, 'video url')
 
         # This is the only place to get a title; it's not complete,
         # but searching in other places doesn't work for all videos
-        video_title = self._html_search_regex(r'<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
-            webpage, 'title', flags=re.DOTALL)
+        video_title = self._html_search_regex(
+            r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
+            webpage, 'title')
 
-        return [{'id': video_id,
-                 'url': video_url,
-                 'title': video_title,
-                 'description': self._html_search_meta('description', webpage),
-                 'thumbnail': video_thumbnail,
-                 'ext': ext
-                 }]
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': video_title,
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
diff --git a/youtube_dl/extractor/tunein.py b/youtube_dl/extractor/tunein.py
new file mode 100644 (file)
index 0000000..4ce5aee
--- /dev/null
@@ -0,0 +1,99 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class TuneInIE(InfoExtractor):
+    _VALID_URL = r'''(?x)https?://(?:www\.)?
+    (?:
+        tunein\.com/
+        (?:
+            radio/.*?-s|
+            station/.*?StationId\=
+        )(?P<id>[0-9]+)
+        |tun\.in/(?P<redirect_id>[A-Za-z0-9]+)
+    )
+    '''
+    _API_URL_TEMPLATE = 'http://tunein.com/tuner/tune/?stationId={0:}&tuneType=Station'
+
+    _INFO_DICT = {
+        'id': '34682',
+        'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2',
+        'ext': 'AAC',
+        'thumbnail': 're:^https?://.*\.png$',
+        'location': 'Tacoma, WA',
+    }
+    _TESTS = [
+        {
+            'url': 'http://tunein.com/radio/Jazz24-885-s34682/',
+            'info_dict': _INFO_DICT,
+            'params': {
+                'skip_download': True,  # live stream
+            },
+        },
+        {  # test redirection
+            'url': 'http://tun.in/ser7s',
+            'info_dict': _INFO_DICT,
+            'params': {
+                'skip_download': True,  # live stream
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        redirect_id = mobj.group('redirect_id')
+        if redirect_id:
+            # The server doesn't support HEAD requests
+            urlh = self._request_webpage(
+                url, redirect_id, note='Downloading redirect page')
+            url = urlh.geturl()
+            self.to_screen('Following redirect: %s' % url)
+            mobj = re.match(self._VALID_URL, url)
+        station_id = mobj.group('id')
+
+        station_info = self._download_json(
+            self._API_URL_TEMPLATE.format(station_id),
+            station_id, note='Downloading station JSON')
+
+        title = station_info['Title']
+        thumbnail = station_info.get('Logo')
+        location = station_info.get('Location')
+        streams_url = station_info.get('StreamUrl')
+        if not streams_url:
+            raise ExtractorError('No downloadable streams found',
+                                 expected=True)
+        stream_data = self._download_webpage(
+            streams_url, station_id, note='Downloading stream data')
+        streams = json.loads(self._search_regex(
+            r'\((.*)\);', stream_data, 'stream info'))['Streams']
+
+        is_live = None
+        formats = []
+        for stream in streams:
+            if stream.get('Type') == 'Live':
+                is_live = True
+            formats.append({
+                'abr': stream.get('Bandwidth'),
+                'ext': stream.get('MediaType'),
+                'acodec': stream.get('MediaType'),
+                'vcodec': 'none',
+                'url': stream.get('Url'),
+                # Sometimes streams with the highest quality do not exist
+                'preference': stream.get('Reliability'),
+            })
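+        # 'Reliability' appears to be a server-side score, so using it as
+        # the format preference favours streams that are likely to play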
+        self._sort_formats(formats)
+
+        return {
+            'id': station_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': thumbnail,
+            'location': location,
+            'is_live': is_live,
+        }
diff --git a/youtube_dl/extractor/turbo.py b/youtube_dl/extractor/turbo.py
new file mode 100644 (file)
index 0000000..29703a8
--- /dev/null
@@ -0,0 +1,67 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    int_or_none,
+    qualities,
+    xpath_text,
+)
+
+
+class TurboIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?turbo\.fr/videos-voiture/(?P<id>[0-9]+)-'
+    _API_URL = 'http://www.turbo.fr/api/tv/xml.php?player_generique=player_generique&id={0:}'
+    _TEST = {
+        'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html',
+        'md5': '33f4b91099b36b5d5a91f84b5bcba600',
+        'info_dict': {
+            'id': '454443',
+            'ext': 'mp4',
+            'duration': 3715,
+            'title': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
+            'description': 'Retrouvez dans cette rubrique toutes les vidéos de l\'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        playlist = self._download_xml(self._API_URL.format(video_id), video_id)
+        item = playlist.find('./channel/item')
+        if item is None:
+            raise ExtractorError('Playlist item was not found', expected=True)
+
+        title = xpath_text(item, './title', 'title')
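+        # note: the feed apparently spells its duration tag 'durate'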
+        duration = int_or_none(xpath_text(item, './durate', 'duration'))
+        thumbnail = xpath_text(item, './visuel_clip', 'thumbnail')
+        description = self._og_search_description(webpage)
+
+        formats = []
+        get_quality = qualities(['3g', 'sd', 'hq'])
+        for child in item:
+            m = re.search(r'url_video_(?P<quality>.+)', child.tag)
+            if m:
+                quality = m.group('quality')
+                formats.append({
+                    'format_id': quality,
+                    'url': child.text,
+                    'quality': get_quality(quality),
+                })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'duration': duration,
+            'thumbnail': thumbnail,
+            'description': description,
+            'formats': formats,
+        }
index 0921cc5f822f5bf0bcfefae8d6ef063e88f6e29d..ba65996dc01646e019cfd5820aa36c1934365d9b 100644 (file)
@@ -1,84 +1,84 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
-    unified_strdate,
-    clean_html,
-    int_or_none,
+    float_or_none,
+    parse_age_limit,
 )
 
 
 class TvigleIE(InfoExtractor):
     IE_NAME = 'tvigle'
     IE_DESC = 'Интернет-телевидение Tvigle.ru'
-    _VALID_URL = r'http://(?:www\.)?tvigle\.ru/category/.+?[\?&]v(?:ideo)?=(?P<id>\d+)'
+    _VALID_URL = r'http://(?:www\.)?tvigle\.ru/(?:[^/]+/)+(?P<id>[^/]+)/$'
 
     _TESTS = [
         {
-            'url': 'http://www.tvigle.ru/category/cinema/1608/?video=503081',
-            'md5': '09afba4616666249f087efc6dcf83cb3',
+            'url': 'http://www.tvigle.ru/video/sokrat/',
+            'md5': '36514aed3657d4f70b4b2cef8eb520cd',
             'info_dict': {
-                'id': '503081',
+                'id': '1848932',
+                'display_id': 'sokrat',
                 'ext': 'flv',
-                'title': 'Брат 2 ',
-                'description': 'md5:f5a42970f50648cee3d7ad740f3ae769',
-                'upload_date': '20110919',
+                'title': 'Сократ',
+                'description': 'md5:a05bd01be310074d5833efc6743be95e',
+                'duration': 6586,
+                'age_limit': 0,
             },
         },
         {
-            'url': 'http://www.tvigle.ru/category/men/vysotskiy_vospominaniya02/?flt=196&v=676433',
-            'md5': 'e7efe5350dd5011d0de6550b53c3ba7b',
+            'url': 'http://www.tvigle.ru/video/vladimir-vysotskii/vedushchii-teleprogrammy-60-minut-ssha-o-vladimire-vysotskom/',
+            'md5': 'd9012d7c7c598fe7a11d7fb46dc1f574',
             'info_dict': {
-                'id': '676433',
-                'ext': 'flv',
+                'id': '5142516',
+                'ext': 'mp4',
                 'title': 'Ведущий телепрограммы «60 минут» (США) о Владимире Высоцком',
                 'description': 'md5:027f7dc872948f14c96d19b4178428a4',
-                'upload_date': '20121218',
+                'duration': 186.080,
+                'age_limit': 0,
             },
         },
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        display_id = self._match_id(url)
 
-        video_data = self._download_xml(
-            'http://www.tvigle.ru/xml/single.php?obj=%s' % video_id, video_id, 'Downloading video XML')
+        webpage = self._download_webpage(url, display_id)
 
-        video = video_data.find('./video')
+        video_id = self._html_search_regex(
+            r'<li class="video-preview current_playing" id="(\d+)">', webpage, 'video id')
 
-        title = video.get('name')
-        description = video.get('anons')
-        if description:
-            description = clean_html(description)
-        thumbnail = video_data.get('img')
-        upload_date = unified_strdate(video.get('date'))
-        like_count = int_or_none(video.get('vtp'))
+        video_data = self._download_json(
+            'http://cloud.tvigle.ru/api/play/video/%s/' % video_id, display_id)
 
-        formats = []
-        for num, (format_id, format_note) in enumerate([['low_file', 'SQ'], ['file', 'HQ'], ['hd', 'HD 720']]):
-            video_url = video.get(format_id)
-            if not video_url:
-                continue
-            formats.append({
-                'url': video_url,
-                'format_id': format_id,
-                'format_note': format_note,
-                'quality': num,
-            })
+        item = video_data['playlist']['items'][0]
 
+        title = item['title']
+        description = item['description']
+        thumbnail = item['thumbnail']
+        duration = float_or_none(item.get('durationMilliseconds'), 1000)
+        age_limit = parse_age_limit(item.get('ageRestrictions'))
+
+        formats = []
+        for vcodec, fmts in item['videos'].items():
+            for quality, video_url in fmts.items():
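+                # quality keys look like '360p'; dropping the trailing
+                # letter yields the pixel height used below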
+                formats.append({
+                    'url': video_url,
+                    'format_id': '%s-%s' % (vcodec, quality),
+                    'vcodec': vcodec,
+                    'height': int(quality[:-1]),
+                    'filesize': item['video_files_size'][vcodec][quality],
+                })
         self._sort_formats(formats)
 
         return {
             'id': video_id,
+            'display_id': display_id,
             'title': title,
             'description': description,
             'thumbnail': thumbnail,
-            'upload_date': upload_date,
-            'like_count': like_count,
-            'age_limit': 18,
+            'duration': duration,
+            'age_limit': age_limit,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index bfed9dd042bf3e170f1af394954d64dbdadea0ac..a645800057fc6dc88850885cba7737243c95574e 100644 (file)
@@ -1,40 +1,35 @@
-import json
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
 
 
 class TvpIE(InfoExtractor):
-    IE_NAME = u'tvp.pl'
+    IE_NAME = 'tvp.pl'
     _VALID_URL = r'https?://www\.tvp\.pl/.*?wideo/(?P<date>\d+)/(?P<id>\d+)'
 
     _TEST = {
-        u'url': u'http://www.tvp.pl/warszawa/magazyny/campusnews/wideo/31102013/12878238',
-        u'md5': u'148408967a6a468953c0a75cbdaf0d7a',
-        u'file': u'12878238.wmv',
-        u'info_dict': {
-            u'title': u'31.10.2013 - Odcinek 2',
-            u'description': u'31.10.2013 - Odcinek 2',
+        'url': 'http://www.tvp.pl/warszawa/magazyny/campusnews/wideo/31102013/12878238',
+        'md5': '148408967a6a468953c0a75cbdaf0d7a',
+        'info_dict': {
+            'id': '12878238',
+            'ext': 'wmv',
+            'title': '31.10.2013 - Odcinek 2',
+            'description': '31.10.2013 - Odcinek 2',
         },
-        u'skip': u'Download has to use same server IP as extraction. Therefore, a good (load-balancing) DNS resolver will make the download fail.'
+        'skip': 'Download has to use the same server IP as extraction. Therefore, a good (load-balancing) DNS resolver will make the download fail.'
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         json_url = 'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id
-        json_params = self._download_webpage(
-            json_url, video_id, u"Downloading video metadata")
-
-        params = json.loads(json_params)
-        self.report_extraction(video_id)
+        params = self._download_json(
+            json_url, video_id, "Downloading video metadata")
         video_url = params['video_url']
 
-        title = self._og_search_title(webpage, fatal=True)
         return {
             'id': video_id,
-            'title': title,
+            'title': self._og_search_title(webpage),
             'ext': 'wmv',
             'url': video_url,
             'description': self._og_search_description(webpage),
index a56a7ab5fc2e1c307c9811687ca03b0f4d79e6e5..9a53a3c74143d72a14842ea70ce4063a8d28a30c 100644 (file)
@@ -4,15 +4,29 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
-    ExtractorError,
     parse_iso8601,
     qualities,
 )
 
 
 class TVPlayIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?tvplay\.lv/parraides/[^/]+/(?P<id>\d+)'
+    IE_DESC = 'TV3Play and related services'
+    _VALID_URL = r'''(?x)http://(?:www\.)?
+        (?:tvplay\.lv/parraides|
+           tv3play\.lt/programos|
+           tv3play\.ee/sisu|
+           tv3play\.se/program|
+           tv6play\.se/program|
+           tv8play\.se/program|
+           tv10play\.se/program|
+           tv3play\.no/programmer|
+           viasat4play\.no/programmer|
+           tv6play\.no/programmer|
+           tv3play\.dk/programmer|
+        )/[^/]+/(?P<id>\d+)
+        '''
     _TESTS = [
         {
             'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true',
@@ -30,18 +44,145 @@ class TVPlayIE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        {
+            'url': 'http://www.tv3play.lt/programos/moterys-meluoja-geriau/409229?autostart=true',
+            'info_dict': {
+                'id': '409229',
+                'ext': 'flv',
+                'title': 'Moterys meluoja geriau',
+                'description': 'md5:9aec0fc68e2cbc992d2a140bd41fa89e',
+                'duration': 1330,
+                'timestamp': 1403769181,
+                'upload_date': '20140626',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://www.tv3play.ee/sisu/kodu-keset-linna/238551?autostart=true',
+            'info_dict': {
+                'id': '238551',
+                'ext': 'flv',
+                'title': 'Kodu keset linna 398537',
+                'description': 'md5:7df175e3c94db9e47c0d81ffa5d68701',
+                'duration': 1257,
+                'timestamp': 1292449761,
+                'upload_date': '20101215',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://www.tv3play.se/program/husraddarna/395385?autostart=true',
+            'info_dict': {
+                'id': '395385',
+                'ext': 'flv',
+                'title': 'Husräddarna S02E07',
+                'description': 'md5:f210c6c89f42d4fc39faa551be813777',
+                'duration': 2574,
+                'timestamp': 1400596321,
+                'upload_date': '20140520',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://www.tv6play.se/program/den-sista-dokusapan/266636?autostart=true',
+            'info_dict': {
+                'id': '266636',
+                'ext': 'flv',
+                'title': 'Den sista dokusåpan S01E08',
+                'description': 'md5:295be39c872520221b933830f660b110',
+                'duration': 1492,
+                'timestamp': 1330522854,
+                'upload_date': '20120229',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://www.tv8play.se/program/antikjakten/282756?autostart=true',
+            'info_dict': {
+                'id': '282756',
+                'ext': 'flv',
+                'title': 'Antikjakten S01E10',
+                'description': 'md5:1b201169beabd97e20c5ad0ad67b13b8',
+                'duration': 2646,
+                'timestamp': 1348575868,
+                'upload_date': '20120925',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://www.tv3play.no/programmer/anna-anka-soker-assistent/230898?autostart=true',
+            'info_dict': {
+                'id': '230898',
+                'ext': 'flv',
+                'title': 'Anna Anka søker assistent - Ep. 8',
+                'description': 'md5:f80916bf5bbe1c5f760d127f8dd71474',
+                'duration': 2656,
+                'timestamp': 1277720005,
+                'upload_date': '20100628',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://www.viasat4play.no/programmer/budbringerne/21873?autostart=true',
+            'info_dict': {
+                'id': '21873',
+                'ext': 'flv',
+                'title': 'Budbringerne program 10',
+                'description': 'md5:4db78dc4ec8a85bb04fd322a3ee5092d',
+                'duration': 1297,
+                'timestamp': 1254205102,
+                'upload_date': '20090929',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://www.tv6play.no/programmer/hotelinspektor-alex-polizzi/361883?autostart=true',
+            'info_dict': {
+                'id': '361883',
+                'ext': 'flv',
+                'title': 'Hotelinspektør Alex Polizzi - Ep. 10',
+                'description': 'md5:3ecf808db9ec96c862c8ecb3a7fdaf81',
+                'duration': 2594,
+                'timestamp': 1393236292,
+                'upload_date': '20140224',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         video = self._download_json(
             'http://playapi.mtgx.tv/v1/videos/%s' % video_id, video_id, 'Downloading video JSON')
 
         if video['is_geo_blocked']:
-            raise ExtractorError(
-                'This content is not available in your country due to copyright reasons', expected=True)
+            self.report_warning(
+                'This content might not be available in your country due to copyright reasons')
 
         streams = self._download_json(
             'http://playapi.mtgx.tv/v1/videos/stream/%s' % video_id, video_id, 'Downloading streams JSON')
@@ -49,7 +190,7 @@ class TVPlayIE(InfoExtractor):
         quality = qualities(['hls', 'medium', 'high'])
         formats = []
         for format_id, video_url in streams['streams'].items():
-            if not video_url:
+            if not video_url or not isinstance(video_url, compat_str):
                 continue
             fmt = {
                 'format_id': format_id,
@@ -65,6 +206,10 @@ class TVPlayIE(InfoExtractor):
                     'app': m.group('app'),
                     'play_path': m.group('playpath'),
                 })
+            elif video_url.endswith('.f4m'):
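+                # append Akamai-style hdcore/plugin parameters to mimic a
+                # real HDS client; the manifest may be refused without them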
+                formats.extend(self._extract_f4m_formats(
+                    video_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id))
+                continue
             else:
                 fmt.update({
                     'url': video_url,
diff --git a/youtube_dl/extractor/twentyfourvideo.py b/youtube_dl/extractor/twentyfourvideo.py
new file mode 100644 (file)
index 0000000..67e8bfe
--- /dev/null
@@ -0,0 +1,109 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_iso8601,
+    int_or_none,
+)
+
+
+class TwentyFourVideoIE(InfoExtractor):
+    IE_NAME = '24video'
+    _VALID_URL = r'https?://(?:www\.)?24video\.net/(?:video/(?:view|xml)/|player/new24_play\.swf\?id=)(?P<id>\d+)'
+
+    _TESTS = [
+        {
+            'url': 'http://www.24video.net/video/view/1044982',
+            'md5': '48dd7646775690a80447a8dca6a2df76',
+            'info_dict': {
+                'id': '1044982',
+                'ext': 'mp4',
+                'title': 'Эротика каменного века',
+                'description': 'Как смотрели порно в каменном веке.',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'uploader': 'SUPERTELO',
+                'duration': 31,
+                'timestamp': 1275937857,
+                'upload_date': '20100607',
+                'age_limit': 18,
+                'like_count': int,
+                'dislike_count': int,
+            },
+        },
+        {
+            'url': 'http://www.24video.net/player/new24_play.swf?id=1044982',
+            'only_matching': True,
+        }
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(
+            'http://www.24video.net/video/view/%s' % video_id, video_id)
+
+        title = self._og_search_title(webpage)
+        description = self._html_search_regex(
+            r'<span itemprop="description">([^<]+)</span>', webpage, 'description', fatal=False)
+        thumbnail = self._og_search_thumbnail(webpage)
+        duration = int_or_none(self._og_search_property(
+            'duration', webpage, 'duration', fatal=False))
+        timestamp = parse_iso8601(self._search_regex(
+            r'<time id="video-timeago" datetime="([^"]+)" itemprop="uploadDate">',
+            webpage, 'upload date'))
+
+        uploader = self._html_search_regex(
+            r'Загрузил\s*<a href="/jsecUser/movies/[^"]+" class="link">([^<]+)</a>',
+            webpage, 'uploader', fatal=False)
+
+        view_count = int_or_none(self._html_search_regex(
+            r'<span class="video-views">(\d+) просмотр',
+            webpage, 'view count', fatal=False))
+        comment_count = int_or_none(self._html_search_regex(
+            r'<div class="comments-title" id="comments-count">(\d+) комментари',
+            webpage, 'comment count', fatal=False))
+
+        formats = []
+
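+        # the site exposes two XML playlists: '?mode=play' yields the
+        # higher-quality PC stream, the bare endpoint the mobile one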
+        pc_video = self._download_xml(
+            'http://www.24video.net/video/xml/%s?mode=play' % video_id,
+            video_id, 'Downloading PC video URL').find('.//video')
+
+        formats.append({
+            'url': pc_video.attrib['url'],
+            'format_id': 'pc',
+            'quality': 1,
+        })
+
+        like_count = int_or_none(pc_video.get('ratingPlus'))
+        dislike_count = int_or_none(pc_video.get('ratingMinus'))
+        age_limit = 18 if pc_video.get('adult') == 'true' else 0
+
+        mobile_video = self._download_xml(
+            'http://www.24video.net/video/xml/%s' % video_id,
+            video_id, 'Downloading mobile video URL').find('.//video')
+
+        formats.append({
+            'url': mobile_video.attrib['url'],
+            'format_id': 'mobile',
+            'quality': 0,
+        })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'duration': duration,
+            'timestamp': timestamp,
+            'view_count': view_count,
+            'comment_count': comment_count,
+            'like_count': like_count,
+            'dislike_count': dislike_count,
+            'age_limit': age_limit,
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
new file mode 100644 (file)
index 0000000..397d167
--- /dev/null
@@ -0,0 +1,229 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import itertools
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+    ExtractorError,
+    parse_iso8601,
+)
+
+
+class TwitchIE(InfoExtractor):
+    # TODO: One broadcast may be split into multiple videos. The key
+    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
+    # starts at 1 and increases. Can we treat all parts as one video?
+    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?twitch\.tv/
+        (?:
+            (?P<channelid>[^/]+)|
+            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
+            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
+        )
+        /?(?:\#.*)?$
+        """
+    _PAGE_LIMIT = 100
+    _API_BASE = 'https://api.twitch.tv'
+    _LOGIN_URL = 'https://secure.twitch.tv/user/login'
+    _TESTS = [{
+        'url': 'http://www.twitch.tv/riotgames/b/577357806',
+        'info_dict': {
+            'id': 'a577357806',
+            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
+        },
+        'playlist_mincount': 12,
+    }, {
+        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
+        'info_dict': {
+            'id': 'c5285812',
+            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
+        },
+        'playlist_mincount': 3,
+    }, {
+        'url': 'http://www.twitch.tv/vanillatv',
+        'info_dict': {
+            'id': 'vanillatv',
+            'title': 'VanillaTV',
+        },
+        'playlist_mincount': 412,
+    }]
+
+    def _handle_error(self, response):
+        if not isinstance(response, dict):
+            return
+        error = response.get('error')
+        if error:
+            raise ExtractorError(
+                '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
+                expected=True)
+
+    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
+        response = super(TwitchIE, self)._download_json(url, video_id, note)
+        self._handle_error(response)
+        return response
+
+    def _extract_media(self, item, item_id):
+        ITEMS = {
+            'a': 'video',
+            'c': 'chapter',
+        }
+        info = self._extract_info(self._download_json(
+            '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
+            'Downloading %s info JSON' % ITEMS[item]))
+        response = self._download_json(
+            '%s/api/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
+            'Downloading %s playlist JSON' % ITEMS[item])
+        entries = []
+        chunks = response['chunks']
+        qualities = list(chunks.keys())
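+        # chunks maps each quality name to its fragment list;
+        # zip(*chunks.values()) groups the n-th fragment of every quality,
+        # so each broadcast part becomes one entry carrying all formats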
+        for num, fragment in enumerate(zip(*chunks.values()), start=1):
+            formats = []
+            for fmt_num, fragment_fmt in enumerate(fragment):
+                format_id = qualities[fmt_num]
+                fmt = {
+                    'url': fragment_fmt['url'],
+                    'format_id': format_id,
+                    'quality': 1 if format_id == 'live' else 0,
+                }
+                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
+                if m:
+                    fmt['height'] = int(m.group('height'))
+                formats.append(fmt)
+            self._sort_formats(formats)
+            entry = dict(info)
+            entry['id'] = '%s_%d' % (entry['id'], num)
+            entry['title'] = '%s part %d' % (entry['title'], num)
+            entry['formats'] = formats
+            entries.append(entry)
+        return self.playlist_result(entries, info['id'], info['title'])
+
+    def _extract_info(self, info):
+        return {
+            'id': info['_id'],
+            'title': info['title'],
+            'description': info['description'],
+            'duration': info['length'],
+            'thumbnail': info['preview'],
+            'uploader': info['channel']['display_name'],
+            'uploader_id': info['channel']['name'],
+            'timestamp': parse_iso8601(info['recorded_at']),
+            'view_count': info['views'],
+        }
+
+    def _real_initialize(self):
+        self._login()
+
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+
+        login_page = self._download_webpage(
+            self._LOGIN_URL, None, 'Downloading login page')
+
+        authenticity_token = self._search_regex(
+            r'<input name="authenticity_token" type="hidden" value="([^"]+)"',
+            login_page, 'authenticity token')
+
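+        # echo the CSRF token scraped above back in the login form; the
+        # POST would presumably be rejected without it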
+        login_form = {
+            'utf8': '✓'.encode('utf-8'),
+            'authenticity_token': authenticity_token,
+            'redirect_on_login': '',
+            'embed_form': 'false',
+            'mp_source_action': '',
+            'follow': '',
+            'user[login]': username,
+            'user[password]': password,
+        }
+
+        request = compat_urllib_request.Request(
+            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+        request.add_header('Referer', self._LOGIN_URL)
+        response = self._download_webpage(
+            request, None, 'Logging in as %s' % username)
+
+        m = re.search(
+            r"id=([\"'])login_error_message\1[^>]*>(?P<msg>[^<]+)", response)
+        if m:
+            raise ExtractorError(
+                'Unable to login: %s' % m.group('msg').strip(), expected=True)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj.group('chapterid'):
+            return self._extract_media('c', mobj.group('chapterid'))
+
+            """
+            webpage = self._download_webpage(url, chapter_id)
+            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
+            if not m:
+                raise ExtractorError('Cannot find archive of a chapter')
+            archive_id = m.group(1)
+
+            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
+            doc = self._download_xml(
+                api, chapter_id,
+                note='Downloading chapter information',
+                errnote='Chapter information download failed')
+            for a in doc.findall('.//archive'):
+                if archive_id == a.find('./id').text:
+                    break
+            else:
+                raise ExtractorError('Could not find chapter in chapter information')
+
+            video_url = a.find('./video_file_url').text
+            video_ext = video_url.rpartition('.')[2] or 'flv'
+
+            chapter_api_url = 'https://api.twitch.tv/kraken/videos/c' + chapter_id
+            chapter_info = self._download_json(
+                chapter_api_url, 'c' + chapter_id,
+                note='Downloading chapter metadata',
+                errnote='Download of chapter metadata failed')
+
+            bracket_start = int(doc.find('.//bracket_start').text)
+            bracket_end = int(doc.find('.//bracket_end').text)
+
+            # TODO determine start (and probably fix up file)
+            #  youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
+            #video_url += '?start=' + TODO:start_timestamp
+            # bracket_start is 13290, but we want 51670615
+            self._downloader.report_warning('Chapter detected, but we can just download the whole file. '
+                                            'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
+
+            info = {
+                'id': 'c' + chapter_id,
+                'url': video_url,
+                'ext': video_ext,
+                'title': chapter_info['title'],
+                'thumbnail': chapter_info['preview'],
+                'description': chapter_info['description'],
+                'uploader': chapter_info['channel']['display_name'],
+                'uploader_id': chapter_info['channel']['name'],
+            }
+            return info
+            """
+        elif mobj.group('videoid'):
+            return self._extract_media('a', mobj.group('videoid'))
+        elif mobj.group('channelid'):
+            channel_id = mobj.group('channelid')
+            info = self._download_json(
+                '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
+                channel_id, 'Downloading channel info JSON')
+            channel_name = info.get('display_name') or info.get('name')
+            entries = []
+            offset = 0
+            limit = self._PAGE_LIMIT
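+            # page through the channel's videos _PAGE_LIMIT at a time until
+            # the API returns an empty 'videos' list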
+            for counter in itertools.count(1):
+                response = self._download_json(
+                    '%s/kraken/channels/%s/videos/?offset=%d&limit=%d'
+                    % (self._API_BASE, channel_id, offset, limit),
+                    channel_id, 'Downloading channel videos JSON page %d' % counter)
+                videos = response['videos']
+                if not videos:
+                    break
+                entries.extend([self.url_result(video['url'], 'Twitch') for video in videos])
+                offset += limit
+            return self.playlist_result(entries, channel_id, channel_name)
diff --git a/youtube_dl/extractor/ubu.py b/youtube_dl/extractor/ubu.py
new file mode 100644 (file)
index 0000000..0182d67
--- /dev/null
@@ -0,0 +1,56 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class UbuIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?ubu\.com/film/(?P<id>[\da-z_-]+)\.html'
+    _TEST = {
+        'url': 'http://ubu.com/film/her_noise.html',
+        'md5': '8edd46ee8aa6b265fb5ed6cf05c36bc9',
+        'info_dict': {
+            'id': 'her_noise',
+            'ext': 'mp4',
+            'title': 'Her Noise - The Making Of (2007)',
+            'duration': 3600,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(
+            r'<title>.+?Film &amp; Video: ([^<]+)</title>', webpage, 'title')
+
+        duration = int_or_none(self._html_search_regex(
+            r'Duration: (\d+) minutes', webpage, 'duration', fatal=False, default=None))
+        if duration:
+            duration *= 60
+
+        formats = []
+
+        FORMAT_REGEXES = [
+            ['sq', r"'flashvars'\s*,\s*'file=([^']+)'"],
+            ['hq', r'href="(http://ubumexico\.centro\.org\.mx/video/[^"]+)"']
+        ]
+
+        for format_id, format_regex in FORMAT_REGEXES:
+            m = re.search(format_regex, webpage)
+            if m:
+                formats.append({
+                    'url': m.group(1),
+                    'format_id': format_id,
+                })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'duration': duration,
+            'formats': formats,
+        }
index 054f427252341306edd7698e6d150bcb619b64fc..5271611ac9f883af6a63e371b919c68398adaca0 100644 (file)
@@ -40,8 +40,24 @@ class UdemyIE(InfoExtractor):
                 error_str += ' - %s' % error_data.get('formErrors')
             raise ExtractorError(error_str, expected=True)
 
-    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
-        response = super(UdemyIE, self)._download_json(url, video_id, note)
+    def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata'):
+        headers = {
+            'X-Udemy-Snail-Case': 'true',
+            'X-Requested-With': 'XMLHttpRequest',
+        }
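+        # mirror the session cookies into Udemy's custom auth headers;
+        # the JSON API appears to reject requests without them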
+        for cookie in self._downloader.cookiejar:
+            if cookie.name == 'client_id':
+                headers['X-Udemy-Client-Id'] = cookie.value
+            elif cookie.name == 'access_token':
+                headers['X-Udemy-Bearer-Token'] = cookie.value
+
+        if isinstance(url_or_request, compat_urllib_request.Request):
+            for header, value in headers.items():
+                url_or_request.add_header(header, value)
+        else:
+            url_or_request = compat_urllib_request.Request(url_or_request, headers=headers)
+
+        response = super(UdemyIE, self)._download_json(url_or_request, video_id, note)
         self._handle_error(response)
         return response
 
@@ -62,7 +78,9 @@ class UdemyIE(InfoExtractor):
         if login_popup == '<div class="run-command close-popup redirect" data-url="https://www.udemy.com/"></div>':
             return
 
-        csrf = self._html_search_regex(r'<input type="hidden" name="csrf" value="(.+?)"', login_popup, 'csrf token')
+        csrf = self._html_search_regex(
+            r'<input type="hidden" name="csrf" value="(.+?)"',
+            login_popup, 'csrf token')
 
         login_form = {
             'email': username,
@@ -71,42 +89,49 @@ class UdemyIE(InfoExtractor):
             'displayType': 'json',
             'isSubmitted': '1',
         }
-        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
-        response = self._download_json(request, None, 'Logging in as %s' % username)
+        request = compat_urllib_request.Request(
+            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+        response = self._download_json(
+            request, None, 'Logging in as %s' % username)
 
         if 'returnUrl' not in response:
             raise ExtractorError('Unable to log in')
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        lecture_id = mobj.group('id')
+        lecture_id = self._match_id(url)
 
         lecture = self._download_json(
-            'https://www.udemy.com/api-1.1/lectures/%s' % lecture_id, lecture_id, 'Downloading lecture JSON')
+            'https://www.udemy.com/api-1.1/lectures/%s' % lecture_id,
+            lecture_id, 'Downloading lecture JSON')
 
-        if lecture['assetType'] != 'Video':
-            raise ExtractorError('Lecture %s is not a video' % lecture_id, expected=True)
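+        # the API may return camelCase or snake_case keys (the
+        # X-Udemy-Snail-Case header presumably requests the latter),
+        # so try both spellings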
+        asset_type = lecture.get('assetType') or lecture.get('asset_type')
+        if asset_type != 'Video':
+            raise ExtractorError(
+                'Lecture %s is not a video' % lecture_id, expected=True)
 
         asset = lecture['asset']
 
-        stream_url = asset['streamUrl']
+        stream_url = asset.get('streamUrl') or asset.get('stream_url')
         mobj = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url)
         if mobj:
             return self.url_result(mobj.group(1), 'Youtube')
 
         video_id = asset['id']
-        thumbnail = asset['thumbnailUrl']
+        thumbnail = asset.get('thumbnailUrl') or asset.get('thumbnail_url')
         duration = asset['data']['duration']
 
-        download_url = asset['downloadUrl']
+        download_url = asset.get('downloadUrl') or asset.get('download_url')
+
+        video = download_url.get('Video') or download_url.get('video')
+        video_480p = download_url.get('Video480p') or download_url.get('video_480p')
 
         formats = [
             {
-                'url': download_url['Video480p'][0],
+                'url': video_480p[0],
                 'format_id': '360p',
             },
             {
-                'url': download_url['Video'][0],
+                'url': video[0],
                 'format_id': '720p',
             },
         ]
@@ -140,25 +165,29 @@ class UdemyCourseIE(UdemyIE):
         course_path = mobj.group('coursepath')
 
         response = self._download_json(
-            'https://www.udemy.com/api-1.1/courses/%s' % course_path, course_path, 'Downloading course JSON')
+            'https://www.udemy.com/api-1.1/courses/%s' % course_path,
+            course_path, 'Downloading course JSON')
 
         course_id = int(response['id'])
         course_title = response['title']
 
         webpage = self._download_webpage(
-            'https://www.udemy.com/course/subscribe/?courseId=%s' % course_id, course_id, 'Enrolling in the course')
+            'https://www.udemy.com/course/subscribe/?courseId=%s' % course_id,
+            course_id, 'Enrolling in the course')
 
         if self._SUCCESSFULLY_ENROLLED in webpage:
             self.to_screen('%s: Successfully enrolled in' % course_id)
         elif self._ALREADY_ENROLLED in webpage:
             self.to_screen('%s: Already enrolled in' % course_id)
 
-        response = self._download_json('https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id,
+        response = self._download_json(
+            'https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id,
             course_id, 'Downloading course curriculum')
 
         entries = [
-            self.url_result('https://www.udemy.com/%s/#/lecture/%s' % (course_path, asset['id']), 'Udemy')
-            for asset in response if asset.get('assetType') == 'Video'
+            self.url_result(
+                'https://www.udemy.com/%s/#/lecture/%s' % (course_path, asset['id']), 'Udemy')
+            for asset in response if (asset.get('assetType') or asset.get('asset_type')) == 'Video'
         ]
 
-        return self.playlist_result(entries, course_id, course_title)
\ No newline at end of file
+        return self.playlist_result(entries, course_id, course_title)
index 474610eec79483da01c14ca3e1d985b7aa8fd49a..f70978299ac9e682f5cdb99a7396541fd08c115c 100644 (file)
@@ -1,32 +1,66 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
+from ..utils import qualities
+
 
 class UnistraIE(InfoExtractor):
-    _VALID_URL = r'http://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(\d+)'
-
-    _TEST = {
-        u'url': u'http://utv.unistra.fr/video.php?id_video=154',
-        u'file': u'154.mp4',
-        u'md5': u'736f605cfdc96724d55bb543ab3ced24',
-        u'info_dict': {
-            u'title': u'M!ss Yella',
-            u'description': u'md5:104892c71bd48e55d70b902736b81bbf',
+    _VALID_URL = r'http://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)'
+
+    _TESTS = [
+        {
+            'url': 'http://utv.unistra.fr/video.php?id_video=154',
+            'md5': '736f605cfdc96724d55bb543ab3ced24',
+            'info_dict': {
+                'id': '154',
+                'ext': 'mp4',
+                'title': 'M!ss Yella',
+                'description': 'md5:104892c71bd48e55d70b902736b81bbf',
+            },
         },
-    }
+        {
+            'url': 'http://utv.unistra.fr/index.php?id_video=437',
+            'md5': '1ddddd6cccaae76f622ce29b8779636d',
+            'info_dict': {
+                'id': '437',
+                'ext': 'mp4',
+                'title': 'Prix Louise Weiss 2014',
+                'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a',
+            },
+        }
+    ]
 
     def _real_extract(self, url):
-        id = re.match(self._VALID_URL, url).group(1)
-        webpage = self._download_webpage(url, id)
-        file = re.search(r'file: "(.*?)",', webpage).group(1)
-        title = self._html_search_regex(r'<title>UTV - (.*?)</', webpage, u'title')
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
 
-        video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file
+        webpage = self._download_webpage(url, video_id)
 
-        return {'id': id,
-                'title': title,
-                'ext': 'mp4',
-                'url': video_url,
-                'description': self._html_search_regex(r'<meta name="Description" content="(.*?)"', webpage, u'description', flags=re.DOTALL),
-                'thumbnail': self._search_regex(r'image: "(.*?)"', webpage, u'thumbnail'),
-                }
+        files = set(re.findall(r'file\s*:\s*"([^"]+)"', webpage))
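+        # each quality appears as its own file: "..." entry in the page's
+        # player JS; a '-HD.mp4' suffix marks the HD variant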
+
+        quality = qualities(['SD', 'HD'])
+        formats = []
+        for file_path in files:
+            format_id = 'HD' if file_path.endswith('-HD.mp4') else 'SD'
+            formats.append({
+                'url': 'http://vod-flash.u-strasbg.fr:8080%s' % file_path,
+                'format_id': format_id,
+                'quality': quality(format_id)
+            })
+
+        title = self._html_search_regex(
+            r'<title>UTV - (.*?)</', webpage, 'title')
+        description = self._html_search_regex(
+            r'<meta name="Description" content="(.*?)"', webpage, 'description', flags=re.DOTALL)
+        thumbnail = self._search_regex(
+            r'image: "(.*?)"', webpage, 'thumbnail')
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'formats': formats
+        }
index 488b10df96e298c683cd02287e2da0c49f21a1cc..53dc3a496ff65edf044137540080d9190ad8d72b 100644 (file)
@@ -1,12 +1,10 @@
 from __future__ import unicode_literals
 
-import json
 import re
 
 from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
-    get_meta_content,
 )
 
 
@@ -47,13 +45,13 @@ class UstreamIE(InfoExtractor):
         self.report_extraction(video_id)
 
         video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
-            webpage, 'title')
+                                              webpage, 'title')
 
         uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
-            webpage, 'uploader', fatal=False, flags=re.DOTALL)
+                                           webpage, 'uploader', fatal=False, flags=re.DOTALL)
 
         thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
-            webpage, 'thumbnail', fatal=False)
+                                            webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
@@ -68,21 +66,36 @@ class UstreamIE(InfoExtractor):
 class UstreamChannelIE(InfoExtractor):
     _VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
     IE_NAME = 'ustream:channel'
+    _TEST = {
+        'url': 'http://www.ustream.tv/channel/channeljapan',
+        'info_dict': {
+            'id': '10874166',
+        },
+        'playlist_mincount': 17,
+    }
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
-        slug = m.group('slug')
-        webpage = self._download_webpage(url, slug)
-        channel_id = get_meta_content('ustream:channel_id', webpage)
+        display_id = m.group('slug')
+        webpage = self._download_webpage(url, display_id)
+        channel_id = self._html_search_meta('ustream:channel_id', webpage)
 
         BASE = 'http://www.ustream.tv'
         next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
         video_ids = []
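+        # follow the paginated AJAX feed: each reply's 'nextUrl' points at
+        # the next page and is empty on the last one, ending the loop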
         while next_url:
-            reply = json.loads(self._download_webpage(compat_urlparse.urljoin(BASE, next_url), channel_id))
+            reply = self._download_json(
+                compat_urlparse.urljoin(BASE, next_url), display_id,
+                note='Downloading video information (next: %d)' % (len(video_ids) + 1))
             video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
             next_url = reply['nextUrl']
 
-        urls = ['http://www.ustream.tv/recorded/' + vid for vid in video_ids]
-        url_entries = [self.url_result(eurl, 'Ustream') for eurl in urls]
-        return self.playlist_result(url_entries, channel_id)
+        entries = [
+            self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
+            for vid in video_ids]
+        return {
+            '_type': 'playlist',
+            'id': channel_id,
+            'display_id': display_id,
+            'entries': entries,
+        }
index df115d2516196517b1a66fe2fb12bd1fbf96b6e5..455b6d9da62f221cf0854655f707d5963546840f 100644 (file)
@@ -19,7 +19,7 @@ class Vbox7IE(InfoExtractor):
         'md5': '99f65c0c9ef9b682b97313e052734c3f',
         'info_dict': {
             'id': '249bb972c2',
-            'ext': 'flv',
+            'ext': 'mp4',
             'title': 'Смях! Чудо - чист за секунди - Скрита камера',
         },
     }
@@ -30,13 +30,13 @@ class Vbox7IE(InfoExtractor):
 
         redirect_page, urlh = self._download_webpage_handle(url, video_id)
         new_location = self._search_regex(r'window\.location = \'(.*)\';',
-            redirect_page, 'redirect location')
+                                          redirect_page, 'redirect location')
         redirect_url = urlh.geturl() + new_location
         webpage = self._download_webpage(redirect_url, video_id,
-            'Downloading redirect page')
+                                         'Downloading redirect page')
 
         title = self._html_search_regex(r'<title>(.*)</title>',
-            webpage, 'title').split('/')[0].strip()
+                                        webpage, 'title').split('/')[0].strip()
 
         info_url = "http://vbox7.com/play/magare.do"
         data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
@@ -50,7 +50,6 @@ class Vbox7IE(InfoExtractor):
         return {
             'id': video_id,
             'url': final_url,
-            'ext': 'flv',
             'title': title,
             'thumbnail': thumbnail_url,
         }
index b1c854a646c601d4dadaa1dce7fab8d6fc315b3d..94647d1c8c88a18cfb6abcba2ec5ed8e71e9c4b6 100644 (file)
@@ -16,8 +16,9 @@ class VeeHDIE(InfoExtractor):
 
     _TEST = {
         'url': 'http://veehd.com/video/4686958',
-        'file': '4686958.mp4',
         'info_dict': {
+            'id': '4686958',
+            'ext': 'mp4',
             'title': 'Time Lapse View from Space ( ISS)',
             'uploader_id': 'spotted',
             'description': 'md5:f0094c4cf3a72e22bc4e4239ef767ad7',
@@ -47,11 +48,11 @@ class VeeHDIE(InfoExtractor):
         video_url = compat_urlparse.unquote(config['clip']['url'])
         title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0])
         uploader_id = self._html_search_regex(r'<a href="/profile/\d+">(.+?)</a>',
-            webpage, 'uploader')
+                                              webpage, 'uploader')
         thumbnail = self._search_regex(r'<img id="veehdpreview" src="(.+?)"',
-            webpage, 'thumbnail')
+                                       webpage, 'thumbnail')
         description = self._html_search_regex(r'<td class="infodropdown".*?<div>(.*?)<ul',
-            webpage, 'description', flags=re.DOTALL)
+                                              webpage, 'description', flags=re.DOTALL)
 
         return {
             '_type': 'video',
index 27f9acb670b1b10ffa2ee0220f62dc411e49d8d7..a0c59a2e0e1cb8fca2e0e3eb3ec2e4edce2918bb 100644 (file)
@@ -112,10 +112,10 @@ class VestiIE(InfoExtractor):
         if mobj:
             video_id = mobj.group('id')
             page = self._download_webpage('http://www.vesti.ru/only_video.html?vid=%s' % video_id, video_id,
-                'Downloading video page')
+                                          'Downloading video page')
 
         rutv_url = RUTVIE._extract_url(page)
         if rutv_url:
             return self.url_result(rutv_url, 'RUTV')
 
-        raise ExtractorError('No video found', expected=True)
\ No newline at end of file
+        raise ExtractorError('No video found', expected=True)
index d2ffd1b6ba893f2cb2cc50f00a3131a835dba97d..c912c3cbe7ae42b221816b50ec6a97139cb13d55 100644 (file)
@@ -5,7 +5,7 @@ import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..utils import (
-    compat_HTTPError,
+    compat_urllib_request,
     ExtractorError,
 )
 
@@ -13,7 +13,7 @@ from ..utils import (
 class VevoIE(InfoExtractor):
     """
     Accepts urls from vevo.com or in the format 'vevo:{id}'
-    (currently used by MTVIE)
+    (currently used by MTVIE and MySpaceIE)
     """
     _VALID_URL = r'''(?x)
         (?:https?://www\.vevo\.com/watch/(?:[^/]+/(?:[^/]+/)?)?|
@@ -24,7 +24,7 @@ class VevoIE(InfoExtractor):
 
     _TESTS = [{
         'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
-        "md5": "06bea460acb744eab74a9d7dcb4bfd61",
+        "md5": "95ee28ee45e70130e3ab02b0f579ae23",
         'info_dict': {
             'id': 'GB1101300280',
             'ext': 'mp4',
@@ -40,7 +40,7 @@ class VevoIE(InfoExtractor):
     }, {
         'note': 'v3 SMIL format',
         'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
-        'md5': '893ec0e0d4426a1d96c01de8f2bdff58',
+        'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
         'info_dict': {
             'id': 'USUV71302923',
             'ext': 'mp4',
@@ -69,6 +69,23 @@ class VevoIE(InfoExtractor):
     }]
     _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/'
 
+    def _real_initialize(self):
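+        # data=b'' forces urllib to issue a POST request; the oauth token is
+        # then scraped from the JSON in the response body.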
+        req = compat_urllib_request.Request(
+            'http://www.vevo.com/auth', data=b'')
+        webpage = self._download_webpage(
+            req, None,
+            note='Retrieving oauth token',
+            errnote='Unable to retrieve oauth token',
+            fatal=False)
+        if webpage is False:
+            self._oauth_token = None
+        else:
+            self._oauth_token = self._search_regex(
+                r'access_token":\s*"([^"]+)"',
+                webpage, 'access token', fatal=False)
+
     def _formats_from_json(self, video_info):
         last_version = {'version': -1}
         for version in video_info['videoVersions']:
@@ -129,6 +144,28 @@ class VevoIE(InfoExtractor):
             })
         return formats
 
+    def _download_api_formats(self, video_id):
+        if not self._oauth_token:
+            self._downloader.report_warning(
+                'No oauth token available, skipping API HLS download')
+            return []
+
+        api_url = 'https://apiv2.vevo.com/video/%s/streams/hls?token=%s' % (
+            video_id, self._oauth_token)
+        api_data = self._download_json(
+            api_url, video_id,
+            note='Downloading HLS formats',
+            errnote='Failed to download HLS format list', fatal=False)
+        if not api_data:
+            return []
+
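+        # The first stream descriptor carries the master m3u8 URL, which is
+        # expanded into individual formats below.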
+        m3u8_url = api_data[0]['url']
+        return self._extract_m3u8_formats(
+            m3u8_url, video_id, entry_protocol='m3u8_native', ext='mp4',
+            preference=0)
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
@@ -152,30 +187,8 @@ class VevoIE(InfoExtractor):
         else:
             age_limit = None
 
-        # Download SMIL
-        smil_blocks = sorted((
-            f for f in video_info['videoVersions']
-            if f['sourceType'] == 13),
-            key=lambda f: f['version'])
-
-        smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % (
-            self._SMIL_BASE_URL, video_id, video_id.lower())
-        if smil_blocks:
-            smil_url_m = self._search_regex(
-                r'url="([^"]+)"', smil_blocks[-1]['data'], 'SMIL URL',
-                fatal=False)
-            if smil_url_m is not None:
-                smil_url = smil_url_m
-
-        try:
-            smil_xml = self._download_webpage(smil_url, video_id,
-                                              'Downloading SMIL info')
-            formats.extend(self._formats_from_smil(smil_xml))
-        except ExtractorError as ee:
-            if not isinstance(ee.cause, compat_HTTPError):
-                raise
-            self._downloader.report_warning(
-                'Cannot download SMIL information, falling back to JSON ..')
+        # Download via HLS API
+        formats.extend(self._download_api_formats(video_id))
 
         self._sort_formats(formats)
         timestamp_ms = int(self._search_regex(
diff --git a/youtube_dl/extractor/vgtv.py b/youtube_dl/extractor/vgtv.py
new file mode 100644 (file)
index 0000000..2f111bf
--- /dev/null
@@ -0,0 +1,121 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import float_or_none
+
+
+class VGTVIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?vgtv\.no/#!/(?:.*)/(?P<id>[0-9]+)'
+    _TESTS = [
+        {
+            # streamType: vod
+            'url': 'http://www.vgtv.no/#!/video/84196/hevnen-er-soet-episode-10-abu',
+            'md5': 'b8be7a234cebb840c0d512c78013e02f',
+            'info_dict': {
+                'id': '84196',
+                'ext': 'mp4',
+                'title': 'Hevnen er søt: Episode 10 - Abu',
+                'description': 'md5:e25e4badb5f544b04341e14abdc72234',
+                'thumbnail': 're:^https?://.*\.jpg',
+                'duration': 648.000,
+                'timestamp': 1404626400,
+                'upload_date': '20140706',
+                'view_count': int,
+            },
+        },
+        {
+            # streamType: wasLive
+            'url': 'http://www.vgtv.no/#!/live/100764/opptak-vgtv-foelger-em-kvalifiseringen',
+            'info_dict': {
+                'id': '100764',
+                'ext': 'flv',
+                'title': 'OPPTAK: VGTV følger EM-kvalifiseringen',
+                'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3',
+                'thumbnail': 're:^https?://.*\.jpg',
+                'duration': 9103.0,
+                'timestamp': 1410113864,
+                'upload_date': '20140907',
+                'view_count': int,
+            },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            },
+        },
+        {
+            # streamType: live
+            'url': 'http://www.vgtv.no/#!/live/100015/direkte-her-kan-du-se-laksen-live-fra-suldalslaagen',
+            'info_dict': {
+                'id': '100015',
+                'ext': 'flv',
+                'title': 'DIREKTE: Her kan du se laksen live fra Suldalslågen!',
+                'description': 'md5:9a60cc23fa349f761628924e56eeec2d',
+                'thumbnail': 're:^https?://.*\.jpg',
+                'duration': 0,
+                'timestamp': 1407423348,
+                'upload_date': '20140807',
+                'view_count': int,
+            },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        data = self._download_json(
+            'http://svp.vg.no/svp/api/v1/vgtv/assets/%s?appName=vgtv-website' % video_id,
+            video_id, 'Downloading media JSON')
+
+        streams = data['streamUrls']
+
+        formats = []
+
+        hls_url = streams.get('hls')
+        if hls_url:
+            formats.extend(self._extract_m3u8_formats(hls_url, video_id, 'mp4'))
+
+        hds_url = streams.get('hds')
+        if hds_url:
+            formats.extend(self._extract_f4m_formats(hds_url + '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18', video_id))
+
+        mp4_url = streams.get('mp4')
+        if mp4_url and (hls_url or hds_url):
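+            # The HLS/HDS URL embeds a comma-separated list of
+            # WIDTH_HEIGHT_BITRATE variant names; substitute each into the
+            # mp4 stream's path to build direct MP4 format URLs.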
+            _url = hls_url or hds_url
+            MP4_URL_TEMPLATE = '%s/%%s.%s' % (mp4_url.rpartition('/')[0], mp4_url.rpartition('.')[-1])
+            for mp4_format in _url.split(','):
+                m = re.search('(?P<width>\d+)_(?P<height>\d+)_(?P<vbr>\d+)', mp4_format)
+                if not m:
+                    continue
+                width = int(m.group('width'))
+                height = int(m.group('height'))
+                vbr = int(m.group('vbr'))
+                formats.append({
+                    'url': MP4_URL_TEMPLATE % mp4_format,
+                    'format_id': 'mp4-%s' % vbr,
+                    'width': width,
+                    'height': height,
+                    'vbr': vbr,
+                    'preference': 1,
+                })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': data['title'],
+            'description': data['description'],
+            'thumbnail': data['images']['main'] + '?t[]=900x506q80',
+            'timestamp': data['published'],
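+            # 'duration' is reported in milliseconds; float_or_none scales it to seconds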
+            'duration': float_or_none(data['duration'], 1000),
+            'view_count': data['displays'],
+            'formats': formats,
+        }
index 2f77e38981c3607b6dceb00cac3bca514bf2fcd8..6be3774b7aa7367e9a067b417d7a749fece55d05 100644 (file)
@@ -121,4 +121,7 @@ class VH1IE(MTVIE):
         idoc = self._download_xml(
             doc_url, video_id,
             'Downloading info', transform_source=fix_xml_ampersands)
-        return [self._get_video_info(item) for item in idoc.findall('.//item')]
+        return self.playlist_result(
+            [self._get_video_info(item) for item in idoc.findall('.//item')],
+            playlist_id=video_id,
+        )
diff --git a/youtube_dl/extractor/vice.py b/youtube_dl/extractor/vice.py
new file mode 100644 (file)
index 0000000..71f520f
--- /dev/null
@@ -0,0 +1,36 @@
+from __future__ import unicode_literals
+import re
+
+from .common import InfoExtractor
+from .ooyala import OoyalaIE
+from ..utils import ExtractorError
+
+
+class ViceIE(InfoExtractor):
+    _VALID_URL = r'http://www\.vice\.com/.*?/(?P<name>.+)'
+
+    _TEST = {
+        'url': 'http://www.vice.com/Fringes/cowboy-capitalists-part-1',
+        'info_dict': {
+            'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp',
+            'ext': 'mp4',
+            'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
+        },
+        'params': {
+            # Requires ffmpeg (m3u8 manifest)
+            'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        name = mobj.group('name')
+        webpage = self._download_webpage(url, name)
+        try:
+            embed_code = self._search_regex(
+                r'embedCode=([^&\'"]+)', webpage,
+                'ooyala embed code')
+            ooyala_url = OoyalaIE._url_for_embed_code(embed_code)
+        except ExtractorError:
+            raise ExtractorError('The page doesn\'t contain a video', expected=True)
+        return self.url_result(ooyala_url, ie='Ooyala')
index 9328ef4a2121f091c256e9324d0de0e8b7dcbecd..0faa729c60f916d69b885cfc76580104b226f84b 100644 (file)
@@ -1,55 +1,87 @@
-import json
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
+from ..utils import (
+    float_or_none,
+    int_or_none,
+)
 
 
 class ViddlerIE(InfoExtractor):
-    _VALID_URL = r'(?P<domain>https?://(?:www\.)?viddler\.com)/(?:v|embed|player)/(?P<id>[a-z0-9]+)'
+    _VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)'
     _TEST = {
-        u"url": u"http://www.viddler.com/v/43903784",
-        u'file': u'43903784.mp4',
-        u'md5': u'fbbaedf7813e514eb7ca30410f439ac9',
-        u'info_dict': {
-            u"title": u"Video Made Easy",
-            u"uploader": u"viddler",
-            u"duration": 100.89,
+        "url": "http://www.viddler.com/v/43903784",
+        'md5': 'ae43ad7cb59431ce043f0ff7fa13cbf4',
+        'info_dict': {
+            'id': '43903784',
+            'ext': 'mp4',
+            "title": "Video Made Easy",
+            'description': 'You don\'t need to be a professional to make high-quality video content. Viddler provides some quick and easy tips on how to produce great video content with limited resources. ',
+            "uploader": "viddler",
+            'timestamp': 1335371429,
+            'upload_date': '20120425',
+            "duration": 100.89,
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'view_count': int,
+            'categories': ['video content', 'high quality video', 'video made easy', 'how to produce video with limited resources', 'viddler'],
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        embed_url = mobj.group('domain') + u'/embed/' + video_id
-        webpage = self._download_webpage(embed_url, video_id)
-
-        video_sources_code = self._search_regex(
-            r"(?ms)sources\s*:\s*(\{.*?\})", webpage, u'video URLs')
-        video_sources = json.loads(video_sources_code.replace("'", '"'))
-
-        formats = [{
-            'url': video_url,
-            'format': format_id,
-        } for video_url, format_id in video_sources.items()]
-
-        title = self._html_search_regex(
-            r"title\s*:\s*'([^']*)'", webpage, u'title')
-        uploader = self._html_search_regex(
-            r"authorName\s*:\s*'([^']*)'", webpage, u'uploader', fatal=False)
-        duration_s = self._html_search_regex(
-            r"duration\s*:\s*([0-9.]*)", webpage, u'duration', fatal=False)
-        duration = float(duration_s) if duration_s else None
-        thumbnail = self._html_search_regex(
-            r"thumbnail\s*:\s*'([^']*)'",
-            webpage, u'thumbnail', fatal=False)
+        video_id = self._match_id(url)
+
+        json_url = (
+            'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?video_id=%s&key=v0vhrt7bg2xq1vyxhkct' %
+            video_id)
+        data = self._download_json(json_url, video_id)['video']
+
+        formats = []
+        for filed in data['files']:
+            if filed.get('status', 'ready') != 'ready':
+                continue
+            f = {
+                'format_id': filed['profile_id'],
+                'format_note': filed['profile_name'],
+                'url': self._proto_relative_url(filed['url']),
+                'width': int_or_none(filed.get('width')),
+                'height': int_or_none(filed.get('height')),
+                'filesize': int_or_none(filed.get('size')),
+                'ext': filed.get('ext'),
+                'source_preference': -1,
+            }
+            formats.append(f)
+
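+            # A file may additionally be reachable via a CDN URL and an HTML5
+            # source; emit every variant, ranked by source_preference.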
+            if filed.get('cdn_url'):
+                f = f.copy()
+                f['url'] = self._proto_relative_url(filed['cdn_url'])
+                f['format_id'] = filed['profile_id'] + '-cdn'
+                f['source_preference'] = 1
+                formats.append(f)
+
+            if filed.get('html5_video_source'):
+                f = f.copy()
+                f['url'] = self._proto_relative_url(
+                    filed['html5_video_source'])
+                f['format_id'] = filed['profile_id'] + '-html5'
+                f['source_preference'] = 0
+                formats.append(f)
+        self._sort_formats(formats)
+
+        categories = [
+            t.get('text') for t in data.get('tags', []) if 'text' in t]
 
         return {
             '_type': 'video',
             'id': video_id,
-            'title': title,
-            'thumbnail': thumbnail,
-            'uploader': uploader,
-            'duration': duration,
+            'title': data['title'],
             'formats': formats,
+            'description': data.get('description'),
+            'timestamp': int_or_none(data.get('upload_time')),
+            'thumbnail': self._proto_relative_url(data.get('thumbnail_url')),
+            'uploader': data.get('author'),
+            'duration': float_or_none(data.get('length')),
+            'view_count': int_or_none(data.get('view_count')),
+            'categories': categories,
         }
index fed95ef71120a7137b47d953dcb830e6ede59f23..0eb3d9414ea339e0854949732f40ab9975ce8500 100644 (file)
@@ -78,4 +78,4 @@ class VideoBamIE(InfoExtractor):
             'view_count': view_count,
             'formats': formats,
             'age_limit': 18,
-        }
\ No newline at end of file
+        }
index f75169041b4f958b9f345daba99a4a1ba575cf4e..94f9e9be94f9a420fd0207339085cbc35aba6805 100644 (file)
@@ -1,46 +1,50 @@
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
 from ..utils import (
     find_xpath_attr,
-    determine_ext,
+    int_or_none,
 )
 
+
 class VideofyMeIE(InfoExtractor):
-    _VALID_URL = r'https?://(www\.videofy\.me/.+?|p\.videofy\.me/v)/(?P<id>\d+)(&|#|$)'
-    IE_NAME = u'videofy.me'
+    _VALID_URL = r'https?://(?:www\.videofy\.me/.+?|p\.videofy\.me/v)/(?P<id>\d+)(&|#|$)'
+    IE_NAME = 'videofy.me'
 
     _TEST = {
-        u'url': u'http://www.videofy.me/thisisvideofyme/1100701',
-        u'file':  u'1100701.mp4',
-        u'md5': u'c77d700bdc16ae2e9f3c26019bd96143',
-        u'info_dict': {
-            u'title': u'This is VideofyMe',
-            u'description': None,
-            u'uploader': u'VideofyMe',
-            u'uploader_id': u'thisisvideofyme',
+        'url': 'http://www.videofy.me/thisisvideofyme/1100701',
+        'md5': 'c77d700bdc16ae2e9f3c26019bd96143',
+        'info_dict': {
+            'id': '1100701',
+            'ext': 'mp4',
+            'title': 'This is VideofyMe',
+            'description': None,
+            'uploader': 'VideofyMe',
+            'uploader_id': 'thisisvideofyme',
+            'view_count': int,
         },
-        
+
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         config = self._download_xml('http://sunshine.videofy.me/?videoId=%s' % video_id,
-                                            video_id)
+                                    video_id)
         video = config.find('video')
         sources = video.find('sources')
-        url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key) 
-            for key in ['on', 'av', 'off']] if node is not None)
+        url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key)
+                                          for key in ['on', 'av', 'off']] if node is not None)
         video_url = url_node.find('url').text
+        view_count = int_or_none(self._search_regex(
+            r'([0-9]+)', video.find('views').text, 'view count', fatal=False))
 
-        return {'id': video_id,
-                'title': video.find('title').text,
-                'url': video_url,
-                'ext': determine_ext(video_url),
-                'thumbnail': video.find('thumb').text,
-                'description': video.find('description').text,
-                'uploader': config.find('blog/name').text,
-                'uploader_id': video.find('identifier').text,
-                'view_count': re.search(r'\d+', video.find('views').text).group(),
-                }
+        return {
+            'id': video_id,
+            'title': video.find('title').text,
+            'url': video_url,
+            'thumbnail': video.find('thumb').text,
+            'description': video.find('description').text,
+            'uploader': config.find('blog/name').text,
+            'uploader_id': video.find('identifier').text,
+            'view_count': view_count,
+        }
diff --git a/youtube_dl/extractor/videomega.py b/youtube_dl/extractor/videomega.py
new file mode 100644 (file)
index 0000000..29c4e01
--- /dev/null
@@ -0,0 +1,59 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    remove_start,
+)
+
+
+class VideoMegaIE(InfoExtractor):
+    _VALID_URL = r'''(?x)https?://
+        (?:www\.)?videomega\.tv/
+        (?:iframe\.php)?\?ref=(?P<id>[A-Za-z0-9]+)
+        '''
+    _TEST = {
+        'url': 'http://videomega.tv/?ref=GKeGPVedBe',
+        'md5': '240fb5bcf9199961f48eb17839b084d6',
+        'info_dict': {
+            'id': 'GKeGPVedBe',
+            'ext': 'mp4',
+            'title': 'XXL - All Sports United',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        url = 'http://videomega.tv/iframe.php?ref={0:}'.format(video_id)
+        webpage = self._download_webpage(url, video_id)
+
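+        # The player config is percent-encoded inside a JS unescape("...")
+        # call; decode it to reach the file and thumbnail URLs.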
+        escaped_data = self._search_regex(
+            r'unescape\("([^"]+)"\)', webpage, 'escaped data')
+        playlist = compat_urllib_parse.unquote(escaped_data)
+
+        thumbnail = self._search_regex(
+            r'image:\s*"([^"]+)"', playlist, 'thumbnail', fatal=False)
+        url = self._search_regex(r'file:\s*"([^"]+)"', playlist, 'URL')
+        title = remove_start(self._html_search_regex(
+            r'<title>(.*?)</title>', webpage, 'title'), 'VideoMega.tv - ')
+
+        formats = [{
+            'format_id': 'sd',
+            'url': url,
+        }]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': thumbnail,
+        }
index 65463c73324ca83ab87b45bc33d569c3fe881163..3176e3b9dda8580ca4c693343508367d12b25751 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import random
 
@@ -5,23 +7,22 @@ from .common import InfoExtractor
 
 
 class VideoPremiumIE(InfoExtractor):
-    _VALID_URL = r'(?:https?://)?(?:www\.)?videopremium\.(?:tv|me)/(?P<id>\w+)(?:/.*)?'
+    _VALID_URL = r'https?://(?:www\.)?videopremium\.(?:tv|me)/(?P<id>\w+)(?:/.*)?'
     _TEST = {
-        u'url': u'http://videopremium.tv/4w7oadjsf156',
-        u'file': u'4w7oadjsf156.f4v',
-        u'info_dict': {
-            u"title": u"youtube-dl_test_video____a_________-BaW_jenozKc.mp4.mp4"
+        'url': 'http://videopremium.tv/4w7oadjsf156',
+        'info_dict': {
+            'id': '4w7oadjsf156',
+            'ext': 'f4v',
+            'title': 'youtube-dl_test_video____a_________-BaW_jenozKc.mp4.mp4'
         },
-        u'params': {
-            u'skip_download': True,
+        'params': {
+            'skip_download': True,
         },
-        u'skip': u'Test file has been deleted.',
+        'skip': 'Test file has been deleted.',
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage_url = 'http://videopremium.tv/' + video_id
         webpage = self._download_webpage(webpage_url, video_id)
 
@@ -29,17 +30,17 @@ class VideoPremiumIE(InfoExtractor):
             # Download again, we need a cookie
             webpage = self._download_webpage(
                 webpage_url, video_id,
-                note=u'Downloading webpage again (with cookie)')
+                note='Downloading webpage again (with cookie)')
 
         video_title = self._html_search_regex(
-            r'<h2(?:.*?)>\s*(.+?)\s*<', webpage, u'video title')
+            r'<h2(?:.*?)>\s*(.+?)\s*<', webpage, 'video title')
 
         return {
-            'id':          video_id,
-            'url':         "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16),
-            'play_path':   "mp4:%s.f4v" % video_id,
-            'page_url':    "http://videopremium.tv/" + video_id,
-            'player_url':  "http://videopremium.tv/uplayer/uppod.swf",
-            'ext':         'f4v',
-            'title':       video_title,
+            'id': video_id,
+            'url': "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16),
+            'play_path': "mp4:%s.f4v" % video_id,
+            'page_url': "http://videopremium.tv/" + video_id,
+            'player_url': "http://videopremium.tv/uplayer/uppod.swf",
+            'ext': 'f4v',
+            'title': video_title,
         }
index a647807d01f8b3e5a2ed3eab99acc545533327d4..1f938838cc9247e9e80c8ce32911cb40954c1830 100644 (file)
@@ -58,4 +58,4 @@ class VideoTtIE(InfoExtractor):
             'like_count': int_or_none(video['liked']),
             'dislike_count': int_or_none(video['disliked']),
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 4a08ddd4390fedb6fc0944b6f3f0034e72c8bc03..ca2e50935def482fb3d3f7f5e7594d24b3dba2f1 100644 (file)
@@ -23,4 +23,4 @@ class VideoWeedIE(NovaMovIE):
             'title': 'optical illusion  dissapeared image magic illusion',
             'description': ''
         },
-    }
\ No newline at end of file
+    }
diff --git a/youtube_dl/extractor/vidme.py b/youtube_dl/extractor/vidme.py
new file mode 100644 (file)
index 0000000..5c89824
--- /dev/null
@@ -0,0 +1,68 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    float_or_none,
+    str_to_int,
+)
+
+
+class VidmeIE(InfoExtractor):
+    _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]+)'
+    _TEST = {
+        'url': 'https://vid.me/QNB',
+        'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
+        'info_dict': {
+            'id': 'QNB',
+            'ext': 'mp4',
+            'title': 'Fishing for piranha - the easy way',
+            'description': 'source: https://www.facebook.com/photo.php?v=312276045600871',
+            'duration': 119.92,
+            'timestamp': 1406313244,
+            'upload_date': '20140725',
+            'thumbnail': 're:^https?://.*\.jpg',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(r'<source src="([^"]+)"', webpage, 'video URL')
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage, default='')
+        thumbnail = self._og_search_thumbnail(webpage)
+        timestamp = int_or_none(self._og_search_property('updated_time', webpage, fatal=False))
+        width = int_or_none(self._og_search_property('video:width', webpage, fatal=False))
+        height = int_or_none(self._og_search_property('video:height', webpage, fatal=False))
+        duration = float_or_none(self._html_search_regex(
+            r'data-duration="([^"]+)"', webpage, 'duration', fatal=False))
+        view_count = str_to_int(self._html_search_regex(
+            r'<span class="video_views">\s*([\d,\.]+)\s*plays?', webpage, 'view count', fatal=False))
+        like_count = str_to_int(self._html_search_regex(
+            r'class="score js-video-vote-score"[^>]+data-score="([\d,\.\s]+)">',
+            webpage, 'like count', fatal=False))
+        comment_count = str_to_int(self._html_search_regex(
+            r'class="js-comment-count"[^>]+data-count="([\d,\.\s]+)">',
+            webpage, 'comment count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'width': width,
+            'height': height,
+            'duration': duration,
+            'view_count': view_count,
+            'like_count': like_count,
+            'comment_count': comment_count,
+        }
diff --git a/youtube_dl/extractor/vidzi.py b/youtube_dl/extractor/vidzi.py
new file mode 100644 (file)
index 0000000..08a5a7b
--- /dev/null
@@ -0,0 +1,32 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class VidziIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?vidzi\.tv/(?P<id>\w+)'
+    _TEST = {
+        'url': 'http://vidzi.tv/cghql9yq6emu.html',
+        'md5': '4f16c71ca0c8c8635ab6932b5f3f1660',
+        'info_dict': {
+            'id': 'cghql9yq6emu',
+            'ext': 'mp4',
+            'title': 'youtube-dl test video  1\\\\2\'3/4<5\\\\6ä7↭',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+        video_url = self._html_search_regex(
+            r'{\s*file\s*:\s*"([^"]+)"\s*}', webpage, 'video url')
+        title = self._html_search_regex(
+            r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title')
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+        }
index a3c6e83b01194d37b683912d131c93664dbf0680..06b0bed41e68401a8667cbabdca0d9796ea8ca3d 100644 (file)
@@ -7,18 +7,20 @@ import itertools
 
 from .common import InfoExtractor
 from .subtitles import SubtitlesInfoExtractor
-from ..utils import (
+from ..compat import (
     compat_HTTPError,
     compat_urllib_parse,
     compat_urllib_request,
-    clean_html,
-    get_element_by_attribute,
+    compat_urlparse,
+)
+from ..utils import (
     ExtractorError,
+    InAdvancePagedList,
+    int_or_none,
     RegexNotFoundError,
     std_headers,
     unsmuggle_url,
     urlencode_postdata,
-    int_or_none,
 )
 
 
@@ -54,9 +56,10 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
 
     # _VALID_URL matches Vimeo URLs
     _VALID_URL = r'''(?x)
-        (?P<proto>(?:https?:)?//)?
+        https?://
         (?:(?:www|(?P<player>player))\.)?
         vimeo(?P<pro>pro)?\.com/
+        (?!channels/[^/?#]+/?(?:$|[?#])|album/)
         (?:.*?/)?
         (?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
         (?:videos?/)?
@@ -88,6 +91,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
                 'uploader_id': 'openstreetmapus',
                 'uploader': 'OpenStreetMap US',
                 'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
+                'description': 'md5:380943ec71b89736ff4bf27183233d09',
                 'duration': 1595,
             },
         },
@@ -102,6 +106,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
                 'uploader': 'The BLN & Business of Software',
                 'uploader_id': 'theblnbusinessofsoftware',
                 'duration': 3610,
+                'description': None,
             },
         },
         {
@@ -116,11 +121,27 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
                 'uploader_id': 'user18948128',
                 'uploader': 'Jaime Marquínez Ferrándiz',
                 'duration': 10,
+                'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
             },
             'params': {
                 'videopassword': 'youtube-dl',
             },
         },
+        {
+            'url': 'http://vimeo.com/channels/keypeele/75629013',
+            'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
+            'note': 'Video is freely available via original URL '
+                    'and protected with password when accessed via http://vimeo.com/75629013',
+            'info_dict': {
+                'id': '75629013',
+                'ext': 'mp4',
+                'title': 'Key & Peele: Terrorist Interrogation',
+                'description': 'md5:8678b246399b070816b12313e8b4eb5c',
+                'uploader_id': 'atencio',
+                'uploader': 'Peter Atencio',
+                'duration': 187,
+            },
+        },
         {
             'url': 'http://vimeo.com/76979871',
             'md5': '3363dd6ffebe3784d56f4132317fd446',
@@ -136,17 +157,20 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
                 'duration': 62,
             }
         },
+        {
+            # from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
+            'url': 'https://player.vimeo.com/video/98044508',
+            'note': 'The js code contains assignments to the same variable as the config',
+            'info_dict': {
+                'id': '98044508',
+                'ext': 'mp4',
+                'title': 'Pier Solar OUYA Official Trailer',
+                'uploader': 'Tulio Gonçalves',
+                'uploader_id': 'user28849593',
+            },
+        },
     ]
 
-    @classmethod
-    def suitable(cls, url):
-        if VimeoChannelIE.suitable(url):
-            # Otherwise channel urls like http://vimeo.com/channels/31259 would
-            # match
-            return False
-        else:
-            return super(VimeoIE, cls).suitable(url)
-
     def _verify_video_password(self, url, video_id, webpage):
         password = self._downloader.params.get('videopassword', None)
         if password is None:
@@ -190,14 +214,17 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
         if data is not None:
             headers = headers.copy()
             headers.update(data)
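+        # Some embeds (e.g. review pages) refuse to serve the video without a
+        # Referer header, so default it to the page URL.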
+        if 'Referer' not in headers:
+            headers['Referer'] = url
 
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
+        orig_url = url
         if mobj.group('pro') or mobj.group('player'):
             url = 'http://player.vimeo.com/video/' + video_id
-        else:
-            url = 'https://vimeo.com/' + video_id
 
         # Retrieve video webpage to extract further information
         request = compat_urllib_request.Request(url, None, headers)
@@ -231,11 +256,11 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
                 # We try to find out to which variable is assigned the config dic
                 m_variable_name = re.search('(\w)\.video\.id', webpage)
                 if m_variable_name is not None:
-                    config_re = r'%s=({.+?});' % re.escape(m_variable_name.group(1))
+                    config_re = r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))
                 else:
                     config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                 config = self._search_regex(config_re, webpage, 'info section',
-                    flags=re.DOTALL)
+                                            flags=re.DOTALL)
                 config = json.loads(config)
         except Exception as e:
             if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
@@ -263,21 +288,28 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
         if video_thumbnail is None:
             video_thumbs = config["video"].get("thumbs")
             if video_thumbs and isinstance(video_thumbs, dict):
-                _, video_thumbnail = sorted((int(width), t_url) for (width, t_url) in video_thumbs.items())[-1]
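+                # thumbs keys are usually pixel widths, but non-numeric keys
+                # also occur; treat those as width 0 when picking the largest.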
+                _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
 
         # Extract video description
-        video_description = None
-        try:
-            video_description = get_element_by_attribute("class", "description_wrapper", webpage)
-            if video_description:
-                video_description = clean_html(video_description)
-        except AssertionError as err:
-            # On some pages like (http://player.vimeo.com/video/54469442) the
-            # html tags are not closed, python 2.6 cannot handle it
-            if err.args[0] == 'we should not get here!':
-                pass
-            else:
-                raise
+
+        video_description = self._html_search_regex(
+            r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
+            webpage, 'description', default=None)
+        if not video_description:
+            video_description = self._html_search_meta(
+                'description', webpage, default=None)
+        if not video_description and mobj.group('pro'):
+            orig_webpage = self._download_webpage(
+                orig_url, video_id,
+                note='Downloading webpage for description',
+                fatal=False)
+            if orig_webpage:
+                video_description = self._html_search_meta(
+                    'description', orig_webpage, default=None)
+        if not video_description and not mobj.group('player'):
+            self._downloader.report_warning('Cannot find video description')
 
         # Extract video duration
         video_duration = int_or_none(config["video"].get("duration"))
@@ -365,9 +395,16 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
 
 class VimeoChannelIE(InfoExtractor):
     IE_NAME = 'vimeo:channel'
-    _VALID_URL = r'(?:https?://)?vimeo\.com/channels/(?P<id>[^/]+)/?(\?.*)?$'
+    _VALID_URL = r'https?://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
     _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
     _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
+    _TESTS = [{
+        'url': 'http://vimeo.com/channels/tributes',
+        'info_dict': {
+            'title': 'Vimeo Tributes',
+        },
+        'playlist_mincount': 25,
+    }]
 
     def _page_url(self, base_url, pagenum):
         return '%s/videos/page:%d/' % (base_url, pagenum)
@@ -401,14 +438,15 @@ class VimeoChannelIE(InfoExtractor):
 
 class VimeoUserIE(VimeoChannelIE):
     IE_NAME = 'vimeo:user'
-    _VALID_URL = r'(?:https?://)?vimeo\.com/(?P<name>[^/]+)(?:/videos|[#?]|$)'
+    _VALID_URL = r'https?://vimeo\.com/(?![0-9]+(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
     _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
-
-    @classmethod
-    def suitable(cls, url):
-        if VimeoChannelIE.suitable(url) or VimeoIE.suitable(url) or VimeoAlbumIE.suitable(url) or VimeoGroupsIE.suitable(url):
-            return False
-        return super(VimeoUserIE, cls).suitable(url)
+    _TESTS = [{
+        'url': 'http://vimeo.com/nkistudio/videos',
+        'info_dict': {
+            'title': 'Nki',
+        },
+        'playlist_mincount': 66,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -418,8 +456,15 @@ class VimeoUserIE(VimeoChannelIE):
 
 class VimeoAlbumIE(VimeoChannelIE):
     IE_NAME = 'vimeo:album'
-    _VALID_URL = r'(?:https?://)?vimeo\.com/album/(?P<id>\d+)'
+    _VALID_URL = r'https?://vimeo\.com/album/(?P<id>\d+)'
     _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
+    _TESTS = [{
+        'url': 'http://vimeo.com/album/2632481',
+        'info_dict': {
+            'title': 'Staff Favorites: November 2013',
+        },
+        'playlist_mincount': 13,
+    }]
 
     def _page_url(self, base_url, pagenum):
         return '%s/page:%d/' % (base_url, pagenum)
@@ -433,6 +478,13 @@ class VimeoAlbumIE(VimeoChannelIE):
 class VimeoGroupsIE(VimeoAlbumIE):
     IE_NAME = 'vimeo:group'
     _VALID_URL = r'(?:https?://)?vimeo\.com/groups/(?P<name>[^/]+)'
+    _TESTS = [{
+        'url': 'http://vimeo.com/groups/rolexawards',
+        'info_dict': {
+            'title': 'Rolex Awards for Enterprise',
+        },
+        'playlist_mincount': 73,
+    }]
 
     def _extract_list_title(self, webpage):
         return self._og_search_title(webpage)
@@ -446,8 +498,8 @@ class VimeoGroupsIE(VimeoAlbumIE):
 class VimeoReviewIE(InfoExtractor):
     IE_NAME = 'vimeo:review'
     IE_DESC = 'Review pages on vimeo'
-    _VALID_URL = r'(?:https?://)?vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
-    _TEST = {
+    _VALID_URL = r'https?://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
+    _TESTS = [{
         'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
         'file': '75524534.mp4',
         'md5': 'c507a72f780cacc12b2248bb4006d253',
@@ -455,7 +507,19 @@ class VimeoReviewIE(InfoExtractor):
             'title': "DICK HARDWICK 'Comedian'",
             'uploader': 'Richard Hardwick',
         }
-    }
+    }, {
+        'note': 'video player needs Referer',
+        'url': 'http://vimeo.com/user22258446/review/91613211/13f927e053',
+        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
+        'info_dict': {
+            'id': '91613211',
+            'ext': 'mp4',
+            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
+            'uploader': 'DevWeek Events',
+            'duration': 2773,
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -470,6 +534,10 @@ class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
     _VALID_URL = r'https?://vimeo\.com/home/watchlater|:vimeowatchlater'
     _LOGIN_REQUIRED = True
     _TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'
+    _TESTS = [{
+        'url': 'http://vimeo.com/home/watchlater',
+        'only_matching': True,
+    }]
 
     def _real_initialize(self):
         self._login()
@@ -484,3 +552,60 @@ class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
 
     def _real_extract(self, url):
         return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')
+
+
+class VimeoLikesIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
+    IE_NAME = 'vimeo:likes'
+    IE_DESC = 'Vimeo user likes'
+    _TEST = {
+        'url': 'https://vimeo.com/user755559/likes/',
+        'playlist_mincount': 293,
+        "info_dict": {
+            "description": "See all the videos urza likes",
+            "title": 'Videos urza likes',
+        },
+    }
+
+    def _real_extract(self, url):
+        user_id = self._match_id(url)
+        webpage = self._download_webpage(url, user_id)
+        page_count = self._int(
+            self._search_regex(
+                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
+                    .*?</a></li>\s*<li\s+class="pagination_next">
+                ''', webpage, 'page count'),
+            'page count', fatal=True)
+        PAGE_SIZE = 12
+        title = self._html_search_regex(
+            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
+        description = self._html_search_meta('description', webpage)
+
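+        # InAdvancePagedList gets the page count and size up front and calls
+        # _get_page lazily, so only the pages actually needed are fetched.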
+        def _get_page(idx):
+            page_url = '%s//vimeo.com/user%s/likes/page:%d/sort:date' % (
+                self.http_scheme(), user_id, idx + 1)
+            webpage = self._download_webpage(
+                page_url, user_id,
+                note='Downloading page %d/%d' % (idx + 1, page_count))
+            video_list = self._search_regex(
+                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
+                webpage, 'video content')
+            paths = re.findall(
+                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
+            for path in paths:
+                yield {
+                    '_type': 'url',
+                    'url': compat_urlparse.urljoin(page_url, path),
+                }
+
+        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
+
+        return {
+            '_type': 'playlist',
+            'id': 'user%s_likes' % user_id,
+            'title': title,
+            'description': description,
+            'entries': pl,
+        }
index 076c87119943f3879845ccc3aaf74cdbebf73859..0b58fe0fe0b5188e9c9865e56ce064e94dbc45e5 100644 (file)
@@ -17,6 +17,7 @@ class VineIE(InfoExtractor):
             'id': 'b9KOOWX7HUx',
             'ext': 'mp4',
             'title': 'Chicken.',
+            'alt_title': 'Vine by Jack Dorsey',
             'description': 'Chicken.',
             'upload_date': '20130519',
             'uploader': 'Jack Dorsey',
@@ -25,30 +26,26 @@ class VineIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage('https://vine.co/v/' + video_id, video_id)
 
         data = json.loads(self._html_search_regex(
             r'window\.POST_DATA = { %s: ({.+?}) }' % video_id, webpage, 'vine data'))
 
-        formats = [
-            {
-                'url': data['videoLowURL'],
-                'ext': 'mp4',
-                'format_id': 'low',
-            },
-            {
-                'url': data['videoUrl'],
-                'ext': 'mp4',
-                'format_id': 'standard',
-            }
-        ]
+        formats = [{
+            'url': data['videoLowURL'],
+            'ext': 'mp4',
+            'format_id': 'low',
+        }, {
+            'url': data['videoUrl'],
+            'ext': 'mp4',
+            'format_id': 'standard',
+        }]
 
         return {
             'id': video_id,
             'title': self._og_search_title(webpage),
+            'alt_title': self._og_search_description(webpage),
             'description': data['description'],
             'thumbnail': data['thumbnailUrl'],
             'upload_date': unified_strdate(data['created']),
@@ -63,22 +60,36 @@ class VineIE(InfoExtractor):
 
 class VineUserIE(InfoExtractor):
     IE_NAME = 'vine:user'
-    _VALID_URL = r'(?:https?://)?vine\.co/(?P<user>[^/]+)/?(\?.*)?$'
+    _VALID_URL = r'(?:https?://)?vine\.co/(?P<u>u/)?(?P<user>[^/]+)/?(\?.*)?$'
     _VINE_BASE_URL = "https://vine.co/"
+    _TESTS = [
+        {
+            'url': 'https://vine.co/Visa',
+            'info_dict': {
+                'id': 'Visa',
+            },
+            'playlist_mincount': 46,
+        },
+        {
+            'url': 'https://vine.co/u/941705360593584128',
+            'only_matching': True,
+        },
+    ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         user = mobj.group('user')
+        u = mobj.group('u')
 
-        profile_url = "%sapi/users/profiles/vanity/%s" % (
-            self._VINE_BASE_URL, user)
+        profile_url = "%sapi/users/profiles/%s%s" % (
+            self._VINE_BASE_URL, 'vanity/' if not u else '', user)
         profile_data = self._download_json(
             profile_url, user, note='Downloading user profile data')
 
         user_id = profile_data['data']['userId']
         timeline_data = []
         for pagenum in itertools.count(1):
-            timeline_url = "%sapi/timelines/users/%s?page=%s" % (
+            timeline_url = "%sapi/timelines/users/%s?page=%s&size=100" % (
                 self._VINE_BASE_URL, user_id, pagenum)
             timeline_page = self._download_json(
                 timeline_url, user, note='Downloading page %d' % pagenum)
index 918bd10988a2c49fac2fc76bb7481e1a3ed7fdf1..ca6b0d5b3369c53b7e06715ea5a56fd338d71e41 100644 (file)
@@ -11,12 +11,13 @@ from ..utils import (
     compat_urllib_parse,
     compat_str,
     unescapeHTML,
-)
+    unified_strdate,
+    orderedSet)
 
 
 class VKIE(InfoExtractor):
     IE_NAME = 'vk.com'
-    _VALID_URL = r'https?://(?:m\.)?vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+)|(?:.+?\?.*?z=)?video(?P<videoid>.*?)(?:\?|%2F|$))'
+    _VALID_URL = r'https?://(?:m\.)?vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+)|(?:.+?\?.*?z=)?video(?P<videoid>[^s].*?)(?:\?|%2F|$))'
     _NETRC_MACHINE = 'vk'
 
     _TESTS = [
@@ -29,17 +30,19 @@ class VKIE(InfoExtractor):
                 'title': 'ProtivoGunz - Хуёвая песня',
                 'uploader': 're:Noize MC.*',
                 'duration': 195,
+                'upload_date': '20120212',
             },
         },
         {
-            'url': 'http://vk.com/video4643923_163339118',
-            'md5': 'f79bccb5cd182b1f43502ca5685b2b36',
+            'url': 'http://vk.com/video205387401_165548505',
+            'md5': '6c0aeb2e90396ba97035b9cbde548700',
             'info_dict': {
-                'id': '163339118',
+                'id': '165548505',
                 'ext': 'mp4',
-                'uploader': 'Elya Iskhakova',
-                'title': 'Dream Theater - Hollow Years Live at Budokan 720*',
-                'duration': 558,
+                'uploader': 'Tom Cruise',
+                'title': 'No name',
+                'duration': 9,
+                'upload_date': '20130721'
             }
         },
         {
@@ -52,9 +55,12 @@ class VKIE(InfoExtractor):
                 'uploader': 'Vladimir Gavrin',
                 'title': 'Lin Dan',
                 'duration': 101,
+                'upload_date': '20120730',
             }
         },
         {
+            # VIDEO NOW REMOVED
+            # please update if you find a video whose URL follows the same pattern
             'url': 'http://vk.com/video-8871596_164049491',
             'md5': 'a590bcaf3d543576c9bd162812387666',
             'note': 'Only available for registered users',
@@ -64,18 +70,7 @@ class VKIE(InfoExtractor):
                 'uploader': 'Триллеры',
                 'title': '► Бойцовский клуб / Fight Club 1999 [HD 720]',
                 'duration': 8352,
-            },
-            'skip': 'Requires vk account credentials',
-        },
-        {
-            'url': 'http://vk.com/feed?z=video-43215063_166094326%2Fbb50cacd3177146d7a',
-            'md5': 'd82c22e449f036282d1d3f7f4d276869',
-            'info_dict': {
-                'id': '166094326',
-                'ext': 'mp4',
-                'uploader': 'Киномания - лучшее из мира кино',
-                'title': 'Запах женщины (1992)',
-                'duration': 9392,
+                'upload_date': '20121218'
             },
             'skip': 'Requires vk account credentials',
         },
@@ -88,6 +83,7 @@ class VKIE(InfoExtractor):
                 'uploader': 'Киномания - лучшее из мира кино',
                 'title': ' ',
                 'duration': 7291,
+                'upload_date': '20140328',
             },
             'skip': 'Requires vk account credentials',
         },
@@ -100,9 +96,15 @@ class VKIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Книга Илая',
                 'duration': 6771,
+                'upload_date': '20140626',
             },
             'skip': 'Only works from Russia',
         },
+        {
+            # removed video, just testing that we match the pattern
+            'url': 'http://vk.com/feed?z=video-43215063_166094326%2Fbb50cacd3177146d7a',
+            'only_matching': True,
+        },
     ]
 
     def _login(self):
@@ -119,7 +121,7 @@ class VKIE(InfoExtractor):
         }
 
         request = compat_urllib_request.Request('https://login.vk.com/?act=login',
-            compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+                                                compat_urllib_parse.urlencode(login_form).encode('utf-8'))
         login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
 
         if re.search(r'onLoginFailed', login_page):
@@ -138,9 +140,23 @@ class VKIE(InfoExtractor):
         info_url = 'http://vk.com/al_video.php?act=show&al=1&video=%s' % video_id
         info_page = self._download_webpage(info_url, video_id)
 
-        if re.search(r'<!>Please log in or <', info_page):
-            raise ExtractorError('This video is only available for registered users, '
-                'use --username and --password options to provide account credentials.', expected=True)
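+        # Known error markers on the info page, mapped to readable messages;
+        # hitting any of them aborts extraction with an ExtractorError.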
+        ERRORS = {
+            r'>Видеозапись .*? была изъята из публичного доступа в связи с обращением правообладателя.<':
+            'Video %s has been removed from public access due to rightholder complaint.',
+
+            r'<!>Please log in or <':
+            'Video %s is only available for registered users, '
+            'use --username and --password options to provide account credentials.',
+
+            r'<!>Unknown error':
+            'Video %s does not exist.'
+        }
+
+        for error_re, error_msg in ERRORS.items():
+            if re.search(error_re, info_page):
+                raise ExtractorError(error_msg % video_id, expected=True)
 
         m_yt = re.search(r'src="(http://www.youtube.com/.*?)"', info_page)
         if m_yt is not None:
@@ -159,6 +173,12 @@ class VKIE(InfoExtractor):
         data_json = self._search_regex(r'var vars = ({.*?});', info_page, 'vars')
         data = json.loads(data_json)
 
+        # Extract upload date
+        upload_date = None
+        mobj = re.search(r'id="mv_date_wrap".*?Added ([a-zA-Z]+ [0-9]+), ([0-9]+) at', info_page)
+        if mobj is not None:
+            upload_date = unified_strdate(mobj.group(1) + ' ' + mobj.group(2))
+
         formats = [{
             'format_id': k,
             'url': v,
@@ -173,5 +194,30 @@ class VKIE(InfoExtractor):
             'title': unescapeHTML(data['md_title']),
             'thumbnail': data.get('jpg'),
             'uploader': data.get('md_author'),
-            'duration': data.get('duration')
+            'duration': data.get('duration'),
+            'upload_date': upload_date,
         }
+
+
+class VKUserVideosIE(InfoExtractor):
+    IE_NAME = 'vk.com:user-videos'
+    IE_DESC = 'All of a vk.com user\'s videos'
+    _VALID_URL = r'https?://vk\.com/videos(?P<id>[0-9]+)(?:m\?.*)?'
+    _TEMPLATE_URL = 'https://vk.com/videos'
+    _TEST = {
+        'url': 'http://vk.com/videos205387401',
+        'playlist_mincount': 4,
+    }
+
+    def _real_extract(self, url):
+        page_id = self._match_id(url)
+        page = self._download_webpage(url, page_id)
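+        # orderedSet deduplicates while preserving first-seen order; the same
+        # video can be linked several times on the profile page.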
+        video_ids = orderedSet(
+            m.group(1) for m in re.finditer(r'href="/video([0-9_]+)"', page))
+        url_entries = [
+            self.url_result(
+                'http://vk.com/video' + video_id, 'VK', video_id=video_id)
+            for video_id in video_ids]
+        return self.playlist_result(url_entries, page_id)
index 6d3b78749eaf1b25d23422343ab36c9d84c7cdc0..affef650726d716b7e80aaab5c66dab3bc3ddc28 100644 (file)
@@ -44,7 +44,7 @@ class VodlockerIE(InfoExtractor):
                 req, video_id, 'Downloading video page')
 
         title = self._search_regex(
-            r'id="file_title".*?>\s*(.*?)\s*<span', webpage, 'title')
+            r'id="file_title".*?>\s*(.*?)\s*<(?:br|span)', webpage, 'title')
         thumbnail = self._search_regex(
             r'image:\s*"(http[^\"]+)",', webpage, 'thumbnail')
         url = self._search_regex(
diff --git a/youtube_dl/extractor/vporn.py b/youtube_dl/extractor/vporn.py
new file mode 100644 (file)
index 0000000..2d23eff
--- /dev/null
@@ -0,0 +1,125 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    str_to_int,
+)
+
+
+class VpornIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?vporn\.com/[^/]+/(?P<display_id>[^/]+)/(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://www.vporn.com/masturbation/violet-on-her-th-birthday/497944/',
+            'md5': 'facf37c1b86546fa0208058546842c55',
+            'info_dict': {
+                'id': '497944',
+                'display_id': 'violet-on-her-th-birthday',
+                'ext': 'mp4',
+                'title': 'Violet on her 19th birthday',
+                'description': 'Violet dances in front of the camera which is sure to get you horny.',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'uploader': 'kileyGrope',
+                'categories': ['Masturbation', 'Teen'],
+                'duration': 393,
+                'age_limit': 18,
+                'view_count': int,
+                'like_count': int,
+                'dislike_count': int,
+                'comment_count': int,
+            }
+        },
+        {
+            'url': 'http://www.vporn.com/female/hana-shower/523564/',
+            'md5': 'ced35a4656198a1664cf2cda1575a25f',
+            'info_dict': {
+                'id': '523564',
+                'display_id': 'hana-shower',
+                'ext': 'mp4',
+                'title': 'Hana Shower',
+                'description': 'Hana showers at the bathroom.',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'uploader': 'Hmmmmm',
+                'categories': ['Big Boobs', 'Erotic', 'Teen', 'Female'],
+                'duration': 588,
+                'age_limit': 18,
+                'view_count': int,
+                'like_count': int,
+                'dislike_count': int,
+                'comment_count': int,
+            }
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        title = self._html_search_regex(
+            r'videoname\s*=\s*\'([^\']+)\'', webpage, 'title').strip()
+        description = self._html_search_regex(
+            r'<div class="description_txt">(.*?)</div>', webpage, 'description', fatal=False)
+        thumbnail = self._html_search_regex(
+            r'flashvars\.imageUrl\s*=\s*"([^"]+)"', webpage, 'thumbnail', fatal=False, default=None)
+        if thumbnail:
+            thumbnail = 'http://www.vporn.com' + thumbnail
+
+        uploader = self._html_search_regex(
+            r'(?s)UPLOADED BY.*?<a href="/user/[^"]+">([^<]+)</a>',
+            webpage, 'uploader', fatal=False)
+
+        categories = re.findall(r'<a href="/cat/[^"]+">([^<]+)</a>', webpage)
+
+        duration = parse_duration(self._search_regex(
+            r'duration (\d+ min \d+ sec)', webpage, 'duration', fatal=False))
+
+        view_count = str_to_int(self._html_search_regex(
+            r'<span>([\d,\.]+) VIEWS</span>', webpage, 'view count', fatal=False))
+        like_count = str_to_int(self._html_search_regex(
+            r'<span id="like" class="n">([\d,\.]+)</span>', webpage, 'like count', fatal=False))
+        dislike_count = str_to_int(self._html_search_regex(
+            r'<span id="dislike" class="n">([\d,\.]+)</span>', webpage, 'dislike count', fatal=False))
+        comment_count = str_to_int(self._html_search_regex(
+            r'<h4>Comments \(<b>([\d,\.]+)</b>\)</h4>', webpage, 'comment count', fatal=False))
+
+        formats = []
+
+        for video in re.findall(r'flashvars\.videoUrl([^=]+?)\s*=\s*"(https?://[^"]+)"', webpage):
+            video_url = video[1]
+            fmt = {
+                'url': video_url,
+                'format_id': video[0],
+            }
+            m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)_(?P<vbr>\d+)k\.mp4$', video_url)
+            if m:
+                fmt.update({
+                    'width': int(m.group('width')),
+                    'height': int(m.group('height')),
+                    'vbr': int(m.group('vbr')),
+                })
+            formats.append(fmt)
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'categories': categories,
+            'duration': duration,
+            'view_count': view_count,
+            'like_count': like_count,
+            'dislike_count': dislike_count,
+            'comment_count': comment_count,
+            'age_limit': 18,
+            'formats': formats,
+        }
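
When a Vporn format URL ends in the _<width>x<height>_<vbr>k.mp4 pattern, the
loop above mines resolution and bitrate straight out of the filename. A quick
check of that regex against a made-up URL:

    import re

    # Hypothetical URL, shaped like the ones the extractor encounters.
    video_url = 'http://example.com/videos/497944_854x480_1000k.mp4'

    m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)_(?P<vbr>\d+)k\.mp4$', video_url)
    if m:
        # width 854, height 480, vbr 1000
        print(int(m.group('width')), int(m.group('height')), int(m.group('vbr')))
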
diff --git a/youtube_dl/extractor/vrt.py b/youtube_dl/extractor/vrt.py
new file mode 100644 (file)
index 0000000..bbd3bbf
--- /dev/null
@@ -0,0 +1,95 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import float_or_none
+
+
+class VRTIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:deredactie|sporza|cobra)\.be/cm/(?:[^/]+/)+(?P<id>[^/]+)/*'
+    _TESTS = [
+        # deredactie.be
+        {
+            'url': 'http://deredactie.be/cm/vrtnieuws/videozone/programmas/journaal/EP_141025_JOL',
+            'md5': '4cebde1eb60a53782d4f3992cbd46ec8',
+            'info_dict': {
+                'id': '2129880',
+                'ext': 'flv',
+                'title': 'Het journaal L - 25/10/14',
+                'description': None,
+                'timestamp': 1414271750.949,
+                'upload_date': '20141025',
+                'duration': 929,
+            }
+        },
+        # sporza.be
+        {
+            'url': 'http://sporza.be/cm/sporza/videozone/programmas/extratime/EP_141020_Extra_time',
+            'md5': '11f53088da9bf8e7cfc42456697953ff',
+            'info_dict': {
+                'id': '2124639',
+                'ext': 'flv',
+                'title': 'Bekijk Extra Time van 20 oktober',
+                'description': 'md5:83ac5415a4f1816c6a93f8138aef2426',
+                'timestamp': 1413835980.560,
+                'upload_date': '20141020',
+                'duration': 3238,
+            }
+        },
+        # cobra.be
+        {
+            'url': 'http://cobra.be/cm/cobra/videozone/rubriek/film-videozone/141022-mv-ellis-cafecorsari',
+            'md5': '78a2b060a5083c4f055449a72477409d',
+            'info_dict': {
+                'id': '2126050',
+                'ext': 'flv',
+                'title': 'Bret Easton Ellis in Café Corsari',
+                'description': 'md5:f699986e823f32fd6036c1855a724ee9',
+                'timestamp': 1413967500.494,
+                'upload_date': '20141022',
+                'duration': 661,
+            }
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_id = self._search_regex(
+            r'data-video-id="([^"]+)_[^"]+"', webpage, 'video id', fatal=False)
+
+        formats = []
+        mobj = re.search(
+            r'data-video-iphone-server="(?P<server>[^"]+)"\s+data-video-iphone-path="(?P<path>[^"]+)"',
+            webpage)
+        if mobj:
+            formats.extend(self._extract_m3u8_formats(
+                '%s/%s' % (mobj.group('server'), mobj.group('path')),
+                video_id, 'mp4'))
+        mobj = re.search(r'data-video-src="(?P<src>[^"]+)"', webpage)
+        if mobj:
+            formats.extend(self._extract_f4m_formats(
+                '%s/manifest.f4m' % mobj.group('src'), video_id))
+        self._sort_formats(formats)
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage, default=None)
+        thumbnail = self._og_search_thumbnail(webpage)
+        timestamp = float_or_none(self._search_regex(
+            r'data-video-sitestat-pubdate="(\d+)"', webpage, 'timestamp', fatal=False), 1000)
+        duration = float_or_none(self._search_regex(
+            r'data-video-duration="(\d+)"', webpage, 'duration', fatal=False), 1000)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'duration': duration,
+            'formats': formats,
+        }
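
Both data-video-sitestat-pubdate and data-video-duration carry millisecond
values, hence the scale argument of 1000. A sketch of the helper as used here
(re-implemented after youtube_dl.utils.float_or_none for illustration):

    def float_or_none(v, scale=1, invscale=1, default=None):
        # None-safe float conversion with scaling, as in youtube_dl.utils.
        if v is None:
            return default
        return float(v) * invscale / scale

    print(float_or_none('1414271750949', 1000))  # 1414271750.949, the deredactie test timestamp
    print(float_or_none(None, 1000))             # None when the regex matched nothing
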
index f1b9e9a19d05d9026feb24b6f22d395cd3990e5f..1b2f731e932a63fbc7722251c0b4e57f0963c34c 100644 (file)
@@ -1,10 +1,13 @@
 from __future__ import unicode_literals
 
-import json
 import re
 
 from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+    int_or_none,
+    compat_str,
+    ExtractorError,
+)
 
 
 class VubeIE(InfoExtractor):
@@ -14,6 +17,24 @@ class VubeIE(InfoExtractor):
 
     _TESTS = [
         {
+            'url': 'http://vube.com/trending/William+Wei/Y8NUZ69Tf7?t=s',
+            'md5': 'e7aabe1f8f1aa826b9e4735e1f9cee42',
+            'info_dict': {
+                'id': 'Y8NUZ69Tf7',
+                'ext': 'mp4',
+                'title': 'Best Drummer Ever [HD]',
+                'description': 'md5:2d63c4b277b85c2277761c2cf7337d71',
+                'thumbnail': 're:^https?://.*\.jpg',
+                'uploader': 'William',
+                'timestamp': 1406876915,
+                'upload_date': '20140801',
+                'duration': 258.051,
+                'like_count': int,
+                'dislike_count': int,
+                'comment_count': int,
+                'categories': ['amazing', 'hd', 'best drummer ever', 'william wei', 'bucket drumming', 'street drummer', 'epic street drumming'],
+            },
+        }, {
             'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
             'md5': 'db7aba89d4603dadd627e9d1973946fe',
             'info_dict': {
@@ -29,7 +50,9 @@ class VubeIE(InfoExtractor):
                 'like_count': int,
                 'dislike_count': int,
                 'comment_count': int,
-            }
+                'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'],
+            },
+            'skip': 'Removed due to DMCA',
         },
         {
             'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
@@ -47,7 +70,9 @@ class VubeIE(InfoExtractor):
                 'like_count': int,
                 'dislike_count': int,
                 'comment_count': int,
-            }
+                'categories': ['seraina', 'jessica', 'krewella', 'alive'],
+            },
+            'skip': 'Removed due to DMCA',
         }, {
             'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
             'md5': '0584fc13b50f887127d9d1007589d27f',
@@ -56,14 +81,17 @@ class VubeIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Frozen - Let It Go Cover by Siren Gene',
                 'description': 'My rendition of "Let It Go" originally sung by Idina Menzel.',
-                'uploader': 'Siren Gene',
-                'uploader_id': 'Siren',
                 'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/10283ab622a-86c9-4681-51f2-30d1f65774af\.jpg$',
+                'uploader': 'Siren',
+                'timestamp': 1395448018,
+                'upload_date': '20140322',
                 'duration': 221.788,
                 'like_count': int,
                 'dislike_count': int,
                 'comment_count': int,
-            }
+                'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'],
+            },
+            'skip': 'Removed due to DMCA',
         }
     ]
 
@@ -71,47 +99,45 @@ class VubeIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
-        webpage = self._download_webpage(url, video_id)
-        data_json = self._search_regex(
-            r'(?s)window\["(?:tapiVideoData|vubeOriginalVideoData)"\]\s*=\s*(\{.*?\n});\n',
-            webpage, 'video data'
-        )
-        data = json.loads(data_json)
-        video = (
-            data.get('video') or
-            data)
-        assert isinstance(video, dict)
+        video = self._download_json(
+            'http://vube.com/t-api/v1/video/%s' % video_id, video_id, 'Downloading video JSON')
 
         public_id = video['public_id']
 
-        formats = [
-            {
-                'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (fmt['media_resolution_id'], public_id),
-                'height': int(fmt['height']),
-                'abr': int(fmt['audio_bitrate']),
-                'vbr': int(fmt['video_bitrate']),
-                'format_id': fmt['media_resolution_id']
-            } for fmt in video['mtm'] if fmt['transcoding_status'] == 'processed'
-        ]
+        formats = []
+
+        for media in video['media'].get('video', []) + video['media'].get('audio', []):
+            if media['transcoding_status'] != 'processed':
+                continue
+            fmt = {
+                'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (media['media_resolution_id'], public_id),
+                'abr': int(media['audio_bitrate']),
+                'format_id': compat_str(media['media_resolution_id']),
+            }
+            vbr = int(media['video_bitrate'])
+            if vbr:
+                fmt.update({
+                    'vbr': vbr,
+                    'height': int(media['height']),
+                })
+            formats.append(fmt)
 
+        # Check for a DMCA takedown before _sort_formats, which raises on an
+        # empty format list and would otherwise mask this more specific error.
+        if not formats and video.get('vst') == 'dmca':
+            raise ExtractorError(
+                'This video has been removed in response to a complaint received under the US Digital Millennium Copyright Act.',
+                expected=True)
+
         self._sort_formats(formats)
 
         title = video['title']
         description = video.get('description')
-        thumbnail = self._proto_relative_url(
-            video.get('thumbnail') or video.get('thumbnail_src'),
-            scheme='http:')
-        uploader = data.get('user', {}).get('channel', {}).get('name') or video.get('user_alias')
-        uploader_id = data.get('user', {}).get('name')
+        thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:')
+        uploader = video.get('user_alias') or video.get('channel')
         timestamp = int_or_none(video.get('upload_time'))
         duration = video['duration']
         view_count = video.get('raw_view_count')
-        like_count = video.get('rlikes')
-        if like_count is None:
-            like_count = video.get('total_likes')
-        dislike_count = video.get('rhates')
-        if dislike_count is None:
-            dislike_count = video.get('total_hates')
+        like_count = video.get('total_likes')
+        dislike_count = video.get('total_hates')
 
         comments = video.get('comments')
         comment_count = None
@@ -124,6 +150,8 @@ class VubeIE(InfoExtractor):
         else:
             comment_count = len(comments)
 
+        categories = [tag['text'] for tag in video['tags']]
+
         return {
             'id': video_id,
             'formats': formats,
@@ -131,11 +159,11 @@ class VubeIE(InfoExtractor):
             'description': description,
             'thumbnail': thumbnail,
             'uploader': uploader,
-            'uploader_id': uploader_id,
             'timestamp': timestamp,
             'duration': duration,
             'view_count': view_count,
             'like_count': like_count,
             'dislike_count': dislike_count,
             'comment_count': comment_count,
+            'categories': categories,
         }
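
The rewritten Vube loop treats audio-only and video rows from the t-api JSON
uniformly: every processed row gets a URL, an audio bitrate and a format ID,
and height/vbr are attached only when a non-zero video bitrate is reported. A
condensed sketch of that branching over hypothetical API rows:

    def build_formats(media_rows, public_id):
        # Same shape as the VubeIE loop: skip unprocessed rows and tag
        # video-specific fields only when a video bitrate is present.
        formats = []
        for media in media_rows:
            if media['transcoding_status'] != 'processed':
                continue
            fmt = {
                'url': 'http://video.thestaticvube.com/video/%s/%s.mp4'
                       % (media['media_resolution_id'], public_id),
                'abr': int(media['audio_bitrate']),
                'format_id': str(media['media_resolution_id']),
            }
            vbr = int(media['video_bitrate'])
            if vbr:
                fmt.update({'vbr': vbr, 'height': int(media['height'])})
            formats.append(fmt)
        return formats

    # Hypothetical rows: a finished video, an audio-only track, one still transcoding.
    rows = [
        {'transcoding_status': 'processed', 'media_resolution_id': 1,
         'audio_bitrate': 128, 'video_bitrate': 500, 'height': 360},
        {'transcoding_status': 'processed', 'media_resolution_id': 9,
         'audio_bitrate': 128, 'video_bitrate': 0, 'height': 0},
        {'transcoding_status': 'pending', 'media_resolution_id': 2,
         'audio_bitrate': 128, 'video_bitrate': 900, 'height': 720},
    ]
    print([f['format_id'] for f in build_formats(rows, 'Y8NUZ69Tf7')])  # ['1', '9']
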
index fb0600f1a911b0f63922f112221e0cc025d27cce..ec3c010ad7e151bfc304315cdc5fd32bc21e8f43 100644 (file)
@@ -5,6 +5,7 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     compat_urllib_parse_urlparse,
+    ExtractorError,
     parse_duration,
     qualities,
 )
@@ -14,13 +15,12 @@ class VuClipIE(InfoExtractor):
     _VALID_URL = r'http://(?:m\.)?vuclip\.com/w\?.*?cid=(?P<id>[0-9]+)'
 
     _TEST = {
-        'url': 'http://m.vuclip.com/w?cid=843902317&fid=63532&z=1007&nvar&frm=index.html&bu=4757321434',
-        'md5': '92ac9d1ccefec4f0bb474661ab144fcf',
+        'url': 'http://m.vuclip.com/w?cid=922692425&fid=70295&z=1010&nvar&frm=index.html',
         'info_dict': {
-            'id': '843902317',
+            'id': '922692425',
             'ext': '3gp',
-            'title': 'Movie Trailer: Noah',
-            'duration': 139,
+            'title': 'The Toy Soldiers - Hollywood Movie Trailer',
+            'duration': 180,
         }
     }
 
@@ -37,16 +37,32 @@ class VuClipIE(InfoExtractor):
             webpage = self._download_webpage(
                adfree_url, video_id, note='Downloading post-ad page')
 
+        error_msg = self._html_search_regex(
+            r'<p class="message">(.*?)</p>', webpage, 'error message',
+            default=None)
+        if error_msg:
+            raise ExtractorError(
+                '%s said: %s' % (self.IE_NAME, error_msg), expected=True)
+
+        # These clowns alternate between two page types
         links_code = self._search_regex(
-            r'(?s)<div class="social align_c".*?>(.*?)<hr\s*/?>', webpage,
-            'links')
+            r'''(?xs)
+                (?:
+                    <img\s+src="/im/play.gif".*?>|
+                    <!--\ player\ end\ -->\s*</div><!--\ thumb\ end-->
+                )
+                (.*?)
+                (?:
+                    <a\s+href="fblike|<div\s+class="social">
+                )
+            ''', webpage, 'links')
         title = self._html_search_regex(
             r'<title>(.*?)-\s*Vuclip</title>', webpage, 'title').strip()
 
         quality_order = qualities(['Reg', 'Hi'])
         formats = []
         for url, q in re.findall(
-                r'<a href="(?P<url>[^"]+)".*?>(?P<q>[^<]+)</a>', links_code):
+                r'<a\s+href="(?P<url>[^"]+)".*?>(?:<button[^>]*>)?(?P<q>[^<]+)(?:</button>)?</a>', links_code):
             format_id = compat_urllib_parse_urlparse(url).scheme + '-' + q
             formats.append({
                 'format_id': format_id,
@@ -56,7 +72,7 @@ class VuClipIE(InfoExtractor):
         self._sort_formats(formats)
 
         duration = parse_duration(self._search_regex(
-            r'\(([0-9:]+)\)</span></h1>', webpage, 'duration', fatal=False))
+            r'\(([0-9:]+)\)</span>', webpage, 'duration', fatal=False))
 
         return {
             'id': video_id,
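
The qualities(['Reg', 'Hi']) call above builds a ranking function so that 'Hi'
links outrank 'Reg' ones once _sort_formats runs. A sketch mirroring
youtube_dl.utils.qualities:

    def qualities(quality_ids):
        # Map a quality label to its index in the preference list;
        # unknown labels rank below everything.
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    quality_order = qualities(['Reg', 'Hi'])
    print(quality_order('Hi'), quality_order('Reg'), quality_order('4K'))  # 1 0 -1
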
diff --git a/youtube_dl/extractor/walla.py b/youtube_dl/extractor/walla.py
new file mode 100644 (file)
index 0000000..672bda7
--- /dev/null
@@ -0,0 +1,89 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .subtitles import SubtitlesInfoExtractor
+from ..utils import (
+    xpath_text,
+    int_or_none,
+)
+
+
+class WallaIE(SubtitlesInfoExtractor):
+    _VALID_URL = r'http://vod\.walla\.co\.il/[^/]+/(?P<id>\d+)/(?P<display_id>.+)'
+    _TEST = {
+        'url': 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one',
+        'info_dict': {
+            'id': '2642630',
+            'display_id': 'one-direction-all-for-one',
+            'ext': 'flv',
+            'title': 'וואן דיירקשן: ההיסטריה',
+            'description': 'md5:de9e2512a92442574cdb0913c49bc4d8',
+            'thumbnail': 're:^https?://.*\.jpg',
+            'duration': 3600,
+        },
+        'params': {
+            # rtmp download
+            'skip_download': True,
+        }
+    }
+
+    _SUBTITLE_LANGS = {
+        'עברית': 'heb',
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        video = self._download_xml(
+            'http://video2.walla.co.il/?w=null/null/%s/@@/video/flv_pl' % video_id,
+            display_id)
+
+        item = video.find('./items/item')
+
+        title = xpath_text(item, './title', 'title')
+        description = xpath_text(item, './synopsis', 'description')
+        thumbnail = xpath_text(item, './preview_pic', 'thumbnail')
+        duration = int_or_none(xpath_text(item, './duration', 'duration'))
+
+        subtitles = {}
+        for subtitle in item.findall('./subtitles/subtitle'):
+            lang = xpath_text(subtitle, './title')
+            subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = xpath_text(subtitle, './src')
+
+        if self._downloader.params.get('listsubtitles', False):
+            self._list_available_subtitles(video_id, subtitles)
+            return
+
+        subtitles = self.extract_subtitles(video_id, subtitles)
+
+        formats = []
+        for quality in item.findall('./qualities/quality'):
+            format_id = xpath_text(quality, './title')
+            fmt = {
+                'url': 'rtmp://wafla.walla.co.il/vod',
+                'play_path': xpath_text(quality, './src'),
+                'player_url': 'http://isc.walla.co.il/w9/swf/video_swf/vod/WallaMediaPlayerAvod.swf',
+                'page_url': url,
+                'ext': 'flv',
+                'format_id': format_id,
+            }
+            m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
+            if m:
+                fmt['height'] = int(m.group('height'))
+            formats.append(fmt)
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'formats': formats,
+            'subtitles': subtitles,
+        }
index cb8f0887de292c2d36f5f2ac698e7321674e4b2d..88bbbb21967c6807c536ecc5b06d8d8f41095219 100644 (file)
@@ -13,6 +13,9 @@ class WashingtonPostIE(InfoExtractor):
     _VALID_URL = r'^https?://(?:www\.)?washingtonpost\.com/.*?/(?P<id>[^/]+)/(?:$|[?#])'
     _TEST = {
         'url': 'http://www.washingtonpost.com/sf/national/2014/03/22/sinkhole-of-bureaucracy/',
+        'info_dict': {
+            'title': 'Sinkhole of bureaucracy',
+        },
         'playlist': [{
             'md5': 'c3f4b4922ffa259243f68e928db2db8c',
             'info_dict': {
index a584e08966ac57354c51a71d7fee520d7ce67df8..bf9e40bad7c29e01b231b2ecf9e0cbb53295d52f 100644 (file)
@@ -2,29 +2,47 @@
 from __future__ import unicode_literals
 
 import re
+import hashlib
 
 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
     unified_strdate,
 )
 
 
 class WatIE(InfoExtractor):
-    _VALID_URL = r'http://www\.wat\.tv/.*-(?P<shortID>.*?)_.*?\.html'
+    _VALID_URL = r'http://www\.wat\.tv/video/(?P<display_id>.*)-(?P<short_id>.*?)_.*?\.html'
     IE_NAME = 'wat.tv'
-    _TEST = {
-        'url': 'http://www.wat.tv/video/world-war-philadelphia-vost-6bv55_2fjr7_.html',
-        'info_dict': {
-            'id': '10631273',
-            'ext': 'mp4',
-            'title': 'World War Z - Philadelphia VOST',
-            'description': 'La menace est partout. Que se passe-t-il à Philadelphia ?\r\nWORLD WAR Z, avec Brad Pitt, au cinéma le 3 juillet.\r\nhttp://www.worldwarz.fr',
+    _TESTS = [
+        {
+            'url': 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html',
+            'md5': 'ce70e9223945ed26a8056d413ca55dc9',
+            'info_dict': {
+                'id': '11713067',
+                'display_id': 'soupe-figues-l-orange-aux-epices',
+                'ext': 'mp4',
+                'title': 'Soupe de figues à l\'orange et aux épices',
+                'description': 'Retrouvez l\'émission "Petits plats en équilibre", diffusée le 18 août 2014.',
+                'upload_date': '20140819',
+                'duration': 120,
+            },
         },
-        'params': {
-            # Sometimes wat serves the whole file with the --test option
-            'skip_download': True,
+        {
+            'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html',
+            'md5': 'fbc84e4378165278e743956d9c1bf16b',
+            'info_dict': {
+                'id': '11713075',
+                'display_id': 'gregory-lemarchal-voix-ange',
+                'ext': 'mp4',
+                'title': 'Grégory Lemarchal, une voix d\'ange depuis 10 ans (1/3)',
+                'description': 'md5:b7a849cf16a2b733d9cd10c52906dee3',
+                'upload_date': '20140816',
+                'duration': 2910,
+            },
+            'skip': "Ce contenu n'est pas disponible pour l'instant.",
         },
-    }
+    ]
 
     def download_video_info(self, real_id):
         # 'contentv4' is used in the website, but it also returns the related
@@ -36,13 +54,25 @@ class WatIE(InfoExtractor):
         def real_id_for_chapter(chapter):
             return chapter['tc_start'].split('-')[0]
         mobj = re.match(self._VALID_URL, url)
-        short_id = mobj.group('shortID')
-        webpage = self._download_webpage(url, short_id)
+        short_id = mobj.group('short_id')
+        display_id = mobj.group('display_id')
+        webpage = self._download_webpage(url, display_id or short_id)
         real_id = self._search_regex(r'xtpage = ".*-(.*?)";', webpage, 'real id')
 
         video_info = self.download_video_info(real_id)
+
+        error_desc = video_info.get('error_desc')
+        if error_desc:
+            raise ExtractorError(
+                '%s returned error: %s' % (self.IE_NAME, error_desc), expected=True)
+
+        geo_list = video_info.get('geoList')
+        country = geo_list[0] if geo_list else ''
+
         chapters = video_info['chapters']
         first_chapter = chapters[0]
+        files = video_info['files']
+        first_file = files[0]
 
         if real_id_for_chapter(first_chapter) != real_id:
             self.to_screen('Multipart video detected')
@@ -61,12 +91,47 @@ class WatIE(InfoExtractor):
             upload_date = unified_strdate(first_chapter['date_diffusion'])
         # Otherwise we can continue and extract just one part, we have to use
         # the short id for getting the video url
+
+        formats = [{
+            'url': 'http://wat.tv/get/android5/%s.mp4' % real_id,
+            'format_id': 'Mobile',
+        }]
+
+        fmts = [('SD', 'web')]
+        if first_file.get('hasHD'):
+            fmts.append(('HD', 'webhd'))
+
+        def compute_token(param):
+            timestamp = '%08x' % int(self._download_webpage(
+                'http://www.wat.tv/servertime', real_id,
+                'Downloading server time').split('|')[0])
+            magic = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'
+            return '%s/%s' % (hashlib.md5((magic + param + timestamp).encode('ascii')).hexdigest(), timestamp)
+
+        for fmt in fmts:
+            webid = '/%s/%s' % (fmt[1], real_id)
+            video_url = self._download_webpage(
+                'http://www.wat.tv/get%s?token=%s&getURL=1&country=%s' % (webid, compute_token(webid), country),
+                real_id,
+                'Downloading %s video URL' % fmt[0],
+                'Failed to download %s video URL' % fmt[0],
+                False)
+            if not video_url:
+                continue
+            formats.append({
+                'url': video_url,
+                'ext': 'mp4',
+                'format_id': fmt[0],
+            })
+
         return {
             'id': real_id,
-            'url': 'http://wat.tv/get/android5/%s.mp4' % real_id,
+            'display_id': display_id,
             'title': first_chapter['title'],
             'thumbnail': first_chapter['preview'],
             'description': first_chapter['description'],
             'view_count': video_info['views'],
             'upload_date': upload_date,
+            'duration': first_file['duration'],
+            'formats': formats,
         }
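
compute_token signs each format request: the current server time is rendered
as eight hex digits, appended to the magic constant plus the request path, and
the MD5 hex digest is joined back with that timestamp. A standalone sketch
with the network fetch stubbed out (the server time below is a hypothetical
value; the extractor reads the real one from http://www.wat.tv/servertime):

    import hashlib

    MAGIC = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'

    def compute_token(param, server_time):
        timestamp = '%08x' % server_time
        digest = hashlib.md5((MAGIC + param + timestamp).encode('ascii')).hexdigest()
        return '%s/%s' % (digest, timestamp)

    # '/web/11713067' matches the webid built for the SD format above.
    print(compute_token('/web/11713067', 1408450000))
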
diff --git a/youtube_dl/extractor/wayofthemaster.py b/youtube_dl/extractor/wayofthemaster.py
new file mode 100644 (file)
index 0000000..af7bb8b
--- /dev/null
@@ -0,0 +1,52 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class WayOfTheMasterIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.wayofthemaster\.com/([^/?#]*/)*(?P<id>[^/?#]+)\.s?html(?:$|[?#])'
+
+    _TEST = {
+        'url': 'http://www.wayofthemaster.com/hbks.shtml',
+        'md5': '5316b57487ada8480606a93cb3d18d24',
+        'info_dict': {
+            'id': 'hbks',
+            'ext': 'mp4',
+            'title': 'Intelligent Design vs. Evolution',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._search_regex(
+            r'<img src="images/title_[^"]+".*?alt="([^"]+)"',
+            webpage, 'title', default=None)
+        if title is None:
+            title = self._html_search_regex(
+                r'<title>(.*?)</title>', webpage, 'page title')
+
+        url_base = self._search_regex(
+            r'<param\s+name="?movie"?\s+value=".*?/wotm_videoplayer_highlow[0-9]*\.swf\?vid=([^"]+)"',
+            webpage, 'URL base')
+        formats = [{
+            'format_id': 'low',
+            'quality': 1,
+            'url': url_base + '_low.mp4',
+        }, {
+            'format_id': 'high',
+            'quality': 2,
+            'url': url_base + '_high.mp4',
+        }]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+        }
index 54d37da618960d46d977ff4b97682ca0c7b2a99c..93a6e64542c71be1f05dac1e351d5c57ecd28ff7 100644 (file)
@@ -224,4 +224,4 @@ class WDRMausIE(InfoExtractor):
             'upload_date': upload_date,
         }
 
-# TODO test _1
\ No newline at end of file
+# TODO test _1
index b24297a409911c79433cca404dc94206009aefe5..20bb039d38dd9c62ff6c38135f67f9aa41f484aa 100644 (file)
@@ -41,7 +41,7 @@ class WeiboIE(InfoExtractor):
         videos_urls = sorted(videos_urls, key=lambda u: 'video.sina.com' in u)
         player_url = videos_urls[-1]
         m_sina = re.match(r'https?://video\.sina\.com\.cn/v/b/(\d+)-\d+\.html',
-            player_url)
+                          player_url)
         if m_sina is not None:
             self.to_screen('Sina video detected')
             sina_id = m_sina.group(1)
index c27dda9440e62274e13b9359f24c2a909516b4bc..d6dec25ca9e7bb9de539e89c147e22b7381e3719 100644 (file)
@@ -37,7 +37,7 @@ class WimpIE(InfoExtractor):
         video_id = mobj.group(1)
         webpage = self._download_webpage(url, video_id)
         video_url = self._search_regex(
-            r's1\.addVariable\("file",\s*"([^"]+)"\);', webpage, 'video URL')
+            r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", webpage, 'video URL')
         if YoutubeIE.suitable(video_url):
             self.to_screen('Found YouTube video')
             return {
index e6bfa9e147a2b62e6dbcdd52343675bb9ca52a65..748443f811f184d4276d4628cd13ed1e2bf92d9c 100644 (file)
@@ -1,13 +1,14 @@
 from __future__ import unicode_literals
 
-import json
 import re
 
 from .common import InfoExtractor
+from ..utils import ExtractorError, compat_urllib_request
 
 
 class WistiaIE(InfoExtractor):
     _VALID_URL = r'https?://(?:fast\.)?wistia\.net/embed/iframe/(?P<id>[a-z0-9]+)'
+    _API_URL = 'http://fast.wistia.com/embed/medias/{0}.json'
 
     _TEST = {
         'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt',
@@ -24,11 +25,13 @@ class WistiaIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
-        webpage = self._download_webpage(url, video_id)
-        data_json = self._html_search_regex(
-            r'Wistia\.iframeInit\((.*?), {}\);', webpage, 'video data')
-
-        data = json.loads(data_json)
+        request = compat_urllib_request.Request(self._API_URL.format(video_id))
+        request.add_header('Referer', url)  # Some videos require this.
+        data_json = self._download_json(request, video_id)
+        if data_json.get('error'):
+            raise ExtractorError('Error while getting the playlist',
+                                 expected=True)
+        data = data_json['media']
 
         formats = []
         thumbnails = []
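
Wistia's JSON endpoint refuses some requests unless the embedding page is sent
as a Referer, hence the explicit header on the Request object. A minimal
sketch of the same fetch outside the extractor framework, using the test
video's ID:

    import json
    try:
        from urllib.request import Request, urlopen  # Python 3
    except ImportError:
        from urllib2 import Request, urlopen  # Python 2 (compat_urllib_request)

    video_id = 'sh7fpupwlt'
    request = Request('http://fast.wistia.com/embed/medias/%s.json' % video_id)
    # Some videos 404 without the embedding page as Referer.
    request.add_header('Referer', 'http://fast.wistia.net/embed/iframe/%s' % video_id)

    data_json = json.loads(urlopen(request).read().decode('utf-8'))
    if data_json.get('error'):
        raise RuntimeError('Error while getting the playlist')
    media = data_json['media']
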
index 4e89acd81bbb5ddfb97b1da3381b1b9f25873c96..d5c26a032bcf28a9c8ae79e1d083d67ed29b2726 100644 (file)
@@ -13,37 +13,35 @@ class WorldStarHipHopIE(InfoExtractor):
         "info_dict": {
             "id": "wshh6a7q1ny0G34ZwuIO",
             "ext": "mp4",
-            "title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
+            "title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
         }
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
 
-        webpage_src = self._download_webpage(url, video_id)
-
-        m_vevo_id = re.search(r'videoId=(.*?)&amp?',
-                              webpage_src)
+        m_vevo_id = re.search(r'videoId=(.*?)&amp?', webpage)
         if m_vevo_id is not None:
             return self.url_result('vevo:%s' % m_vevo_id.group(1), ie='Vevo')
 
         video_url = self._search_regex(
-            r'so\.addVariable\("file","(.*?)"\)', webpage_src, 'video URL')
+            r'so\.addVariable\("file","(.*?)"\)', webpage, 'video URL')
 
         if 'youtube' in video_url:
             return self.url_result(video_url, ie='Youtube')
 
         video_title = self._html_search_regex(
-            r"<title>(.*)</title>", webpage_src, 'title')
+            r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>',
+            webpage, 'title')
 
         # Grab the thumbnail; if it is missing, this is a WSHH candy video whose real title lives in a candytitles element.
         thumbnail = self._html_search_regex(
-            r'rel="image_src" href="(.*)" />', webpage_src, 'thumbnail',
+            r'rel="image_src" href="(.*)" />', webpage, 'thumbnail',
             fatal=False)
         if not thumbnail:
-            _title = r"""candytitles.*>(.*)</span>"""
-            mobj = re.search(_title, webpage_src)
+            _title = r'candytitles.*>(.*)</span>'
+            mobj = re.search(_title, webpage)
             if mobj is not None:
                 video_title = mobj.group(1)
 
@@ -53,4 +51,3 @@ class WorldStarHipHopIE(InfoExtractor):
             'title': video_title,
             'thumbnail': thumbnail,
         }
-
index 34dd6d9528ee9d4746b798e10cfeecf19e3c8277..c427649211079715a5510eef3eaf35981bdb1034 100644 (file)
@@ -27,15 +27,15 @@ class WrzutaIE(InfoExtractor):
             'description': 'md5:7fb5ef3c21c5893375fda51d9b15d9cd',
         },
     }, {
-        'url': 'http://w729.wrzuta.pl/audio/9oXJqdcndqv/david_guetta_amp_showtek_ft._vassy_-_bad',
-        'md5': '1e546a18e1c22ac6e9adce17b8961ff5',
+        'url': 'http://jolka85.wrzuta.pl/audio/063jOPX5ue2/liber_natalia_szroeder_-_teraz_ty',
+        'md5': 'bc78077859bea7bcfe4295d7d7fc9025',
         'info_dict': {
-            'id': '9oXJqdcndqv',
+            'id': '063jOPX5ue2',
             'ext': 'ogg',
-            'title': 'David Guetta & Showtek ft. Vassy - Bad',
-            'duration': 270,
-            'uploader_id': 'w729',
-            'description': 'md5:4628f01c666bbaaecefa83476cfa794a',
+            'title': 'Liber & Natalia Szroeder - Teraz Ty',
+            'duration': 203,
+            'uploader_id': 'jolka85',
+            'description': 'md5:2d2b6340f9188c8c4cd891580e481096',
         },
     }]
 
@@ -49,16 +49,17 @@ class WrzutaIE(InfoExtractor):
 
         quality = qualities(['SD', 'MQ', 'HQ', 'HD'])
 
-        audio_table = {'flv': 'mp3', 'webm': 'ogg'}
+        audio_table = {'flv': 'mp3', 'webm': 'ogg', '???': 'mp3'}
 
         embedpage = self._download_json('http://www.wrzuta.pl/npp/embed/%s/%s' % (uploader, video_id), video_id)
 
         formats = []
         for media in embedpage['url']:
+            fmt = media['type'].split('@')[0]
             if typ == 'audio':
-                ext = audio_table[media['type'].split('@')[0]]
+                ext = audio_table.get(fmt, fmt)
             else:
-                ext = media['type'].split('@')[0]
+                ext = fmt
 
             formats.append({
                 'format_id': '%s_%s' % (ext, media['quality'].lower()),
index 71bd7c463595a549786d3156f33f114b97b54226..1b4e883652667f2c2109d34014154c71fd443196 100644 (file)
@@ -47,4 +47,3 @@ class XBefIE(InfoExtractor):
             'thumbnail': thumbnail,
             'age_limit': 18,
         }
-
diff --git a/youtube_dl/extractor/xboxclips.py b/youtube_dl/extractor/xboxclips.py
new file mode 100644 (file)
index 0000000..a9aa72e
--- /dev/null
@@ -0,0 +1,57 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_iso8601,
+    float_or_none,
+    int_or_none,
+)
+
+
+class XboxClipsIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?xboxclips\.com/video\.php\?.*vid=(?P<id>[\w-]{36})'
+    _TEST = {
+        'url': 'https://xboxclips.com/video.php?uid=2533274823424419&gamertag=Iabdulelah&vid=074a69a9-5faf-46aa-b93b-9909c1720325',
+        'md5': 'fbe1ec805e920aeb8eced3c3e657df5d',
+        'info_dict': {
+            'id': '074a69a9-5faf-46aa-b93b-9909c1720325',
+            'ext': 'mp4',
+            'title': 'Iabdulelah playing Upload Studio',
+            'filesize_approx': 28101836.8,
+            'timestamp': 1407388500,
+            'upload_date': '20140807',
+            'duration': 56,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(
+            r'>Link: <a href="([^"]+)">', webpage, 'video URL')
+        title = self._html_search_regex(
+            r'<title>XboxClips \| ([^<]+)</title>', webpage, 'title')
+        timestamp = parse_iso8601(self._html_search_regex(
+            r'>Recorded: ([^<]+)<', webpage, 'upload date', fatal=False))
+        filesize = float_or_none(self._html_search_regex(
+            r'>Size: ([\d\.]+)MB<', webpage, 'file size', fatal=False), invscale=1024 * 1024)
+        duration = int_or_none(self._html_search_regex(
+            r'>Duration: (\d+) Seconds<', webpage, 'duration', fatal=False))
+        view_count = int_or_none(self._html_search_regex(
+            r'>Views: (\d+)<', webpage, 'view count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'timestamp': timestamp,
+            'filesize_approx': filesize,
+            'duration': duration,
+            'view_count': view_count,
+        }
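
XboxClips reports file sizes in megabytes (the test value implies a
'>Size: 26.8MB<' fragment), so the captured number is scaled back up to bytes
with invscale=1024 * 1024. The arithmetic, restated with the same
float_or_none semantics sketched for the VRT extractor:

    def float_or_none(v, scale=1, invscale=1, default=None):
        # invscale multiplies, scale divides; None passes through as default.
        if v is None:
            return default
        return float(v) * invscale / scale

    print(float_or_none('26.8', invscale=1024 * 1024))  # 28101836.8, the test's filesize_approx
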
index 5374495f9b08f4d13fd7552fd612c19339b99e54..6b37bcbc959a8e8b83fee052da18728ca9a9c298 100644 (file)
@@ -14,11 +14,10 @@ from ..utils import (
 
 class XHamsterIE(InfoExtractor):
     """Information Extractor for xHamster"""
-    _VALID_URL = r'http://(?:www\.)?xhamster\.com/movies/(?P<id>[0-9]+)/(?P<seo>.+?)\.html(?:\?.*)?'
+    _VALID_URL = r'http://(?:.+?\.)?xhamster\.com/movies/(?P<id>[0-9]+)/(?P<seo>.+?)\.html(?:\?.*)?'
     _TESTS = [
         {
             'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
-            'md5': '8281348b8d3c53d39fffb377d24eac4e',
             'info_dict': {
                 'id': '1509445',
                 'ext': 'mp4',
@@ -31,7 +30,6 @@ class XHamsterIE(InfoExtractor):
         },
         {
             'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
-            'md5': '4cbd8d56708ecb4fb4124c23e4acb81a',
             'info_dict': {
                 'id': '2221348',
                 'ext': 'mp4',
@@ -44,7 +42,7 @@ class XHamsterIE(InfoExtractor):
         }
     ]
 
-    def _real_extract(self,url):
+    def _real_extract(self, url):
         def extract_video_url(webpage):
             mp4 = re.search(r'<video\s+.*?file="([^"]+)".*?>', webpage)
             if mp4 is None:
@@ -69,17 +67,17 @@ class XHamsterIE(InfoExtractor):
         description = mobj.group(1) if mobj else None
 
         upload_date = self._html_search_regex(r'hint=\'(\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}\'',
-            webpage, 'upload date', fatal=False)
+                                              webpage, 'upload date', fatal=False)
         if upload_date:
             upload_date = unified_strdate(upload_date)
 
         uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)',
-            webpage, 'uploader id', default='anonymous')
+                                              webpage, 'uploader id', default='anonymous')
 
         thumbnail = self._html_search_regex(r'<video\s+.*?poster="([^"]+)".*?>', webpage, 'thumbnail', fatal=False)
 
         duration = parse_duration(self._html_search_regex(r'<span>Runtime:</span> (\d+:\d+)</div>',
-            webpage, 'duration', fatal=False))
+                                                          webpage, 'duration', fatal=False))
 
         view_count = self._html_search_regex(r'<span>Views:</span> ([^<]+)</div>', webpage, 'view count', fatal=False)
         if view_count:
diff --git a/youtube_dl/extractor/xminus.py b/youtube_dl/extractor/xminus.py
new file mode 100644 (file)
index 0000000..8c6241a
--- /dev/null
@@ -0,0 +1,76 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_chr,
+    compat_ord,
+)
+from ..utils import (
+    int_or_none,
+    parse_filesize,
+)
+
+
+class XMinusIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?x-minus\.org/track/(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://x-minus.org/track/4542/%D0%BF%D0%B5%D1%81%D0%B5%D0%BD%D0%BA%D0%B0-%D1%88%D0%BE%D1%84%D0%B5%D1%80%D0%B0.html',
+        'md5': '401a15f2d2dcf6d592cb95528d72a2a8',
+        'info_dict': {
+            'id': '4542',
+            'ext': 'mp3',
+            'title': 'Леонид Агутин-Песенка шофера',
+            'duration': 156,
+            'tbr': 320,
+            'filesize_approx': 5900000,
+            'view_count': int,
+            'description': 'md5:03238c5b663810bc79cf42ef3c03e371',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        artist = self._html_search_regex(
+            r'minus_track\.artist="(.+?)"', webpage, 'artist')
+        title = artist + '-' + self._html_search_regex(
+            r'minus_track\.title="(.+?)"', webpage, 'title')
+        duration = int_or_none(self._html_search_regex(
+            r'minus_track\.dur_sec=\'([0-9]*?)\'',
+            webpage, 'duration', fatal=False))
+        filesize_approx = parse_filesize(self._html_search_regex(
+            r'<div class="filesize[^"]*"></div>\s*([0-9.]+\s*[a-zA-Z][bB])',
+            webpage, 'approximate filesize', fatal=False))
+        tbr = int_or_none(self._html_search_regex(
+            r'<div class="quality[^"]*"></div>\s*([0-9]+)\s*kbps',
+            webpage, 'bitrate', fatal=False))
+        view_count = int_or_none(self._html_search_regex(
+            r'<div class="quality.*?► ([0-9]+)',
+            webpage, 'view count', fatal=False))
+        description = self._html_search_regex(
+            r'(?s)<div id="song_texts">(.*?)</div><br',
+            webpage, 'song lyrics', fatal=False)
+        if description:
+            description = re.sub(' *\r *', '\n', description)
+
+        enc_token = self._html_search_regex(
+            r'minus_track\.tkn="(.+?)"', webpage, 'enc_token')
+        token = ''.join(
+            c if pos == 3 else compat_chr(compat_ord(c) - 1)
+            for pos, c in enumerate(reversed(enc_token)))
+        video_url = 'http://x-minus.org/dwlf/%s/%s.mp3' % (video_id, token)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'duration': duration,
+            'filesize_approx': filesize_approx,
+            'tbr': tbr,
+            'view_count': view_count,
+            'description': description,
+        }
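
The X-Minus download token is lightly obfuscated: the scraped minus_track.tkn
value is reversed, and every character except the one at index 3 of the
reversal is shifted down one code point. A standalone decoder with a made-up
token (built-in chr/ord stand in for compat_chr/compat_ord here):

    def decode_token(enc_token):
        # Reverse, then shift each char code down by one, keeping index 3 as-is.
        return ''.join(
            c if pos == 3 else chr(ord(c) - 1)
            for pos, c in enumerate(reversed(enc_token)))

    token = decode_token('tsrqpo')  # hypothetical token -> 'noprrs'
    print('http://x-minus.org/dwlf/%s/%s.mp3' % ('4542', token))
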
index 7a73b243080406b29b6c4a17d50a3e4d1ac023cb..53ed7ef5a6ea95826d0324bac53a71bea4913fa4 100644 (file)
@@ -30,14 +30,14 @@ class XNXXIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         video_url = self._search_regex(r'flv_url=(.*?)&amp;',
-            webpage, 'video URL')
+                                       webpage, 'video URL')
         video_url = compat_urllib_parse.unquote(video_url)
 
         video_title = self._html_search_regex(r'<title>(.*?)\s+-\s+XNXX.COM',
-            webpage, 'title')
+                                              webpage, 'title')
 
         video_thumbnail = self._search_regex(r'url_bigthumb=(.*?)&amp;',
-            webpage, 'thumbnail', fatal=False)
+                                             webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
index b293e2665b81b9a486bff2ec91b410da4a6d9998..38448e7c0fbfe3641cc364a2707a97910ab16cf8 100644 (file)
@@ -20,7 +20,7 @@ class XTubeIE(InfoExtractor):
             'id': 'kVTUy_G222_',
             'ext': 'mp4',
             'title': 'strange erotica',
-            'description': 'surreal gay themed erotica...almost an ET kind of thing',
+            'description': 'http://www.xtube.com an ET kind of thing',
             'uploader': 'greenshowers',
             'duration': 450,
             'age_limit': 18,
@@ -77,9 +77,17 @@ class XTubeIE(InfoExtractor):
             'age_limit': 18,
         }
 
+
 class XTubeUserIE(InfoExtractor):
     IE_DESC = 'XTube user profile'
     _VALID_URL = r'https?://(?:www\.)?xtube\.com/community/profile\.php\?(.*?)user=(?P<username>[^&#]+)(?:$|[&#])'
+    _TEST = {
+        'url': 'http://www.xtube.com/community/profile.php?user=greenshowers',
+        'info_dict': {
+            'id': 'greenshowers',
+        },
+        'playlist_mincount': 155,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -89,7 +97,7 @@ class XTubeUserIE(InfoExtractor):
             url, username, note='Retrieving profile page')
 
         video_count = int(self._search_regex(
-            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>'%username, profile_page,
+            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' % username, profile_page,
             'video count'))
 
         PAGE_SIZE = 25
index d84be25620eecb944845b74299510067772c583f..0fdb122436d9b7e158a706cd1048630e252d5666 100644 (file)
@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import itertools
@@ -6,6 +7,7 @@ import re
 
 from .common import InfoExtractor, SearchInfoExtractor
 from ..utils import (
+    ExtractorError,
     compat_urllib_parse,
     compat_urlparse,
     clean_html,
@@ -15,7 +17,7 @@ from ..utils import (
 
 class YahooIE(InfoExtractor):
     IE_DESC = 'Yahoo screen and movies'
-    _VALID_URL = r'https?://(?:screen|movies)\.yahoo\.com/.*?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html'
+    _VALID_URL = r'(?P<url>(?P<host>https?://(?:[a-zA-Z]{2}\.)?[\da-zA-Z_-]+\.yahoo\.com)/(?:[^/]+/)*(?P<display_id>.+?)-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)'
     _TESTS = [
         {
             'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
@@ -25,6 +27,7 @@ class YahooIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Julian Smith & Travis Legg Watch Julian Smith',
                 'description': 'Julian and Travis watch Julian Smith',
+                'duration': 6863,
             },
         },
         {
@@ -34,25 +37,107 @@ class YahooIE(InfoExtractor):
                 'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
                 'ext': 'mp4',
                 'title': 'Codefellas - The Cougar Lies with Spanish Moss',
-                'description': 'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
+                'description': 'md5:66b627ab0a282b26352136ca96ce73c1',
+                'duration': 151,
             },
         },
         {
-            'url': 'https://movies.yahoo.com/video/world-loves-spider-man-190819223.html',
-            'md5': '410b7104aa9893b765bc22787a22f3d9',
+            'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
+            'md5': '60e8ac193d8fb71997caa8fce54c6460',
             'info_dict': {
-                'id': '516ed8e2-2c4f-339f-a211-7a8b49d30845',
+                'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb',
                 'ext': 'mp4',
-                'title': 'The World Loves Spider-Man',
-                'description': '''People all over the world are celebrating the release of \"The Amazing Spider-Man 2.\" We're taking a look at the enthusiastic response Spider-Man has received from viewers all over the world.''',
+                'title': "Yahoo Saves 'Community'",
+                'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
+                'duration': 170,
             }
+        },
+        {
+            'url': 'https://tw.screen.yahoo.com/taipei-opinion-poll/選情站報-街頭民調-台北市篇-102823042.html',
+            'md5': '92a7fdd8a08783c68a174d7aa067dde8',
+            'info_dict': {
+                'id': '7a23b569-7bea-36cb-85b9-bd5301a0a1fb',
+                'ext': 'mp4',
+                'title': '選情站報 街頭民調 台北市篇',
+                'description': '選情站報 街頭民調 台北市篇',
+                'duration': 429,
+            }
+        },
+        {
+            'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html',
+            'md5': '0b51660361f0e27c9789e7037ef76f4b',
+            'info_dict': {
+                'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58',
+                'ext': 'mp4',
+                'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder',
+                'description': 'md5:f66c890e1490f4910a9953c941dee944',
+                'duration': 97,
+            }
+        },
+        {
+            'url': 'https://ca.sports.yahoo.com/video/program-makes-hockey-more-affordable-013127711.html',
+            'md5': '57e06440778b1828a6079d2f744212c4',
+            'info_dict': {
+                'id': 'c9fa2a36-0d4d-3937-b8f6-cc0fb1881e73',
+                'ext': 'mp4',
+                'title': 'Program that makes hockey more affordable not offered in Manitoba',
+                'description': 'md5:c54a609f4c078d92b74ffb9bf1f496f4',
+                'duration': 121,
+            }
+        }, {
+            'url': 'https://ca.finance.yahoo.com/news/20-most-valuable-brands-world-112600775.html',
+            'md5': '3e401e4eed6325aa29d9b96125fd5b4f',
+            'info_dict': {
+                'id': 'c1b4c09c-8ed8-3b65-8b05-169c55358a83',
+                'ext': 'mp4',
+                'title': "Apple Is The World's Most Valuable Brand",
+                'description': 'md5:73eabc1a11c6f59752593b2ceefa1262',
+                'duration': 21,
+            }
+        }, {
+            'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
+            'md5': '67010fdf3a08d290e060a4dd96baa07b',
+            'info_dict': {
+                'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
+                'ext': 'mp4',
+                'title': 'China Moses Is Crazy About the Blues',
+                'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
+                'duration': 128,
+            }
+        }, {
+            'url': 'https://in.lifestyle.yahoo.com/video/connect-dots-dark-side-virgo-090247395.html',
+            'md5': 'd9a083ccf1379127bf25699d67e4791b',
+            'info_dict': {
+                'id': '52aeeaa3-b3d1-30d8-9ef8-5d0cf05efb7c',
+                'ext': 'mp4',
+                'title': 'Connect the Dots: Dark Side of Virgo',
+                'description': 'md5:1428185051cfd1949807ad4ff6d3686a',
+                'duration': 201,
+            }
+        }, {
+            'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html',
+            'only_matching': True,
         }
     ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        webpage = self._download_webpage(url, video_id)
+        display_id = mobj.group('display_id')
+        url = mobj.group('url')
+        host = mobj.group('host')
+        webpage = self._download_webpage(url, display_id)
+
+        # Look for iframed media first
+        iframe_m = re.search(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage)
+        if iframe_m:
+            iframepage = self._download_webpage(
+                host + iframe_m.group(1), display_id, 'Downloading iframe webpage')
+            items_json = self._search_regex(
+                r'mediaItems: (\[.+?\])$', iframepage, 'items', flags=re.MULTILINE, default=None)
+            if items_json:
+                items = json.loads(items_json)
+                video_id = items[0]['id']
+                return self._get_info(video_id, display_id, webpage)
 
         items_json = self._search_regex(
             r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
@@ -60,22 +145,25 @@ class YahooIE(InfoExtractor):
         if items_json is None:
             CONTENT_ID_REGEXES = [
                 r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
-                r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"'
+                r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
+                r'"first_videoid"\s*:\s*"([^"]+)"',
             ]
-            long_id = self._search_regex(CONTENT_ID_REGEXES, webpage, 'content ID')
-            video_id = long_id
+            video_id = self._search_regex(CONTENT_ID_REGEXES, webpage, 'content ID')
         else:
             items = json.loads(items_json)
             info = items['mediaItems']['query']['results']['mediaObj'][0]
             # The 'meta' field is not always in the video webpage, we request it
             # from another page
-            long_id = info['id']
-        return self._get_info(long_id, video_id, webpage)
+            video_id = info['id']
+        return self._get_info(video_id, display_id, webpage)
 
-    def _get_info(self, long_id, video_id, webpage):
+    def _get_info(self, video_id, display_id, webpage):
+        region = self._search_regex(
+            r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
+            webpage, 'region', fatal=False, default='US')
         query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"'
-                 ' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="US"'
-                 ' AND protocol="http"' % long_id)
+                 ' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="%s"'
+                 ' AND protocol="http"' % (video_id, region))
         data = compat_urllib_parse.urlencode({
             'q': query,
             'env': 'prod',
@@ -83,9 +171,17 @@ class YahooIE(InfoExtractor):
         })
         query_result = self._download_json(
             'http://video.query.yahoo.com/v1/public/yql?' + data,
-            video_id, 'Downloading video info')
+            display_id, 'Downloading video info')
+
         info = query_result['query']['results']['mediaObj'][0]
-        meta = info['meta']
+        meta = info.get('meta')
+
+        if not meta:
+            msg = info['status'].get('msg')
+            if msg:
+                raise ExtractorError(
+                    '%s returned error: %s' % (self.IE_NAME, msg), expected=True)
+            raise ExtractorError('Unable to extract media object meta')
 
         formats = []
         for s in info['streams']:
@@ -112,36 +208,15 @@ class YahooIE(InfoExtractor):
 
         return {
             'id': video_id,
+            'display_id': display_id,
             'title': meta['title'],
             'formats': formats,
             'description': clean_html(meta['description']),
             'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage),
+            'duration': int_or_none(meta.get('duration')),
         }
 
 
-class YahooNewsIE(YahooIE):
-    IE_NAME = 'yahoo:news'
-    _VALID_URL = r'http://news\.yahoo\.com/video/.*?-(?P<id>\d*?)\.html'
-
-    _TESTS = [{
-        'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
-        'md5': '67010fdf3a08d290e060a4dd96baa07b',
-        'info_dict': {
-            'id': '104538833',
-            'ext': 'mp4',
-            'title': 'China Moses Is Crazy About the Blues',
-            'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
-        },
-    }]
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        webpage = self._download_webpage(url, video_id)
-        long_id = self._search_regex(r'contentId: \'(.+?)\',', webpage, 'long id')
-        return self._get_info(long_id, video_id, webpage)
-
-
 class YahooSearchIE(SearchInfoExtractor):
     IE_DESC = 'Yahoo screen search'
     _MAX_RESULTS = 1000
@@ -154,7 +229,7 @@ class YahooSearchIE(SearchInfoExtractor):
         for pagenum in itertools.count(0):
             result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query,
-                note='Downloading results page '+str(pagenum+1))
+                                       note='Downloading results page ' + str(pagenum + 1))
             m = info['m']
             results = info['results']
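
The Yahoo hunks above do two things: they scrape the region from the page (falling back to 'US') and thread it, together with the real video id, into the YQL streams query. A minimal sketch of that lookup outside youtube-dl's helpers, assuming Python 3 urllib in place of the compat layer and assuming the elided part of the request dict sets format=json:

    import json
    import urllib.parse
    import urllib.request

    def fetch_yahoo_streams(video_id, region='US'):
        # Same YQL statement _get_info builds; the plrs token is copied
        # verbatim from the patch above.
        query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"'
                 ' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="%s"'
                 ' AND protocol="http"' % (video_id, region))
        data = urllib.parse.urlencode({
            'q': query,
            'env': 'prod',
            'format': 'json',  # assumption: elided between the hunks above
        })
        url = 'http://video.query.yahoo.com/v1/public/yql?' + data
        with urllib.request.urlopen(url) as resp:
            result = json.loads(resp.read().decode('utf-8'))
        return result['query']['results']['mediaObj'][0]
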
 
diff --git a/youtube_dl/extractor/ynet.py b/youtube_dl/extractor/ynet.py
new file mode 100644 (file)
index 0000000..7b621a9
--- /dev/null
@@ -0,0 +1,50 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import compat_urllib_parse
+
+
+class YnetIE(InfoExtractor):
+    _VALID_URL = r'http://(?:.+?\.)?ynet\.co\.il/(?:.+?/)?0,7340,(?P<id>L(?:-[0-9]+)+),00\.html'
+    _TESTS = [
+        {
+            'url': 'http://hot.ynet.co.il/home/0,7340,L-11659-99244,00.html',
+            'info_dict': {
+                'id': 'L-11659-99244',
+                'ext': 'flv',
+                'title': 'איש לא יודע מאיפה באנו',
+                'thumbnail': 're:^https?://.*\.jpg',
+            }
+        }, {
+            'url': 'http://hot.ynet.co.il/home/0,7340,L-8859-84418,00.html',
+            'info_dict': {
+                'id': 'L-8859-84418',
+                'ext': 'flv',
+                'title': "צפו: הנשיקה הלוהטת של תורגי' ויוליה פלוטקין",
+                'thumbnail': 're:^https?://.*\.jpg',
+            }
+        }
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        content = compat_urllib_parse.unquote_plus(self._og_search_video_url(webpage))
+        config = json.loads(self._search_regex(r'config=({.+?})$', content, 'video config'))
+        f4m_url = config['clip']['url']
+        title = self._og_search_title(webpage)
+        m = re.search(r'ynet - HOT -- (["\']+)(?P<title>.+?)\1', title)
+        if m:
+            title = m.group('title')
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': self._extract_f4m_formats(f4m_url, video_id),
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
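
The trick in YnetIE is that the og:video player URL itself carries the clip configuration as a URL-encoded JSON fragment after 'config='. Unwrapping it in isolation (the sample URL below is made up):

    import json
    import re
    import urllib.parse

    # Hypothetical og:video value of the kind YnetIE unwraps.
    og_video = ('http://hot.ynet.co.il/PicServer/player.swf?config='
                '%7B%22clip%22%3A%7B%22url%22%3A%22'
                'http%3A%2F%2Fexample.com%2Fmanifest.f4m%22%7D%7D')

    content = urllib.parse.unquote_plus(og_video)
    config = json.loads(
        re.search(r'config=({.+?})$', content).group(1))
    print(config['clip']['url'])  # -> http://example.com/manifest.f4m
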
diff --git a/youtube_dl/extractor/youjizz.py b/youtube_dl/extractor/youjizz.py
index fcb5ff758deae198614e821dc132871e5fb90679..c642075dcfabbfb025d64b92e392d614578f42b1 100644 (file)
@@ -9,40 +9,30 @@ from ..utils import (
 
 
 class YouJizzIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+)\.html$'
+    _VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'
     _TEST = {
         'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
-        'file': '2189178.flv',
         'md5': '07e15fa469ba384c7693fd246905547c',
         'info_dict': {
+            'id': '2189178',
+            'ext': 'flv',
             "title": "Zeichentrick 1",
             "age_limit": 18,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('videoid')
-
-        # Get webpage content
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-
         age_limit = self._rta_search(webpage)
-
-        # Get the video title
-        video_title = self._html_search_regex(r'<title>(?P<title>.*)</title>',
-            webpage, 'title').strip()
-
-        # Get the embed page
-        result = re.search(r'https?://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
-        if result is None:
-            raise ExtractorError('ERROR: unable to extract embed page')
-
-        embed_page_url = result.group(0).strip()
-        video_id = result.group('videoid')
-
-        webpage = self._download_webpage(embed_page_url, video_id)
+        video_title = self._html_search_regex(
+            r'<title>\s*(.*)\s*</title>', webpage, 'title')
+
+        embed_page_url = self._search_regex(
+            r'(https?://www\.youjizz\.com/videos/embed/[0-9]+)',
+            webpage, 'embed page')
+        webpage = self._download_webpage(
+            embed_page_url, video_id, note='downloading embed page')
 
         # Get the video URL
         m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P<playlist>.+?)"\);', webpage)
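
The rewrite above drops the hand-rolled re.match plumbing in favour of _match_id, which every extractor inherits. Roughly what that helper boils down to (a simplified sketch; the real method on InfoExtractor is more careful):

    import re

    class InfoExtractorSketch(object):
        _VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'

        @classmethod
        def _match_id(cls, url):
            m = re.match(cls._VALID_URL, url)
            assert m is not None
            return m.group('id')

    print(InfoExtractorSketch._match_id(
        'http://www.youjizz.com/videos/zeichentrick-1-2189178.html'))
    # -> 2189178
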
diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py
index a8fd40c833fb7707eb1cd8760c288da5f2299025..97b98bbe88715f644da6bec1709d697af2c8e0e0 100644 (file)
@@ -1,6 +1,7 @@
 # coding: utf-8
 
-import json
+from __future__ import unicode_literals
+
 import math
 import random
 import re
@@ -13,35 +14,42 @@ from ..utils import (
 
 
 class YoukuIE(InfoExtractor):
-    _VALID_URL =  r'(?:(?:http://)?(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|youku:)(?P<ID>[A-Za-z0-9]+)(?:\.html|/v\.swf|)'
-    _TEST =   {
-        u"url": u"http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html",
-        u"file": u"XNDgyMDQ2NTQw_part00.flv",
-        u"md5": u"ffe3f2e435663dc2d1eea34faeff5b5b",
-        u"params": {u"test": False},
-        u"info_dict": {
-            u"title": u"youtube-dl test video \"'/\\ä↭𝕐"
+    _VALID_URL = r'''(?x)
+        (?:
+            http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
+            youku:)
+        (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
+    '''
+    _TEST = {
+        'url': 'http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html',
+        'md5': 'ffe3f2e435663dc2d1eea34faeff5b5b',
+        'params': {
+            'test': False
+        },
+        'info_dict': {
+            'id': 'XNDgyMDQ2NTQw_part00',
+            'ext': 'flv',
+            'title': 'youtube-dl test video "\'/\\ä↭𝕐'
         }
     }
 
-
     def _gen_sid(self):
         nowTime = int(time.time() * 1000)
-        random1 = random.randint(1000,1998)
-        random2 = random.randint(1000,9999)
+        random1 = random.randint(1000, 1998)
+        random2 = random.randint(1000, 9999)
 
-        return "%d%d%d" %(nowTime,random1,random2)
+        return "%d%d%d" % (nowTime, random1, random2)
 
     def _get_file_ID_mix_string(self, seed):
         mixed = []
         source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
         seed = float(seed)
         for i in range(len(source)):
-            seed  =  (seed * 211 + 30031) % 65536
-            index  =  math.floor(seed / 65536 * len(source))
+            seed = (seed * 211 + 30031) % 65536
+            index = math.floor(seed / 65536 * len(source))
             mixed.append(source[int(index)])
             source.remove(source[int(index)])
-        #return ''.join(mixed)
+        # return ''.join(mixed)
         return mixed
 
     def _get_file_id(self, fileId, seed):
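
The seed mixer above is fully deterministic: a 16-bit linear-congruential step, seed = (seed * 211 + 30031) % 65536, draws one character at a time from a shrinking alphabet, so a given seed always yields the same shuffled alphabet, which _get_file_id then uses to decode the stream fileid. Standalone:

    import math

    def mix_alphabet(seed):
        # Mirrors _get_file_ID_mix_string: LCG-driven draw without
        # replacement from the fixed source alphabet.
        source = list('abcdefghijklmnopqrstuvwxyz'
                      'ABCDEFGHIJKLMNOPQRSTUVWXYZ/\\:._-1234567890')
        mixed = []
        seed = float(seed)
        for _ in range(len(source)):
            seed = (seed * 211 + 30031) % 65536
            index = int(math.floor(seed / 65536 * len(source)))
            mixed.append(source.pop(index))
        return ''.join(mixed)

    assert mix_alphabet(42) == mix_alphabet(42)  # same seed, same shuffle
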
@@ -55,60 +63,52 @@ class YoukuIE(InfoExtractor):
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('ID')
+        video_id = mobj.group('id')
 
         info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
 
-        jsondata = self._download_webpage(info_url, video_id)
-
-        self.report_extraction(video_id)
-        try:
-            config = json.loads(jsondata)
-            error_code = config['data'][0].get('error_code')
-            if error_code:
-                # -8 means blocked outside China.
-                error = config['data'][0].get('error')  # Chinese and English, separated by newline.
-                raise ExtractorError(error or u'Server reported error %i' % error_code,
-                    expected=True)
-
-            video_title =  config['data'][0]['title']
-            seed = config['data'][0]['seed']
-
-            format = self._downloader.params.get('format', None)
-            supported_format = list(config['data'][0]['streamfileids'].keys())
-
-            if format is None or format == 'best':
-                if 'hd2' in supported_format:
-                    format = 'hd2'
-                else:
-                    format = 'flv'
-                ext = u'flv'
-            elif format == 'worst':
-                format = 'mp4'
-                ext = u'mp4'
-            else:
-                format = 'flv'
-                ext = u'flv'
+        config = self._download_json(info_url, video_id)
 
+        error_code = config['data'][0].get('error_code')
+        if error_code:
+            # -8 means blocked outside China.
+            error = config['data'][0].get('error')  # Chinese and English, separated by newline.
+            raise ExtractorError(error or 'Server reported error %i' % error_code,
+                                 expected=True)
 
-            fileid = config['data'][0]['streamfileids'][format]
-            keys = [s['k'] for s in config['data'][0]['segs'][format]]
-            # segs is usually a dictionary, but an empty *list* if an error occured.
-        except (UnicodeDecodeError, ValueError, KeyError):
-            raise ExtractorError(u'Unable to extract info section')
+        video_title = config['data'][0]['title']
+        seed = config['data'][0]['seed']
 
-        files_info=[]
+        format = self._downloader.params.get('format', None)
+        supported_format = list(config['data'][0]['streamfileids'].keys())
+
+        # TODO proper format selection
+        if format is None or format == 'best':
+            if 'hd2' in supported_format:
+                format = 'hd2'
+            else:
+                format = 'flv'
+            ext = 'flv'
+        elif format == 'worst':
+            format = 'mp4'
+            ext = 'mp4'
+        else:
+            format = 'flv'
+            ext = 'flv'
+
+        fileid = config['data'][0]['streamfileids'][format]
+        keys = [s['k'] for s in config['data'][0]['segs'][format]]
+        # segs is usually a dictionary, but an empty *list* if an error occurred.
+
+        files_info = []
         sid = self._gen_sid()
         fileid = self._get_file_id(fileid, seed)
 
-        #column 8,9 of fileid represent the segment number
-        #fileid[7:9] should be changed
+        # characters 8-9 of fileid (0-based) encode the segment number,
+        # so fileid[8:10] is replaced below
         for index, key in enumerate(keys):
-
             temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
-            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
+            download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
 
             info = {
                 'id': '%s_part%02d' % (video_id, index),
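
Each segment URL splices a two-digit hex segment counter into characters 8-9 of the decoded fileid and pairs it with that segment's key from config['data'][0]['segs']; the switch from f.youku.com to k.youku.com is the functional change in this hunk. The assembly in isolation (fileid and keys are made up):

    def segment_urls(sid, fileid, keys):
        for index, key in enumerate(keys):
            # replace characters 8-9 of fileid with the hex segment index
            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
            yield ('http://k.youku.com/player/getFlvPath/sid/%s_%02X'
                   '/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key))

    fake_fileid = 'ABCDEFGH00IJKLMNOP'  # hypothetical decoded fileid
    for u in segment_urls('1234567890123', fake_fileid, ['key0', 'key1']):
        print(u)
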
diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py
index d456c4da522d689ac7bcbd33c5f8a3b1204c3b00..d9c06a2ee6d934391560b6ca7db4ebecf40ff1d0 100644 (file)
@@ -23,7 +23,6 @@ class YouPornIE(InfoExtractor):
     _VALID_URL = r'^(?P<proto>https?://)(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))'
     _TEST = {
         'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
-        'md5': '71ec5fcfddacf80f495efa8b6a8d9a89',
         'info_dict': {
             'id': '505835',
             'ext': 'mp4',
@@ -46,11 +45,13 @@ class YouPornIE(InfoExtractor):
         age_limit = self._rta_search(webpage)
 
         # Get JSON parameters
-        json_params = self._search_regex(r'var currentVideo = new Video\((.*)\);', webpage, 'JSON parameters')
+        json_params = self._search_regex(
+            r'var currentVideo = new Video\((.*)\)[,;]',
+            webpage, 'JSON parameters')
         try:
             params = json.loads(json_params)
         except:
-            raise ExtractorError(u'Invalid JSON')
+            raise ExtractorError('Invalid JSON')
 
         self.report_extraction(video_id)
         try:
@@ -65,7 +66,7 @@ class YouPornIE(InfoExtractor):
         # Get all of the links from the page
         DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
         download_list_html = self._search_regex(DOWNLOAD_LIST_RE,
-            webpage, 'download list').strip()
+                                                webpage, 'download list').strip()
         LINK_RE = r'<a href="([^"]+)">'
         links = re.findall(LINK_RE, download_list_html)
 
@@ -74,7 +75,7 @@ class YouPornIE(InfoExtractor):
         for encrypted_link in encrypted_links:
             link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8')
             links.append(link)
-        
+
         formats = []
         for link in links:
             # A link looks like this:
@@ -104,8 +105,8 @@ class YouPornIE(InfoExtractor):
         self._sort_formats(formats)
 
         if not formats:
-            raise ExtractorError(u'ERROR: no known formats available for video')
-        
+            raise ExtractorError('no known formats available for video')
+
         return {
             'id': video_id,
             'uploader': video_uploader,
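
The loosened currentVideo pattern is the substantive fix in this file: the trailing [,;] accepts pages where the statement ends with a comma rather than a semicolon (presumably the change that broke the old, ';'-only regex). In isolation, with a made-up page snippet:

    import json
    import re

    page = 'var currentVideo = new Video({"video_id": "505835"}),'
    json_params = re.search(
        r'var currentVideo = new Video\((.*)\)[,;]', page).group(1)
    print(json.loads(json_params)['video_id'])  # -> 505835
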
diff --git a/youtube_dl/extractor/yourupload.py b/youtube_dl/extractor/yourupload.py
new file mode 100644 (file)
index 0000000..40fc416
--- /dev/null
@@ -0,0 +1,58 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class YourUploadIE(InfoExtractor):
+    _VALID_URL = r'''(?x)https?://(?:www\.)?
+        (?:yourupload\.com/watch|
+           embed\.yourupload\.com|
+           embed\.yucache\.net
+        )/(?P<id>[A-Za-z0-9]+)
+        '''
+    _TESTS = [
+        {
+            'url': 'http://yourupload.com/watch/14i14h',
+            'md5': 'bf5c2f95c4c917536e80936af7bc51e1',
+            'info_dict': {
+                'id': '14i14h',
+                'ext': 'mp4',
+                'title': 'BigBuckBunny_320x180.mp4',
+                'thumbnail': 're:^https?://.*\.jpe?g',
+            }
+        },
+        {
+            'url': 'http://embed.yourupload.com/14i14h',
+            'only_matching': True,
+        },
+        {
+            'url': 'http://embed.yucache.net/14i14h?client_file_id=803349',
+            'only_matching': True,
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        url = 'http://embed.yucache.net/{0}'.format(video_id)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._og_search_title(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+        url = self._og_search_video_url(webpage)
+
+        formats = [{
+            'format_id': 'sd',
+            'url': url,
+        }]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': thumbnail,
+        }
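
All three supported URL shapes are funnelled onto the embed.yucache.net page, whose Open Graph tags carry the title, thumbnail and file URL. A quick check that the verbose pattern accepts each test URL:

    import re

    _VALID_URL = r'''(?x)https?://(?:www\.)?
        (?:yourupload\.com/watch|
           embed\.yourupload\.com|
           embed\.yucache\.net
        )/(?P<id>[A-Za-z0-9]+)
        '''

    for u in ('http://yourupload.com/watch/14i14h',
              'http://embed.yourupload.com/14i14h',
              'http://embed.yucache.net/14i14h?client_file_id=803349'):
        print(re.match(_VALID_URL, u).group('id'))  # -> 14i14h, three times
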
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index c48d1b8efd38a5bfd7b0c961e6cd827325694719..7b6179a2abd1261b787f1b19486ab04af31feddf 100644 (file)
 # coding: utf-8
 
-import errno
-import io
+from __future__ import unicode_literals
+
+
 import itertools
 import json
 import os.path
 import re
+import time
 import traceback
 
 from .common import InfoExtractor, SearchInfoExtractor
 from .subtitles import SubtitlesInfoExtractor
 from ..jsinterp import JSInterpreter
 from ..swfinterp import SWFInterpreter
-from ..utils import (
+from ..compat import (
     compat_chr,
     compat_parse_qs,
     compat_urllib_parse,
     compat_urllib_request,
     compat_urlparse,
     compat_str,
-
+)
+from ..utils import (
     clean_html,
-    get_cachedir,
-    get_element_by_id,
-    get_element_by_attribute,
     ExtractorError,
+    get_element_by_attribute,
+    get_element_by_id,
     int_or_none,
-    PagedList,
+    OnDemandPagedList,
+    orderedSet,
     unescapeHTML,
     unified_strdate,
-    orderedSet,
-    write_json_file,
     uppercase_escape,
 )
 
+
 class YoutubeBaseInfoExtractor(InfoExtractor):
     """Provide base functions for Youtube extractors"""
     _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
-    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
-    _AGE_URL = 'https://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
+    _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
     _NETRC_MACHINE = 'youtube'
     # If True it will raise an error if no login info is provided
     _LOGIN_REQUIRED = False
 
     def _set_language(self):
-        return bool(self._download_webpage(
-            self._LANG_URL, None,
-            note=u'Setting language', errnote='unable to set language',
-            fatal=False))
+        self._set_cookie(
+            '.youtube.com', 'PREF', 'f1=50000000&hl=en',
+            # YouTube sets the expire time to about two months
+            expire_time=time.time() + 2 * 30 * 24 * 3600)
 
     def _login(self):
+        """
+        Attempt to log in to YouTube.
+        True is returned if successful or skipped.
+        False is returned if login failed.
+
+        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
+        """
         (username, password) = self._get_login_info()
         # No authentication to be performed
         if username is None:
             if self._LOGIN_REQUIRED:
-                raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True)
-            return False
+                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
+            return True
 
         login_page = self._download_webpage(
             self._LOGIN_URL, None,
-            note=u'Downloading login page',
-            errnote=u'unable to fetch login page', fatal=False)
+            note='Downloading login page',
+            errnote='unable to fetch login page', fatal=False)
         if login_page is False:
             return
 
         galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
-                                  login_page, u'Login GALX parameter')
+                                  login_page, 'Login GALX parameter')
 
         # Log in
         login_form_strs = {
-                u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
-                u'Email': username,
-                u'GALX': galx,
-                u'Passwd': password,
-                u'PersistentCookie': u'yes',
-                u'_utf8': u'霱',
-                u'bgresponse': u'js_disabled',
-                u'checkConnection': u'',
-                u'checkedDomains': u'youtube',
-                u'dnConn': u'',
-                u'pstMsg': u'0',
-                u'rmShown': u'1',
-                u'secTok': u'',
-                u'signIn': u'Sign in',
-                u'timeStmp': u'',
-                u'service': u'youtube',
-                u'uilel': u'3',
-                u'hl': u'en_US',
+            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+            'Email': username,
+            'GALX': galx,
+            'Passwd': password,
+
+            'PersistentCookie': 'yes',
+            '_utf8': '霱',
+            'bgresponse': 'js_disabled',
+            'checkConnection': '',
+            'checkedDomains': 'youtube',
+            'dnConn': '',
+            'pstMsg': '0',
+            'rmShown': '1',
+            'secTok': '',
+            'signIn': 'Sign in',
+            'timeStmp': '',
+            'service': 'youtube',
+            'uilel': '3',
+            'hl': 'en_US',
         }
+
         # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
         # chokes on unicode
-        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
+        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
         login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
 
         req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
         login_results = self._download_webpage(
             req, None,
-            note=u'Logging in', errnote=u'unable to log in', fatal=False)
+            note='Logging in', errnote='unable to log in', fatal=False)
         if login_results is False:
             return False
-        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
-            self._downloader.report_warning(u'unable to log in: bad username or password')
-            return False
-        return True
 
-    def _confirm_age(self):
-        age_form = {
-            'next_url': '/',
-            'action_confirm': 'Confirm',
-        }
-        req = compat_urllib_request.Request(self._AGE_URL,
-            compat_urllib_parse.urlencode(age_form).encode('ascii'))
+        if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
+            raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
+
+        # Two-Factor
+        # TODO add SMS and phone call support - these require making a request and then prompting the user
+
+        if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
+            tfa_code = self._get_tfa_info()
+
+            if tfa_code is None:
+                self._downloader.report_warning('Two-factor authentication required. Provide it with --twofactor <code>')
+                self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)')
+                return False
+
+            # Unlike the first login form, secTok and timeStmp are both required for the TFA form
+
+            match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
+            if match is None:
+                self._downloader.report_warning('Failed to get secTok - did the page structure change?')
+                return False
+            secTok = match.group(1)
+            match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
+            if match is None:
+                self._downloader.report_warning('Failed to get timeStmp - did the page structure change?')
+                return False
+            timeStmp = match.group(1)
+
+            tfa_form_strs = {
+                'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+                'smsToken': '',
+                'smsUserPin': tfa_code,
+                'smsVerifyPin': 'Verify',
+
+                'PersistentCookie': 'yes',
+                'checkConnection': '',
+                'checkedDomains': 'youtube',
+                'pstMsg': '1',
+                'secTok': secTok,
+                'timeStmp': timeStmp,
+                'service': 'youtube',
+                'hl': 'en_US',
+            }
+            tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
+            tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')
+
+            tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
+            tfa_results = self._download_webpage(
+                tfa_req, None,
+                note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
+
+            if tfa_results is False:
+                return False
+
+            if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
+                self._downloader.report_warning('Two-factor code expired. Please try again, or use a one-use backup code instead.')
+                return False
+            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
+                self._downloader.report_warning('unable to log in - did the page structure change?')
+                return False
+            if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
+                self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
+                return False
 
-        self._download_webpage(
-            req, None,
-            note=u'Confirming age', errnote=u'Unable to confirm age')
+        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
+            self._downloader.report_warning('unable to log in: bad username or password')
+            return False
         return True
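
Both the login and TFA forms encode every field to UTF-8 bytes before urlencoding, because Python 2's urlencode raised UnicodeEncodeError on unicode values such as the '霱' sentinel. The pattern in isolation, shown with Python 3's urllib (where it is harmless but no longer strictly required):

    import urllib.parse

    form_strs = {'Email': 'user@example.com', '_utf8': '霱'}
    form = dict((k.encode('utf-8'), v.encode('utf-8'))
                for k, v in form_strs.items())
    login_data = urllib.parse.urlencode(form).encode('ascii')
    print(login_data)  # b'Email=user%40example.com&_utf8=%E9%9C%B1'
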
 
     def _real_initialize(self):
         if self._downloader is None:
             return
-        if not self._set_language():
-            return
+        self._set_language()
         if not self._login():
             return
-        self._confirm_age()
 
 
 class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
-    IE_DESC = u'YouTube.com'
+    IE_DESC = 'YouTube.com'
     _VALID_URL = r"""(?x)^
                      (
-                         (?:https?://|//)?                                    # http(s):// or protocol-independent URL (optional)
+                         (?:https?://|//)                                    # http(s):// or protocol-independent URL
                          (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                             (?:www\.)?deturl\.com/www\.youtube\.com/|
                             (?:www\.)?pwnyoutube\.com/|
@@ -140,7 +197,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                             youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                          (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                          (?:                                                  # the various things that can precede the ID:
-                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
+                             (?:(?:v|embed|e)/(?!videoseries))                # v/ or embed/ or e/
                              |(?:                                             # or the v= param in all its forms
                                  (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                  (?:\?|\#!?)                                  # the params delimiter ? or # or #!
@@ -149,10 +206,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                              )
                          ))
                          |youtu\.be/                                          # just youtu.be/xxxx
-                         |https?://(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
+                         |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                          )
                      )?                                                       # all until now is optional -> you can pass the naked ID
                      ([0-9A-Za-z_-]{11})                                      # here is it! the YouTube video ID
+                     (?!.*?&list=)                                            # combined list/video URLs are handled by the playlist IE
                      (?(1).+)?                                                # if we found the ID, everything can follow
                      $"""
     _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
@@ -201,6 +259,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '138': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
+        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
+        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
+        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},
 
         # Dash mp4 audio
         '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
@@ -214,6 +275,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
         '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
         '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
+        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'},
         '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
@@ -223,128 +285,198 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
+        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
+        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
+        '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'VP9'},
 
         # Dash webm audio
-        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 48, 'preference': -50},
+        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
         '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
 
+        # Dash webm audio with opus inside
+        '249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
+        '250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
+        '251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
+
         # RTMP (unnamed)
         '_rtmp': {'protocol': 'rtmp'},
     }
 
-    IE_NAME = u'youtube'
+    IE_NAME = 'youtube'
     _TESTS = [
         {
-            u"url":  u"http://www.youtube.com/watch?v=BaW_jenozKc",
-            u"file":  u"BaW_jenozKc.mp4",
-            u"info_dict": {
-                u"title": u"youtube-dl test video \"'/\\ä↭𝕐",
-                u"uploader": u"Philipp Hagemeister",
-                u"uploader_id": u"phihag",
-                u"upload_date": u"20121002",
-                u"description": u"test chars:  \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .",
-                u"categories": [u'Science & Technology'],
+            'url': 'http://www.youtube.com/watch?v=BaW_jenozKc',
+            'info_dict': {
+                'id': 'BaW_jenozKc',
+                'ext': 'mp4',
+                'title': 'youtube-dl test video "\'/\\ä↭𝕐',
+                'uploader': 'Philipp Hagemeister',
+                'uploader_id': 'phihag',
+                'upload_date': '20121002',
+                'description': 'test chars:  "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
+                'categories': ['Science & Technology'],
+                'like_count': int,
+                'dislike_count': int,
             }
         },
         {
-            u"url":  u"http://www.youtube.com/watch?v=UxxajLWwzqY",
-            u"file":  u"UxxajLWwzqY.mp4",
-            u"note": u"Test generic use_cipher_signature video (#897)",
-            u"info_dict": {
-                u"upload_date": u"20120506",
-                u"title": u"Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]",
-                u"description": u"md5:fea86fda2d5a5784273df5c7cc994d9f",
-                u"uploader": u"Icona Pop",
-                u"uploader_id": u"IconaPop"
+            'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
+            'note': 'Test generic use_cipher_signature video (#897)',
+            'info_dict': {
+                'id': 'UxxajLWwzqY',
+                'ext': 'mp4',
+                'upload_date': '20120506',
+                'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
+                'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f',
+                'uploader': 'Icona Pop',
+                'uploader_id': 'IconaPop',
             }
         },
         {
-            u"url":  u"https://www.youtube.com/watch?v=07FYdnEawAQ",
-            u"file":  u"07FYdnEawAQ.mp4",
-            u"note": u"Test VEVO video with age protection (#956)",
-            u"info_dict": {
-                u"upload_date": u"20130703",
-                u"title": u"Justin Timberlake - Tunnel Vision (Explicit)",
-                u"description": u"md5:64249768eec3bc4276236606ea996373",
-                u"uploader": u"justintimberlakeVEVO",
-                u"uploader_id": u"justintimberlakeVEVO"
+            'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
+            'note': 'Test VEVO video with age protection (#956)',
+            'info_dict': {
+                'id': '07FYdnEawAQ',
+                'ext': 'mp4',
+                'upload_date': '20130703',
+                'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
+                'description': 'md5:64249768eec3bc4276236606ea996373',
+                'uploader': 'justintimberlakeVEVO',
+                'uploader_id': 'justintimberlakeVEVO',
             }
         },
         {
-            u"url":  u"//www.YouTube.com/watch?v=yZIXLfi8CZQ",
-            u"file":  u"yZIXLfi8CZQ.mp4",
-            u"note": u"Embed-only video (#1746)",
-            u"info_dict": {
-                u"upload_date": u"20120608",
-                u"title": u"Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012",
-                u"description": u"md5:09b78bd971f1e3e289601dfba15ca4f7",
-                u"uploader": u"SET India",
-                u"uploader_id": u"setindia"
+            'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
+            'note': 'Embed-only video (#1746)',
+            'info_dict': {
+                'id': 'yZIXLfi8CZQ',
+                'ext': 'mp4',
+                'upload_date': '20120608',
+                'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
+                'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
+                'uploader': 'SET India',
+                'uploader_id': 'setindia'
             }
         },
         {
-            u"url": u"http://www.youtube.com/watch?v=a9LDPn-MO4I",
-            u"file": u"a9LDPn-MO4I.m4a",
-            u"note": u"256k DASH audio (format 141) via DASH manifest",
-            u"info_dict": {
-                u"upload_date": "20121002",
-                u"uploader_id": "8KVIDEO",
-                u"description": "No description available.",
-                u"uploader": "8KVIDEO",
-                u"title": "UHDTV TEST 8K VIDEO.mp4"
+            'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
+            'note': '256k DASH audio (format 141) via DASH manifest',
+            'info_dict': {
+                'id': 'a9LDPn-MO4I',
+                'ext': 'm4a',
+                'upload_date': '20121002',
+                'uploader_id': '8KVIDEO',
+                'description': '',
+                'uploader': '8KVIDEO',
+                'title': 'UHDTV TEST 8K VIDEO.mp4'
             },
-            u"params": {
-                u"youtube_include_dash_manifest": True,
-                u"format": "141",
+            'params': {
+                'youtube_include_dash_manifest': True,
+                'format': '141',
             },
         },
         # DASH manifest with encrypted signature
         {
-            u'url': u'https://www.youtube.com/watch?v=IB3lcPjvWLA',
-            u'info_dict': {
-                u'id': u'IB3lcPjvWLA',
-                u'ext': u'm4a',
-                u'title': u'Afrojack - The Spark ft. Spree Wilson',
-                u'description': u'md5:9717375db5a9a3992be4668bbf3bc0a8',
-                u'uploader': u'AfrojackVEVO',
-                u'uploader_id': u'AfrojackVEVO',
-                u'upload_date': u'20131011',
+            'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
+            'info_dict': {
+                'id': 'IB3lcPjvWLA',
+                'ext': 'm4a',
+                'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
+                'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
+                'uploader': 'AfrojackVEVO',
+                'uploader_id': 'AfrojackVEVO',
+                'upload_date': '20131011',
+            },
+            'params': {
+                'youtube_include_dash_manifest': True,
+                'format': '141',
+            },
+        },
+        # Controversy video
+        {
+            'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
+            'info_dict': {
+                'id': 'T4XJQO3qol8',
+                'ext': 'mp4',
+                'upload_date': '20100909',
+                'uploader': 'The Amazing Atheist',
+                'uploader_id': 'TheAmazingAtheist',
+                'title': 'Burning Everyone\'s Koran',
+                'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
+            }
+        },
+        # Normal age-gate video (No vevo, embed allowed)
+        {
+            'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
+            'info_dict': {
+                'id': 'HtVdAasjOgU',
+                'ext': 'mp4',
+                'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
+                'description': 'md5:eca57043abae25130f58f655ad9a7771',
+                'uploader': 'The Witcher',
+                'uploader_id': 'WitcherGame',
+                'upload_date': '20140605',
+            },
+        },
+        # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
+        {
+            'url': '__2ABJjxzNo',
+            'info_dict': {
+                'id': '__2ABJjxzNo',
+                'ext': 'mp4',
+                'upload_date': '20100430',
+                'uploader_id': 'deadmau5',
+                'description': 'md5:12c56784b8032162bb936a5f76d55360',
+                'uploader': 'deadmau5',
+                'title': 'Deadmau5 - Some Chords (HD)',
             },
-            u"params": {
-                u'youtube_include_dash_manifest': True,
-                u'format': '141',
+            'expected_warnings': [
+                'DASH manifest missing',
+            ]
+        },
+        # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
+        {
+            'url': 'lqQg6PlCWgI',
+            'info_dict': {
+                'id': 'lqQg6PlCWgI',
+                'ext': 'mp4',
+                'upload_date': '20120731',
+                'uploader_id': 'olympic',
+                'description': 'HO09  - Women -  GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
+                'uploader': 'Olympics',
+                'title': 'Hockey - Women -  GER-AUS - London 2012 Olympic Games',
             },
+            'params': {
+                'skip_download': 'requires avconv',
+            }
         },
     ]
 
-
-    @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-        if YoutubePlaylistIE.suitable(url): return False
-        return re.match(cls._VALID_URL, url) is not None
-
     def __init__(self, *args, **kwargs):
         super(YoutubeIE, self).__init__(*args, **kwargs)
         self._player_cache = {}
 
     def report_video_info_webpage_download(self, video_id):
         """Report attempt to download video info webpage."""
-        self.to_screen(u'%s: Downloading video info webpage' % video_id)
+        self.to_screen('%s: Downloading video info webpage' % video_id)
 
     def report_information_extraction(self, video_id):
         """Report attempt to extract video information."""
-        self.to_screen(u'%s: Extracting video information' % video_id)
+        self.to_screen('%s: Extracting video information' % video_id)
 
     def report_unavailable_format(self, video_id, format):
         """Report extracted video URL."""
-        self.to_screen(u'%s: Format %s not available' % (video_id, format))
+        self.to_screen('%s: Format %s not available' % (video_id, format))
 
     def report_rtmp_download(self):
         """Indicate the download will use the RTMP protocol."""
-        self.to_screen(u'RTMP download detected')
+        self.to_screen('RTMP download detected')
 
-    def _extract_signature_function(self, video_id, player_url, slen):
+    def _signature_cache_id(self, example_sig):
+        """ Return a string representation of a signature """
+        return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
+
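
Caching now keys on the dotted length pattern of an example signature rather than its bare length: signatures are dot-separated, and two signatures with the same total length but different part lengths can require different deciphering functions. What _signature_cache_id computes, on a made-up signature:

    def signature_cache_id(example_sig):
        # e.g. a two-part signature with parts of length 5 and 3 -> '5.3'
        return '.'.join(str(len(part)) for part in example_sig.split('.'))

    print(signature_cache_id('ABCDE.xyz'))  # -> '5.3'
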
+    def _extract_signature_function(self, video_id, player_url, example_sig):
         id_m = re.match(
             r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
             player_url)
@@ -354,63 +486,45 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         player_id = id_m.group('id')
 
         # Read from filesystem cache
-        func_id = '%s_%s_%d' % (player_type, player_id, slen)
+        func_id = '%s_%s_%s' % (
+            player_type, player_id, self._signature_cache_id(example_sig))
         assert os.path.basename(func_id) == func_id
-        cache_dir = get_cachedir(self._downloader.params)
 
-        cache_enabled = cache_dir is not None
-        if cache_enabled:
-            cache_fn = os.path.join(os.path.expanduser(cache_dir),
-                                    u'youtube-sigfuncs',
-                                    func_id + '.json')
-            try:
-                with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
-                    cache_spec = json.load(cachef)
-                return lambda s: u''.join(s[i] for i in cache_spec)
-            except IOError:
-                pass  # No cache available
+        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
+        if cache_spec is not None:
+            return lambda s: ''.join(s[i] for i in cache_spec)
 
         if player_type == 'js':
             code = self._download_webpage(
                 player_url, video_id,
-                note=u'Downloading %s player %s' % (player_type, player_id),
-                errnote=u'Download of %s failed' % player_url)
+                note='Downloading %s player %s' % (player_type, player_id),
+                errnote='Download of %s failed' % player_url)
             res = self._parse_sig_js(code)
         elif player_type == 'swf':
             urlh = self._request_webpage(
                 player_url, video_id,
-                note=u'Downloading %s player %s' % (player_type, player_id),
-                errnote=u'Download of %s failed' % player_url)
+                note='Downloading %s player %s' % (player_type, player_id),
+                errnote='Download of %s failed' % player_url)
             code = urlh.read()
             res = self._parse_sig_swf(code)
         else:
             assert False, 'Invalid player type %r' % player_type
 
-        if cache_enabled:
-            try:
-                test_string = u''.join(map(compat_chr, range(slen)))
-                cache_res = res(test_string)
-                cache_spec = [ord(c) for c in cache_res]
-                try:
-                    os.makedirs(os.path.dirname(cache_fn))
-                except OSError as ose:
-                    if ose.errno != errno.EEXIST:
-                        raise
-                write_json_file(cache_spec, cache_fn)
-            except Exception:
-                tb = traceback.format_exc()
-                self._downloader.report_warning(
-                    u'Writing cache to %r failed: %s' % (cache_fn, tb))
+        if cache_spec is None:
+            test_string = ''.join(map(compat_chr, range(len(example_sig))))
+            cache_res = res(test_string)
+            cache_spec = [ord(c) for c in cache_res]
 
+        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
         return res
 
-    def _print_sig_code(self, func, slen):
+    def _print_sig_code(self, func, example_sig):
         def gen_sig_code(idxs):
             def _genslice(start, end, step):
-                starts = u'' if start == 0 else str(start)
-                ends = (u':%d' % (end+step)) if end + step >= 0 else u':'
-                steps = u'' if step == 1 else (u':%d' % step)
-                return u's[%s%s%s]' % (starts, ends, steps)
+                starts = '' if start == 0 else str(start)
+                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
+                steps = '' if step == 1 else (':%d' % step)
+                return 's[%s%s%s]' % (starts, ends, steps)
 
             step = None
             start = '(Never used)'  # Quelch pyflakes warnings - start will be
@@ -427,23 +541,26 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                     start = prev
                     continue
                 else:
-                    yield u's[%d]' % prev
+                    yield 's[%d]' % prev
             if step is None:
-                yield u's[%d]' % i
+                yield 's[%d]' % i
             else:
                 yield _genslice(start, i, step)
 
-        test_string = u''.join(map(compat_chr, range(slen)))
+        test_string = ''.join(map(compat_chr, range(len(example_sig))))
         cache_res = func(test_string)
         cache_spec = [ord(c) for c in cache_res]
-        expr_code = u' + '.join(gen_sig_code(cache_spec))
-        code = u'if len(s) == %d:\n    return %s\n' % (slen, expr_code)
-        self.to_screen(u'Extracted signature function:\n' + code)
+        expr_code = ' + '.join(gen_sig_code(cache_spec))
+        signature_id_tuple = '(%s)' % (
+            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
+        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
+                '    return %s\n') % (signature_id_tuple, expr_code)
+        self.to_screen('Extracted signature function:\n' + code)
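
Both the on-disk youtube-sigfuncs cache and --youtube-print-sig-code rely on the same probe: run a string of distinct code points through the deciphering function once, and the code points of the output spell out the permutation, which can then be replayed without a JS or SWF interpreter. A standalone sketch with a made-up scrambler:

    def learn_spec(decipher, sig_len):
        # chr(0), chr(1), ... lets each output char reveal its input index
        test_string = ''.join(map(chr, range(sig_len)))
        return [ord(c) for c in decipher(test_string)]

    def example(s):
        # stand-in for a real deciphering function
        return s[::-1][1:]

    spec = learn_spec(example, 5)
    print(spec)                                # -> [3, 2, 1, 0]
    print(''.join('abcde'[i] for i in spec))   # -> 'dcba' == example('abcde')
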
 
     def _parse_sig_js(self, jscode):
         funcname = self._search_regex(
-            r'signature=([$a-zA-Z]+)', jscode,
-             u'Initial JS player signature function name')
+            r'\.sig\|\|([a-zA-Z0-9]+)\(', jscode,
+            'Initial JS player signature function name')
 
         jsi = JSInterpreter(jscode)
         initial_function = jsi.extract_function(funcname)
@@ -451,34 +568,34 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
     def _parse_sig_swf(self, file_contents):
         swfi = SWFInterpreter(file_contents)
-        TARGET_CLASSNAME = u'SignatureDecipher'
+        TARGET_CLASSNAME = 'SignatureDecipher'
         searched_class = swfi.extract_class(TARGET_CLASSNAME)
-        initial_function = swfi.extract_function(searched_class, u'decipher')
+        initial_function = swfi.extract_function(searched_class, 'decipher')
         return lambda s: initial_function([s])
 
     def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
         """Turn the encrypted s field into a working signature"""
 
         if player_url is None:
-            raise ExtractorError(u'Cannot decrypt signature without player_url')
+            raise ExtractorError('Cannot decrypt signature without player_url')
 
-        if player_url.startswith(u'//'):
-            player_url = u'https:' + player_url
+        if player_url.startswith('//'):
+            player_url = 'https:' + player_url
         try:
-            player_id = (player_url, len(s))
+            player_id = (player_url, self._signature_cache_id(s))
             if player_id not in self._player_cache:
                 func = self._extract_signature_function(
-                    video_id, player_url, len(s)
+                    video_id, player_url, s
                 )
                 self._player_cache[player_id] = func
             func = self._player_cache[player_id]
             if self._downloader.params.get('youtube_print_sig_code'):
-                self._print_sig_code(func, len(s))
+                self._print_sig_code(func, s)
             return func(s)
         except Exception as e:
             tb = traceback.format_exc()
             raise ExtractorError(
-                u'Automatic signature extraction failed: ' + tb, cause=e)
+                'Signature extraction failed: ' + tb, cause=e)
 
     def _get_available_subtitles(self, video_id, webpage):
         try:
@@ -486,23 +603,25 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
                 video_id, note=False)
         except ExtractorError as err:
-            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
+            self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
             return {}
         lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
 
         sub_lang_list = {}
         for l in lang_list:
             lang = l[1]
+            if lang in sub_lang_list:
+                continue
             params = compat_urllib_parse.urlencode({
                 'lang': lang,
                 'v': video_id,
                 'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
                 'name': unescapeHTML(l[0]).encode('utf-8'),
             })
-            url = u'https://www.youtube.com/api/timedtext?' + params
+            url = 'https://www.youtube.com/api/timedtext?' + params
             sub_lang_list[lang] = url
         if not sub_lang_list:
-            self._downloader.report_warning(u'video doesn\'t have subtitles')
+            self._downloader.report_warning('video doesn\'t have subtitles')
             return {}
         return sub_lang_list
 
@@ -510,17 +629,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         """We need the webpage for getting the captions url, pass it as an
            argument to speed up the process."""
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
-        self.to_screen(u'%s: Looking for automatic captions' % video_id)
+        self.to_screen('%s: Looking for automatic captions' % video_id)
         mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
-        err_msg = u'Couldn\'t find automatic captions for %s' % video_id
+        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
         if mobj is None:
             self._downloader.report_warning(err_msg)
             return {}
         player_config = json.loads(mobj.group(1))
         try:
-            args = player_config[u'args']
-            caption_url = args[u'ttsurl']
-            timestamp = args[u'timestamp']
+            args = player_config['args']
+            caption_url = args['ttsurl']
+            timestamp = args['timestamp']
             # We get the available subtitles
             list_params = compat_urllib_parse.urlencode({
                 'type': 'list',
@@ -530,8 +649,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             list_url = caption_url + '&' + list_params
             caption_list = self._download_xml(list_url, video_id)
             original_lang_node = caption_list.find('track')
-            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
-                self._downloader.report_warning(u'Video doesn\'t have automatic captions')
+            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr':
+                self._downloader.report_warning('Video doesn\'t have automatic captions')
                 return {}
             original_lang = original_lang_node.attrib['lang_code']
 
@@ -557,18 +676,19 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
     def extract_id(cls, url):
         mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
+            raise ExtractorError('Invalid URL: %s' % url)
         video_id = mobj.group(2)
         return video_id
 
     def _extract_from_m3u8(self, manifest_url, video_id):
         url_map = {}
+
         def _get_urls(_manifest):
             lines = _manifest.split('\n')
             urls = filter(lambda l: l and not l.startswith('#'),
-                            lines)
+                          lines)
             return urls
-        manifest = self._download_webpage(manifest_url, video_id, u'Downloading formats manifest')
+        manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
         formats_urls = _get_urls(manifest)
         for format_url in formats_urls:
             itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
@@ -577,12 +697,52 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
     def _extract_annotations(self, video_id):
         url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
-        return self._download_webpage(url, video_id, note=u'Searching for annotations.', errnote=u'Unable to download video annotations.')
+        return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
+
+    def _parse_dash_manifest(
+            self, video_id, dash_manifest_url, player_url, age_gate):
+        def decrypt_sig(mobj):
+            s = mobj.group(1)
+            dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
+            return '/signature/%s' % dec_s
+        dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
+        dash_doc = self._download_xml(
+            dash_manifest_url, video_id,
+            note='Downloading DASH manifest',
+            errnote='Could not download DASH manifest')
+
+        formats = []
+        for r in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
+            url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
+            if url_el is None:
+                continue
+            format_id = r.attrib['id']
+            video_url = url_el.text
+            filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
+            f = {
+                'format_id': format_id,
+                'url': video_url,
+                'width': int_or_none(r.attrib.get('width')),
+                'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
+                'asr': int_or_none(r.attrib.get('audioSamplingRate')),
+                'filesize': filesize,
+                'fps': int_or_none(r.attrib.get('frameRate')),
+            }
+            try:
+                existing_format = next(
+                    fo for fo in formats
+                    if fo['format_id'] == format_id)
+            except StopIteration:
+                f.update(self._formats.get(format_id, {}))
+                formats.append(f)
+            else:
+                existing_format.update(f)
+        return formats
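
Note: two small patterns in the new _parse_dash_manifest are worth spelling out. re.sub accepts a callable, which is how the encrypted '/s/<sig>' path segment of the manifest URL gets rewritten to '/signature/<decrypted>', and a representation whose itag already appears in the collected formats is merged into the existing entry rather than appended twice. A minimal sketch of both, with the decryption stubbed out and all values illustrative:

    import re

    # re.sub with a callable: rewrite '/s/<sig>' to '/signature/<decrypted>'.
    def decrypt_sig(mobj):
        return '/signature/%s' % mobj.group(1)[::-1]  # stand-in for _decrypt_signature

    url = 'https://example.com/api/manifest/dash/s/ABC.DEF/id/x'
    re.sub(r'/s/([\w\.]+)', decrypt_sig, url)
    # -> 'https://example.com/api/manifest/dash/signature/FED.CBA/id/x'

    # Merge-by-format_id: update an existing entry instead of duplicating it.
    def merge_format(formats, f):
        try:
            existing = next(fo for fo in formats if fo['format_id'] == f['format_id'])
        except StopIteration:
            formats.append(f)
        else:
            existing.update(f)

    formats = [{'format_id': '137', 'ext': 'mp4'}]
    merge_format(formats, {'format_id': '137', 'filesize': 123456})
    # formats == [{'format_id': '137', 'ext': 'mp4', 'filesize': 123456}]
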
 
     def _real_extract(self, url):
         proto = (
-            u'http' if self._downloader.params.get('prefer_insecure', False)
-            else u'https')
+            'http' if self._downloader.params.get('prefer_insecure', False)
+            else 'https')
 
         # Extract original video URL from URL with redirection, like age verification, using next_url parameter
         mobj = re.search(self._NEXT_URL_RE, url)
@@ -591,7 +751,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         video_id = self.extract_id(url)
 
         # Get video webpage
-        url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
+        url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
         video_webpage = self._download_webpage(url, video_id)
 
         # Attempt to extract SWF player URL
@@ -602,9 +762,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             player_url = None
 
         # Get video info
-        self.report_video_info_webpage_download(video_id)
         if re.search(r'player-age-gate-content">', video_webpage) is not None:
-            self.report_age_confirmation()
             age_gate = True
             # We simulate access to the video from www.youtube.com/v/{video_id},
             # which can be viewed without logging into Youtube
@@ -612,32 +770,50 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'video_id': video_id,
                 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                 'sts': self._search_regex(
-                    r'"sts"\s*:\s*(\d+)', video_webpage, 'sts'),
+                    r'"sts"\s*:\s*(\d+)', video_webpage, 'sts', default=''),
             })
             video_info_url = proto + '://www.youtube.com/get_video_info?' + data
-            video_info_webpage = self._download_webpage(video_info_url, video_id,
-                                    note=False,
-                                    errnote='unable to download video info webpage')
+            video_info_webpage = self._download_webpage(
+                video_info_url, video_id,
+                note='Refetching age-gated info webpage',
+                errnote='unable to download video info webpage')
             video_info = compat_parse_qs(video_info_webpage)
         else:
             age_gate = False
-            for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
-                video_info_url = (proto + '://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
-                        % (video_id, el_type))
-                video_info_webpage = self._download_webpage(video_info_url, video_id,
-                                        note=False,
-                                        errnote='unable to download video info webpage')
-                video_info = compat_parse_qs(video_info_webpage)
-                if 'token' in video_info:
-                    break
+            try:
+                # Try looking directly into the video webpage
+                mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
+                if not mobj:
+                    raise ValueError('Could not find ytplayer.config')  # caught below
+                json_code = uppercase_escape(mobj.group(1))
+                ytplayer_config = json.loads(json_code)
+                args = ytplayer_config['args']
+                # Convert to the same format returned by compat_parse_qs
+                video_info = dict((k, [v]) for k, v in args.items())
+                if 'url_encoded_fmt_stream_map' not in args:
+                    raise ValueError('No stream_map present')  # caught below
+            except ValueError:
+                # We fall back to the get_video_info pages (used by the embed page)
+                self.report_video_info_webpage_download(video_id)
+                for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
+                    video_info_url = (
+                        '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+                        % (proto, video_id, el_type))
+                    video_info_webpage = self._download_webpage(
+                        video_info_url,
+                        video_id, note=False,
+                        errnote='unable to download video info webpage')
+                    video_info = compat_parse_qs(video_info_webpage)
+                    if 'token' in video_info:
+                        break
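
Note: the dict((k, [v]) for k, v in args.items()) conversion exists because both branches must yield the same shape: compat_parse_qs, which behaves like the stdlib parse_qs, maps every key to a *list* of values. A quick sketch of the two shapes, with made-up values:

    try:
        from urllib.parse import parse_qs  # Python 3
    except ImportError:
        from urlparse import parse_qs      # Python 2

    parse_qs('token=abc&itag=22&itag=43')
    # -> {'token': ['abc'], 'itag': ['22', '43']}

    # ytplayer.config args are single-valued, so they get wrapped the same way:
    args = {'token': 'abc', 'title': 'some title'}
    video_info = dict((k, [v]) for k, v in args.items())
    # -> {'token': ['abc'], 'title': ['some title']}
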
         if 'token' not in video_info:
             if 'reason' in video_info:
                 raise ExtractorError(
-                    u'YouTube said: %s' % video_info['reason'][0],
+                    'YouTube said: %s' % video_info['reason'][0],
                     expected=True, video_id=video_id)
             else:
                 raise ExtractorError(
-                    u'"token" parameter not in video info for unknown reason',
+                    '"token" parameter not in video info for unknown reason',
                     video_id=video_id)
 
         if 'view_count' in video_info:
@@ -647,14 +823,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
         # Check for "rental" videos
         if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
-            raise ExtractorError(u'"rental" videos not supported')
+            raise ExtractorError('"rental" videos not supported')
 
         # Start extracting information
         self.report_information_extraction(video_id)
 
         # uploader
         if 'author' not in video_info:
-            raise ExtractorError(u'Unable to extract uploader name')
+            raise ExtractorError('Unable to extract uploader name')
         video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
 
         # uploader_id
@@ -663,14 +839,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         if mobj is not None:
             video_uploader_id = mobj.group(1)
         else:
-            self._downloader.report_warning(u'unable to extract uploader nickname')
+            self._downloader.report_warning('unable to extract uploader nickname')
 
         # title
         if 'title' in video_info:
             video_title = video_info['title'][0]
         else:
-            self._downloader.report_warning(u'Unable to extract video title')
-            video_title = u'_'
+            self._downloader.report_warning('Unable to extract video title')
+            video_title = '_'
 
         # thumbnail image
         # We try first to get a high quality image:
@@ -679,7 +855,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         if m_thumb is not None:
             video_thumbnail = m_thumb.group(1)
         elif 'thumbnail_url' not in video_info:
-            self._downloader.report_warning(u'unable to extract video thumbnail')
+            self._downloader.report_warning('unable to extract video thumbnail')
             video_thumbnail = None
         else:   # don't panic if we can't find it
             video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
@@ -695,7 +871,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
             upload_date = unified_strdate(upload_date)
 
-        m_cat_container = get_element_by_id("eow-category", video_webpage)
+        m_cat_container = self._search_regex(
+            r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
+            video_webpage, 'categories', default=None)
         if m_cat_container:
             category = self._html_search_regex(
                 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
@@ -722,17 +900,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             if fd_mobj:
                 video_description = unescapeHTML(fd_mobj.group(1))
             else:
-                video_description = u''
+                video_description = ''
 
-        def _extract_count(klass):
+        def _extract_count(count_name):
             count = self._search_regex(
-                r'class="%s">([\d,]+)</span>' % re.escape(klass),
-                video_webpage, klass, default=None)
+                r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
+                video_webpage, count_name, default=None)
             if count is not None:
                 return int(count.replace(',', ''))
             return None
-        like_count = _extract_count(u'likes-count')
-        dislike_count = _extract_count(u'dislikes-count')
+        like_count = _extract_count('like')
+        dislike_count = _extract_count('dislike')
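
Note: the old likes-count/dislikes-count class names disappeared from the watch page; the rewritten _extract_count keys off element ids like watch-like instead. An illustration of the regex against sample markup (the HTML line below has a plausible shape but is not captured from the live page):

    import re

    video_webpage = '<span id="watch-like" class="yt-uix-button-content">1,057,327</span>'
    count = re.search(
        r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape('like'),
        video_webpage)
    int(count.group(1).replace(',', ''))  # -> 1057327
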
 
         # subtitles
         video_subtitles = self.extract_subtitles(video_id, video_webpage)
@@ -742,7 +920,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             return
 
         if 'length_seconds' not in video_info:
-            self._downloader.report_warning(u'unable to extract video duration')
+            self._downloader.report_warning('unable to extract video duration')
             video_duration = None
         else:
             video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))
@@ -750,33 +928,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         # annotations
         video_annotations = None
         if self._downloader.params.get('writeannotations', False):
-                video_annotations = self._extract_annotations(video_id)
-
-        # Decide which formats to download
-        try:
-            mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
-            if not mobj:
-                raise ValueError('Could not find vevo ID')
-            json_code = uppercase_escape(mobj.group(1))
-            ytplayer_config = json.loads(json_code)
-            args = ytplayer_config['args']
-            # Easy way to know if the 's' value is in url_encoded_fmt_stream_map
-            # this signatures are encrypted
-            if 'url_encoded_fmt_stream_map' not in args:
-                raise ValueError(u'No stream_map present')  # caught below
-            re_signature = re.compile(r'[&,]s=')
-            m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
-            if m_s is not None:
-                self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
-                video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
-            m_s = re_signature.search(args.get('adaptive_fmts', u''))
-            if m_s is not None:
-                if 'adaptive_fmts' in video_info:
-                    video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
-                else:
-                    video_info['adaptive_fmts'] = [args['adaptive_fmts']]
-        except ValueError:
-            pass
+            video_annotations = self._extract_annotations(video_id)
 
         def _map_to_format_list(urlmap):
             formats = []
@@ -799,151 +951,120 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'url': video_info['conn'][0],
                 'player_url': player_url,
             }]
-        elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
-            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
+        elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
+            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
             if 'rtmpe%3Dyes' in encoded_url_map:
                 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
             url_map = {}
             for url_data_str in encoded_url_map.split(','):
                 url_data = compat_parse_qs(url_data_str)
-                if 'itag' in url_data and 'url' in url_data:
-                    url = url_data['url'][0]
-                    if 'sig' in url_data:
-                        url += '&signature=' + url_data['sig'][0]
-                    elif 's' in url_data:
-                        encrypted_sig = url_data['s'][0]
-
-                        if not age_gate:
-                            jsplayer_url_json = self._search_regex(
-                                r'"assets":.+?"js":\s*("[^"]+")',
-                                video_webpage, u'JS player URL')
-                            player_url = json.loads(jsplayer_url_json)
+                if 'itag' not in url_data or 'url' not in url_data:
+                    continue
+                format_id = url_data['itag'][0]
+                url = url_data['url'][0]
+
+                if 'sig' in url_data:
+                    url += '&signature=' + url_data['sig'][0]
+                elif 's' in url_data:
+                    encrypted_sig = url_data['s'][0]
+
+                    if not age_gate:
+                        jsplayer_url_json = self._search_regex(
+                            r'"assets":.+?"js":\s*("[^"]+")',
+                            video_webpage, 'JS player URL')
+                        player_url = json.loads(jsplayer_url_json)
+                    if player_url is None:
+                        player_url_json = self._search_regex(
+                            r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
+                            video_webpage, 'age gate player URL')
+                        player_url = json.loads(player_url_json)
+
+                    if self._downloader.params.get('verbose'):
                         if player_url is None:
-                            player_url_json = self._search_regex(
-                                r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
-                                video_webpage, u'age gate player URL')
-                            player_url = json.loads(player_url_json)
-
-                        if self._downloader.params.get('verbose'):
-                            if player_url is None:
-                                player_version = 'unknown'
-                                player_desc = 'unknown'
+                            player_version = 'unknown'
+                            player_desc = 'unknown'
+                        else:
+                            if player_url.endswith('swf'):
+                                player_version = self._search_regex(
+                                    r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
+                                    'flash player', fatal=False)
+                                player_desc = 'flash player %s' % player_version
                             else:
-                                if player_url.endswith('swf'):
-                                    player_version = self._search_regex(
-                                        r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
-                                        u'flash player', fatal=False)
-                                    player_desc = 'flash player %s' % player_version
-                                else:
-                                    player_version = self._search_regex(
-                                        r'html5player-([^/]+?)(?:/html5player)?\.js',
-                                        player_url,
-                                        'html5 player', fatal=False)
-                                    player_desc = u'html5 player %s' % player_version
-
-                            parts_sizes = u'.'.join(compat_str(len(part)) for part in encrypted_sig.split('.'))
-                            self.to_screen(u'encrypted signature length %d (%s), itag %s, %s' %
-                                (len(encrypted_sig), parts_sizes, url_data['itag'][0], player_desc))
-
-                        signature = self._decrypt_signature(
-                            encrypted_sig, video_id, player_url, age_gate)
-                        url += '&signature=' + signature
-                    if 'ratebypass' not in url:
-                        url += '&ratebypass=yes'
-                    url_map[url_data['itag'][0]] = url
+                                player_version = self._search_regex(
+                                    r'html5player-([^/]+?)(?:/html5player)?\.js',
+                                    player_url,
+                                    'html5 player', fatal=False)
+                                player_desc = 'html5 player %s' % player_version
+
+                        parts_sizes = self._signature_cache_id(encrypted_sig)
+                        self.to_screen('{%s} signature length %s, %s' %
+                                       (format_id, parts_sizes, player_desc))
+
+                    signature = self._decrypt_signature(
+                        encrypted_sig, video_id, player_url, age_gate)
+                    url += '&signature=' + signature
+                if 'ratebypass' not in url:
+                    url += '&ratebypass=yes'
+                url_map[format_id] = url
             formats = _map_to_format_list(url_map)
         elif video_info.get('hlsvp'):
             manifest_url = video_info['hlsvp'][0]
             url_map = self._extract_from_m3u8(manifest_url, video_id)
             formats = _map_to_format_list(url_map)
         else:
-            raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
+            raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
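
Note: each comma-separated entry of encoded_url_map is itself a percent-encoded query string, so it goes through compat_parse_qs a second time. A sketch of one hypothetical entry and the URL assembly that follows (all values made up):

    try:
        from urllib.parse import parse_qs  # Python 3
    except ImportError:
        from urlparse import parse_qs      # Python 2

    url_data = parse_qs('itag=22&url=https%3A%2F%2Fexample.com%2Fvideoplayback%3Fexpire%3D1&sig=AAA')
    # {'itag': ['22'], 'url': ['https://example.com/videoplayback?expire=1'], 'sig': ['AAA']}

    url = url_data['url'][0]
    if 'sig' in url_data:
        url += '&signature=' + url_data['sig'][0]
    if 'ratebypass' not in url:
        url += '&ratebypass=yes'
    # url_map['22'] = url
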
 
         # Look for the DASH manifest
-        if (self._downloader.params.get('youtube_include_dash_manifest', False)):
-            try:
-                # The DASH manifest used needs to be the one from the original video_webpage.
-                # The one found in get_video_info seems to be using different signatures.
-                # However, in the case of an age restriction there won't be any embedded dashmpd in the video_webpage.
-                # Luckily, it seems, this case uses some kind of default signature (len == 86), so the
-                # combination of get_video_info and the _static_decrypt_signature() decryption fallback will work here.
-                if age_gate:
-                    dash_manifest_url = video_info.get('dashmpd')[0]
+        if self._downloader.params.get('youtube_include_dash_manifest', True):
+            dash_mpd = video_info.get('dashmpd')
+            if dash_mpd:
+                dash_manifest_url = dash_mpd[0]
+                try:
+                    dash_formats = self._parse_dash_manifest(
+                        video_id, dash_manifest_url, player_url, age_gate)
+                except (ExtractorError, KeyError) as e:
+                    self.report_warning(
+                        'Skipping DASH manifest: %r' % e, video_id)
                 else:
-                    dash_manifest_url = ytplayer_config['args']['dashmpd']
-                def decrypt_sig(mobj):
-                    s = mobj.group(1)
-                    dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
-                    return '/signature/%s' % dec_s
-                dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
-                dash_doc = self._download_xml(
-                    dash_manifest_url, video_id,
-                    note=u'Downloading DASH manifest',
-                    errnote=u'Could not download DASH manifest')
-                for r in dash_doc.findall(u'.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
-                    url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
-                    if url_el is None:
-                        continue
-                    format_id = r.attrib['id']
-                    video_url = url_el.text
-                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
-                    f = {
-                        'format_id': format_id,
-                        'url': video_url,
-                        'width': int_or_none(r.attrib.get('width')),
-                        'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
-                        'asr': int_or_none(r.attrib.get('audioSamplingRate')),
-                        'filesize': filesize,
-                    }
-                    try:
-                        existing_format = next(
-                            fo for fo in formats
-                            if fo['format_id'] == format_id)
-                    except StopIteration:
-                        f.update(self._formats.get(format_id, {}))
-                        formats.append(f)
-                    else:
-                        existing_format.update(f)
-
-            except (ExtractorError, KeyError) as e:
-                self.report_warning(u'Skipping DASH manifest: %s' % e, video_id)
+                    formats.extend(dash_formats)
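
Note: the default for youtube_include_dash_manifest flips from False to True in this hunk, so DASH formats are now merged in unless a caller opts out. A hedged embedding sketch (the option key is taken from this hunk; the rest is ordinary youtube-dl API usage):

    from youtube_dl import YoutubeDL

    # Opt out of DASH manifest processing; True is now the default.
    ydl = YoutubeDL({'youtube_include_dash_manifest': False})
    info = ydl.extract_info(
        'https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
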
 
         self._sort_formats(formats)
 
         return {
-            'id':           video_id,
-            'uploader':     video_uploader,
-            'uploader_id':  video_uploader_id,
-            'upload_date':  upload_date,
-            'title':        video_title,
-            'thumbnail':    video_thumbnail,
-            'description':  video_description,
-            'categories':   video_categories,
-            'subtitles':    video_subtitles,
-            'duration':     video_duration,
-            'age_limit':    18 if age_gate else 0,
-            'annotations':  video_annotations,
+            'id': video_id,
+            'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
+            'upload_date': upload_date,
+            'title': video_title,
+            'thumbnail': video_thumbnail,
+            'description': video_description,
+            'categories': video_categories,
+            'subtitles': video_subtitles,
+            'duration': video_duration,
+            'age_limit': 18 if age_gate else 0,
+            'annotations': video_annotations,
             'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
-            'view_count':   view_count,
+            'view_count': view_count,
             'like_count': like_count,
             'dislike_count': dislike_count,
-            'formats':      formats,
+            'formats': formats,
         }
 
+
 class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
-    IE_DESC = u'YouTube.com playlists'
+    IE_DESC = 'YouTube.com playlists'
     _VALID_URL = r"""(?x)(?:
                         (?:https?://)?
                         (?:\w+\.)?
                         youtube\.com/
                         (?:
-                           (?:course|view_play_list|my_playlists|artist|playlist|watch)
+                           (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
                            \? (?:.*?&)*? (?:p|a|list)=
                         |  p/
                         )
                         (
                             (?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
-                            # Top tracks, they can also include dots 
+                            # Top tracks, they can also include dots
                             |(?:MC)[\w\.]*
                         )
                         .*
@@ -953,27 +1074,87 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
     _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
     _MORE_PAGES_INDICATOR = r'data-link-type="next"'
     _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
-    IE_NAME = u'youtube:playlist'
+    IE_NAME = 'youtube:playlist'
+    _TESTS = [{
+        'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
+        'info_dict': {
+            'title': 'ytdl test PL',
+            'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
+        },
+        'playlist_count': 3,
+    }, {
+        'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
+        'info_dict': {
+            'title': 'YDL_Empty_List',
+        },
+        'playlist_count': 0,
+    }, {
+        'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
+        'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
+        'info_dict': {
+            'title': '29C3: Not my department',
+        },
+        'playlist_count': 95,
+    }, {
+        'note': 'issue #673',
+        'url': 'PLBB231211A4F62143',
+        'info_dict': {
+            'title': '[OLD]Team Fortress 2 (Class-based LP)',
+        },
+        'playlist_mincount': 26,
+    }, {
+        'note': 'Large playlist',
+        'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
+        'info_dict': {
+            'title': 'Uploads from Cauchemar',
+        },
+        'playlist_mincount': 799,
+    }, {
+        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
+        'info_dict': {
+            'title': 'YDL_safe_search',
+        },
+        'playlist_count': 2,
+    }, {
+        'note': 'embedded',
+        'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
+        'playlist_count': 4,
+        'info_dict': {
+            'title': 'JODA15',
+        }
+    }, {
+        'note': 'Embedded SWF player',
+        'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
+        'playlist_count': 4,
+        'info_dict': {
+            'title': 'JODA7',
+        }
+    }]
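
Note: the new _TESTS entries lean on the test harness's playlist keys, as the harness treats them: playlist_count asserts an exact number of entries, playlist_mincount a lower bound (useful for playlists that keep growing), and only_matching skips extraction entirely and only checks that _VALID_URL matches. A minimal illustrative pair:

    _TESTS = [{
        'url': 'https://www.youtube.com/playlist?list=PLBB231211A4F62143',
        'info_dict': {'title': '[OLD]Team Fortress 2 (Class-based LP)'},
        'playlist_mincount': 26,  # the playlist may keep growing
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,    # just exercise _VALID_URL
    }]
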
 
     def _real_initialize(self):
         self._login()
 
     def _ids_to_results(self, ids):
-        return [self.url_result(vid_id, 'Youtube', video_id=vid_id)
-                       for vid_id in ids]
+        return [
+            self.url_result(vid_id, 'Youtube', video_id=vid_id)
+            for vid_id in ids]
 
     def _extract_mix(self, playlist_id):
         # The mixes are generated from a single video
         # the id of the playlist is just 'RD' + video_id
         url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
-        webpage = self._download_webpage(url, playlist_id, u'Downloading Youtube mix')
+        webpage = self._download_webpage(
+            url, playlist_id, 'Downloading Youtube mix')
         search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
-        title_span = (search_title('playlist-title') or
-            search_title('title long-title') or search_title('title'))
+        title_span = (
+            search_title('playlist-title') or
+            search_title('title long-title') or
+            search_title('title'))
         title = clean_html(title_span)
-        video_re = r'''(?x)data-video-username=".*?".*?
-                       href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id)
-        ids = orderedSet(re.findall(video_re, webpage, flags=re.DOTALL))
+        ids = orderedSet(re.findall(
+            r'''(?xs)data-video-username=".*?".*?
+                       href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
+            webpage))
         url_results = self._ids_to_results(ids)
 
         return self.playlist_result(url_results, playlist_id, title)
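
Note: orderedSet (from youtube_dl.utils) de-duplicates while preserving first-seen order, which matters here because mix pages list videos in play order and can repeat entries (one of the playlist test notes above even mentions a video appearing twice). Roughly equivalent to:

    def ordered_set(iterable):
        # Keep the first occurrence of each element, in order.
        res = []
        for el in iterable:
            if el not in res:
                res.append(el)
        return res

    ordered_set(['a', 'b', 'a', 'c'])  # -> ['a', 'b', 'c']
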
@@ -982,7 +1163,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
         # Extract playlist id
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
+            raise ExtractorError('Invalid URL: %s' % url)
         playlist_id = mobj.group(1) or mobj.group(2)
 
         # Check if it's a video-specific URL
@@ -990,17 +1171,17 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
         if 'v' in query_dict:
             video_id = query_dict['v'][0]
             if self._downloader.params.get('noplaylist'):
-                self.to_screen(u'Downloading just video %s because of --no-playlist' % video_id)
+                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                 return self.url_result(video_id, 'Youtube', video_id=video_id)
             else:
-                self.to_screen(u'Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
+                self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
 
         if playlist_id.startswith('RD'):
             # Mixes require a custom extraction process
             return self._extract_mix(playlist_id)
         if playlist_id.startswith('TL'):
-            raise ExtractorError(u'For downloading YouTube.com top lists, use '
-                u'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)
+            raise ExtractorError('For downloading YouTube.com top lists, use '
+                                 'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)
 
         url = self._TEMPLATE_URL % playlist_id
         page = self._download_webpage(url, playlist_id)
@@ -1009,7 +1190,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
         # Check if the playlist exists or is private
         if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
             raise ExtractorError(
-                u'The playlist doesn\'t exist or is private, use --username or '
+                'The playlist doesn\'t exist or is private, use --username or '
                 '--netrc to access it.',
                 expected=True)
 
@@ -1036,36 +1217,47 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
 
         playlist_title = self._html_search_regex(
             r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
-            page, u'title')
+            page, 'title')
 
         url_results = self._ids_to_results(ids)
         return self.playlist_result(url_results, playlist_id, playlist_title)
 
 
 class YoutubeTopListIE(YoutubePlaylistIE):
-    IE_NAME = u'youtube:toplist'
-    IE_DESC = (u'YouTube.com top lists, "yttoplist:{channel}:{list title}"'
-        u' (Example: "yttoplist:music:Top Tracks")')
+    IE_NAME = 'youtube:toplist'
+    IE_DESC = ('YouTube.com top lists, "yttoplist:{channel}:{list title}"'
+               ' (Example: "yttoplist:music:Top Tracks")')
     _VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'
+    _TESTS = [{
+        'url': 'yttoplist:music:Trending',
+        'playlist_mincount': 5,
+        'skip': 'Only works for logged-in users',
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         channel = mobj.group('chann')
         title = mobj.group('title')
         query = compat_urllib_parse.urlencode({'title': title})
-        playlist_re = 'href="([^"]+?%s.*?)"' % re.escape(query)
-        channel_page = self._download_webpage('https://www.youtube.com/%s' % channel, title)
-        link = self._html_search_regex(playlist_re, channel_page, u'list')
+        channel_page = self._download_webpage(
+            'https://www.youtube.com/%s' % channel, title)
+        link = self._html_search_regex(
+            r'''(?x)
+                <a\s+href="([^"]+)".*?>\s*
+                <span\s+class="branded-page-module-title-text">\s*
+                <span[^>]*>.*?%s.*?</span>''' % re.escape(query),
+            channel_page, 'list')
         url = compat_urlparse.urljoin('https://www.youtube.com/', link)
-        
+
         video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
         ids = []
         # sometimes the webpage doesn't contain the videos
         # retry until we get them
         for i in itertools.count(0):
-            msg = u'Downloading Youtube mix'
+            msg = 'Downloading Youtube mix'
             if i > 0:
                 msg += ', retry #%d' % i
+
             webpage = self._download_webpage(url, title, msg)
             ids = orderedSet(re.findall(video_re, webpage))
             if ids:
@@ -1075,11 +1267,16 @@ class YoutubeTopListIE(YoutubePlaylistIE):
 
 
 class YoutubeChannelIE(InfoExtractor):
-    IE_DESC = u'YouTube.com channels'
-    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
+    IE_DESC = 'YouTube.com channels'
+    _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
     _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
     _MORE_PAGES_URL = 'https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
-    IE_NAME = u'youtube:channel'
+    IE_NAME = 'youtube:channel'
+    _TESTS = [{
+        'note': 'paginated channel',
+        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
+        'playlist_mincount': 91,
+    }]
 
     def extract_videos_from_page(self, page):
         ids_in_page = []
@@ -1089,13 +1286,8 @@ class YoutubeChannelIE(InfoExtractor):
         return ids_in_page
 
     def _real_extract(self, url):
-        # Extract channel id
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
+        channel_id = self._match_id(url)
 
-        # Download channel page
-        channel_id = mobj.group(1)
         video_ids = []
         url = 'https://www.youtube.com/channel/%s/videos' % channel_id
         channel_page = self._download_webpage(url, channel_id)
@@ -1109,50 +1301,60 @@ class YoutubeChannelIE(InfoExtractor):
             # The videos are contained in a single page;
             # the ajax pages can't be used, they are empty
             video_ids = self.extract_videos_from_page(channel_page)
-        else:
-            # Download all channel pages using the json-based channel_ajax query
+            entries = [
+                self.url_result(video_id, 'Youtube', video_id=video_id)
+                for video_id in video_ids]
+            return self.playlist_result(entries, channel_id)
+
+        def _entries():
             for pagenum in itertools.count(1):
                 url = self._MORE_PAGES_URL % (pagenum, channel_id)
                 page = self._download_json(
-                    url, channel_id, note=u'Downloading page #%s' % pagenum,
+                    url, channel_id, note='Downloading page #%s' % pagenum,
                     transform_source=uppercase_escape)
 
                 ids_in_page = self.extract_videos_from_page(page['content_html'])
-                video_ids.extend(ids_in_page)
-    
+                for video_id in ids_in_page:
+                    yield self.url_result(
+                        video_id, 'Youtube', video_id=video_id)
+
                 if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                     break
 
-        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
-
-        url_entries = [self.url_result(video_id, 'Youtube', video_id=video_id)
-                       for video_id in video_ids]
-        return self.playlist_result(url_entries, channel_id)
+        return self.playlist_result(_entries(), channel_id)
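
Note: the channel refactor replaces an eagerly built id list with a generator, so AJAX pages are fetched only as the caller consumes entries. The shape of the pattern, stripped of network code (the names and the stubbed fetch are illustrative):

    import itertools

    def paged_entries(fetch_page, more_indicator):
        # fetch_page(n) -> (items, load_more_widget_html)
        for pagenum in itertools.count(1):
            items, widget_html = fetch_page(pagenum)
            for item in items:
                yield item
            if more_indicator not in widget_html:
                break

    pages = {1: (['a', 'b'], 'load-more'), 2: (['c'], '')}
    list(paged_entries(lambda n: pages[n], 'load-more'))  # -> ['a', 'b', 'c']
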
 
 
 class YoutubeUserIE(InfoExtractor):
-    IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)'
-    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
+    IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
+    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
     _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
     _GDATA_PAGE_SIZE = 50
     _GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
-    IE_NAME = u'youtube:user'
+    IE_NAME = 'youtube:user'
+
+    _TESTS = [{
+        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
+        'playlist_mincount': 320,
+        'info_dict': {
+            'title': 'TheLinuxFoundation',
+        }
+    }, {
+        'url': 'ytuser:phihag',
+        'only_matching': True,
+    }]
 
     @classmethod
     def suitable(cls, url):
         # Don't return True if the url can be extracted with another youtube
         # extractor; the regex is too permissive and it would match.
         other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
-        if any(ie.suitable(url) for ie in other_ies): return False
-        else: return super(YoutubeUserIE, cls).suitable(url)
+        if any(ie.suitable(url) for ie in other_ies):
+            return False
+        else:
+            return super(YoutubeUserIE, cls).suitable(url)
 
     def _real_extract(self, url):
-        # Extract username
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        username = mobj.group(1)
+        username = self._match_id(url)
 
         # Download video ids using YouTube Data API. Result size per
         # query is limited (currently to 50 videos) so we need to query
@@ -1165,13 +1367,13 @@ class YoutubeUserIE(InfoExtractor):
             gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
             page = self._download_webpage(
                 gdata_url, username,
-                u'Downloading video ids from %d to %d' % (
+                'Downloading video ids from %d to %d' % (
                     start_index, start_index + self._GDATA_PAGE_SIZE))
 
             try:
                 response = json.loads(page)
             except ValueError as err:
-                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
+                raise ExtractorError('Invalid JSON in API response: ' + compat_str(err))
             if 'entry' not in response['feed']:
                 return
 
@@ -1187,16 +1389,16 @@ class YoutubeUserIE(InfoExtractor):
                     'id': video_id,
                     'title': title,
                 }
-        url_results = PagedList(download_page, self._GDATA_PAGE_SIZE)
+        url_results = OnDemandPagedList(download_page, self._GDATA_PAGE_SIZE)
 
         return self.playlist_result(url_results, playlist_title=username)
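
Note: PagedList became OnDemandPagedList in utils, and this commit renames the call sites. It wraps a page function plus a page size and only invokes the function for the slices actually requested. A usage sketch, assuming the constructor signature used here and stubbing out the network call:

    from youtube_dl.utils import OnDemandPagedList

    def download_page(pagenum):
        # The real extractor hits the GData API; stubbed for illustration.
        start = pagenum * 2
        for n in range(start, start + 2):
            yield {'id': 'video%d' % n}

    pl = OnDemandPagedList(download_page, 2)
    pl.getslice(0, 3)  # only pages 0 and 1 are ever generated
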
 
 
 class YoutubeSearchIE(SearchInfoExtractor):
-    IE_DESC = u'YouTube.com searches'
-    _API_URL = u'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
+    IE_DESC = 'YouTube.com searches'
+    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
     _MAX_RESULTS = 1000
-    IE_NAME = u'youtube:search'
+    IE_NAME = 'youtube:search'
     _SEARCH_KEY = 'ytsearch'
 
     def _get_n_results(self, query, n):
@@ -1212,15 +1414,15 @@ class YoutubeSearchIE(SearchInfoExtractor):
                 compat_urllib_parse.quote_plus(query.encode('utf-8')),
                 (PAGE_SIZE * pagenum) + 1)
             data_json = self._download_webpage(
-                result_url, video_id=u'query "%s"' % query,
-                note=u'Downloading page %s' % (pagenum + 1),
-                errnote=u'Unable to download API page')
+                result_url, video_id='query "%s"' % query,
+                note='Downloading page %s' % (pagenum + 1),
+                errnote='Unable to download API page')
             data = json.loads(data_json)
             api_response = data['data']
 
             if 'items' not in api_response:
                 raise ExtractorError(
-                    u'[youtube] No video results', expected=True)
+                    '[youtube] No video results', expected=True)
 
             new_ids = list(video['id'] for video in api_response['items'])
             video_ids += new_ids
@@ -1239,13 +1441,20 @@ class YoutubeSearchDateIE(YoutubeSearchIE):
     IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
     _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
     _SEARCH_KEY = 'ytsearchdate'
-    IE_DESC = u'YouTube.com searches, newest videos first'
+    IE_DESC = 'YouTube.com searches, newest videos first'
 
 
 class YoutubeSearchURLIE(InfoExtractor):
-    IE_DESC = u'YouTube.com search URLs'
-    IE_NAME = u'youtube:search_url'
+    IE_DESC = 'YouTube.com search URLs'
+    IE_NAME = 'youtube:search_url'
     _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
+    _TESTS = [{
+        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
+        'playlist_mincount': 5,
+        'info_dict': {
+            'title': 'youtube-dl test video',
+        }
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -1253,7 +1462,7 @@ class YoutubeSearchURLIE(InfoExtractor):
 
         webpage = self._download_webpage(url, query)
         result_code = self._search_regex(
-            r'(?s)<ol class="item-section"(.*?)</ol>', webpage, u'result HTML')
+            r'(?s)<ol class="item-section"(.*?)</ol>', webpage, 'result HTML')
 
         part_codes = re.findall(
             r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
@@ -1279,18 +1488,39 @@ class YoutubeSearchURLIE(InfoExtractor):
 
 
 class YoutubeShowIE(InfoExtractor):
-    IE_DESC = u'YouTube.com (multi-season) shows'
-    _VALID_URL = r'https?://www\.youtube\.com/show/(.*)'
-    IE_NAME = u'youtube:show'
+    IE_DESC = 'YouTube.com (multi-season) shows'
+    _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
+    IE_NAME = 'youtube:show'
+    _TESTS = [{
+        'url': 'http://www.youtube.com/show/airdisasters',
+        'playlist_mincount': 3,
+        'info_dict': {
+            'id': 'airdisasters',
+            'title': 'Air Disasters',
+        }
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        show_name = mobj.group(1)
-        webpage = self._download_webpage(url, show_name, u'Downloading show webpage')
+        playlist_id = mobj.group('id')
+        webpage = self._download_webpage(
+            url, playlist_id, 'Downloading show webpage')
         # There's one playlist for each season of the show
         m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
-        self.to_screen(u'%s: Found %s seasons' % (show_name, len(m_seasons)))
-        return [self.url_result('https://www.youtube.com' + season.group(1), 'YoutubePlaylist') for season in m_seasons]
+        self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons)))
+        entries = [
+            self.url_result(
+                'https://www.youtube.com' + season.group(1), 'YoutubePlaylist')
+            for season in m_seasons
+        ]
+        title = self._og_search_title(webpage, fatal=False)
+
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'title': title,
+            'entries': entries,
+        }
 
 
 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
@@ -1312,7 +1542,7 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
 
     @property
     def IE_NAME(self):
-        return u'youtube:%s' % self._FEED_NAME
+        return 'youtube:%s' % self._FEED_NAME
 
     def _real_initialize(self):
         self._login()
@@ -1322,9 +1552,10 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
         paging = 0
         for i in itertools.count(1):
             info = self._download_json(self._FEED_TEMPLATE % paging,
-                                          u'%s feed' % self._FEED_NAME,
-                                          u'Downloading page %s' % i)
+                                       '%s feed' % self._FEED_NAME,
+                                       'Downloading page %s' % i)
             feed_html = info.get('feed_html') or info.get('content_html')
+            load_more_widget_html = info.get('load_more_widget_html') or feed_html
             m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
             ids = orderedSet(m.group(1) for m in m_ids)
             feed_entries.extend(
@@ -1332,50 +1563,86 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
                 for video_id in ids)
             mobj = re.search(
                 r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
-                feed_html)
+                load_more_widget_html)
             if mobj is None:
                 break
             paging = mobj.group('paging')
         return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
 
-class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
-    IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
-    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
-    _FEED_NAME = 'subscriptions'
-    _PLAYLIST_TITLE = u'Youtube Subscriptions'
 
 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
-    IE_DESC = u'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
+    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
     _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
     _FEED_NAME = 'recommended'
-    _PLAYLIST_TITLE = u'Youtube Recommended videos'
+    _PLAYLIST_TITLE = 'Youtube Recommended videos'
+
 
 class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
-    IE_DESC = u'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
+    IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
     _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
     _FEED_NAME = 'watch_later'
-    _PLAYLIST_TITLE = u'Youtube Watch Later'
+    _PLAYLIST_TITLE = 'Youtube Watch Later'
     _PERSONAL_FEED = True
 
+
 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
-    IE_DESC = u'Youtube watch history, "ythistory" keyword (requires authentication)'
-    _VALID_URL = u'https?://www\.youtube\.com/feed/history|:ythistory'
+    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
+    _VALID_URL = 'https?://www\.youtube\.com/feed/history|:ythistory'
     _FEED_NAME = 'history'
     _PERSONAL_FEED = True
-    _PLAYLIST_TITLE = u'Youtube Watch History'
+    _PLAYLIST_TITLE = 'Youtube Watch History'
+
 
 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
-    IE_NAME = u'youtube:favorites'
-    IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
+    IE_NAME = 'youtube:favorites'
+    IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
     _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
     _LOGIN_REQUIRED = True
 
     def _real_extract(self, url):
         webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
-        playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, u'favourites playlist id')
+        playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
         return self.url_result(playlist_id, 'YoutubePlaylist')
 
 
+class YoutubeSubscriptionsIE(YoutubePlaylistIE):
+    IE_NAME = 'youtube:subscriptions'
+    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
+    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
+    _TESTS = []
+
+    def _real_extract(self, url):
+        title = 'Youtube Subscriptions'
+        page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title)
+
+        # The extraction process is the same as for playlists, but the regex
+        # for the video ids doesn't contain an index
+        ids = []
+        more_widget_html = content_html = page
+
+        for page_num in itertools.count(1):
+            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
+            new_ids = orderedSet(matches)
+            ids.extend(new_ids)
+
+            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
+            if not mobj:
+                break
+
+            more = self._download_json(
+                'https://youtube.com/%s' % mobj.group('more'), title,
+                'Downloading page #%s' % page_num,
+                transform_source=uppercase_escape)
+            content_html = more['content_html']
+            more_widget_html = more['load_more_widget_html']
+
+        return {
+            '_type': 'playlist',
+            'title': title,
+            'entries': self._ids_to_results(ids),
+        }
+
+
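
Note: both the channel and subscriptions loops pass transform_source=uppercase_escape when downloading JSON. YouTube's AJAX responses can contain literal \UXXXXXXXX escapes, which strict JSON parsers reject; the helper decodes them before json.loads runs. A rough sketch in the spirit of the utils helper (not a verbatim copy):

    import codecs
    import json
    import re

    def uppercase_escape(s):
        # Turn literal \UXXXXXXXX escapes into real characters.
        return re.sub(
            r'\\U[0-9a-fA-F]{8}',
            lambda m: codecs.decode(m.group(0), 'unicode_escape'),
            s)

    raw = '{"content_html": "smile \\U0001F600"}'
    json.loads(uppercase_escape(raw))  # parses cleanly
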
 class YoutubeTruncatedURLIE(InfoExtractor):
     IE_NAME = 'youtube:truncated_url'
     IE_DESC = False  # Do not list
@@ -1397,9 +1664,9 @@ class YoutubeTruncatedURLIE(InfoExtractor):
 
     def _real_extract(self, url):
         raise ExtractorError(
-            u'Did you forget to quote the URL? Remember that & is a meta '
-            u'character in most shells, so you want to put the URL in quotes, '
-            u'like  youtube-dl '
-            u'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
-            u' or simply  youtube-dl BaW_jenozKc  .',
+            'Did you forget to quote the URL? Remember that & is a meta '
+            'character in most shells, so you want to put the URL in quotes, '
+            'like  youtube-dl '
+            '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
+            ' or simply  youtube-dl BaW_jenozKc  .',
             expected=True)
index 3b1ac4e9f5246e268e0c0b49d64249196270e9d4..74c76a9a0446482c303f3b4182f3ef2bd4942c0d 100644 (file)
@@ -1,17 +1,95 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import functools
 import re
 
 from .common import InfoExtractor
 from ..utils import (
     int_or_none,
     unified_strdate,
+    OnDemandPagedList,
 )
 
 
+def extract_from_xml_url(ie, video_id, xml_url):
+    doc = ie._download_xml(
+        xml_url, video_id,
+        note='Downloading video info',
+        errnote='Failed to download video info')
+
+    title = doc.find('.//information/title').text
+    description = doc.find('.//information/detail').text
+    duration = int(doc.find('.//details/lengthSec').text)
+    uploader_node = doc.find('.//details/originChannelTitle')
+    uploader = None if uploader_node is None else uploader_node.text
+    uploader_id_node = doc.find('.//details/originChannelId')
+    uploader_id = None if uploader_id_node is None else uploader_id_node.text
+    upload_date = unified_strdate(doc.find('.//details/airtime').text)
+
+    def xml_to_format(fnode):
+        video_url = fnode.find('url').text
+        is_available = 'http://www.metafilegenerator' not in video_url
+
+        format_id = fnode.attrib['basetype']
+        format_m = re.match(r'''(?x)
+            (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
+            (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
+        ''', format_id)
+
+        ext = format_m.group('container')
+        proto = format_m.group('proto').lower()
+
+        quality = fnode.find('./quality').text
+        abr = int(fnode.find('./audioBitrate').text) // 1000
+        vbr_node = fnode.find('./videoBitrate')
+        vbr = None if vbr_node is None else int(vbr_node.text) // 1000
+
+        width_node = fnode.find('./width')
+        width = None if width_node is None else int_or_none(width_node.text)
+        height_node = fnode.find('./height')
+        height = None if height_node is None else int_or_none(height_node.text)
+
+        format_note = ''
+        if not format_note:
+            format_note = None
+
+        return {
+            'format_id': format_id + '-' + quality,
+            'url': video_url,
+            'ext': ext,
+            'acodec': format_m.group('acodec'),
+            'vcodec': format_m.group('vcodec'),
+            'abr': abr,
+            'vbr': vbr,
+            'width': width,
+            'height': height,
+            'filesize': int_or_none(fnode.find('./filesize').text),
+            'format_note': format_note,
+            'protocol': proto,
+            '_available': is_available,
+        }
+
+    format_nodes = doc.findall('.//formitaeten/formitaet')
+    formats = list(filter(
+        lambda f: f['_available'],
+        map(xml_to_format, format_nodes)))
+    ie._sort_formats(formats)
+
+    return {
+        'id': video_id,
+        'title': title,
+        'description': description,
+        'duration': duration,
+        'uploader': uploader,
+        'uploader_id': uploader_id,
+        'upload_date': upload_date,
+        'formats': formats,
+    }
+
+
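
Note: extract_from_xml_url is hoisted to module level so that ZDFChannelIE below can defer each teaser to ZDFIE via 'zdf:video:<id>' URLs. It keeps ZDF's convention that a format's basetype id packs codec, container and protocol into one underscore-separated token, parsed with the verbose regex above. A sample parse (the id below has the right shape but is made up):

    import re

    format_id = 'h264_aac_mp4_http_na_na'  # illustrative basetype
    format_m = re.match(r'''(?x)
        (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
        (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
    ''', format_id)
    (format_m.group('vcodec'), format_m.group('container'), format_m.group('proto'))
    # -> ('h264', 'mp4', 'http')
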
 class ZDFIE(InfoExtractor):
-    _VALID_URL = r'^https?://www\.zdf\.de/ZDFmediathek(?P<hash>#)?/(.*beitrag/(?:video/)?)(?P<video_id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'
+    _VALID_URL = r'(?:zdf:|zdf:video:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/(.*beitrag/(?:video/)?))(?P<id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'
 
     _TEST = {
         'url': 'http://www.zdf.de/ZDFmediathek/beitrag/video/2037704/ZDFspezial---Ende-des-Machtpokers--?bc=sts;stt',
@@ -29,81 +107,53 @@ class ZDFIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
-
+        video_id = self._match_id(url)
         xml_url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
+        return extract_from_xml_url(self, video_id, xml_url)
+
+
+class ZDFChannelIE(InfoExtractor):
+    _VALID_URL = r'(?:zdf:topic:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/.*kanaluebersicht/)(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://www.zdf.de/ZDFmediathek#/kanaluebersicht/1586442/sendung/Titanic',
+        'info_dict': {
+            'id': '1586442',
+        },
+        'playlist_count': 4,
+    }
+    _PAGE_SIZE = 50
+
+    def _fetch_page(self, channel_id, page):
+        offset = page * self._PAGE_SIZE
+        xml_url = (
+            'http://www.zdf.de/ZDFmediathek/xmlservice/web/aktuellste?ak=web&offset=%d&maxLength=%d&id=%s'
+            % (offset, self._PAGE_SIZE, channel_id))
         doc = self._download_xml(
-            xml_url, video_id,
-            note='Downloading video info',
-            errnote='Failed to download video info')
+            xml_url, channel_id,
+            note='Downloading channel info',
+            errnote='Failed to download channel info')
 
         title = doc.find('.//information/title').text
         description = doc.find('.//information/detail').text
-        duration = int(doc.find('.//details/lengthSec').text)
-        uploader_node = doc.find('.//details/originChannelTitle')
-        uploader = None if uploader_node is None else uploader_node.text
-        uploader_id_node = doc.find('.//details/originChannelId')
-        uploader_id = None if uploader_id_node is None else uploader_id_node.text
-        upload_date = unified_strdate(doc.find('.//details/airtime').text)
-
-        def xml_to_format(fnode):
-            video_url = fnode.find('url').text
-            is_available = 'http://www.metafilegenerator' not in video_url
-
-            format_id = fnode.attrib['basetype']
-            format_m = re.match(r'''(?x)
-                (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
-                (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
-            ''', format_id)
-
-            ext = format_m.group('container')
-            proto = format_m.group('proto').lower()
-
-            quality = fnode.find('./quality').text
-            abr = int(fnode.find('./audioBitrate').text) // 1000
-            vbr_node = fnode.find('./videoBitrate')
-            vbr = None if vbr_node is None else int(vbr_node.text) // 1000
-
-            width_node = fnode.find('./width')
-            width = None if width_node is None else int_or_none(width_node.text)
-            height_node = fnode.find('./height')
-            height = None if height_node is None else int_or_none(height_node.text)
-
-            format_note = ''
-            if not format_note:
-                format_note = None
-
-            return {
-                'format_id': format_id + '-' + quality,
-                'url': video_url,
-                'ext': ext,
-                'acodec': format_m.group('acodec'),
-                'vcodec': format_m.group('vcodec'),
-                'abr': abr,
-                'vbr': vbr,
-                'width': width,
-                'height': height,
-                'filesize': int_or_none(fnode.find('./filesize').text),
-                'format_note': format_note,
-                'protocol': proto,
-                '_available': is_available,
+        for asset in doc.findall('.//teasers/teaser'):
+            a_type = asset.find('./type').text
+            a_id = asset.find('./details/assetId').text
+            if a_type not in ('video', 'topic'):
+                continue
+            yield {
+                '_type': 'url',
+                'playlist_title': title,
+                'playlist_description': description,
+                'url': 'zdf:%s:%s' % (a_type, a_id),
             }
 
-        format_nodes = doc.findall('.//formitaeten/formitaet')
-        formats = list(filter(
-            lambda f: f['_available'],
-            map(xml_to_format, format_nodes)))
-
-        self._sort_formats(formats)
+    def _real_extract(self, url):
+        channel_id = self._match_id(url)
+        entries = OnDemandPagedList(
+            functools.partial(self._fetch_page, channel_id), self._PAGE_SIZE)
 
         return {
-            'id': video_id,
-            'title': title,
-            'description': description,
-            'duration': duration,
-            'uploader': uploader,
-            'uploader_id': uploader_id,
-            'upload_date': upload_date,
-            'formats': formats,
-        }
\ No newline at end of file
+            '_type': 'playlist',
+            'id': channel_id,
+            'entries': entries,
+        }
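
The channel extractor above never materializes the whole channel up front: OnDemandPagedList calls the page function only for the slices a run actually touches (e.g. with --playlist-start/--playlist-end). A minimal sketch of the pattern, assuming OnDemandPagedList is importable from youtube_dl.utils as in this tree, with a fabricated page function standing in for the aktuellste XML request:

    import functools

    from youtube_dl.utils import OnDemandPagedList

    PAGE_SIZE = 50

    def fetch_page(channel_id, page):
        # Stands in for the XML download; yields one fake entry
        # per asset on the requested page.
        offset = page * PAGE_SIZE
        for i in range(offset, offset + PAGE_SIZE):
            yield {'_type': 'url', 'url': 'zdf:video:%s-%d' % (channel_id, i)}

    entries = OnDemandPagedList(
        functools.partial(fetch_page, '1586442'), PAGE_SIZE)
    print(entries.getslice(0, 3))  # only page 0 is ever fetched
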
diff --git a/youtube_dl/extractor/zingmp3.py b/youtube_dl/extractor/zingmp3.py
new file mode 100644 (file)
index 0000000..1afbe68
--- /dev/null
@@ -0,0 +1,107 @@
+# coding=utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class ZingMp3BaseInfoExtractor(InfoExtractor):
+
+    @staticmethod
+    def _extract_item(item):
+        title = item.find('./title').text.strip()
+        source = item.find('./source').text
+        extension = item.attrib['type']
+        thumbnail = item.find('./backimage').text
+
+        return {
+            'title': title,
+            'url': source,
+            'ext': extension,
+            'thumbnail': thumbnail,
+        }
+
+    def _extract_player_xml(self, player_xml_url, id, playlist_title=None):
+        player_xml = self._download_xml(player_xml_url, id, 'Downloading Player XML')
+        items = player_xml.findall('./item')
+
+        if len(items) == 1:
+            # one single song
+            data = self._extract_item(items[0])
+            data['id'] = id
+
+            return data
+        else:
+            # playlist of songs
+            entries = []
+
+            for i, item in enumerate(items, 1):
+                entry = self._extract_item(item)
+                entry['id'] = '%s-%d' % (id, i)
+                entries.append(entry)
+
+            return {
+                '_type': 'playlist',
+                'id': id,
+                'title': playlist_title,
+                'entries': entries,
+            }
+
+
+class ZingMp3SongIE(ZingMp3BaseInfoExtractor):
+    _VALID_URL = r'https?://mp3\.zing\.vn/bai-hat/(?P<slug>[^/]+)/(?P<song_id>\w+)\.html'
+    _TESTS = [{
+        'url': 'http://mp3.zing.vn/bai-hat/Xa-Mai-Xa-Bao-Thy/ZWZB9WAB.html',
+        'md5': 'ead7ae13693b3205cbc89536a077daed',
+        'info_dict': {
+            'id': 'ZWZB9WAB',
+            'title': 'Xa Mãi Xa',
+            'ext': 'mp3',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        },
+    }]
+    IE_NAME = 'zingmp3:song'
+    IE_DESC = 'mp3.zing.vn songs'
+
+    def _real_extract(self, url):
+        matched = re.match(self._VALID_URL, url)
+        slug = matched.group('slug')
+        song_id = matched.group('song_id')
+
+        webpage = self._download_webpage(
+            'http://mp3.zing.vn/bai-hat/%s/%s.html' % (slug, song_id), song_id)
+
+        player_xml_url = self._search_regex(
+            r'&amp;xmlURL=(?P<xml_url>[^&]+)&', webpage, 'player xml url')
+
+        return self._extract_player_xml(player_xml_url, song_id)
+
+
+class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor):
+    _VALID_URL = r'https?://mp3\.zing\.vn/album/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html'
+    _TESTS = [{
+        'url': 'http://mp3.zing.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html',
+        'info_dict': {
+            '_type': 'playlist',
+            'id': 'ZWZBWDAF',
+            'title': 'Lâu Đài Tình Ái - Bằng Kiều ft. Minh Tuyết | Album 320 lossless',
+        },
+        'playlist_count': 10,
+    }]
+    IE_NAME = 'zingmp3:album'
+    IE_DESC = 'mp3.zing.vn albums'
+
+    def _real_extract(self, url):
+        matched = re.match(self._VALID_URL, url)
+        slug = matched.group('slug')
+        album_id = matched.group('album_id')
+
+        webpage = self._download_webpage(
+            'http://mp3.zing.vn/album/%s/%s.html' % (slug, album_id), album_id)
+        player_xml_url = self._search_regex(
+            r'&amp;xmlURL=(?P<xml_url>[^&]+)&', webpage, 'player xml url')
+
+        return self._extract_player_xml(
+            player_xml_url, album_id,
+            playlist_title=self._og_search_title(webpage))
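
Both zingmp3 extractors delegate to the same base-class helper, which returns a plain info dict for a single <item> and a playlist dict otherwise. A self-contained sketch of that dispatch, with already-parsed item dicts standing in for the player-XML download:

    def build_result(items, result_id, playlist_title=None):
        # Mirrors ZingMp3BaseInfoExtractor._extract_player_xml: one item
        # becomes a plain info dict, several become a playlist.
        if len(items) == 1:
            data = dict(items[0])
            data['id'] = result_id
            return data
        return {
            '_type': 'playlist',
            'id': result_id,
            'title': playlist_title,
            'entries': [dict(item, id='%s-%d' % (result_id, i))
                        for i, item in enumerate(items, 1)],
        }

    song = build_result([{'title': 'Xa Mãi Xa', 'url': 'http://e/1.mp3'}],
                        'ZWZB9WAB')
    assert song['id'] == 'ZWZB9WAB' and '_type' not in song
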
index c40cd376d120f2063bb4cf6958ca4cf701db1f00..b4617fbad0fc40323a129ce1218f9f97590c89bb 100644 (file)
@@ -61,7 +61,7 @@ class JSInterpreter(object):
             pass
 
         m = re.match(
-            r'^(?P<var>[a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$',
+            r'^(?P<var>[$a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$',
             expr)
         if m:
             variable = m.group('var')
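
The one-character change above matters because `$` is a legal identifier character in JavaScript, and obfuscated players routinely use names like `$a`. With the old character class the member-access branch simply never matched such expressions. A quick check of the widened pattern (example expression made up):

    import re

    expr_re = re.compile(
        r'^(?P<var>[$a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$')

    m = expr_re.match('$player.split("")')
    assert m and m.group('var') == '$player' and m.group('args') == '""'
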
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
new file mode 100644 (file)
index 0000000..21c4521
--- /dev/null
@@ -0,0 +1,668 @@
+from __future__ import unicode_literals
+
+import os.path
+import optparse
+import shlex
+import sys
+
+from .compat import (
+    compat_expanduser,
+    compat_getenv,
+    compat_kwargs,
+)
+from .utils import (
+    get_term_width,
+    write_string,
+)
+from .version import __version__
+
+
+def parseOpts(overrideArguments=None):
+    def _readOptions(filename_bytes, default=[]):
+        try:
+            optionf = open(filename_bytes)
+        except IOError:
+            return default  # silently skip if file is not present
+        try:
+            res = []
+            for l in optionf:
+                res += shlex.split(l, comments=True)
+        finally:
+            optionf.close()
+        return res
+
+    def _readUserConf():
+        xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
+        if xdg_config_home:
+            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
+            if not os.path.isfile(userConfFile):
+                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
+        else:
+            userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
+            if not os.path.isfile(userConfFile):
+                userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
+        userConf = _readOptions(userConfFile, None)
+
+        if userConf is None:
+            appdata_dir = compat_getenv('appdata')
+            if appdata_dir:
+                userConf = _readOptions(
+                    os.path.join(appdata_dir, 'youtube-dl', 'config'),
+                    default=None)
+                if userConf is None:
+                    userConf = _readOptions(
+                        os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
+                        default=None)
+
+        if userConf is None:
+            userConf = _readOptions(
+                os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
+                default=None)
+        if userConf is None:
+            userConf = _readOptions(
+                os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
+                default=None)
+
+        if userConf is None:
+            userConf = []
+
+        return userConf
+
+    def _format_option_string(option):
+        ''' ('-o', '--option') -> -o, --option METAVAR'''
+
+        opts = []
+
+        if option._short_opts:
+            opts.append(option._short_opts[0])
+        if option._long_opts:
+            opts.append(option._long_opts[0])
+        if len(opts) > 1:
+            opts.insert(1, ', ')
+
+        if option.takes_value():
+            opts.append(' %s' % option.metavar)
+
+        return "".join(opts)
+
+    def _comma_separated_values_options_callback(option, opt_str, value, parser):
+        setattr(parser.values, option.dest, value.split(','))
+
+    def _hide_login_info(opts):
+        opts = list(opts)
+        for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
+            try:
+                i = opts.index(private_opt)
+                opts[i + 1] = 'PRIVATE'
+            except ValueError:
+                pass
+        return opts
+
+    # No need to wrap help messages if we're on a wide console
+    columns = get_term_width()
+    max_width = columns if columns else 80
+    max_help_position = 80
+
+    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
+    fmt.format_option_strings = _format_option_string
+
+    kw = {
+        'version': __version__,
+        'formatter': fmt,
+        'usage': '%prog [options] url [url...]',
+        'conflict_handler': 'resolve',
+    }
+
+    parser = optparse.OptionParser(**compat_kwargs(kw))
+
+    general = optparse.OptionGroup(parser, 'General Options')
+    general.add_option(
+        '-h', '--help',
+        action='help',
+        help='print this help text and exit')
+    general.add_option(
+        '-v', '--version',
+        action='version',
+        help='print program version and exit')
+    general.add_option(
+        '-U', '--update',
+        action='store_true', dest='update_self',
+        help='update this program to the latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
+    general.add_option(
+        '-i', '--ignore-errors',
+        action='store_true', dest='ignoreerrors', default=False,
+        help='continue on download errors, for example to skip unavailable videos in a playlist')
+    general.add_option(
+        '--abort-on-error',
+        action='store_false', dest='ignoreerrors',
+        help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
+    general.add_option(
+        '--dump-user-agent',
+        action='store_true', dest='dump_user_agent', default=False,
+        help='display the current browser identification')
+    general.add_option(
+        '--list-extractors',
+        action='store_true', dest='list_extractors', default=False,
+        help='List all supported extractors and the URLs they would handle')
+    general.add_option(
+        '--extractor-descriptions',
+        action='store_true', dest='list_extractor_descriptions', default=False,
+        help='Output descriptions of all supported extractors')
+    general.add_option(
+        '--proxy', dest='proxy',
+        default=None, metavar='URL',
+        help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
+    general.add_option(
+        '--socket-timeout',
+        dest='socket_timeout', type=float, default=None,
+        help='Time to wait before giving up, in seconds')
+    general.add_option(
+        '--default-search',
+        dest='default_search', metavar='PREFIX',
+        help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
+    general.add_option(
+        '--ignore-config',
+        action='store_true',
+        help='Do not read configuration files. '
+        'When given in the global configuration file /etc/youtube-dl.conf: '
+        'Do not read the user configuration in ~/.config/youtube-dl/config '
+        '(%APPDATA%/youtube-dl/config.txt on Windows)')
+    general.add_option(
+        '--flat-playlist',
+        action='store_const', dest='extract_flat', const='in_playlist',
+        default=False,
+        help='Do not extract the videos of a playlist, only list them.')
+
+    selection = optparse.OptionGroup(parser, 'Video Selection')
+    selection.add_option(
+        '--playlist-start',
+        dest='playliststart', metavar='NUMBER', default=1, type=int,
+        help='playlist video to start at (default is %default)')
+    selection.add_option(
+        '--playlist-end',
+        dest='playlistend', metavar='NUMBER', default=None, type=int,
+        help='playlist video to end at (default is last)')
+    selection.add_option(
+        '--match-title',
+        dest='matchtitle', metavar='REGEX',
+        help='download only matching titles (regex or caseless sub-string)')
+    selection.add_option(
+        '--reject-title',
+        dest='rejecttitle', metavar='REGEX',
+        help='skip download for matching titles (regex or caseless sub-string)')
+    selection.add_option(
+        '--max-downloads',
+        dest='max_downloads', metavar='NUMBER', type=int, default=None,
+        help='Abort after downloading NUMBER files')
+    selection.add_option(
+        '--min-filesize',
+        metavar='SIZE', dest='min_filesize', default=None,
+        help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
+    selection.add_option(
+        '--max-filesize',
+        metavar='SIZE', dest='max_filesize', default=None,
+        help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
+    selection.add_option(
+        '--date',
+        metavar='DATE', dest='date', default=None,
+        help='download only videos uploaded in this date')
+    selection.add_option(
+        '--datebefore',
+        metavar='DATE', dest='datebefore', default=None,
+        help='download only videos uploaded on or before this date (i.e. inclusive)')
+    selection.add_option(
+        '--dateafter',
+        metavar='DATE', dest='dateafter', default=None,
+        help='download only videos uploaded on or after this date (i.e. inclusive)')
+    selection.add_option(
+        '--min-views',
+        metavar='COUNT', dest='min_views', default=None, type=int,
+        help='Do not download any videos with less than COUNT views',)
+    selection.add_option(
+        '--max-views',
+        metavar='COUNT', dest='max_views', default=None, type=int,
+        help='Do not download any videos with more than COUNT views')
+    selection.add_option(
+        '--no-playlist',
+        action='store_true', dest='noplaylist', default=False,
+        help='If the URL refers to a video and a playlist, download only the video.')
+    selection.add_option(
+        '--age-limit',
+        metavar='YEARS', dest='age_limit', default=None, type=int,
+        help='download only videos suitable for the given age')
+    selection.add_option(
+        '--download-archive', metavar='FILE',
+        dest='download_archive',
+        help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
+    selection.add_option(
+        '--include-ads',
+        dest='include_ads', action='store_true',
+        help='Download advertisements as well (experimental)')
+
+    authentication = optparse.OptionGroup(parser, 'Authentication Options')
+    authentication.add_option(
+        '-u', '--username',
+        dest='username', metavar='USERNAME',
+        help='login with this account ID')
+    authentication.add_option(
+        '-p', '--password',
+        dest='password', metavar='PASSWORD',
+        help='account password')
+    authentication.add_option(
+        '-2', '--twofactor',
+        dest='twofactor', metavar='TWOFACTOR',
+        help='two-factor auth code')
+    authentication.add_option(
+        '-n', '--netrc',
+        action='store_true', dest='usenetrc', default=False,
+        help='use .netrc authentication data')
+    authentication.add_option(
+        '--video-password',
+        dest='videopassword', metavar='PASSWORD',
+        help='video password (vimeo, smotri)')
+
+    video_format = optparse.OptionGroup(parser, 'Video Format Options')
+    video_format.add_option(
+        '-f', '--format',
+        action='store', dest='format', metavar='FORMAT', default=None,
+        help=(
+            'video format code, specify the order of preference using'
+            ' slashes: -f 22/17/18 .  -f mp4 , -f m4a and  -f flv  are also'
+            ' supported. You can also use the special names "best",'
+            ' "bestvideo", "bestaudio", "worst", "worstvideo" and'
+            ' "worstaudio". By default, youtube-dl will pick the best quality.'
+            ' Use commas to download multiple audio formats, such as'
+            ' -f  136/137/mp4/bestvideo,140/m4a/bestaudio.'
+            ' You can merge the video and audio of two formats into a single'
+            ' file using -f <video-format>+<audio-format> (requires ffmpeg or'
+            ' avconv), for example -f bestvideo+bestaudio.'))
+    video_format.add_option(
+        '--all-formats',
+        action='store_const', dest='format', const='all',
+        help='download all available video formats')
+    video_format.add_option(
+        '--prefer-free-formats',
+        action='store_true', dest='prefer_free_formats', default=False,
+        help='prefer free video formats unless a specific one is requested')
+    video_format.add_option(
+        '--max-quality',
+        action='store', dest='format_limit', metavar='FORMAT',
+        help='highest quality format to download')
+    video_format.add_option(
+        '-F', '--list-formats',
+        action='store_true', dest='listformats',
+        help='list all available formats')
+    video_format.add_option(
+        '--youtube-include-dash-manifest',
+        action='store_true', dest='youtube_include_dash_manifest', default=True,
+        help=optparse.SUPPRESS_HELP)
+    video_format.add_option(
+        '--youtube-skip-dash-manifest',
+        action='store_false', dest='youtube_include_dash_manifest',
+        help='Do not download the DASH manifest on YouTube videos')
+
+    subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
+    subtitles.add_option(
+        '--write-sub', '--write-srt',
+        action='store_true', dest='writesubtitles', default=False,
+        help='write subtitle file')
+    subtitles.add_option(
+        '--write-auto-sub', '--write-automatic-sub',
+        action='store_true', dest='writeautomaticsub', default=False,
+        help='write automatic subtitle file (youtube only)')
+    subtitles.add_option(
+        '--all-subs',
+        action='store_true', dest='allsubtitles', default=False,
+        help='downloads all the available subtitles of the video')
+    subtitles.add_option(
+        '--list-subs',
+        action='store_true', dest='listsubtitles', default=False,
+        help='lists all available subtitles for the video')
+    subtitles.add_option(
+        '--sub-format',
+        action='store', dest='subtitlesformat', metavar='FORMAT', default='srt',
+        help='subtitle format (default=srt) ([sbv/vtt] youtube only)')
+    subtitles.add_option(
+        '--sub-lang', '--sub-langs', '--srt-lang',
+        action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
+        default=[], callback=_comma_separated_values_options_callback,
+        help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
+
+    downloader = optparse.OptionGroup(parser, 'Download Options')
+    downloader.add_option(
+        '-r', '--rate-limit',
+        dest='ratelimit', metavar='LIMIT',
+        help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
+    downloader.add_option(
+        '-R', '--retries',
+        dest='retries', metavar='RETRIES', default=10,
+        help='number of retries (default is %default)')
+    downloader.add_option(
+        '--buffer-size',
+        dest='buffersize', metavar='SIZE', default='1024',
+        help='size of download buffer (e.g. 1024 or 16K) (default is %default)')
+    downloader.add_option(
+        '--no-resize-buffer',
+        action='store_true', dest='noresizebuffer', default=False,
+        help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
+    downloader.add_option(
+        '--test',
+        action='store_true', dest='test', default=False,
+        help=optparse.SUPPRESS_HELP)
+    downloader.add_option(
+        '--playlist-reverse',
+        action='store_true',
+        help='Download playlist videos in reverse order')
+
+    workarounds = optparse.OptionGroup(parser, 'Workarounds')
+    workarounds.add_option(
+        '--encoding',
+        dest='encoding', metavar='ENCODING',
+        help='Force the specified encoding (experimental)')
+    workarounds.add_option(
+        '--no-check-certificate',
+        action='store_true', dest='no_check_certificate', default=False,
+        help='Suppress HTTPS certificate validation.')
+    workarounds.add_option(
+        '--prefer-insecure',
+        '--prefer-unsecure', action='store_true', dest='prefer_insecure',
+        help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
+    workarounds.add_option(
+        '--user-agent',
+        metavar='UA', dest='user_agent',
+        help='specify a custom user agent')
+    workarounds.add_option(
+        '--referer',
+        metavar='URL', dest='referer', default=None,
+        help='specify a custom referer, use if the video access is restricted to one domain',
+    )
+    workarounds.add_option(
+        '--add-header',
+        metavar='FIELD:VALUE', dest='headers', action='append',
+        help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
+    )
+    workarounds.add_option(
+        '--bidi-workaround',
+        dest='bidi_workaround', action='store_true',
+        help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
+
+    verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
+    verbosity.add_option(
+        '-q', '--quiet',
+        action='store_true', dest='quiet', default=False,
+        help='activates quiet mode')
+    verbosity.add_option(
+        '--no-warnings',
+        dest='no_warnings', action='store_true', default=False,
+        help='Ignore warnings')
+    verbosity.add_option(
+        '-s', '--simulate',
+        action='store_true', dest='simulate', default=False,
+        help='do not download the video and do not write anything to disk',)
+    verbosity.add_option(
+        '--skip-download',
+        action='store_true', dest='skip_download', default=False,
+        help='do not download the video',)
+    verbosity.add_option(
+        '-g', '--get-url',
+        action='store_true', dest='geturl', default=False,
+        help='simulate, quiet but print URL')
+    verbosity.add_option(
+        '-e', '--get-title',
+        action='store_true', dest='gettitle', default=False,
+        help='simulate, quiet but print title')
+    verbosity.add_option(
+        '--get-id',
+        action='store_true', dest='getid', default=False,
+        help='simulate, quiet but print id')
+    verbosity.add_option(
+        '--get-thumbnail',
+        action='store_true', dest='getthumbnail', default=False,
+        help='simulate, quiet but print thumbnail URL')
+    verbosity.add_option(
+        '--get-description',
+        action='store_true', dest='getdescription', default=False,
+        help='simulate, quiet but print video description')
+    verbosity.add_option(
+        '--get-duration',
+        action='store_true', dest='getduration', default=False,
+        help='simulate, quiet but print video length')
+    verbosity.add_option(
+        '--get-filename',
+        action='store_true', dest='getfilename', default=False,
+        help='simulate, quiet but print output filename')
+    verbosity.add_option(
+        '--get-format',
+        action='store_true', dest='getformat', default=False,
+        help='simulate, quiet but print output format')
+    verbosity.add_option(
+        '-j', '--dump-json',
+        action='store_true', dest='dumpjson', default=False,
+        help='simulate, quiet but print JSON information. See --output for a description of available keys.')
+    verbosity.add_option(
+        '-J', '--dump-single-json',
+        action='store_true', dest='dump_single_json', default=False,
+        help='simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
+    verbosity.add_option(
+        '--newline',
+        action='store_true', dest='progress_with_newline', default=False,
+        help='output progress bar as new lines')
+    verbosity.add_option(
+        '--no-progress',
+        action='store_true', dest='noprogress', default=False,
+        help='do not print progress bar')
+    verbosity.add_option(
+        '--console-title',
+        action='store_true', dest='consoletitle', default=False,
+        help='display progress in console titlebar')
+    verbosity.add_option(
+        '-v', '--verbose',
+        action='store_true', dest='verbose', default=False,
+        help='print various debugging information')
+    verbosity.add_option(
+        '--dump-intermediate-pages',
+        action='store_true', dest='dump_intermediate_pages', default=False,
+        help='print downloaded pages to debug problems (very verbose)')
+    verbosity.add_option(
+        '--write-pages',
+        action='store_true', dest='write_pages', default=False,
+        help='Write downloaded intermediary pages to files in the current directory to debug problems')
+    verbosity.add_option(
+        '--youtube-print-sig-code',
+        action='store_true', dest='youtube_print_sig_code', default=False,
+        help=optparse.SUPPRESS_HELP)
+    verbosity.add_option(
+        '--print-traffic',
+        dest='debug_printtraffic', action='store_true', default=False,
+        help='Display sent and read HTTP traffic')
+
+    filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
+    filesystem.add_option(
+        '-a', '--batch-file',
+        dest='batchfile', metavar='FILE',
+        help='file containing URLs to download (\'-\' for stdin)')
+    filesystem.add_option(
+        '--id', default=False,
+        action='store_true', dest='useid', help='use only video ID in file name')
+    filesystem.add_option(
+        '-o', '--output',
+        dest='outtmpl', metavar='TEMPLATE',
+        help=('output filename template. Use %(title)s to get the title, '
+              '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
+              '%(autonumber)s to get an automatically incremented number, '
+              '%(ext)s for the filename extension, '
+              '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
+              '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
+              '%(upload_date)s for the upload date (YYYYMMDD), '
+              '%(extractor)s for the provider (youtube, metacafe, etc), '
+              '%(id)s for the video id, '
+              '%(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in, '
+              '%(playlist_index)s for the position in the playlist. '
+              '%(height)s and %(width)s for the width and height of the video format. '
+              '%(resolution)s for a textual description of the resolution of the video format. '
+              '%% for a literal percent. '
+              'Use - to output to stdout. Can also be used to download to a different directory, '
+              'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
+    filesystem.add_option(
+        '--autonumber-size',
+        dest='autonumber_size', metavar='NUMBER',
+        help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
+    filesystem.add_option(
+        '--restrict-filenames',
+        action='store_true', dest='restrictfilenames', default=False,
+        help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
+    filesystem.add_option(
+        '-A', '--auto-number',
+        action='store_true', dest='autonumber', default=False,
+        help='[deprecated; use  -o "%(autonumber)s-%(title)s.%(ext)s" ] number downloaded files starting from 00000')
+    filesystem.add_option(
+        '-t', '--title',
+        action='store_true', dest='usetitle', default=False,
+        help='[deprecated] use title in file name (default)')
+    filesystem.add_option(
+        '-l', '--literal', default=False,
+        action='store_true', dest='usetitle',
+        help='[deprecated] alias of --title')
+    filesystem.add_option(
+        '-w', '--no-overwrites',
+        action='store_true', dest='nooverwrites', default=False,
+        help='do not overwrite files')
+    filesystem.add_option(
+        '-c', '--continue',
+        action='store_true', dest='continue_dl', default=True,
+        help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
+    filesystem.add_option(
+        '--no-continue',
+        action='store_false', dest='continue_dl',
+        help='do not resume partially downloaded files (restart from beginning)')
+    filesystem.add_option(
+        '--no-part',
+        action='store_true', dest='nopart', default=False,
+        help='do not use .part files - write directly into output file')
+    filesystem.add_option(
+        '--no-mtime',
+        action='store_false', dest='updatetime', default=True,
+        help='do not use the Last-modified header to set the file modification time')
+    filesystem.add_option(
+        '--write-description',
+        action='store_true', dest='writedescription', default=False,
+        help='write video description to a .description file')
+    filesystem.add_option(
+        '--write-info-json',
+        action='store_true', dest='writeinfojson', default=False,
+        help='write video metadata to a .info.json file')
+    filesystem.add_option(
+        '--write-annotations',
+        action='store_true', dest='writeannotations', default=False,
+        help='write video annotations to a .annotation file')
+    filesystem.add_option(
+        '--write-thumbnail',
+        action='store_true', dest='writethumbnail', default=False,
+        help='write thumbnail image to disk')
+    filesystem.add_option(
+        '--load-info',
+        dest='load_info_filename', metavar='FILE',
+        help='json file containing the video information (created with the "--write-info-json" option)')
+    filesystem.add_option(
+        '--cookies',
+        dest='cookiefile', metavar='FILE',
+        help='file to read cookies from and dump cookie jar in')
+    filesystem.add_option(
+        '--cache-dir', dest='cachedir', default=None, metavar='DIR',
+        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
+    filesystem.add_option(
+        '--no-cache-dir', action='store_const', const=False, dest='cachedir',
+        help='Disable filesystem caching')
+    filesystem.add_option(
+        '--rm-cache-dir',
+        action='store_true', dest='rm_cachedir',
+        help='Delete all filesystem cache files')
+
+    postproc = optparse.OptionGroup(parser, 'Post-processing Options')
+    postproc.add_option(
+        '-x', '--extract-audio',
+        action='store_true', dest='extractaudio', default=False,
+        help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
+    postproc.add_option(
+        '--audio-format', metavar='FORMAT', dest='audioformat', default='best',
+        help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
+    postproc.add_option(
+        '--audio-quality', metavar='QUALITY',
+        dest='audioquality', default='5',
+        help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
+    postproc.add_option(
+        '--recode-video',
+        metavar='FORMAT', dest='recodevideo', default=None,
+        help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
+    postproc.add_option(
+        '-k', '--keep-video',
+        action='store_true', dest='keepvideo', default=False,
+        help='keeps the video file on disk after the post-processing; the video is erased by default')
+    postproc.add_option(
+        '--no-post-overwrites',
+        action='store_true', dest='nopostoverwrites', default=False,
+        help='do not overwrite post-processed files; the post-processed files are overwritten by default')
+    postproc.add_option(
+        '--embed-subs',
+        action='store_true', dest='embedsubtitles', default=False,
+        help='embed subtitles in the video (only for mp4 videos)')
+    postproc.add_option(
+        '--embed-thumbnail',
+        action='store_true', dest='embedthumbnail', default=False,
+        help='embed thumbnail in the audio as cover art')
+    postproc.add_option(
+        '--add-metadata',
+        action='store_true', dest='addmetadata', default=False,
+        help='write metadata to the video file')
+    postproc.add_option(
+        '--xattrs',
+        action='store_true', dest='xattrs', default=False,
+        help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
+    postproc.add_option(
+        '--prefer-avconv',
+        action='store_false', dest='prefer_ffmpeg',
+        help='Prefer avconv over ffmpeg for running the postprocessors (default)')
+    postproc.add_option(
+        '--prefer-ffmpeg',
+        action='store_true', dest='prefer_ffmpeg',
+        help='Prefer ffmpeg over avconv for running the postprocessors')
+    postproc.add_option(
+        '--exec',
+        metavar='CMD', dest='exec_cmd',
+        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
+
+    parser.add_option_group(general)
+    parser.add_option_group(selection)
+    parser.add_option_group(downloader)
+    parser.add_option_group(filesystem)
+    parser.add_option_group(verbosity)
+    parser.add_option_group(workarounds)
+    parser.add_option_group(video_format)
+    parser.add_option_group(subtitles)
+    parser.add_option_group(authentication)
+    parser.add_option_group(postproc)
+
+    if overrideArguments is not None:
+        opts, args = parser.parse_args(overrideArguments)
+        if opts.verbose:
+            write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
+    else:
+        commandLineConf = sys.argv[1:]
+        if '--ignore-config' in commandLineConf:
+            systemConf = []
+            userConf = []
+        else:
+            systemConf = _readOptions('/etc/youtube-dl.conf')
+            if '--ignore-config' in systemConf:
+                userConf = []
+            else:
+                userConf = _readUserConf()
+        argv = systemConf + userConf + commandLineConf
+
+        opts, args = parser.parse_args(argv)
+        if opts.verbose:
+            write_string('[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
+            write_string('[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
+            write_string('[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
+
+    return parser, opts, args
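
parseOpts assembles the effective argument vector as systemConf + userConf + commandLineConf, so precedence falls out of optparse itself: for ordinary store actions the last occurrence wins, letting the command line override the user config and the user config override /etc/youtube-dl.conf. A two-line demonstration of that property (values are hypothetical):

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('-r', '--rate-limit', dest='ratelimit')
    # Simulates '-r 50K' from a config file followed by '-r 4.2M' on
    # the command line: the later (command-line) value wins.
    opts, args = parser.parse_args(['-r', '50K', '-r', '4.2M'])
    assert opts.ratelimit == '4.2M'
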
index 08e6ddd00cbfe5691fb14943d6b2217748e96398..fb367ebe4474063a279fcd096b21461d11deafe8 100644 (file)
@@ -1,22 +1,27 @@
+from __future__ import unicode_literals
 
 from .atomicparsley import AtomicParsleyPP
 from .ffmpeg import (
+    FFmpegPostProcessor,
     FFmpegAudioFixPP,
+    FFmpegEmbedSubtitlePP,
+    FFmpegExtractAudioPP,
     FFmpegMergerPP,
     FFmpegMetadataPP,
     FFmpegVideoConvertor,
-    FFmpegExtractAudioPP,
-    FFmpegEmbedSubtitlePP,
 )
 from .xattrpp import XAttrMetadataPP
+from .execafterdownload import ExecAfterDownloadPP
 
 __all__ = [
     'AtomicParsleyPP',
+    'ExecAfterDownloadPP',
     'FFmpegAudioFixPP',
+    'FFmpegEmbedSubtitlePP',
+    'FFmpegExtractAudioPP',
     'FFmpegMergerPP',
     'FFmpegMetadataPP',
+    'FFmpegPostProcessor',
     'FFmpegVideoConvertor',
-    'FFmpegExtractAudioPP',
-    'FFmpegEmbedSubtitlePP',
     'XAttrMetadataPP',
 ]
index 765b2d9ee7c834ee1d03be6fc4e20d7628ad0f76..448ccc5f342e42959aae0619854fa80d1e1cd978 100644 (file)
@@ -6,10 +6,11 @@ import os
 import subprocess
 
 from .common import PostProcessor
-
+from ..compat import (
+    compat_urlretrieve,
+)
 from ..utils import (
     check_executable,
-    compat_urlretrieve,
     encodeFilename,
     PostProcessingError,
     prepend_extension,
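
This hunk is part of a tree-wide move in the same merge: Python 2/3 shims such as compat_urlretrieve now come from the new youtube_dl.compat module, while project-specific helpers stay in youtube_dl.utils. The import split, shown in isolation:

    # Compatibility shims (Python 2/3 differences) live in compat ...
    from youtube_dl.compat import compat_urlretrieve
    # ... while youtube-dl's own helpers remain in utils.
    from youtube_dl.utils import check_executable, encodeFilename
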
index 788f94d021fa71648c3c5f521f8344f94d981e61..e54ae678da17bef5c5848bc7165d42d5eec912a4 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 from ..utils import PostProcessingError
 
 
diff --git a/youtube_dl/postprocessor/execafterdownload.py b/youtube_dl/postprocessor/execafterdownload.py
new file mode 100644 (file)
index 0000000..75c0f7b
--- /dev/null
@@ -0,0 +1,28 @@
+from __future__ import unicode_literals
+
+import subprocess
+
+from .common import PostProcessor
+from ..compat import shlex_quote
+from ..utils import PostProcessingError
+
+
+class ExecAfterDownloadPP(PostProcessor):
+    def __init__(self, downloader=None, verboseOutput=None, exec_cmd=None):
+        self.verboseOutput = verboseOutput
+        self.exec_cmd = exec_cmd
+
+    def run(self, information):
+        cmd = self.exec_cmd
+        if '{}' not in cmd:
+            cmd += ' {}'
+
+        cmd = cmd.replace('{}', shlex_quote(information['filepath']))
+
+        self._downloader.to_screen("[exec] Executing command: %s" % cmd)
+        retCode = subprocess.call(cmd, shell=True)
+        if retCode != 0:
+            raise PostProcessingError(
+                'Command returned error code %d' % retCode)
+
+        return None, information  # by default, keep file and do nothing
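
The new post-processor appends '{}' when the template lacks it and substitutes the shell-quoted path, so filenames containing spaces or quotes cannot break the command. Tracing the substitution by hand (command template and filename are made up):

    from youtube_dl.compat import shlex_quote

    cmd = 'adb push {} /sdcard/Music/ && rm {}'
    filepath = "My Song (live).mp3"
    if '{}' not in cmd:
        cmd += ' {}'
    print(cmd.replace('{}', shlex_quote(filepath)))
    # -> adb push 'My Song (live).mp3' /sdcard/Music/ && rm 'My Song (live).mp3'
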
index 8c5f7c43b75b17466a91a2b54c928ec8ca2298f3..965ded4c1590eb3cccfebcfc3fd460f5a83d1c90 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import subprocess
 import sys
@@ -6,11 +8,14 @@ import time
 
 from .common import AudioConversionError, PostProcessor
 
-from ..utils import (
-    check_executable,
+from ..compat import (
     compat_subprocess_get_DEVNULL,
+)
+from ..utils import (
     encodeArgument,
     encodeFilename,
+    get_exe_version,
+    is_outdated_version,
     PostProcessingError,
     prepend_extension,
     shell_quote,
@@ -25,36 +30,63 @@ class FFmpegPostProcessorError(PostProcessingError):
 class FFmpegPostProcessor(PostProcessor):
     def __init__(self, downloader=None, deletetempfiles=False):
         PostProcessor.__init__(self, downloader)
-        self._exes = self.detect_executables()
+        self._versions = self.get_versions()
         self._deletetempfiles = deletetempfiles
 
+    def check_version(self):
+        if not self._executable:
+            raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.')
+
+        required_version = '10-0' if self._uses_avconv() else '1.0'
+        if is_outdated_version(
+                self._versions[self._executable], required_version):
+            warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
+                self._executable, self._executable, required_version)
+            if self._downloader:
+                self._downloader.report_warning(warning)
+
     @staticmethod
-    def detect_executables():
+    def get_versions():
         programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
-        return dict((program, check_executable(program, ['-version'])) for program in programs)
+        return dict((p, get_exe_version(p, args=['-version'])) for p in programs)
 
-    def _get_executable(self):
+    @property
+    def _executable(self):
         if self._downloader.params.get('prefer_ffmpeg', False):
-            return self._exes['ffmpeg'] or self._exes['avconv']
+            prefs = ('ffmpeg', 'avconv')
         else:
-            return self._exes['avconv'] or self._exes['ffmpeg']
+            prefs = ('avconv', 'ffmpeg')
+        for p in prefs:
+            if self._versions[p]:
+                return p
+        return None
+
+    @property
+    def _probe_executable(self):
+        if self._downloader.params.get('prefer_ffmpeg', False):
+            prefs = ('ffprobe', 'avprobe')
+        else:
+            prefs = ('avprobe', 'ffprobe')
+        for p in prefs:
+            if self._versions[p]:
+                return p
+        return None
 
     def _uses_avconv(self):
-        return self._get_executable() == self._exes['avconv']
+        return self._executable == 'avconv'
 
     def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
-        if not self._get_executable():
-            raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
+        self.check_version()
 
         files_cmd = []
         for path in input_paths:
             files_cmd.extend(['-i', encodeFilename(path, True)])
-        cmd = ([self._get_executable(), '-y'] + files_cmd
+        cmd = ([self._executable, '-y'] + files_cmd
                + [encodeArgument(o) for o in opts] +
                [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
 
         if self._downloader.params.get('verbose', False):
-            self._downloader.to_screen(u'[debug] ffmpeg command line: %s' % shell_quote(cmd))
+            self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
         p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         stdout, stderr = p.communicate()
         if p.returncode != 0:
@@ -70,8 +102,8 @@ class FFmpegPostProcessor(PostProcessor):
 
     def _ffmpeg_filename_argument(self, fn):
         # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
-        if fn.startswith(u'-'):
-            return u'./' + fn
+        if fn.startswith('-'):
+            return './' + fn
         return fn
 
 
@@ -85,11 +117,12 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
         self._nopostoverwrites = nopostoverwrites
 
     def get_audio_codec(self, path):
-        if not self._exes['ffprobe'] and not self._exes['avprobe']:
-            raise PostProcessingError(u'ffprobe or avprobe not found. Please install one.')
+
+        if not self._probe_executable:
+            raise PostProcessingError('ffprobe or avprobe not found. Please install one.')
         try:
             cmd = [
-                self._exes['avprobe'] or self._exes['ffprobe'],
+                self._probe_executable,
                 '-show_streams',
                 encodeFilename(self._ffmpeg_filename_argument(path), True)]
             handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
@@ -122,7 +155,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
 
         filecodec = self.get_audio_codec(path)
         if filecodec is None:
-            raise PostProcessingError(u'WARNING: unable to obtain file audio codec with ffprobe')
+            raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
 
         uses_avconv = self._uses_avconv()
         more_opts = []
@@ -171,7 +204,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
                 extension = 'wav'
                 more_opts += ['-f', 'wav']
 
-        prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
+        prefix, sep, ext = path.rpartition('.')  # not os.path.splitext, since the latter does not work on unicode in all setups
         new_path = prefix + sep + extension
 
         # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
@@ -180,16 +213,16 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
 
         try:
             if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
-                self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
+                self._downloader.to_screen('[youtube] Post-process file %s exists, skipping' % new_path)
             else:
-                self._downloader.to_screen(u'[' + self._get_executable() + '] Destination: ' + new_path)
+                self._downloader.to_screen('[' + self._executable + '] Destination: ' + new_path)
                 self.run_ffmpeg(path, new_path, acodec, more_opts)
         except:
-            etype,e,tb = sys.exc_info()
+            etype, e, tb = sys.exc_info()
             if isinstance(e, AudioConversionError):
-                msg = u'audio conversion failed: ' + e.msg
+                msg = 'audio conversion failed: ' + e.msg
             else:
-                msg = u'error running ' + self._get_executable()
+                msg = 'error running ' + self._executable
             raise PostProcessingError(msg)
 
         # Try to update the date time for extracted audio file.
@@ -197,30 +230,30 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
             try:
                 os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
             except:
-                self._downloader.report_warning(u'Cannot update utime of audio file')
+                self._downloader.report_warning('Cannot update utime of audio file')
 
         information['filepath'] = new_path
-        return self._nopostoverwrites,information
+        return self._nopostoverwrites, information
 
 
 class FFmpegVideoConvertor(FFmpegPostProcessor):
-    def __init__(self, downloader=None,preferedformat=None):
+    def __init__(self, downloader=None, preferedformat=None):
         super(FFmpegVideoConvertor, self).__init__(downloader)
-        self._preferedformat=preferedformat
+        self._preferedformat = preferedformat
 
     def run(self, information):
         path = information['filepath']
-        prefix, sep, ext = path.rpartition(u'.')
+        prefix, sep, ext = path.rpartition('.')
         outpath = prefix + sep + self._preferedformat
         if information['ext'] == self._preferedformat:
-            self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
-            return True,information
-        self._downloader.to_screen(u'['+'ffmpeg'+'] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) +outpath)
+            self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
+            return True, information
+        self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
         self.run_ffmpeg(path, outpath, [])
         information['filepath'] = outpath
         information['format'] = self._preferedformat
         information['ext'] = self._preferedformat
-        return False,information
+        return False, information
 
 
 class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
@@ -422,11 +455,11 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
         return cls._lang_map.get(code[:2])
 
     def run(self, information):
-        if information['ext'] != u'mp4':
-            self._downloader.to_screen(u'[ffmpeg] Subtitles can only be embedded in mp4 files')
+        if information['ext'] != 'mp4':
+            self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4 files')
             return True, information
         if not information.get('subtitles'):
-            self._downloader.to_screen(u'[ffmpeg] There aren\'t any subtitles to embed') 
+            self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed')
             return True, information
 
         sub_langs = [key for key in information['subtitles']]
@@ -435,14 +468,14 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
 
         opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
         for (i, lang) in enumerate(sub_langs):
-            opts.extend(['-map', '%d:0' % (i+1), '-c:s:%d' % i, 'mov_text'])
+            opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text'])
             lang_code = self._conver_lang_code(lang)
             if lang_code is not None:
                 opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
         opts.extend(['-f', 'mp4'])
 
-        temp_filename = filename + u'.temp'
-        self._downloader.to_screen(u'[ffmpeg] Embedding subtitles in \'%s\'' % filename)
+        temp_filename = filename + '.temp'
+        self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
         self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
         os.remove(encodeFilename(filename))
         os.rename(encodeFilename(temp_filename), encodeFilename(filename))
@@ -463,13 +496,13 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
             metadata['artist'] = info['uploader_id']
 
         if not metadata:
-            self._downloader.to_screen(u'[ffmpeg] There isn\'t any metadata to add')
+            self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
             return True, info
 
         filename = info['filepath']
         temp_filename = prepend_extension(filename, 'temp')
 
-        if info['ext'] == u'm4a':
+        if info['ext'] == 'm4a':
             options = ['-vn', '-acodec', 'copy']
         else:
             options = ['-c', 'copy']
@@ -477,7 +510,7 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
         for (name, value) in metadata.items():
             options.extend(['-metadata', '%s=%s' % (name, value)])
 
-        self._downloader.to_screen(u'[ffmpeg] Adding metadata to \'%s\'' % filename)
+        self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
         self.run_ffmpeg(filename, temp_filename, options)
         os.remove(encodeFilename(filename))
         os.rename(encodeFilename(temp_filename), encodeFilename(filename))
@@ -487,8 +520,8 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
 class FFmpegMergerPP(FFmpegPostProcessor):
     def run(self, info):
         filename = info['filepath']
-        args = ['-c', 'copy']
-        self._downloader.to_screen(u'[ffmpeg] Merging formats into "%s"' % filename)
+        args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest']
+        self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename)
         self.run_ffmpeg_multiple_files(info['__files_to_merge'], filename, args)
         return True, info
 
@@ -499,7 +532,7 @@ class FFmpegAudioFixPP(FFmpegPostProcessor):
         temp_filename = prepend_extension(filename, 'temp')
 
         options = ['-vn', '-acodec', 'copy']
-        self._downloader.to_screen(u'[ffmpeg] Fixing audio file "%s"' % filename)
+        self._downloader.to_screen('[ffmpeg] Fixing audio file "%s"' % filename)
         self.run_ffmpeg(filename, temp_filename, options)
 
         os.remove(encodeFilename(filename))
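
The FFmpegMergerPP change is behavioral, not cosmetic: '-map 0:v:0 -map 1:a:0' explicitly takes the video stream from the first input and the audio stream from the second, and '-shortest' stops encoding at the shorter of the two. A sketch of the command run_ffmpeg_multiple_files would assemble for a typical bestvideo+bestaudio merge (file names are illustrative):

    input_paths = ['video.f137.mp4', 'audio.f140.m4a']
    opts = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest']

    files_cmd = []
    for path in input_paths:
        files_cmd.extend(['-i', path])
    cmd = ['ffmpeg', '-y'] + files_cmd + opts + ['merged.mp4']
    print(' '.join(cmd))
    # ffmpeg -y -i video.f137.mp4 -i audio.f140.m4a -c copy \
    #     -map 0:v:0 -map 1:a:0 -shortest merged.mp4
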
index f6940940b340dea5c23e5ce118b8fcc4d7ee8574..f6c63fe97545d86947ef1ef4bf2d70e9ea7144be 100644 (file)
@@ -1,12 +1,16 @@
+from __future__ import unicode_literals
+
 import os
 import subprocess
 import sys
 
 from .common import PostProcessor
+from ..compat import (
+    subprocess_check_output
+)
 from ..utils import (
     check_executable,
     hyphenate_date,
-    subprocess_check_output
 )
 
 
@@ -106,4 +110,3 @@ class XAttrMetadataPP(PostProcessor):
         except (subprocess.CalledProcessError, OSError):
             self._downloader.report_error("This filesystem doesn't support extended attributes. (You may have to enable them in your /etc/fstab)")
             return False, info
-
index b63c65b201ec2b34c6482e8526694431041f75fe..2bd264b306f8dc8f53bf6e0b9dec97ecd6e85cb4 100644 (file)
@@ -62,15 +62,17 @@ class _ScopeDict(dict):
 
 
 class _AVMClass(object):
-    def __init__(self, name_idx, name):
+    def __init__(self, name_idx, name, static_properties=None):
         self.name_idx = name_idx
         self.name = name
         self.method_names = {}
         self.method_idxs = {}
         self.methods = {}
         self.method_pyfunctions = {}
+        self.static_properties = static_properties if static_properties else {}
 
         self.variables = _ScopeDict(self)
+        self.constants = {}
 
     def make_object(self):
         return _AVMClass_Object(self)
@@ -148,8 +150,38 @@ def _read_byte(reader):
     return res
 
 
+StringClass = _AVMClass('(no name idx)', 'String')
+ByteArrayClass = _AVMClass('(no name idx)', 'ByteArray')
+TimerClass = _AVMClass('(no name idx)', 'Timer')
+TimerEventClass = _AVMClass('(no name idx)', 'TimerEvent', {'TIMER': 'timer'})
+_builtin_classes = {
+    StringClass.name: StringClass,
+    ByteArrayClass.name: ByteArrayClass,
+    TimerClass.name: TimerClass,
+    TimerEventClass.name: TimerEventClass,
+}
+
+
+class _Undefined(object):
+    def __bool__(self):
+        return False
+    __nonzero__ = __bool__
+
+    def __hash__(self):
+        return 0
+
+    def __str__(self):
+        return 'undefined'
+    __repr__ = __str__
+
+undefined = _Undefined()
+
+
 class SWFInterpreter(object):
     def __init__(self, file_contents):
+        self._patched_functions = {
+            (TimerClass, 'addEventListener'): lambda params: undefined,
+        }
         code_tag = next(tag
                         for tag_code, tag in _extract_tags(file_contents)
                         if tag_code == 82)
@@ -170,11 +202,13 @@ class SWFInterpreter(object):
 
         # Constant pool
         int_count = u30()
+        self.constant_ints = [0]
         for _c in range(1, int_count):
-            s32()
+            self.constant_ints.append(s32())
+        self.constant_uints = [0]
         uint_count = u30()
         for _c in range(1, uint_count):
-            u32()
+            self.constant_uints.append(u32())
         double_count = u30()
         read_bytes(max(0, (double_count - 1)) * 8)
         string_count = u30()
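
Note that the constant pools are seeded with a leading 0 because ABC pool indices start at 1, entry 0 being implicitly zero. The s32()/u32() readers consumed here decode the AVM2 variable-length integer format: seven payload bits per byte, with the high bit as a continuation flag. A minimal standalone decoder for the unsigned case, independent of this file's reader helpers (a sketch per the AVM2 spec):

    def read_varuint(data, pos=0):
        # Up to five bytes; low 7 bits are payload, a set high bit
        # means another byte follows.
        result = 0
        for shift in range(0, 35, 7):
            b = data[pos]
            pos += 1
            result |= (b & 0x7f) << shift
            if not (b & 0x80):
                break
        return result, pos

    assert read_varuint(b'\xac\x02') == (300, 2)  # 0x2c + (2 << 7)
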
@@ -212,6 +246,10 @@ class SWFInterpreter(object):
                 u30()  # namespace_idx
                 name_idx = u30()
                 self.multinames.append(self.constant_strings[name_idx])
+            elif kind == 0x09:
+                name_idx = u30()
+                u30()
+                self.multinames.append(self.constant_strings[name_idx])
             else:
                 self.multinames.append(_Multiname(kind))
                 for _c2 in range(MULTINAME_SIZES[kind]):
@@ -258,13 +296,28 @@ class SWFInterpreter(object):
             kind = kind_full & 0x0f
             attrs = kind_full >> 4
             methods = {}
-            if kind in [0x00, 0x06]:  # Slot or Const
+            constants = None
+            if kind == 0x00:  # Slot
                 u30()  # Slot id
                 u30()  # type_name_idx
                 vindex = u30()
                 if vindex != 0:
                     read_byte()  # vkind
-            elif kind in [0x01, 0x02, 0x03]:  # Method / Getter / Setter
+            elif kind == 0x06:  # Const
+                u30()  # Slot id
+                u30()  # type_name_idx
+                vindex = u30()
+                vkind = 'any'
+                if vindex != 0:
+                    vkind = read_byte()
+                if vkind == 0x03:  # Constant_Int
+                    value = self.constant_ints[vindex]
+                elif vkind == 0x04:  # Constant_UInt
+                    value = self.constant_uints[vindex]
+                else:
+                    return {}, None  # Ignore silently for now
+                constants = {self.multinames[trait_name_idx]: value}
+            elif kind in (0x01, 0x02, 0x03):  # Method / Getter / Setter
                 u30()  # disp_id
                 method_idx = u30()
                 methods[self.multinames[trait_name_idx]] = method_idx
@@ -283,7 +336,7 @@ class SWFInterpreter(object):
                 for _c3 in range(metadata_count):
                     u30()  # metadata index
 
-            return methods
+            return methods, constants
 
         # Classes
         class_count = u30()
@@ -305,18 +358,22 @@ class SWFInterpreter(object):
             u30()  # iinit
             trait_count = u30()
             for _c2 in range(trait_count):
-                trait_methods = parse_traits_info()
+                trait_methods, trait_constants = parse_traits_info()
                 avm_class.register_methods(trait_methods)
+                if trait_constants:
+                    avm_class.constants.update(trait_constants)
 
         assert len(classes) == class_count
         self._classes_by_name = dict((c.name, c) for c in classes)
 
         for avm_class in classes:
-            u30()  # cinit
+            avm_class.cinit_idx = u30()
             trait_count = u30()
             for _c2 in range(trait_count):
-                trait_methods = parse_traits_info()
+                trait_methods, trait_constants = parse_traits_info()
                 avm_class.register_methods(trait_methods)
+                if trait_constants:
+                    avm_class.constants.update(trait_constants)
 
         # Scripts
         script_count = u30()
@@ -329,6 +386,7 @@ class SWFInterpreter(object):
         # Method bodies
         method_body_count = u30()
         Method = collections.namedtuple('Method', ['code', 'local_count'])
+        self._all_methods = []
         for _c in range(method_body_count):
             method_idx = u30()
             u30()  # max_stack
@@ -337,9 +395,10 @@ class SWFInterpreter(object):
             u30()  # max_scope_depth
             code_length = u30()
             code = read_bytes(code_length)
+            m = Method(code, local_count)
+            self._all_methods.append(m)
             for avm_class in classes:
                 if method_idx in avm_class.method_idxs:
-                    m = Method(code, local_count)
                     avm_class.methods[avm_class.method_idxs[method_idx]] = m
             exception_count = u30()
             for _c2 in range(exception_count):
@@ -354,13 +413,27 @@ class SWFInterpreter(object):
 
         assert p + code_reader.tell() == len(code_tag)
 
-    def extract_class(self, class_name):
+    def patch_function(self, avm_class, func_name, f):
+        self._patched_functions[(avm_class, func_name)] = f
+
+    def extract_class(self, class_name, call_cinit=True):
         try:
-            return self._classes_by_name[class_name]
+            res = self._classes_by_name[class_name]
         except KeyError:
             raise ExtractorError('Class %r not found' % class_name)
 
+        if call_cinit and hasattr(res, 'cinit_idx'):
+            res.register_methods({'$cinit': res.cinit_idx})
+            res.methods['$cinit'] = self._all_methods[res.cinit_idx]
+            cinit = self.extract_function(res, '$cinit')
+            cinit([])
+
+        return res
+
     def extract_function(self, avm_class, func_name):
+        p = self._patched_functions.get((avm_class, func_name))
+        if p:
+            return p
         if func_name in avm_class.method_pyfunctions:
             return avm_class.method_pyfunctions[func_name]
         if func_name in self._classes_by_name:
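
With the changes above, extract_class() runs a class's static initializer ($cinit) by default, so Const traits are populated before any method call, and patch_function() lets a caller stub out host APIs the interpreter cannot emulate. A hedged usage sketch (swf_bytes and the class/function names are hypothetical):

    swfi = SWFInterpreter(swf_bytes)
    # Stub a host function so $cinit can run to completion:
    swfi.patch_function(TimerClass, 'start', lambda args: undefined)
    cls = swfi.extract_class('SignatureDecipher')   # $cinit runs here
    decipher = swfi.extract_function(cls, 'decipher')
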
@@ -379,10 +452,15 @@ class SWFInterpreter(object):
             registers = [avm_class.variables] + list(args) + [None] * m.local_count
             stack = []
             scopes = collections.deque([
-                self._classes_by_name, avm_class.variables])
+                self._classes_by_name, avm_class.constants, avm_class.variables])
             while True:
                 opcode = _read_byte(coder)
-                if opcode == 17:  # iftrue
+                if opcode == 9:  # label
+                    pass  # Spec says: "Do nothing."
+                elif opcode == 16:  # jump
+                    offset = s24()
+                    coder.seek(coder.tell() + offset)
+                elif opcode == 17:  # iftrue
                     offset = s24()
                     value = stack.pop()
                     if value:
@@ -392,9 +470,40 @@ class SWFInterpreter(object):
                     value = stack.pop()
                     if not value:
                         coder.seek(coder.tell() + offset)
+                elif opcode == 19:  # ifeq
+                    offset = s24()
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    if value2 == value1:
+                        coder.seek(coder.tell() + offset)
+                elif opcode == 20:  # ifne
+                    offset = s24()
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    if value2 != value1:
+                        coder.seek(coder.tell() + offset)
+                elif opcode == 21:  # iflt
+                    offset = s24()
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    if value1 < value2:
+                        coder.seek(coder.tell() + offset)
+                elif opcode == 32:  # pushnull
+                    stack.append(None)
+                elif opcode == 33:  # pushundefined
+                    stack.append(undefined)
                 elif opcode == 36:  # pushbyte
                     v = _read_byte(coder)
                     stack.append(v)
+                elif opcode == 37:  # pushshort
+                    v = u30()
+                    stack.append(v)
+                elif opcode == 38:  # pushtrue
+                    stack.append(True)
+                elif opcode == 39:  # pushfalse
+                    stack.append(False)
+                elif opcode == 40:  # pushnan
+                    stack.append(float('NaN'))
                 elif opcode == 42:  # dup
                     value = stack[-1]
                     stack.append(value)
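
All the branch opcodes above share one convention: the operand is a signed 24-bit little-endian offset relative to the stream position after the offset itself, hence the coder.seek(coder.tell() + offset) pattern. A toy illustration on a raw byte stream (bytes hypothetical):

    import io

    coder = io.BytesIO(b'\x10\x02\x00\x00ABC')   # 0x10 = jump, offset +2
    opcode = coder.read(1)[0]
    offset = int.from_bytes(coder.read(3), 'little', signed=True)
    coder.seek(coder.tell() + offset)            # skips over 'A' and 'B'
    assert coder.read(1) == b'C'
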
@@ -419,11 +528,31 @@ class SWFInterpreter(object):
                         [stack.pop() for _ in range(arg_count)]))
                     obj = stack.pop()
 
-                    if isinstance(obj, _AVMClass_Object):
+                    if obj == StringClass:
+                        if mname == 'String':
+                            assert len(args) == 1
+                            assert isinstance(args[0], (
+                                int, compat_str, _Undefined))
+                            if args[0] == undefined:
+                                res = 'undefined'
+                            else:
+                                res = compat_str(args[0])
+                            stack.append(res)
+                            continue
+                        else:
+                            raise NotImplementedError(
+                                'Function String.%s is not yet implemented'
+                                % mname)
+                    elif isinstance(obj, _AVMClass_Object):
                         func = self.extract_function(obj.avm_class, mname)
                         res = func(args)
                         stack.append(res)
                         continue
+                    elif isinstance(obj, _AVMClass):
+                        func = self.extract_function(obj, mname)
+                        res = func(args)
+                        stack.append(res)
+                        continue
                     elif isinstance(obj, _ScopeDict):
                         if mname in obj.avm_class.method_names:
                             func = self.extract_function(obj.avm_class, mname)
@@ -442,6 +571,13 @@ class SWFInterpreter(object):
                                 res = obj.split(args[0])
                             stack.append(res)
                             continue
+                        elif mname == 'charCodeAt':
+                            assert len(args) <= 1
+                            idx = 0 if len(args) == 0 else args[0]
+                            assert isinstance(idx, int)
+                            res = ord(obj[idx])
+                            stack.append(res)
+                            continue
                     elif isinstance(obj, list):
                         if mname == 'slice':
                             assert len(args) == 1
@@ -458,9 +594,18 @@ class SWFInterpreter(object):
                     raise NotImplementedError(
                         'Unsupported property %r on %r'
                         % (mname, obj))
+                elif opcode == 71:  # returnvoid
+                    res = undefined
+                    return res
                 elif opcode == 72:  # returnvalue
                     res = stack.pop()
                     return res
+                elif opcode == 73:  # constructsuper
+                    # Not yet implemented, just hope it works without it
+                    arg_count = u30()
+                    args = list(reversed(
+                        [stack.pop() for _ in range(arg_count)]))
+                    obj = stack.pop()
                 elif opcode == 74:  # constructproperty
                     index = u30()
                     arg_count = u30()
@@ -481,6 +626,17 @@ class SWFInterpreter(object):
                     args = list(reversed(
                         [stack.pop() for _ in range(arg_count)]))
                     obj = stack.pop()
+                    if isinstance(obj, _AVMClass_Object):
+                        func = self.extract_function(obj.avm_class, mname)
+                        res = func(args)
+                        assert res is undefined
+                        continue
+                    if isinstance(obj, _ScopeDict):
+                        assert mname in obj.avm_class.method_names
+                        func = self.extract_function(obj.avm_class, mname)
+                        res = func(args)
+                        assert res is undefined
+                        continue
                     if mname == 'reverse':
                         assert isinstance(obj, list)
                         obj.reverse()
@@ -504,7 +660,10 @@ class SWFInterpreter(object):
                             break
                     else:
                         res = scopes[0]
-                    stack.append(res[mname])
+                    if mname not in res and mname in _builtin_classes:
+                        stack.append(_builtin_classes[mname])
+                    else:
+                        stack.append(res[mname])
                 elif opcode == 94:  # findproperty
                     index = u30()
                     mname = self.multinames[index]
@@ -524,9 +683,15 @@ class SWFInterpreter(object):
                             break
                     else:
                         scope = avm_class.variables
-                    # I cannot find where static variables are initialized
-                    # so let's just return None
-                    res = scope.get(mname)
+
+                    if mname in scope:
+                        res = scope[mname]
+                    elif mname in _builtin_classes:
+                        res = _builtin_classes[mname]
+                    else:
+                        # Assume uninitialized
+                        # TODO warn here
+                        res = undefined
                     stack.append(res)
                 elif opcode == 97:  # setproperty
                     index = u30()
@@ -548,22 +713,57 @@ class SWFInterpreter(object):
                     pname = self.multinames[index]
                     if pname == 'length':
                         obj = stack.pop()
-                        assert isinstance(obj, list)
+                        assert isinstance(obj, (compat_str, list))
                         stack.append(len(obj))
+                    elif isinstance(pname, compat_str):  # Member access
+                        obj = stack.pop()
+                        if isinstance(obj, _AVMClass):
+                            res = obj.static_properties[pname]
+                            stack.append(res)
+                            continue
+
+                        assert isinstance(obj, (dict, _ScopeDict)),\
+                            'Accessing member %r on %r' % (pname, obj)
+                        res = obj.get(pname, undefined)
+                        stack.append(res)
                     else:  # Assume attribute access
                         idx = stack.pop()
                         assert isinstance(idx, int)
                         obj = stack.pop()
                         assert isinstance(obj, list)
                         stack.append(obj[idx])
+                elif opcode == 104:  # initproperty
+                    index = u30()
+                    value = stack.pop()
+                    idx = self.multinames[index]
+                    if isinstance(idx, _Multiname):
+                        idx = stack.pop()
+                    obj = stack.pop()
+                    obj[idx] = value
                 elif opcode == 115:  # convert_i
                     value = stack.pop()
                     intvalue = int(value)
                     stack.append(intvalue)
                 elif opcode == 128:  # coerce
                     u30()
+                elif opcode == 130:  # coerce_a
+                    value = stack.pop()
+                    # um, yes, it's any value
+                    stack.append(value)
                 elif opcode == 133:  # coerce_s
                     assert isinstance(stack[-1], (type(None), compat_str))
+                elif opcode == 147:  # decrement
+                    value = stack.pop()
+                    assert isinstance(value, int)
+                    stack.append(value - 1)
+                elif opcode == 149:  # typeof
+                    value = stack.pop()
+                    return {
+                        _Undefined: 'undefined',
+                        compat_str: 'String',
+                        int: 'Number',
+                        float: 'Number',
+                    }[type(value)]
                 elif opcode == 160:  # add
                     value2 = stack.pop()
                     value1 = stack.pop()
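
The new member-access branch above is what makes TimerEvent.TIMER resolve: when the object on the stack is an _AVMClass, the property is looked up in its static_properties dict, while plain dicts and _ScopeDicts fall back to .get(pname, undefined), so a missing member yields the undefined sentinel rather than a KeyError. Given the builtin registered earlier in this file:

    assert TimerEventClass.static_properties['TIMER'] == 'timer'
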
@@ -574,16 +774,37 @@ class SWFInterpreter(object):
                     value1 = stack.pop()
                     res = value1 - value2
                     stack.append(res)
+                elif opcode == 162:  # multiply
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    res = value1 * value2
+                    stack.append(res)
                 elif opcode == 164:  # modulo
                     value2 = stack.pop()
                     value1 = stack.pop()
                     res = value1 % value2
                     stack.append(res)
+                elif opcode == 168:  # bitand
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    assert isinstance(value1, int)
+                    assert isinstance(value2, int)
+                    res = value1 & value2
+                    stack.append(res)
+                elif opcode == 171:  # equals
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    result = value1 == value2
+                    stack.append(result)
                 elif opcode == 175:  # greaterequals
                     value2 = stack.pop()
                     value1 = stack.pop()
                     result = value1 >= value2
                     stack.append(result)
+                elif opcode == 192:  # increment_i
+                    value = stack.pop()
+                    assert isinstance(value, int)
+                    stack.append(value + 1)
                 elif opcode == 208:  # getlocal_0
                     stack.append(registers[0])
                 elif opcode == 209:  # getlocal_1
@@ -606,4 +827,3 @@ class SWFInterpreter(object):
 
         avm_class.method_pyfunctions[func_name] = resfunc
         return resfunc
-
index 27308376130b97cc9cf6059b1f9dd4a64753ef42..2d2703368d8c2974e665985a10d1b6dcace8b235 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import io
 import json
 import traceback
@@ -7,20 +9,18 @@ import subprocess
 import sys
 from zipimport import zipimporter
 
-from .utils import (
+from .compat import (
     compat_str,
     compat_urllib_request,
 )
 from .version import __version__
 
+
 def rsa_verify(message, signature, key):
     from struct import pack
     from hashlib import sha256
-    from sys import version_info
-    def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
+
+    assert isinstance(message, bytes)
     block_size = 0
     n = key[0]
     while n:
@@ -31,14 +31,18 @@ def rsa_verify(message, signature, key):
     while signature:
         raw_bytes.insert(0, pack("B", signature & 0xFF))
         signature >>= 8
-    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
+    signature = (block_size - len(raw_bytes)) * b'\x00' + b''.join(raw_bytes)
+    if signature[0:2] != b'\x00\x01':
+        return False
     signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    if b'\x00' not in signature:
+        return False
+    signature = signature[signature.index(b'\x00') + 1:]
+    if not signature.startswith(b'\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'):
+        return False
     signature = signature[19:]
-    if signature != sha256(message).digest(): return False
+    if signature != sha256(message).digest():
+        return False
     return True
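
For reference, what rsa_verify() accepts is a textbook PKCS#1 v1.5 signature block: after modular exponentiation the bytes must read 00 01, FF padding, 00, the fixed 19-byte SHA-256 DigestInfo header, then the 32-byte digest. A small sketch of the expected 51-byte trailer (helper name hypothetical):

    import hashlib

    DIGESTINFO_SHA256 = (b'\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01'
                         b'\x65\x03\x04\x02\x01\x05\x00\x04\x20')

    def expected_trailer(message):
        # DigestInfo header + SHA-256 digest: the tail rsa_verify checks.
        return DIGESTINFO_SHA256 + hashlib.sha256(message).digest()
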
 
 
@@ -51,18 +55,19 @@ def update_self(to_screen, verbose):
     UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
 
     if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
-        to_screen(u'It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
+        to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
         return
 
     # Check if there is a new version
     try:
         newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
     except:
-        if verbose: to_screen(compat_str(traceback.format_exc()))
-        to_screen(u'ERROR: can\'t find the current version. Please try again later.')
+        if verbose:
+            to_screen(compat_str(traceback.format_exc()))
+        to_screen('ERROR: can\'t find the current version. Please try again later.')
         return
     if newversion == __version__:
-        to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
+        to_screen('youtube-dl is up-to-date (' + __version__ + ')')
         return
 
     # Download and check versions info
@@ -70,16 +75,17 @@ def update_self(to_screen, verbose):
         versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
         versions_info = json.loads(versions_info)
     except:
-        if verbose: to_screen(compat_str(traceback.format_exc()))
-        to_screen(u'ERROR: can\'t obtain versions info. Please try again later.')
+        if verbose:
+            to_screen(compat_str(traceback.format_exc()))
+        to_screen('ERROR: can\'t obtain versions info. Please try again later.')
         return
-    if not 'signature' in versions_info:
-        to_screen(u'ERROR: the versions file is not signed or corrupted. Aborting.')
+    if 'signature' not in versions_info:
+        to_screen('ERROR: the versions file is not signed or corrupted. Aborting.')
         return
     signature = versions_info['signature']
     del versions_info['signature']
     if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
-        to_screen(u'ERROR: the versions file signature is invalid. Aborting.')
+        to_screen('ERROR: the versions file signature is invalid. Aborting.')
         return
 
     version_id = versions_info['latest']
@@ -87,10 +93,10 @@ def update_self(to_screen, verbose):
     def version_tuple(version_str):
         return tuple(map(int, version_str.split('.')))
     if version_tuple(__version__) >= version_tuple(version_id):
-        to_screen(u'youtube-dl is up to date (%s)' % __version__)
+        to_screen('youtube-dl is up to date (%s)' % __version__)
         return
 
-    to_screen(u'Updating to version ' + version_id + ' ...')
+    to_screen('Updating to version ' + version_id + ' ...')
     version = versions_info['versions'][version_id]
 
     print_notes(to_screen, versions_info['versions'])
@@ -98,11 +104,11 @@ def update_self(to_screen, verbose):
     filename = sys.argv[0]
     # Py2EXE: Filename could be different
     if hasattr(sys, "frozen") and not os.path.isfile(filename):
-        if os.path.isfile(filename + u'.exe'):
-            filename += u'.exe'
+        if os.path.isfile(filename + '.exe'):
+            filename += '.exe'
 
     if not os.access(filename, os.W_OK):
-        to_screen(u'ERROR: no write permissions on %s' % filename)
+        to_screen('ERROR: no write permissions on %s' % filename)
         return
 
     # Py2EXE
@@ -110,7 +116,7 @@ def update_self(to_screen, verbose):
         exe = os.path.abspath(filename)
         directory = os.path.dirname(exe)
         if not os.access(directory, os.W_OK):
-            to_screen(u'ERROR: no write permissions on %s' % directory)
+            to_screen('ERROR: no write permissions on %s' % directory)
             return
 
         try:
@@ -118,40 +124,43 @@ def update_self(to_screen, verbose):
             newcontent = urlh.read()
             urlh.close()
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
-            to_screen(u'ERROR: unable to download latest version')
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
+            to_screen('ERROR: unable to download latest version')
             return
 
         newcontent_hash = hashlib.sha256(newcontent).hexdigest()
         if newcontent_hash != version['exe'][1]:
-            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
+            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
             return
 
         try:
             with open(exe + '.new', 'wb') as outf:
                 outf.write(newcontent)
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
-            to_screen(u'ERROR: unable to write the new version')
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
+            to_screen('ERROR: unable to write the new version')
             return
 
         try:
             bat = os.path.join(directory, 'youtube-dl-updater.bat')
             with io.open(bat, 'w') as batfile:
-                batfile.write(u"""
+                batfile.write('''
 @echo off
 echo Waiting for file handle to be closed ...
 ping 127.0.0.1 -n 5 -w 1000 > NUL
 move /Y "%s.new" "%s" > NUL
 echo Updated youtube-dl to version %s.
 start /b "" cmd /c del "%%~f0"&exit /b"
-                \n""" % (exe, exe, version_id))
+                \n''' % (exe, exe, version_id))
 
             subprocess.Popen([bat])  # Continues to run in the background
             return  # Do not show premature success messages
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
-            to_screen(u'ERROR: unable to overwrite current version')
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
+            to_screen('ERROR: unable to overwrite current version')
             return
 
     # Zip unix package
@@ -161,35 +170,39 @@ start /b "" cmd /c del "%%~f0"&exit /b"
             newcontent = urlh.read()
             urlh.close()
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
-            to_screen(u'ERROR: unable to download latest version')
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
+            to_screen('ERROR: unable to download latest version')
             return
 
         newcontent_hash = hashlib.sha256(newcontent).hexdigest()
         if newcontent_hash != version['bin'][1]:
-            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
+            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
             return
 
         try:
             with open(filename, 'wb') as outf:
                 outf.write(newcontent)
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
-            to_screen(u'ERROR: unable to overwrite current version')
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
+            to_screen('ERROR: unable to overwrite current version')
             return
 
-    to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
+    to_screen('Updated youtube-dl. Restart youtube-dl to use the new version.')
+
 
 def get_notes(versions, fromVersion):
     notes = []
-    for v,vdata in sorted(versions.items()):
+    for v, vdata in sorted(versions.items()):
         if v > fromVersion:
             notes.extend(vdata.get('notes', []))
     return notes
 
+
 def print_notes(to_screen, versions, fromVersion=__version__):
     notes = get_notes(versions, fromVersion)
     if notes:
-        to_screen(u'PLEASE NOTE:')
+        to_screen('PLEASE NOTE:')
         for note in notes:
             to_screen(note)
index 0d0bbe8f66e67e35eb732349e6cfd374a3bbd2b5..51a822e4f06b5da70e09311afe8789ee4383b133 100644 (file)
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+from __future__ import unicode_literals
+
 import calendar
 import codecs
 import contextlib
@@ -8,7 +10,6 @@ import ctypes
 import datetime
 import email.utils
 import errno
-import getpass
 import gzip
 import itertools
 import io
@@ -24,176 +25,25 @@ import socket
 import struct
 import subprocess
 import sys
+import tempfile
 import traceback
 import xml.etree.ElementTree
 import zlib
 
-try:
-    import urllib.request as compat_urllib_request
-except ImportError: # Python 2
-    import urllib2 as compat_urllib_request
-
-try:
-    import urllib.error as compat_urllib_error
-except ImportError: # Python 2
-    import urllib2 as compat_urllib_error
-
-try:
-    import urllib.parse as compat_urllib_parse
-except ImportError: # Python 2
-    import urllib as compat_urllib_parse
-
-try:
-    from urllib.parse import urlparse as compat_urllib_parse_urlparse
-except ImportError: # Python 2
-    from urlparse import urlparse as compat_urllib_parse_urlparse
-
-try:
-    import urllib.parse as compat_urlparse
-except ImportError: # Python 2
-    import urlparse as compat_urlparse
-
-try:
-    import http.cookiejar as compat_cookiejar
-except ImportError: # Python 2
-    import cookielib as compat_cookiejar
-
-try:
-    import html.entities as compat_html_entities
-except ImportError: # Python 2
-    import htmlentitydefs as compat_html_entities
-
-try:
-    import html.parser as compat_html_parser
-except ImportError: # Python 2
-    import HTMLParser as compat_html_parser
-
-try:
-    import http.client as compat_http_client
-except ImportError: # Python 2
-    import httplib as compat_http_client
-
-try:
-    from urllib.error import HTTPError as compat_HTTPError
-except ImportError:  # Python 2
-    from urllib2 import HTTPError as compat_HTTPError
-
-try:
-    from urllib.request import urlretrieve as compat_urlretrieve
-except ImportError:  # Python 2
-    from urllib import urlretrieve as compat_urlretrieve
-
-
-try:
-    from subprocess import DEVNULL
-    compat_subprocess_get_DEVNULL = lambda: DEVNULL
-except ImportError:
-    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
-
-try:
-    from urllib.parse import unquote as compat_urllib_parse_unquote
-except ImportError:
-    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
-        if string == '':
-            return string
-        res = string.split('%')
-        if len(res) == 1:
-            return string
-        if encoding is None:
-            encoding = 'utf-8'
-        if errors is None:
-            errors = 'replace'
-        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
-        pct_sequence = b''
-        string = res[0]
-        for item in res[1:]:
-            try:
-                if not item:
-                    raise ValueError
-                pct_sequence += item[:2].decode('hex')
-                rest = item[2:]
-                if not rest:
-                    # This segment was just a single percent-encoded character.
-                    # May be part of a sequence of code units, so delay decoding.
-                    # (Stored in pct_sequence).
-                    continue
-            except ValueError:
-                rest = '%' + item
-            # Encountered non-percent-encoded characters. Flush the current
-            # pct_sequence.
-            string += pct_sequence.decode(encoding, errors) + rest
-            pct_sequence = b''
-        if pct_sequence:
-            # Flush the final pct_sequence
-            string += pct_sequence.decode(encoding, errors)
-        return string
-
-
-try:
-    from urllib.parse import parse_qs as compat_parse_qs
-except ImportError: # Python 2
-    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
-    # Python 2's version is apparently totally broken
-
-    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
-                encoding='utf-8', errors='replace'):
-        qs, _coerce_result = qs, unicode
-        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
-        r = []
-        for name_value in pairs:
-            if not name_value and not strict_parsing:
-                continue
-            nv = name_value.split('=', 1)
-            if len(nv) != 2:
-                if strict_parsing:
-                    raise ValueError("bad query field: %r" % (name_value,))
-                # Handle case of a control-name with no equal sign
-                if keep_blank_values:
-                    nv.append('')
-                else:
-                    continue
-            if len(nv[1]) or keep_blank_values:
-                name = nv[0].replace('+', ' ')
-                name = compat_urllib_parse_unquote(
-                    name, encoding=encoding, errors=errors)
-                name = _coerce_result(name)
-                value = nv[1].replace('+', ' ')
-                value = compat_urllib_parse_unquote(
-                    value, encoding=encoding, errors=errors)
-                value = _coerce_result(value)
-                r.append((name, value))
-        return r
-
-    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
-                encoding='utf-8', errors='replace'):
-        parsed_result = {}
-        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
-                        encoding=encoding, errors=errors)
-        for name, value in pairs:
-            if name in parsed_result:
-                parsed_result[name].append(value)
-            else:
-                parsed_result[name] = [value]
-        return parsed_result
-
-try:
-    compat_str = unicode # Python 2
-except NameError:
-    compat_str = str
+from .compat import (
+    compat_chr,
+    compat_getenv,
+    compat_html_entities,
+    compat_parse_qs,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_parse,
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urlparse,
+    shlex_quote,
+)
 
-try:
-    compat_chr = unichr # Python 2
-except NameError:
-    compat_chr = chr
-
-try:
-    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
-except ImportError:  # Python 2.6
-    from xml.parsers.expat import ExpatError as compat_xml_parse_error
-
-def compat_ord(c):
-    if type(c) is int: return c
-    else: return ord(c)
 
 # This is not clearly defined otherwise
 compiled_regex_type = type(re.compile(''))
@@ -206,6 +56,7 @@ std_headers = {
     'Accept-Language': 'en-us,en;q=0.5',
 }
 
+
 def preferredencoding():
     """Get preferred encoding.
 
@@ -214,40 +65,81 @@ def preferredencoding():
     """
     try:
         pref = locale.getpreferredencoding()
-        u'TEST'.encode(pref)
+        'TEST'.encode(pref)
     except:
         pref = 'UTF-8'
 
     return pref
 
-if sys.version_info < (3,0):
-    def compat_print(s):
-        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
-else:
-    def compat_print(s):
-        assert type(s) == type(u'')
-        print(s)
-
-# In Python 2.x, json.dump expects a bytestream.
-# In Python 3.x, it writes to a character stream
-if sys.version_info < (3,0):
-    def write_json_file(obj, fn):
-        with open(fn, 'wb') as f:
-            json.dump(obj, f)
-else:
-    def write_json_file(obj, fn):
-        with open(fn, 'w', encoding='utf-8') as f:
-            json.dump(obj, f)
 
-if sys.version_info >= (2,7):
+def write_json_file(obj, fn):
+    """ Encode obj as JSON and write it to fn, atomically if possible """
+
+    fn = encodeFilename(fn)
+    if sys.version_info < (3, 0) and sys.platform != 'win32':
+        encoding = get_filesystem_encoding()
+        # os.path.basename returns a bytes object, but NamedTemporaryFile
+        # will fail if the filename contains non ascii characters unless we
+        # use a unicode object
+        path_basename = lambda f: os.path.basename(fn).decode(encoding)
+        # the same for os.path.dirname
+        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
+    else:
+        path_basename = os.path.basename
+        path_dirname = os.path.dirname
+
+    args = {
+        'suffix': '.tmp',
+        'prefix': path_basename(fn) + '.',
+        'dir': path_dirname(fn),
+        'delete': False,
+    }
+
+    # In Python 2.x, json.dump expects a bytestream.
+    # In Python 3.x, it writes to a character stream
+    if sys.version_info < (3, 0):
+        args['mode'] = 'wb'
+    else:
+        args.update({
+            'mode': 'w',
+            'encoding': 'utf-8',
+        })
+
+    tf = tempfile.NamedTemporaryFile(**args)
+
+    try:
+        with tf:
+            json.dump(obj, tf)
+        if sys.platform == 'win32':
+            # Need to remove existing file on Windows, else os.rename raises
+            # WindowsError or FileExistsError.
+            try:
+                os.unlink(fn)
+            except OSError:
+                pass
+        os.rename(tf.name, fn)
+    except:
+        try:
+            os.remove(tf.name)
+        except OSError:
+            pass
+        raise
+
+
+if sys.version_info >= (2, 7):
     def find_xpath_attr(node, xpath, key, val):
         """ Find the xpath xpath[@key=val] """
         assert re.match(r'^[a-zA-Z-]+$', key)
         assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
-        expr = xpath + u"[@%s='%s']" % (key, val)
+        expr = xpath + "[@%s='%s']" % (key, val)
         return node.find(expr)
 else:
     def find_xpath_attr(node, xpath, key, val):
+        # Here comes the crazy part: In 2.6, if the xpath is a unicode,
+        # .//node does not match if a node is a direct child of . !
+        if isinstance(xpath, unicode):
+            xpath = xpath.encode('ascii')
+
         for f in node.findall(xpath):
             if f.attrib.get(key) == val:
                 return f
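
The rewritten write_json_file() in this hunk is the standard atomic-write pattern: serialize into a NamedTemporaryFile placed in the destination directory, then os.rename() over the target, so readers never observe a half-written file (Windows needs the extra os.unlink() because rename there will not replace an existing file). The call site is unchanged, e.g.:

    write_json_file({'id': 'abc123', 'title': 'Example'}, 'info.json')
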
@@ -255,6 +147,8 @@ else:
 
 # On python2.6 the xml.etree.ElementTree.Element methods don't support
 # the namespace parameter
+
+
 def xpath_with_ns(path, ns_map):
     components = [c.split(':') for c in path.split('/')]
     replaced = []
@@ -266,152 +160,47 @@ def xpath_with_ns(path, ns_map):
             replaced.append('{%s}%s' % (ns_map[ns], tag))
     return '/'.join(replaced)
 
-def htmlentity_transform(matchobj):
-    """Transforms an HTML entity to a character.
-
-    This function receives a match object and is intended to be used with
-    the re.sub() function.
-    """
-    entity = matchobj.group(1)
 
-    # Known non-numeric HTML entity
-    if entity in compat_html_entities.name2codepoint:
-        return compat_chr(compat_html_entities.name2codepoint[entity])
+def xpath_text(node, xpath, name=None, fatal=False):
+    if sys.version_info < (2, 7):  # Crazy 2.6
+        xpath = xpath.encode('ascii')
 
-    mobj = re.match(u'(?u)#(x?\\d+)', entity)
-    if mobj is not None:
-        numstr = mobj.group(1)
-        if numstr.startswith(u'x'):
-            base = 16
-            numstr = u'0%s' % numstr
+    n = node.find(xpath)
+    if n is None or n.text is None:
+        if fatal:
+            name = xpath if name is None else name
+            raise ExtractorError('Could not find XML element %s' % name)
         else:
-            base = 10
-        return compat_chr(int(numstr, base))
-
-    # Unknown entity in name, return its literal representation
-    return (u'&%s;' % entity)
-
-compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
-class BaseHTMLParser(compat_html_parser.HTMLParser):
-    def __init(self):
-        compat_html_parser.HTMLParser.__init__(self)
-        self.html = None
-
-    def loads(self, html):
-        self.html = html
-        self.feed(html)
-        self.close()
-
-class AttrParser(BaseHTMLParser):
-    """Modified HTMLParser that isolates a tag with the specified attribute"""
-    def __init__(self, attribute, value):
-        self.attribute = attribute
-        self.value = value
-        self.result = None
-        self.started = False
-        self.depth = {}
-        self.watch_startpos = False
-        self.error_count = 0
-        BaseHTMLParser.__init__(self)
-
-    def error(self, message):
-        if self.error_count > 10 or self.started:
-            raise compat_html_parser.HTMLParseError(message, self.getpos())
-        self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
-        self.error_count += 1
-        self.goahead(1)
-
-    def handle_starttag(self, tag, attrs):
-        attrs = dict(attrs)
-        if self.started:
-            self.find_startpos(None)
-        if self.attribute in attrs and attrs[self.attribute] == self.value:
-            self.result = [tag]
-            self.started = True
-            self.watch_startpos = True
-        if self.started:
-            if not tag in self.depth: self.depth[tag] = 0
-            self.depth[tag] += 1
-
-    def handle_endtag(self, tag):
-        if self.started:
-            if tag in self.depth: self.depth[tag] -= 1
-            if self.depth[self.result[0]] == 0:
-                self.started = False
-                self.result.append(self.getpos())
-
-    def find_startpos(self, x):
-        """Needed to put the start position of the result (self.result[1])
-        after the opening tag with the requested id"""
-        if self.watch_startpos:
-            self.watch_startpos = False
-            self.result.append(self.getpos())
-    handle_entityref = handle_charref = handle_data = handle_comment = \
-    handle_decl = handle_pi = unknown_decl = find_startpos
-
-    def get_result(self):
-        if self.result is None:
             return None
-        if len(self.result) != 3:
-            return None
-        lines = self.html.split('\n')
-        lines = lines[self.result[1][0]-1:self.result[2][0]]
-        lines[0] = lines[0][self.result[1][1]:]
-        if len(lines) == 1:
-            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
-        lines[-1] = lines[-1][:self.result[2][1]]
-        return '\n'.join(lines).strip()
-# Hack for https://github.com/rg3/youtube-dl/issues/662
-if sys.version_info < (2, 7, 3):
-    AttrParser.parse_endtag = (lambda self, i:
-        i + len("</scr'+'ipt>")
-        if self.rawdata[i:].startswith("</scr'+'ipt>")
-        else compat_html_parser.HTMLParser.parse_endtag(self, i))
+    return n.text
+
 
 def get_element_by_id(id, html):
     """Return the content of the tag with the specified ID in the passed HTML document"""
     return get_element_by_attribute("id", id, html)
 
+
 def get_element_by_attribute(attribute, value, html):
     """Return the content of the tag with the specified attribute in the passed HTML document"""
-    parser = AttrParser(attribute, value)
-    try:
-        parser.loads(html)
-    except compat_html_parser.HTMLParseError:
-        pass
-    return parser.get_result()
 
-class MetaParser(BaseHTMLParser):
-    """
-    Modified HTMLParser that isolates a meta tag with the specified name 
-    attribute.
-    """
-    def __init__(self, name):
-        BaseHTMLParser.__init__(self)
-        self.name = name
-        self.content = None
-        self.result = None
-
-    def handle_starttag(self, tag, attrs):
-        if tag != 'meta':
-            return
-        attrs = dict(attrs)
-        if attrs.get('name') == self.name:
-            self.result = attrs.get('content')
+    m = re.search(r'''(?xs)
+        <([a-zA-Z0-9:._-]+)
+         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
+         \s+%s=['"]?%s['"]?
+         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
+        \s*>
+        (?P<content>.*?)
+        </\1>
+    ''' % (re.escape(attribute), re.escape(value)), html)
 
-    def get_result(self):
-        return self.result
+    if not m:
+        return None
+    res = m.group('content')
 
-def get_meta_content(name, html):
-    """
-    Return the content attribute from the meta tag with the given name attribute.
-    """
-    parser = MetaParser(name)
-    try:
-        parser.loads(html)
-    except compat_html_parser.HTMLParseError:
-        pass
-    return parser.get_result()
+    if res.startswith('"') or res.startswith("'"):
+        res = res[1:-1]
+
+    return unescapeHTML(res)
 
 
 def clean_html(html):
@@ -438,7 +227,7 @@ def sanitize_open(filename, open_mode):
     It returns the tuple (stream, definitive_file_name).
     """
     try:
-        if filename == u'-':
+        if filename == '-':
             if sys.platform == 'win32':
                 import msvcrt
                 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
@@ -451,9 +240,9 @@ def sanitize_open(filename, open_mode):
 
         # In case of error, try to remove win32 forbidden chars
         alt_filename = os.path.join(
-                        re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part)
-                        for path_part in os.path.split(filename)
-                       )
+            re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part)
+            for path_part in os.path.split(filename)
+        )
         if alt_filename == filename:
             raise
         else:
@@ -470,6 +259,7 @@ def timeconvert(timestr):
         timestamp = email.utils.mktime_tz(timetuple)
     return timestamp
 
+
 def sanitize_filename(s, restricted=False, is_id=False):
     """Sanitizes a string so it could be used as part of a filename.
     If restricted is set, use a stricter subset of allowed characters.
@@ -490,7 +280,7 @@ def sanitize_filename(s, restricted=False, is_id=False):
             return '_'
         return char
 
-    result = u''.join(map(replace_insane, s))
+    result = ''.join(map(replace_insane, s))
     if not is_id:
         while '__' in result:
             result = result.replace('__', '_')
@@ -502,6 +292,7 @@ def sanitize_filename(s, restricted=False, is_id=False):
             result = '_'
     return result
 
+
 def orderedSet(iterable):
     """ Remove all duplicates from the input iterable """
     res = []
@@ -511,13 +302,33 @@ def orderedSet(iterable):
     return res
 
 
+def _htmlentity_transform(entity):
+    """Transforms an HTML entity to a character."""
+    # Known non-numeric HTML entity
+    if entity in compat_html_entities.name2codepoint:
+        return compat_chr(compat_html_entities.name2codepoint[entity])
+
+    mobj = re.match(r'#(x?[0-9]+)', entity)
+    if mobj is not None:
+        numstr = mobj.group(1)
+        if numstr.startswith('x'):
+            base = 16
+            numstr = '0%s' % numstr
+        else:
+            base = 10
+        return compat_chr(int(numstr, base))
+
+    # Unknown entity in name, return its literal representation
+    return ('&%s;' % entity)
+
+
 def unescapeHTML(s):
     if s is None:
         return None
     assert type(s) == compat_str
 
-    result = re.sub(r'(?u)&(.+?);', htmlentity_transform, s)
-    return result
+    return re.sub(
+        r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s)
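
Entity handling is now split into _htmlentity_transform() plus a simple &([^;]+); substitution. The three entity shapes it recognizes, by way of example:

    assert unescapeHTML('&amp;') == '&'    # named entity
    assert unescapeHTML('&#38;') == '&'    # decimal reference
    assert unescapeHTML('&#x26;') == '&'   # hex reference
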
 
 
 def encodeFilename(s, for_subprocess=False):
@@ -532,7 +343,7 @@ def encodeFilename(s, for_subprocess=False):
         return s
 
     if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
-        # Pass u'' directly to use Unicode APIs on Windows 2000 and up
+        # Pass '' directly to use Unicode APIs on Windows 2000 and up
         # (Detecting Windows NT 4 is tricky because 'major >= 4' would
         # match Windows 9x series as well. Besides, NT 4 is obsolete.)
         if not for_subprocess:
@@ -566,6 +377,7 @@ def decodeOption(optval):
     assert isinstance(optval, compat_str)
     return optval
 
+
 def formatSeconds(secs):
     if secs > 3600:
         return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
@@ -589,7 +401,7 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
                     self.sock = sock
                     self._tunnel()
                 try:
-                    self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3)
+                    self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
                 except ssl.SSLError:
                     self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23)
 
@@ -597,8 +409,14 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
             def https_open(self, req):
                 return self.do_open(HTTPSConnectionV3, req)
         return HTTPSHandlerV3(**kwargs)
-    else:
-        context = ssl.SSLContext(ssl.PROTOCOL_SSLv3)
+    elif hasattr(ssl, 'create_default_context'):  # Python >= 3.4
+        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+        context.options &= ~ssl.OP_NO_SSLv3  # Allow older, not-as-secure SSLv3
+        if opts_no_check_certificate:
+            context.verify_mode = ssl.CERT_NONE
+        return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
+    else:  # Python < 3.4
+        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
         context.verify_mode = (ssl.CERT_NONE
                                if opts_no_check_certificate
                                else ssl.CERT_REQUIRED)
@@ -609,8 +427,10 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
             pass  # Python < 3.4
         return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
 
+
 class ExtractorError(Exception):
     """Error during info extraction."""
+
     def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
         """ tb, if given, is the original traceback (so that it can be printed out).
         If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
@@ -620,8 +440,16 @@ class ExtractorError(Exception):
             expected = True
         if video_id is not None:
             msg = video_id + ': ' + msg
+        if cause:
+            msg += ' (caused by %r)' % cause
         if not expected:
-            msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type  youtube-dl -U  to update.'
+            if ytdl_is_updateable():
+                update_cmd = 'type  youtube-dl -U  to update'
+            else:
+                update_cmd = 'see  https://yt-dl.org/update  on how to update'
+            msg += '; please report this issue on https://yt-dl.org/bug .'
+            msg += ' Make sure you are using the latest version; %s.' % update_cmd
+            msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
         super(ExtractorError, self).__init__(msg)
 
         self.traceback = tb
@@ -632,7 +460,7 @@ class ExtractorError(Exception):
     def format_traceback(self):
         if self.traceback is None:
             return None
-        return u''.join(traceback.format_tb(self.traceback))
+        return ''.join(traceback.format_tb(self.traceback))
 
 
 class RegexNotFoundError(ExtractorError):
@@ -647,6 +475,7 @@ class DownloadError(Exception):
     configured to continue on errors. They will contain the appropriate
     error message.
     """
+
     def __init__(self, msg, exc_info=None):
         """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
         super(DownloadError, self).__init__(msg)
@@ -668,9 +497,11 @@ class PostProcessingError(Exception):
     This exception may be raised by PostProcessor's .run() method to
     indicate an error in the postprocessing task.
     """
+
     def __init__(self, msg):
         self.msg = msg
 
+
 class MaxDownloadsReached(Exception):
     """ --max-downloads limit has been reached. """
     pass
@@ -700,6 +531,7 @@ class ContentTooShortError(Exception):
         self.downloaded = downloaded
         self.expected = expected
 
+
 class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
     """Handler for HTTP requests and responses.
 
@@ -734,10 +566,9 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
         return ret
 
     def http_request(self, req):
-        for h,v in std_headers.items():
-            if h in req.headers:
-                del req.headers[h]
-            req.add_header(h, v)
+        for h, v in std_headers.items():
+            if h not in req.headers:
+                req.add_header(h, v)
         if 'Youtubedl-no-compression' in req.headers:
             if 'Accept-encoding' in req.headers:
                 del req.headers['Accept-encoding']
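
Note the semantic change in http_request(): std_headers used to replace any caller-supplied header of the same name, whereas now they are only added when missing, so per-request overrides survive. Roughly:

    req = compat_urllib_request.Request(
        'http://example.com/', headers={'User-Agent': 'custom/1.0'})
    # After http_request(), the request still carries 'custom/1.0';
    # previously it would have been clobbered by the std_headers value.
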
@@ -747,6 +578,12 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
                 del req.headers['User-agent']
             req.headers['User-agent'] = req.headers['Youtubedl-user-agent']
             del req.headers['Youtubedl-user-agent']
+
+        if sys.version_info < (2, 7) and '#' in req.get_full_url():
+            # Python 2.6 is brain-dead when it comes to fragments
+            req._Request__original = req._Request__original.partition('#')[0]
+            req._Request__r_type = req._Request__r_type.partition('#')[0]
+
         return req
 
     def http_response(self, req, resp):
@@ -789,7 +626,7 @@ def parse_iso8601(date_str, delimiter='T'):
         return None
 
     m = re.search(
-        r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$',
+        r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
         date_str)
     if not m:
         timezone = datetime.timedelta()
@@ -802,22 +639,24 @@ def parse_iso8601(date_str, delimiter='T'):
             timezone = datetime.timedelta(
                 hours=sign * int(m.group('hours')),
                 minutes=sign * int(m.group('minutes')))
-    date_format =  '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
+    date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
     dt = datetime.datetime.strptime(date_str, date_format) - timezone
     return calendar.timegm(dt.timetuple())
 
 
-def unified_strdate(date_str):
+def unified_strdate(date_str, day_first=True):
     """Return a string with the date in the format YYYYMMDD"""
 
     if date_str is None:
         return None
-
     upload_date = None
-    #Replace commas
+    # Replace commas
     date_str = date_str.replace(',', ' ')
     # %z (UTC offset) is only supported in python>=3.2
     date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
+    # Remove AM/PM + timezone
+    date_str = re.sub(r'(?i)\s*(?:AM|PM)\s+[A-Z]+', '', date_str)
+
     format_expressions = [
         '%d %B %Y',
         '%d %b %Y',
@@ -827,10 +666,13 @@ def unified_strdate(date_str):
         '%b %dnd %Y %I:%M%p',
         '%b %dth %Y %I:%M%p',
         '%Y-%m-%d',
+        '%Y/%m/%d',
         '%d.%m.%Y',
         '%d/%m/%Y',
+        '%d/%m/%y',
         '%Y/%m/%d %H:%M:%S',
         '%Y-%m-%d %H:%M:%S',
+        '%Y-%m-%d %H:%M:%S.%f',
         '%d.%m.%Y %H:%M',
         '%d.%m.%Y %H.%M',
         '%Y-%m-%dT%H:%M:%SZ',
@@ -840,6 +682,14 @@ def unified_strdate(date_str):
         '%Y-%m-%dT%H:%M:%S.%f',
         '%Y-%m-%dT%H:%M',
     ]
+    if day_first:
+        format_expressions.extend([
+            '%d/%m/%Y %H:%M:%S',
+        ])
+    else:
+        format_expressions.extend([
+            '%m/%d/%Y %H:%M:%S',
+        ])
     for expression in format_expressions:
         try:
             upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
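
The new day_first flag settles ambiguous all-numeric dates (extractors for sites using US-style dates would pass day_first=False). For example:

    assert unified_strdate('02/03/2014 10:00:00') == '20140302'
    assert unified_strdate('02/03/2014 10:00:00',
                           day_first=False) == '20140203'
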
@@ -851,23 +701,30 @@ def unified_strdate(date_str):
             upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
     return upload_date
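The new day_first flag disambiguates slash-separated dates, which European and US sites write in opposite orders. An illustrative sketch:

    from youtube_dl.utils import unified_strdate

    unified_strdate('11/12/2014 10:30:00')                   # -> '20141211'
    unified_strdate('11/12/2014 10:30:00', day_first=False)  # -> '20141112'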
 
-def determine_ext(url, default_ext=u'unknown_video'):
-    guess = url.partition(u'?')[0].rpartition(u'.')[2]
+
+def determine_ext(url, default_ext='unknown_video'):
+    if url is None:
+        return default_ext
+    guess = url.partition('?')[0].rpartition('.')[2]
     if re.match(r'^[A-Za-z0-9]+$', guess):
         return guess
     else:
         return default_ext
 
+
 def subtitles_filename(filename, sub_lang, sub_format):
-    return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
+
 
 def date_from_str(date_str):
     """
     Return a datetime object from a string in the format YYYYMMDD or
     (now|today)[+-][0-9](day|week|month|year)(s)?"""
     today = datetime.date.today()
-    if date_str == 'now'or date_str == 'today':
+    if date_str in ('now', 'today'):
         return today
+    if date_str == 'yesterday':
+        return today - datetime.timedelta(days=1)
     match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
     if match is not None:
         sign = match.group('sign')
@@ -875,7 +732,7 @@ def date_from_str(date_str):
         if sign == '-':
             time = -time
         unit = match.group('unit')
-        #A bad aproximation?
+        # A bad approximation?
         if unit == 'month':
             unit = 'day'
             time *= 30
@@ -886,7 +743,8 @@ def date_from_str(date_str):
         delta = datetime.timedelta(**{unit: time})
         return today + delta
     return datetime.datetime.strptime(date_str, "%Y%m%d").date()
-    
+
+
 def hyphenate_date(date_str):
     """
     Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
@@ -896,8 +754,10 @@ def hyphenate_date(date_str):
     else:
         return date_str
 
+
 class DateRange(object):
     """Represents a time interval between two dates"""
+
     def __init__(self, start=None, end=None):
         """start and end must be strings in the format accepted by date"""
         if start is not None:
@@ -910,17 +770,20 @@ class DateRange(object):
             self.end = datetime.datetime.max.date()
         if self.start > self.end:
             raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
+
     @classmethod
     def day(cls, day):
         """Returns a range that only contains the given day"""
-        return cls(day,day)
+        return cls(day, day)
+
     def __contains__(self, date):
         """Check if the date is in the range"""
         if not isinstance(date, datetime.date):
             date = date_from_str(date)
         return self.start <= date <= self.end
+
     def __str__(self):
-        return '%s - %s' % ( self.start.isoformat(), self.end.isoformat())
+        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
 
 
 def platform_name():
@@ -956,22 +819,22 @@ def _windows_write_string(s, out):
 
     GetStdHandle = ctypes.WINFUNCTYPE(
         ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
-        ("GetStdHandle", ctypes.windll.kernel32))
+        (b"GetStdHandle", ctypes.windll.kernel32))
     h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
 
     WriteConsoleW = ctypes.WINFUNCTYPE(
         ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
         ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
-        ctypes.wintypes.LPVOID)(("WriteConsoleW", ctypes.windll.kernel32))
+        ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
     written = ctypes.wintypes.DWORD(0)
 
-    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(("GetFileType", ctypes.windll.kernel32))
+    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
     FILE_TYPE_CHAR = 0x0002
     FILE_TYPE_REMOTE = 0x8000
     GetConsoleMode = ctypes.WINFUNCTYPE(
         ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
         ctypes.POINTER(ctypes.wintypes.DWORD))(
-        ("GetConsoleMode", ctypes.windll.kernel32))
+        (b"GetConsoleMode", ctypes.windll.kernel32))
     INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
 
     def not_a_console(handle):
@@ -1039,16 +902,7 @@ def bytes_to_intlist(bs):
 def intlist_to_bytes(xs):
     if not xs:
         return b''
-    if isinstance(chr(0), bytes):  # Python 2
-        return ''.join([chr(x) for x in xs])
-    else:
-        return bytes(xs)
-
-
-def get_cachedir(params={}):
-    cache_root = os.environ.get('XDG_CACHE_HOME',
-                                os.path.expanduser('~/.cache'))
-    return params.get('cachedir', os.path.join(cache_root, 'youtube-dl'))
+    return struct_pack('%dB' % len(xs), *xs)
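Delegating to struct_pack (defined near the end of this file) gives a single code path for Python 2 and 3; e.g.:

    from youtube_dl.utils import intlist_to_bytes

    intlist_to_bytes([104, 105])  # -> b'hi' on both Python 2 and 3
    intlist_to_bytes([])          # -> b''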
 
 
 # Cross-platform file locking
@@ -1110,10 +964,10 @@ else:
     import fcntl
 
     def _lock_file(f, exclusive):
-        fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
+        fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
 
     def _unlock_file(f):
-        fcntl.lockf(f, fcntl.LOCK_UN)
+        fcntl.flock(f, fcntl.LOCK_UN)
 
 
 class locked_file(object):
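flock() takes BSD-style whole-file locks where lockf() wrapped POSIX record locks; locked_file itself is used as before. A usage sketch (file name invented; assumes the constructor signature locked_file(filename, mode, encoding=None) from the unchanged part of this class):

    from youtube_dl.utils import locked_file

    # Opened for writing -> exclusive lock; released on __exit__.
    with locked_file('/tmp/ytdl-demo.txt', 'w', encoding='utf-8') as f:
        f.write('written under an exclusive flock()\n')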
@@ -1147,17 +1001,20 @@ class locked_file(object):
         return self.f.read(*args)
 
 
+def get_filesystem_encoding():
+    encoding = sys.getfilesystemencoding()
+    return encoding if encoding is not None else 'utf-8'
+
+
 def shell_quote(args):
     quoted_args = []
-    encoding = sys.getfilesystemencoding()
-    if encoding is None:
-        encoding = 'utf-8'
+    encoding = get_filesystem_encoding()
     for a in args:
         if isinstance(a, bytes):
             # We may get a filename encoded with 'encodeFilename'
             a = a.decode(encoding)
         quoted_args.append(pipes.quote(a))
-    return u' '.join(quoted_args)
+    return ' '.join(quoted_args)
 
 
 def takewhile_inclusive(pred, seq):
@@ -1173,35 +1030,89 @@ def smuggle_url(url, data):
     """ Pass additional data in a URL for internal use. """
 
     sdata = compat_urllib_parse.urlencode(
-        {u'__youtubedl_smuggle': json.dumps(data)})
-    return url + u'#' + sdata
+        {'__youtubedl_smuggle': json.dumps(data)})
+    return url + '#' + sdata
 
 
 def unsmuggle_url(smug_url, default=None):
-    if not '#__youtubedl_smuggle' in smug_url:
+    if '#__youtubedl_smuggle' not in smug_url:
         return smug_url, default
-    url, _, sdata = smug_url.rpartition(u'#')
-    jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0]
+    url, _, sdata = smug_url.rpartition('#')
+    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
     data = json.loads(jsond)
     return url, data
 
 
 def format_bytes(bytes):
     if bytes is None:
-        return u'N/A'
+        return 'N/A'
     if type(bytes) is str:
         bytes = float(bytes)
     if bytes == 0.0:
         exponent = 0
     else:
         exponent = int(math.log(bytes, 1024.0))
-    suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent]
+    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
     converted = float(bytes) / float(1024 ** exponent)
-    return u'%.2f%s' % (converted, suffix)
+    return '%.2f%s' % (converted, suffix)
+
+
+def parse_filesize(s):
+    if s is None:
+        return None
+
+    # The lower-case forms are of course incorrect and unofficial,
+    # but we support those too
+    _UNIT_TABLE = {
+        'B': 1,
+        'b': 1,
+        'KiB': 1024,
+        'KB': 1000,
+        'kB': 1024,
+        'Kb': 1000,
+        'MiB': 1024 ** 2,
+        'MB': 1000 ** 2,
+        'mB': 1024 ** 2,
+        'Mb': 1000 ** 2,
+        'GiB': 1024 ** 3,
+        'GB': 1000 ** 3,
+        'gB': 1024 ** 3,
+        'Gb': 1000 ** 3,
+        'TiB': 1024 ** 4,
+        'TB': 1000 ** 4,
+        'tB': 1024 ** 4,
+        'Tb': 1000 ** 4,
+        'PiB': 1024 ** 5,
+        'PB': 1000 ** 5,
+        'pB': 1024 ** 5,
+        'Pb': 1000 ** 5,
+        'EiB': 1024 ** 6,
+        'EB': 1000 ** 6,
+        'eB': 1024 ** 6,
+        'Eb': 1000 ** 6,
+        'ZiB': 1024 ** 7,
+        'ZB': 1000 ** 7,
+        'zB': 1024 ** 7,
+        'Zb': 1000 ** 7,
+        'YiB': 1024 ** 8,
+        'YB': 1000 ** 8,
+        'yB': 1024 ** 8,
+        'Yb': 1000 ** 8,
+    }
+
+    units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE)
+    m = re.match(
+        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s)
+    if not m:
+        return None
+
+    num_str = m.group('num').replace(',', '.')
+    mult = _UNIT_TABLE[m.group('unit')]
+    return int(float(num_str) * mult)
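Indicative results (a decimal comma is normalised to a dot before the multiplication):

    from youtube_dl.utils import parse_filesize

    parse_filesize('1.5GB')    # -> 1500000000 (decimal unit)
    parse_filesize('1,5 MiB')  # -> 1572864    (binary unit)
    parse_filesize('250KiB')   # -> 256000
    parse_filesize('foo')      # -> None       (no number/unit match)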
 
 
 def get_term_width():
-    columns = os.environ.get('COLUMNS', None)
+    columns = compat_getenv('COLUMNS', None)
     if columns:
         return int(columns)
 
@@ -1220,8 +1131,8 @@ def month_by_name(name):
     """ Return the number of a month by (locale-independently) English name """
 
     ENGLISH_NAMES = [
-        u'January', u'February', u'March', u'April', u'May', u'June',
-        u'July', u'August', u'September', u'October', u'November', u'December']
+        'January', 'February', 'March', 'April', 'May', 'June',
+        'July', 'August', 'September', 'October', 'November', 'December']
     try:
         return ENGLISH_NAMES.index(name) + 1
     except ValueError:
@@ -1232,7 +1143,7 @@ def fix_xml_ampersands(xml_str):
     """Replace all the '&' by '&amp;' in XML"""
     return re.sub(
         r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
-        u'&amp;',
+        '&amp;',
         xml_str)
 
 
@@ -1257,9 +1168,15 @@ def remove_start(s, start):
     return s
 
 
+def remove_end(s, end):
+    if s.endswith(end):
+        return s[:-len(end)]
+    return s
+
+
 def url_basename(url):
     path = compat_urlparse.urlparse(url).path
-    return path.strip(u'/').split(u'/')[-1]
+    return path.strip('/').split('/')[-1]
 
 
 class HEADRequest(compat_urllib_request.Request):
@@ -1271,13 +1188,20 @@ def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
     if get_attr:
         if v is not None:
             v = getattr(v, get_attr, None)
+    if v == '':
+        v = None
     return default if v is None else (int(v) * invscale // scale)
 
 
+def str_or_none(v, default=None):
+    return default if v is None else compat_str(v)
+
+
 def str_to_int(int_str):
+    """ A more relaxed version of int_or_none """
     if int_str is None:
         return None
-    int_str = re.sub(r'[,\.]', u'', int_str)
+    int_str = re.sub(r'[,\.\+]', '', int_str)
     return int(int_str)
 
 
@@ -1289,21 +1213,41 @@ def parse_duration(s):
     if s is None:
         return None
 
+    s = s.strip()
+
     m = re.match(
-        r'(?:(?:(?P<hours>[0-9]+)[:h])?(?P<mins>[0-9]+)[:m])?(?P<secs>[0-9]+)s?(?::[0-9]+)?$', s)
+        r'''(?ix)T?
+        (?:
+            (?P<only_mins>[0-9.]+)\s*(?:mins?|minutes?)\s*|
+            (?P<only_hours>[0-9.]+)\s*(?:hours?)|
+
+            (?:
+                (?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?
+                (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
+            )?
+            (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?
+        )$''', s)
     if not m:
         return None
-    res = int(m.group('secs'))
+    res = 0
+    if m.group('only_mins'):
+        return float_or_none(m.group('only_mins'), invscale=60)
+    if m.group('only_hours'):
+        return float_or_none(m.group('only_hours'), invscale=60 * 60)
+    if m.group('secs'):
+        res += int(m.group('secs'))
     if m.group('mins'):
         res += int(m.group('mins')) * 60
-        if m.group('hours'):
-            res += int(m.group('hours')) * 60 * 60
+    if m.group('hours'):
+        res += int(m.group('hours')) * 60 * 60
+    if m.group('ms'):
+        res += float(m.group('ms'))
     return res
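The rewritten pattern accepts plain '[hh:]mm:ss', suffixed forms, and standalone minute/hour phrases; the only_* branches return floats via float_or_none, the rest an int. For instance:

    from youtube_dl.utils import parse_duration

    parse_duration('1:30:05')    # -> 5405   (h:m:s)
    parse_duration('9:05')       # -> 545    (m:s)
    parse_duration('3s')         # -> 3
    parse_duration('5 min')      # -> 300.0  (only_mins branch)
    parse_duration('2.5 hours')  # -> 9000.0 (only_hours branch)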
 
 
 def prepend_extension(filename, ext):
-    name, real_ext = os.path.splitext(filename) 
-    return u'{0}.{1}{2}'.format(name, ext, real_ext)
+    name, real_ext = os.path.splitext(filename)
+    return '{0}.{1}{2}'.format(name, ext, real_ext)
 
 
 def check_executable(exe, args=[]):
@@ -1316,15 +1260,36 @@ def check_executable(exe, args=[]):
     return exe
 
 
-class PagedList(object):
-    def __init__(self, pagefunc, pagesize):
-        self._pagefunc = pagefunc
-        self._pagesize = pagesize
+def get_exe_version(exe, args=['--version'],
+                    version_re=r'version\s+([-0-9._a-zA-Z]+)',
+                    unrecognized='present'):
+    """ Returns the version of the specified executable,
+    or False if the executable is not present """
+    try:
+        out, err = subprocess.Popen(
+            [exe] + args,
+            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
+    except OSError:
+        return False
+    firstline = out.partition(b'\n')[0].decode('ascii', 'ignore')
+    m = re.search(version_re, firstline)
+    if m:
+        return m.group(1)
+    else:
+        return unrecognized
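A usage sketch; outputs depend on what is installed locally, so the values shown are indicative only:

    from youtube_dl.utils import get_exe_version

    get_exe_version('ffmpeg')       # e.g. '2.4.3'; False if not installed
    get_exe_version('no-such-exe')  # -> False (Popen raises OSError)
    # If the first output line matches nothing, the `unrecognized`
    # default 'present' is returned instead of a version string.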
 
+
+class PagedList(object):
     def __len__(self):
         # This is only useful for tests
         return len(self.getslice())
 
+
+class OnDemandPagedList(PagedList):
+    def __init__(self, pagefunc, pagesize):
+        self._pagefunc = pagefunc
+        self._pagesize = pagesize
+
     def getslice(self, start=0, end=None):
         res = []
         for pagenum in itertools.count(start // self._pagesize):
@@ -1363,6 +1328,35 @@ class PagedList(object):
         return res
 
 
+class InAdvancePagedList(PagedList):
+    def __init__(self, pagefunc, pagecount, pagesize):
+        self._pagefunc = pagefunc
+        self._pagecount = pagecount
+        self._pagesize = pagesize
+
+    def getslice(self, start=0, end=None):
+        res = []
+        start_page = start // self._pagesize
+        end_page = (
+            self._pagecount if end is None else (end // self._pagesize + 1))
+        skip_elems = start - start_page * self._pagesize
+        only_more = None if end is None else end - start
+        for pagenum in range(start_page, end_page):
+            page = list(self._pagefunc(pagenum))
+            if skip_elems:
+                page = page[skip_elems:]
+                skip_elems = None
+            if only_more is not None:
+                if len(page) < only_more:
+                    only_more -= len(page)
+                else:
+                    page = page[:only_more]
+                    res.extend(page)
+                    break
+            res.extend(page)
+        return res
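Both subclasses keep PagedList's interface; OnDemandPagedList fetches pages lazily until the slice is satisfied, while InAdvancePagedList knows the page count up front. A sketch with a synthetic page function:

    from youtube_dl.utils import OnDemandPagedList, InAdvancePagedList

    def pagefunc(pagenum):
        # ten synthetic entries per page
        start = pagenum * 10
        return iter(range(start, start + 10))

    OnDemandPagedList(pagefunc, 10).getslice(5, 12)       # -> [5, 6, ..., 11]
    InAdvancePagedList(pagefunc, 3, 10).getslice(25, 28)  # -> [25, 26, 27]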
+
+
 def uppercase_escape(s):
     unicode_escape = codecs.getdecoder('unicode_escape')
     return re.sub(
@@ -1370,8 +1364,26 @@ def uppercase_escape(s):
         lambda m: unicode_escape(m.group(0))[0],
         s)
 
+
+def escape_rfc3986(s):
+    """Escape non-ASCII characters as suggested by RFC 3986"""
+    if sys.version_info < (3, 0) and isinstance(s, unicode):
+        s = s.encode('utf-8')
+    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
+
+
+def escape_url(url):
+    """Escape URL as suggested by RFC 3986"""
+    url_parsed = compat_urllib_parse_urlparse(url)
+    return url_parsed._replace(
+        path=escape_rfc3986(url_parsed.path),
+        params=escape_rfc3986(url_parsed.params),
+        query=escape_rfc3986(url_parsed.query),
+        fragment=escape_rfc3986(url_parsed.fragment)
+    ).geturl()
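Because '%' is in the safe set, already-encoded URLs pass through unchanged and only raw non-ASCII is escaped, e.g.:

    from youtube_dl.utils import escape_url

    escape_url('http://example.com/ä/?q=ü')
    # -> 'http://example.com/%C3%A4/?q=%C3%BC'
    escape_url('http://example.com/%C3%A4/')  # unchanged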
+
 try:
-    struct.pack(u'!I', 0)
+    struct.pack('!I', 0)
 except TypeError:
     # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
     def struct_pack(spec, *args):
@@ -1392,7 +1404,7 @@ def read_batch_urls(batch_fd):
     def fixup(url):
         if not isinstance(url, compat_str):
             url = url.decode('utf-8', 'replace')
-        BOM_UTF8 = u'\xef\xbb\xbf'
+        BOM_UTF8 = '\xef\xbb\xbf'
         if url.startswith(BOM_UTF8):
             url = url[len(BOM_UTF8):]
         url = url.strip()
@@ -1408,6 +1420,12 @@ def urlencode_postdata(*args, **kargs):
     return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
 
 
+try:
+    etree_iter = xml.etree.ElementTree.Element.iter
+except AttributeError:  # Python <=2.6
+    etree_iter = lambda n: n.findall('.//*')
+
+
 def parse_xml(s):
     class TreeBuilder(xml.etree.ElementTree.TreeBuilder):
         def doctype(self, name, pubid, system):
@@ -1415,16 +1433,14 @@ def parse_xml(s):
 
     parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder())
     kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
-    return xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
-
-
-if sys.version_info < (3, 0) and sys.platform == 'win32':
-    def compat_getpass(prompt, *args, **kwargs):
-        if isinstance(prompt, compat_str):
-            prompt = prompt.encode(preferredencoding())
-        return getpass.getpass(prompt, *args, **kwargs)
-else:
-    compat_getpass = getpass.getpass
+    tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
+    # Fix up XML parser in Python 2.x
+    if sys.version_info < (3, 0):
+        for n in etree_iter(tree):
+            if n.text is not None:
+                if not isinstance(n.text, compat_str):
+                    n.text = n.text.decode('utf-8')
+    return tree
 
 
 US_RATINGS = {
@@ -1436,8 +1452,41 @@ US_RATINGS = {
 }
 
 
+def parse_age_limit(s):
+    if s is None:
+        return None
+    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
+    return int(m.group('age')) if m else US_RATINGS.get(s, None)
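parse_age_limit accepts bare ages with an optional '+' and falls back to the US_RATINGS table above (which maps e.g. 'PG-13' to 13). Indicative calls:

    from youtube_dl.utils import parse_age_limit

    parse_age_limit('18')     # -> 18
    parse_age_limit('18+')    # -> 18
    parse_age_limit('PG-13')  # -> 13 (US_RATINGS lookup)
    parse_age_limit(None)     # -> None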
+
+
 def strip_jsonp(code):
-    return re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code)
+    return re.sub(
+        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
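The extended strip_jsonp pattern also tolerates trailing '//' comments after the closing parenthesis; a round trip:

    import json
    from youtube_dl.utils import strip_jsonp

    json.loads(strip_jsonp('callback({"id": 42}); // JSONP padding'))
    # -> {'id': 42}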
+
+
+def js_to_json(code):
+    def fix_kv(m):
+        v = m.group(0)
+        if v in ('true', 'false', 'null'):
+            return v
+        if v.startswith('"'):
+            return v
+        if v.startswith("'"):
+            v = v[1:-1]
+            v = re.sub(r"\\\\|\\'|\"", lambda m: {
+                '\\\\': '\\\\',
+                "\\'": "'",
+                '"': '\\"',
+            }[m.group(0)], v)
+        return '"%s"' % v
+
+    res = re.sub(r'''(?x)
+        "(?:[^"\\]*(?:\\\\|\\")?)*"|
+        '(?:[^'\\]*(?:\\\\|\\')?)*'|
+        [a-zA-Z_][a-zA-Z_0-9]*
+        ''', fix_kv, code)
+    res = re.sub(r',(\s*\])', lambda m: m.group(1), res)
+    return res
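js_to_json only normalises the JavaScript object literals extractors scrape out of pages (identifier keys, single quotes, trailing commas before ']'); it is not a general parser. For example:

    import json
    from youtube_dl.utils import js_to_json

    js_to_json("{abc: true, def: 'ghi'}")  # -> '{"abc": true, "def": "ghi"}'
    js_to_json("['a', 'b',]")              # -> '["a", "b"]'
    json.loads(js_to_json("{x: '1'}"))     # -> {'x': '1'}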
 
 
 def qualities(quality_ids):
@@ -1452,14 +1501,37 @@ def qualities(quality_ids):
 
 DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
 
-try:
-    subprocess_check_output = subprocess.check_output
-except AttributeError:
-    def subprocess_check_output(*args, **kwargs):
-        assert 'input' not in kwargs
-        p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
-        output, _ = p.communicate()
-        ret = p.poll()
-        if ret:
-            raise subprocess.CalledProcessError(ret, p.args, output=output)
-        return output
+
+def limit_length(s, length):
+    """ Add ellipses to overly long strings """
+    if s is None:
+        return None
+    ELLIPSES = '...'
+    if len(s) > length:
+        return s[:length - len(ELLIPSES)] + ELLIPSES
+    return s
+
+
+def version_tuple(v):
+    return tuple(int(e) for e in re.split(r'[-.]', v))
+
+
+def is_outdated_version(version, limit, assume_new=True):
+    if not version:
+        return not assume_new
+    try:
+        return version_tuple(version) < version_tuple(limit)
+    except ValueError:
+        return not assume_new
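version_tuple splits on both '.' and '-', so the date-based scheme used by this very release compares correctly; for example:

    from youtube_dl.utils import is_outdated_version, version_tuple

    version_tuple('2014.12.12.3')                        # -> (2014, 12, 12, 3)
    is_outdated_version('2014.07.30', '2014.12.12.3')    # -> True
    is_outdated_version('', '2014.12.12.3')              # -> False (assume_new)
    is_outdated_version('abc', '1.0', assume_new=False)  # -> True  (unparseable)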
+
+
+def ytdl_is_updateable():
+    """ Returns if youtube-dl can be updated with -U """
+    from zipimport import zipimporter
+
+    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
+
+
+def args_to_str(args):
+    # Get a short string representation for a subprocess command
+    return ' '.join(shlex_quote(a) for a in args)
youtube_dl/version.py
index 6e7d56cf75bfcc0f0d74b1ac917dab02112be51b..995bae3d45ae87760424da8fda8a161a78e929a5 100644 (file)
@@ -1,2 +1,3 @@
+from __future__ import unicode_literals
 
-__version__ = '2014.07.30'
+__version__ = '2014.12.12.3'