Merge pull request #3927 from qrtt1/master
author Philipp Hagemeister <phihag@phihag.de>
Sat, 13 Dec 2014 11:59:12 +0000 (12:59 +0100)
committer Philipp Hagemeister <phihag@phihag.de>
Sat, 13 Dec 2014 11:59:12 +0000 (12:59 +0100)
apply ratelimit to f4m
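
The change wires the existing --rate-limit option into the f4m (HDS) downloader, whose fragment loop previously ignored it. youtube-dl's downloader base class (youtube_dl/downloader/common.py) exposes a slow_down() helper for exactly this kind of pacing. A rough standalone sketch of the idea (illustrative only, not the actual patch):

    import time

    def copy_throttled(chunks, out, rate_limit):
        """Write each chunk to out, sleeping so the average throughput
        stays at or below rate_limit bytes per second."""
        start = time.time()
        written = 0
        for chunk in chunks:
            out.write(chunk)
            written += len(chunk)
            # How long the bytes written so far should have taken at the cap.
            expected = written / float(rate_limit)
            elapsed = time.time() - start
            if expected > elapsed:
                time.sleep(expected - elapsed)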

391 files changed:
.gitignore
AUTHORS [new file with mode: 0644]
CONTRIBUTING.md [new file with mode: 0644]
Makefile
README.md
devscripts/bash-completion.py
devscripts/buildserver.py
devscripts/check-porn.py
devscripts/fish-completion.py
devscripts/gh-pages/add-version.py
devscripts/gh-pages/generate-download.py
devscripts/gh-pages/sign-versions.py
devscripts/gh-pages/update-copyright.py
devscripts/gh-pages/update-feed.py
devscripts/gh-pages/update-sites.py
devscripts/make_contributing.py [new file with mode: 0755]
devscripts/make_readme.py
devscripts/prepare_manpage.py
devscripts/transition_helper.py [deleted file]
devscripts/transition_helper_exe/setup.py [deleted file]
devscripts/transition_helper_exe/youtube-dl.py [deleted file]
devscripts/zsh-completion.in [new file with mode: 0644]
devscripts/zsh-completion.py [new file with mode: 0755]
docs/conf.py
setup.py
test/helper.py
test/swftests/ConstArrayAccess.as [new file with mode: 0644]
test/swftests/ConstantInt.as [new file with mode: 0644]
test/swftests/DictCall.as [new file with mode: 0644]
test/swftests/EqualsOperator.as [new file with mode: 0644]
test/swftests/MemberAssignment.as [new file with mode: 0644]
test/swftests/NeOperator.as [new file with mode: 0644]
test/swftests/PrivateVoidCall.as [new file with mode: 0644]
test/swftests/StringBasics.as [new file with mode: 0644]
test/swftests/StringCharCodeAt.as [new file with mode: 0644]
test/swftests/StringConversion.as [new file with mode: 0644]
test/test_YoutubeDL.py
test/test_age_restriction.py
test/test_all_urls.py
test/test_compat.py [new file with mode: 0644]
test/test_download.py
test/test_execution.py
test/test_subtitles.py
test/test_swfinterp.py
test/test_unicode_literals.py
test/test_utils.py
test/test_write_annotations.py
test/test_write_info_json.py
test/test_youtube_lists.py
test/test_youtube_signature.py
youtube_dl/YoutubeDL.py
youtube_dl/__init__.py
youtube_dl/__main__.py
youtube_dl/aes.py
youtube_dl/cache.py
youtube_dl/compat.py [new file with mode: 0644]
youtube_dl/downloader/__init__.py
youtube_dl/downloader/common.py
youtube_dl/downloader/f4m.py
youtube_dl/downloader/hls.py
youtube_dl/downloader/http.py
youtube_dl/downloader/mplayer.py
youtube_dl/downloader/rtmp.py
youtube_dl/extractor/__init__.py
youtube_dl/extractor/abc.py
youtube_dl/extractor/academicearth.py
youtube_dl/extractor/addanime.py
youtube_dl/extractor/adultswim.py
youtube_dl/extractor/allocine.py
youtube_dl/extractor/aol.py
youtube_dl/extractor/aparat.py
youtube_dl/extractor/appletrailers.py
youtube_dl/extractor/ard.py
youtube_dl/extractor/arte.py
youtube_dl/extractor/audiomack.py [new file with mode: 0644]
youtube_dl/extractor/auengine.py
youtube_dl/extractor/azubu.py [new file with mode: 0644]
youtube_dl/extractor/bambuser.py
youtube_dl/extractor/bandcamp.py
youtube_dl/extractor/bbccouk.py
youtube_dl/extractor/beeg.py
youtube_dl/extractor/behindkink.py
youtube_dl/extractor/bet.py [new file with mode: 0644]
youtube_dl/extractor/bild.py [new file with mode: 0644]
youtube_dl/extractor/bilibili.py
youtube_dl/extractor/bliptv.py
youtube_dl/extractor/bpb.py [new file with mode: 0644]
youtube_dl/extractor/br.py
youtube_dl/extractor/breakcom.py
youtube_dl/extractor/brightcove.py
youtube_dl/extractor/buzzfeed.py [new file with mode: 0644]
youtube_dl/extractor/byutv.py
youtube_dl/extractor/canalplus.py
youtube_dl/extractor/cbs.py
youtube_dl/extractor/cbsnews.py
youtube_dl/extractor/ceskatelevize.py
youtube_dl/extractor/channel9.py
youtube_dl/extractor/cinchcast.py [new file with mode: 0644]
youtube_dl/extractor/cinemassacre.py [deleted file]
youtube_dl/extractor/clipfish.py
youtube_dl/extractor/cliphunter.py
youtube_dl/extractor/clipsyndicate.py
youtube_dl/extractor/cloudy.py
youtube_dl/extractor/cnet.py
youtube_dl/extractor/cnn.py
youtube_dl/extractor/collegehumor.py
youtube_dl/extractor/comcarcoff.py [new file with mode: 0644]
youtube_dl/extractor/comedycentral.py
youtube_dl/extractor/common.py
youtube_dl/extractor/condenast.py
youtube_dl/extractor/cracked.py
youtube_dl/extractor/crunchyroll.py
youtube_dl/extractor/d8.py [deleted file]
youtube_dl/extractor/dailymotion.py
youtube_dl/extractor/daum.py
youtube_dl/extractor/defense.py
youtube_dl/extractor/discovery.py
youtube_dl/extractor/dotsub.py
youtube_dl/extractor/dropbox.py
youtube_dl/extractor/drtv.py
youtube_dl/extractor/ebaumsworld.py
youtube_dl/extractor/ehow.py
youtube_dl/extractor/eighttracks.py
youtube_dl/extractor/engadget.py
youtube_dl/extractor/eporner.py
youtube_dl/extractor/escapist.py
youtube_dl/extractor/everyonesmixtape.py
youtube_dl/extractor/extremetube.py
youtube_dl/extractor/facebook.py
youtube_dl/extractor/faz.py
youtube_dl/extractor/fc2.py
youtube_dl/extractor/firedrive.py
youtube_dl/extractor/firsttv.py
youtube_dl/extractor/fivemin.py
youtube_dl/extractor/fktv.py
youtube_dl/extractor/flickr.py
youtube_dl/extractor/folketinget.py [new file with mode: 0644]
youtube_dl/extractor/fourtube.py
youtube_dl/extractor/foxgay.py [new file with mode: 0644]
youtube_dl/extractor/foxnews.py [new file with mode: 0644]
youtube_dl/extractor/franceculture.py
youtube_dl/extractor/francetv.py
youtube_dl/extractor/freevideo.py [new file with mode: 0644]
youtube_dl/extractor/funnyordie.py
youtube_dl/extractor/gamekings.py
youtube_dl/extractor/gamespot.py
youtube_dl/extractor/gdcvault.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/giantbomb.py [new file with mode: 0644]
youtube_dl/extractor/glide.py [new file with mode: 0644]
youtube_dl/extractor/globo.py
youtube_dl/extractor/goldenmoustache.py [new file with mode: 0644]
youtube_dl/extractor/golem.py
youtube_dl/extractor/googlesearch.py
youtube_dl/extractor/gorillavid.py
youtube_dl/extractor/goshgay.py
youtube_dl/extractor/grooveshark.py
youtube_dl/extractor/groupon.py [new file with mode: 0644]
youtube_dl/extractor/hark.py
youtube_dl/extractor/heise.py
youtube_dl/extractor/helsinki.py
youtube_dl/extractor/hornbunny.py
youtube_dl/extractor/hostingbulk.py
youtube_dl/extractor/hotnewhiphop.py
youtube_dl/extractor/howcast.py
youtube_dl/extractor/howstuffworks.py
youtube_dl/extractor/huffpost.py
youtube_dl/extractor/hypem.py
youtube_dl/extractor/iconosquare.py
youtube_dl/extractor/ign.py
youtube_dl/extractor/imdb.py
youtube_dl/extractor/infoq.py
youtube_dl/extractor/instagram.py
youtube_dl/extractor/internetvideoarchive.py
youtube_dl/extractor/iprima.py
youtube_dl/extractor/ivi.py
youtube_dl/extractor/izlesene.py
youtube_dl/extractor/jadorecettepub.py
youtube_dl/extractor/jeuxvideo.py
youtube_dl/extractor/jukebox.py
youtube_dl/extractor/justintv.py [deleted file]
youtube_dl/extractor/kankan.py
youtube_dl/extractor/keek.py
youtube_dl/extractor/keezmovies.py
youtube_dl/extractor/kickstarter.py
youtube_dl/extractor/kontrtube.py
youtube_dl/extractor/ku6.py
youtube_dl/extractor/laola1tv.py [new file with mode: 0644]
youtube_dl/extractor/lifenews.py
youtube_dl/extractor/liveleak.py
youtube_dl/extractor/livestream.py
youtube_dl/extractor/lrt.py
youtube_dl/extractor/lynda.py
youtube_dl/extractor/m6.py
youtube_dl/extractor/mailru.py
youtube_dl/extractor/malemotion.py
youtube_dl/extractor/mdr.py
youtube_dl/extractor/metacafe.py
youtube_dl/extractor/metacritic.py
youtube_dl/extractor/minhateca.py [new file with mode: 0644]
youtube_dl/extractor/mit.py
youtube_dl/extractor/mitele.py
youtube_dl/extractor/mixcloud.py
youtube_dl/extractor/mlb.py
youtube_dl/extractor/moevideo.py
youtube_dl/extractor/mofosex.py
youtube_dl/extractor/mojvideo.py
youtube_dl/extractor/moniker.py
youtube_dl/extractor/mooshare.py
youtube_dl/extractor/motherless.py
youtube_dl/extractor/motorsport.py
youtube_dl/extractor/movieclips.py
youtube_dl/extractor/moviezine.py
youtube_dl/extractor/movshare.py
youtube_dl/extractor/mpora.py
youtube_dl/extractor/mtv.py
youtube_dl/extractor/muenchentv.py
youtube_dl/extractor/musicplayon.py
youtube_dl/extractor/muzu.py
youtube_dl/extractor/myspace.py
youtube_dl/extractor/myspass.py
youtube_dl/extractor/myvideo.py
youtube_dl/extractor/myvidster.py [new file with mode: 0644]
youtube_dl/extractor/naver.py
youtube_dl/extractor/nba.py
youtube_dl/extractor/nbc.py
youtube_dl/extractor/ndr.py
youtube_dl/extractor/newgrounds.py
youtube_dl/extractor/newstube.py
youtube_dl/extractor/nfb.py
youtube_dl/extractor/nfl.py
youtube_dl/extractor/nhl.py
youtube_dl/extractor/niconico.py
youtube_dl/extractor/ninegag.py
youtube_dl/extractor/noco.py
youtube_dl/extractor/normalboots.py
youtube_dl/extractor/nosvideo.py
youtube_dl/extractor/novamov.py
youtube_dl/extractor/nowvideo.py
youtube_dl/extractor/npo.py
youtube_dl/extractor/ntv.py
youtube_dl/extractor/nuvid.py
youtube_dl/extractor/nytimes.py
youtube_dl/extractor/ooyala.py
youtube_dl/extractor/orf.py
youtube_dl/extractor/pbs.py
youtube_dl/extractor/phoenix.py [new file with mode: 0644]
youtube_dl/extractor/photobucket.py
youtube_dl/extractor/played.py
youtube_dl/extractor/playfm.py
youtube_dl/extractor/playvid.py
youtube_dl/extractor/podomatic.py
youtube_dl/extractor/pornhd.py
youtube_dl/extractor/pornhub.py
youtube_dl/extractor/pornotube.py
youtube_dl/extractor/promptfile.py
youtube_dl/extractor/prosiebensat1.py
youtube_dl/extractor/quickvid.py [new file with mode: 0644]
youtube_dl/extractor/radiode.py [new file with mode: 0644]
youtube_dl/extractor/rai.py
youtube_dl/extractor/rbmaradio.py
youtube_dl/extractor/redtube.py
youtube_dl/extractor/ringtv.py
youtube_dl/extractor/ro220.py
youtube_dl/extractor/rtlnl.py
youtube_dl/extractor/rtlnow.py
youtube_dl/extractor/rtp.py [new file with mode: 0644]
youtube_dl/extractor/rts.py
youtube_dl/extractor/rtve.py
youtube_dl/extractor/ruhd.py
youtube_dl/extractor/rutube.py
youtube_dl/extractor/rutv.py
youtube_dl/extractor/sbs.py
youtube_dl/extractor/scivee.py
youtube_dl/extractor/screencast.py
youtube_dl/extractor/screenwavemedia.py [new file with mode: 0644]
youtube_dl/extractor/servingsys.py
youtube_dl/extractor/sexu.py [new file with mode: 0644]
youtube_dl/extractor/sexykarma.py [new file with mode: 0644]
youtube_dl/extractor/shared.py
youtube_dl/extractor/sharesix.py
youtube_dl/extractor/sina.py
youtube_dl/extractor/slideshare.py
youtube_dl/extractor/slutload.py
youtube_dl/extractor/smotri.py
youtube_dl/extractor/sockshare.py
youtube_dl/extractor/sohu.py
youtube_dl/extractor/soundcloud.py
youtube_dl/extractor/space.py
youtube_dl/extractor/spankwire.py
youtube_dl/extractor/spiegel.py
youtube_dl/extractor/spiegeltv.py
youtube_dl/extractor/sport5.py
youtube_dl/extractor/sportbox.py
youtube_dl/extractor/sportdeutschland.py
youtube_dl/extractor/srmediathek.py [new file with mode: 0644]
youtube_dl/extractor/stanfordoc.py
youtube_dl/extractor/streamcloud.py
youtube_dl/extractor/streamcz.py
youtube_dl/extractor/subtitles.py
youtube_dl/extractor/swrmediathek.py
youtube_dl/extractor/syfy.py
youtube_dl/extractor/sztvhu.py
youtube_dl/extractor/tagesschau.py
youtube_dl/extractor/tapely.py
youtube_dl/extractor/tass.py [new file with mode: 0644]
youtube_dl/extractor/teachertube.py
youtube_dl/extractor/teamcoco.py
youtube_dl/extractor/ted.py
youtube_dl/extractor/telebruxelles.py [new file with mode: 0644]
youtube_dl/extractor/telecinco.py [new file with mode: 0644]
youtube_dl/extractor/tf1.py
youtube_dl/extractor/theonion.py [new file with mode: 0644]
youtube_dl/extractor/theplatform.py
youtube_dl/extractor/thisav.py
youtube_dl/extractor/tinypic.py
youtube_dl/extractor/tlc.py
youtube_dl/extractor/tmz.py [new file with mode: 0644]
youtube_dl/extractor/tnaflix.py
youtube_dl/extractor/traileraddict.py
youtube_dl/extractor/trilulilu.py
youtube_dl/extractor/trutube.py
youtube_dl/extractor/tube8.py
youtube_dl/extractor/tudou.py
youtube_dl/extractor/tumblr.py
youtube_dl/extractor/tunein.py [new file with mode: 0644]
youtube_dl/extractor/tutv.py
youtube_dl/extractor/tvigle.py
youtube_dl/extractor/tvp.py
youtube_dl/extractor/tvplay.py
youtube_dl/extractor/twentyfourvideo.py [new file with mode: 0644]
youtube_dl/extractor/twitch.py [new file with mode: 0644]
youtube_dl/extractor/udemy.py
youtube_dl/extractor/urort.py
youtube_dl/extractor/ustream.py
youtube_dl/extractor/vbox7.py
youtube_dl/extractor/veehd.py
youtube_dl/extractor/veoh.py
youtube_dl/extractor/vesti.py
youtube_dl/extractor/vevo.py
youtube_dl/extractor/vgtv.py
youtube_dl/extractor/vh1.py
youtube_dl/extractor/vice.py [new file with mode: 0644]
youtube_dl/extractor/viddler.py
youtube_dl/extractor/videobam.py
youtube_dl/extractor/videodetective.py
youtube_dl/extractor/videofyme.py
youtube_dl/extractor/videomega.py
youtube_dl/extractor/videopremium.py
youtube_dl/extractor/videott.py
youtube_dl/extractor/videoweed.py
youtube_dl/extractor/vidzi.py [new file with mode: 0644]
youtube_dl/extractor/vimeo.py
youtube_dl/extractor/vine.py
youtube_dl/extractor/vk.py
youtube_dl/extractor/vodlocker.py
youtube_dl/extractor/vrt.py [new file with mode: 0644]
youtube_dl/extractor/vube.py
youtube_dl/extractor/vuclip.py
youtube_dl/extractor/wdr.py
youtube_dl/extractor/weibo.py
youtube_dl/extractor/wimp.py
youtube_dl/extractor/wistia.py
youtube_dl/extractor/worldstarhiphop.py
youtube_dl/extractor/wrzuta.py
youtube_dl/extractor/xbef.py
youtube_dl/extractor/xhamster.py
youtube_dl/extractor/xminus.py [new file with mode: 0644]
youtube_dl/extractor/xnxx.py
youtube_dl/extractor/xtube.py
youtube_dl/extractor/xvideos.py
youtube_dl/extractor/yahoo.py
youtube_dl/extractor/ynet.py
youtube_dl/extractor/youjizz.py
youtube_dl/extractor/youku.py
youtube_dl/extractor/youporn.py
youtube_dl/extractor/youtube.py
youtube_dl/extractor/zdf.py
youtube_dl/extractor/zingmp3.py [new file with mode: 0644]
youtube_dl/jsinterp.py
youtube_dl/options.py
youtube_dl/postprocessor/__init__.py
youtube_dl/postprocessor/atomicparsley.py
youtube_dl/postprocessor/common.py
youtube_dl/postprocessor/execafterdownload.py
youtube_dl/postprocessor/ffmpeg.py
youtube_dl/postprocessor/xattrpp.py
youtube_dl/swfinterp.py
youtube_dl/update.py
youtube_dl/utils.py
youtube_dl/version.py

index e44977ca36ed367c009fea0144f50d3d1893d082..86312d4e4185cd6dbd56627a3f2ce9f8ef5e9a43 100644 (file)
@@ -30,3 +30,4 @@ updates_key.pem
 *.swp
 test/testdata
 .tox
+youtube-dl.zsh
diff --git a/AUTHORS b/AUTHORS
new file mode 100644 (file)
index 0000000..bfa00f9
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,94 @@
+Ricardo Garcia Gonzalez
+Danny Colligan
+Benjamin Johnson
+Vasyl' Vavrychuk
+Witold Baryluk
+Paweł Paprota
+Gergely Imreh
+Rogério Brito
+Philipp Hagemeister
+Sören Schulze
+Kevin Ngo
+Ori Avtalion
+shizeeg
+Filippo Valsorda
+Christian Albrecht
+Dave Vasilevsky
+Jaime Marquínez Ferrándiz
+Jeff Crouse
+Osama Khalid
+Michael Walter
+M. Yasoob Ullah Khalid
+Julien Fraichard
+Johny Mo Swag
+Axel Noack
+Albert Kim
+Pierre Rudloff
+Huarong Huo
+Ismael Mejía
+Steffan 'Ruirize' James
+Andras Elso
+Jelle van der Waa
+Marcin Cieślak
+Anton Larionov
+Takuya Tsuchida
+Sergey M.
+Michael Orlitzky
+Chris Gahan
+Saimadhav Heblikar
+Mike Col
+Oleg Prutz
+pulpe
+Andreas Schmitz
+Michael Kaiser
+Niklas Laxström
+David Triendl
+Anthony Weems
+David Wagner
+Juan C. Olivares
+Mattias Harrysson
+phaer
+Sainyam Kapoor
+Nicolas Évrard
+Jason Normore
+Hoje Lee
+Adam Thalhammer
+Georg Jähnig
+Ralf Haring
+Koki Takahashi
+Ariset Llerena
+Adam Malcontenti-Wilson
+Tobias Bell
+Naglis Jonaitis
+Charles Chen
+Hassaan Ali
+Dobrosław Żybort
+David Fabijan
+Sebastian Haas
+Alexander Kirk
+Erik Johnson
+Keith Beckman
+Ole Ernst
+Aaron McDaniel (mcd1992)
+Magnus Kolstad
+Hari Padmanaban
+Carlos Ramos
+5moufl
+lenaten
+Dennis Scheiba
+Damon Timm
+winwon
+Xavier Beynon
+Gabriel Schubiner
+xantares
+Jan Matějka
+Mauroy Sébastien
+William Sewell
+Dao Hoang Son
+Oskar Jauch
+Matthew Rayfield
+t0mm0
+Tithen-Firion
+Zack Fernandes
+cryptonaut
+Adrian Kretz
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..0ff7b39
--- /dev/null
@@ -0,0 +1,136 @@
+Please include the full output of the command when run with `--verbose`. The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
+
+Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
+
+### Is the description of the issue itself sufficient?
+
+We often get issue reports that we cannot really decipher. While in most cases we eventually get the required information after several rounds of follow-up questions, this poses an unnecessary drain on our resources. Many contributors, including myself, are also not native speakers, so we may misread some parts.
+
+So please elaborate on what feature you are requesting, or what bug you want to be fixed. Make sure that it's obvious
+
+- What the problem is
+- How it could be fixed
+- What your proposed solution would look like
+
+If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
+
+For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
+
+Site support requests **must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
+
+### Are you using the latest version?
+
+Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive concern bugs that have already been fixed, reported by people running outdated versions. This goes for feature requests as well.
+
+### Is the issue already documented?
+
+Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or at https://github.com/rg3/youtube-dl/search?type=Issues . If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
+
+### Why are existing options not enough?
+
+Before requesting a new feature, please have a quick peek at [the list of supported options](https://github.com/rg3/youtube-dl/blob/master/README.md#synopsis). Many feature requests are for features that actually exist already! Please, absolutely do show off your work in the issue report and detail how the existing similar options do *not* solve your problem.
+
+### Is there enough context in your bug report?
+
+People want to solve problems, and often think they do us a favor by breaking down their larger problems (e.g. wanting to skip already downloaded files) into a specific request (e.g. asking us to check whether the file exists before downloading the info page). However, what often happens is that they break the problem down into two steps: one simple, and one impossible (or extremely complicated).
+
+We are then presented with a very complicated request when the original problem could be solved far more easily, e.g. by recording the downloaded video IDs in a separate file. To avoid this, you must include the greater context where it is non-obvious. In particular, every feature request that does not consist of adding support for a new site should contain a use case scenario that explains in what situation the missing feature would be useful.
+
+### Does the issue involve one problem, and one problem only?
+
+Some of our users seem to think there is a limit to the number of issues they can or should open. There is no limit to the number of issues they can or should open. While it may seem appealing to be able to dump all your issues into one ticket, that means that someone who solves one of your issues cannot mark the issue as closed. Typically, reporting a bunch of issues leads to the ticket lingering since nobody wants to attack that behemoth, until someone mercifully splits the issue into multiple ones.
+
+In particular, every site support request issue should only pertain to services at one site (generally under a common domain, but always using the same backend technology). Do not request support for vimeo user videos, Whitehouse podcasts, and Google Plus pages in the same issue. Also, make sure that you don't post bug reports alongside feature requests. As a rule of thumb, a feature request does not include outputs of youtube-dl that are not immediately related to the feature at hand. Do not post reports of a network error alongside the request for a new video service.
+
+### Is anyone going to need the feature?
+
+Only post features that you (or an incapacitated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.
+
+### Is your question about youtube-dl?
+
+It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different or even the reporter's own application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
+
+# DEVELOPER INSTRUCTIONS
+
+Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution.
+
+To run youtube-dl as a developer, you don't need to build anything either. Simply execute
+
+    python -m youtube_dl
+
+To run the tests, simply invoke your favorite test runner, or execute a test file directly; any of the following work:
+
+    python -m unittest discover
+    python test/test_download.py
+    nosetests
+
+If you want to create a build of youtube-dl yourself, you'll need
+
+* python
+* make
+* pandoc
+* zip
+* nosetests
+
+### Adding support for a new site
+
+If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):
+
+1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
+2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
+3. Start a new git branch with `cd youtube-dl; git checkout -b yourextractor`
+4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:
+    ```python
+    # coding: utf-8
+    from __future__ import unicode_literals
+
+    from .common import InfoExtractor
+
+
+    class YourExtractorIE(InfoExtractor):
+        _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
+        _TEST = {
+            'url': 'http://yourextractor.com/watch/42',
+            'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
+            'info_dict': {
+                'id': '42',
+                'ext': 'mp4',
+                'title': 'Video title goes here',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                # TODO more properties, either as:
+                # * A value
+                # * MD5 checksum; start the string with md5:
+                # * A regular expression; start the string with re:
+                # * Any Python type (for example int or float)
+            }
+        }
+
+        def _real_extract(self, url):
+            video_id = self._match_id(url)
+            webpage = self._download_webpage(url, video_id)
+
+            # TODO more code goes here, for example ...
+            title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+
+            return {
+                'id': video_id,
+                'title': title,
+                'description': self._og_search_description(webpage),
+                # TODO more properties (see youtube_dl/extractor/common.py)
+            }
+    ```
+5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
+7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
+8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501); an example invocation is shown after this list.
+9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
+
+        $ git add youtube_dl/extractor/__init__.py
+        $ git add youtube_dl/extractor/yourextractor.py
+        $ git commit -m '[yourextractor] Add new extractor'
+        $ git push origin yourextractor
+
+10. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
+
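+For step 8, the invocation might look like this (with `yourextractor.py` standing in for your file from step 4):
+
+    $ pyflakes youtube_dl/extractor/yourextractor.py
+    $ pep8 --ignore=E501 youtube_dl/extractor/yourextractor.py
+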
+In any case, thank you very much for your contributions!
+
index 6272b826ce0bc86749948684c81f8436f29c7b9b..d5b6e4307cb7a258016203b225eb2bf99331ecf9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
-all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.fish
+all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
 
 clean:
-       rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.fish
+       rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part *.info.json CONTRIBUTING.md.tmp
 
 cleanall: clean
        rm -f youtube-dl youtube-dl.exe
@@ -9,6 +9,7 @@ cleanall: clean
 PREFIX ?= /usr/local
 BINDIR ?= $(PREFIX)/bin
 MANDIR ?= $(PREFIX)/man
+SHAREDIR ?= $(PREFIX)/share
 PYTHON ?= /usr/bin/env python
 
 # set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
@@ -22,13 +23,15 @@ else
        endif
 endif
 
-install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
+install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
        install -d $(DESTDIR)$(BINDIR)
        install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
        install -d $(DESTDIR)$(MANDIR)/man1
        install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
        install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
        install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
+       install -d $(DESTDIR)$(SHAREDIR)/zsh/site-functions
+       install -m 644 youtube-dl.zsh $(DESTDIR)$(SHAREDIR)/zsh/site-functions/_youtube-dl
        install -d $(DESTDIR)$(SYSCONFDIR)/fish/completions
        install -m 644 youtube-dl.fish $(DESTDIR)$(SYSCONFDIR)/fish/completions/youtube-dl.fish
 
@@ -38,7 +41,7 @@ test:
 
 tar: youtube-dl.tar.gz
 
-.PHONY: all clean install test tar bash-completion pypi-files fish-completion
+.PHONY: all clean install test tar bash-completion pypi-files zsh-completion fish-completion
 
 pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
 
@@ -53,6 +56,9 @@ youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
 README.md: youtube_dl/*.py youtube_dl/*/*.py
        COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py
 
+CONTRIBUTING.md: README.md
+       python devscripts/make_contributing.py README.md CONTRIBUTING.md
+
 README.txt: README.md
        pandoc -f markdown -t plain README.md -o README.txt
 
@@ -66,12 +72,17 @@ youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-co
 
 bash-completion: youtube-dl.bash-completion
 
+youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
+       python devscripts/zsh-completion.py
+
+zsh-completion: youtube-dl.zsh
+
 youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
        python devscripts/fish-completion.py
 
 fish-completion: youtube-dl.fish
 
-youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.fish
+youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
        @tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
                --exclude '*.DS_Store' \
                --exclude '*.kate-swp' \
@@ -86,5 +97,5 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
                bin devscripts test youtube_dl docs \
                LICENSE README.md README.txt \
                Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
-               youtube-dl.fish setup.py \
+               youtube-dl.zsh youtube-dl.fish setup.py \
                youtube-dl
index cabc5eb9adb998791256214ec6b1633ba9a075c6..f10f06ee8c9d26a01a51d7b29bdb93b7b390b4e0 100644 (file)
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@ Alternatively, refer to the developer instructions below for how to check out an
 # DESCRIPTION
 **youtube-dl** is a small command-line program to download videos from
 YouTube.com and a few more sites. It requires the Python interpreter, version
-2.6, 2.7, or 3.3+, and it is not platform specific. It should work on
+2.6, 2.7, or 3.2+, and it is not platform specific. It should work on
 your Unix box, on Windows or on Mac OS X. It is released to the public domain,
 which means you can modify it, redistribute it or use it however you like.
 
@@ -65,10 +65,12 @@ which means you can modify it, redistribute it or use it however you like.
                                      this is not possible instead of searching.
     --ignore-config                  Do not read configuration files. When given
                                      in the global configuration file /etc
-                                     /youtube-dl.conf: do not read the user
-                                     configuration in ~/.config/youtube-dl.conf
-                                     (%APPDATA%/youtube-dl/config.txt on
-                                     Windows)
+                                     /youtube-dl.conf: Do not read the user
+                                     configuration in ~/.config/youtube-
+                                     dl/config (%APPDATA%/youtube-dl/config.txt
+                                     on Windows)
+    --flat-playlist                  Do not extract the videos of a playlist,
+                                     only list them.
 
 ## Video Selection:
     --playlist-start NUMBER          playlist video to start at (default is 1)
@@ -91,7 +93,8 @@ which means you can modify it, redistribute it or use it however you like.
                                      COUNT views
     --max-views COUNT                Do not download any videos with more than
                                      COUNT views
-    --no-playlist                    download only the currently playing video
+    --no-playlist                    If the URL refers to a video and a
+                                     playlist, download only the video.
     --age-limit YEARS                download only videos suitable for the given
                                      age
     --download-archive FILE          Download only videos not listed in the
@@ -99,8 +102,6 @@ which means you can modify it, redistribute it or use it however you like.
                                      downloaded videos in it.
     --include-ads                    Download advertisements as well
                                      (experimental)
-    --youtube-include-dash-manifest  Try to download the DASH manifest on
-                                     YouTube videos (experimental)
 
 ## Download Options:
     -r, --rate-limit LIMIT           maximum download rate in bytes per second
@@ -112,12 +113,12 @@ which means you can modify it, redistribute it or use it however you like.
                                      size. By default, the buffer size is
                                      automatically resized from an initial value
                                      of SIZE.
+    --playlist-reverse               Download playlist videos in reverse order
 
 ## Filesystem Options:
     -a, --batch-file FILE            file containing URLs to download ('-' for
                                      stdin)
     --id                             use only video ID in file name
-    -A, --auto-number                number downloaded files starting from 00000
     -o, --output TEMPLATE            output filename template. Use %(title)s to
                                      get the title, %(uploader)s for the
                                      uploader name, %(uploader_id)s for the
@@ -131,17 +132,19 @@ which means you can modify it, redistribute it or use it however you like.
                                      %(upload_date)s for the upload date
                                      (YYYYMMDD), %(extractor)s for the provider
                                      (youtube, metacafe, etc), %(id)s for the
-                                     video id, %(playlist)s for the playlist the
+                                     video id, %(playlist_title)s,
+                                     %(playlist_id)s, or %(playlist)s (=title if
+                                     present, ID otherwise) for the playlist the
                                      video is in, %(playlist_index)s for the
-                                     position in the playlist and %% for a
-                                     literal percent. %(height)s and %(width)s
-                                     for the width and height of the video
-                                     format. %(resolution)s for a textual
+                                     position in the playlist. %(height)s and
+                                     %(width)s for the width and height of the
+                                     video format. %(resolution)s for a textual
                                      description of the resolution of the video
-                                     format. Use - to output to stdout. Can also
-                                     be used to download to a different
-                                     directory, for example with -o '/my/downloa
-                                     ds/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
+                                     format. %% for a literal percent. Use - to
+                                     output to stdout. Can also be used to
+                                     download to a different directory, for
+                                     example with -o '/my/downloads/%(uploader)s
+                                     /%(title)s-%(id)s.%(ext)s' .
     --autonumber-size NUMBER         Specifies the number of digits in
                                      %(autonumber)s when it is present in output
                                      filename template or --auto-number option
@@ -149,6 +152,9 @@ which means you can modify it, redistribute it or use it however you like.
     --restrict-filenames             Restrict filenames to only ASCII
                                      characters, and avoid "&" and spaces in
                                      filenames
+    -A, --auto-number                [deprecated; use  -o
+                                     "%(autonumber)s-%(title)s.%(ext)s" ] number
+                                     downloaded files starting from 00000
     -t, --title                      [deprecated] use title in file name
                                      (default)
     -l, --literal                    [deprecated] alias of --title
@@ -158,7 +164,8 @@ which means you can modify it, redistribute it or use it however you like.
                                      downloads if possible.
     --no-continue                    do not resume partially downloaded files
                                      (restart from beginning)
-    --no-part                        do not use .part files
+    --no-part                        do not use .part files - write directly
+                                     into output file
     --no-mtime                       do not use the Last-modified header to set
                                      the file modification time
     --write-description              write video description to a .description
@@ -198,6 +205,10 @@ which means you can modify it, redistribute it or use it however you like.
     -j, --dump-json                  simulate, quiet but print JSON information.
                                      See --output for a description of available
                                      keys.
+    -J, --dump-single-json           simulate, quiet but print JSON information
+                                     for each command-line argument. If the URL
+                                     refers to a playlist, dump the whole
+                                     playlist information in a single line.
     --newline                        output progress bar as new lines
     --no-progress                    do not print progress bar
     --console-title                  display progress in console titlebar
@@ -216,7 +227,7 @@ which means you can modify it, redistribute it or use it however you like.
                                      information about the video. (Currently
                                      supported only for YouTube)
     --user-agent UA                  specify a custom user agent
-    --referer REF                    specify a custom referer, use if the video
+    --referer URL                    specify a custom referer, use if the video
                                      access is restricted to one domain
     --add-header FIELD:VALUE         specify a custom HTTP header and its value,
                                      separated by a colon ':'. You can use this
@@ -234,13 +245,20 @@ which means you can modify it, redistribute it or use it however you like.
                                      "worst", "worstvideo" and "worstaudio". By
                                      default, youtube-dl will pick the best
                                      quality. Use commas to download multiple
-                                     audio formats, such as  -f
-                                     136/137/mp4/bestvideo,140/m4a/bestaudio
+                                     audio formats, such as -f
+                                     136/137/mp4/bestvideo,140/m4a/bestaudio.
+                                     You can merge the video and audio of two
+                                     formats into a single file using -f <video-
+                                     format>+<audio-format> (requires ffmpeg or
+                                     avconv), for example -f
+                                     bestvideo+bestaudio.
     --all-formats                    download all available video formats
     --prefer-free-formats            prefer free video formats unless a specific
                                      one is requested
     --max-quality FORMAT             highest quality format to download
     -F, --list-formats               list all available formats
+    --youtube-skip-dash-manifest     Do not download the DASH manifest on
+                                     YouTube videos
 
 ## Subtitle Options:
     --write-sub                      write subtitle file
@@ -256,7 +274,7 @@ which means you can modify it, redistribute it or use it however you like.
                                      language tags like 'en,pt'
 
 ## Authentication Options:
-    -u, --username USERNAME          account username
+    -u, --username USERNAME          login with this account ID
     -p, --password PASSWORD          account password
     -2, --twofactor TWOFACTOR        two-factor auth code
     -n, --netrc                      use .netrc authentication data
@@ -267,7 +285,7 @@ which means you can modify it, redistribute it or use it however you like.
                                      (requires ffmpeg or avconv and ffprobe or
                                      avprobe)
     --audio-format FORMAT            "best", "aac", "vorbis", "mp3", "m4a",
-                                     "opus", or "wav"; best by default
+                                     "opus", or "wav"; "best" by default
     --audio-quality QUALITY          ffmpeg/avconv audio quality specification,
                                      insert a value between 0 (better) and 9
                                      (worse) for VBR or a specific bitrate like
@@ -374,7 +392,7 @@ Again, from then on you'll be able to update with `sudo youtube-dl -U`.
 
 YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
 
-If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to report bugs to the Ubuntu packaging guys - all they have to do is update the package to a somewhat recent version. See above for a way to update.
+If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to [report bugs](https://bugs.launchpad.net/ubuntu/+source/youtube-dl/+filebug) to the [Ubuntu packaging guys](mailto:ubuntu-motu@lists.ubuntu.com?subject=outdated%20version%20of%20youtube-dl) - all they have to do is update the package to a somewhat recent version. See above for a way to update.
 
 ### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?
 
@@ -478,14 +496,15 @@ If you want to add support for a new site, you can follow this quick list (assum
 
         def _real_extract(self, url):
             video_id = self._match_id(url)
+            webpage = self._download_webpage(url, video_id)
 
             # TODO more code goes here, for example ...
-            webpage = self._download_webpage(url, video_id)
             title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
 
             return {
                 'id': video_id,
                 'title': title,
+                'description': self._og_search_description(webpage),
                 # TODO more properties (see youtube_dl/extractor/common.py)
             }
     ```
@@ -493,7 +512,7 @@ If you want to add support for a new site, you can follow this quick list (assum
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
 7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
 8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501).
-9. When the tests pass, [add](https://www.kernel.org/pub/software/scm/git/docs/git-add.html) the new files and [commit](https://www.kernel.org/pub/software/scm/git/docs/git-commit.html) them and [push](https://www.kernel.org/pub/software/scm/git/docs/git-push.html) the result, like this:
+9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
 
         $ git add youtube_dl/extractor/__init__.py
         $ git add youtube_dl/extractor/yourextractor.py
@@ -504,15 +523,27 @@ If you want to add support for a new site, you can follow this quick list (assum
 
 In any case, thank you very much for your contributions!
 
+# EMBEDDING YOUTUBE-DL
+
+youtube-dl makes the best effort to be a good command-line program, and thus should be callable from any programming language. If you encounter any problems parsing its output, feel free to [create a report](https://github.com/rg3/youtube-dl/issues/new).
+
+From a Python program, you can embed youtube-dl in a more powerful fashion, like this:
+
+    import youtube_dl
+
+    ydl_opts = {}
+    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+        ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
+
+Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
+
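+Here is a minimal sketch of such a logger (any object exposing `debug`, `warning` and `error` methods will do):
+
+    import youtube_dl
+
+
+    class MyLogger(object):
+        def debug(self, msg):
+            pass
+
+        def warning(self, msg):
+            pass
+
+        def error(self, msg):
+            print(msg)
+
+
+    ydl_opts = {'logger': MyLogger()}
+    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+        ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
+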
 # BUGS
 
-Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email.
+Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted to do so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the irc channel #youtube-dl on freenode.
 
 Please include the full output of the command when run with `--verbose`. The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
 
-For discussions, join us in the irc channel #youtube-dl on freenode.
-
-When you submit a request, please re-read it once to avoid a couple of mistakes (you can and should use this as a checklist):
+Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
 
 ### Is the description of the issue itself sufficient?
 
index 49287724d6b8d1cb6215aaadf150b8a441582993..cd26cc0895d033af03541f48815e8dad23f5161d 100755 (executable)
@@ -1,4 +1,6 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
+
 import os
 from os.path import dirname as dirn
 import sys
@@ -9,16 +11,17 @@ import youtube_dl
 BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
 BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
 
+
 def build_completion(opt_parser):
     opts_flag = []
     for group in opt_parser.option_groups:
         for option in group.option_list:
-            #for every long flag
+            # for every long flag
             opts_flag.append(option.get_opt_string())
     with open(BASH_COMPLETION_TEMPLATE) as f:
         template = f.read()
     with open(BASH_COMPLETION_FILE, "w") as f:
-        #just using the special char
+        # just using the special char
         filled_template = template.replace("{{flags}}", " ".join(opts_flag))
         f.write(filled_template)
 
index e0c3cc83ed5dd73544b383d983aca0281e86408f..7c2f49f8bb63bbe2b47efca151129a7e6b49674d 100644 (file)
@@ -142,7 +142,7 @@ def win_service_set_status(handle, status_code):
 
 def win_service_main(service_name, real_main, argc, argv_raw):
     try:
-        #args = [argv_raw[i].value for i in range(argc)]
+        # args = [argv_raw[i].value for i in range(argc)]
         stop_event = threading.Event()
         handler = HandlerEx(functools.partial(stop_event, win_service_handler))
         h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
@@ -233,6 +233,7 @@ def rmtree(path):
 
 #==============================================================================
 
+
 class BuildError(Exception):
     def __init__(self, output, code=500):
         self.output = output
@@ -369,7 +370,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea
 
 
 class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
-    actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.
+    actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching.
 
     def do_GET(self):
         path = urlparse.urlparse(self.path)
index 86aa37b5fb687acc91f29753f1bf8ba0db7b8e94..216282712c1b38b96c049f74a6cfe8a0fcd30806 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 """
 This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
index f4aaf0201f5a43341a0dfa1de3eb1087a293485b..c2f2387980d0867adc3cea4c3e14047fdaf1aa28 100755 (executable)
@@ -23,13 +23,13 @@ EXTRA_ARGS = {
     'batch-file': ['--require-parameter'],
 }
 
+
 def build_completion(opt_parser):
     commands = []
 
     for group in opt_parser.option_groups:
         for option in group.option_list:
             long_option = option.get_opt_string().strip('-')
-            help_msg = shell_quote([option.help])
             complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
             if option._short_opts:
                 complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
index 35865b2f30f9526f4a05b4bad594f078f5a56443..867ea0048fb88f1ca1382f11b1c60b17110d4fc8 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
 
 import json
 import sys
index 55912e12c4e25e400aa68cf8685d2e523d0bd253..392e3ba21ab86070f2df164362fbd177b750c726 100755 (executable)
@@ -1,8 +1,7 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
+
 import hashlib
-import shutil
-import subprocess
-import tempfile
 import urllib.request
 import json
 
index 8a824df56fe7677868ca9b03aa24de8d4b6eba76..fa389c35872c3f2b937ef6a875b46bc4dc0d04f8 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals, with_statement
 
 import rsa
 import json
@@ -11,22 +12,23 @@ except NameError:
 
 versions_info = json.load(open('update/versions.json'))
 if 'signature' in versions_info:
-       del versions_info['signature']
+    del versions_info['signature']
 
 print('Enter the PKCS1 private key, followed by a blank line:')
 privkey = b''
 while True:
-       try:
-               line = input()
-       except EOFError:
-               break
-       if line == '':
-               break
-       privkey += line.encode('ascii') + b'\n'
+    try:
+        line = input()
+    except EOFError:
+        break
+    if line == '':
+        break
+    privkey += line.encode('ascii') + b'\n'
 privkey = rsa.PrivateKey.load_pkcs1(privkey)
 
 signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
 print('signature: ' + signature)
 
 versions_info['signature'] = signature
-json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
\ No newline at end of file
+with open('update/versions.json', 'w') as versionsf:
+    json.dump(versions_info, versionsf, indent=4, sort_keys=True)
index 12c2a91949135d72edf53e41d15ae532690bc6bf..3663c8afef278f132518ed9c0286bdfe34d028a7 100755 (executable)
@@ -1,11 +1,11 @@
 #!/usr/bin/env python
 # coding: utf-8
 
-from __future__ import with_statement
+from __future__ import with_statement, unicode_literals
 
 import datetime
 import glob
-import io # For Python 2 compatibility
+import io  # For Python 2 compatibility
 import os
 import re
 
@@ -13,7 +13,7 @@ year = str(datetime.datetime.now().year)
 for fn in glob.glob('*.html*'):
     with io.open(fn, encoding='utf-8') as f:
         content = f.read()
-    newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content)
+    newc = re.sub(r'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', 'Copyright © 2006-' + year, content)
     if content != newc:
         tmpFn = fn + '.part'
         with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
index 0ba15ae0f7c83a4eb2ac6b2b56aeb72d55b3a951..e93eb60fb8af28a72b7aceb5d62d1c2e16b69bc5 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
 
 import datetime
 import io
@@ -73,4 +74,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str)
 
 with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
     atom_file.write(atom_template)
-
index 153e15c8ab674f44e3681e5440c9372634b272d8..f0f0481c781ab40f8386de5c87a99c6468e00708 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
 
 import sys
 import os
@@ -9,6 +10,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(
 
 import youtube_dl
 
+
 def main():
     with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
         template = tmplf.read()
@@ -21,7 +23,7 @@ def main():
             continue
         elif ie_desc is not None:
             ie_html += ': {}'.format(ie.IE_DESC)
-        if ie.working() == False:
+        if not ie.working():
             ie_html += ' (Currently broken)'
         ie_htmls.append('<li>{}</li>'.format(ie_html))
 
diff --git a/devscripts/make_contributing.py b/devscripts/make_contributing.py
new file mode 100755 (executable)
index 0000000..5fa8cf8
--- /dev/null
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+import argparse
+import io
+import re
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        'INFILE', help='README.md file name to read from')
+    parser.add_argument(
+        'OUTFILE', help='CONTRIBUTING.md file name to write to')
+    args = parser.parse_args()
+
+    with io.open(args.INFILE, encoding='utf-8') as inf:
+        readme = inf.read()
+
+    bug_text = re.search(
+        r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
+    dev_text = re.search(
+        r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING YOUTUBE-DL',
+        readme).group(1)
+
+    out = bug_text + dev_text
+
+    with io.open(args.OUTFILE, 'w', encoding='utf-8') as outf:
+        outf.write(out)
+
+if __name__ == '__main__':
+    main()
index 70fa942dd12f7a75ee71b1fc9668e615cda0d747..8fbce07967c177217f5d39162d9f0958f1d41bf5 100755 (executable)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import io
 import sys
 import re
index d9c857015c520fcd5b6dd46ecf3663e167a458df..f66bebfea6de2cec60a10f943380b9247e5c3d60 100644 (file)
@@ -1,3 +1,4 @@
+from __future__ import unicode_literals
 
 import io
 import os.path
diff --git a/devscripts/transition_helper.py b/devscripts/transition_helper.py
deleted file mode 100644 (file)
index d5ca2d4..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os
-
-try:
-    import urllib.request as compat_urllib_request
-except ImportError: # Python 2
-    import urllib2 as compat_urllib_request
-
-sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
-sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
-sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')
-
-try:
-       raw_input()
-except NameError: # Python 3
-       input()
-
-filename = sys.argv[0]
-
-API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
-BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"
-
-if not os.access(filename, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % filename)
-
-try:
-    urlh = compat_urllib_request.urlopen(BIN_URL)
-    newcontent = urlh.read()
-    urlh.close()
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to download latest version')
-
-try:
-    with open(filename, 'wb') as outf:
-        outf.write(newcontent)
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to overwrite current version')
-
-sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
diff --git a/devscripts/transition_helper_exe/setup.py b/devscripts/transition_helper_exe/setup.py
deleted file mode 100644 (file)
index aaf5c29..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-from distutils.core import setup
-import py2exe
-
-py2exe_options = {
-    "bundle_files": 1,
-    "compressed": 1,
-    "optimize": 2,
-    "dist_dir": '.',
-    "dll_excludes": ['w9xpopen.exe']
-}
-
-setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
\ No newline at end of file
diff --git a/devscripts/transition_helper_exe/youtube-dl.py b/devscripts/transition_helper_exe/youtube-dl.py
deleted file mode 100644 (file)
index 6297dfd..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os
-import urllib2
-import json, hashlib
-
-def rsa_verify(message, signature, key):
-    from struct import pack
-    from hashlib import sha256
-    from sys import version_info
-    def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
-    block_size = 0
-    n = key[0]
-    while n:
-        block_size += 1
-        n >>= 8
-    signature = pow(int(signature, 16), key[1], key[0])
-    raw_bytes = []
-    while signature:
-        raw_bytes.insert(0, pack("B", signature & 0xFF))
-        signature >>= 8
-    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
-    signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
-    signature = signature[19:]
-    if signature != sha256(message).digest(): return False
-    return True
-
-sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
-sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
-sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')
-
-raw_input()
-
-filename = sys.argv[0]
-
-UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
-VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
-JSON_URL = UPDATE_URL + 'versions.json'
-UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
-
-if not os.access(filename, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % filename)
-
-exe = os.path.abspath(filename)
-directory = os.path.dirname(exe)
-if not os.access(directory, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % directory)
-
-try:
-    versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
-    versions_info = json.loads(versions_info)
-except:
-    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
-if not 'signature' in versions_info:
-    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
-signature = versions_info['signature']
-del versions_info['signature']
-if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
-    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
-
-version = versions_info['versions'][versions_info['latest']]
-
-try:
-    urlh = urllib2.urlopen(version['exe'][0])
-    newcontent = urlh.read()
-    urlh.close()
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to download latest version')
-
-newcontent_hash = hashlib.sha256(newcontent).hexdigest()
-if newcontent_hash != version['exe'][1]:
-    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
-
-try:
-    with open(exe + '.new', 'wb') as outf:
-        outf.write(newcontent)
-except (IOError, OSError) as err:
-    sys.exit(u'ERROR: unable to write the new version')
-
-try:
-    bat = os.path.join(directory, 'youtube-dl-updater.bat')
-    b = open(bat, 'w')
-    b.write("""
-echo Updating youtube-dl...
-ping 127.0.0.1 -n 5 -w 1000 > NUL
-move /Y "%s.new" "%s"
-del "%s"
-    \n""" %(exe, exe, bat))
-    b.close()
-
-    os.startfile(bat)
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to overwrite current version')
-
-sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
diff --git a/devscripts/zsh-completion.in b/devscripts/zsh-completion.in
new file mode 100644 (file)
index 0000000..b394a1a
--- /dev/null
@@ -0,0 +1,28 @@
+#compdef youtube-dl
+
+__youtube_dl() {
+    local curcontext="$curcontext" fileopts diropts cur prev
+    typeset -A opt_args
+    fileopts="{{fileopts}}"
+    diropts="{{diropts}}"
+    cur=$words[CURRENT]
+    case $cur in
+        :)
+            _arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)'
+        ;;
+        *)
+            prev=$words[CURRENT-1]
+            if [[ ${prev} =~ ${fileopts} ]]; then
+                _path_files
+            elif [[ ${prev} =~ ${diropts} ]]; then
+                _path_files -/
+            elif [[ ${prev} == "--recode-video" ]]; then
+                _arguments '*: :(mp4 flv ogg webm mkv)'
+            else
+                _arguments '*: :({{flags}})'
+            fi
+        ;;
+    esac
+}
+
+__youtube_dl
\ No newline at end of file
diff --git a/devscripts/zsh-completion.py b/devscripts/zsh-completion.py
new file mode 100755 (executable)
index 0000000..f200f2c
--- /dev/null
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+import os
+from os.path import dirname as dirn
+import sys
+
+sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+import youtube_dl
+
+ZSH_COMPLETION_FILE = "youtube-dl.zsh"
+ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
+
+
+def build_completion(opt_parser):
+    opts = [opt for group in opt_parser.option_groups
+            for opt in group.option_list]
+    opts_file = [opt for opt in opts if opt.metavar == "FILE"]
+    opts_dir = [opt for opt in opts if opt.metavar == "DIR"]
+
+    fileopts = []
+    for opt in opts_file:
+        if opt._short_opts:
+            fileopts.extend(opt._short_opts)
+        if opt._long_opts:
+            fileopts.extend(opt._long_opts)
+
+    diropts = []
+    for opt in opts_dir:
+        if opt._short_opts:
+            diropts.extend(opt._short_opts)
+        if opt._long_opts:
+            diropts.extend(opt._long_opts)
+
+    flags = [opt.get_opt_string() for opt in opts]
+
+    with open(ZSH_COMPLETION_TEMPLATE) as f:
+        template = f.read()
+
+    template = template.replace("{{fileopts}}", "|".join(fileopts))
+    template = template.replace("{{diropts}}", "|".join(diropts))
+    template = template.replace("{{flags}}", " ".join(flags))
+
+    with open(ZSH_COMPLETION_FILE, "w") as f:
+        f.write(template)
+
+parser = youtube_dl.parseOpts()[0]
+build_completion(parser)
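
The substitution step is easiest to see with hand-picked option lists in place of the ones harvested from youtube_dl.parseOpts(); a minimal sketch (the option names are illustrative, not the full set):

    # Tiny stand-in for devscripts/zsh-completion.in.
    template = '\n'.join([
        'fileopts="{{fileopts}}"',
        'diropts="{{diropts}}"',
        "_arguments '*: :({{flags}})'",
    ])
    fileopts = ['-a', '--batch-file', '--download-archive']
    diropts = ['--cache-dir']
    flags = ['--help', '--version', '-a', '--batch-file', '--cache-dir']

    # Same replace() calls as build_completion() above: FILE/DIR-valued
    # options become zsh alternation patterns, flags become a word list.
    template = template.replace('{{fileopts}}', '|'.join(fileopts))
    template = template.replace('{{diropts}}', '|'.join(diropts))
    template = template.replace('{{flags}}', ' '.join(flags))
    print(template)
    # fileopts="-a|--batch-file|--download-archive"
    # diropts="--cache-dir"
    # _arguments '*: :(--help --version -a --batch-file --cache-dir)'
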
index 4a04ad779722f191085cf5bd3b927d89e4afa385..594ca61a6bf984d173620a3e95eaca28b22cda5a 100644 (file)
@@ -44,8 +44,8 @@ copyright = u'2014, Ricardo Garcia Gonzalez'
 # built documents.
 #
 # The short X.Y version.
-import youtube_dl
-version = youtube_dl.__version__
+from youtube_dl.version import __version__
+version = __version__
 # The full version, including alpha/beta/rc tags.
 release = version
 
index cf6b92b0f7e61b504dfdc16b6b04568fd073982b..4686260e0bbd25adf39e683bdae1b475e267b0b8 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,6 @@
 from __future__ import print_function
 
 import os.path
-import pkg_resources
 import warnings
 import sys
 
@@ -103,7 +102,9 @@ setup(
         "Programming Language :: Python :: 2.6",
         "Programming Language :: Python :: 2.7",
         "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.3"
+        "Programming Language :: Python :: 3.2",
+        "Programming Language :: Python :: 3.3",
+        "Programming Language :: Python :: 3.4",
     ],
 
     **params
index 62cb3ce0219ba46dadc1cc0c08891bf0941d2304..8a820526abfe5dbae31a1921312c60d075667c32 100644 (file)
@@ -57,9 +57,9 @@ class FakeYDL(YoutubeDL):
         # Different instances of the downloader can't share the same dictionary
         # some test set the "sublang" parameter, which would break the md5 checks.
         params = get_params(override=override)
-        super(FakeYDL, self).__init__(params)
+        super(FakeYDL, self).__init__(params, auto_init=False)
         self.result = []
-        
+
     def to_screen(self, s, skip_eol=None):
         print(s)
 
@@ -72,8 +72,10 @@ class FakeYDL(YoutubeDL):
     def expect_warning(self, regex):
         # Silence an expected warning matching a regex
         old_report_warning = self.report_warning
+
         def report_warning(self, message):
-            if re.match(regex, message): return
+            if re.match(regex, message):
+                return
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)
 
@@ -114,14 +116,14 @@ def expect_info_dict(self, expected_dict, got_dict):
         elif isinstance(expected, type):
             got = got_dict.get(info_field)
             self.assertTrue(isinstance(got, expected),
-                'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
+                            'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
         else:
             if isinstance(expected, compat_str) and expected.startswith('md5:'):
                 got = 'md5:' + md5(got_dict.get(info_field))
             else:
                 got = got_dict.get(info_field)
             self.assertEqual(expected, got,
-                'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+                             'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
 
     # Check for the presence of mandatory fields
     if got_dict.get('_type') != 'playlist':
@@ -133,19 +135,20 @@ def expect_info_dict(self, expected_dict, got_dict):
 
     # Are checkable fields missing from the test case definition?
     test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
-        for key, value in got_dict.items()
-        if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
+                          for key, value in got_dict.items()
+                          if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
     missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
     if missing_keys:
         def _repr(v):
             if isinstance(v, compat_str):
-                return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'")
+                return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
             else:
                 return repr(v)
         info_dict_str = ''.join(
             '    %s: %s,\n' % (_repr(k), _repr(v))
             for k, v in test_info_dict.items())
-        write_string('\n"info_dict": {' + info_dict_str + '}\n', out=sys.stderr)
+        write_string(
+            '\n\'info_dict\': {\n' + info_dict_str + '}\n', out=sys.stderr)
         self.assertFalse(
             missing_keys,
             'Missing keys in test definition: %s' % (
@@ -158,7 +161,9 @@ def assertRegexpMatches(self, text, regexp, msg=None):
     else:
         m = re.match(regexp, text)
         if not m:
-            note = 'Regexp didn\'t match: %r not found in %r' % (regexp, text)
+            note = 'Regexp didn\'t match: %r not found' % (regexp)
+            if len(text) < 1000:
+                note += ' in %r' % text
             if msg is None:
                 msg = note
             else:
@@ -171,3 +176,13 @@ def assertGreaterEqual(self, got, expected, msg=None):
         if msg is None:
             msg = '%r not greater than or equal to %r' % (got, expected)
         self.assertTrue(got >= expected, msg)
+
+
+def expect_warnings(ydl, warnings_re):
+    real_warning = ydl.report_warning
+
+    def _report_warning(w):
+        if not any(re.search(w_re, w) for w_re in warnings_re):
+            real_warning(w)
+
+    ydl.report_warning = _report_warning
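
A quick sanity check of the new expect_warnings helper, run against a stand-in object rather than a real YoutubeDL (FakeDL and the messages are made up; the helper body is inlined from the hunk above so the snippet runs on its own):

    import re

    def expect_warnings(ydl, warnings_re):
        real_warning = ydl.report_warning

        def _report_warning(w):
            # Swallow warnings matching any expected regex; pass the
            # rest through to the original handler.
            if not any(re.search(w_re, w) for w_re in warnings_re):
                real_warning(w)

        ydl.report_warning = _report_warning

    class FakeDL(object):
        def report_warning(self, message):
            print('WARNING: %s' % message)

    dl = FakeDL()
    expect_warnings(dl, ['Falling back on generic'])
    dl.report_warning('Falling back on generic information extractor')  # swallowed
    dl.report_warning('Unable to download video thumbnail')             # printed
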
diff --git a/test/swftests/ConstArrayAccess.as b/test/swftests/ConstArrayAccess.as
new file mode 100644 (file)
index 0000000..07dc3f4
--- /dev/null
@@ -0,0 +1,18 @@
+// input: []
+// output: 4
+
+package {
+public class ConstArrayAccess {
+       private static const x:int = 2;
+       private static const ar:Array = ["42", "3411"];
+
+    public static function main():int{
+        var c:ConstArrayAccess = new ConstArrayAccess();
+        return c.f();
+    }
+
+    public function f(): int {
+       return ar[1].length;
+    }
+}
+}
diff --git a/test/swftests/ConstantInt.as b/test/swftests/ConstantInt.as
new file mode 100644 (file)
index 0000000..e0bbb61
--- /dev/null
@@ -0,0 +1,12 @@
+// input: []
+// output: 2
+
+package {
+public class ConstantInt {
+       private static const x:int = 2;
+
+    public static function main():int{
+        return x;
+    }
+}
+}
diff --git a/test/swftests/DictCall.as b/test/swftests/DictCall.as
new file mode 100644 (file)
index 0000000..c2d174c
--- /dev/null
@@ -0,0 +1,10 @@
+// input: [{"x": 1, "y": 2}]
+// output: 3
+
+package {
+public class DictCall {
+    public static function main(d:Object):int{
+        return d.x + d.y;
+    }
+}
+}
diff --git a/test/swftests/EqualsOperator.as b/test/swftests/EqualsOperator.as
new file mode 100644 (file)
index 0000000..837a69a
--- /dev/null
@@ -0,0 +1,10 @@
+// input: []
+// output: false
+
+package {
+public class EqualsOperator {
+    public static function main():Boolean{
+        return 1 == 2;
+    }
+}
+}
diff --git a/test/swftests/MemberAssignment.as b/test/swftests/MemberAssignment.as
new file mode 100644 (file)
index 0000000..dcba5e3
--- /dev/null
@@ -0,0 +1,22 @@
+// input: [1]
+// output: 2
+
+package {
+public class MemberAssignment {
+    public var v:int;
+
+    public function g():int {
+        return this.v;
+    }
+
+    public function f(a:int):int{
+        this.v = a;
+        return this.v + this.g();
+    }
+
+    public static function main(a:int): int {
+        var v:MemberAssignment = new MemberAssignment();
+        return v.f(a);
+    }
+}
+}
diff --git a/test/swftests/NeOperator.as b/test/swftests/NeOperator.as
new file mode 100644 (file)
index 0000000..61dcbc4
--- /dev/null
@@ -0,0 +1,24 @@
+// input: []
+// output: 123
+
+package {
+public class NeOperator {
+    public static function main(): int {
+        var res:int = 0;
+        if (1 != 2) {
+            res += 3;
+        } else {
+            res += 4;
+        }
+        if (2 != 2) {
+            res += 10;
+        } else {
+            res += 20;
+        }
+        if (9 == 9) {
+            res += 100;
+        }
+        return res;
+    }
+}
+}
diff --git a/test/swftests/PrivateVoidCall.as b/test/swftests/PrivateVoidCall.as
new file mode 100644 (file)
index 0000000..2cc0167
--- /dev/null
@@ -0,0 +1,22 @@
+// input: []
+// output: 9
+
+package {
+public class PrivateVoidCall {
+    public static function main():int{
+        var f:OtherClass = new OtherClass();
+        f.func();
+        return 9;
+    }
+}
+}
+
+class OtherClass {
+    private function pf():void {
+        ;
+    }
+
+    public function func():void {
+        this.pf();
+    }
+}
diff --git a/test/swftests/StringBasics.as b/test/swftests/StringBasics.as
new file mode 100644 (file)
index 0000000..d27430b
--- /dev/null
@@ -0,0 +1,11 @@
+// input: []
+// output: 3
+
+package {
+public class StringBasics {
+    public static function main():int{
+        var s:String = "abc";
+        return s.length;
+    }
+}
+}
diff --git a/test/swftests/StringCharCodeAt.as b/test/swftests/StringCharCodeAt.as
new file mode 100644 (file)
index 0000000..c20d74d
--- /dev/null
@@ -0,0 +1,11 @@
+// input: []
+// output: 9897
+
+package {
+public class StringCharCodeAt {
+    public static function main():int{
+        var s:String = "abc";
+        return s.charCodeAt(1) * 100 + s.charCodeAt();
+    }
+}
+}
diff --git a/test/swftests/StringConversion.as b/test/swftests/StringConversion.as
new file mode 100644 (file)
index 0000000..c976f50
--- /dev/null
@@ -0,0 +1,11 @@
+// input: []
+// output: 2
+
+package {
+public class StringConversion {
+    public static function main():int{
+        var s:String = String(99);
+        return s.length;
+    }
+}
+}
index ab61e19768e4454f061ab9f832cc70c968440d85..f8e4f930ebe6d5aefdbf128909b1be720ec046f3 100644 (file)
@@ -266,6 +266,7 @@ class TestFormatSelection(unittest.TestCase):
             'ext': 'mp4',
             'width': None,
         }
+
         def fname(templ):
             ydl = YoutubeDL({'outtmpl': templ})
             return ydl.prepare_filename(info)
index 71e80b037a5cc99fd0cb1a6711d20cfb59e01b34..5be065c437ae978d8c4ccc1a67bea87a599a77ed 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -19,7 +20,7 @@ def _download_restricted(url, filename, age):
         'age_limit': age,
         'skip_download': True,
         'writeinfojson': True,
-        "outtmpl": "%(id)s.%(ext)s",
+        'outtmpl': '%(id)s.%(ext)s',
     }
     ydl = YoutubeDL(params)
     ydl.add_default_info_extractors()
index 84b05da39e1e28d0df4d65acb6248aa77d7b6b65..bd4fe17bf2c0f37b4f9ac2b291a8f7d664f74534 100644 (file)
@@ -14,7 +14,7 @@ from test.helper import gettestcases
 from youtube_dl.extractor import (
     FacebookIE,
     gen_extractors,
-    JustinTVIE,
+    TwitchIE,
     YoutubeIE,
 )
 
@@ -32,19 +32,19 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
 
     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
-        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668
         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
@@ -72,21 +72,17 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
         self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
 
-    def test_justin_tv_channelid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('www.justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('www.twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv/'))
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/'))
-
-    def test_justintv_videoid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
-
-    def test_justin_tv_chapterid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
+    def test_twitch_channelid_matching(self):
+        self.assertTrue(TwitchIE.suitable('twitch.tv/vanillatv'))
+        self.assertTrue(TwitchIE.suitable('www.twitch.tv/vanillatv'))
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv'))
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/'))
+
+    def test_twitch_videoid_matching(self):
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
+
+    def test_twitch_chapterid_matching(self):
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
 
     def test_youtube_extract(self):
         assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
diff --git a/test/test_compat.py b/test/test_compat.py
new file mode 100644 (file)
index 0000000..1eb454e
--- /dev/null
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from youtube_dl.utils import get_filesystem_encoding
+from youtube_dl.compat import (
+    compat_getenv,
+    compat_expanduser,
+)
+
+
+class TestCompat(unittest.TestCase):
+    def test_compat_getenv(self):
+        test_str = 'тест'
+        os.environ['YOUTUBE-DL-TEST'] = (
+            test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
+        self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
+
+    def test_compat_expanduser(self):
+        old_home = os.environ.get('HOME')
+        test_str = r'C:\Documents and Settings\тест\Application Data'
+        os.environ['HOME'] = (
+            test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
+        self.assertEqual(compat_expanduser('~'), test_str)
+        os.environ['HOME'] = old_home
+
+    def test_all_present(self):
+        import youtube_dl.compat
+        all_names = youtube_dl.compat.__all__
+        present_names = set(filter(
+            lambda c: '_' in c and not c.startswith('_'),
+            dir(youtube_dl.compat))) - set(['unicode_literals'])
+        self.assertEqual(all_names, sorted(present_names))
+
+if __name__ == '__main__':
+    unittest.main()
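
The compat shims these tests cover deal with Python 2 returning bytes from os.environ; roughly, the idea is as follows (a hand-written approximation, not the actual youtube_dl.compat code, which may differ in detail):

    import os
    import sys

    def getenv_unicode(key, default=None):
        # Approximation: on Python 2, os.environ values are bytes and
        # need decoding with the filesystem encoding; on Python 3 they
        # already arrive as str.
        value = os.environ.get(key, default)
        if sys.version_info[0] == 2 and isinstance(value, bytes):
            value = value.decode(sys.getfilesystemencoding() or 'utf-8')
        return value
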
index 8178015eacb5600f99c983a77bfa43abd9d37b6a..a009aa475442ae588405a99f432af6feade92836 100644 (file)
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys
@@ -8,6 +10,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import (
     assertGreaterEqual,
+    expect_warnings,
     get_params,
     gettestcases,
     expect_info_dict,
@@ -22,10 +25,12 @@ import json
 import socket
 
 import youtube_dl.YoutubeDL
-from youtube_dl.utils import (
+from youtube_dl.compat import (
     compat_http_client,
     compat_urllib_error,
     compat_HTTPError,
+)
+from youtube_dl.utils import (
     DownloadError,
     ExtractorError,
     format_bytes,
@@ -35,18 +40,22 @@ from youtube_dl.extractor import get_info_extractor
 
 RETRIES = 3
 
+
 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
         super(YoutubeDL, self).__init__(*args, **kwargs)
+
     def report_warning(self, message):
         # Don't accept warnings during tests
         raise ExtractorError(message)
+
     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict)
         return super(YoutubeDL, self).process_info(info_dict)
 
+
 def _file_md5(fn):
     with open(fn, 'rb') as f:
         return hashlib.md5(f.read()).hexdigest()
@@ -56,10 +65,13 @@ defs = gettestcases()
 
 class TestDownload(unittest.TestCase):
     maxDiff = None
+
     def setUp(self):
         self.defs = defs
 
-### Dynamically generate tests
+# Dynamically generate tests
+
+
 def generator(test_case):
 
     def test_template(self):
@@ -85,7 +97,7 @@ def generator(test_case):
             return
         for other_ie in other_ies:
             if not other_ie.working():
-                print_skipping(u'test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
+                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
                 return
 
         params = get_params(test_case.get('params', {}))
@@ -93,18 +105,21 @@ def generator(test_case):
             params.setdefault('extract_flat', True)
             params.setdefault('skip_download', True)
 
-        ydl = YoutubeDL(params)
+        ydl = YoutubeDL(params, auto_init=False)
         ydl.add_default_info_extractors()
         finished_hook_called = set()
+
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
         ydl.add_progress_hook(_hook)
+        expect_warnings(ydl, test_case.get('expected_warnings', []))
 
         def get_tc_filename(tc):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
 
         res_dict = None
+
         def try_rm_tcs_files(tcs=None):
             if tcs is None:
                 tcs = test_cases
@@ -128,7 +143,7 @@ def generator(test_case):
                         raise
 
                     if try_num == RETRIES:
-                        report_warning(u'Failed due to network errors, skipping...')
+                        report_warning('Failed due to network errors, skipping...')
                         return
 
                     print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))
@@ -183,7 +198,9 @@ def generator(test_case):
                         md5_for_file = _file_md5(tc_filename)
                         self.assertEqual(md5_for_file, tc['md5'])
                 info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
-                self.assertTrue(os.path.exists(info_json_fn))
+                self.assertTrue(
+                    os.path.exists(info_json_fn),
+                    'Missing info file %s' % info_json_fn)
                 with io.open(info_json_fn, encoding='utf-8') as infof:
                     info_dict = json.load(infof)
 
@@ -198,15 +215,15 @@ def generator(test_case):
 
     return test_template
 
-### And add them to TestDownload
+# And add them to TestDownload
 for n, test_case in enumerate(defs):
     test_method = generator(test_case)
     tname = 'test_' + str(test_case['name'])
     i = 1
     while hasattr(TestDownload, tname):
-        tname = 'test_'  + str(test_case['name']) + '_' + str(i)
+        tname = 'test_%s_%d' % (test_case['name'], i)
         i += 1
-    test_method.__name__ = tname
+    test_method.__name__ = str(tname)
     setattr(TestDownload, test_method.__name__, test_method)
     del test_method
 
index 2b115fb31a76fd1de7f6ff90733b4d33dd60d0a9..60df187de4921dfa7df808302f55f1ccd66bcb13 100644 (file)
@@ -1,3 +1,6 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
 import unittest
 
 import sys
@@ -6,17 +9,19 @@ import subprocess
 
 rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
+
 try:
     _DEV_NULL = subprocess.DEVNULL
 except AttributeError:
     _DEV_NULL = open(os.devnull, 'wb')
 
+
 class TestExecution(unittest.TestCase):
     def test_import(self):
         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
 
     def test_module_exec(self):
-        if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
+        if sys.version_info >= (2, 7):  # Python 2.6 doesn't support package execution
             subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
 
     def test_main_exec(self):
index 8f4602e5f62df6217ede56dae359588d152294ed..7c4cd8218e218a6d3319747705c2893c4fbf4cd2 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -22,6 +23,7 @@ from youtube_dl.extractor import (
 class BaseTestSubtitles(unittest.TestCase):
     url = None
     IE = None
+
     def setUp(self):
         self.DL = FakeYDL()
         self.ie = self.IE(self.DL)
@@ -74,7 +76,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
 
     def test_youtube_list_subtitles(self):
-        self.DL.expect_warning(u'Video doesn\'t have automatic captions')
+        self.DL.expect_warning('Video doesn\'t have automatic captions')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
@@ -87,7 +89,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.assertTrue(subtitles['it'] is not None)
 
     def test_youtube_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'n5BB19UTcdA'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
@@ -101,7 +103,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestDailymotionSubtitles(BaseTestSubtitles):
@@ -130,20 +132,20 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.assertEqual(len(subtitles.keys()), 5)
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) == 0)
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
@@ -156,7 +158,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestTedSubtitles(BaseTestSubtitles):
@@ -185,13 +187,13 @@ class TestTedSubtitles(BaseTestSubtitles):
         self.assertTrue(len(subtitles.keys()) >= 28)
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
@@ -203,7 +205,7 @@ class TestTedSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestBlipTVSubtitles(BaseTestSubtitles):
@@ -211,13 +213,13 @@ class TestBlipTVSubtitles(BaseTestSubtitles):
     IE = BlipTVIE
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_allsubtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
@@ -236,7 +238,7 @@ class TestVimeoSubtitles(BaseTestSubtitles):
     def test_subtitles(self):
         self.DL.params['writesubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
+        self.assertEqual(md5(subtitles['en']), '26399116d23ae3cf2c087cea94bc43b4')
 
     def test_subtitles_lang(self):
         self.DL.params['writesubtitles'] = True
@@ -251,20 +253,20 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) == 0)
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://vimeo.com/56015672'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
@@ -277,7 +279,7 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestWallaSubtitles(BaseTestSubtitles):
@@ -285,13 +287,13 @@ class TestWallaSubtitles(BaseTestSubtitles):
     IE = WallaIE
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_allsubtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
@@ -299,7 +301,7 @@ class TestWallaSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
index b42cd74c738b70704e1eb9ff9d70fefc89cf6f15..9f18055e629d3c21826ad8159bdf0ae55409bca2 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -37,7 +38,9 @@ def _make_testfunc(testfile):
                 or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
             # Recompile
             try:
-                subprocess.check_call(['mxmlc', '-output', swf_file, as_file])
+                subprocess.check_call([
+                    'mxmlc', '-output', swf_file,
+                    '-static-link-runtime-shared-libraries', as_file])
             except OSError as ose:
                 if ose.errno == errno.ENOENT:
                     print('mxmlc not found! Skipping test.')
index a4ba7bad03b85f7c7e69ded7b435898c7b17e11e..19813e034cb533c58a75d1a1be330e6781296657 100644 (file)
@@ -1,5 +1,11 @@
 from __future__ import unicode_literals
 
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
 import io
 import os
 import re
@@ -9,14 +15,16 @@ rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
 IGNORED_FILES = [
     'setup.py',  # http://bugs.python.org/issue13943
+    'conf.py',
+    'buildserver.py',
 ]
 
 
+from test.helper import assertRegexpMatches
+
+
 class TestUnicodeLiterals(unittest.TestCase):
     def test_all_files(self):
-        print('Skipping this test (not yet fully implemented)')
-        return
-
         for dirpath, _, filenames in os.walk(rootDir):
             for basename in filenames:
                 if not basename.endswith('.py'):
@@ -30,10 +38,11 @@ class TestUnicodeLiterals(unittest.TestCase):
 
                 if "'" not in code and '"' not in code:
                     continue
-                imps = 'from __future__ import unicode_literals'
-                self.assertTrue(
-                    imps in code,
-                    ' %s  missing in %s' % (imps, fn))
+                assertRegexpMatches(
+                    self,
+                    code,
+                    r'(?:(?:#.*?|\s*)\n)*from __future__ import (?:[a-z_]+,\s*)*unicode_literals',
+                    'unicode_literals import  missing in %s' % fn)
 
                 m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code)
                 if m is not None:
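
To see what the new assertion accepts and rejects, the pattern from the hunk above can be checked by hand (the two sample files are made up):

    import re

    pattern = (r'(?:(?:#.*?|\s*)\n)*'
               r'from __future__ import (?:[a-z_]+,\s*)*unicode_literals')

    good = ('#!/usr/bin/env python\n'
            '# coding: utf-8\n'
            'from __future__ import print_function, unicode_literals\n')
    bad = ('import os\n'
           'from __future__ import unicode_literals\n')

    print(bool(re.match(pattern, good)))  # True: comments may precede the import
    print(bool(re.match(pattern, bad)))   # False: ordinary code comes first
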
index bcca0efead42b85f39337a4c28f0d654447cd8e2..d42df6d96d92f7cde133e2bddfc769793920c37d 100644 (file)
@@ -16,11 +16,11 @@ import json
 import xml.etree.ElementTree
 
 from youtube_dl.utils import (
+    clean_html,
     DateRange,
     encodeFilename,
     find_xpath_attr,
     fix_xml_ampersands,
-    get_meta_content,
     orderedSet,
     OnDemandPagedList,
     InAdvancePagedList,
@@ -45,6 +45,10 @@ from youtube_dl.utils import (
     escape_rfc3986,
     escape_url,
     js_to_json,
+    intlist_to_bytes,
+    args_to_str,
+    parse_filesize,
+    version_tuple,
 )
 
 
@@ -117,16 +121,16 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
         self.assertEqual(orderedSet([]), [])
         self.assertEqual(orderedSet([1]), [1])
-        #keep the list ordered
+        # keep the list ordered
         self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
 
     def test_unescape_html(self):
         self.assertEqual(unescapeHTML('%20;'), '%20;')
         self.assertEqual(
             unescapeHTML('&eacute;'), 'é')
-        
+
     def test_daterange(self):
-        _20century = DateRange("19000101","20000101")
+        _20century = DateRange("19000101", "20000101")
         self.assertFalse("17890714" in _20century)
         _ac = DateRange("00010101")
         self.assertTrue("19690721" in _ac)
@@ -140,6 +144,9 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
         self.assertEqual(unified_strdate('1968-12-10'), '19681210')
         self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
+        self.assertEqual(
+            unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
+            '20141126')
 
     def test_find_xpath_attr(self):
         testxml = '''<root>
@@ -154,17 +161,6 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
         self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
 
-    def test_meta_parser(self):
-        testhtml = '''
-        <head>
-            <meta name="description" content="foo &amp; bar">
-            <meta content='Plato' name='author'/>
-        </head>
-        '''
-        get_meta = lambda name: get_meta_content(name, testhtml)
-        self.assertEqual(get_meta('description'), 'foo & bar')
-        self.assertEqual(get_meta('author'), 'Plato')
-
     def test_xpath_with_ns(self):
         testxml = '''<root xmlns:media="http://example.com/">
             <media:song>
@@ -179,7 +175,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
 
     def test_smuggle_url(self):
-        data = {u"ö": u"ö", u"abc": [3]}
+        data = {"ö": "ö", "abc": [3]}
         url = 'https://foo.bar/baz?x=y#a'
         smug_url = smuggle_url(url, data)
         unsmug_url, unsmug_data = unsmuggle_url(smug_url)
@@ -227,6 +223,10 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_duration('0m0s'), 0)
         self.assertEqual(parse_duration('0s'), 0)
         self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
+        self.assertEqual(parse_duration('T30M38S'), 1838)
+        self.assertEqual(parse_duration('5 s'), 5)
+        self.assertEqual(parse_duration('3 min'), 180)
+        self.assertEqual(parse_duration('2.5 hours'), 9000)
 
     def test_fix_xml_ampersands(self):
         self.assertEqual(
@@ -286,12 +286,17 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
+        self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
 
     def test_strip_jsonp(self):
         stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
         d = json.loads(stripped)
         self.assertEqual(d, [{"id": "532cb", "x": 3}])
 
+        stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
+        d = json.loads(stripped)
+        self.assertEqual(d, {'STATUS': 'OK'})
+
     def test_uppercase_escape(self):
         self.assertEqual(uppercase_escape('aä'), 'aä')
         self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
@@ -355,5 +360,35 @@ class TestUtil(unittest.TestCase):
         on = js_to_json('{"abc": true}')
         self.assertEqual(json.loads(on), {'abc': True})
 
+    def test_clean_html(self):
+        self.assertEqual(clean_html('a:\nb'), 'a: b')
+        self.assertEqual(clean_html('a:\n   "b"'), 'a:    "b"')
+
+    def test_intlist_to_bytes(self):
+        self.assertEqual(
+            intlist_to_bytes([0, 1, 127, 128, 255]),
+            b'\x00\x01\x7f\x80\xff')
+
+    def test_args_to_str(self):
+        self.assertEqual(
+            args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
+            'foo ba/r -baz \'2 be\' \'\''
+        )
+
+    def test_parse_filesize(self):
+        self.assertEqual(parse_filesize(None), None)
+        self.assertEqual(parse_filesize(''), None)
+        self.assertEqual(parse_filesize('91 B'), 91)
+        self.assertEqual(parse_filesize('foobar'), None)
+        self.assertEqual(parse_filesize('2 MiB'), 2097152)
+        self.assertEqual(parse_filesize('5 GB'), 5000000000)
+        self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
+        self.assertEqual(parse_filesize('1,24 KB'), 1240)
+
+    def test_version_tuple(self):
+        self.assertEqual(version_tuple('1'), (1,))
+        self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
+        self.assertEqual(version_tuple('10.1-6'), (10, 1, 6))  # avconv style
+
 if __name__ == '__main__':
     unittest.main()
index eac53b285ab6740b368f278784aced9625abb9a6..780636c7730d396c381fd45fd6d4e8126d1c9fe2 100644 (file)
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 # coding: utf-8
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -31,19 +32,18 @@ params = get_params({
 })
 
 
-
 TEST_ID = 'gr51aVj-mLg'
 ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
 EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
 
+
 class TestAnnotations(unittest.TestCase):
     def setUp(self):
         # Clear old files
         self.tearDown()
 
-
     def test_info_json(self):
-        expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
+        expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text.
         ie = youtube_dl.extractor.YoutubeIE()
         ydl = YoutubeDL(params)
         ydl.add_info_extractor(ie)
@@ -51,7 +51,7 @@ class TestAnnotations(unittest.TestCase):
         self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
         annoxml = None
         with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
-                annoxml = xml.etree.ElementTree.parse(annof)
+            annoxml = xml.etree.ElementTree.parse(annof)
         self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
         root = annoxml.getroot()
         self.assertEqual(root.tag, 'document')
@@ -59,18 +59,17 @@ class TestAnnotations(unittest.TestCase):
         self.assertEqual(annotationsTag.tag, 'annotations')
         annotations = annotationsTag.findall('annotation')
 
-        #Not all the annotations have TEXT children and the annotations are returned unsorted.
+        # Not all the annotations have TEXT children and the annotations are returned unsorted.
         for a in annotations:
-                self.assertEqual(a.tag, 'annotation')
-                if a.get('type') == 'text':
-                        textTag = a.find('TEXT')
-                        text = textTag.text
-                        self.assertTrue(text in expected) #assertIn only added in python 2.7
-                        #remove the first occurance, there could be more than one annotation with the same text
-                        expected.remove(text)
-        #We should have seen (and removed) all the expected annotation texts.
+            self.assertEqual(a.tag, 'annotation')
+            if a.get('type') == 'text':
+                textTag = a.find('TEXT')
+                text = textTag.text
+                self.assertTrue(text in expected)  # assertIn only added in python 2.7
+                # remove the first occurrence; there could be more than one annotation with the same text
+                expected.remove(text)
+        # We should have seen (and removed) all the expected annotation texts.
         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
-        
 
     def tearDown(self):
         try_rm(ANNOTATIONS_FILE)
index 90426a559551571a7769c6137a76270a4ad47b1c..0396ef26283f58c05ee261b0cf23773d462ad103 100644 (file)
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 # coding: utf-8
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -32,7 +33,7 @@ params = get_params({
 TEST_ID = 'BaW_jenozKc'
 INFO_JSON_FILE = TEST_ID + '.info.json'
 DESCRIPTION_FILE = TEST_ID + '.mp4.description'
-EXPECTED_DESCRIPTION = u'''test chars:  "'/\ä↭𝕐
+EXPECTED_DESCRIPTION = '''test chars:  "'/\ä↭𝕐
 test URL: https://github.com/rg3/youtube-dl/issues/1892
 
 This is a test video for youtube-dl.
@@ -53,11 +54,11 @@ class TestInfoJSON(unittest.TestCase):
         self.assertTrue(os.path.exists(INFO_JSON_FILE))
         with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
             jd = json.load(jsonf)
-        self.assertEqual(jd['upload_date'], u'20121002')
+        self.assertEqual(jd['upload_date'], '20121002')
         self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
         self.assertEqual(jd['id'], TEST_ID)
         self.assertEqual(jd['extractor'], 'youtube')
-        self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
+        self.assertEqual(jd['title'], '''youtube-dl test video "'/\ä↭𝕐''')
         self.assertEqual(jd['uploader'], 'Philipp Hagemeister')
 
         self.assertTrue(os.path.exists(DESCRIPTION_FILE))
index 410f9edc297036d7aeb59250c3241dc10bb7cf2d..c889b6f15c40f5ea91a4dd4ea5a86bb8c62c830d 100644 (file)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -12,10 +13,6 @@ from test.helper import FakeYDL
 from youtube_dl.extractor import (
     YoutubePlaylistIE,
     YoutubeIE,
-    YoutubeChannelIE,
-    YoutubeShowIE,
-    YoutubeTopListIE,
-    YoutubeSearchURLIE,
 )
 
 
@@ -31,7 +28,7 @@ class TestYoutubeLists(unittest.TestCase):
         result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
         self.assertEqual(result['_type'], 'url')
         self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')
-    
+
     def test_youtube_course(self):
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
index df2cb09f2a87dcacbb97de9193265f9bf1e852af..13d228cd85e9e260942635a36652c4cdc010dc8d 100644 (file)
@@ -14,7 +14,7 @@ import re
 import string
 
 from youtube_dl.extractor import YoutubeIE
-from youtube_dl.utils import compat_str, compat_urlretrieve
+from youtube_dl.compat import compat_str, compat_urlretrieve
 
 _TESTS = [
     (
index dec0e20e7907d9fcf0110d6c992f14456336580d..578c8daf255048b90caaf6caa5f09825e1d2bfe6 100755 (executable)
@@ -7,6 +7,7 @@ import collections
 import datetime
 import errno
 import io
+import itertools
 import json
 import locale
 import os
@@ -22,12 +23,15 @@ import traceback
 if os.name == 'nt':
     import ctypes
 
-from .utils import (
+from .compat import (
     compat_cookiejar,
+    compat_expanduser,
     compat_http_client,
     compat_str,
     compat_urllib_error,
     compat_urllib_request,
+)
+from .utils import (
     escape_url,
     ContentTooShortError,
     date_from_str,
@@ -57,11 +61,13 @@ from .utils import (
     write_string,
     YoutubeDLHandler,
     prepend_extension,
+    args_to_str,
 )
 from .cache import Cache
 from .extractor import get_info_extractor, gen_extractors
 from .downloader import get_suitable_downloader
-from .postprocessor import FFmpegMergerPP
+from .downloader.rtmp import rtmpdump_version
+from .postprocessor import FFmpegMergerPP, FFmpegPostProcessor
 from .version import __version__
 
 
@@ -107,6 +113,8 @@ class YoutubeDL(object):
     forcefilename:     Force printing final filename.
     forceduration:     Force printing duration.
     forcejson:         Force printing info_dict as JSON.
+    dump_single_json:  Force printing the info_dict of the whole playlist
+                       (or video) as a single JSON line.
     simulate:          Do not download the video files.
     format:            Video format code.
     format_limit:      Highest quality format to try.
@@ -116,6 +124,7 @@ class YoutubeDL(object):
     nooverwrites:      Prevent overwriting files.
     playliststart:     Playlist item to start at.
     playlistend:       Playlist item to end at.
+    playlistreverse:   Download playlist items in reverse order.
     matchtitle:        Download only matching titles.
     rejecttitle:       Reject downloads for matching titles.
     logger:            Log messages to a logging.Logger instance.
@@ -165,6 +174,8 @@ class YoutubeDL(object):
                        'auto' for elaborate guessing
     encoding:          Use this encoding instead of the system-specified.
     extract_flat:      Do not resolve URLs, return the immediate result.
+                       Pass in 'in_playlist' to only show this behavior for
+                       playlist items.
 
     The following parameters are not used by YoutubeDL itself, they are used by
     the FileDownloader:
@@ -184,7 +195,7 @@ class YoutubeDL(object):
     _num_downloads = None
     _screen_file = None
 
-    def __init__(self, params=None):
+    def __init__(self, params=None, auto_init=True):
         """Create a FileDownloader object with the given options."""
         if params is None:
             params = {}
@@ -241,6 +252,26 @@ class YoutubeDL(object):
 
         self._setup_opener()
 
+        if auto_init:
+            self.print_debug_header()
+            self.add_default_info_extractors()
+
+    def warn_if_short_id(self, argv):
+        # short YouTube ID starting with dash?
+        idxs = [
+            i for i, a in enumerate(argv)
+            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
+        if idxs:
+            correct_argv = (
+                ['youtube-dl'] +
+                [a for i, a in enumerate(argv) if i not in idxs] +
+                ['--'] + [argv[i] for i in idxs]
+            )
+            self.report_warning(
+                'Long argument string detected. '
+                'Use -- to separate parameters and URLs, like this:\n%s\n' %
+                args_to_str(correct_argv))
+
     def add_info_extractor(self, ie):
         """Add an InfoExtractor object to the end of the list."""
         self._ies.append(ie)
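
The argv rewrite that warn_if_short_id suggests can be reproduced standalone; a minimal sketch (the video ID below is an arbitrary 11-character example starting with a dash):

    import re

    def correct_argv(argv):
        # Indices of arguments that look like 11-character YouTube IDs
        # beginning with '-', which the option parser would otherwise
        # misread as flags; the real method only warns when such IDs exist.
        idxs = [i for i, a in enumerate(argv)
                if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        return (['youtube-dl'] +
                [a for i, a in enumerate(argv) if i not in idxs] +
                ['--'] + [argv[i] for i in idxs])

    print(correct_argv(['-f', 'best', '-wNyEUrxzFU']))
    # ['youtube-dl', '-f', 'best', '--', '-wNyEUrxzFU']
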
@@ -285,7 +316,7 @@ class YoutubeDL(object):
         self._output_process.stdin.write((message + '\n').encode('utf-8'))
         self._output_process.stdin.flush()
         res = ''.join(self._output_channel.readline().decode('utf-8')
-                       for _ in range(line_count))
+                      for _ in range(line_count))
         return res[:-len('\n')]
 
     def to_screen(self, message, skip_eol=False):
@@ -447,7 +478,7 @@ class YoutubeDL(object):
             template_dict = collections.defaultdict(lambda: 'NA', template_dict)
 
             outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
-            tmpl = os.path.expanduser(outtmpl)
+            tmpl = compat_expanduser(outtmpl)
             filename = tmpl % template_dict
             return filename
         except ValueError as err:
@@ -522,7 +553,7 @@ class YoutubeDL(object):
 
             try:
                 ie_result = ie.extract(url)
-                if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
+                if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                     break
                 if isinstance(ie_result, list):
                     # Backwards compatibility: old IE result format
@@ -535,7 +566,7 @@ class YoutubeDL(object):
                     return self.process_ie_result(ie_result, download, extra_info)
                 else:
                     return ie_result
-            except ExtractorError as de: # An error we somewhat expected
+            except ExtractorError as de:  # An error we somewhat expected
                 self.report_error(compat_str(de), de.format_traceback())
                 break
             except MaxDownloadsReached:
@@ -568,8 +599,12 @@ class YoutubeDL(object):
 
         result_type = ie_result.get('_type', 'video')
 
-        if self.params.get('extract_flat', False):
-            if result_type in ('url', 'url_transparent'):
+        if result_type in ('url', 'url_transparent'):
+            extract_flat = self.params.get('extract_flat', False)
+            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
+                    extract_flat is True):
+                if self.params.get('forcejson', False):
+                    self.to_stdout(json.dumps(ie_result))
                 return ie_result
 
         if result_type == 'video':
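A sketch of the new 'in_playlist' behaviour, assuming a playlist URL whose extractor supports flat extraction (the URL is illustrative): entries come back as unresolved 'url' stubs instead of fully extracted videos.

    from youtube_dl import YoutubeDL

    opts = {'extract_flat': 'in_playlist', 'simulate': True}
    with YoutubeDL(opts) as ydl:
        info = ydl.extract_info('http://example.com/playlist', download=False)
        for entry in info.get('entries', []):
            print(entry.get('_type'), entry.get('url'))  # 'url' stubs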
@@ -588,27 +623,19 @@ class YoutubeDL(object):
                 ie_result['url'], ie_key=ie_result.get('ie_key'),
                 extra_info=extra_info, download=False, process=False)
 
-            def make_result(embedded_info):
-                new_result = ie_result.copy()
-                for f in ('_type', 'url', 'ext', 'player_url', 'formats',
-                          'entries', 'ie_key', 'duration',
-                          'subtitles', 'annotations', 'format',
-                          'thumbnail', 'thumbnails'):
-                    if f in new_result:
-                        del new_result[f]
-                    if f in embedded_info:
-                        new_result[f] = embedded_info[f]
-                return new_result
-            new_result = make_result(info)
+            force_properties = dict(
+                (k, v) for k, v in ie_result.items() if v is not None)
+            for f in ('_type', 'url'):
+                if f in force_properties:
+                    del force_properties[f]
+            new_result = info.copy()
+            new_result.update(force_properties)
 
             assert new_result.get('_type') != 'url_transparent'
-            if new_result.get('_type') == 'compat_list':
-                new_result['entries'] = [
-                    make_result(e) for e in new_result['entries']]
 
             return self.process_ie_result(
                 new_result, download=download, extra_info=extra_info)
-        elif result_type == 'playlist':
+        elif result_type == 'playlist' or result_type == 'multi_video':
             # We process each entry in the playlist
             playlist = ie_result.get('title', None) or ie_result.get('id', None)
             self.to_screen('[download] Downloading playlist: %s' % playlist)
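The rewritten url_transparent merge, run in isolation: instead of copying a fixed whitelist of fields, every non-None field of the outer result (except '_type' and 'url') now overrides the resolved info.

    ie_result = {'_type': 'url_transparent', 'url': 'http://example.com/v',
                 'title': 'Outer title', 'description': None}
    info = {'id': '42', 'title': 'Inner title', 'ext': 'mp4'}

    force_properties = dict(
        (k, v) for k, v in ie_result.items() if v is not None)
    for f in ('_type', 'url'):
        force_properties.pop(f, None)
    new_result = info.copy()
    new_result.update(force_properties)
    print(sorted(new_result.items()))
    # [('ext', 'mp4'), ('id', '42'), ('title', 'Outer title')]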
@@ -621,27 +648,39 @@ class YoutubeDL(object):
             if playlistend == -1:
                 playlistend = None
 
-            if isinstance(ie_result['entries'], list):
-                n_all_entries = len(ie_result['entries'])
-                entries = ie_result['entries'][playliststart:playlistend]
+            ie_entries = ie_result['entries']
+            if isinstance(ie_entries, list):
+                n_all_entries = len(ie_entries)
+                entries = ie_entries[playliststart:playlistend]
                 n_entries = len(entries)
                 self.to_screen(
                     "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
                     (ie_result['extractor'], playlist, n_all_entries, n_entries))
-            else:
-                assert isinstance(ie_result['entries'], PagedList)
-                entries = ie_result['entries'].getslice(
+            elif isinstance(ie_entries, PagedList):
+                entries = ie_entries.getslice(
                     playliststart, playlistend)
                 n_entries = len(entries)
                 self.to_screen(
                     "[%s] playlist %s: Downloading %d videos" %
                     (ie_result['extractor'], playlist, n_entries))
+            else:  # iterable
+                entries = list(itertools.islice(
+                    ie_entries, playliststart, playlistend))
+                n_entries = len(entries)
+                self.to_screen(
+                    "[%s] playlist %s: Downloading %d videos" %
+                    (ie_result['extractor'], playlist, n_entries))
+
+            if self.params.get('playlistreverse', False):
+                entries = entries[::-1]
 
             for i, entry in enumerate(entries, 1):
                 self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries))
                 extra = {
                     'n_entries': n_entries,
                     'playlist': playlist,
+                    'playlist_id': ie_result.get('id'),
+                    'playlist_title': ie_result.get('title'),
                     'playlist_index': i + playliststart,
                     'extractor': ie_result['extractor'],
                     'webpage_url': ie_result['webpage_url'],
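The three entry containers handled above, reduced to one helper; start/end follow the 1-based, end-inclusive --playlist-start/--playlist-end convention, and PagedList (which gets getslice() instead) is omitted from this sketch.

    import itertools

    def take(entries, start=1, end=None):
        if isinstance(entries, list):
            return entries[start - 1:end]
        return list(itertools.islice(entries, start - 1, end))

    print(take(list(range(10)), 3, 6))        # [2, 3, 4, 5]
    print(take(iter(range(10)), 3, 6))        # same, via islice
    print(take(list(range(10)), 3, 6)[::-1])  # with --playlist-reverse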
@@ -661,14 +700,20 @@ class YoutubeDL(object):
             ie_result['entries'] = playlist_results
             return ie_result
         elif result_type == 'compat_list':
+            self.report_warning(
+                'Extractor %s returned a compat_list result. '
+                'It needs to be updated.' % ie_result.get('extractor'))
+
             def _fixup(r):
-                self.add_extra_info(r,
+                self.add_extra_info(
+                    r,
                     {
                         'extractor': ie_result['extractor'],
                         'webpage_url': ie_result['webpage_url'],
                         'webpage_url_basename': url_basename(ie_result['webpage_url']),
                         'extractor_key': ie_result['extractor_key'],
-                    })
+                    }
+                )
                 return r
             ie_result['entries'] = [
                 self.process_ie_result(_fixup(r), download, extra_info)
@@ -746,6 +791,10 @@ class YoutubeDL(object):
             info_dict['display_id'] = info_dict['id']
 
         if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
+            # Working around negative timestamps in Windows
+            # (see http://bugs.python.org/issue1646728)
+            if info_dict['timestamp'] < 0 and os.name == 'nt':
+                info_dict['timestamp'] = 0
             upload_date = datetime.datetime.utcfromtimestamp(
                 info_dict['timestamp'])
             info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
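Why the clamp above exists, as a minimal reproduction; the OSError is only raised on Windows (see the linked Python issue), so on POSIX this simply prints 19691231.

    import datetime
    import os

    ts = -86400  # one day before the Unix epoch
    if ts < 0 and os.name == 'nt':
        ts = 0  # degrade to 19700101 instead of crashing
    print(datetime.datetime.utcfromtimestamp(ts).strftime('%Y%m%d'))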
@@ -818,8 +867,15 @@ class YoutubeDL(object):
                         # Two formats have been requested like '137+139'
                         format_1, format_2 = rf.split('+')
                         formats_info = (self.select_format(format_1, formats),
-                            self.select_format(format_2, formats))
+                                        self.select_format(format_2, formats))
                         if all(formats_info):
+                            # The first format must contain the video and the
+                            # second the audio
+                            if formats_info[0].get('vcodec') == 'none':
+                                self.report_error('The first format must '
+                                                  'contain the video, try using '
+                                                  '"-f %s+%s"' % (format_2, format_1))
+                                return
                             selected_format = {
                                 'requested_formats': formats_info,
                                 'format': rf,
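The shape of a merged-format selection after the new check, with illustrative format dicts; the first entry must carry the video stream, otherwise the error above asks for the pair to be swapped.

    formats_info = ({'format_id': '137', 'vcodec': 'avc1', 'acodec': 'none'},
                    {'format_id': '139', 'vcodec': 'none', 'acodec': 'aac'})
    assert formats_info[0].get('vcodec') != 'none'  # video first, then audio
    selected_format = {
        'requested_formats': formats_info,
        'format': '137+139',
        'ext': 'mp4',  # illustrative
    }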
@@ -882,8 +938,12 @@ class YoutubeDL(object):
         if self.params.get('forceid', False):
             self.to_stdout(info_dict['id'])
         if self.params.get('forceurl', False):
-            # For RTMP URLs, also include the playpath
-            self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
+            if info_dict.get('requested_formats') is not None:
+                for f in info_dict['requested_formats']:
+                    self.to_stdout(f['url'] + f.get('play_path', ''))
+            else:
+                # For RTMP URLs, also include the playpath
+                self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
         if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
             self.to_stdout(info_dict['thumbnail'])
         if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
@@ -897,6 +957,8 @@ class YoutubeDL(object):
         if self.params.get('forcejson', False):
             info_dict['_filename'] = filename
             self.to_stdout(json.dumps(info_dict))
+        if self.params.get('dump_single_json', False):
+            info_dict['_filename'] = filename
 
         # Do nothing else if in simulate mode
         if self.params.get('simulate', False):
@@ -962,7 +1024,7 @@ class YoutubeDL(object):
                     else:
                         self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                         with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                                subfile.write(sub)
+                            subfile.write(sub)
                 except (OSError, IOError):
                     self.report_error('Cannot write subtitles file ' + sub_filename)
                     return
@@ -974,7 +1036,7 @@ class YoutubeDL(object):
             else:
                 self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                 try:
-                    write_json_file(info_dict, encodeFilename(infofn))
+                    write_json_file(info_dict, infofn)
                 except (OSError, IOError):
                     self.report_error('Cannot write metadata to JSON file ' + infofn)
                     return
@@ -994,10 +1056,10 @@ class YoutubeDL(object):
                         with open(thumb_filename, 'wb') as thumbf:
                             shutil.copyfileobj(uf, thumbf)
                         self.to_screen('[%s] %s: Writing thumbnail to: %s' %
-                            (info_dict['extractor'], info_dict['id'], thumb_filename))
+                                       (info_dict['extractor'], info_dict['id'], thumb_filename))
                     except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                         self.report_warning('Unable to download thumbnail "%s": %s' %
-                            (info_dict['thumbnail'], compat_str(err)))
+                                            (info_dict['thumbnail'], compat_str(err)))
 
         if not self.params.get('skip_download', False):
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
@@ -1015,11 +1077,11 @@ class YoutubeDL(object):
                         downloaded = []
                         success = True
                         merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
-                        if not merger._get_executable():
+                        if not merger._executable:
                             postprocessors = []
                             self.report_warning('You have requested multiple '
-                                'formats but ffmpeg or avconv are not installed.'
-                                ' The formats won\'t be merged')
+                                                'formats but ffmpeg or avconv are not installed.'
+                                                ' The formats won\'t be merged')
                         else:
                             postprocessors = [merger]
                         for f in info_dict['requested_formats']:
@@ -1063,13 +1125,16 @@ class YoutubeDL(object):
 
         for url in url_list:
             try:
-                #It also downloads the videos
-                self.extract_info(url)
+                # It also downloads the videos
+                res = self.extract_info(url)
             except UnavailableVideoError:
                 self.report_error('unable to download video')
             except MaxDownloadsReached:
                 self.to_screen('[info] Maximum number of downloaded files reached.')
                 raise
+            else:
+                if self.params.get('dump_single_json', False):
+                    self.to_stdout(json.dumps(res))
 
         return self._download_retcode
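Intended use of the new option, sketched through the Python API (parameter names as in this patch); unlike forcejson, which emits one JSON object per video, dump_single_json prints the whole result of each URL, playlist wrapper included, as a single line.

    from youtube_dl import YoutubeDL

    opts = {'dump_single_json': True, 'simulate': True}
    with YoutubeDL(opts) as ydl:
        ydl.download(['http://example.com/playlist'])  # one JSON line on stdout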
 
@@ -1193,6 +1258,8 @@ class YoutubeDL(object):
             res += 'video@'
         if fdict.get('vbr') is not None:
             res += '%4dk' % fdict['vbr']
+        if fdict.get('fps') is not None:
+            res += ', %sfps' % fdict['fps']
         if fdict.get('acodec') is not None:
             if res:
                 res += ', '
@@ -1274,11 +1341,13 @@ class YoutubeDL(object):
             self.report_warning(
                 'Your Python is broken! Update to a newer and supported version')
 
+        stdout_encoding = getattr(
+            sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
         encoding_str = (
             '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
                 locale.getpreferredencoding(),
                 sys.getfilesystemencoding(),
-                sys.stdout.encoding,
+                stdout_encoding,
                 self.get_encoding()))
         write_string(encoding_str, encoding=None)
 
@@ -1297,8 +1366,19 @@ class YoutubeDL(object):
                 sys.exc_clear()
             except:
                 pass
-        self._write_string('[debug] Python version %s - %s' %
-                     (platform.python_version(), platform_name()) + '\n')
+        self._write_string('[debug] Python version %s - %s\n' % (
+            platform.python_version(), platform_name()))
+
+        exe_versions = FFmpegPostProcessor.get_versions()
+        exe_versions['rtmpdump'] = rtmpdump_version()
+        exe_str = ', '.join(
+            '%s %s' % (exe, v)
+            for exe, v in sorted(exe_versions.items())
+            if v
+        )
+        if not exe_str:
+            exe_str = 'none'
+        self._write_string('[debug] exe versions: %s\n' % exe_str)
 
         proxy_map = {}
         for handler in self._opener.handlers:
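The version join above, replayed with made-up versions; entries whose version probe returned a falsy value are dropped, and the line degrades to 'none' when nothing is installed.

    exe_versions = {'ffmpeg': '2.4.3', 'rtmpdump': '2.4', 'avconv': None}
    exe_str = ', '.join(
        '%s %s' % (exe, v)
        for exe, v in sorted(exe_versions.items())
        if v) or 'none'
    print('[debug] exe versions: %s' % exe_str)
    # [debug] exe versions: ffmpeg 2.4.3, rtmpdump 2.4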
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 7f2b4dfcc60ddada121b7b662a61fc10c62de580..70c4f25b161d5d7b158da52a64b22dedc4b1a9f8 100644 (file)
@@ -1,85 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-__authors__  = (
-    'Ricardo Garcia Gonzalez',
-    'Danny Colligan',
-    'Benjamin Johnson',
-    'Vasyl\' Vavrychuk',
-    'Witold Baryluk',
-    'Paweł Paprota',
-    'Gergely Imreh',
-    'Rogério Brito',
-    'Philipp Hagemeister',
-    'Sören Schulze',
-    'Kevin Ngo',
-    'Ori Avtalion',
-    'shizeeg',
-    'Filippo Valsorda',
-    'Christian Albrecht',
-    'Dave Vasilevsky',
-    'Jaime Marquínez Ferrándiz',
-    'Jeff Crouse',
-    'Osama Khalid',
-    'Michael Walter',
-    'M. Yasoob Ullah Khalid',
-    'Julien Fraichard',
-    'Johny Mo Swag',
-    'Axel Noack',
-    'Albert Kim',
-    'Pierre Rudloff',
-    'Huarong Huo',
-    'Ismael Mejía',
-    'Steffan \'Ruirize\' James',
-    'Andras Elso',
-    'Jelle van der Waa',
-    'Marcin Cieślak',
-    'Anton Larionov',
-    'Takuya Tsuchida',
-    'Sergey M.',
-    'Michael Orlitzky',
-    'Chris Gahan',
-    'Saimadhav Heblikar',
-    'Mike Col',
-    'Oleg Prutz',
-    'pulpe',
-    'Andreas Schmitz',
-    'Michael Kaiser',
-    'Niklas Laxström',
-    'David Triendl',
-    'Anthony Weems',
-    'David Wagner',
-    'Juan C. Olivares',
-    'Mattias Harrysson',
-    'phaer',
-    'Sainyam Kapoor',
-    'Nicolas Évrard',
-    'Jason Normore',
-    'Hoje Lee',
-    'Adam Thalhammer',
-    'Georg Jähnig',
-    'Ralf Haring',
-    'Koki Takahashi',
-    'Ariset Llerena',
-    'Adam Malcontenti-Wilson',
-    'Tobias Bell',
-    'Naglis Jonaitis',
-    'Charles Chen',
-    'Hassaan Ali',
-    'Dobrosław Żybort',
-    'David Fabijan',
-    'Sebastian Haas',
-    'Alexander Kirk',
-    'Erik Johnson',
-    'Keith Beckman',
-    'Ole Ernst',
-    'Aaron McDaniel (mcd1992)',
-    'Magnus Kolstad',
-    'Hari Padmanaban',
-    'Carlos Ramos',
-    '5moufl',
-    'lenaten',
-)
+from __future__ import unicode_literals
 
 __license__ = 'Public Domain'
 
@@ -93,9 +15,13 @@ import sys
 from .options import (
     parseOpts,
 )
-from .utils import (
+from .compat import (
+    compat_expanduser,
     compat_getpass,
     compat_print,
+    workaround_optparse_bug9161,
+)
+from .utils import (
     DateRange,
     DEFAULT_OUTTMPL,
     decodeOption,
@@ -132,7 +58,9 @@ def _real_main(argv=None):
         # https://github.com/rg3/youtube-dl/issues/820
         codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
 
-    setproctitle(u'youtube-dl')
+    workaround_optparse_bug9161()
+
+    setproctitle('youtube-dl')
 
     parser, opts, args = parseOpts(argv)
 
@@ -148,10 +76,10 @@ def _real_main(argv=None):
     if opts.headers is not None:
         for h in opts.headers:
             if h.find(':', 1) < 0:
-                parser.error(u'wrong header formatting, it should be key:value, not "%s"'%h)
+                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
             key, value = h.split(':', 2)
             if opts.verbose:
-                write_string(u'[debug] Adding header from command line option %s:%s\n'%(key, value))
+                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
             std_headers[key] = value
 
     # Dump user agent
@@ -169,9 +97,9 @@ def _real_main(argv=None):
                 batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
             batch_urls = read_batch_urls(batchfd)
             if opts.verbose:
-                write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n')
+                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
         except IOError:
-            sys.exit(u'ERROR: batch file could not be read')
+            sys.exit('ERROR: batch file could not be read')
     all_urls = batch_urls + args
     all_urls = [url.strip() for url in all_urls]
     _enc = preferredencoding()
@@ -184,7 +112,7 @@ def _real_main(argv=None):
             compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
             matchedUrls = [url for url in all_urls if ie.suitable(url)]
             for mu in matchedUrls:
-                compat_print(u'  ' + mu)
+                compat_print('  ' + mu)
         sys.exit(0)
     if opts.list_extractor_descriptions:
         for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
@@ -194,69 +122,66 @@ def _real_main(argv=None):
             if desc is False:
                 continue
             if hasattr(ie, 'SEARCH_KEY'):
-                _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise', u'sleeping bunny')
-                _COUNTS = (u'', u'5', u'10', u'all')
-                desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
+                _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny')
+                _COUNTS = ('', '5', '10', 'all')
+                desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
             compat_print(desc)
         sys.exit(0)
 
-
     # Conflicting, missing and erroneous options
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
-        parser.error(u'using .netrc conflicts with giving username/password')
+        parser.error('using .netrc conflicts with giving username/password')
     if opts.password is not None and opts.username is None:
-        parser.error(u'account username missing\n')
+        parser.error('account username missing\n')
     if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
-        parser.error(u'using output template conflicts with using title, video ID or auto number')
+        parser.error('using output template conflicts with using title, video ID or auto number')
     if opts.usetitle and opts.useid:
-        parser.error(u'using title conflicts with using video ID')
+        parser.error('using title conflicts with using video ID')
     if opts.username is not None and opts.password is None:
-        opts.password = compat_getpass(u'Type account password and press [Return]: ')
+        opts.password = compat_getpass('Type account password and press [Return]: ')
     if opts.ratelimit is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
         if numeric_limit is None:
-            parser.error(u'invalid rate limit specified')
+            parser.error('invalid rate limit specified')
         opts.ratelimit = numeric_limit
     if opts.min_filesize is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
         if numeric_limit is None:
-            parser.error(u'invalid min_filesize specified')
+            parser.error('invalid min_filesize specified')
         opts.min_filesize = numeric_limit
     if opts.max_filesize is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
         if numeric_limit is None:
-            parser.error(u'invalid max_filesize specified')
+            parser.error('invalid max_filesize specified')
         opts.max_filesize = numeric_limit
     if opts.retries is not None:
         try:
             opts.retries = int(opts.retries)
         except (TypeError, ValueError):
-            parser.error(u'invalid retry count specified')
+            parser.error('invalid retry count specified')
     if opts.buffersize is not None:
         numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
         if numeric_buffersize is None:
-            parser.error(u'invalid buffer size specified')
+            parser.error('invalid buffer size specified')
         opts.buffersize = numeric_buffersize
     if opts.playliststart <= 0:
-        raise ValueError(u'Playlist start must be positive')
+        raise ValueError('Playlist start must be positive')
     if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
-        raise ValueError(u'Playlist end must be greater than playlist start')
+        raise ValueError('Playlist end must be greater than playlist start')
     if opts.extractaudio:
         if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
-            parser.error(u'invalid audio format specified')
+            parser.error('invalid audio format specified')
     if opts.audioquality:
         opts.audioquality = opts.audioquality.strip('k').strip('K')
         if not opts.audioquality.isdigit():
-            parser.error(u'invalid audio quality specified')
+            parser.error('invalid audio quality specified')
     if opts.recodevideo is not None:
         if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
-            parser.error(u'invalid video recode format specified')
+            parser.error('invalid video recode format specified')
     if opts.date is not None:
         date = DateRange.day(opts.date)
     else:
         date = DateRange(opts.dateafter, opts.datebefore)
-    if opts.default_search not in ('auto', 'auto_warning', 'error', 'fixup_error', None) and ':' not in opts.default_search:
-        parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')
 
     # Do not download videos when there are audio-only formats
     if opts.extractaudio and not opts.keepvideo and opts.format is None:
@@ -264,28 +189,28 @@ def _real_main(argv=None):
 
     # --all-sub automatically sets --write-sub if --write-auto-sub is not given
     # this was the old behaviour if only --all-sub was given.
-    if opts.allsubtitles and (opts.writeautomaticsub == False):
+    if opts.allsubtitles and not opts.writeautomaticsub:
         opts.writesubtitles = True
 
     if sys.version_info < (3,):
         # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
         if opts.outtmpl is not None:
             opts.outtmpl = opts.outtmpl.decode(preferredencoding())
-    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
-            or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
-            or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
-            or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
-            or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
-            or (opts.useid and u'%(id)s.%(ext)s')
-            or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
-            or DEFAULT_OUTTMPL)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
+               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
+               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
+               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
+               or (opts.useid and '%(id)s.%(ext)s')
+               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
+               or DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
-        parser.error(u'Cannot download a video and extract audio into the same'
-                     u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
-                     u' template'.format(outtmpl))
+        parser.error('Cannot download a video and extract audio into the same'
+                     ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
+                     ' template'.format(outtmpl))
 
-    any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
-    download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
+    any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
+    download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
 
     ydl_opts = {
         'usenetrc': opts.usenetrc,
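The template fallback chain above leans on and/or short-circuiting; a reduced sketch with two of the flags (values hypothetical, and DEFAULT stands in for DEFAULT_OUTTMPL):

    opts_usetitle, opts_useid = False, True
    DEFAULT = '%(title)s-%(id)s.%(ext)s'
    outtmpl = ((opts_usetitle and '%(title)s-%(id)s.%(ext)s')
               or (opts_useid and '%(id)s.%(ext)s')
               or DEFAULT)
    print(outtmpl % {'id': 'abc', 'title': 't', 'ext': 'mp4'})  # abc.mp4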
@@ -304,8 +229,9 @@ def _real_main(argv=None):
         'forcefilename': opts.getfilename,
         'forceformat': opts.getformat,
         'forcejson': opts.dumpjson,
-        'simulate': opts.simulate,
-        'skip_download': (opts.skip_download or opts.simulate or any_printing),
+        'dump_single_json': opts.dump_single_json,
+        'simulate': opts.simulate or any_printing,
+        'skip_download': opts.skip_download,
         'format': opts.format,
         'format_limit': opts.format_limit,
         'listformats': opts.listformats,
@@ -323,6 +249,7 @@ def _real_main(argv=None):
         'progress_with_newline': opts.progress_with_newline,
         'playliststart': opts.playliststart,
         'playlistend': opts.playlistend,
+        'playlistreverse': opts.playlist_reverse,
         'noplaylist': opts.noplaylist,
         'logtostderr': opts.outtmpl == '-',
         'consoletitle': opts.consoletitle,
@@ -369,12 +296,10 @@ def _real_main(argv=None):
         'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
         'encoding': opts.encoding,
         'exec_cmd': opts.exec_cmd,
+        'extract_flat': opts.extract_flat,
     }
 
     with YoutubeDL(ydl_opts) as ydl:
-        ydl.print_debug_header()
-        ydl.add_default_info_extractors()
-
         # PostProcessors
         # Add the metadata pp first, the other pps will copy it
         if opts.addmetadata:
@@ -392,7 +317,6 @@ def _real_main(argv=None):
                 ydl.add_post_processor(FFmpegAudioFixPP())
             ydl.add_post_processor(AtomicParsleyPP())
 
-
         # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
         # So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
         if opts.exec_cmd:
@@ -409,18 +333,19 @@ def _real_main(argv=None):
 
         # Maybe do nothing
         if (len(all_urls) < 1) and (opts.load_info_filename is None):
-            if not (opts.update_self or opts.rm_cachedir):
-                parser.error(u'you must provide at least one URL')
-            else:
+            if opts.update_self or opts.rm_cachedir:
                 sys.exit()
 
+            ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
+            parser.error('you must provide at least one URL')
+
         try:
             if opts.load_info_filename is not None:
                 retcode = ydl.download_with_info_file(opts.load_info_filename)
             else:
                 retcode = ydl.download(all_urls)
         except MaxDownloadsReached:
-            ydl.to_screen(u'--max-download limit reached, aborting.')
+            ydl.to_screen('--max-download limit reached, aborting.')
             retcode = 101
 
     sys.exit(retcode)
@@ -432,6 +357,6 @@ def main(argv=None):
     except DownloadError:
         sys.exit(1)
     except SameFileError:
-        sys.exit(u'ERROR: fixed output name but more than one file to download')
+        sys.exit('ERROR: fixed output name but more than one file to download')
     except KeyboardInterrupt:
-        sys.exit(u'\nERROR: Interrupted by user')
+        sys.exit('\nERROR: Interrupted by user')
diff --git a/youtube_dl/__main__.py b/youtube_dl/__main__.py
index 3fe29c91f416e0d6c957ed750d3f0f69950dc9c0..65a0f891c5998cd49c7e1a98dbca75b6c926ccb8 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Execute with
 # $ python youtube_dl/__main__.py (2.6+)
diff --git a/youtube_dl/aes.py b/youtube_dl/aes.py
index e9c5e21521d66baa177986e8ca878e3fc1a75461..5efd0f836bcf2375b065be008a36e5b8a54d2e69 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 __all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
 
 import base64
@@ -7,10 +9,11 @@ from .utils import bytes_to_intlist, intlist_to_bytes
 
 BLOCK_SIZE_BYTES = 16
 
+
 def aes_ctr_decrypt(data, key, counter):
     """
     Decrypt with aes in counter mode
-    
+
     @param {int[]} data        cipher
     @param {int[]} key         16/24/32-Byte cipher key
     @param {instance} counter  Instance whose next_value function (@returns {int[]}  16-Byte block)
@@ -19,23 +22,24 @@ def aes_ctr_decrypt(data, key, counter):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
-    
-    decrypted_data=[]
+
+    decrypted_data = []
     for i in range(block_count):
         counter_block = counter.next_value()
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
-        
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
+
         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         decrypted_data += xor(block, cipher_counter_block)
     decrypted_data = decrypted_data[:len(data)]
-    
+
     return decrypted_data
 
+
 def aes_cbc_decrypt(data, key, iv):
     """
     Decrypt with aes in CBC mode
-    
+
     @param {int[]} data        cipher
     @param {int[]} key         16/24/32-Byte cipher key
     @param {int[]} iv          16-Byte IV
@@ -43,94 +47,98 @@ def aes_cbc_decrypt(data, key, iv):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
-    
-    decrypted_data=[]
+
+    decrypted_data = []
     previous_cipher_block = iv
     for i in range(block_count):
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
-        
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
+
         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
         previous_cipher_block = block
     decrypted_data = decrypted_data[:len(data)]
-    
+
     return decrypted_data
 
+
 def key_expansion(data):
     """
     Generate key schedule
-    
+
     @param {int[]} data  16/24/32-Byte cipher key
-    @returns {int[]}     176/208/240-Byte expanded key 
+    @returns {int[]}     176/208/240-Byte expanded key
     """
-    data = data[:] # copy
+    data = data[:]  # copy
     rcon_iteration = 1
     key_size_bytes = len(data)
     expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
-    
+
     while len(data) < expanded_key_size_bytes:
         temp = data[-4:]
         temp = key_schedule_core(temp, rcon_iteration)
         rcon_iteration += 1
-        data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-        
+        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
+
         for _ in range(3):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-        
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
+
         if key_size_bytes == 32:
             temp = data[-4:]
             temp = sub_bytes(temp)
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-        
-        for _ in range(3 if key_size_bytes == 32  else 2 if key_size_bytes == 24 else 0):
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
+
+        for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
     data = data[:expanded_key_size_bytes]
-    
+
     return data
 
+
 def aes_encrypt(data, expanded_key):
     """
     Encrypt one block with aes
-    
+
     @param {int[]} data          16-Byte state
-    @param {int[]} expanded_key  176/208/240-Byte expanded key 
+    @param {int[]} expanded_key  176/208/240-Byte expanded key
     @returns {int[]}             16-Byte cipher
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    for i in range(1, rounds+1):
+    for i in range(1, rounds + 1):
         data = sub_bytes(data)
         data = shift_rows(data)
         if i != rounds:
             data = mix_columns(data)
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
 
     return data
 
+
 def aes_decrypt(data, expanded_key):
     """
     Decrypt one block with aes
-    
+
     @param {int[]} data          16-Byte cipher
     @param {int[]} expanded_key  176/208/240-Byte expanded key
     @returns {int[]}             16-Byte state
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
-    
+
     for i in range(rounds, 0, -1):
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
         if i != rounds:
             data = mix_columns_inv(data)
         data = shift_rows_inv(data)
         data = sub_bytes_inv(data)
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    
+
     return data
 
+
 def aes_decrypt_text(data, password, key_size_bytes):
     """
     Decrypt text
@@ -138,33 +146,34 @@ def aes_decrypt_text(data, password, key_size_bytes):
     - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
       with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
     - Mode of operation is 'counter'
-    
+
     @param {str} data                    Base64 encoded string
     @param {str,unicode} password        Password (will be encoded with utf-8)
     @param {int} key_size_bytes          Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
     @returns {str}                       Decrypted data
     """
     NONCE_LENGTH_BYTES = 8
-    
+
     data = bytes_to_intlist(base64.b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))
-    
-    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
+
+    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
-    
+
     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]
-    
+
     class Counter:
-        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+
         def next_value(self):
             temp = self.__value
             self.__value = inc(self.__value)
             return temp
-    
+
     decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
     plaintext = intlist_to_bytes(decrypted_data)
-    
+
     return plaintext
 
 RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
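How the Counter above advances: its 16-byte block is the 8-byte nonce padded with zeros, and inc() (reformatted further down in this file) bumps it like a big-endian integer with carry. A self-contained check of that carry behaviour:

    def inc(data):  # same algorithm as the inc() in this module
        data = data[:]
        for i in range(len(data) - 1, -1, -1):
            if data[i] == 255:
                data[i] = 0
            else:
                data[i] = data[i] + 1
                break
        return data

    print(inc([0, 0, 254]))  # [0, 0, 255]
    print(inc([0, 0, 255]))  # [0, 1, 0]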
@@ -200,14 +209,14 @@ SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x
             0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
             0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
             0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
-MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
-                     (0x1,0x2,0x3,0x1),
-                     (0x1,0x1,0x2,0x3),
-                     (0x3,0x1,0x1,0x2))
-MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
-                         (0x9,0xE,0xB,0xD),
-                         (0xD,0x9,0xE,0xB),
-                         (0xB,0xD,0x9,0xE))
+MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
+                     (0x1, 0x2, 0x3, 0x1),
+                     (0x1, 0x1, 0x2, 0x3),
+                     (0x3, 0x1, 0x1, 0x2))
+MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
+                         (0x9, 0xE, 0xB, 0xD),
+                         (0xD, 0x9, 0xE, 0xB),
+                         (0xB, 0xD, 0x9, 0xE))
 RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
                       0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
                       0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
@@ -241,30 +250,37 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
                       0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
                       0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
 
+
 def sub_bytes(data):
     return [SBOX[x] for x in data]
 
+
 def sub_bytes_inv(data):
     return [SBOX_INV[x] for x in data]
 
+
 def rotate(data):
     return data[1:] + [data[0]]
 
+
 def key_schedule_core(data, rcon_iteration):
     data = rotate(data)
     data = sub_bytes(data)
     data[0] = data[0] ^ RCON[rcon_iteration]
-    
+
     return data
 
+
 def xor(data1, data2):
-    return [x^y for x, y in zip(data1, data2)]
+    return [x ^ y for x, y in zip(data1, data2)]
+
 
 def rijndael_mul(a, b):
-    if(a==0 or b==0):
+    if(a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
 
+
 def mix_column(data, matrix):
     data_mixed = []
     for row in range(4):
@@ -275,33 +291,38 @@ def mix_column(data, matrix):
         data_mixed.append(mixed)
     return data_mixed
 
+
 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
-        column = data[i*4 : (i+1)*4]
+        column = data[i * 4: (i + 1) * 4]
         data_mixed += mix_column(column, matrix)
     return data_mixed
 
+
 def mix_columns_inv(data):
     return mix_columns(data, MIX_COLUMN_MATRIX_INV)
 
+
 def shift_rows(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column + row) & 0b11) * 4 + row])
     return data_shifted
 
+
 def shift_rows_inv(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column - row) & 0b11) * 4 + row])
     return data_shifted
 
+
 def inc(data):
-    data = data[:] # copy
-    for i in range(len(data)-1,-1,-1):
+    data = data[:]  # copy
+    for i in range(len(data) - 1, -1, -1):
         if data[i] == 255:
             data[i] = 0
         else:
diff --git a/youtube_dl/cache.py b/youtube_dl/cache.py
index 79ff09f7897c2987d0824becca088f2e4cd3a164..5fe839eb1269586db95b9e25b4c8dabcdcd965d9 100644 (file)
@@ -8,9 +8,8 @@ import re
 import shutil
 import traceback
 
-from .utils import (
-    write_json_file,
-)
+from .compat import compat_expanduser, compat_getenv
+from .utils import write_json_file
 
 
 class Cache(object):
@@ -20,9 +19,9 @@ class Cache(object):
     def _get_root_dir(self):
         res = self._ydl.params.get('cachedir')
         if res is None:
-            cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
+            cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
             res = os.path.join(cache_root, 'youtube-dl')
-        return os.path.expanduser(res)
+        return compat_expanduser(res)
 
     def _get_cache_fn(self, section, key, dtype):
         assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
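Cache directory resolution above, in short; the patch swaps os.environ/os.path.expanduser for the new compat_* helpers so that a non-ASCII $XDG_CACHE_HOME survives on Python 2 (the printed value is illustrative):

    import os

    cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
    print(os.path.expanduser(os.path.join(cache_root, 'youtube-dl')))
    # e.g. /home/user/.cache/youtube-dl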
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
new file mode 100644 (file)
index 0000000..46d4388
--- /dev/null
@@ -0,0 +1,358 @@
+from __future__ import unicode_literals
+
+import getpass
+import optparse
+import os
+import re
+import subprocess
+import sys
+
+
+try:
+    import urllib.request as compat_urllib_request
+except ImportError:  # Python 2
+    import urllib2 as compat_urllib_request
+
+try:
+    import urllib.error as compat_urllib_error
+except ImportError:  # Python 2
+    import urllib2 as compat_urllib_error
+
+try:
+    import urllib.parse as compat_urllib_parse
+except ImportError:  # Python 2
+    import urllib as compat_urllib_parse
+
+try:
+    from urllib.parse import urlparse as compat_urllib_parse_urlparse
+except ImportError:  # Python 2
+    from urlparse import urlparse as compat_urllib_parse_urlparse
+
+try:
+    import urllib.parse as compat_urlparse
+except ImportError:  # Python 2
+    import urlparse as compat_urlparse
+
+try:
+    import http.cookiejar as compat_cookiejar
+except ImportError:  # Python 2
+    import cookielib as compat_cookiejar
+
+try:
+    import html.entities as compat_html_entities
+except ImportError:  # Python 2
+    import htmlentitydefs as compat_html_entities
+
+try:
+    import html.parser as compat_html_parser
+except ImportError:  # Python 2
+    import HTMLParser as compat_html_parser
+
+try:
+    import http.client as compat_http_client
+except ImportError:  # Python 2
+    import httplib as compat_http_client
+
+try:
+    from urllib.error import HTTPError as compat_HTTPError
+except ImportError:  # Python 2
+    from urllib2 import HTTPError as compat_HTTPError
+
+try:
+    from urllib.request import urlretrieve as compat_urlretrieve
+except ImportError:  # Python 2
+    from urllib import urlretrieve as compat_urlretrieve
+
+
+try:
+    from subprocess import DEVNULL
+    compat_subprocess_get_DEVNULL = lambda: DEVNULL
+except ImportError:
+    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
+
+try:
+    from urllib.parse import unquote as compat_urllib_parse_unquote
+except ImportError:
+    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
+        if string == '':
+            return string
+        res = string.split('%')
+        if len(res) == 1:
+            return string
+        if encoding is None:
+            encoding = 'utf-8'
+        if errors is None:
+            errors = 'replace'
+        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
+        pct_sequence = b''
+        string = res[0]
+        for item in res[1:]:
+            try:
+                if not item:
+                    raise ValueError
+                pct_sequence += item[:2].decode('hex')
+                rest = item[2:]
+                if not rest:
+                    # This segment was just a single percent-encoded character.
+                    # May be part of a sequence of code units, so delay decoding.
+                    # (Stored in pct_sequence).
+                    continue
+            except ValueError:
+                rest = '%' + item
+            # Encountered non-percent-encoded characters. Flush the current
+            # pct_sequence.
+            string += pct_sequence.decode(encoding, errors) + rest
+            pct_sequence = b''
+        if pct_sequence:
+            # Flush the final pct_sequence
+            string += pct_sequence.decode(encoding, errors)
+        return string
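    # A quick check of the backport above (the Python 3 branch behaves the
    # same); assuming: from youtube_dl.compat import compat_urllib_parse_unquote
    #
    #     compat_urllib_parse_unquote('caf%C3%A9%20au%20lait')  # 'café au lait'
    #     compat_urllib_parse_unquote('100%')                   # '100%', unchanged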
+
+
+try:
+    from urllib.parse import parse_qs as compat_parse_qs
+except ImportError:  # Python 2
+    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
+    # Python 2's version is apparently totally broken
+
+    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
+                   encoding='utf-8', errors='replace'):
+        qs, _coerce_result = qs, unicode
+        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+        r = []
+        for name_value in pairs:
+            if not name_value and not strict_parsing:
+                continue
+            nv = name_value.split('=', 1)
+            if len(nv) != 2:
+                if strict_parsing:
+                    raise ValueError("bad query field: %r" % (name_value,))
+                # Handle case of a control-name with no equal sign
+                if keep_blank_values:
+                    nv.append('')
+                else:
+                    continue
+            if len(nv[1]) or keep_blank_values:
+                name = nv[0].replace('+', ' ')
+                name = compat_urllib_parse_unquote(
+                    name, encoding=encoding, errors=errors)
+                name = _coerce_result(name)
+                value = nv[1].replace('+', ' ')
+                value = compat_urllib_parse_unquote(
+                    value, encoding=encoding, errors=errors)
+                value = _coerce_result(value)
+                r.append((name, value))
+        return r
+
+    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
+                        encoding='utf-8', errors='replace'):
+        parsed_result = {}
+        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
+                           encoding=encoding, errors=errors)
+        for name, value in pairs:
+            if name in parsed_result:
+                parsed_result[name].append(value)
+            else:
+                parsed_result[name] = [value]
+        return parsed_result
+
+try:
+    compat_str = unicode  # Python 2
+except NameError:
+    compat_str = str
+
+try:
+    compat_chr = unichr  # Python 2
+except NameError:
+    compat_chr = chr
+
+try:
+    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
+except ImportError:  # Python 2.6
+    from xml.parsers.expat import ExpatError as compat_xml_parse_error
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:  # Python < 3.3
+    def shlex_quote(s):
+        if re.match(r'^[-_\w./]+$', s):
+            return s
+        else:
+            return "'" + s.replace("'", "'\"'\"'") + "'"
+
+
+def compat_ord(c):
+    if type(c) is int:
+        return c
+    else:
+        return ord(c)
+
+
+if sys.version_info >= (3, 0):
+    compat_getenv = os.getenv
+    compat_expanduser = os.path.expanduser
+else:
+    # Environment variables should be decoded with filesystem encoding.
+    # Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
+
+    def compat_getenv(key, default=None):
+        from .utils import get_filesystem_encoding
+        env = os.getenv(key, default)
+        if env:
+            env = env.decode(get_filesystem_encoding())
+        return env
+
+    # HACK: The default implementations of os.path.expanduser from cpython do not decode
+    # environment variables with filesystem encoding. We will work around this by
+    # providing adjusted implementations.
+    # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
+    # for different platforms with correct environment variables decoding.
+
+    if os.name == 'posix':
+        def compat_expanduser(path):
+            """Expand ~ and ~user constructions.  If user or $HOME is unknown,
+            do nothing."""
+            if not path.startswith('~'):
+                return path
+            i = path.find('/', 1)
+            if i < 0:
+                i = len(path)
+            if i == 1:
+                if 'HOME' not in os.environ:
+                    import pwd
+                    userhome = pwd.getpwuid(os.getuid()).pw_dir
+                else:
+                    userhome = compat_getenv('HOME')
+            else:
+                import pwd
+                try:
+                    pwent = pwd.getpwnam(path[1:i])
+                except KeyError:
+                    return path
+                userhome = pwent.pw_dir
+            userhome = userhome.rstrip('/')
+            return (userhome + path[i:]) or '/'
+    elif os.name == 'nt' or os.name == 'ce':
+        def compat_expanduser(path):
+            """Expand ~ and ~user constructs.
+
+            If user or $HOME is unknown, do nothing."""
+            if path[:1] != '~':
+                return path
+            i, n = 1, len(path)
+            while i < n and path[i] not in '/\\':
+                i = i + 1
+
+            if 'HOME' in os.environ:
+                userhome = compat_getenv('HOME')
+            elif 'USERPROFILE' in os.environ:
+                userhome = compat_getenv('USERPROFILE')
+            elif 'HOMEPATH' not in os.environ:
+                return path
+            else:
+                try:
+                    drive = compat_getenv('HOMEDRIVE')
+                except KeyError:
+                    drive = ''
+                userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
+
+            if i != 1:  # ~user
+                userhome = os.path.join(os.path.dirname(userhome), path[1:i])
+
+            return userhome + path[i:]
+    else:
+        compat_expanduser = os.path.expanduser
+
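    # Why the Python 2 branch above exists, sketched with a hypothetical
    # non-ASCII home directory and a UTF-8 filesystem (HOME=/home/müller):
    #
    #     compat_getenv('HOME')          -> u'/home/m\xfcller'
    #     compat_expanduser('~/videos')  -> u'/home/m\xfcller/videos'
    #
    # whereas the stock os.path.expanduser would hand back undecoded bytes
    # (see #3854, #3217, #2918 referenced above).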
+
+if sys.version_info < (3, 0):
+    def compat_print(s):
+        from .utils import preferredencoding
+        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
+else:
+    def compat_print(s):
+        assert isinstance(s, compat_str)
+        print(s)
+
+
+try:
+    subprocess_check_output = subprocess.check_output
+except AttributeError:
+    def subprocess_check_output(*args, **kwargs):
+        assert 'input' not in kwargs
+        p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
+        output, _ = p.communicate()
+        ret = p.poll()
+        if ret:
+            raise subprocess.CalledProcessError(ret, p.args, output=output)
+        return output
+
+if sys.version_info < (3, 0) and sys.platform == 'win32':
+    def compat_getpass(prompt, *args, **kwargs):
+        if isinstance(prompt, compat_str):
+            from .utils import preferredencoding
+            prompt = prompt.encode(preferredencoding())
+        return getpass.getpass(prompt, *args, **kwargs)
+else:
+    compat_getpass = getpass.getpass
+
+# Old 2.6 and 2.7 releases require kwargs to be bytes
+try:
+    def _testfunc(x):
+        pass
+    _testfunc(**{'x': 0})
+except TypeError:
+    def compat_kwargs(kwargs):
+        return dict((bytes(k), v) for k, v in kwargs.items())
+else:
+    compat_kwargs = lambda kwargs: kwargs
+
+
+# Fix https://github.com/rg3/youtube-dl/issues/4223
+# See http://bugs.python.org/issue9161 for what is broken
+def workaround_optparse_bug9161():
+    op = optparse.OptionParser()
+    og = optparse.OptionGroup(op, 'foo')
+    try:
+        og.add_option('-t')
+    except TypeError:
+        real_add_option = optparse.OptionGroup.add_option
+
+        def _compat_add_option(self, *args, **kwargs):
+            enc = lambda v: (
+                v.encode('ascii', 'replace') if isinstance(v, compat_str)
+                else v)
+            bargs = [enc(a) for a in args]
+            bkwargs = dict(
+                (k, enc(v)) for k, v in kwargs.items())
+            return real_add_option(self, *bargs, **bkwargs)
+        optparse.OptionGroup.add_option = _compat_add_option
+
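    # Usage sketch: the probe above runs once at startup, before any option
    # parsing (simplified from the _real_main() hunk earlier in this commit):
    #
    #     from .compat import workaround_optparse_bug9161
    #     workaround_optparse_bug9161()
    #     parser, opts, args = parseOpts(argv)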
+
+__all__ = [
+    'compat_HTTPError',
+    'compat_chr',
+    'compat_cookiejar',
+    'compat_expanduser',
+    'compat_getenv',
+    'compat_getpass',
+    'compat_html_entities',
+    'compat_html_parser',
+    'compat_http_client',
+    'compat_kwargs',
+    'compat_ord',
+    'compat_parse_qs',
+    'compat_print',
+    'compat_str',
+    'compat_subprocess_get_DEVNULL',
+    'compat_urllib_error',
+    'compat_urllib_parse',
+    'compat_urllib_parse_unquote',
+    'compat_urllib_parse_urlparse',
+    'compat_urllib_request',
+    'compat_urlparse',
+    'compat_urlretrieve',
+    'compat_xml_parse_error',
+    'shlex_quote',
+    'subprocess_check_output',
+    'workaround_optparse_bug9161',
+]
diff --git a/youtube_dl/downloader/__init__.py b/youtube_dl/downloader/__init__.py
index 3f941596edd83edda99917b57187485941133e8f..31e28df58e828f8c5434390ff18a63358215924a 100644 (file)
@@ -30,3 +30,8 @@ def get_suitable_downloader(info_dict):
         return F4mFD
     else:
         return HttpFD
+
+__all__ = [
+    'get_suitable_downloader',
+    'FileDownloader',
+]
diff --git a/youtube_dl/downloader/common.py b/youtube_dl/downloader/common.py
index f85f0c94e7544b6eabad3d1f4158f88b9e3c981d..8181bca093c227d1f11b1c2d91d61f026749a731 100644 (file)
@@ -1,10 +1,12 @@
+from __future__ import unicode_literals
+
 import os
 import re
 import sys
 import time
 
+from ..compat import compat_str
 from ..utils import (
-    compat_str,
     encodeFilename,
     format_bytes,
     timeconvert,
@@ -78,8 +80,10 @@ class FileDownloader(object):
     def calc_eta(start, now, total, current):
         if total is None:
             return None
+        if now is None:
+            now = time.time()
         dif = now - start
-        if current == 0 or dif < 0.001: # One millisecond
+        if current == 0 or dif < 0.001:  # One millisecond
             return None
         rate = float(current) / dif
         return int((float(total) - float(current)) / rate)
@@ -93,7 +97,7 @@ class FileDownloader(object):
     @staticmethod
     def calc_speed(start, now, bytes):
         dif = now - start
-        if bytes == 0 or dif < 0.001: # One millisecond
+        if bytes == 0 or dif < 0.001:  # One millisecond
             return None
         return float(bytes) / dif
 
@@ -106,7 +110,7 @@ class FileDownloader(object):
     @staticmethod
     def best_block_size(elapsed_time, bytes):
         new_min = max(bytes / 2.0, 1.0)
-        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
+        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
         if elapsed_time < 0.001:
             return int(new_max)
         rate = bytes / elapsed_time
@@ -144,29 +148,30 @@ class FileDownloader(object):
     def report_error(self, *args, **kargs):
         self.ydl.report_error(*args, **kargs)
 
-    def slow_down(self, start_time, byte_counter):
+    def slow_down(self, start_time, now, byte_counter):
         """Sleep if the download speed is over the rate limit."""
         rate_limit = self.params.get('ratelimit', None)
         if rate_limit is None or byte_counter == 0:
             return
-        now = time.time()
+        if now is None:
+            now = time.time()
         elapsed = now - start_time
         if elapsed <= 0.0:
             return
         speed = float(byte_counter) / elapsed
         if speed > rate_limit:
-            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
+            time.sleep(max((byte_counter // rate_limit) - elapsed, 0))
 
     def temp_name(self, filename):
         """Returns a temporary filename for the given filename."""
-        if self.params.get('nopart', False) or filename == u'-' or \
+        if self.params.get('nopart', False) or filename == '-' or \
                 (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
             return filename
-        return filename + u'.part'
+        return filename + '.part'
 
     def undo_temp_name(self, filename):
-        if filename.endswith(u'.part'):
-            return filename[:-len(u'.part')]
+        if filename.endswith('.part'):
+            return filename[:-len('.part')]
         return filename
 
     def try_rename(self, old_filename, new_filename):
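
Algebraically the sleep is the same quantity as before, byte_counter/rate_limit minus elapsed (the time N bytes should take at R bytes/s, less the time already spent), now clamped at zero; note the committed version floors it with `//`, so the throttle can briefly overshoot by less than one second's worth of data. The same budget idea as a standalone sketch, with float division:

    import time

    def throttle(start_time, byte_counter, rate_limit):
        # Sleep until byte_counter / elapsed <= rate_limit holds again.
        elapsed = time.time() - start_time
        if elapsed <= 0 or byte_counter == 0:
            return
        budget = byte_counter / float(rate_limit)  # seconds these bytes should take
        time.sleep(max(budget - elapsed, 0))
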
@@ -175,7 +180,7 @@ class FileDownloader(object):
                 return
             os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
         except (IOError, OSError) as err:
-            self.report_error(u'unable to rename file: %s' % compat_str(err))
+            self.report_error('unable to rename file: %s' % compat_str(err))
 
     def try_utime(self, filename, last_modified_hdr):
         """Try to set the last-modified time of the given file."""
@@ -200,10 +205,10 @@ class FileDownloader(object):
 
     def report_destination(self, filename):
         """Report destination filename."""
-        self.to_screen(u'[download] Destination: ' + filename)
+        self.to_screen('[download] Destination: ' + filename)
 
     def _report_progress_status(self, msg, is_last_line=False):
-        fullmsg = u'[download] ' + msg
+        fullmsg = '[download] ' + msg
         if self.params.get('progress_with_newline', False):
             self.to_screen(fullmsg)
         else:
@@ -211,13 +216,13 @@ class FileDownloader(object):
                 prev_len = getattr(self, '_report_progress_prev_line_length',
                                    0)
                 if prev_len > len(fullmsg):
-                    fullmsg += u' ' * (prev_len - len(fullmsg))
+                    fullmsg += ' ' * (prev_len - len(fullmsg))
                 self._report_progress_prev_line_length = len(fullmsg)
-                clear_line = u'\r'
+                clear_line = '\r'
             else:
-                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
+                clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
             self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
-        self.to_console_title(u'youtube-dl ' + msg)
+        self.to_console_title('youtube-dl ' + msg)
 
     def report_progress(self, percent, data_len_str, speed, eta):
         """Report download progress."""
@@ -233,7 +238,7 @@ class FileDownloader(object):
             percent_str = 'Unknown %'
         speed_str = self.format_speed(speed)
 
-        msg = (u'%s of %s at %s ETA %s' %
+        msg = ('%s of %s at %s ETA %s' %
                (percent_str, data_len_str, speed_str, eta_str))
         self._report_progress_status(msg)
 
@@ -243,37 +248,37 @@ class FileDownloader(object):
         downloaded_str = format_bytes(downloaded_data_len)
         speed_str = self.format_speed(speed)
         elapsed_str = FileDownloader.format_seconds(elapsed)
-        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
+        msg = '%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
         self._report_progress_status(msg)
 
     def report_finish(self, data_len_str, tot_time):
         """Report download finished."""
         if self.params.get('noprogress', False):
-            self.to_screen(u'[download] Download completed')
+            self.to_screen('[download] Download completed')
         else:
             self._report_progress_status(
-                (u'100%% of %s in %s' %
+                ('100%% of %s in %s' %
                  (data_len_str, self.format_seconds(tot_time))),
                 is_last_line=True)
 
     def report_resuming_byte(self, resume_len):
         """Report attempt to resume at given byte."""
-        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
+        self.to_screen('[download] Resuming download at byte %s' % resume_len)
 
     def report_retry(self, count, retries):
         """Report retry in case of HTTP error 5xx"""
-        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+        self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
 
     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
-            self.to_screen(u'[download] %s has already been downloaded' % file_name)
+            self.to_screen('[download] %s has already been downloaded' % file_name)
         except UnicodeEncodeError:
-            self.to_screen(u'[download] The file has already been downloaded')
+            self.to_screen('[download] The file has already been downloaded')
 
     def report_unable_to_resume(self):
         """Report it was impossible to resume download."""
-        self.to_screen(u'[download] Unable to resume')
+        self.to_screen('[download] Unable to resume')
 
     def download(self, filename, info_dict):
         """Download to a filename using the info from info_dict
@@ -293,7 +298,7 @@ class FileDownloader(object):
 
     def real_download(self, filename, info_dict):
         """Real download process. Redefine in subclasses."""
-        raise NotImplementedError(u'This method must be implemented by subclasses')
+        raise NotImplementedError('This method must be implemented by subclasses')
 
     def _hook_progress(self, status):
         for ph in self._progress_hooks:
index 54dd6ac3fc55439f3a5e4b8740331f1593a5ff5f..ef3e0d5f44122621eb9252f93dd1c5471b3fcfee 100644 (file)
@@ -9,10 +9,12 @@ import xml.etree.ElementTree as etree
 
 from .common import FileDownloader
 from .http import HttpFD
+from ..compat import (
+    compat_urlparse,
+)
 from ..utils import (
     struct_pack,
     struct_unpack,
-    compat_urlparse,
     format_bytes,
     encodeFilename,
     sanitize_open,
@@ -55,7 +57,7 @@ class FlvReader(io.BytesIO):
         if size == 1:
             real_size = self.read_unsigned_long_long()
             header_end = 16
-        return real_size, box_type, self.read(real_size-header_end)
+        return real_size, box_type, self.read(real_size - header_end)
 
     def read_asrt(self):
         # version
@@ -180,7 +182,7 @@ def build_fragments_list(boot_info):
     n_frags = segment_run_entry[1]
     fragment_run_entry_table = boot_info['fragments'][0]['fragments']
     first_frag_number = fragment_run_entry_table[0]['first']
-    for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
+    for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
         res.append((1, frag_number))
     return res
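
A side note on the pairing idiom in build_fragments_list(): zip() stops at its shortest argument, so zipping a bounded range against an unbounded itertools.count() yields exactly n_frags pairs while the fragment numbers keep counting up from first_frag_number. In miniature:

    import itertools

    n_frags, first_frag_number = 3, 12
    pairs = list(zip(range(1, n_frags + 1), itertools.count(first_frag_number)))
    # -> [(1, 12), (2, 13), (3, 14)]
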
 
@@ -225,14 +227,16 @@ class F4mFD(FileDownloader):
         self.to_screen('[download] Downloading f4m manifest')
         manifest = self.ydl.urlopen(man_url).read()
         self.report_destination(filename)
-        http_dl = HttpQuietDownloader(self.ydl,
+        http_dl = HttpQuietDownloader(
+            self.ydl,
             {
                 'continuedl': True,
                 'quiet': True,
                 'noprogress': True,
                 'ratelimit': self.params.get('ratelimit', None),
                 'test': self.params.get('test', False),
-            })
+            }
+        )
 
         doc = etree.fromstring(manifest)
         formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
@@ -245,9 +249,16 @@ class F4mFD(FileDownloader):
                 lambda f: int(f[0]) == requested_bitrate, formats))[0]
 
         base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
-        bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
+        bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
+        if bootstrap_node.text is None:
+            bootstrap_url = compat_urlparse.urljoin(
+                base_url, bootstrap_node.attrib['url'])
+            bootstrap = self.ydl.urlopen(bootstrap_url).read()
+        else:
+            bootstrap = base64.b64decode(bootstrap_node.text)
         metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
         boot_info = read_bootstrap_info(bootstrap)
+
         fragments_list = build_fragments_list(boot_info)
         if self.params.get('test', False):
             # We only download the first fragment
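
The bootstrapInfo change covers both encodings the f4m format allows: the bootstrap box inlined as base64 element text, or referenced through a `url` attribute when the text is empty. Both shapes in miniature (hypothetical data):

    import base64
    import xml.etree.ElementTree as etree

    inline = etree.fromstring('<bootstrapInfo>QUJD</bootstrapInfo>')
    external = etree.fromstring('<bootstrapInfo url="http://example.com/boot.bin"/>')

    assert base64.b64decode(inline.text) == b'ABC'
    assert external.text is None  # -> fetch external.attrib['url'] instead
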
@@ -271,7 +282,7 @@ class F4mFD(FileDownloader):
         def frag_progress_hook(status):
             frag_total_bytes = status.get('total_bytes', 0)
             estimated_size = (state['downloaded_bytes'] +
-                (total_frags - state['frag_counter']) * frag_total_bytes)
+                              (total_frags - state['frag_counter']) * frag_total_bytes)
             if status['status'] == 'finished':
                 state['downloaded_bytes'] += frag_total_bytes
                 state['frag_counter'] += 1
@@ -281,13 +292,13 @@ class F4mFD(FileDownloader):
                 frag_downloaded_bytes = status['downloaded_bytes']
                 byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
                 frag_progress = self.calc_percent(frag_downloaded_bytes,
-                    frag_total_bytes)
+                                                  frag_total_bytes)
                 progress = self.calc_percent(state['frag_counter'], total_frags)
                 progress += frag_progress / float(total_frags)
 
             eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
             self.report_progress(progress, format_bytes(estimated_size),
-                status.get('speed'), eta)
+                                 status.get('speed'), eta)
         http_dl.add_progress_hook(frag_progress_hook)
 
         frags_filenames = []
index 68eafa403df4ad157feb13174584786205b41bea..5bb0f3cfd19632f126d4f7d7b0df407b608778fc 100644 (file)
@@ -4,10 +4,13 @@ import os
 import re
 import subprocess
 
+from ..postprocessor.ffmpeg import FFmpegPostProcessor
 from .common import FileDownloader
-from ..utils import (
+from ..compat import (
     compat_urlparse,
     compat_urllib_request,
+)
+from ..utils import (
     check_executable,
     encodeFilename,
 )
@@ -28,14 +31,17 @@ class HlsFD(FileDownloader):
             if check_executable(program, ['-version']):
                 break
         else:
-            self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
+            self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
             return False
         cmd = [program] + args
 
+        ffpp = FFmpegPostProcessor(downloader=self)
+        ffpp.check_version()
+
         retval = subprocess.call(cmd)
         if retval == 0:
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize))
+            self.to_screen('\r[%s] %s bytes' % (cmd[0], fsize))
             self.try_rename(tmpfilename, filename)
             self._hook_progress({
                 'downloaded_bytes': fsize,
@@ -45,8 +51,8 @@ class HlsFD(FileDownloader):
             })
             return True
         else:
-            self.to_stderr(u"\n")
-            self.report_error(u'%s exited with code %d' % (program, retval))
+            self.to_stderr('\n')
+            self.report_error('%s exited with code %d' % (program, retval))
             return False
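
The ffmpeg/avconv probe above relies on for/else: the else branch fires only when no candidate broke out of the loop. If utils.check_executable were unavailable, an equivalent probe might look like this (a sketch, not the utils implementation):

    import subprocess

    def runs_at_all(program, args):
        try:
            subprocess.Popen(
                [program] + args,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        except OSError:
            return False
        return True

    for program in ('avconv', 'ffmpeg'):
        if runs_at_all(program, ['-version']):
            break
    else:
        program = None  # neither tool is installed
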
 
 
@@ -101,4 +107,3 @@ class NativeHlsFD(FileDownloader):
         })
         self.try_rename(tmpfilename, filename)
         return True
-
index f62555ce0e33353f5eac848e3956b263f9d43bcb..e68f20c9f46a93ebfeca2ff47dc0843f4ab94874 100644 (file)
@@ -1,12 +1,15 @@
+from __future__ import unicode_literals
+
 import os
 import time
 
 from .common import FileDownloader
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
     compat_urllib_error,
+)
+from ..utils import (
     ContentTooShortError,
-
     encodeFilename,
     sanitize_open,
     format_bytes,
@@ -106,7 +109,7 @@ class HttpFD(FileDownloader):
                 self.report_retry(count, retries)
 
         if count > retries:
-            self.report_error(u'giving up after %s retries' % retries)
+            self.report_error('giving up after %s retries' % retries)
             return False
 
         data_len = data.info().get('Content-length', None)
@@ -124,26 +127,31 @@ class HttpFD(FileDownloader):
             min_data_len = self.params.get("min_filesize", None)
             max_data_len = self.params.get("max_filesize", None)
             if min_data_len is not None and data_len < min_data_len:
-                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
+                self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                 return False
             if max_data_len is not None and data_len > max_data_len:
-                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
+                self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                 return False
 
         data_len_str = format_bytes(data_len)
         byte_counter = 0 + resume_len
         block_size = self.params.get('buffersize', 1024)
         start = time.time()
+
+        # Measure time over the whole while-loop, so that slow_down() and
+        # best_block_size() work together properly
+        now = None  # needed for slow_down() in the first loop run
+        before = start  # start measuring
         while True:
+
             # Download and write
-            before = time.time()
             data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
-            after = time.time()
+            byte_counter += len(data_block)
+
+            # exit loop when download is finished
             if len(data_block) == 0:
                 break
-            byte_counter += len(data_block)
 
-            # Open file just in time
+            # Open destination file just in time
             if stream is None:
                 try:
                     (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
@@ -151,19 +159,30 @@ class HttpFD(FileDownloader):
                     filename = self.undo_temp_name(tmpfilename)
                     self.report_destination(filename)
                 except (OSError, IOError) as err:
-                    self.report_error(u'unable to open for writing: %s' % str(err))
+                    self.report_error('unable to open for writing: %s' % str(err))
                     return False
             try:
                 stream.write(data_block)
             except (IOError, OSError) as err:
-                self.to_stderr(u"\n")
-                self.report_error(u'unable to write data: %s' % str(err))
+                self.to_stderr('\n')
+                self.report_error('unable to write data: %s' % str(err))
                 return False
+
+            # Apply rate limit
+            self.slow_down(start, now, byte_counter - resume_len)
+
+            # end measuring of one loop run
+            now = time.time()
+            after = now
+
+            # Adjust block size
             if not self.params.get('noresizebuffer', False):
                 block_size = self.best_block_size(after - before, len(data_block))
 
+            before = after
+
             # Progress message
-            speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
+            speed = self.calc_speed(start, now, byte_counter - resume_len)
             if data_len is None:
                 eta = percent = None
             else:
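
The structural point of this hunk: `before`/`after` used to bracket only `data.read()`, so buffer sizing ignored the cost of writing and throttling; now a single timestamp per iteration feeds the speed display, slow_down() and best_block_size() alike. A self-contained skeleton of the new ordering (the BytesIO source and the rate limit are stand-ins):

    import io
    import time

    source = io.BytesIO(b'x' * 65536)  # stand-in for the HTTP response
    RATE_LIMIT = 32768                 # bytes/s, hypothetical

    start = time.time()
    now = None                         # None on the first pass, as in the diff
    before = start
    block_size = 1024
    byte_counter = 0
    while True:
        data_block = source.read(block_size)
        byte_counter += len(data_block)
        if not data_block:
            break
        # throttle *inside* the timed span, like slow_down(start, now, ...)
        if now is not None:
            time.sleep(max(byte_counter / float(RATE_LIMIT) - (now - start), 0))
        now = time.time()
        if now - before < 0.001:       # crude stand-in for best_block_size()
            block_size = min(block_size * 2, 4194304)
        before = now
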
@@ -184,14 +203,11 @@ class HttpFD(FileDownloader):
             if is_test and byte_counter == data_len:
                 break
 
-            # Apply rate limit
-            self.slow_down(start, byte_counter - resume_len)
-
         if stream is None:
-            self.to_stderr(u"\n")
-            self.report_error(u'Did not get any data blocks')
+            self.to_stderr('\n')
+            self.report_error('Did not get any data blocks')
             return False
-        if tmpfilename != u'-':
+        if tmpfilename != '-':
             stream.close()
         self.report_finish(data_len_str, (time.time() - start))
         if data_len is not None and byte_counter != data_len:
index 4de7f15f4ee3dd11b51afbb9c0e1e221b12d0784..c53195da0c9471d55a61b53b1041e05ee209697e 100644 (file)
@@ -1,7 +1,10 @@
+from __future__ import unicode_literals
+
 import os
 import subprocess
 
 from .common import FileDownloader
+from ..compat import compat_subprocess_get_DEVNULL
 from ..utils import (
     encodeFilename,
 )
@@ -13,19 +16,23 @@ class MplayerFD(FileDownloader):
         self.report_destination(filename)
         tmpfilename = self.temp_name(filename)
 
-        args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
+        args = [
+            'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy',
+            '-dumpstream', '-dumpfile', tmpfilename, url]
         # Check for mplayer first
         try:
-            subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
+            subprocess.call(
+                ['mplayer', '-h'],
+                stdout=compat_subprocess_get_DEVNULL(), stderr=subprocess.STDOUT)
         except (OSError, IOError):
-            self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0])
+            self.report_error('MMS or RTSP download detected but "%s" could not be run' % args[0])
             return False
 
         # Download using mplayer.
         retval = subprocess.call(args)
         if retval == 0:
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
+            self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
             self.try_rename(tmpfilename, filename)
             self._hook_progress({
                 'downloaded_bytes': fsize,
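
`compat_subprocess_get_DEVNULL` replaces the inline `open(os.path.devnull, 'w')`; presumably it hands back `subprocess.DEVNULL` where Python 3.3+ provides it, and an opened null device elsewhere. Roughly:

    import os
    import subprocess

    def compat_subprocess_get_DEVNULL():
        # a sketch of the assumed shim, not the compat.py source
        if hasattr(subprocess, 'DEVNULL'):
            return subprocess.DEVNULL      # Python 3.3+
        return open(os.path.devnull, 'w')  # older interpreters
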
@@ -35,6 +42,6 @@ class MplayerFD(FileDownloader):
             })
             return True
         else:
-            self.to_stderr(u"\n")
-            self.report_error(u'mplayer exited with code %d' % retval)
+            self.to_stderr('\n')
+            self.report_error('mplayer exited with code %d' % retval)
             return False
index 5eb108302339ec1678458fd8572c219a0980200b..5759126751c3628da461136c4439f7e9985027d2 100644 (file)
@@ -7,14 +7,20 @@ import sys
 import time
 
 from .common import FileDownloader
+from ..compat import compat_str
 from ..utils import (
     check_executable,
-    compat_str,
     encodeFilename,
     format_bytes,
+    get_exe_version,
 )
 
 
+def rtmpdump_version():
+    return get_exe_version(
+        'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)')
+
+
 class RtmpFD(FileDownloader):
     def real_download(self, filename, info_dict):
         def run_rtmpdump(args):
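
rtmpdump_version() delegates to the new utils.get_exe_version() helper; were that helper unavailable, the equivalent probe is roughly the following (rtmpdump prints a banner such as "RTMPDump v2.4" when invoked with --help):

    import re
    import subprocess

    def probe_rtmpdump_version():
        # a sketch, not the utils implementation
        try:
            out = subprocess.Popen(
                ['rtmpdump', '--help'],
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            ).communicate()[0]
        except OSError:
            return None  # rtmpdump is not installed
        m = re.search(r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)',
                      out.decode('ascii', 'replace'))
        return m.group(1) if m else None
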
@@ -40,13 +46,13 @@ class RtmpFD(FileDownloader):
                     continue
                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                 if mobj:
-                    downloaded_data_len = int(float(mobj.group(1))*1024)
+                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                     percent = float(mobj.group(2))
                     if not resume_percent:
                         resume_percent = percent
                         resume_downloaded_data_len = downloaded_data_len
-                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
-                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
+                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
+                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                     data_len = None
                     if percent > 0:
                         data_len = int(downloaded_data_len * 100 / percent)
@@ -66,7 +72,7 @@ class RtmpFD(FileDownloader):
                     # no percent for live streams
                     mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                     if mobj:
-                        downloaded_data_len = int(float(mobj.group(1))*1024)
+                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                         time_now = time.time()
                         speed = self.calc_speed(start, time_now, downloaded_data_len)
                         self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
@@ -82,7 +88,7 @@ class RtmpFD(FileDownloader):
                         if not cursor_in_new_line:
                             self.to_screen('')
                         cursor_in_new_line = True
-                        self.to_screen('[rtmpdump] '+line)
+                        self.to_screen('[rtmpdump] ' + line)
             proc.wait()
             if not cursor_in_new_line:
                 self.to_screen('')
@@ -174,7 +180,7 @@ class RtmpFD(FileDownloader):
         while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
             prevsize = os.path.getsize(encodeFilename(tmpfilename))
             self.to_screen('[rtmpdump] %s bytes' % prevsize)
-            time.sleep(5.0) # This seems to be needed
+            time.sleep(5.0)  # This seems to be needed
             retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED])
             cursize = os.path.getsize(encodeFilename(tmpfilename))
             if prevsize == cursize and retval == RD_FAILED:
index dd770fdf13342e4499e42a6d74a63c0d06bca8ad..119ec2044efca360652008105e27772b4fc13033 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 from .abc import ABCIE
 from .academicearth import AcademicEarthCourseIE
 from .addanime import AddAnimeIE
@@ -20,19 +22,25 @@ from .arte import (
     ArteTVDDCIE,
     ArteTVEmbedIE,
 )
+from .audiomack import AudiomackIE
 from .auengine import AUEngineIE
+from .azubu import AzubuIE
 from .bambuser import BambuserIE, BambuserChannelIE
 from .bandcamp import BandcampIE, BandcampAlbumIE
 from .bbccouk import BBCCoUkIE
 from .beeg import BeegIE
 from .behindkink import BehindKinkIE
+from .bet import BetIE
+from .bild import BildIE
 from .bilibili import BiliBiliIE
 from .blinkx import BlinkxIE
 from .bliptv import BlipTVIE, BlipTVUserIE
 from .bloomberg import BloombergIE
+from .bpb import BpbIE
 from .br import BRIE
 from .breakcom import BreakIE
 from .brightcove import BrightcoveIE
+from .buzzfeed import BuzzFeedIE
 from .byutv import BYUtvIE
 from .c56 import C56IE
 from .canal13cl import Canal13clIE
@@ -43,7 +51,7 @@ from .cbsnews import CBSNewsIE
 from .ceskatelevize import CeskaTelevizeIE
 from .channel9 import Channel9IE
 from .chilloutzone import ChilloutzoneIE
-from .cinemassacre import CinemassacreIE
+from .cinchcast import CinchcastIE
 from .clipfish import ClipfishIE
 from .cliphunter import CliphunterIE
 from .clipsyndicate import ClipsyndicateIE
@@ -57,12 +65,15 @@ from .cnn import (
 )
 from .collegehumor import CollegeHumorIE
 from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
+from .comcarcoff import ComCarCoffIE
 from .condenast import CondeNastIE
 from .cracked import CrackedIE
 from .criterion import CriterionIE
-from .crunchyroll import CrunchyrollIE
+from .crunchyroll import (
+    CrunchyrollIE,
+    CrunchyrollShowPlaylistIE
+)
 from .cspan import CSpanIE
-from .d8 import D8IE
 from .dailymotion import (
     DailymotionIE,
     DailymotionPlaylistIE,
@@ -111,7 +122,10 @@ from .fktv import (
     FKTVPosteckeIE,
 )
 from .flickr import FlickrIE
+from .folketinget import FolketingetIE
 from .fourtube import FourTubeIE
+from .foxgay import FoxgayIE
+from .foxnews import FoxNewsIE
 from .franceculture import FranceCultureIE
 from .franceinter import FranceInterIE
 from .francetv import (
@@ -123,6 +137,7 @@ from .francetv import (
 )
 from .freesound import FreesoundIE
 from .freespeech import FreespeechIE
+from .freevideo import FreeVideoIE
 from .funnyordie import FunnyOrDieIE
 from .gamekings import GamekingsIE
 from .gameone import (
@@ -134,14 +149,18 @@ from .gamestar import GameStarIE
 from .gametrailers import GametrailersIE
 from .gdcvault import GDCVaultIE
 from .generic import GenericIE
+from .giantbomb import GiantBombIE
+from .glide import GlideIE
 from .globo import GloboIE
 from .godtube import GodTubeIE
+from .goldenmoustache import GoldenMoustacheIE
 from .golem import GolemIE
 from .googleplus import GooglePlusIE
 from .googlesearch import GoogleSearchIE
 from .gorillavid import GorillaVidIE
 from .goshgay import GoshgayIE
 from .grooveshark import GroovesharkIE
+from .groupon import GrouponIE
 from .hark import HarkIE
 from .heise import HeiseIE
 from .helsinki import HelsinkiIE
@@ -173,7 +192,6 @@ from .jadorecettepub import JadoreCettePubIE
 from .jeuxvideo import JeuxVideoIE
 from .jove import JoveIE
 from .jukebox import JukeboxIE
-from .justintv import JustinTVIE
 from .jpopsukitv import JpopsukiIE
 from .kankan import KankanIE
 from .keezmovies import KeezMoviesIE
@@ -184,6 +202,7 @@ from .kontrtube import KontrTubeIE
 from .krasview import KrasViewIE
 from .ku6 import Ku6IE
 from .la7 import LA7IE
+from .laola1tv import Laola1TvIE
 from .lifenews import LifeNewsIE
 from .liveleak import LiveLeakIE
 from .livestream import (
@@ -204,6 +223,7 @@ from .mdr import MDRIE
 from .metacafe import MetacafeIE
 from .metacritic import MetacriticIE
 from .mgoon import MgoonIE
+from .minhateca import MinhatecaIE
 from .ministrygrid import MinistryGridIE
 from .mit import TechTVMITIE, MITIE, OCWMITIE
 from .mitele import MiTeleIE
@@ -230,9 +250,10 @@ from .muenchentv import MuenchenTVIE
 from .musicplayon import MusicPlayOnIE
 from .musicvault import MusicVaultIE
 from .muzu import MuzuTVIE
-from .myspace import MySpaceIE
+from .myspace import MySpaceIE, MySpaceAlbumIE
 from .myspass import MySpassIE
 from .myvideo import MyVideoIE
+from .myvidster import MyVidsterIE
 from .naver import NaverIE
 from .nba import NBAIE
 from .nbc import (
@@ -246,7 +267,7 @@ from .newstube import NewstubeIE
 from .nfb import NFBIE
 from .nfl import NFLIE
 from .nhl import NHLIE, NHLVideocenterIE
-from .niconico import NiconicoIE
+from .niconico import NiconicoIE, NiconicoPlaylistIE
 from .ninegag import NineGagIE
 from .noco import NocoIE
 from .normalboots import NormalbootsIE
@@ -275,6 +296,7 @@ from .orf import (
 from .parliamentliveuk import ParliamentLiveUKIE
 from .patreon import PatreonIE
 from .pbs import PBSIE
+from .phoenix import PhoenixIE
 from .photobucket import PhotobucketIE
 from .planetaplay import PlanetaPlayIE
 from .played import PlayedIE
@@ -288,6 +310,8 @@ from .pornoxo import PornoXOIE
 from .promptfile import PromptFileIE
 from .prosiebensat1 import ProSiebenSat1IE
 from .pyvideo import PyvideoIE
+from .quickvid import QuickVidIE
+from .radiode import RadioDeIE
 from .radiofrance import RadioFranceIE
 from .rai import RaiIE
 from .rbmaradio import RBMARadioIE
@@ -300,6 +324,7 @@ from .roxwel import RoxwelIE
 from .rtbf import RTBFIE
 from .rtlnl import RtlXlIE
 from .rtlnow import RTLnowIE
+from .rtp import RTPIE
 from .rts import RTSIE
 from .rtve import RTVEALaCartaIE, RTVELiveIE
 from .ruhd import RUHDIE
@@ -315,7 +340,10 @@ from .savefrom import SaveFromIE
 from .sbs import SBSIE
 from .scivee import SciVeeIE
 from .screencast import ScreencastIE
+from .screenwavemedia import CinemassacreIE, ScreenwaveMediaIE, TeamFourIE
 from .servingsys import ServingSysIE
+from .sexu import SexuIE
+from .sexykarma import SexyKarmaIE
 from .shared import SharedIE
 from .sharesix import ShareSixIE
 from .sina import SinaIE
@@ -349,6 +377,7 @@ from .spike import SpikeIE
 from .sport5 import Sport5IE
 from .sportbox import SportBoxIE
 from .sportdeutschland import SportDeutschlandIE
+from .srmediathek import SRMediathekIE
 from .stanfordoc import StanfordOpenClassroomIE
 from .steam import SteamIE
 from .streamcloud import StreamcloudIE
@@ -359,6 +388,7 @@ from .syfy import SyfyIE
 from .sztvhu import SztvHuIE
 from .tagesschau import TagesschauIE
 from .tapely import TapelyIE
+from .tass import TassIE
 from .teachertube import (
     TeacherTubeIE,
     TeacherTubeUserIE,
@@ -367,15 +397,19 @@ from .teachingchannel import TeachingChannelIE
 from .teamcoco import TeamcocoIE
 from .techtalks import TechTalksIE
 from .ted import TEDIE
+from .telebruxelles import TeleBruxellesIE
+from .telecinco import TelecincoIE
 from .telemb import TeleMBIE
 from .tenplay import TenPlayIE
 from .testurl import TestURLIE
 from .tf1 import TF1IE
+from .theonion import TheOnionIE
 from .theplatform import ThePlatformIE
 from .thesixtyone import TheSixtyOneIE
 from .thisav import ThisAVIE
 from .tinypic import TinyPicIE
 from .tlc import TlcIE, TlcDeIE
+from .tmz import TMZIE
 from .tnaflix import TNAFlixIE
 from .thvideo import (
     THVideoIE,
@@ -389,11 +423,14 @@ from .trutube import TruTubeIE
 from .tube8 import Tube8IE
 from .tudou import TudouIE
 from .tumblr import TumblrIE
+from .tunein import TuneInIE
 from .turbo import TurboIE
 from .tutv import TutvIE
 from .tvigle import TvigleIE
 from .tvp import TvpIE
 from .tvplay import TVPlayIE
+from .twentyfourvideo import TwentyFourVideoIE
+from .twitch import TwitchIE
 from .ubu import UbuIE
 from .udemy import (
     UdemyIE,
@@ -409,6 +446,7 @@ from .vesti import VestiIE
 from .vevo import VevoIE
 from .vgtv import VGTVIE
 from .vh1 import VH1IE
+from .vice import ViceIE
 from .viddler import ViddlerIE
 from .videobam import VideoBamIE
 from .videodetective import VideoDetectiveIE
@@ -419,6 +457,7 @@ from .videopremium import VideoPremiumIE
 from .videott import VideoTtIE
 from .videoweed import VideoWeedIE
 from .vidme import VidmeIE
+from .vidzi import VidziIE
 from .vimeo import (
     VimeoIE,
     VimeoAlbumIE,
@@ -435,9 +474,13 @@ from .vine import (
     VineUserIE,
 )
 from .viki import VikiIE
-from .vk import VKIE
+from .vk import (
+    VKIE,
+    VKUserVideosIE,
+)
 from .vodlocker import VodlockerIE
 from .vporn import VpornIE
+from .vrt import VRTIE
 from .vube import VubeIE
 from .vuclip import VuClipIE
 from .vulture import VultureIE
@@ -458,6 +501,7 @@ from .wrzuta import WrzutaIE
 from .xbef import XBefIE
 from .xboxclips import XboxClipsIE
 from .xhamster import XHamsterIE
+from .xminus import XMinusIE
 from .xnxx import XNXXIE
 from .xvideos import XVideosIE
 from .xtube import XTubeUserIE, XTubeIE
@@ -487,9 +531,11 @@ from .youtube import (
     YoutubeUserIE,
     YoutubeWatchLaterIE,
 )
-
-from .zdf import ZDFIE
-
+from .zdf import ZDFIE, ZDFChannelIE
+from .zingmp3 import (
+    ZingMp3SongIE,
+    ZingMp3AlbumIE,
+)
 
 _ALL_CLASSES = [
     klass
@@ -508,4 +554,4 @@ def gen_extractors():
 
 def get_info_extractor(ie_name):
     """Returns the info extractor class with the given ie_name"""
-    return globals()[ie_name+'IE']
+    return globals()[ie_name + 'IE']
index 69f89320ce7e30a5fdf65acff01df1668620334f..dc0fb85d6048962505d1d207ae590940d69f52e6 100644 (file)
@@ -11,13 +11,13 @@ class ABCIE(InfoExtractor):
     _VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
 
     _TEST = {
-        'url': 'http://www.abc.net.au/news/2014-07-25/bringing-asylum-seekers-to-australia-would-give/5624716',
-        'md5': 'dad6f8ad011a70d9ddf887ce6d5d0742',
+        'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
+        'md5': 'cb3dd03b18455a661071ee1e28344d9f',
         'info_dict': {
-            'id': '5624716',
+            'id': '5868334',
             'ext': 'mp4',
-            'title': 'Bringing asylum seekers to Australia would give them right to asylum claims: professor',
-            'description': 'md5:ba36fa5e27e5c9251fd929d339aea4af',
+            'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
+            'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
         },
     }
 
index c983ef0f519c303f880f471d357db4b60657ef17..47313fba8735902f964c0cd39992f9962e0f47fb 100644 (file)
@@ -1,4 +1,5 @@
 from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -18,15 +19,14 @@ class AcademicEarthCourseIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        playlist_id = m.group('id')
+        playlist_id = self._match_id(url)
 
         webpage = self._download_webpage(url, playlist_id)
         title = self._html_search_regex(
-            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, u'title')
+            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title')
         description = self._html_search_regex(
             r'<p class="excerpt"[^>]*?>(.*?)</p>',
-            webpage, u'description', fatal=False)
+            webpage, 'description', fatal=False)
         urls = re.findall(
             r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">',
             webpage)
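
This is the first of several extractors in this commit converted from the re.match boilerplate to self._match_id(url). The helper is presumably just the obvious one-liner over the class's own pattern:

    import re

    class InfoExtractor(object):
        _VALID_URL = None  # each extractor supplies its own pattern

        def _match_id(self, url):
            # assumed implementation: _VALID_URL must define an 'id' group
            m = re.match(self._VALID_URL, url)
            assert m
            return m.group('id')

That named-group requirement is presumably also why AddAnimeIE's pattern in the next hunk renames its capture group from `video_id` to `id`.
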
index fcf296057cc807edbdca5ca1effbc9ad50153400..203936e54a3797ae37535022ad02757a925f24d7 100644 (file)
@@ -3,19 +3,19 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_HTTPError,
     compat_str,
     compat_urllib_parse,
     compat_urllib_parse_urlparse,
-
+)
+from ..utils import (
     ExtractorError,
 )
 
 
 class AddAnimeIE(InfoExtractor):
-
-    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<video_id>[\w_]+)(?:.*)'
+    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<id>[\w_]+)(?:.*)'
     _TEST = {
         'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
         'md5': '72954ea10bc979ab5e2eb288b21425a0',
@@ -28,9 +28,9 @@ class AddAnimeIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
+        video_id = self._match_id(url)
+
         try:
-            mobj = re.match(self._VALID_URL, url)
-            video_id = mobj.group('video_id')
             webpage = self._download_webpage(url, video_id)
         except ExtractorError as ee:
             if not isinstance(ee.cause, compat_HTTPError) or \
@@ -48,7 +48,7 @@ class AddAnimeIE(InfoExtractor):
                 r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
                 redir_webpage)
             if av is None:
-                raise ExtractorError(u'Cannot find redirect math task')
+                raise ExtractorError('Cannot find redirect math task')
             av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
 
             parsed_url = compat_urllib_parse_urlparse(url)
index b4b40f2d4f21432f6b12a883513ae00827af00e5..39e4ca296f97a8fe20e65ab7160f9931fee89a67 100644 (file)
 from __future__ import unicode_literals
 
 import re
+import json
 
 from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
 
 class AdultSwimIE(InfoExtractor):
-    _VALID_URL = r'https?://video\.adultswim\.com/(?P<path>.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$'
-    _TEST = {
-        'url': 'http://video.adultswim.com/rick-and-morty/close-rick-counters-of-the-rick-kind.html?x=y#title',
+    _VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?P<is_playlist>playlists/)?(?P<show_path>[^/]+)/(?P<episode_path>[^/?#]+)/?'
+
+    _TESTS = [{
+        'url': 'http://adultswim.com/videos/rick-and-morty/pilot',
         'playlist': [
             {
-                'md5': '4da359ec73b58df4575cd01a610ba5dc',
+                'md5': '247572debc75c7652f253c8daa51a14d',
                 'info_dict': {
-                    'id': '8a250ba1450996e901453d7f02ca02f5',
+                    'id': 'rQxZvXQ4ROaSOqq-or2Mow-0',
                     'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 1',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
+                    'title': 'Rick and Morty - Pilot Part 1',
+                    'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
+                },
             },
             {
-                'md5': 'ffbdf55af9331c509d95350bd0cc1819',
+                'md5': '77b0e037a4b20ec6b98671c4c379f48d',
                 'info_dict': {
-                    'id': '8a250ba1450996e901453d7f4bd102f6',
+                    'id': 'rQxZvXQ4ROaSOqq-or2Mow-3',
                     'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 2',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
-            },
-            {
-                'md5': 'b92409635540304280b4b6c36bd14a0a',
-                'info_dict': {
-                    'id': '8a250ba1450996e901453d7fa73c02f7',
-                    'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 3',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
+                    'title': 'Rick and Morty - Pilot Part 4',
+                    'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
+                },
             },
+        ],
+        'info_dict': {
+            'title': 'Rick and Morty - Pilot',
+            'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
+        }
+    }, {
+        'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/',
+        'playlist': [
             {
-                'md5': 'e8818891d60e47b29cd89d7b0278156d',
+                'md5': '2eb5c06d0f9a1539da3718d897f13ec5',
                 'info_dict': {
-                    'id': '8a250ba1450996e901453d7fc8ba02f8',
+                    'id': '-t8CamQlQ2aYZ49ItZCFog-0',
                     'ext': 'flv',
-                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 4',
-                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
-                    'uploader': 'Rick and Morty',
-                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
-                }
+                    'title': 'American Dad - Putting Francine Out of Business',
+                    'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
+                },
             }
-        ]
-    }
-
-    _video_extensions = {
-        '3500': 'flv',
-        '640': 'mp4',
-        '150': 'mp4',
-        'ipad': 'm3u8',
-        'iphone': 'm3u8'
-    }
-    _video_dimensions = {
-        '3500': (1280, 720),
-        '640': (480, 270),
-        '150': (320, 180)
-    }
+        ],
+        'info_dict': {
+            'title': 'American Dad - Putting Francine Out of Business',
+            'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
+        },
+    }]
+
+    @staticmethod
+    def find_video_info(collection, slug):
+        for video in collection.get('videos'):
+            if video.get('slug') == slug:
+                return video
+
+    @staticmethod
+    def find_collection_by_linkURL(collections, linkURL):
+        for collection in collections:
+            if collection.get('linkURL') == linkURL:
+                return collection
+
+    @staticmethod
+    def find_collection_containing_video(collections, slug):
+        for collection in collections:
+            for video in collection.get('videos'):
+                if video.get('slug') == slug:
+                    return collection, video
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_path = mobj.group('path')
-
-        webpage = self._download_webpage(url, video_path)
-        episode_id = self._html_search_regex(
-            r'<link rel="video_src" href="http://i\.adultswim\.com/adultswim/adultswimtv/tools/swf/viralplayer.swf\?id=([0-9a-f]+?)"\s*/?\s*>',
-            webpage, 'episode_id')
-        title = self._og_search_title(webpage)
-
-        index_url = 'http://asfix.adultswim.com/asfix-svc/episodeSearch/getEpisodesByIDs?networkName=AS&ids=%s' % episode_id
-        idoc = self._download_xml(index_url, title, 'Downloading episode index', 'Unable to download episode index')
-
-        episode_el = idoc.find('.//episode')
-        show_title = episode_el.attrib.get('collectionTitle')
-        episode_title = episode_el.attrib.get('title')
-        thumbnail = episode_el.attrib.get('thumbnailUrl')
-        description = episode_el.find('./description').text.strip()
+        show_path = mobj.group('show_path')
+        episode_path = mobj.group('episode_path')
+        is_playlist = bool(mobj.group('is_playlist'))
+
+        webpage = self._download_webpage(url, episode_path)
+
+        # Extract the value of `bootstrappedData` from the JavaScript in the page.
+        bootstrappedDataJS = self._search_regex(r'var bootstrappedData = ({.*});', webpage, episode_path)
+
+        try:
+            bootstrappedData = json.loads(bootstrappedDataJS)
+        except ValueError as ve:
+            errmsg = '%s: Failed to parse JSON' % episode_path
+            raise ExtractorError(errmsg, cause=ve)
+
+        # Downloading videos from a /videos/playlist/ URL needs to be handled differently.
+        # NOTE: We only download one video (the current one), not the whole playlist
+        if is_playlist:
+            collections = bootstrappedData['playlists']['collections']
+            collection = self.find_collection_by_linkURL(collections, show_path)
+            video_info = self.find_video_info(collection, episode_path)
+
+            show_title = video_info['showTitle']
+            segment_ids = [video_info['videoPlaybackID']]
+        else:
+            collections = bootstrappedData['show']['collections']
+            collection, video_info = self.find_collection_containing_video(collections, episode_path)
+
+            show = bootstrappedData['show']
+            show_title = show['title']
+            segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
+
+        episode_id = video_info['id']
+        episode_title = video_info['title']
+        episode_description = video_info['description']
+        episode_duration = video_info.get('duration')
 
         entries = []
-        segment_els = episode_el.findall('./segments/segment')
+        for part_num, segment_id in enumerate(segment_ids):
+            segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=mobile' % segment_id
 
-        for part_num, segment_el in enumerate(segment_els):
-            segment_id = segment_el.attrib.get('id')
-            segment_title = '%s %s part %d' % (show_title, episode_title, part_num + 1)
-            thumbnail = segment_el.attrib.get('thumbnailUrl')
-            duration = segment_el.attrib.get('duration')
+            segment_title = '%s - %s' % (show_title, episode_title)
+            if len(segment_ids) > 1:
+                segment_title += ' Part %d' % (part_num + 1)
 
-            segment_url = 'http://asfix.adultswim.com/asfix-svc/episodeservices/getCvpPlaylist?networkName=AS&id=%s' % segment_id
             idoc = self._download_xml(
                 segment_url, segment_title,
                 'Downloading segment information', 'Unable to download segment information')
 
+            segment_duration = idoc.find('.//trt').text.strip()
+
             formats = []
             file_els = idoc.findall('.//files/file')
 
             for file_el in file_els:
                 bitrate = file_el.attrib.get('bitrate')
-                type = file_el.attrib.get('type')
-                width, height = self._video_dimensions.get(bitrate, (None, None))
+                ftype = file_el.attrib.get('type')
+
                 formats.append({
-                    'format_id': '%s-%s' % (bitrate, type),
-                    'url': file_el.text,
-                    'ext': self._video_extensions.get(bitrate, 'mp4'),
+                    'format_id': '%s_%s' % (bitrate, ftype),
+                    'url': file_el.text.strip(),
                     # The bitrate may not be a number (for example: 'iphone')
                     'tbr': int(bitrate) if bitrate.isdigit() else None,
-                    'height': height,
-                    'width': width
+                    'quality': 1 if ftype == 'hd' else -1
                 })
 
             self._sort_formats(formats)
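
The new Adult Swim flow scrapes a JSON blob out of a JavaScript assignment instead of a separate XML index. The extraction pattern, greedy regex capture followed by json.loads with the ValueError funneled into ExtractorError, in miniature:

    import json
    import re

    page = 'var bootstrappedData = {"show": {"title": "Rick and Morty"}};'
    raw = re.search(r'var bootstrappedData = ({.*});', page).group(1)
    data = json.loads(raw)        # a ValueError here takes the ExtractorError path
    print(data['show']['title'])  # -> Rick and Morty
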
@@ -126,18 +151,16 @@ class AdultSwimIE(InfoExtractor):
                 'id': segment_id,
                 'title': segment_title,
                 'formats': formats,
-                'uploader': show_title,
-                'thumbnail': thumbnail,
-                'duration': duration,
-                'description': description
+                'duration': segment_duration,
+                'description': episode_description
             })
 
         return {
             '_type': 'playlist',
             'id': episode_id,
-            'display_id': video_path,
+            'display_id': episode_path,
             'entries': entries,
-            'title': '%s %s' % (show_title, episode_title),
-            'description': description,
-            'thumbnail': thumbnail
+            'title': '%s %s' % (show_title, episode_title),
+            'description': episode_description,
+            'duration': episode_duration
         }
index 7bd7978841d06747145feeda56624de84747fcc1..623aeaf3490c30f6fb2a26753a0e1cace3e4a560 100644 (file)
@@ -5,10 +5,9 @@ import re
 import json
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
-    compat_str,
     qualities,
-    determine_ext,
 )
 
 
@@ -22,7 +21,7 @@ class AllocineIE(InfoExtractor):
             'id': '19546517',
             'ext': 'mp4',
             'title': 'Astérix - Le Domaine des Dieux Teaser VF',
-            'description': 'md5:4a754271d9c6f16c72629a8a993ee884',
+            'description': 'md5:abcd09ce503c6560512c14ebfdb720d2',
             'thumbnail': 're:http://.*\.jpg',
         },
     }, {
@@ -75,9 +74,7 @@ class AllocineIE(InfoExtractor):
                     'format_id': format_id,
                     'quality': quality(format_id),
                     'url': v,
-                    'ext': determine_ext(v),
                 })
-
         self._sort_formats(formats)
 
         return {
index 47f8e415777ee21bfa5e001921077f3c9aaa16af..b51eafc45928f8e6ff4ce571763593f71b715583 100644 (file)
@@ -3,7 +3,6 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from .fivemin import FiveMinIE
 
 
 class AolIE(InfoExtractor):
@@ -42,31 +41,30 @@ class AolIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
-
         playlist_id = mobj.group('playlist_id')
-        if playlist_id and not self._downloader.params.get('noplaylist'):
-            self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
+        if not playlist_id or self._downloader.params.get('noplaylist'):
+            return self.url_result('5min:%s' % video_id)
 
-            webpage = self._download_webpage(url, playlist_id)
-            title = self._html_search_regex(
-                r'<h1 class="video-title[^"]*">(.+?)</h1>', webpage, 'title')
-            playlist_html = self._search_regex(
-                r"(?s)<ul\s+class='video-related[^']*'>(.*?)</ul>", webpage,
-                'playlist HTML')
-            entries = [{
-                '_type': 'url',
-                'url': 'aol-video:%s' % m.group('id'),
-                'ie_key': 'Aol',
-            } for m in re.finditer(
-                r"<a\s+href='.*videoid=(?P<id>[0-9]+)'\s+class='video-thumb'>",
-                playlist_html)]
+        self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
 
-            return {
-                '_type': 'playlist',
-                'id': playlist_id,
-                'display_id': mobj.group('playlist_display_id'),
-                'title': title,
-                'entries': entries,
-            }
+        webpage = self._download_webpage(url, playlist_id)
+        title = self._html_search_regex(
+            r'<h1 class="video-title[^"]*">(.+?)</h1>', webpage, 'title')
+        playlist_html = self._search_regex(
+            r"(?s)<ul\s+class='video-related[^']*'>(.*?)</ul>", webpage,
+            'playlist HTML')
+        entries = [{
+            '_type': 'url',
+            'url': 'aol-video:%s' % m.group('id'),
+            'ie_key': 'Aol',
+        } for m in re.finditer(
+            r"<a\s+href='.*videoid=(?P<id>[0-9]+)'\s+class='video-thumb'>",
+            playlist_html)]
 
-        return FiveMinIE._build_result(video_id)
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'display_id': mobj.group('playlist_display_id'),
+            'title': title,
+            'entries': entries,
+        }
index 74860882628017c5ab7a44f22bd9b05286ad556e..15006336faacb0c7f6ab9c24263726776866dbb6 100644 (file)
@@ -1,5 +1,4 @@
-#coding: utf-8
-
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -26,8 +25,7 @@ class AparatIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = self._match_id(url)
 
         # Note: There is an easier-to-parse configuration at
         # http://www.aparat.com/video/video/config/videohash/%video_id
@@ -40,15 +38,15 @@ class AparatIE(InfoExtractor):
         for i, video_url in enumerate(video_urls):
             req = HEADRequest(video_url)
             res = self._request_webpage(
-                req, video_id, note=u'Testing video URL %d' % i, errnote=False)
+                req, video_id, note='Testing video URL %d' % i, errnote=False)
             if res:
                 break
         else:
-            raise ExtractorError(u'No working video URLs found')
+            raise ExtractorError('No working video URLs found')
 
-        title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, u'title')
+        title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title')
         thumbnail = self._search_regex(
-            r'\s+image:\s*"([^"]+)"', webpage, u'thumbnail', fatal=False)
+            r'\s+image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
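
The URL-testing loop above is another for/else: the else branch runs only if the loop completed without a break, i.e. no candidate answered the HEAD probe. The control flow in miniature (the probe is a hypothetical stand-in for HEADRequest):

    def head_ok(video_url):
        return video_url.endswith('b.mp4')  # hypothetical probe

    for video_url in ['http://example.com/a.mp4', 'http://example.com/b.mp4']:
        if head_ok(video_url):
            break
    else:
        raise RuntimeError('No working video URLs found')
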
index 4359b88d1b7057944beb126eb8a1c82dbb818758..7cd0482c75d7157df218071a2e22ce2904d094b6 100644 (file)
@@ -4,8 +4,8 @@ import re
 import json
 
 from .common import InfoExtractor
+from ..compat import compat_urlparse
 from ..utils import (
-    compat_urlparse,
     int_or_none,
 )
 
@@ -70,15 +70,17 @@ class AppleTrailersIE(InfoExtractor):
         uploader_id = mobj.group('company')
 
         playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
+
         def fix_html(s):
             s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
             s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
             # The ' in the onClick attributes are not escaped, it couldn't be parsed
             # like: http://trailers.apple.com/trailers/wb/gravity/
+
             def _clean_json(m):
                 return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
             s = re.sub(self._JSON_RE, _clean_json, s)
-            s = '<html>' + s + u'</html>'
+            s = '<html>%s</html>' % s
             return s
         doc = self._download_xml(playlist_url, movie, transform_source=fix_html)
 
@@ -86,7 +88,7 @@ class AppleTrailersIE(InfoExtractor):
         for li in doc.findall('./div/ul/li'):
             on_click = li.find('.//a').attrib['onClick']
             trailer_info_json = self._search_regex(self._JSON_RE,
-                on_click, 'trailer info')
+                                                   on_click, 'trailer info')
             trailer_info = json.loads(trailer_info_json)
             title = trailer_info['title']
             video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
index 8de9c11eaedcd078b70d2c5bb74f9b40fdb9c46c..967bd865c53229e7ff38997ea9a7f4a6ab19f92d 100644 (file)
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from .generic import GenericIE
 from ..utils import (
     determine_ext,
     ExtractorError,
@@ -12,6 +13,7 @@ from ..utils import (
     parse_duration,
     unified_strdate,
     xpath_text,
+    parse_xml,
 )
 
 
@@ -54,6 +56,11 @@ class ARDMediathekIE(InfoExtractor):
         if '>Der gewünschte Beitrag ist nicht mehr verfügbar.<' in webpage:
             raise ExtractorError('Video %s is no longer available' % video_id, expected=True)
 
+        if re.search(r'[\?&]rss($|[=&])', url):
+            doc = parse_xml(webpage)
+            if doc.tag == 'rss':
+                return GenericIE()._extract_rss(url, video_id, doc)
+
         title = self._html_search_regex(
             [r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
              r'<meta name="dcterms.title" content="(.*?)"/>',
@@ -185,4 +192,3 @@ class ARDIE(InfoExtractor):
             'upload_date': upload_date,
             'thumbnail': thumbnail,
         }
-
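
# The new RSS branch keys off a query-string test before falling back to the
# regular page extraction. The regex matches 'rss' only as a bare parameter
# name, not as a value (illustration of its behaviour):
import re

rss_q = re.compile(r'[\?&]rss($|[=&])')
assert rss_q.search('http://www.ardmediathek.de/tv?rss')
assert rss_q.search('http://www.ardmediathek.de/tv?rss=true&x=1')
assert not rss_q.search('http://www.ardmediathek.de/tv?kw=rss')
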
index c3d02f85e8f023deac51287b72cd45623db72f07..219631b9b0dfa690a37e09a7b3473566543370e2 100644 (file)
@@ -5,16 +5,15 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    ExtractorError,
     find_xpath_attr,
     unified_strdate,
-    determine_ext,
     get_element_by_id,
-    compat_str,
     get_element_by_attribute,
+    int_or_none,
+    qualities,
 )
 
-# There are different sources of video in arte.tv, the extraction process 
+# There are different sources of video in arte.tv, the extraction process
 # is different for each one. The videos usually expire in 7 days, so we can't
 # add tests.
 
@@ -90,92 +89,66 @@ class ArteTVPlus7IE(InfoExtractor):
         if not upload_date_str:
             upload_date_str = player_info.get('VDA', '').split(' ')[0]
 
+        title = player_info['VTI'].strip()
+        subtitle = player_info.get('VSU', '').strip()
+        if subtitle:
+            title += ' - %s' % subtitle
+
         info_dict = {
             'id': player_info['VID'],
-            'title': player_info['VTI'],
+            'title': title,
             'description': player_info.get('VDE'),
             'upload_date': unified_strdate(upload_date_str),
             'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
         }
-
-        all_formats = player_info['VSR'].values()
-        # Some formats use the m3u8 protocol
-        all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
-        def _match_lang(f):
-            if f.get('versionCode') is None:
-                return True
-            # Return true if that format is in the language of the url
-            if lang == 'fr':
-                l = 'F'
-            elif lang == 'de':
-                l = 'A'
-            else:
-                l = lang
-            regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
-            return any(re.match(r, f['versionCode']) for r in regexes)
-        # Some formats may not be in the same language as the url
-        # TODO: Might want not to drop videos that does not match requested language
-        # but to process those formats with lower precedence
-        formats = filter(_match_lang, all_formats)
-        formats = list(formats)  # in python3 filter returns an iterator
-        if not formats:
-            # Some videos are only available in the 'Originalversion'
-            # they aren't tagged as being in French or German
-            # Sometimes there are neither videos of requested lang code
-            # nor original version videos available
-            # For such cases we just take all_formats as is
-            formats = all_formats
-            if not formats:
-                raise ExtractorError('The formats list is empty')
-
-        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
-            def sort_key(f):
-                return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
-        else:
-            def sort_key(f):
-                versionCode = f.get('versionCode')
-                if versionCode is None:
-                    versionCode = ''
-                return (
-                    # Sort first by quality
-                    int(f.get('height', -1)),
-                    int(f.get('bitrate', -1)),
-                    # The original version with subtitles has lower relevance
-                    re.match(r'VO-ST(F|A)', versionCode) is None,
-                    # The version with sourds/mal subtitles has also lower relevance
-                    re.match(r'VO?(F|A)-STM\1', versionCode) is None,
-                    # Prefer http downloads over m3u8
-                    0 if f['url'].endswith('m3u8') else 1,
-                )
-        formats = sorted(formats, key=sort_key)
-        def _format(format_info):
-            quality = ''
-            height = format_info.get('height')
-            if height is not None:
-                quality = compat_str(height)
-            bitrate = format_info.get('bitrate')
-            if bitrate is not None:
-                quality += '-%d' % bitrate
-            if format_info.get('versionCode') is not None:
-                format_id = '%s-%s' % (quality, format_info['versionCode'])
-            else:
-                format_id = quality
-            info = {
+        qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
+
+        formats = []
+        for format_id, format_dict in player_info['VSR'].items():
+            f = dict(format_dict)
+            versionCode = f.get('versionCode')
+
+            langcode = {
+                'fr': 'F',
+                'de': 'A',
+            }.get(lang, lang)
+            lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
+            lang_pref = (
+                None if versionCode is None else (
+                    10 if any(re.match(r, versionCode) for r in lang_rexs)
+                    else -10))
+            source_pref = 0
+            if versionCode is not None:
+                # The original version with subtitles has lower relevance
+                if re.match(r'VO-ST(F|A)', versionCode):
+                    source_pref -= 10
+                # The version with subtitles for the deaf and hard of hearing
+                # (sourds/malentendants) also has lower relevance
+                elif re.match(r'VO?(F|A)-STM\1', versionCode):
+                    source_pref -= 9
+            format = {
                 'format_id': format_id,
-                'format_note': format_info.get('versionLibelle'),
-                'width': format_info.get('width'),
-                'height': height,
+                'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
+                'language_preference': lang_pref,
+                'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
+                'width': int_or_none(f.get('width')),
+                'height': int_or_none(f.get('height')),
+                'tbr': int_or_none(f.get('bitrate')),
+                'quality': qfunc(f['quality']),
+                'source_preference': source_pref,
             }
-            if format_info['mediaType'] == 'rtmp':
-                info['url'] = format_info['streamer']
-                info['play_path'] = 'mp4:' + format_info['url']
-                info['ext'] = 'flv'
+
+            if f.get('mediaType') == 'rtmp':
+                format['url'] = f['streamer']
+                format['play_path'] = 'mp4:' + f['url']
+                format['ext'] = 'flv'
             else:
-                info['url'] = format_info['url']
-                info['ext'] = determine_ext(info['url'])
-            return info
-        info_dict['formats'] = [_format(f) for f in formats]
+                format['url'] = f['url']
+
+            formats.append(format)
+
+        self._sort_formats(formats)
 
+        info_dict['formats'] = formats
         return info_dict
 
 
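
# The arte.tv rework replaces the custom sort_key with declarative preference
# fields consumed by _sort_formats(). The qualities() helper maps an ordered
# list of quality names onto integer ranks; a minimal sketch of how such a
# helper can be written (the real one lives in youtube_dl/utils.py):
def qualities(quality_ids):
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q

qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
assert qfunc('SQ') > qfunc('HQ')  # later entries rank higher
assert qfunc('unknown') == -1     # unknown qualities sort last
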
diff --git a/youtube_dl/extractor/audiomack.py b/youtube_dl/extractor/audiomack.py
new file mode 100644 (file)
index 0000000..622b209
--- /dev/null
@@ -0,0 +1,69 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from .soundcloud import SoundcloudIE
+from ..utils import ExtractorError
+
+import time
+
+
+class AudiomackIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
+    IE_NAME = 'audiomack'
+    _TESTS = [
+        # hosted on audiomack
+        {
+            'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
+            'info_dict':
+            {
+                'id': 'roosh-williams/extraordinary',
+                'ext': 'mp3',
+                'title': 'Roosh Williams - Extraordinary'
+            }
+        },
+        # hosted on soundcloud via audiomack
+        {
+            'add_ie': ['Soundcloud'],
+            'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
+            'info_dict': {
+                'id': '172419696',
+                'ext': 'mp3',
+                'description': 'md5:1fc3272ed7a635cce5be1568c2822997',
+                'title': 'Young Thug ft Lil Wayne - Take Kare',
+                'uploader': 'Young Thug World',
+                'upload_date': '20141016',
+            }
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        api_response = self._download_json(
+            "http://www.audiomack.com/api/music/url/song/%s?_=%d" % (
+                video_id, time.time()),
+            video_id)
+
+        if "url" not in api_response:
+            raise ExtractorError("Unable to deduce api url of song")
+        realurl = api_response["url"]
+
+        # Audiomack wraps a lot of soundcloud tracks in their branded wrapper
+        # - if so, pass the work off to the soundcloud extractor
+        if SoundcloudIE.suitable(realurl):
+            return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}
+
+        webpage = self._download_webpage(url, video_id)
+        artist = self._html_search_regex(
+            r'<span class="artist">(.*?)</span>', webpage, "artist")
+        songtitle = self._html_search_regex(
+            r'<h1 class="profile-title song-title"><span class="artist">.*?</span>(.*?)</h1>',
+            webpage, "title")
+        title = artist + " - " + songtitle
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': realurl,
+        }
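
# The Soundcloud hand-off above returns a '_type': 'url' result, which tells
# the core to re-dispatch extraction to the named extractor. suitable() is
# the standard dispatch test: a match of the extractor's _VALID_URL regex.
# A simplified sketch of that mechanism (not the real implementation, which
# lives in extractor/common.py and caches the compiled regex):
import re

class SketchIE(object):
    _VALID_URL = r'https?://(?:www\.)?soundcloud\.com/[\w-]+/[\w-]+'

    @classmethod
    def suitable(cls, url):
        return re.match(cls._VALID_URL, url) is not None

assert SketchIE.suitable('http://soundcloud.com/young-thug-world/take-kare')
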
index 20bf12550d4b4493982ca3ea6f31578368e31aba..014a219522d5de5ab85415cb8aeca0a93561a409 100644 (file)
@@ -3,8 +3,8 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_urllib_parse
 from ..utils import (
-    compat_urllib_parse,
     determine_ext,
     ExtractorError,
 )
@@ -24,8 +24,7 @@ class AUEngineIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_regex(r'<title>(?P<title>.+?)</title>', webpage, 'title')
diff --git a/youtube_dl/extractor/azubu.py b/youtube_dl/extractor/azubu.py
new file mode 100644 (file)
index 0000000..0961d33
--- /dev/null
@@ -0,0 +1,93 @@
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import float_or_none
+
+
+class AzubuIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?azubu\.tv/[^/]+#!/play/(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1',
+            'md5': 'a88b42fcf844f29ad6035054bd9ecaf4',
+            'info_dict': {
+                'id': '15575',
+                'ext': 'mp4',
+                'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1',
+                'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01',
+                'thumbnail': 're:^https?://.*\.jpe?g',
+                'timestamp': 1417523507.334,
+                'upload_date': '20141202',
+                'duration': 9988.7,
+                'uploader': 'GSL',
+                'uploader_id': 414310,
+                'view_count': int,
+            },
+        },
+        {
+            'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-',
+            'md5': 'b72a871fe1d9f70bd7673769cdb3b925',
+            'info_dict': {
+                'id': '9344',
+                'ext': 'mp4',
+                'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"',
+                'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af',
+                'thumbnail': 're:^https?://.*\.jpe?g',
+                'timestamp': 1410530893.320,
+                'upload_date': '20140912',
+                'duration': 172.385,
+                'uploader': 'FnaticTV',
+                'uploader_id': 272749,
+                'view_count': int,
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        data = self._download_json(
+            'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data']
+
+        title = data['title'].strip()
+        description = data['description']
+        thumbnail = data['thumbnail']
+        view_count = data['view_count']
+        uploader = data['user']['username']
+        uploader_id = data['user']['id']
+
+        stream_params = json.loads(data['stream_params'])
+
+        timestamp = float_or_none(stream_params['creationDate'], 1000)
+        duration = float_or_none(stream_params['length'], 1000)
+
+        renditions = stream_params.get('renditions') or []
+        video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength')
+        if video:
+            renditions.append(video)
+
+        formats = [{
+            'url': fmt['url'],
+            'width': fmt['frameWidth'],
+            'height': fmt['frameHeight'],
+            'vbr': float_or_none(fmt['encodingRate'], 1000),
+            'filesize': fmt['size'],
+            'vcodec': fmt['videoCodec'],
+            'container': fmt['videoContainer'],
+        } for fmt in renditions if fmt['url']]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'duration': duration,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'view_count': view_count,
+            'formats': formats,
+        }
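
# creationDate and length above arrive in milliseconds; float_or_none(v, 1000)
# scales them to seconds while tolerating missing values. A sketch of the
# helper's behaviour (the real one is in youtube_dl/utils.py):
def float_or_none(v, scale=1, invscale=1, default=None):
    return float(v) * invscale / scale if v is not None else default

assert float_or_none(1417523507334, 1000) == 1417523507.334  # ms -> s
assert float_or_none(None, 1000) is None                     # missing field
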
index de5d4faf3b920ddb0f2231f05311b9858dc5ef86..98e1443ab0c3d380737f34be2c67fa760e08a221 100644 (file)
@@ -5,7 +5,7 @@ import json
 import itertools
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
 )
 
@@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor):
     _TEST = {
         'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
         'info_dict': {
             'id': '4050584',
             'ext': 'flv',
@@ -38,7 +38,7 @@ class BambuserIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
-            '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
+                    '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
         info_json = self._download_webpage(info_url, video_id)
         info = json.loads(info_json)['result']
 
@@ -73,10 +73,11 @@ class BambuserChannelIE(InfoExtractor):
         urls = []
         last_id = ''
         for i in itertools.count(1):
-            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
+            req_url = (
+                'http://bambuser.com/xhr-api/index.php?username={user}'
                 '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                 '&method=broadcast&format=json&vid_older_than={last}'
-                ).format(user=user, count=self._STEP, last=last_id)
+            ).format(user=user, count=self._STEP, last=last_id)
             req = compat_urllib_request.Request(req_url)
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
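
# BambuserChannelIE pages through the XHR API with itertools.count() and a
# vid_older_than cursor taken from the last result of each page. The same
# cursor-pagination loop over a stubbed fetch function (hypothetical data,
# for illustration):
import itertools

def fetch_page(older_than):  # stand-in for the Bambuser XHR request
    pages = {'': ['4050584', '4050583'], '4050583': []}
    return pages[older_than]

def all_video_ids():
    ids, last_id = [], ''
    for _ in itertools.count(1):
        page = fetch_page(last_id)
        if not page:
            break
        ids.extend(page)
        last_id = page[-1]
    return ids

assert all_video_ids() == ['4050584', '4050583']
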
index c13446665d2fb0e202973a26f5c7499c325719d1..9fb770cb1562f6efbe004d4b1689bf3aa1eab8d4 100644 (file)
@@ -4,9 +4,11 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
     compat_urlparse,
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -83,12 +85,12 @@ class BandcampIE(InfoExtractor):
         initial_url = mp3_info['url']
         re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
         m_url = re.match(re_url, initial_url)
-        #We build the url we will use to get the final track url
+        # We build the url we will use to get the final track url
         # This url is built in Bandcamp's script download_bunde_*.js
         request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
         final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
         # If we could correctly generate the .rand field the url would be
-        #in the "download_url" key
+        # in the "download_url" key
         final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
 
         return {
@@ -110,20 +112,25 @@ class BandcampAlbumIE(InfoExtractor):
         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
         'playlist': [
             {
-                'file': '1353101989.mp3',
                 'md5': '39bc1eded3476e927c724321ddf116cf',
                 'info_dict': {
+                    'id': '1353101989',
+                    'ext': 'mp3',
                     'title': 'Intro',
                 }
             },
             {
-                'file': '38097443.mp3',
                 'md5': '1a2c32e2691474643e912cc6cd4bffaa',
                 'info_dict': {
+                    'id': '38097443',
+                    'ext': 'mp3',
                     'title': 'Kero One - Keep It Alive (Blazo remix)',
                 }
             },
         ],
+        'info_dict': {
+            'title': 'Jazz Format Mixtape vol.1',
+        },
         'params': {
             'playlistend': 2
         },
index 75e608f99de4ff3cc14234ab370f370b1ae83940..01c02d360cd7255b14aa7aa8259de52e44701884 100644 (file)
@@ -1,9 +1,10 @@
 from __future__ import unicode_literals
 
-import re
+import xml.etree.ElementTree
 
 from .subtitles import SubtitlesInfoExtractor
 from ..utils import ExtractorError
+from ..compat import compat_HTTPError
 
 
 class BBCCoUkIE(SubtitlesInfoExtractor):
@@ -55,7 +56,22 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
                 'skip_download': True,
             },
             'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
-        }
+        },
+        {
+            'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
+            'info_dict': {
+                'id': 'b03k3pb7',
+                'ext': 'flv',
+                'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
+                'description': '2. Invasion',
+                'duration': 3600,
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
+        },
     ]
 
     def _extract_asx_playlist(self, connection, programme_id):
@@ -102,6 +118,10 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
         return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')
 
     def _extract_medias(self, media_selection):
+        error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
+        if error is not None:
+            raise ExtractorError(
+                '%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True)
         return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')
 
     def _extract_connections(self, media):
@@ -158,54 +178,73 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
             subtitles[lang] = srt
         return subtitles
 
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        group_id = mobj.group('id')
-
-        webpage = self._download_webpage(url, group_id, 'Downloading video page')
-        if re.search(r'id="emp-error" class="notinuk">', webpage):
-            raise ExtractorError('Currently BBC iPlayer TV programmes are available to play in the UK only',
-                expected=True)
-
-        playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id,
-            'Downloading playlist XML')
-
-        no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
-        if no_items is not None:
-            reason = no_items.get('reason')
-            if reason == 'preAvailability':
-                msg = 'Episode %s is not yet available' % group_id
-            elif reason == 'postAvailability':
-                msg = 'Episode %s is no longer available' % group_id
+    def _download_media_selector(self, programme_id):
+        try:
+            media_selection = self._download_xml(
+                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
+                programme_id, 'Downloading media selection XML')
+        except ExtractorError as ee:
+            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
+                # ee.cause.read() returns bytes, which fromstring() accepts as-is
+                media_selection = xml.etree.ElementTree.fromstring(ee.cause.read())
             else:
-                msg = 'Episode %s is not available: %s' % (group_id, reason)
-            raise ExtractorError(msg, expected=True)
+                raise
 
         formats = []
         subtitles = None
 
-        for item in self._extract_items(playlist):
-            kind = item.get('kind')
-            if kind != 'programme' and kind != 'radioProgramme':
-                continue
-            title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
-            description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
+        for media in self._extract_medias(media_selection):
+            kind = media.get('kind')
+            if kind == 'audio':
+                formats.extend(self._extract_audio(media, programme_id))
+            elif kind == 'video':
+                formats.extend(self._extract_video(media, programme_id))
+            elif kind == 'captions':
+                subtitles = self._extract_captions(media, programme_id)
 
-            programme_id = item.get('identifier')
-            duration = int(item.get('duration'))
+        return formats, subtitles
 
-            media_selection = self._download_xml(
-                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s'  % programme_id,
-                programme_id, 'Downloading media selection XML')
+    def _real_extract(self, url):
+        group_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, group_id, 'Downloading video page')
 
-            for media in self._extract_medias(media_selection):
-                kind = media.get('kind')
-                if kind == 'audio':
-                    formats.extend(self._extract_audio(media, programme_id))
-                elif kind == 'video':
-                    formats.extend(self._extract_video(media, programme_id))
-                elif kind == 'captions':
-                    subtitles = self._extract_captions(media, programme_id)
+        programme_id = self._search_regex(
+            r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False)
+        if programme_id:
+            player = self._download_json(
+                'http://www.bbc.co.uk/iplayer/episode/%s.json' % group_id,
+                group_id)['jsConf']['player']
+            title = player['title']
+            description = player['subtitle']
+            duration = player['duration']
+            formats, subtitles = self._download_media_selector(programme_id)
+        else:
+            playlist = self._download_xml(
+                'http://www.bbc.co.uk/iplayer/playlist/%s' % group_id,
+                group_id, 'Downloading playlist XML')
+
+            no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
+            if no_items is not None:
+                reason = no_items.get('reason')
+                if reason == 'preAvailability':
+                    msg = 'Episode %s is not yet available' % group_id
+                elif reason == 'postAvailability':
+                    msg = 'Episode %s is no longer available' % group_id
+                elif reason == 'noMedia':
+                    msg = 'Episode %s is not currently available' % group_id
+                else:
+                    msg = 'Episode %s is not available: %s' % (group_id, reason)
+                raise ExtractorError(msg, expected=True)
+
+            for item in self._extract_items(playlist):
+                kind = item.get('kind')
+                if kind != 'programme' and kind != 'radioProgramme':
+                    continue
+                title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
+                description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
+                programme_id = item.get('identifier')
+                duration = int(item.get('duration'))
+                formats, subtitles = self._download_media_selector(programme_id)
 
         if self._downloader.params.get('listsubtitles', False):
             self._list_available_subtitles(programme_id, subtitles)
@@ -220,4 +259,4 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
             'duration': duration,
             'formats': formats,
             'subtitles': subtitles,
-        }
\ No newline at end of file
+        }
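
# _download_media_selector above turns a 403 into usable data: the BBC media
# selector returns its <error> document in the HTTP error body, so the
# extractor parses ee.cause.read() instead of bailing out. The same pattern
# with plain urllib (a sketch, assuming the server sends XML error bodies):
import xml.etree.ElementTree

def fetch_xml(opener, url):
    try:
        body = opener.open(url).read()
    except Exception as err:  # compat_HTTPError in youtube-dl terms
        if getattr(err, 'code', None) == 403:
            body = err.read()  # HTTPError objects expose the response body
        else:
            raise
    return xml.etree.ElementTree.fromstring(body)
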
index 314e37f8bea9d44bc5ffe1545b17d02cc7d13fe2..4e79fea8f0346d8aca19bc0182fd087a78779809 100644 (file)
@@ -40,7 +40,7 @@ class BeegIE(InfoExtractor):
 
         title = self._html_search_regex(
             r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
-        
+
         description = self._html_search_regex(
             r'<meta name="description" content="([^"]*)"',
             webpage, 'description', fatal=False)
index 31fdc0dcc0614babf4ff3b48186566904cfcc57a..1bdc25812b6afb4cf133007f2d12b89fd56b353f 100644 (file)
@@ -10,15 +10,15 @@ from ..utils import url_basename
 class BehindKinkIE(InfoExtractor):
     _VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
     _TEST = {
-        'url': 'http://www.behindkink.com/2014/08/14/ab1576-performers-voice-finally-heard-the-bill-is-killed/',
-        'md5': '41ad01222b8442089a55528fec43ec01',
+        'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/',
+        'md5': '507b57d8fdcd75a41a9a7bdb7989c762',
         'info_dict': {
-            'id': '36370',
+            'id': '37127',
             'ext': 'mp4',
-            'title': 'AB1576 - PERFORMERS VOICE FINALLY HEARD - THE BILL IS KILLED!',
-            'description': 'The adult industry voice was finally heard as Assembly Bill 1576 remained\xa0 in suspense today at the Senate Appropriations Hearing. AB1576 was, among other industry damaging issues, a condom mandate...',
-            'upload_date': '20140814',
-            'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/08/36370_AB1576_Win.jpg',
+            'title': 'What are you passionate about – Marley Blaze',
+            'description': 'md5:aee8e9611b4ff70186f752975d9b94b4',
+            'upload_date': '20141205',
+            'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/12/blaze-1.jpg',
             'age_limit': 18,
         }
     }
@@ -26,26 +26,19 @@ class BehindKinkIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         display_id = mobj.group('id')
-        year = mobj.group('year')
-        month = mobj.group('month')
-        day = mobj.group('day')
-        upload_date = year + month + day
 
         webpage = self._download_webpage(url, display_id)
 
         video_url = self._search_regex(
-            r"'file':\s*'([^']+)'",
-            webpage, 'URL base')
-
-        video_id = url_basename(video_url)
-        video_id = video_id.split('_')[0]
+            r'<source src="([^"]+)"', webpage, 'video URL')
+        video_id = url_basename(video_url).split('_')[0]
+        upload_date = mobj.group('year') + mobj.group('month') + mobj.group('day')
 
         return {
             'id': video_id,
+            'display_id': display_id,
             'url': video_url,
-            'ext': 'mp4',
             'title': self._og_search_title(webpage),
-            'display_id': display_id,
             'thumbnail': self._og_search_thumbnail(webpage),
             'description': self._og_search_description(webpage),
             'upload_date': upload_date,
diff --git a/youtube_dl/extractor/bet.py b/youtube_dl/extractor/bet.py
new file mode 100644 (file)
index 0000000..003e500
--- /dev/null
@@ -0,0 +1,108 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_urllib_parse
+from ..utils import (
+    xpath_text,
+    xpath_with_ns,
+    int_or_none,
+    parse_iso8601,
+)
+
+
+class BetIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?bet\.com/(?:[^/]+/)+(?P<id>.+?)\.html'
+    _TESTS = [
+        {
+            'url': 'http://www.bet.com/news/politics/2014/12/08/in-bet-exclusive-obama-talks-race-and-racism.html',
+            'info_dict': {
+                'id': '417cd61c-c793-4e8e-b006-e445ecc45add',
+                'display_id': 'in-bet-exclusive-obama-talks-race-and-racism',
+                'ext': 'flv',
+                'title': 'BET News Presents: A Conversation With President Obama',
+                'description': 'md5:5a88d8ae912c1b33e090290af7ec33c6',
+                'duration': 1534,
+                'timestamp': 1418075340,
+                'upload_date': '20141208',
+                'uploader': 'admin',
+                'thumbnail': 're:(?i)^https?://.*\.jpg$',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://www.bet.com/video/news/national/2014/justice-for-ferguson-a-community-reacts.html',
+            'info_dict': {
+                'id': '4160e53b-ad41-43b1-980f-8d85f63121f4',
+                'display_id': 'justice-for-ferguson-a-community-reacts',
+                'ext': 'flv',
+                'title': 'Justice for Ferguson: A Community Reacts',
+                'description': 'A BET News special.',
+                'duration': 1696,
+                'timestamp': 1416942360,
+                'upload_date': '20141125',
+                'uploader': 'admin',
+                'thumbnail': 're:(?i)^https?://.*\.jpg$',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        }
+    ]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, display_id)
+
+        media_url = compat_urllib_parse.unquote(self._search_regex(
+            [r'mediaURL\s*:\s*"([^"]+)"', r"var\s+mrssMediaUrl\s*=\s*'([^']+)'"],
+            webpage, 'media URL'))
+
+        mrss = self._download_xml(media_url, display_id)
+
+        item = mrss.find('./channel/item')
+
+        NS_MAP = {
+            'dc': 'http://purl.org/dc/elements/1.1/',
+            'media': 'http://search.yahoo.com/mrss/',
+            'ka': 'http://kickapps.com/karss',
+        }
+
+        title = xpath_text(item, './title', 'title')
+        description = xpath_text(
+            item, './description', 'description', fatal=False)
+
+        video_id = xpath_text(item, './guid', 'video id', fatal=False)
+
+        timestamp = parse_iso8601(xpath_text(
+            item, xpath_with_ns('./dc:date', NS_MAP),
+            'upload date', fatal=False))
+        uploader = xpath_text(
+            item, xpath_with_ns('./dc:creator', NS_MAP),
+            'uploader', fatal=False)
+
+        media_content = item.find(
+            xpath_with_ns('./media:content', NS_MAP))
+        duration = int_or_none(media_content.get('duration'))
+        smil_url = media_content.get('url')
+
+        thumbnail = media_content.find(
+            xpath_with_ns('./media:thumbnail', NS_MAP)).get('url')
+
+        formats = self._extract_smil_formats(smil_url, display_id)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'uploader': uploader,
+            'duration': duration,
+            'formats': formats,
+        }
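
# The MRSS document uses several XML namespaces, so plain './dc:date' paths
# will not work with ElementTree. xpath_with_ns() expands prefixed steps into
# the '{uri}tag' notation ElementTree expects; a minimal sketch of that
# expansion (the real helper is in youtube_dl/utils.py):
def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)

assert xpath_with_ns('./media:content', {'media': 'http://search.yahoo.com/mrss/'}) \
    == './{http://search.yahoo.com/mrss/}content'
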
diff --git a/youtube_dl/extractor/bild.py b/youtube_dl/extractor/bild.py
new file mode 100644 (file)
index 0000000..77b562d
--- /dev/null
@@ -0,0 +1,39 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class BildIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P<display_id>[^/]+)-(?P<id>\d+)(?:,auto=true)?\.bild\.html'
+    IE_DESC = 'Bild.de'
+    _TEST = {
+        'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html',
+        'md5': 'dd495cbd99f2413502a1713a1156ac8a',
+        'info_dict': {
+            'id': '38184146',
+            'ext': 'mp4',
+            'title': 'BILD hat sie getestet',
+            'thumbnail': 'http://bilder.bild.de/fotos/stand-das-koennen-die-neuen-ipads-38184138/Bild/1.bild.jpg',
+            'duration': 196,
+            'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
+        doc = self._download_xml(xml_url, video_id)
+
+        duration = int_or_none(doc.attrib.get('duration'), scale=1000)
+
+        return {
+            'id': video_id,
+            'title': doc.attrib['ueberschrift'],
+            'description': doc.attrib.get('text'),
+            'url': doc.attrib['src'],
+            'thumbnail': doc.attrib.get('img'),
+            'duration': duration,
+        }
index 0d5889f5d17c17ffa75eeca1f1079efd7f9c2b8f..241b904a9e57f7cc3e61b6f086550578feb71b05 100644 (file)
@@ -4,8 +4,8 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_parse_qs
 from ..utils import (
-    compat_parse_qs,
     ExtractorError,
     int_or_none,
     unified_strdate,
@@ -29,10 +29,9 @@ class BiliBiliIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
+
         video_code = self._search_regex(
             r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code')
 
index 57d17bea349a302efb0c811bc35f2158313f326a..14b814120be3b8215a28fc00a95f87bd22e0c062 100644 (file)
@@ -4,13 +4,17 @@ import re
 
 from .common import InfoExtractor
 from .subtitles import SubtitlesInfoExtractor
-from ..utils import (
+
+from ..compat import (
+    compat_str,
     compat_urllib_request,
-    unescapeHTML,
-    parse_iso8601,
     compat_urlparse,
+)
+from ..utils import (
     clean_html,
-    compat_str,
+    int_or_none,
+    parse_iso8601,
+    unescapeHTML,
 )
 
 
@@ -64,20 +68,55 @@ class BlipTVIE(SubtitlesInfoExtractor):
                 'uploader': 'redvsblue',
                 'uploader_id': '792887',
             }
-        }
+        },
+        {
+            'url': 'http://blip.tv/play/gbk766dkj4Yn',
+            'md5': 'fe0a33f022d49399a241e84a8ea8b8e3',
+            'info_dict': {
+                'id': '1749452',
+                'ext': 'mp4',
+                'upload_date': '20090208',
+                'description': 'Witness the first appearance of the Nostalgia Critic character, as Doug reviews the movie Transformers.',
+                'title': 'Nostalgia Critic: Transformers',
+                'timestamp': 1234068723,
+                'uploader': 'NostalgiaCritic',
+                'uploader_id': '246467',
+            }
+        },
+        {
+            # https://github.com/rg3/youtube-dl/pull/4404
+            'note': 'Audio only',
+            'url': 'http://blip.tv/hilarios-productions/weekly-manga-recap-kingdom-7119982',
+            'md5': '76c0a56f24e769ceaab21fbb6416a351',
+            'info_dict': {
+                'id': '7103299',
+                'ext': 'flv',
+                'title': 'Weekly Manga Recap: Kingdom',
+                'description': 'And then Shin breaks the enemy line, and he&apos;s all like HWAH! And then he slices a guy and it&apos;s all like FWASHING! And... it&apos;s really hard to describe the best parts of this series without breaking down into sound effects, okay?',
+                'timestamp': 1417660321,
+                'upload_date': '20141204',
+                'uploader': 'The Rollo T',
+                'uploader_id': '407429',
+                'duration': 7251,
+                'vcodec': 'none',
+            }
+        },
     ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         lookup_id = mobj.group('lookup_id')
 
-        # See https://github.com/rg3/youtube-dl/issues/857
+        # See https://github.com/rg3/youtube-dl/issues/857 and
+        # https://github.com/rg3/youtube-dl/issues/4197
         if lookup_id:
-            info_page = self._download_webpage(
-                'http://blip.tv/play/%s.x?p=1' % lookup_id, lookup_id, 'Resolving lookup id')
-            video_id = self._search_regex(r'data-episode-id="([0-9]+)', info_page, 'video_id')
-        else:
-            video_id = mobj.group('id')
+            urlh = self._request_webpage(
+                'http://blip.tv/play/%s' % lookup_id, lookup_id, 'Resolving lookup id')
+            url = compat_urlparse.urlparse(urlh.geturl())
+            qs = compat_urlparse.parse_qs(url.query)
+            mobj = re.match(self._VALID_URL, qs['file'][0])
+
+        video_id = mobj.group('id')
 
         rss = self._download_xml('http://blip.tv/rss/flash/%s' % video_id, video_id, 'Downloading video RSS')
 
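
# The lookup-id fix above stops scraping the player page: it follows blip.tv's
# redirect and reads the canonical URL back out of the ?file= query parameter.
# The query-string step in isolation (hypothetical redirect target):
try:
    import urllib.parse as compat_urlparse  # Python 3
except ImportError:
    import urlparse as compat_urlparse      # Python 2

redirect = 'http://blip.tv/play/x?file=http%3A%2F%2Fblip.tv%2Fshow%2Fepisode-1749452'
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(redirect).query)
assert qs['file'][0] == 'http://blip.tv/show/episode-1749452'
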
@@ -113,7 +152,7 @@ class BlipTVIE(SubtitlesInfoExtractor):
             msg = self._download_webpage(
                 url + '?showplayer=20140425131715&referrer=http://blip.tv&mask=7&skin=flashvars&view=url',
                 video_id, 'Resolving URL for %s' % role)
-            real_url = compat_urlparse.parse_qs(msg)['message'][0]
+            real_url = compat_urlparse.parse_qs(msg.strip())['message'][0]
 
             media_type = media_content.get('type')
             if media_type == 'text/srt' or url.endswith('.srt'):
@@ -128,11 +167,11 @@ class BlipTVIE(SubtitlesInfoExtractor):
                     'url': real_url,
                     'format_id': role,
                     'format_note': media_type,
-                    'vcodec': media_content.get(blip('vcodec')),
+                    'vcodec': media_content.get(blip('vcodec')) or 'none',
                     'acodec': media_content.get(blip('acodec')),
                     'filesize': media_content.get('filesize'),
-                    'width': int(media_content.get('width')),
-                    'height': int(media_content.get('height')),
+                    'width': int_or_none(media_content.get('width')),
+                    'height': int_or_none(media_content.get('height')),
                 })
         self._sort_formats(formats)
 
@@ -165,9 +204,17 @@ class BlipTVIE(SubtitlesInfoExtractor):
 
 
 class BlipTVUserIE(InfoExtractor):
-    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
+    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
     _PAGE_SIZE = 12
     IE_NAME = 'blip.tv:user'
+    _TEST = {
+        'url': 'http://blip.tv/actone',
+        'info_dict': {
+            'id': 'actone',
+            'title': 'Act One: The Series',
+        },
+        'playlist_count': 5,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -178,6 +225,7 @@ class BlipTVUserIE(InfoExtractor):
         page = self._download_webpage(url, username, 'Downloading user page')
         mobj = re.search(r'data-users-id="([^"]+)"', page)
         page_base = page_base % mobj.group(1)
+        title = self._og_search_title(page)
 
         # Download video ids using BlipTV Ajax calls. Result size per
         # query is limited (currently to 12 videos) so we need to query
@@ -214,4 +262,5 @@ class BlipTVUserIE(InfoExtractor):
 
         urls = ['http://blip.tv/%s' % video_id for video_id in video_ids]
         url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
-        return [self.playlist_result(url_entries, playlist_title=username)]
+        return self.playlist_result(
+            url_entries, playlist_title=title, playlist_id=username)
diff --git a/youtube_dl/extractor/bpb.py b/youtube_dl/extractor/bpb.py
new file mode 100644 (file)
index 0000000..510813f
--- /dev/null
@@ -0,0 +1,37 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class BpbIE(InfoExtractor):
+    IE_DESC = 'Bundeszentrale für politische Bildung'
+    _VALID_URL = r'http://www\.bpb\.de/mediathek/(?P<id>[0-9]+)/'
+
+    _TEST = {
+        'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr',
+        'md5': '0792086e8e2bfbac9cdf27835d5f2093',
+        'info_dict': {
+            'id': '297',
+            'ext': 'mp4',
+            'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR',
+            'description': 'Joachim Gauck, erster Beauftragter für die Stasi-Unterlagen, spricht auf dem Geschichtsforum über die friedliche Revolution 1989 und eine "gewisse Traurigkeit" im Umgang mit der DDR-Vergangenheit.'
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(
+            r'<h2 class="white">(.*?)</h2>', webpage, 'title')
+        video_url = self._html_search_regex(
+            r'(http://film\.bpb\.de/player/dokument_[0-9]+\.mp4)',
+            webpage, 'video URL')
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': self._og_search_description(webpage),
+        }
index 2e277c8c3c28af872750bc8108db56b1382fd992..45ba5173246575ab617dbab911280b75d61d61e8 100644 (file)
@@ -1,8 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
index 2c0e5eea2e0285ffce1b89340e9ae8894260f866..4bcc897c95229ea0ee509fe53443d355309a66aa 100644 (file)
@@ -14,7 +14,6 @@ class BreakIE(InfoExtractor):
     _VALID_URL = r'http://(?:www\.)?break\.com/video/(?:[^/]+/)*.+-(?P<id>\d+)'
     _TESTS = [{
         'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
-        'md5': '33aa4ff477ecd124d18d7b5d23b87ce5',
         'info_dict': {
             'id': '2468056',
             'ext': 'mp4',
index 294670386256dc45a071544345e259cbf545e7c7..1eca0047076f422bb77329e5b83657df44002738 100644 (file)
@@ -6,24 +6,26 @@ import json
 import xml.etree.ElementTree
 
 from .common import InfoExtractor
-from ..utils import (
-    compat_urllib_parse,
-    find_xpath_attr,
-    fix_xml_ampersands,
-    compat_urlparse,
+from ..compat import (
+    compat_parse_qs,
     compat_str,
+    compat_urllib_parse,
+    compat_urllib_parse_urlparse,
     compat_urllib_request,
-    compat_parse_qs,
-
+    compat_urlparse,
+)
+from ..utils import (
     determine_ext,
     ExtractorError,
-    unsmuggle_url,
+    find_xpath_attr,
+    fix_xml_ampersands,
     unescapeHTML,
+    unsmuggle_url,
 )
 
 
 class BrightcoveIE(InfoExtractor):
-    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
+    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*?\?(?P<query>.*)'
     _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
 
     _TESTS = [
@@ -87,6 +89,15 @@ class BrightcoveIE(InfoExtractor):
                 'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
             },
         },
+        {
+            # playlist test
+            # from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
+            'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
+            'info_dict': {
+                'title': 'Sealife',
+            },
+            'playlist_mincount': 7,
+        },
     ]
 
     @classmethod
@@ -101,6 +112,8 @@ class BrightcoveIE(InfoExtractor):
                             lambda m: m.group(1) + '/>', object_str)
         # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
         object_str = object_str.replace('<--', '<!--')
+        # remove namespace to simplify extraction
+        object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
         object_str = fix_xml_ampersands(object_str)
 
         object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
@@ -209,7 +222,7 @@ class BrightcoveIE(InfoExtractor):
         webpage = self._download_webpage(req, video_id)
 
         error_msg = self._html_search_regex(
-            r"<h1>We're sorry.</h1>\s*<p>(.*?)</p>", webpage,
+            r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
             'error message', default=None)
         if error_msg is not None:
             raise ExtractorError(
@@ -251,12 +264,21 @@ class BrightcoveIE(InfoExtractor):
             formats = []
             for rend in renditions:
                 url = rend['defaultURL']
+                if not url:
+                    continue
+                ext = None
                 if rend['remote']:
-                    # This type of renditions are served through akamaihd.net,
-                    # but they don't use f4m manifests
-                    url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
-                    ext = 'flv'
-                else:
+                    url_comp = compat_urllib_parse_urlparse(url)
+                    if url_comp.path.endswith('.m3u8'):
+                        formats.extend(
+                            self._extract_m3u8_formats(url, info['id'], 'mp4'))
+                        continue
+                    elif 'akamaihd.net' in url_comp.netloc:
+                        # These renditions are served through akamaihd.net,
+                        # but they don't use f4m manifests
+                        url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
+                        ext = 'flv'
+                if ext is None:
                     ext = determine_ext(url)
                 size = rend.get('size')
                 formats.append({
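
# The rendition loop now dispatches on the URL itself: remote m3u8 renditions
# go through the HLS extractor, akamaihd.net renditions keep the flv query
# fix-up, and everything else falls through to extension detection. The
# classification step in isolation (a sketch with stubbed inputs):
try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse      # Python 2

def classify_rendition(url):
    parts = urlparse(url)
    if parts.path.endswith('.m3u8'):
        return 'hls'
    if 'akamaihd.net' in parts.netloc:
        return 'akamai-flv'
    return 'progressive'

assert classify_rendition('http://host/stream/master.m3u8?p=1') == 'hls'
assert classify_rendition('http://cp1.akamaihd.net/control/video.mp4') == 'akamai-flv'
assert classify_rendition('http://host/video.mp4') == 'progressive'
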
diff --git a/youtube_dl/extractor/buzzfeed.py b/youtube_dl/extractor/buzzfeed.py
new file mode 100644 (file)
index 0000000..a40a1bb
--- /dev/null
@@ -0,0 +1,74 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+
+
+class BuzzFeedIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?buzzfeed\.com/[^?#]*?/(?P<id>[^?#]+)'
+    _TESTS = [{
+        'url': 'http://www.buzzfeed.com/abagg/this-angry-ram-destroys-a-punching-bag-like-a-boss?utm_term=4ldqpia',
+        'info_dict': {
+            'id': 'this-angry-ram-destroys-a-punching-bag-like-a-boss',
+            'title': 'This Angry Ram Destroys A Punching Bag Like A Boss',
+            'description': 'Rambro!',
+        },
+        'playlist': [{
+            'info_dict': {
+                'id': 'aVCR29aE_OQ',
+                'ext': 'mp4',
+                'upload_date': '20141024',
+                'uploader_id': 'Buddhanz1',
+                'description': 'He likes to stay in shape with his heavy bag, he wont stop until its on the ground\n\nFollow Angry Ram on Facebook for regular updates -\nhttps://www.facebook.com/pages/Angry-Ram/1436897249899558?ref=hl',
+                'uploader': 'Buddhanz',
+                'title': 'Angry Ram destroys a punching bag',
+            }
+        }]
+    }, {
+        'url': 'http://www.buzzfeed.com/sheridanwatson/look-at-this-cute-dog-omg?utm_term=4ldqpia',
+        'params': {
+            'skip_download': True,  # Got enough YouTube download tests
+        },
+        'info_dict': {
+            'description': 'Munchkin the Teddy Bear is back !',
+            'title': 'You Need To Stop What You\'re Doing And Watching This Dog Walk On A Treadmill',
+        },
+        'playlist': [{
+            'info_dict': {
+                'id': 'mVmBL8B-In0',
+                'ext': 'mp4',
+                'upload_date': '20141124',
+                'uploader_id': 'CindysMunchkin',
+                'description': '© 2014 Munchkin the Shih Tzu\nAll rights reserved\nFacebook: http://facebook.com/MunchkintheShihTzu',
+                'uploader': 'Munchkin the Shih Tzu',
+                'title': 'Munchkin the Teddy Bear gets her exercise',
+            },
+        }]
+    }]
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+        webpage = self._download_webpage(url, playlist_id)
+
+        all_buckets = re.findall(
+            r'(?s)<div class="video-embed[^"]*"..*?rel:bf_bucket_data=\'([^\']+)\'',
+            webpage)
+
+        entries = []
+        for bd_json in all_buckets:
+            bd = json.loads(bd_json)
+            video = bd.get('video') or bd.get('progload_video')
+            if not video:
+                continue
+            entries.append(self.url_result(video['url']))
+
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'title': self._og_search_title(webpage),
+            'description': self._og_search_description(webpage),
+            'entries': entries,
+        }
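
# BuzzFeed embeds one JSON blob per video in a rel:bf_bucket_data attribute;
# the extractor decodes each blob and defers to url_result() for the actual
# video host. The regex-plus-json step over a stubbed page (hypothetical
# markup, for illustration):
import json
import re

PAGE = ('<div class="video-embed large" rel:bf_bucket_data='
        '\'{"video": {"url": "https://www.youtube.com/watch?v=aVCR29aE_OQ"}}\'></div>')

entries = []
for bd_json in re.findall(r"rel:bf_bucket_data='([^']+)'", PAGE):
    bd = json.loads(bd_json)
    video = bd.get('video') or bd.get('progload_video')
    if video:
        entries.append(video['url'])

assert entries == ['https://www.youtube.com/watch?v=aVCR29aE_OQ']
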
index cf19b7b0cf952c3b14d9ef5b91f541332d3e5e69..6252be05b7f4b57787152b4edae5378675a96847 100644 (file)
@@ -10,12 +10,12 @@ from ..utils import ExtractorError
 class BYUtvIE(InfoExtractor):
     _VALID_URL = r'^https?://(?:www\.)?byutv.org/watch/[0-9a-f-]+/(?P<video_id>[^/?#]+)'
     _TEST = {
-        'url': 'http://www.byutv.org/watch/44e80f7b-e3ba-43ba-8c51-b1fd96c94a79/granite-flats-talking',
+        'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
         'info_dict': {
-            'id': 'granite-flats-talking',
+            'id': 'studio-c-season-5-episode-5',
             'ext': 'mp4',
-            'description': 'md5:4e9a7ce60f209a33eca0ac65b4918e1c',
-            'title': 'Talking',
+            'description': 'md5:5438d33774b6bdc662f9485a340401cc',
+            'title': 'Season 5 Episode 5',
             'thumbnail': 're:^https?://.*promo.*'
         },
         'params': {
index 0202078b0cdcef31f300e1419eed6fc38fa7b424..9873728df6f3bb1adbfddc1959aa5e7e70241f5b 100644 (file)
@@ -7,15 +7,21 @@ from .common import InfoExtractor
 from ..utils import (
     unified_strdate,
     url_basename,
+    qualities,
 )
 
 
 class CanalplusIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
-    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s'
-    IE_NAME = 'canalplus.fr'
+    IE_DESC = 'canalplus.fr, piwiplus.fr and d8.tv'
+    _VALID_URL = r'https?://(?:www\.(?P<site>canalplus\.fr|piwiplus\.fr|d8\.tv)/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
+    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s'
+    _SITE_ID_MAP = {
+        'canalplus.fr': 'cplus',
+        'piwiplus.fr': 'teletoon',
+        'd8.tv': 'd8',
+    }
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
         'md5': '3db39fb48b9685438ecf33a1078023e4',
         'info_dict': {
@@ -25,36 +31,73 @@ class CanalplusIE(InfoExtractor):
             'description': 'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
             'upload_date': '20130826',
         },
-    }
+    }, {
+        'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
+        'info_dict': {
+            'id': '1108190',
+            'ext': 'flv',
+            'title': 'Le labyrinthe - Boing super ranger',
+            'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff',
+            'upload_date': '20140724',
+        },
+        'skip': 'Only works from France',
+    }, {
+        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
+        'info_dict': {
+            'id': '966289',
+            'ext': 'flv',
+            'title': 'Campagne intime - Documentaire exceptionnel',
+            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
+            'upload_date': '20131108',
+        },
+        'skip': 'videos get deleted after a while',
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.groupdict().get('id')
 
+        # player.canalplus.fr URLs do not set the site group; default to canalplus.fr
+        site_id = self._SITE_ID_MAP[mobj.group('site') or 'canalplus.fr']
+
         # Beware, some subclasses do not define an id group
         display_id = url_basename(mobj.group('path'))
 
         if video_id is None:
             webpage = self._download_webpage(url, display_id)
-            video_id = self._search_regex(r'<canal:player videoId="(\d+)"', webpage, 'video id')
+            video_id = self._search_regex(
+                r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id')
 
-        info_url = self._VIDEO_INFO_TEMPLATE % video_id
+        info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
         doc = self._download_xml(info_url, video_id, 'Downloading video XML')
 
         video_info = [video for video in doc if video.find('ID').text == video_id][0]
         media = video_info.find('MEDIA')
         infos = video_info.find('INFOS')
 
-        preferences = ['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS']
+        preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
 
-        formats = [
-            {
-                'url': fmt.text + '?hdcore=2.11.3' if fmt.tag == 'HDS' else fmt.text,
-                'format_id': fmt.tag,
-                'ext': 'mp4' if fmt.tag == 'HLS' else 'flv',
-                'preference': preferences.index(fmt.tag) if fmt.tag in preferences else -1,
-            } for fmt in media.find('VIDEOS') if fmt.text
-        ]
+        formats = []
+        for fmt in media.find('VIDEOS'):
+            format_url = fmt.text
+            if not format_url:
+                continue
+            format_id = fmt.tag
+            if format_id == 'HLS':
+                hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv')
+                for fmt in hls_formats:
+                    fmt['preference'] = preference(format_id)
+                formats.extend(hls_formats)
+            elif format_id == 'HDS':
+                hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id)
+                for fmt in hds_formats:
+                    fmt['preference'] = preference(format_id)
+                formats.extend(hds_formats)
+            else:
+                formats.append({
+                    'url': format_url,
+                    'format_id': format_id,
+                    'preference': preference(format_id),
+                })
         self._sort_formats(formats)
 
         return {
@@ -69,4 +112,4 @@ class CanalplusIE(InfoExtractor):
             'like_count': int(infos.find('NB_LIKES').text),
             'comment_count': int(infos.find('NB_COMMENTS').text),
             'formats': formats,
-        }
\ No newline at end of file
+        }
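Note: the qualities helper imported from youtube_dl.utils replaces the old manual preferences.index() lookup. A minimal sketch of its behaviour, assuming the utils implementation simply indexes into the preference list (unknown tags fall back to -1):

    def qualities(quality_ids):
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
    preference('HDS')      # 5 -> highest preference
    preference('UNKNOWN')  # -1 -> below default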
index db48dc24fa2a4698ac0dc8a28033b8cca51d3d44..e43756ec69b1d7f1872e45cc6901b41752ec6ef6 100644 (file)
@@ -45,4 +45,4 @@ class CBSIE(InfoExtractor):
         real_id = self._search_regex(
             r"video\.settings\.pid\s*=\s*'([^']+)';",
             webpage, 'real video ID')
-        return self.url_result(u'theplatform:%s' % real_id)
+        return self.url_result('theplatform:%s' % real_id)
index 0bce7937f830c1bc8178b88f95d619693edf7ae3..7e47960ab08f3ffba7a1e596b34c2fbfc7fd6c59 100644 (file)
@@ -84,4 +84,4 @@ class CBSNewsIE(InfoExtractor):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 90a3dddb9b79a875c57942a8dcb58951aa973090..2f866f3ef925c8402f00a3c0f922cf530eaa2010 100644 (file)
@@ -4,10 +4,12 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
     compat_urllib_parse,
     compat_urllib_parse_urlparse,
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -92,7 +94,7 @@ class CeskaTelevizeIE(InfoExtractor):
         req.add_header('Referer', url)
 
         playlist = self._download_xml(req, video_id)
-        
+
         formats = []
         for i in playlist.find('smilRoot/body'):
             if 'AD' not in i.attrib['id']:
index 4f000292b7c4273c40df11252852986df08f5e01..3dfc24f5ba447ea92858e89868ad3684caf3a6d2 100644 (file)
@@ -5,6 +5,7 @@ import re
 from .common import InfoExtractor
 from ..utils import ExtractorError
 
+
 class Channel9IE(InfoExtractor):
     '''
     Common extractor for channel9.msdn.com.
@@ -27,11 +28,11 @@ class Channel9IE(InfoExtractor):
                 'title': 'Developer Kick-Off Session: Stuff We Love',
                 'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
                 'duration': 4576,
-                'thumbnail': 'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
+                'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
                 'session_code': 'KOS002',
                 'session_day': 'Day 1',
                 'session_room': 'Arena 1A',
-                'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
+                'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
             },
         },
         {
@@ -43,8 +44,8 @@ class Channel9IE(InfoExtractor):
                 'title': 'Self-service BI with Power BI - nuclear testing',
                 'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
                 'duration': 1540,
-                'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
-                'authors': [ 'Mike Wilmot' ],
+                'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
+                'authors': ['Mike Wilmot'],
             },
         }
     ]
@@ -83,7 +84,7 @@ class Channel9IE(InfoExtractor):
             'format_id': x.group('quality'),
             'format_note': x.group('note'),
             'format': '%s (%s)' % (x.group('quality'), x.group('note')),
-            'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
+            'filesize': self._restore_bytes(x.group('filesize')),  # File size is approximate
             'preference': self._known_formats.index(x.group('quality')),
             'vcodec': 'none' if x.group('note') == 'Audio only' else None,
         } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
@@ -94,7 +95,7 @@ class Channel9IE(InfoExtractor):
 
     def _extract_title(self, html):
         title = self._html_search_meta('title', html, 'title')
-        if title is None:           
+        if title is None:
             title = self._og_search_title(html)
             TITLE_SUFFIX = ' (Channel 9)'
             if title is not None and title.endswith(TITLE_SUFFIX):
@@ -115,7 +116,7 @@ class Channel9IE(InfoExtractor):
         return self._html_search_meta('description', html, 'description')
 
     def _extract_duration(self, html):
-        m = re.search(r'data-video_duration="(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
+        m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
         return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None
 
     def _extract_slides(self, html):
@@ -167,7 +168,7 @@ class Channel9IE(InfoExtractor):
         return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html)
 
     def _extract_content(self, html, content_path):
-        # Look for downloadable content        
+        # Look for downloadable content
         formats = self._formats_from_html(html)
         slides = self._extract_slides(html)
         zip_ = self._extract_zip(html)
@@ -187,32 +188,33 @@ class Channel9IE(InfoExtractor):
         view_count = self._extract_view_count(html)
         comment_count = self._extract_comment_count(html)
 
-        common = {'_type': 'video',
-                  'id': content_path,
-                  'description': description,
-                  'thumbnail': thumbnail,
-                  'duration': duration,
-                  'avg_rating': avg_rating,
-                  'rating_count': rating_count,
-                  'view_count': view_count,
-                  'comment_count': comment_count,
-                }
+        common = {
+            '_type': 'video',
+            'id': content_path,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'avg_rating': avg_rating,
+            'rating_count': rating_count,
+            'view_count': view_count,
+            'comment_count': comment_count,
+        }
 
         result = []
 
         if slides is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Slides', 'url': slides })
+            d.update({'title': title + '-Slides', 'url': slides})
             result.append(d)
 
         if zip_ is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Zip', 'url': zip_ })
+            d.update({'title': title + '-Zip', 'url': zip_})
             result.append(d)
 
         if len(formats) > 0:
             d = common.copy()
-            d.update({ 'title': title, 'formats': formats })
+            d.update({'title': title, 'formats': formats})
             result.append(d)
 
         return result
@@ -234,16 +236,17 @@ class Channel9IE(InfoExtractor):
         if contents is None:
             return contents
 
-        session_meta = {'session_code': self._extract_session_code(html),
-                        'session_day': self._extract_session_day(html),
-                        'session_room': self._extract_session_room(html),
-                        'session_speakers': self._extract_session_speakers(html),
-                        }
+        session_meta = {
+            'session_code': self._extract_session_code(html),
+            'session_day': self._extract_session_day(html),
+            'session_room': self._extract_session_room(html),
+            'session_speakers': self._extract_session_speakers(html),
+        }
 
         for content in contents:
             content.update(session_meta)
 
-        return contents
+        return self.playlist_result(contents)
 
     def _extract_list(self, content_path):
         rss = self._download_xml(self._RSS_URL % content_path, content_path, 'Downloading RSS')
@@ -258,16 +261,17 @@ class Channel9IE(InfoExtractor):
 
         webpage = self._download_webpage(url, content_path, 'Downloading web page')
 
-        page_type_m = re.search(r'<meta name="Search.PageType" content="(?P<pagetype>[^"]+)"/>', webpage)
-        if page_type_m is None:
-            raise ExtractorError('Search.PageType not found, don\'t know how to process this page', expected=True)
-
-        page_type = page_type_m.group('pagetype')
-        if page_type == 'List':         # List page, may contain list of 'item'-like objects
+        page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
+        if page_type_m is not None:
+            page_type = page_type_m.group('pagetype')
+            if page_type == 'Entry':      # Any 'item'-like page, may contain downloadable content
+                return self._extract_entry_item(webpage, content_path)
+            elif page_type == 'Session':  # Event session page, may contain downloadable content
+                return self._extract_session(webpage, content_path)
+            elif page_type == 'Event':
+                return self._extract_list(content_path)
+            else:
+                raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
+
+        else:  # Assuming list
             return self._extract_list(content_path)
-        elif page_type == 'Entry.Item': # Any 'item'-like page, may contain downloadable content
-            return self._extract_entry_item(webpage, content_path)
-        elif page_type == 'Session':    # Event session page, may contain downloadable content
-            return self._extract_session(webpage, content_path)
-        else:
-            raise ExtractorError('Unexpected Search.PageType %s' % page_type, expected=True)
\ No newline at end of file
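For reference, the new dispatch keys off the first colon-separated token of the WT.entryid meta tag instead of Search.PageType. A small demonstration against a hypothetical meta tag:

    import re

    webpage = '<meta name="WT.entryid" content="Entry:12345:some-slug"/>'  # hypothetical
    m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
    print(m.group('pagetype'))  # 'Entry' -> routed to _extract_entry_item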
diff --git a/youtube_dl/extractor/cinchcast.py b/youtube_dl/extractor/cinchcast.py
new file mode 100644 (file)
index 0000000..0c9a24b
--- /dev/null
@@ -0,0 +1,52 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    unified_strdate,
+    xpath_text,
+)
+
+
+class CinchcastIE(InfoExtractor):
+    _VALID_URL = r'https?://player\.cinchcast\.com/.*?assetId=(?P<id>[0-9]+)'
+    _TEST = {
+        # Actual test is run in generic, look for undergroundwellness
+        'url': 'http://player.cinchcast.com/?platformId=1&#038;assetType=single&#038;assetId=7141703',
+        'only_matching': True,
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        doc = self._download_xml(
+            'http://www.blogtalkradio.com/playerasset/mrss?assetType=single&assetId=%s' % video_id,
+            video_id)
+
+        item = doc.find('.//item')
+        title = xpath_text(item, './title', fatal=True)
+        date_str = xpath_text(
+            item, './{http://developer.longtailvideo.com/trac/}date')
+        upload_date = unified_strdate(date_str, day_first=False)
+        # duration is present but wrong
+        formats = []
+        formats.append({
+            'format_id': 'main',
+            'url': item.find(
+                './{http://search.yahoo.com/mrss/}content').attrib['url'],
+        })
+        backup_url = xpath_text(
+            item, './{http://developer.longtailvideo.com/trac/}backupContent')
+        if backup_url:
+            formats.append({
+                'preference': 2,  # seems to be more reliable
+                'format_id': 'backup',
+                'url': backup_url,
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'upload_date': upload_date,
+            'formats': formats,
+        }
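The new extractor leans on the xpath_text helper. A minimal sketch of its behaviour (not the exact youtube_dl.utils implementation, which raises ExtractorError): return the text of the first matching element, None when absent, and raise when fatal=True:

    import xml.etree.ElementTree as ET

    def xpath_text(node, xpath, name=None, fatal=False):
        n = node.find(xpath)
        if n is None or n.text is None:
            if fatal:
                raise ValueError('Could not find XML element %s' % (name or xpath))
            return None
        return n.text

    item = ET.fromstring('<item><title>Episode 1</title></item>')
    xpath_text(item, './title')    # 'Episode 1'
    xpath_text(item, './missing')  # None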
diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py
deleted file mode 100644 (file)
index 496271b..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-# encoding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    int_or_none,
-)
-
-
-class CinemassacreIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?cinemassacre\.com/(?P<date_Y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/(?P<display_id>[^?#/]+)'
-    _TESTS = [
-        {
-            'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
-            'md5': 'fde81fbafaee331785f58cd6c0d46190',
-            'info_dict': {
-                'id': '19911',
-                'ext': 'mp4',
-                'upload_date': '20121110',
-                'title': '“Angry Video Game Nerd: The Movie” – Trailer',
-                'description': 'md5:fb87405fcb42a331742a0dce2708560b',
-            },
-        },
-        {
-            'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
-            'md5': 'd72f10cd39eac4215048f62ab477a511',
-            'info_dict': {
-                'id': '521be8ef82b16',
-                'ext': 'mp4',
-                'upload_date': '20131002',
-                'title': 'The Mummy’s Hand (1940)',
-            },
-        }
-    ]
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('display_id')
-
-        webpage = self._download_webpage(url, display_id)
-        video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
-        mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
-        if not mobj:
-            raise ExtractorError('Can\'t extract embed url and video id')
-        playerdata_url = mobj.group('embed_url')
-        video_id = mobj.group('video_id')
-
-        video_title = self._html_search_regex(
-            r'<title>(?P<title>.+?)\|', webpage, 'title')
-        video_description = self._html_search_regex(
-            r'<div class="entry-content">(?P<description>.+?)</div>',
-            webpage, 'description', flags=re.DOTALL, fatal=False)
-
-        playerdata = self._download_webpage(playerdata_url, video_id, 'Downloading player webpage')
-        video_thumbnail = self._search_regex(
-            r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False)
-        sd_url = self._search_regex(r'file: \'([^\']+)\', label: \'SD\'', playerdata, 'sd_file')
-        videolist_url = self._search_regex(r'file: \'([^\']+\.smil)\'}', playerdata, 'videolist_url')
-
-        videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
-
-        formats = []
-        baseurl = sd_url[:sd_url.rfind('/')+1]
-        for video in videolist.findall('.//video'):
-            src = video.get('src')
-            if not src:
-                continue
-            file_ = src.partition(':')[-1]
-            width = int_or_none(video.get('width'))
-            height = int_or_none(video.get('height'))
-            bitrate = int_or_none(video.get('system-bitrate'))
-            format = {
-                'url': baseurl + file_,
-                'format_id': src.rpartition('.')[0].rpartition('_')[-1],
-            }
-            if width or height:
-                format.update({
-                    'tbr': bitrate // 1000 if bitrate else None,
-                    'width': width,
-                    'height': height,
-                })
-            else:
-                format.update({
-                    'abr': bitrate // 1000 if bitrate else None,
-                    'vcodec': 'none',
-                })
-            formats.append(format)
-        self._sort_formats(formats)
-
-        return {
-            'id': video_id,
-            'title': video_title,
-            'formats': formats,
-            'description': video_description,
-            'upload_date': video_date,
-            'thumbnail': video_thumbnail,
-        }
index 669919a2cc9039ffb91ae052b96d5531665341e0..a5c3cb7c6253776062fb5834d0fe4c121dfb9c99 100644 (file)
@@ -24,7 +24,7 @@ class ClipfishIE(InfoExtractor):
             'title': 'FIFA 14 - E3 2013 Trailer',
             'duration': 82,
         },
-        u'skip': 'Blocked in the US'
+        'skip': 'Blocked in the US'
     }
 
     def _real_extract(self, url):
@@ -34,7 +34,7 @@ class ClipfishIE(InfoExtractor):
         info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
                     (video_id, int(time.time())))
         doc = self._download_xml(
-            info_url, video_id, note=u'Downloading info page')
+            info_url, video_id, note='Downloading info page')
         title = doc.find('title').text
         video_url = doc.find('filename').text
         if video_url is None:
index d4227e6ebb51244018d24da87927c54061058dc8..2edab90a33d553225b8c790b8d391f0e40b55cf8 100644 (file)
@@ -4,7 +4,6 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..utils import int_or_none
 
 
 _translation_table = {
@@ -39,9 +38,7 @@ class CliphunterIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         video_title = self._search_regex(
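Several extractors in this change drop the manual re.match boilerplate in favour of _match_id. A sketch of what that helper amounts to, with a hypothetical URL pattern:

    import re

    class ExampleIE(object):
        _VALID_URL = r'https?://example\.com/video/(?P<id>[0-9]+)'  # hypothetical

        def _match_id(self, url):
            mobj = re.match(self._VALID_URL, url)
            assert mobj is not None
            return mobj.group('id')

    ExampleIE()._match_id('http://example.com/video/1012420')  # '1012420'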
index 02a1667fa3fbf7cbe1a822db7b82f9c087864249..d07d544eaf7742bb782a8b367a09561f14c44a4e 100644 (file)
@@ -39,6 +39,7 @@ class ClipsyndicateIE(InfoExtractor):
             transform_source=fix_xml_ampersands)
 
         track_doc = pdoc.find('trackList/track')
+
         def find_param(name):
             node = find_xpath_attr(track_doc, './/param', 'name', name)
             if node is not None:
index 386f080d241d19b30b131d2700a25ddf8de0ecc8..abf8cc280b3d6f1aeefe8219a1fd0ea5d1224be1 100644 (file)
@@ -4,14 +4,16 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
+from ..compat import (
     compat_parse_qs,
     compat_urllib_parse,
-    remove_end,
-    HEADRequest,
     compat_HTTPError,
 )
+from ..utils import (
+    ExtractorError,
+    HEADRequest,
+    remove_end,
+)
 
 
 class CloudyIE(InfoExtractor):
index 710d5009b71aafe0da901771048b8c0ba68def04..3145b30514ea2a075f92077b9f87b64c9e8820a7 100644 (file)
@@ -2,12 +2,10 @@
 from __future__ import unicode_literals
 
 import json
-import re
 
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
-    int_or_none,
 )
 
 
@@ -15,23 +13,24 @@ class CNETIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?cnet\.com/videos/(?P<id>[^/]+)/'
     _TEST = {
         'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/',
-        'md5': '041233212a0d06b179c87cbcca1577b8',
         'info_dict': {
             'id': '56f4ea68-bd21-4852-b08c-4de5b8354c60',
-            'ext': 'mp4',
+            'ext': 'flv',
             'title': 'Hands-on with Microsoft Windows 8.1 Update',
             'description': 'The new update to the Windows 8 OS brings improved performance for mouse and keyboard users.',
             'thumbnail': 're:^http://.*/flmswindows8.jpg$',
-            'uploader_id': 'sarah.mitroff@cbsinteractive.com',
+            'uploader_id': '6085384d-619e-11e3-b231-14feb5ca9861',
             'uploader': 'Sarah Mitroff',
+        },
+        'params': {
+            'skip_download': 'requires rtmpdump',
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('id')
-
+        display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
+
         data_json = self._html_search_regex(
             r"<div class=\"cnetVideoPlayer\"\s+.*?data-cnet-video-options='([^']+)'",
             webpage, 'data json')
@@ -42,37 +41,31 @@ class CNETIE(InfoExtractor):
         if not vdata:
             raise ExtractorError('Cannot find video data')
 
+        mpx_account = data['config']['players']['default']['mpx_account']
+        vid = vdata['files']['rtmp']
+        tp_link = 'http://link.theplatform.com/s/%s/%s' % (mpx_account, vid)
+
         video_id = vdata['id']
         title = vdata.get('headline')
         if title is None:
             title = vdata.get('title')
         if title is None:
             raise ExtractorError('Cannot find title!')
-        description = vdata.get('dek')
         thumbnail = vdata.get('image', {}).get('path')
         author = vdata.get('author')
         if author:
             uploader = '%s %s' % (author['firstName'], author['lastName'])
-            uploader_id = author.get('email')
+            uploader_id = author.get('id')
         else:
             uploader = None
             uploader_id = None
 
-        formats = [{
-            'format_id': '%s-%s-%s' % (
-                f['type'], f['format'],
-                int_or_none(f.get('bitrate'), 1000, default='')),
-            'url': f['uri'],
-            'tbr': int_or_none(f.get('bitrate'), 1000),
-        } for f in vdata['files']['data']]
-        self._sort_formats(formats)
-
         return {
+            '_type': 'url_transparent',
+            'url': tp_link,
             'id': video_id,
             'display_id': display_id,
             'title': title,
-            'formats': formats,
-            'description': description,
             'uploader': uploader,
             'uploader_id': uploader_id,
             'thumbnail': thumbnail,
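CNET now hands the actual media resolution to the ThePlatform extractor via a url_transparent result (see the _type documentation added to common.py further down in this diff); the metadata extracted here takes precedence over whatever ThePlatform resolves. Roughly, with hypothetical values:

    mpx_account = 'kYEXFC'  # hypothetical; from data['config']['players']['default']['mpx_account']
    vid = 'xyzzy'           # hypothetical; from vdata['files']['rtmp']
    tp_link = 'http://link.theplatform.com/s/%s/%s' % (mpx_account, vid)
    # -> 'http://link.theplatform.com/s/kYEXFC/xyzzy', then resolved by ThePlatformIE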
index dae40c136bae20fd54cae401e711b9233c750e14..81142ee419d45b9df9f75bdc152ab87e1317650f 100644 (file)
@@ -12,24 +12,25 @@ from ..utils import (
 
 class CNNIE(InfoExtractor):
     _VALID_URL = r'''(?x)https?://((edition|www)\.)?cnn\.com/video/(data/.+?|\?)/
-        (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''
+        (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn(-ap)?|(?=&)))'''
 
     _TESTS = [{
         'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
-        'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
         'md5': '3e6121ea48df7e2259fe73a0628605c4',
         'info_dict': {
+            'id': 'sports_2013_06_09_nadal-1-on-1.cnn',
+            'ext': 'mp4',
             'title': 'Nadal wins 8th French Open title',
             'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
             'duration': 135,
             'upload_date': '20130609',
         },
-    },
-    {
+    }, {
         "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
-        "file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
         "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
         "info_dict": {
+            'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
+            'ext': 'mp4',
             "title": "Student's epic speech stuns new freshmen",
             "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
             "upload_date": "20130821",
index 6f866e7fcee7f401362b24d69db2285e22cfa6a4..002b240378299bf18000b2ee92c76ae062115126 100644 (file)
@@ -10,47 +10,46 @@ from ..utils import int_or_none
 class CollegeHumorIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
 
-    _TESTS = [{
-        'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
-        'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
-        'info_dict': {
-            'id': '6902724',
-            'ext': 'mp4',
-            'title': 'Comic-Con Cosplay Catastrophe',
-            'description': "Fans get creative this year at San Diego.  Too creative.  And yes, that's really Joss Whedon.",
-            'age_limit': 13,
-            'duration': 187,
+    _TESTS = [
+        {
+            'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
+            'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
+            'info_dict': {
+                'id': '6902724',
+                'ext': 'mp4',
+                'title': 'Comic-Con Cosplay Catastrophe',
+                'description': "Fans get creative this year at San Diego.  Too creative.  And yes, that's really Joss Whedon.",
+                'age_limit': 13,
+                'duration': 187,
+            },
+        }, {
+            'url': 'http://www.collegehumor.com/video/3505939/font-conference',
+            'md5': '72fa701d8ef38664a4dbb9e2ab721816',
+            'info_dict': {
+                'id': '3505939',
+                'ext': 'mp4',
+                'title': 'Font Conference',
+                'description': "This video wasn't long enough, so we made it double-spaced.",
+                'age_limit': 10,
+                'duration': 179,
+            },
+        }, {
+            # embedded youtube video
+            'url': 'http://www.collegehumor.com/embed/6950306',
+            'info_dict': {
+                'id': 'Z-bao9fg6Yc',
+                'ext': 'mp4',
+                'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
+                'uploader': 'Mark Dice',
+                'uploader_id': 'MarkDice',
+                'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
+                'upload_date': '20140127',
+            },
+            'params': {
+                'skip_download': True,
+            },
+            'add_ie': ['Youtube'],
         },
-    },
-    {
-        'url': 'http://www.collegehumor.com/video/3505939/font-conference',
-        'md5': '72fa701d8ef38664a4dbb9e2ab721816',
-        'info_dict': {
-            'id': '3505939',
-            'ext': 'mp4',
-            'title': 'Font Conference',
-            'description': "This video wasn't long enough, so we made it double-spaced.",
-            'age_limit': 10,
-            'duration': 179,
-        },
-    },
-    # embedded youtube video
-    {
-        'url': 'http://www.collegehumor.com/embed/6950306',
-        'info_dict': {
-            'id': 'Z-bao9fg6Yc',
-            'ext': 'mp4',
-            'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
-            'uploader': 'Mark Dice',
-            'uploader_id': 'MarkDice',
-            'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
-            'upload_date': '20140127',
-        },
-        'params': {
-            'skip_download': True,
-        },
-        'add_ie': ['Youtube'],
-    },
     ]
 
     def _real_extract(self, url):
diff --git a/youtube_dl/extractor/comcarcoff.py b/youtube_dl/extractor/comcarcoff.py
new file mode 100644 (file)
index 0000000..a2211ee
--- /dev/null
@@ -0,0 +1,56 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import parse_iso8601
+
+
+class ComCarCoffIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?comediansincarsgettingcoffee\.com/(?P<id>[a-z0-9\-]*)'
+    _TESTS = [{
+        'url': 'http://comediansincarsgettingcoffee.com/miranda-sings-happy-thanksgiving-miranda/',
+        'info_dict': {
+            'id': 'miranda-sings-happy-thanksgiving-miranda',
+            'ext': 'mp4',
+            'upload_date': '20141127',
+            'timestamp': 1417107600,
+            'title': 'Happy Thanksgiving Miranda',
+            'description': 'Jerry Seinfeld and his special guest Miranda Sings cruise around town in search of coffee, complaining and apologizing along the way.',
+            'thumbnail': 'http://ccc.crackle.com/images/s5e4_thumb.jpg',
+        },
+        'params': {
+            'skip_download': 'requires ffmpeg',
+        }
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        if not display_id:
+            display_id = 'comediansincarsgettingcoffee.com'
+        webpage = self._download_webpage(url, display_id)
+
+        full_data = json.loads(self._search_regex(
+            r'<script type="application/json" id="videoData">(?P<json>.+?)</script>',
+            webpage, 'full data json'))
+
+        video_id = full_data['activeVideo']['video']
+        video_data = full_data['videos'][video_id]
+        thumbnails = [{
+            'url': video_data['images']['thumb'],
+        }, {
+            'url': video_data['images']['poster'],
+        }]
+        formats = self._extract_m3u8_formats(
+            video_data['mediaUrl'], video_id, ext='mp4')
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': video_data['title'],
+            'description': video_data.get('description'),
+            'timestamp': parse_iso8601(video_data.get('pubDate')),
+            'thumbnails': thumbnails,
+            'formats': formats,
+        }
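The extractor expects an embedded videoData JSON blob. A trimmed, hypothetical example of the shape it reads (field names taken from the code above, values invented):

    import json

    video_data_json = '''{
      "activeVideo": {"video": "happy-thanksgiving-miranda"},
      "videos": {
        "happy-thanksgiving-miranda": {
          "title": "Happy Thanksgiving Miranda",
          "pubDate": "2014-11-27T18:00:00Z",
          "mediaUrl": "http://example.com/master.m3u8",
          "images": {"thumb": "http://example.com/thumb.jpg",
                     "poster": "http://example.com/poster.jpg"}
        }
      }
    }'''

    full_data = json.loads(video_data_json)
    video_id = full_data['activeVideo']['video']
    full_data['videos'][video_id]['title']  # 'Happy Thanksgiving Miranda'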
index 035046120152f264278b4edc4bd5b11e0183da98..48e2410b621f64896ebe769d36cce722260db532 100644 (file)
@@ -2,11 +2,12 @@ from __future__ import unicode_literals
 
 import re
 
-from .common import InfoExtractor
 from .mtv import MTVServicesInfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
     compat_urllib_parse,
+)
+from ..utils import (
     ExtractorError,
     float_or_none,
     unified_strdate,
@@ -31,7 +32,7 @@ class ComedyCentralIE(MTVServicesInfoExtractor):
     }
 
 
-class ComedyCentralShowsIE(InfoExtractor):
+class ComedyCentralShowsIE(MTVServicesInfoExtractor):
     IE_DESC = 'The Daily Show / The Colbert Report'
     # urls can be abbreviations like :thedailyshow or :colbert
     # urls for episodes like:
@@ -109,18 +110,8 @@ class ComedyCentralShowsIE(InfoExtractor):
         '400': (384, 216),
     }
 
-    @staticmethod
-    def _transform_rtmp_url(rtmp_video_url):
-        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
-        if not m:
-            raise ExtractorError('Cannot transform RTMP url')
-        base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
-        return base + m.group('finalid')
-
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-        if mobj is None:
-            raise ExtractorError('Invalid URL: %s' % url)
+        mobj = re.match(self._VALID_URL, url)
 
         if mobj.group('shortname'):
             if mobj.group('shortname') in ('tds', 'thedailyshow'):
@@ -212,9 +203,6 @@ class ComedyCentralShowsIE(InfoExtractor):
                     'ext': self._video_extensions.get(format, 'mp4'),
                     'height': h,
                     'width': w,
-
-                    'format_note': 'HTTP 400 at the moment (patches welcome!)',
-                    'preference': -100,
                 })
                 formats.append({
                     'format_id': 'rtmp-%s' % format,
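The local _transform_rtmp_url staticmethod can go because ComedyCentralShowsIE now subclasses MTVServicesInfoExtractor, which supplies its own RTMP-to-HTTP transform. For reference, the removed logic (as it stood above) did roughly this:

    import re

    def transform_rtmp_url(rtmp_video_url):
        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
        if not m:
            raise ValueError('Cannot transform RTMP url')  # the original raised ExtractorError
        base = ('http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264'
                '+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/')
        return base + m.group('finalid')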
index 450c7dfd69d0000c810f18ef35741aae05221c40..d302fe45fdea0bc7556fdbda4f321d64d86c2c7c 100644 (file)
@@ -12,13 +12,15 @@ import sys
 import time
 import xml.etree.ElementTree
 
-from ..utils import (
+from ..compat import (
+    compat_cookiejar,
     compat_http_client,
     compat_urllib_error,
     compat_urllib_parse_urlparse,
     compat_urlparse,
     compat_str,
-
+)
+from ..utils import (
     clean_html,
     compiled_regex_type,
     ExtractorError,
@@ -42,7 +44,11 @@ class InfoExtractor(object):
     information possibly downloading the video to the file system, among
     other possible outcomes.
 
-    The dictionaries must include the following fields:
+    The _type field determines the type of the result.
+    By far the most common value (and the default if _type is missing) is
+    "video", which indicates a single video.
+
+    For a video, the dictionaries must include the following fields:
 
     id:             Video identifier.
     title:          Video title, unescaped.
@@ -72,6 +78,7 @@ class InfoExtractor(object):
                     * acodec     Name of the audio codec in use
                     * asr        Audio sampling rate in Hertz
                     * vbr        Average video bitrate in KBit/s
+                    * fps        Frame rate
                     * vcodec     Name of the video codec in use
                     * container  Name of the container format
                     * filesize   The number of bytes, if known in advance
@@ -85,10 +92,19 @@ class InfoExtractor(object):
                                  by this field, regardless of all other values.
                                  -1 for default (order by other properties),
                                  -2 or smaller for less than default.
+                    * language_preference  Is this in the correct requested
+                                 language?
+                                 10 if it's what the URL is about,
+                                 -1 for default (don't know),
+                                 -10 otherwise, other values reserved for now.
                     * quality    Order number of the video quality of this
                                  format, irrespective of the file format.
                                  -1 for default (order by other properties),
                                  -2 or smaller for less than default.
+                    * source_preference  Order number for this video source
+                                  (quality takes higher priority)
+                                 -1 for default (order by other properties),
+                                 -2 or smaller for less than default.
                     * http_referer  HTTP Referer header value to set.
                     * http_method  HTTP method to use for the download.
                     * http_headers  A dictionary of additional HTTP headers
@@ -102,6 +118,7 @@ class InfoExtractor(object):
 
     The following fields are optional:
 
+    alt_title:      A secondary title of the video.
     display_id      An alternative identifier for the video, not necessarily
                     unique, but available before title. Typically, id is
                     something like "4234987", title "Dancing naked mole rats",
@@ -113,7 +130,7 @@ class InfoExtractor(object):
                         * "resolution" (optional, string "{width}x{height"},
                                         deprecated)
     thumbnail:      Full URL to a video thumbnail image.
-    description:    One-line video description.
+    description:    Full video description.
     uploader:       Full name of the video uploader.
     timestamp:      UNIX timestamp of the moment the video became available.
     upload_date:    Video upload date (YYYYMMDD).
@@ -140,6 +157,39 @@ class InfoExtractor(object):
 
     Unless mentioned otherwise, None is equivalent to absence of information.
 
+
+    _type "playlist" indicates multiple videos.
+    There must be a key "entries", which is a list, an iterable, or a PagedList
+    object, each element of which is a valid dictionary by this specification.
+
+    Additionally, playlists can have "title" and "id" attributes with the same
+    semantics as videos (see above).
+
+
+    _type "multi_video" indicates that there are multiple videos that
+    form a single show, for example, multiple acts of an opera or TV episode.
+    It must have an entries key like a playlist and contain all the keys
+    required for a video at the same time.
+
+
+    _type "url" indicates that the video must be extracted from another
+    location, possibly by a different extractor. Its only required key is:
+    "url" - the next URL to extract.
+    The key "ie_key" can be set to the class name (minus the trailing "IE",
+    e.g. "Youtube") if the extractor class is known in advance.
+    Additionally, the dictionary may have any properties of the resolved entity
+    known in advance, for example "title" if the title of the referred video is
+    known ahead of time.
+
+
+    _type "url_transparent" entities have the same specification as "url", but
+    indicate that the given additional information is more precise than what
+    is associated with the resolved URL.
+    This is useful when a site employs a video service that hosts the video and
+    its technical metadata, but that video service does not embed a useful
+    title, description, etc.
+
+
     Subclasses of this one should re-define the _real_initialize() and
     _real_extract() methods and define a _VALID_URL regexp.
     Probably, they should also be added to the list of extractors.
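To make the new _type taxonomy above concrete, here are minimal illustrative result dictionaries (all values invented):

    single_video = {                      # _type defaults to 'video'
        'id': '42',
        'title': 'Some video',
        'url': 'http://example.com/video.mp4',
    }
    url_result = {                        # delegate extraction to another extractor
        '_type': 'url',
        'url': 'http://example.com/watch/42',
        'ie_key': 'Generic',
    }
    url_transparent = dict(url_result,    # like 'url', but this title wins
                           _type='url_transparent',
                           title='Title known ahead of time')
    playlist = {
        '_type': 'playlist',
        'id': 'show-1',
        'title': 'Some show',
        'entries': [single_video, url_result],
    }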
@@ -238,7 +288,6 @@ class InfoExtractor(object):
 
     def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
         """ Returns a tuple (page content as string, URL handle) """
-
         # Strip hashes from the URL (#1038)
         if isinstance(url_or_request, (compat_str, str)):
             url_or_request = url_or_request.partition('#')[0]
@@ -247,8 +296,14 @@ class InfoExtractor(object):
         if urlh is False:
             assert not fatal
             return False
+        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal)
+        return (content, urlh)
+
+    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None):
         content_type = urlh.headers.get('Content-Type', '')
         webpage_bytes = urlh.read()
+        if prefix is not None:
+            webpage_bytes = prefix + webpage_bytes
         m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
         if m:
             encoding = m.group(1)
@@ -281,6 +336,12 @@ class InfoExtractor(object):
             raw_filename = basen + '.dump'
             filename = sanitize_filename(raw_filename, restricted=True)
             self.to_screen('Saving request to ' + filename)
+            # Working around MAX_PATH limitation on Windows (see
+            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
+            if os.name == 'nt':
+                absfilepath = os.path.abspath(filename)
+                if len(absfilepath) > 259:
+                    filename = '\\\\?\\' + absfilepath
             with open(filename, 'wb') as outf:
                 outf.write(webpage_bytes)
 
@@ -299,7 +360,7 @@ class InfoExtractor(object):
                 msg += ' Visit %s for more details' % blocked_iframe
             raise ExtractorError(msg, expected=True)
 
-        return (content, urlh)
+        return content
 
     def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
         """ Returns the data of the page as a string """
@@ -331,6 +392,10 @@ class InfoExtractor(object):
             url_or_request, video_id, note, errnote, fatal=fatal)
         if (not fatal) and json_string is False:
             return None
+        return self._parse_json(
+            json_string, video_id, transform_source=transform_source, fatal=fatal)
+
+    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
         if transform_source:
             json_string = transform_source(json_string)
         try:
@@ -367,19 +432,20 @@ class InfoExtractor(object):
         """Report attempt to log in."""
         self.to_screen('Logging in')
 
-    #Methods for following #608
+    # Methods for following #608
     @staticmethod
     def url_result(url, ie=None, video_id=None):
         """Returns a url that points to a page that should be processed"""
-        #TODO: ie should be the class used for getting the info
+        # TODO: ie should be the class used for getting the info
         video_info = {'_type': 'url',
                       'url': url,
                       'ie_key': ie}
         if video_id is not None:
             video_info['id'] = video_id
         return video_info
+
     @staticmethod
-    def playlist_result(entries, playlist_id=None, playlist_title=None):
+    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
         """Returns a playlist"""
         video_info = {'_type': 'playlist',
                       'entries': entries}
@@ -387,9 +453,11 @@ class InfoExtractor(object):
             video_info['id'] = playlist_id
         if playlist_title:
             video_info['title'] = playlist_title
+        if playlist_description:
+            video_info['description'] = playlist_description
         return video_info
 
-    def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
+    def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
         """
         Perform a regex search on the given string, using a single or a list of
         patterns returning the first matching group.
@@ -410,22 +478,25 @@ class InfoExtractor(object):
             _name = name
 
         if mobj:
-            # return the first matching group
-            return next(g for g in mobj.groups() if g is not None)
+            if group is None:
+                # return the first matching group
+                return next(g for g in mobj.groups() if g is not None)
+            else:
+                return mobj.group(group)
         elif default is not _NO_DEFAULT:
             return default
         elif fatal:
             raise RegexNotFoundError('Unable to extract %s' % _name)
         else:
             self._downloader.report_warning('unable to extract %s; '
-                'please report this issue on http://yt-dl.org/bug' % _name)
+                                            'please report this issue on http://yt-dl.org/bug' % _name)
             return None
 
-    def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
+    def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
         """
         Like _search_regex, but strips HTML tags and unescapes entities.
         """
-        res = self._search_regex(pattern, string, name, default, fatal, flags)
+        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
         if res:
             return clean_html(res).strip()
         else:
@@ -458,7 +529,7 @@ class InfoExtractor(object):
                     raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
             except (IOError, netrc.NetrcParseError) as err:
                 self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
-        
+
         return (username, password)
 
     def _get_tfa_info(self):
@@ -519,9 +590,9 @@ class InfoExtractor(object):
             display_name = name
         return self._html_search_regex(
             r'''(?ix)<meta
-                    (?=[^>]+(?:itemprop|name|property)=["\']?%s["\']?)
-                    [^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
-            html, display_name, fatal=fatal, **kwargs)
+                    (?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
+                    [^>]+content=(["\'])(?P<content>.*?)\1''' % re.escape(name),
+            html, display_name, fatal=fatal, group='content', **kwargs)
 
     def _dc_search_uploader(self, html):
         return self._html_search_meta('dc.creator', html, 'uploader')
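Both _search_regex's new group= parameter and the tightened _html_search_meta pattern (which backreferences the opening quote) can be illustrated with plain re:

    import re

    mobj = re.search(r'content=(["\'])(?P<content>.*?)\1',
                     '<meta name="description" content="An example">')
    next(g for g in mobj.groups() if g is not None)  # '"' -- the old group=None behaviour
    mobj.group('content')                            # 'An example' -- with group='content'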
@@ -552,7 +623,7 @@ class InfoExtractor(object):
 
     def _twitter_search_player(self, html):
         return self._html_search_meta('twitter:player', html,
-            'twitter card player')
+                                      'twitter card player')
 
     def _sort_formats(self, formats):
         if not formats:
@@ -597,6 +668,7 @@ class InfoExtractor(object):
 
             return (
                 preference,
+                f.get('language_preference') if f.get('language_preference') is not None else -1,
                 f.get('quality') if f.get('quality') is not None else -1,
                 f.get('height') if f.get('height') is not None else -1,
                 f.get('width') if f.get('width') is not None else -1,
@@ -605,14 +677,16 @@ class InfoExtractor(object):
                 f.get('vbr') if f.get('vbr') is not None else -1,
                 f.get('abr') if f.get('abr') is not None else -1,
                 audio_ext_preference,
+                f.get('fps') if f.get('fps') is not None else -1,
                 f.get('filesize') if f.get('filesize') is not None else -1,
                 f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
+                f.get('source_preference') if f.get('source_preference') is not None else -1,
                 f.get('format_id'),
             )
         formats.sort(key=_formats_key)
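The extended sort key slots language_preference ahead of quality, with fps and source_preference as late tie-breakers. A reduced sketch of the effect (the real key also treats explicit None as -1; formats sort ascending, best last):

    formats = [
        {'format_id': 'a', 'language_preference': -1, 'quality': 1},
        {'format_id': 'b', 'language_preference': 10, 'quality': 0},
    ]
    formats.sort(key=lambda f: (
        f.get('language_preference', -1),   # compared before quality
        f.get('quality', -1),
    ))
    [f['format_id'] for f in formats]  # ['a', 'b'] -- 'b' wins despite lower quality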
 
     def http_scheme(self):
-        """ Either "https:" or "https:", depending on the user's preferences """
+        """ Either "http:" or "https:", depending on the user's preferences """
         return (
             'http:'
             if self._downloader.params.get('prefer_insecure', False)
@@ -675,7 +749,10 @@ class InfoExtractor(object):
             if re.match(r'^https?://', u)
             else compat_urlparse.urljoin(m3u8_url, u))
 
-        m3u8_doc = self._download_webpage(m3u8_url, video_id)
+        m3u8_doc = self._download_webpage(
+            m3u8_url, video_id,
+            note='Downloading m3u8 information',
+            errnote='Failed to download m3u8 information')
         last_info = None
         kv_rex = re.compile(
             r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
@@ -721,6 +798,49 @@ class InfoExtractor(object):
         self._sort_formats(formats)
         return formats
 
+    # TODO: improve extraction
+    def _extract_smil_formats(self, smil_url, video_id):
+        smil = self._download_xml(
+            smil_url, video_id, 'Downloading SMIL file',
+            'Unable to download SMIL file')
+
+        base = smil.find('./head/meta').get('base')
+
+        formats = []
+        rtmp_count = 0
+        for video in smil.findall('./body/switch/video'):
+            src = video.get('src')
+            if not src:
+                continue
+            bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
+            width = int_or_none(video.get('width'))
+            height = int_or_none(video.get('height'))
+            proto = video.get('proto')
+            if not proto:
+                if base:
+                    if base.startswith('rtmp'):
+                        proto = 'rtmp'
+                    elif base.startswith('http'):
+                        proto = 'http'
+            ext = video.get('ext')
+            if proto == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(src, video_id, ext))
+            elif proto == 'rtmp':
+                rtmp_count += 1
+                streamer = video.get('streamer') or base
+                formats.append({
+                    'url': streamer,
+                    'play_path': src,
+                    'ext': 'flv',
+                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
+                    'tbr': bitrate,
+                    'width': width,
+                    'height': height,
+                })
+        self._sort_formats(formats)
+
+        return formats
+
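A hypothetical SMIL document of the shape _extract_smil_formats expects, with the base URL in ./head/meta and <video> nodes under ./body/switch:

    import xml.etree.ElementTree as ET

    smil = ET.fromstring('''<smil>
      <head><meta base="rtmp://example.com/app"/></head>
      <body><switch>
        <video src="mp4:clip_800.mp4" system-bitrate="800000" width="640" height="360"/>
        <video src="mp4:clip_1500.mp4" system-bitrate="1500000" width="1280" height="720"/>
      </switch></body>
    </smil>''')

    base = smil.find('./head/meta').get('base')       # 'rtmp://example.com/app'
    for video in smil.findall('./body/switch/video'):
        print(base, video.get('src'), video.get('system-bitrate'))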
     def _live_title(self, name):
         """ Generate the title for a live video """
         now = datetime.datetime.now()
@@ -749,6 +869,12 @@ class InfoExtractor(object):
                 self._downloader.report_warning(msg)
         return res
 
+    def _set_cookie(self, domain, name, value, expire_time=None):
+        cookie = compat_cookiejar.Cookie(
+            0, name, value, None, None, domain, None,
+            None, '/', True, False, expire_time, '', None, None, None)
+        self._downloader.cookiejar.set_cookie(cookie)
+
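Hypothetical usage from an extractor subclass, e.g. pre-setting an age-gate cookie before fetching a page (cookie name and domain invented):

    # inside a subclass's _real_extract:
    self._set_cookie('example.com', 'age_verified', '1')
    webpage = self._download_webpage(url, video_id)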
 
 class SearchInfoExtractor(InfoExtractor):
     """
index ffbe4903b807faf0442057ebbee27bc9ed838c12..3db4db4e4db816ae532060bc2386cd91a9c71a92 100644 (file)
@@ -5,12 +5,14 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
-    orderedSet,
     compat_urllib_parse_urlparse,
     compat_urlparse,
 )
+from ..utils import (
+    orderedSet,
+)
 
 
 class CondeNastIE(InfoExtractor):
@@ -34,6 +36,8 @@ class CondeNastIE(InfoExtractor):
     _VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
     IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
 
+    EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed)/.+?' % '|'.join(_SITES.keys())
+
     _TEST = {
         'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
         'md5': '1921f713ed48aabd715691f774c451f7',
index 74b880ffce9b0a8dbf7ab273063253a2743dbbdb..cf763ee7e03019adc5f957060b0f45e52e532084 100644 (file)
@@ -54,7 +54,7 @@ class CrackedIE(InfoExtractor):
 
         return {
             'id': video_id,
-            'url':video_url,
+            'url': video_url,
             'title': title,
             'description': description,
             'timestamp': timestamp,
@@ -62,4 +62,4 @@ class CrackedIE(InfoExtractor):
             'comment_count': comment_count,
             'height': height,
             'width': width,
-        }
\ No newline at end of file
+        }
index f99888ecc378ea2a5404fe42d8d32a6a8c4093fb..8f1ea02e74466ac4999356267eebedffe2f62daa 100644 (file)
@@ -10,20 +10,22 @@ import xml.etree.ElementTree
 from hashlib import sha1
 from math import pow, sqrt, floor
 from .subtitles import SubtitlesInfoExtractor
-from ..utils import (
-    ExtractorError,
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
+    ExtractorError,
     bytes_to_intlist,
     intlist_to_bytes,
     unified_strdate,
-    clean_html,
     urlencode_postdata,
 )
 from ..aes import (
     aes_cbc_decrypt,
     inc,
 )
+from .common import InfoExtractor
 
 
 class CrunchyrollIE(SubtitlesInfoExtractor):
@@ -39,6 +41,7 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
             'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
             'uploader': 'Yomiuri Telecasting Corporation (YTV)',
             'upload_date': '20131013',
+            'url': 're:(?!.*&amp)',
         },
         'params': {
             # rtmp
@@ -68,11 +71,9 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._download_webpage(login_request, None, False, 'Wrong login info')
 
-
     def _real_initialize(self):
         self._login()
 
-
     def _decrypt_subtitles(self, data, iv, id):
         data = bytes_to_intlist(data)
         iv = bytes_to_intlist(iv)
@@ -98,8 +99,10 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
             return shaHash + [0] * 12
 
         key = obfuscate_key(id)
+
         class Counter:
             __value = iv
+
             def next_value(self):
                 temp = self.__value
                 self.__value = inc(self.__value)
@@ -107,19 +110,17 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
         decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
         return zlib.decompress(decrypted_data)
 
-    def _convert_subtitles_to_srt(self, subtitles):
+    def _convert_subtitles_to_srt(self, sub_root):
         output = ''
-        for i, (start, end, text) in enumerate(re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles), 1):
-            start = start.replace('.', ',')
-            end = end.replace('.', ',')
-            text = clean_html(text)
-            text = text.replace('\\N', '\n')
-            if not text:
-                continue
+
+        for i, event in enumerate(sub_root.findall('./events/event'), 1):
+            start = event.attrib['start'].replace('.', ',')
+            end = event.attrib['end'].replace('.', ',')
+            text = event.attrib['text'].replace('\\N', '\n')
             output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
         return output
 
-    def _convert_subtitles_to_ass(self, subtitles):
+    def _convert_subtitles_to_ass(self, sub_root):
         output = ''
 
         def ass_bool(strvalue):
@@ -128,10 +129,6 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
                 assvalue = '-1'
             return assvalue
 
-        sub_root = xml.etree.ElementTree.fromstring(subtitles)
-        if not sub_root:
-            return output
-
         output = '[Script Info]\n'
         output += 'Title: %s\n' % sub_root.attrib["title"]
         output += 'ScriptType: v4.00+\n'
@@ -188,7 +185,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
 
         return output
 
-    def _real_extract(self,url):
+    def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('video_id')
 
@@ -231,18 +228,20 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         formats = []
         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
             stream_quality, stream_format = self._FORMAT_IDS[fmt]
-            video_format = fmt+'p'
+            video_format = fmt + 'p'
             streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
             # urlencode doesn't work!
-            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
-            streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for '+video_format)
-            video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url')
-            video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path')
+            streamdata = self._download_xml(
+                streamdata_req, video_id,
+                note='Downloading media info for %s' % video_format)
+            video_url = streamdata.find('.//host').text
+            video_play_path = streamdata.find('.//file').text
             formats.append({
                 'url': video_url,
-                'play_path':   video_play_path,
+                'play_path': video_play_path,
                 'ext': 'flv',
                 'format': video_format,
                 'format_id': video_format,
@@ -251,8 +250,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
-                                              video_id, note='Downloading subtitles for '+sub_name)
+            sub_page = self._download_webpage(
+                'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
+                video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
@@ -266,22 +266,60 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
             if not lang_code:
                 continue
+            sub_root = xml.etree.ElementTree.fromstring(subtitle)
             if sub_format == 'ass':
-                subtitles[lang_code] = self._convert_subtitles_to_ass(subtitle)
+                subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root)
             else:
-                subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
+                subtitles[lang_code] = self._convert_subtitles_to_srt(sub_root)
 
         if self._downloader.params.get('listsubtitles', False):
             self._list_available_subtitles(video_id, subtitles)
             return
 
         return {
-            'id':          video_id,
-            'title':       video_title,
+            'id': video_id,
+            'title': video_title,
             'description': video_description,
-            'thumbnail':   video_thumbnail,
-            'uploader':    video_uploader,
+            'thumbnail': video_thumbnail,
+            'uploader': video_uploader,
             'upload_date': video_upload_date,
-            'subtitles':   subtitles,
-            'formats':     formats,
+            'subtitles': subtitles,
+            'formats': formats,
+        }
+
+
+class CrunchyrollShowPlaylistIE(InfoExtractor):
+    IE_NAME = "crunchyroll:playlist"
+    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'
+
+    _TESTS = [{
+        'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
+        'info_dict': {
+            'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
+            'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
+        },
+        'playlist_count': 13,
+    }]
+
+    def _real_extract(self, url):
+        show_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, show_id)
+        title = self._html_search_regex(
+            r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
+            webpage, 'title')
+        episode_paths = re.findall(
+            r'(?s)<li id="showview_videos_media_[0-9]+"[^>]+>.*?<a href="([^"]+)"',
+            webpage)
+        entries = [
+            self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll')
+            for ep in episode_paths
+        ]
+        entries.reverse()
+
+        return {
+            '_type': 'playlist',
+            'id': show_id,
+            'title': title,
+            'entries': entries,
         }
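
The new CrunchyrollShowPlaylistIE above turns a show page into a playlist: it scrapes relative episode links, rewrites them into absolute crunchyroll.com URLs handled by the main extractor, and reverses them so entries run in airing order. A minimal standalone sketch of the result shape, with url_result simplified and the episode paths invented:

# Sketch only: url_result is a stand-in for InfoExtractor.url_result,
# and the episode paths are hypothetical.
def url_result(url, ie=None):
    return {'_type': 'url', 'url': url, 'ie_key': ie}

episode_paths = ['/showname/episode-2-bar-654322', '/showname/episode-1-foo-654321']
entries = [url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll')
           for ep in episode_paths]
entries.reverse()  # show pages list newest first; the playlist should run oldest first

playlist = {
    '_type': 'playlist',
    'id': 'showname',
    'title': 'Show Name',
    'entries': entries,
}
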
diff --git a/youtube_dl/extractor/d8.py b/youtube_dl/extractor/d8.py
deleted file mode 100644 (file)
index 6b26ff2..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-# encoding: utf-8
-from __future__ import unicode_literals
-
-from .canalplus import CanalplusIE
-
-
-class D8IE(CanalplusIE):
-    _VALID_URL = r'https?://www\.d8\.tv/.*?/(?P<path>.*)'
-    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/d8/%s'
-    IE_NAME = 'd8.tv'
-
-    _TEST = {
-        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
-        'file': '966289.flv',
-        'info_dict': {
-            'title': 'Campagne intime - Documentaire exceptionnel',
-            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
-            'upload_date': '20131108',
-        },
-        'params': {
-            # rtmp
-            'skip_download': True,
-        },
-        'skip': 'videos get deleted after a while',
-    }
index dbcf5d6a72a5a8f44e988b506e2361e6279229ec..cf5841a7c6e92e115d7f685d8f7ce337a51cb92a 100644 (file)
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -8,16 +8,19 @@ import itertools
 from .common import InfoExtractor
 from .subtitles import SubtitlesInfoExtractor
 
-from ..utils import (
-    compat_urllib_request,
+from ..compat import (
     compat_str,
+    compat_urllib_request,
+)
+from ..utils import (
+    ExtractorError,
+    int_or_none,
     orderedSet,
     str_to_int,
-    int_or_none,
-    ExtractorError,
     unescapeHTML,
 )
 
+
 class DailymotionBaseInfoExtractor(InfoExtractor):
     @staticmethod
     def _build_request(url):
@@ -27,6 +30,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
         request.add_header('Cookie', 'ff=off')
         return request
 
+
 class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
     """Information Extractor for Dailymotion"""
 
@@ -94,7 +98,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
         # It may just embed a vevo video:
         m_vevo = re.search(
-            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
+            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
             webpage)
         if m_vevo is not None:
             vevo_id = m_vevo.group('id')
@@ -112,7 +116,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
         embed_page = self._download_webpage(embed_url, video_id,
                                             'Downloading embed page')
         info = self._search_regex(r'var info = ({.*?}),$', embed_page,
-            'video info', flags=re.MULTILINE)
+                                  'video info', flags=re.MULTILINE)
         info = json.loads(info)
         if info.get('error') is not None:
             msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
@@ -206,7 +210,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
             if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                 break
         return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
-                   for video_id in orderedSet(video_ids)]
+                for video_id in orderedSet(video_ids)]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
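
The import reshuffle at the top of this file recurs throughout the commit: Python 2/3 shims (the compat_* names) now come from the new youtube_dl/compat.py module, while pure helpers such as int_or_none stay in utils. A rough sketch of how such a shim module is typically assembled; this is an assumption for illustration, not the actual compat.py source:

# Hypothetical compat shim: try the Python 3 name, fall back to Python 2.
try:
    import urllib.request as compat_urllib_request  # Python 3
except ImportError:
    import urllib2 as compat_urllib_request  # Python 2

try:
    compat_str = unicode  # Python 2
except NameError:
    compat_str = str  # Python 3
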
index 45d66e2e663fa376cec8f4fc7931e84006ee30b9..c6b813f58ed73e9b08f2584f84288fa821e91688 100644 (file)
@@ -5,7 +5,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
 )
 
index c5529f8d455963e7f7ff6ad798f27493a0579410..5e50c63d9aca7d2642239ccf32a5cedd91b05174 100644 (file)
@@ -9,7 +9,7 @@ from .common import InfoExtractor
 class DefenseGouvFrIE(InfoExtractor):
     IE_NAME = 'defense.gouv.fr'
     _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/'
-        r'ligthboxvideo/base-de-medias/webtv/(.*)')
+                  r'ligthboxvideo/base-de-medias/webtv/(.*)')
 
     _TEST = {
         'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
@@ -26,13 +26,13 @@ class DefenseGouvFrIE(InfoExtractor):
         video_id = self._search_regex(
             r"flashvars.pvg_id=\"(\d+)\";",
             webpage, 'ID')
-        
+
         json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
-            + video_id)
+                    + video_id)
         info = self._download_webpage(json_url, title,
-                                                  'Downloading JSON config')
+                                      'Downloading JSON config')
         video_url = json.loads(info)['renditions'][0]['url']
-        
+
         return {'id': video_id,
                 'ext': 'mp4',
                 'url': video_url,
index 554df673506a88cada08b9db8300cd15d301087d..52c2d7ddf99873779b7f3223b0acfe4563e2b5d9 100644 (file)
@@ -16,9 +16,9 @@ class DiscoveryIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'MythBusters: Mission Impossible Outtakes',
             'description': ('Watch Jamie Hyneman and Adam Savage practice being'
-                ' each other -- to the point of confusing Jamie\'s dog -- and '
-                'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
-                ' back.'),
+                            ' each other -- to the point of confusing Jamie\'s dog -- and '
+                            'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
+                            ' back.'),
             'duration': 156,
         },
     }
@@ -29,7 +29,7 @@ class DiscoveryIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         video_list_json = self._search_regex(r'var videoListJSON = ({.*?});',
-            webpage, 'video list', flags=re.DOTALL)
+                                             webpage, 'video list', flags=re.DOTALL)
         video_list = json.loads(video_list_json)
         info = video_list['clips'][0]
         formats = []
index 5ae0ad5b65cdf12a896a573db78e23494864fd23..638bb33cd81b53fdc2c4bbc31f300a098fb57652 100644 (file)
@@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor):
         video_id = mobj.group('id')
         info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
         info = self._download_json(info_url, video_id)
-        date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds
+        date = time.gmtime(info['dateCreated'] / 1000)  # The timestamp is in milliseconds
 
         return {
             'id': video_id,
index 5f24ac7214a95b762d3805779d1c9517ca3d0000..14b6c00b0bd1c4d3a306b5513477e2bb6c4cd52d 100644 (file)
@@ -5,23 +5,24 @@ import os.path
 import re
 
 from .common import InfoExtractor
-from ..utils import compat_urllib_parse_unquote, url_basename
+from ..compat import compat_urllib_parse_unquote
+from ..utils import url_basename
 
 
 class DropboxIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P<id>[a-zA-Z0-9]{15})/.*'
-    _TESTS = [{
-        'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
-        'info_dict': {
-            'id': 'nelirfsxnmcfbfh',
-            'ext': 'mp4',
-            'title': 'youtube-dl test video \'ä"BaW_jenozKc'
-        }
-    },
-    {
-        'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
-        'only_matching': True,
-    },
+    _TESTS = [
+        {
+            'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
+            'info_dict': {
+                'id': 'nelirfsxnmcfbfh',
+                'ext': 'mp4',
+                'title': 'youtube-dl test video \'ä"BaW_jenozKc'
+            }
+        }, {
+            'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
+            'only_matching': True,
+        },
     ]
 
     def _real_extract(self, url):
index 9d6ce1f48cd41c390da308768f131b6c44521629..93b3c9f36094724cd751cb340f9f925f2d04554c 100644 (file)
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .subtitles import SubtitlesInfoExtractor
 from .common import ExtractorError
 from ..utils import parse_iso8601
@@ -25,8 +23,7 @@ class DRTVIE(SubtitlesInfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         programcard = self._download_json(
             'http://www.dr.dk/mu/programcard/expanded/%s' % video_id, video_id, 'Downloading video JSON')
@@ -35,7 +32,7 @@ class DRTVIE(SubtitlesInfoExtractor):
 
         title = data['Title']
         description = data['Description']
-        timestamp = parse_iso8601(data['CreatedTime'][:-5])
+        timestamp = parse_iso8601(data['CreatedTime'])
 
         thumbnail = None
         duration = None
index 63c2549d37aa528cc79f83822c7a267d391b74cc..b6bfd2b2dedc5388ef383a3cd8853bbb0c541f68 100644 (file)
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -20,8 +18,7 @@ class EbaumsWorldIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         config = self._download_xml(
             'http://www.ebaumsworld.com/video/player/%s' % video_id, video_id)
         video_url = config.find('file').text
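
This hunk shows the other pattern repeated across the commit: the two-line re.match(self._VALID_URL, url) / mobj.group('id') dance collapses into self._match_id(url). A minimal reimplementation for illustration, assuming the usual (?P<id>...) group in _VALID_URL; the real helper lives in extractor/common.py:

import re

class SketchIE(object):
    _VALID_URL = r'https?://example\.com/video/(?P<id>[0-9]+)'  # hypothetical extractor

    @classmethod
    def _match_id(cls, url):
        mobj = re.match(cls._VALID_URL, url)
        assert mobj is not None, 'callers only pass URLs that already matched'
        return mobj.group('id')

print(SketchIE._match_id('http://example.com/video/1234'))  # -> 1234
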
index f8f49a013503cc853c2bf79e345b360af3db7fee..9cb1bf301b9ae3e327e4831bdb8a7d2437b43803 100644 (file)
@@ -1,8 +1,6 @@
 from __future__ import unicode_literals
 
-import re
-
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
 )
 from .common import InfoExtractor
@@ -24,11 +22,10 @@ class EHowIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)',
-            webpage, 'video URL')
+        video_url = self._search_regex(
+            r'(?:file|source)=(http[^\'"&]*)', webpage, 'video URL')
         final_url = compat_urllib_parse.unquote(video_url)
         uploader = self._html_search_meta('uploader', webpage)
         title = self._og_search_title(webpage).replace(' | eHow', '')
index c1b4c729ef5888da0bdcebf692cbddad30dee4df..a30a1f3305ad9d2ba61552571accf23ec2625bff 100644 (file)
@@ -6,7 +6,7 @@ import random
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
 )
 
@@ -125,7 +125,7 @@ class EightTracksIE(InfoExtractor):
             info = {
                 'id': compat_str(track_data['id']),
                 'url': track_data['track_file_stream_url'],
-                'title': track_data['performer'] + u' - ' + track_data['name'],
+                'title': track_data['performer'] + ' - ' + track_data['name'],
                 'raw_title': track_data['name'],
                 'uploader_id': data['user']['login'],
                 'ext': 'm4a',
index 92ada81d24b4b542d93222d4d9ba5be877005629..4ea37ebd9f2072ea7610cfc4a8630e120fcfa81b 100644 (file)
@@ -3,7 +3,6 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from .fivemin import FiveMinIE
 from ..utils import (
     url_basename,
 )
@@ -27,11 +26,10 @@ class EngadgetIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         if video_id is not None:
-            return FiveMinIE._build_result(video_id)
+            return self.url_result('5min:%s' % video_id)
         else:
             title = url_basename(url)
             webpage = self._download_webpage(url, title)
@@ -39,5 +37,5 @@ class EngadgetIE(InfoExtractor):
             return {
                 '_type': 'playlist',
                 'title': title,
-                'entries': [FiveMinIE._build_result(id) for id in ids]
+                'entries': [self.url_result('5min:%s' % vid) for vid in ids]
             }
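
Rather than calling the removed FiveMinIE._build_result classmethod, EngadgetIE now returns '5min:<id>' URLs and lets normal URL routing resolve them; the fivemin.py hunk further down adds the bare '5min:' alternative to its _VALID_URL so these results round-trip. A toy sketch of that indirection (the id is made up):

# Simplified stand-in for InfoExtractor.url_result: the core downloader
# sees a '_type': 'url' result and re-dispatches the URL to FiveMinIE.
def url_result(url):
    return {'_type': 'url', 'url': url}

print(url_result('5min:517768116'))
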
index bb231ecb1cd81577d41c9c16ec3e0156c4abcf3e..4de8d4bc5c9107ddc361a8351ea4a63d3da40783 100644 (file)
@@ -20,7 +20,7 @@ class EpornerIE(InfoExtractor):
             'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
             'ext': 'mp4',
             'title': 'Infamous Tiffany Teen Strip Tease Video',
-            'duration': 194,
+            'duration': 1838,
             'view_count': int,
             'age_limit': 18,
         }
@@ -57,9 +57,7 @@ class EpornerIE(InfoExtractor):
             formats.append(fmt)
         self._sort_formats(formats)
 
-        duration = parse_duration(self._search_regex(
-            r'class="mbtim">([0-9:]+)</div>', webpage, 'duration',
-            fatal=False))
+        duration = parse_duration(self._html_search_meta('duration', webpage))
         view_count = str_to_int(self._search_regex(
             r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
             webpage, 'view count', fatal=False))
index 476fc22b93424b13255d5eec3578eb985dbfbdfd..e240cb8591ecc467c44d98742685740f4354cbda 100644 (file)
@@ -3,9 +3,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
-
+)
+from ..utils import (
     ExtractorError,
 )
 
index d237a82813ea2556175e32a882d87bd5d1831924..d872d828fcc8e10fea4770e1e56ab21cda027336 100644 (file)
@@ -3,8 +3,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
 )
 
index aacbf14141f6d5109d265b8e4dfa37883cee81ab..36ba331285b434136b8d3c10e6a8a16bef18e7b7 100644 (file)
@@ -3,16 +3,18 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse_urlparse,
     compat_urllib_request,
     compat_urllib_parse,
+)
+from ..utils import (
     str_to_int,
 )
 
 
 class ExtremeTubeIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
+    _VALID_URL = r'https?://(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<id>[0-9]+))(?:[/?&]|$)'
     _TESTS = [{
         'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
         'md5': '1fb9228f5e3332ec8c057d6ac36f33e0',
@@ -31,7 +33,7 @@ class ExtremeTubeIE(InfoExtractor):
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
+        video_id = mobj.group('id')
         url = 'http://www.' + mobj.group('url')
 
         req = compat_urllib_request.Request(url)
index 3ad993751759cca6900bbc9cc21b4dfe1a8589fa..1ad4e77a8a334dc0bfec62a0fb4752676e2e1435 100644 (file)
@@ -5,15 +5,18 @@ import re
 import socket
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_http_client,
     compat_str,
     compat_urllib_error,
     compat_urllib_parse,
     compat_urllib_request,
-    urlencode_postdata,
+)
+from ..utils import (
     ExtractorError,
+    int_or_none,
     limit_length,
+    urlencode_postdata,
 )
 
 
@@ -34,7 +37,6 @@ class FacebookIE(InfoExtractor):
         'info_dict': {
             'id': '637842556329505',
             'ext': 'mp4',
-            'duration': 38,
             'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
         }
     }, {
@@ -58,8 +60,8 @@ class FacebookIE(InfoExtractor):
         login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
         login_page_req.add_header('Cookie', 'locale=en_US')
         login_page = self._download_webpage(login_page_req, None,
-            note='Downloading login page',
-            errnote='Unable to download login page')
+                                            note='Downloading login page',
+                                            errnote='Unable to download login page')
         lsd = self._search_regex(
             r'<input type="hidden" name="lsd" value="([^"]*)"',
             login_page, 'lsd')
@@ -75,12 +77,12 @@ class FacebookIE(InfoExtractor):
             'legacy_return': '1',
             'timezone': '-60',
             'trynum': '1',
-            }
+        }
         request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:
             login_results = self._download_webpage(request, None,
-                note='Logging in', errnote='unable to fetch login page')
+                                                   note='Logging in', errnote='unable to fetch login page')
             if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                 self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                 return
@@ -94,7 +96,7 @@ class FacebookIE(InfoExtractor):
             check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
             check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             check_response = self._download_webpage(check_req, None,
-                note='Confirming login')
+                                                    note='Confirming login')
             if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                 self._downloader.report_warning('Unable to confirm login, you have to log in from your browser and authorize the login.')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -105,9 +107,7 @@ class FacebookIE(InfoExtractor):
         self._login()
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
         webpage = self._download_webpage(url, video_id)
 
@@ -147,6 +147,6 @@ class FacebookIE(InfoExtractor):
             'id': video_id,
             'title': video_title,
             'url': video_url,
-            'duration': int(video_data['video_duration']),
-            'thumbnail': video_data['thumbnail_src'],
+            'duration': int_or_none(video_data.get('video_duration')),
+            'thumbnail': video_data.get('thumbnail_src'),
         }
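
The last Facebook hunk swaps hard dictionary lookups for .get() plus int_or_none, so a missing duration or thumbnail degrades to None instead of raising. A simplified sketch of the helper; the real utils.int_or_none also takes scaling arguments:

def int_or_none(v, default=None):
    # minimal sketch of youtube_dl.utils.int_or_none
    return default if v is None else int(v)

video_data = {'thumbnail_src': 'http://example.com/t.jpg'}  # no video_duration key
print(int_or_none(video_data.get('video_duration')))  # -> None rather than a TypeError
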
index c6ab6952e84dc9074816f28ebb7fe6d8ce02cb47..3c39ca451a38e69a822968911e758847657380e9 100644 (file)
@@ -1,49 +1,48 @@
 # encoding: utf-8
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import (
-    determine_ext,
-)
 
 
 class FazIE(InfoExtractor):
-    IE_NAME = u'faz.net'
+    IE_NAME = 'faz.net'
     _VALID_URL = r'https?://www\.faz\.net/multimedia/videos/.*?-(?P<id>\d+)\.html'
 
     _TEST = {
-        u'url': u'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
-        u'file': u'12610585.mp4',
-        u'info_dict': {
-            u'title': u'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
-            u'description': u'md5:1453fbf9a0d041d985a47306192ea253',
+        'url': 'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
+        'info_dict': {
+            'id': '12610585',
+            'ext': 'mp4',
+            'title': 'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
+            'description': 'md5:1453fbf9a0d041d985a47306192ea253',
         },
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        self.to_screen(video_id)
+        video_id = self._match_id(url)
+
         webpage = self._download_webpage(url, video_id)
-        config_xml_url = self._search_regex(r'writeFLV\(\'(.+?)\',', webpage,
-            u'config xml url')
-        config = self._download_xml(config_xml_url, video_id,
-            u'Downloading config xml')
+        config_xml_url = self._search_regex(
+            r'writeFLV\(\'(.+?)\',', webpage, 'config xml url')
+        config = self._download_xml(
+            config_xml_url, video_id, 'Downloading config xml')
 
         encodings = config.find('ENCODINGS')
         formats = []
-        for code in ['LOW', 'HIGH', 'HQ']:
+        for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
             encoding = encodings.find(code)
             if encoding is None:
                 continue
             encoding_url = encoding.find('FILENAME').text
             formats.append({
                 'url': encoding_url,
-                'ext': determine_ext(encoding_url),
                 'format_id': code.lower(),
+                'quality': pref,
             })
+        self._sort_formats(formats)
 
-        descr = self._html_search_regex(r'<p class="Content Copy">(.*?)</p>', webpage, u'description')
+        descr = self._html_search_regex(
+            r'<p class="Content Copy">(.*?)</p>', webpage, 'description', fatal=False)
         return {
             'id': video_id,
             'title': self._og_search_title(webpage),
index c663a0f81d08650b24616d2c3c2daef262c95aa2..81ceace53289709b93d7c647f6627197320381ef 100644 (file)
@@ -1,19 +1,20 @@
 #! -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-import re
 import hashlib
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
+from ..compat import (
     compat_urllib_request,
     compat_urlparse,
 )
+from ..utils import (
+    ExtractorError,
+)
 
 
 class FC2IE(InfoExtractor):
-    _VALID_URL = r'^http://video\.fc2\.com/((?P<lang>[^/]+)/)?content/(?P<id>[^/]+)'
+    _VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)?content/(?P<id>[^/]+)'
     IE_NAME = 'fc2'
     _TEST = {
         'url': 'http://video.fc2.com/en/content/20121103kUan1KHs',
@@ -26,9 +27,7 @@ class FC2IE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         self._downloader.cookiejar.clear_session_cookies()  # must clear
 
@@ -40,7 +39,7 @@ class FC2IE(InfoExtractor):
 
         info_url = (
             "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
-            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.','%2E')))
+            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))
 
         info_webpage = self._download_webpage(
             info_url, video_id, note='Downloading info page')
index af439ccfeefeade46f75b693627b09ba6ed830d6..3191116d96a0df0e61081fbc85e5745c815f1f99 100644 (file)
@@ -4,11 +4,13 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
 )
+from ..utils import (
+    ExtractorError,
+)
 
 
 class FiredriveIE(InfoExtractor):
@@ -28,11 +30,8 @@ class FiredriveIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         url = 'http://firedrive.com/file/%s' % video_id
-
         webpage = self._download_webpage(url, video_id)
 
         if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
index c2e987ff72d7847d6c6ec955943763ea9f231e92..08ceee4ed7d5e8b96b81e7d8b9b823a5ea18e120 100644 (file)
@@ -44,9 +44,9 @@ class FirstTVIE(InfoExtractor):
         duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)
 
         like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]',
-            webpage, 'like count', fatal=False)
+                                             webpage, 'like count', fatal=False)
         dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]',
-            webpage, 'dislike count', fatal=False)
+                                                webpage, 'dislike count', fatal=False)
 
         return {
             'id': video_id,
@@ -57,4 +57,4 @@ class FirstTVIE(InfoExtractor):
             'duration': int_or_none(duration),
             'like_count': int_or_none(like_count),
             'dislike_count': int_or_none(dislike_count),
-        }
\ No newline at end of file
+        }
index 3a50bab5c9bd04c9d176a88be080033368fe46c7..5b24b921c13d497d09474fa405df5b164451dd80 100644 (file)
@@ -1,11 +1,11 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
     compat_urllib_parse,
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -13,7 +13,7 @@ from ..utils import (
 class FiveMinIE(InfoExtractor):
     IE_NAME = '5min'
     _VALID_URL = r'''(?x)
-        (?:https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js\?(.*?&)?playList=|
+        (?:https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js\?(?:.*?&)?playList=|
             5min:)
         (?P<id>\d+)
         '''
@@ -41,16 +41,11 @@ class FiveMinIE(InfoExtractor):
         },
     ]
 
-    @classmethod
-    def _build_result(cls, video_id):
-        return cls.url_result('5min:%s' % video_id, cls.ie_key())
-
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         embed_url = 'https://embed.5min.com/playerseed/?playList=%s' % video_id
         embed_page = self._download_webpage(embed_url, video_id,
-            'Downloading embed page')
+                                            'Downloading embed page')
         sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid')
         query = compat_urllib_parse.urlencode({
             'func': 'GetResults',
index d7048c8c1ae7e6ba149552a7b32ec2ab42c8a3f2..d09d1c13a70cffb725329f69368f37359d7f7a08 100644 (file)
@@ -1,25 +1,27 @@
+from __future__ import unicode_literals
+
 import re
 import random
 import json
 
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
     get_element_by_id,
     clean_html,
 )
 
 
 class FKTVIE(InfoExtractor):
-    IE_NAME = u'fernsehkritik.tv'
-    _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik\.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'
+    IE_NAME = 'fernsehkritik.tv'
+    _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'
 
     _TEST = {
-        u'url': u'http://fernsehkritik.tv/folge-1',
-        u'file': u'00011.flv',
-        u'info_dict': {
-            u'title': u'Folge 1 vom 10. April 2007',
-            u'description': u'md5:fb4818139c7cfe6907d4b83412a6864f',
+        'url': 'http://fernsehkritik.tv/folge-1',
+        'info_dict': {
+            'id': '00011',
+            'ext': 'flv',
+            'title': 'Folge 1 vom 10. April 2007',
+            'description': 'md5:fb4818139c7cfe6907d4b83412a6864f',
         },
     }
 
@@ -30,9 +32,9 @@ class FKTVIE(InfoExtractor):
         server = random.randint(2, 4)
         video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode
         start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
-            episode)
+                                               episode)
         playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
-            u'playlist', flags=re.DOTALL)
+                                      'playlist', flags=re.DOTALL)
         files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
         # TODO: return a single multipart video
         videos = []
@@ -42,7 +44,6 @@ class FKTVIE(InfoExtractor):
             videos.append({
                 'id': video_id,
                 'url': video_url,
-                'ext': determine_ext(video_url),
                 'title': clean_html(get_element_by_id('eptitle', start_webpage)),
                 'description': clean_html(get_element_by_id('contentlist', start_webpage)),
                 'thumbnail': video_thumbnail
@@ -51,14 +52,15 @@ class FKTVIE(InfoExtractor):
 
 
 class FKTVPosteckeIE(InfoExtractor):
-    IE_NAME = u'fernsehkritik.tv:postecke'
-    _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
+    IE_NAME = 'fernsehkritik.tv:postecke'
+    _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
     _TEST = {
-        u'url': u'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
-        u'file': u'0120.flv',
-        u'md5': u'262f0adbac80317412f7e57b4808e5c4',
-        u'info_dict': {
-            u"title": u"Postecke 120"
+        'url': 'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
+        'md5': '262f0adbac80317412f7e57b4808e5c4',
+        'info_dict': {
+            'id': '0120',
+            'ext': 'flv',
+            'title': 'Postecke 120',
         }
     }
 
@@ -71,8 +73,7 @@ class FKTVPosteckeIE(InfoExtractor):
         video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
         video_title = 'Postecke %d' % episode
         return {
-            'id':       video_id,
-            'url':      video_url,
-            'ext':      determine_ext(video_url),
-            'title':    video_title,
+            'id': video_id,
+            'url': video_url,
+            'title': video_title,
         }
index e09982e88b913676a2f8c75946e79c82502bf650..0c858b6544b919b1b569b4c4102447631298046e 100644 (file)
@@ -17,8 +17,8 @@ class FlickrIE(InfoExtractor):
         'info_dict': {
             'id': '5645318632',
             'ext': 'mp4',
-            "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.", 
-            "uploader_id": "forestwander-nature-pictures", 
+            "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
+            "uploader_id": "forestwander-nature-pictures",
             "title": "Dark Hollow Waterfalls"
         }
     }
@@ -37,7 +37,7 @@ class FlickrIE(InfoExtractor):
         first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')
 
         node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
-            first_xml, 'node_id')
+                                          first_xml, 'node_id')
 
         second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
         second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')
diff --git a/youtube_dl/extractor/folketinget.py b/youtube_dl/extractor/folketinget.py
new file mode 100644 (file)
index 0000000..68e2db9
--- /dev/null
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_parse_qs
+from ..utils import (
+    int_or_none,
+    parse_duration,
+    parse_iso8601,
+    xpath_text,
+)
+
+
+class FolketingetIE(InfoExtractor):
+    IE_DESC = 'Folketinget (ft.dk; Danish parliament)'
+    _VALID_URL = r'https?://(?:www\.)?ft\.dk/webtv/video/[^?#]*?\.(?P<id>[0-9]+)\.aspx'
+    _TEST = {
+        'url': 'http://www.ft.dk/webtv/video/20141/eru/td.1165642.aspx?as=1#player',
+        'info_dict': {
+            'id': '1165642',
+            'ext': 'mp4',
+            'title': 'Åbent samråd i Erhvervsudvalget',
+            'description': 'Åbent samråd med erhvervs- og vækstministeren om regeringens politik på teleområdet',
+            'view_count': int,
+            'width': 768,
+            'height': 432,
+            'tbr': 928000,
+            'timestamp': 1416493800,
+            'upload_date': '20141120',
+            'duration': 3960,
+        },
+        'params': {
+            'skip_download': 'rtmpdump required',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._og_search_title(webpage)
+        description = self._html_search_regex(
+            r'(?s)<div class="video-item-agenda"[^>]*>(.*?)<',
+            webpage, 'description', fatal=False)
+
+        player_params = compat_parse_qs(self._search_regex(
+            r'<embed src="http://ft\.arkena\.tv/flash/ftplayer\.swf\?([^"]+)"',
+            webpage, 'player params'))
+        xml_url = player_params['xml'][0]
+        doc = self._download_xml(xml_url, video_id)
+
+        timestamp = parse_iso8601(xpath_text(doc, './/date'))
+        duration = parse_duration(xpath_text(doc, './/duration'))
+        width = int_or_none(xpath_text(doc, './/width'))
+        height = int_or_none(xpath_text(doc, './/height'))
+        view_count = int_or_none(xpath_text(doc, './/views'))
+
+        formats = [{
+            'format_id': n.attrib['bitrate'],
+            'url': xpath_text(n, './url', fatal=True),
+            'tbr': int_or_none(n.attrib['bitrate']),
+        } for n in doc.findall('.//streams/stream')]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'description': description,
+            'timestamp': timestamp,
+            'width': width,
+            'height': height,
+            'duration': duration,
+            'view_count': view_count,
+        }
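
FolketingetIE recovers its configuration by pulling the flash embed's query string apart with compat_parse_qs and then fetching the XML document it names. The shim resolves to (roughly) the standard-library query parser; a small usage sketch with a hypothetical query string:

try:
    from urllib.parse import parse_qs  # Python 3 home of compat_parse_qs
except ImportError:
    from urlparse import parse_qs  # Python 2

player_params = parse_qs('xml=http://ft.arkena.tv/some-config.xml&autoplay=1')
print(player_params['xml'][0])  # -> http://ft.arkena.tv/some-config.xml
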
index 7d56b9be93a0332e70381c3a46b748c6d39e5b6a..7187e075291af16151e28b8041d7e06e49887fa7 100644 (file)
@@ -3,12 +3,14 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
-    unified_strdate,
-    str_to_int,
-    parse_duration,
+)
+from ..utils import (
     clean_html,
+    parse_duration,
+    str_to_int,
+    unified_strdate,
 )
 
 
@@ -31,9 +33,7 @@ class FourTubeIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage_url = 'http://www.4tube.com/videos/' + video_id
         webpage = self._download_webpage(webpage_url, video_id)
 
@@ -55,7 +55,7 @@ class FourTubeIE(InfoExtractor):
         description = self._html_search_meta('description', webpage, 'description')
         if description:
             upload_date = self._search_regex(r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date',
-                fatal=False)
+                                             fatal=False)
             if upload_date:
                 upload_date = unified_strdate(upload_date)
             view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False)
@@ -65,9 +65,9 @@ class FourTubeIE(InfoExtractor):
 
         token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources))
         headers = {
-                b'Content-Type': b'application/x-www-form-urlencoded',
-                b'Origin': b'http://www.4tube.com',
-                }
+            b'Content-Type': b'application/x-www-form-urlencoded',
+            b'Origin': b'http://www.4tube.com',
+        }
         token_req = compat_urllib_request.Request(token_url, b'{}', headers)
         tokens = self._download_json(token_req, video_id)
 
@@ -76,7 +76,7 @@ class FourTubeIE(InfoExtractor):
             'format_id': format + 'p',
             'resolution': format + 'p',
             'quality': int(format),
-            } for format in sources]
+        } for format in sources]
 
         self._sort_formats(formats)
 
@@ -92,4 +92,4 @@ class FourTubeIE(InfoExtractor):
             'duration': duration,
             'age_limit': 18,
             'webpage_url': webpage_url,
-        }
\ No newline at end of file
+        }
diff --git a/youtube_dl/extractor/foxgay.py b/youtube_dl/extractor/foxgay.py
new file mode 100644 (file)
index 0000000..08b8ea3
--- /dev/null
@@ -0,0 +1,48 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class FoxgayIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?foxgay\.com/videos/(?:\S+-)?(?P<id>\d+)\.shtml'
+    _TEST = {
+        'url': 'http://foxgay.com/videos/fuck-turkish-style-2582.shtml',
+        'md5': '80d72beab5d04e1655a56ad37afe6841',
+        'info_dict': {
+            'id': '2582',
+            'ext': 'mp4',
+            'title': 'md5:6122f7ae0fc6b21ebdf59c5e083ce25a',
+            'description': 'md5:5e51dc4405f1fd315f7927daed2ce5cf',
+            'age_limit': 18,
+            'thumbnail': 're:https?://.*\.jpg$',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(
+            r'<title>(?P<title>.*?)</title>',
+            webpage, 'title', fatal=False)
+        description = self._html_search_regex(
+            r'<div class="ico_desc"><h2>(?P<description>.*?)</h2>',
+            webpage, 'description', fatal=False)
+
+        # Find the URL for the iFrame which contains the actual video.
+        iframe = self._download_webpage(
+            self._html_search_regex(r'iframe src="(?P<frame>.*?)"', webpage, 'video frame'),
+            video_id)
+        video_url = self._html_search_regex(
+            r"v_path = '(?P<vid>http://.*?)'", iframe, 'url')
+        thumb_url = self._html_search_regex(
+            r"t_path = '(?P<thumb>http://.*?)'", iframe, 'thumbnail', fatal=False)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'description': description,
+            'thumbnail': thumb_url,
+            'age_limit': 18,
+        }
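
In the test above, 'title' and 'description' are written as 'md5:<hex>': in youtube-dl's test helpers that prefix means the field is compared by MD5 digest instead of verbatim, which keeps long or explicit strings out of the source. A sketch of the convention:

import hashlib

def field_matches(expected, got):
    # mirrors the 'md5:' comparison convention used by the test helper
    if expected.startswith('md5:'):
        return expected[4:] == hashlib.md5(got.encode('utf-8')).hexdigest()
    return expected == got

title = 'Example title'
print(field_matches('md5:' + hashlib.md5(title.encode('utf-8')).hexdigest(), title))  # -> True
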
diff --git a/youtube_dl/extractor/foxnews.py b/youtube_dl/extractor/foxnews.py
new file mode 100644 (file)
index 0000000..917f76b
--- /dev/null
@@ -0,0 +1,94 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_iso8601,
+    int_or_none,
+)
+
+
+class FoxNewsIE(InfoExtractor):
+    _VALID_URL = r'https?://video\.foxnews\.com/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips',
+            'md5': '32aaded6ba3ef0d1c04e238d01031e5e',
+            'info_dict': {
+                'id': '3937480',
+                'ext': 'flv',
+                'title': 'Frozen in Time',
+                'description': 'Doctors baffled by 16-year-old girl that is the size of a toddler',
+                'duration': 265,
+                'timestamp': 1304411491,
+                'upload_date': '20110503',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        {
+            'url': 'http://video.foxnews.com/v/3922535568001/rep-luis-gutierrez-on-if-obamas-immigration-plan-is-legal/#sp=show-clips',
+            'md5': '5846c64a1ea05ec78175421b8323e2df',
+            'info_dict': {
+                'id': '3922535568001',
+                'ext': 'mp4',
+                'title': "Rep. Luis Gutierrez on if Obama's immigration plan is legal",
+                'description': "Congressman discusses the president's executive action",
+                'duration': 292,
+                'timestamp': 1417662047,
+                'upload_date': '20141204',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        {
+            'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com',
+            'only_matching': True,
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        video = self._download_json(
+            'http://video.foxnews.com/v/feed/video/%s.js?template=fox' % video_id, video_id)
+
+        item = video['channel']['item']
+        title = item['title']
+        description = item['description']
+        timestamp = parse_iso8601(item['dc-date'])
+
+        media_group = item['media-group']
+        duration = None
+        formats = []
+        for media in media_group['media-content']:
+            attributes = media['@attributes']
+            video_url = attributes['url']
+            if video_url.endswith('.f4m'):
+                formats.extend(self._extract_f4m_formats(video_url + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124', video_id))
+            elif video_url.endswith('.m3u8'):
+                formats.extend(self._extract_m3u8_formats(video_url, video_id, 'flv'))
+            elif not video_url.endswith('.smil'):
+                duration = int_or_none(attributes.get('duration'))
+                formats.append({
+                    'url': video_url,
+                    'format_id': media['media-category']['@attributes']['label'],
+                    'preference': 1,
+                    'vbr': int_or_none(attributes.get('bitrate')),
+                    'filesize': int_or_none(attributes.get('fileSize'))
+                })
+        self._sort_formats(formats)
+
+        media_thumbnail = media_group['media-thumbnail']['@attributes']
+        thumbnails = [{
+            'url': media_thumbnail['url'],
+            'width': int_or_none(media_thumbnail.get('width')),
+            'height': int_or_none(media_thumbnail.get('height')),
+        }] if media_thumbnail else []
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'timestamp': timestamp,
+            'formats': formats,
+            'thumbnails': thumbnails,
+        }
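
FoxNewsIE dispatches on the media URL's extension (.f4m expands into HDS formats, .m3u8 into HLS, .smil is ignored, anything else becomes a single progressive format) and then calls _sort_formats, which orders the list worst-to-best so the last entry is the preferred one. A crude sketch of that ordering over the fields used above; the real tie-breaking in common.py considers more keys:

formats = [
    {'format_id': '180p', 'preference': 1, 'vbr': 450},
    {'format_id': '720p', 'preference': 1, 'vbr': 2500},
    {'format_id': 'manifest', 'preference': 0, 'vbr': None},
]
# stand-in for InfoExtractor._sort_formats: best format sorts last
formats.sort(key=lambda f: (f.get('preference') or 0, f.get('vbr') or 0))
print([f['format_id'] for f in formats])  # -> ['manifest', '180p', '720p']
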
index 898e0dda780df7a83f91226216b7fce4d59818c2..0c29721629a25369621072e4f451e7decdc8df0b 100644 (file)
@@ -5,7 +5,7 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_parse_qs,
     compat_urlparse,
 )
index 0b3374d97d7c72a559afc1ed6906549c092491d9..bbc760a4990cac1b6cdb731c161d61c853a72729 100644 (file)
@@ -6,13 +6,15 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
+    compat_urllib_parse_urlparse,
     compat_urlparse,
-    ExtractorError,
+)
+from ..utils import (
     clean_html,
-    parse_duration,
-    compat_urllib_parse_urlparse,
+    ExtractorError,
     int_or_none,
+    parse_duration,
 )
 
 
@@ -26,6 +28,19 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
         if info.get('status') == 'NOK':
             raise ExtractorError(
                 '%s returned error: %s' % (self.IE_NAME, info['message']), expected=True)
+        allowed_countries = info['videos'][0].get('geoblocage')
+        if allowed_countries:
+            georestricted = True
+            geo_info = self._download_json(
+                'http://geo.francetv.fr/ws/edgescape.json', video_id,
+                'Downloading geo restriction info')
+            country = geo_info['reponse']['geo_info']['country_code']
+            if country not in allowed_countries:
+                raise ExtractorError(
+                    'The video is not available from your location',
+                    expected=True)
+        else:
+            georestricted = False
 
         formats = []
         for video in info['videos']:
@@ -36,6 +51,10 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
                 continue
             format_id = video['format']
             if video_url.endswith('.f4m'):
+                if georestricted:
+                    # See https://github.com/rg3/youtube-dl/issues/3963
+                    # m3u8 urls work fine
+                    continue
                 video_url_parsed = compat_urllib_parse_urlparse(video_url)
                 f4m_url = self._download_webpage(
                     'http://hdfauth.francetv.fr/esi/urltokengen2.html?url=%s' % video_url_parsed.path,
@@ -46,7 +65,7 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
                         f4m_format['preference'] = 1
                     formats.extend(f4m_formats)
             elif video_url.endswith('.m3u8'):
-                formats.extend(self._extract_m3u8_formats(video_url, video_id))
+                formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4'))
             elif video_url.startswith('rtmp'):
                 formats.append({
                     'url': video_url,
@@ -58,7 +77,7 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
                 formats.append({
                     'url': video_url,
                     'format_id': format_id,
-                    'preference': 2,
+                    'preference': -1,
                 })
         self._sort_formats(formats)
 
@@ -93,7 +112,6 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
 
     _TESTS = [{
         'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
-        'md5': '9cecf35f99c4079c199e9817882a9a1c',
         'info_dict': {
             'id': '84981923',
             'ext': 'flv',
@@ -235,7 +253,7 @@ class GenerationQuoiIE(InfoExtractor):
         info_json = self._download_webpage(info_url, name)
         info = json.loads(info_json)
         return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
-            ie='Dailymotion')
+                               ie='Dailymotion')
 
 
 class CultureboxIE(FranceTVBaseInfoExtractor):
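
The FranceTV base class now asks geo.francetv.fr's edgescape endpoint for the caller's country code and fails fast when it is not in the video's 'geoblocage' list; for geo-restricted videos the .f4m variants are then skipped because, per the comment referencing issue 3963, only the m3u8 URLs play. The check restated in isolation, with the JSON shapes taken from the hunk above:

def check_georestriction(info, geo_info):
    # Returns True when the video is geo-restricted (i.e. .f4m formats
    # should be skipped); raises when the caller's country is blocked.
    allowed_countries = info['videos'][0].get('geoblocage')
    if not allowed_countries:
        return False
    country = geo_info['reponse']['geo_info']['country_code']
    if country not in allowed_countries:
        raise Exception('The video is not available from your location')
    return True

print(check_georestriction(
    {'videos': [{'geoblocage': ['FR']}]},
    {'reponse': {'geo_info': {'country_code': 'FR'}}}))  # -> True
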
diff --git a/youtube_dl/extractor/freevideo.py b/youtube_dl/extractor/freevideo.py
new file mode 100644 (file)
index 0000000..f755e3c
--- /dev/null
@@ -0,0 +1,38 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class FreeVideoIE(InfoExtractor):
+    _VALID_URL = r'^http://www.freevideo.cz/vase-videa/(?P<id>[^.]+)\.html(?:$|[?#])'
+
+    _TEST = {
+        'url': 'http://www.freevideo.cz/vase-videa/vysukany-zadecek-22033.html',
+        'info_dict': {
+            'id': 'vysukany-zadecek-22033',
+            'ext': 'mp4',
+            "title": "vysukany-zadecek-22033",
+            "age_limit": 18,
+        },
+        'skip': 'Blocked outside .cz',
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage, handle = self._download_webpage_handle(url, video_id)
+        if '//www.czechav.com/' in handle.geturl():
+            raise ExtractorError(
+                'Access to freevideo is blocked from your location',
+                expected=True)
+
+        video_url = self._search_regex(
+            r'\s+url: "(http://[a-z0-9-]+.cdn.freevideo.cz/stream/.*?/video.mp4)"',
+            webpage, 'video URL')
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': video_id,
+            'age_limit': 18,
+        }
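
FreeVideoIE detects the geo-block from where the request actually lands: _download_webpage_handle also returns the response handle, and handle.geturl() yields the post-redirect URL, so a bounce to czechav.com means access is blocked. The same idea with the bare standard library (the URL is a placeholder):

import urllib.request  # Python 3 spelling; the extractor goes through the compat shims

resp = urllib.request.urlopen('http://www.freevideo.cz/vase-videa/placeholder.html')
if '//www.czechav.com/' in resp.geturl():  # final URL after any redirects
    raise RuntimeError('Access to freevideo is blocked from your location')
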
index d966e8403dfe9e03765d6a2eb0ab895a0da4100a..a49fc1151cf324f5e4b61cbd4f1d586718410626 100644 (file)
@@ -8,7 +8,7 @@ from ..utils import ExtractorError
 
 
 class FunnyOrDieIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
+    _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
     _TESTS = [{
         'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
         'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',
@@ -21,7 +21,6 @@ class FunnyOrDieIE(InfoExtractor):
         },
     }, {
         'url': 'http://www.funnyordie.com/embed/e402820827',
-        'md5': '29f4c5e5a61ca39dfd7e8348a75d0aad',
         'info_dict': {
             'id': 'e402820827',
             'ext': 'mp4',
@@ -29,6 +28,9 @@ class FunnyOrDieIE(InfoExtractor):
             'description': 'Please use this to sell something.  www.jonlajoie.com',
             'thumbnail': 're:^http:.*\.jpg$',
         },
+    }, {
+        'url': 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -37,7 +39,7 @@ class FunnyOrDieIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
 
-        links = re.findall(r'<source src="([^"]+/v)\d+\.([^"]+)" type=\'video', webpage)
+        links = re.findall(r'<source src="([^"]+/v)[^"]+\.([^"]+)" type=\'video', webpage)
         if not links:
             raise ExtractorError('No media links available for %s' % video_id)
 
index 11fee3d31e88833b8074a1b59cff885eeffa46d3..cf8e90d7dbe9483efffe7894bedbe437746c5da1 100644 (file)
@@ -11,7 +11,7 @@ class GamekingsIE(InfoExtractor):
         'url': 'http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/',
         # MD5 is flaky, seems to change regularly
         # 'md5': '2f32b1f7b80fdc5cb616efb4f387f8a3',
-        u'info_dict': {
+        'info_dict': {
             'id': '20130811',
             'ext': 'mp4',
             'title': 'Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review',
index 3d67b9d60242760ff3e32c9fbbbcab39542f01da..47373e21540030d4c9a19dbfc1c5943f468fea4f 100644 (file)
@@ -4,16 +4,17 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urlparse,
+)
+from ..utils import (
     unescapeHTML,
-    get_meta_content,
 )
 
 
 class GameSpotIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
+    _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
     _TEST = {
         'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
         'md5': 'b2a30deaa8654fcccd43713a6b6a4825',
@@ -26,10 +27,10 @@ class GameSpotIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        page_id = mobj.group('page_id')
+        page_id = self._match_id(url)
         webpage = self._download_webpage(url, page_id)
-        data_video_json = self._search_regex(r'data-video=["\'](.*?)["\']', webpage, 'data video')
+        data_video_json = self._search_regex(
+            r'data-video=["\'](.*?)["\']', webpage, 'data video')
         data_video = json.loads(unescapeHTML(data_video_json))
 
         # Transform the manifest url to a link to the mp4 files
@@ -41,7 +42,8 @@ class GameSpotIE(InfoExtractor):
         http_path = f4m_path[1:].split('/', 1)[1]
         http_template = re.sub(QUALITIES_RE, r'%s', http_path)
         http_template = http_template.replace('.csmil/manifest.f4m', '')
-        http_template = compat_urlparse.urljoin('http://video.gamespotcdn.com/', http_template)
+        http_template = compat_urlparse.urljoin(
+            'http://video.gamespotcdn.com/', http_template)
         formats = []
         for q in qualities:
             formats.append({
@@ -52,8 +54,9 @@ class GameSpotIE(InfoExtractor):
 
         return {
             'id': data_video['guid'],
+            'display_id': page_id,
             'title': compat_urllib_parse.unquote(data_video['title']),
             'formats': formats,
-            'description': get_meta_content('description', webpage),
+            'description': self._html_search_meta('description', webpage),
             'thumbnail': self._og_search_thumbnail(webpage),
         }
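
GameSpot's hunk keeps the manifest-to-MP4 trick intact: drop the leading segment of the f4m path, substitute the comma-separated quality token with %s, strip the .csmil/manifest.f4m suffix, and re-root the template on the CDN host. A sketch with an invented manifest path; QUALITIES_RE is not shown in this diff, so the regex below is an assumption about its shape:

import re
try:
    from urllib.parse import urljoin  # compat_urlparse.urljoin on Python 3
except ImportError:
    from urlparse import urljoin

QUALITIES_RE = r'((,\d+)+,?)'  # assumed: matches a run like ',400,700,1000,'
f4m_path = '/z/gamespot/videos/some-video_,400,700,1000,.mp4.csmil/manifest.f4m'  # hypothetical

http_path = f4m_path[1:].split('/', 1)[1]
http_template = re.sub(QUALITIES_RE, r'%s', http_path)
http_template = http_template.replace('.csmil/manifest.f4m', '')
http_template = urljoin('http://video.gamespotcdn.com/', http_template)
print(http_template % ',700,')  # one progressive URL per listed quality
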
index de14ae1fb1edd0600488b8f04c7b400bf310ef5a..d453ec010937b1815bf3a22d568a70ce818224e6 100644 (file)
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
 )
index dfc2ef4e72f0804bc0a9e8f0099252a1f89a83c3..2b4d8c62f5696fce9c144bac7c1867e552d3e625 100644 (file)
@@ -7,11 +7,12 @@ import re
 
 from .common import InfoExtractor
 from .youtube import YoutubeIE
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urlparse,
     compat_xml_parse_error,
-
+)
+from ..utils import (
     determine_ext,
     ExtractorError,
     float_or_none,
@@ -28,6 +29,7 @@ from .brightcove import BrightcoveIE
 from .ooyala import OoyalaIE
 from .rutv import RUTVIE
 from .smotri import SmotriIE
+from .condenast import CondeNastIE
 
 
 class GenericIE(InfoExtractor):
@@ -98,6 +100,22 @@ class GenericIE(InfoExtractor):
                 'uploader': 'Championat',
             },
         },
+        {
+            # https://github.com/rg3/youtube-dl/issues/3541
+            'add_ie': ['Brightcove'],
+            'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
+            'info_dict': {
+                'id': '3866516442001',
+                'ext': 'mp4',
+                'title': 'Leer mij vrouwen kennen: Aflevering 1',
+                'description': 'Leer mij vrouwen kennen: Aflevering 1',
+                'uploader': 'SBS Broadcasting',
+            },
+            'skip': 'Restricted to Netherlands',
+            'params': {
+                'skip_download': True,  # m3u8 download
+            },
+        },
         # Direct link to a video
         {
             'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
@@ -324,7 +342,7 @@ class GenericIE(InfoExtractor):
                 'ext': 'mp4',
                 'age_limit': 18,
                 'uploader': 'www.handjobhub.com',
-                'title': 'Busty Blonde Siri Tit Fuck While Wank at Handjob Hub',
+                'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
             }
         },
         # RSS feed
@@ -379,6 +397,87 @@ class GenericIE(InfoExtractor):
                 'uploader': 'education-portal.com',
             },
         },
+        {
+            'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
+            'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
+            'info_dict': {
+                'id': 'uxjb0lwrcz',
+                'ext': 'mp4',
+                'title': 'Conversation about Hexagonal Rails Part 1 - ThoughtWorks',
+                'duration': 1715.0,
+                'uploader': 'thoughtworks.wistia.com',
+            },
+        },
+        # Direct download with broken HEAD
+        {
+            'url': 'http://ai-radio.org:8000/radio.opus',
+            'info_dict': {
+                'id': 'radio',
+                'ext': 'opus',
+                'title': 'radio',
+            },
+            'params': {
+                'skip_download': True,  # infinite live stream
+            },
+            'expected_warnings': [
+                r'501.*Not Implemented'
+            ],
+        },
+        # Soundcloud embed
+        {
+            'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
+            'info_dict': {
+                'id': '174391317',
+                'ext': 'mp3',
+                'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
+                'uploader': 'Sophos Security',
+                'title': 'Chet Chat 171 - Oct 29, 2014',
+                'upload_date': '20141029',
+            }
+        },
+        # Livestream embed
+        {
+            'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
+            'info_dict': {
+                'id': '67864563',
+                'ext': 'flv',
+                'upload_date': '20141112',
+                'title': 'Rosetta #CometLanding webcast HL 10',
+            }
+        },
+        # LazyYT
+        {
+            'url': 'http://discourse.ubuntu.com/t/unity-8-desktop-mode-windows-on-mir/1986',
+            'info_dict': {
+                'title': 'Unity 8 desktop-mode windows on Mir! - Ubuntu Discourse',
+            },
+            'playlist_mincount': 2,
+        },
+        # Direct link with incorrect MIME type
+        {
+            'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
+            'md5': '4ccbebe5f36706d85221f204d7eb5913',
+            'info_dict': {
+                'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
+                'id': '5_Lennart_Poettering_-_Systemd',
+                'ext': 'webm',
+                'title': '5_Lennart_Poettering_-_Systemd',
+                'upload_date': '20141120',
+            },
+            'expected_warnings': [
+                'URL could be a direct video link, returning it as such.'
+            ]
+        },
+        # Cinchcast embed
+        {
+            'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
+            'info_dict': {
+                'id': '7141703',
+                'ext': 'mp3',
+                'upload_date': '20141126',
+                'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
+            }
+        },
     ]
 
     def report_following_redirect(self, new_url):
@@ -471,11 +570,12 @@ class GenericIE(InfoExtractor):
 
             if default_search in ('error', 'fixup_error'):
                 raise ExtractorError(
-                    ('%r is not a valid URL. '
-                     'Set --default-search "ytsearch" (or run  youtube-dl "ytsearch:%s" ) to search YouTube'
-                    % (url, url), expected=True)
+                    '%r is not a valid URL. '
+                    'Set --default-search "ytsearch" (or run  youtube-dl "ytsearch:%s" ) to search YouTube'
+                    % (url, url), expected=True)
             else:
-                assert ':' in default_search
+                if ':' not in default_search:
+                    default_search += ':'
                 return self.url_result(default_search + url)
 
         url, smuggled_data = unsmuggle_url(url)
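
With the assertion gone, a bare prefix such as ytsearch (no trailing colon) no
longer crashes; the colon is appended before the query is handed to
url_result(). A standalone sketch of that normalization:

    def normalize_default_search(default_search, query):
        # 'ytsearch' and 'ytsearch:' now behave the same: both yield
        # 'ytsearch:<query>' for the search extractor to pick up.
        if ':' not in default_search:
            default_search += ':'
        return default_search + query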
@@ -490,14 +590,14 @@ class GenericIE(InfoExtractor):
         self.to_screen('%s: Requesting header' % video_id)
 
         head_req = HEADRequest(url)
-        response = self._request_webpage(
+        head_response = self._request_webpage(
             head_req, video_id,
             note=False, errnote='Could not send HEAD request to %s' % url,
             fatal=False)
 
-        if response is not False:
+        if head_response is not False:
             # Check for redirect
-            new_url = response.geturl()
+            new_url = head_response.geturl()
             if url != new_url:
                 self.report_following_redirect(new_url)
                 if force_videoid:
@@ -505,33 +605,53 @@ class GenericIE(InfoExtractor):
                         new_url, {'force_videoid': force_videoid})
                 return self.url_result(new_url)
 
-            # Check for direct link to a video
-            content_type = response.headers.get('Content-Type', '')
-            m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
-            if m:
-                upload_date = response.headers.get('Last-Modified')
-                if upload_date:
-                    upload_date = unified_strdate(upload_date)
-                return {
-                    'id': video_id,
-                    'title': os.path.splitext(url_basename(url))[0],
-                    'formats': [{
-                        'format_id': m.group('format_id'),
-                        'url': url,
-                        'vcodec': 'none' if m.group('type') == 'audio' else None
-                    }],
-                    'upload_date': upload_date,
-                }
+        full_response = None
+        if head_response is False:
+            full_response = self._request_webpage(url, video_id)
+            head_response = full_response
+
+        # Check for direct link to a video
+        content_type = head_response.headers.get('Content-Type', '')
+        m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
+        if m:
+            upload_date = unified_strdate(
+                head_response.headers.get('Last-Modified'))
+            return {
+                'id': video_id,
+                'title': os.path.splitext(url_basename(url))[0],
+                'direct': True,
+                'formats': [{
+                    'format_id': m.group('format_id'),
+                    'url': url,
+                    'vcodec': 'none' if m.group('type') == 'audio' else None
+                }],
+                'upload_date': upload_date,
+            }
 
         if not self._downloader.params.get('test', False) and not is_intentional:
             self._downloader.report_warning('Falling back on generic information extractor.')
 
-        try:
-            webpage = self._download_webpage(url, video_id)
-        except ValueError:
-            # since this is the last-resort InfoExtractor, if
-            # this error is thrown, it'll be thrown here
-            raise ExtractorError('Failed to download URL: %s' % url)
+        if not full_response:
+            full_response = self._request_webpage(url, video_id)
+
+        # Maybe it's a direct link to a video?
+        # Be careful not to download the whole thing!
+        first_bytes = full_response.read(512)
+        if not re.match(r'^\s*<', first_bytes.decode('utf-8', 'replace')):
+            self._downloader.report_warning(
+                'URL could be a direct video link, returning it as such.')
+            upload_date = unified_strdate(
+                head_response.headers.get('Last-Modified'))
+            return {
+                'id': video_id,
+                'title': os.path.splitext(url_basename(url))[0],
+                'direct': True,
+                'url': url,
+                'upload_date': upload_date,
+            }
+
+        webpage = self._webpage_read_content(
+            full_response, url, video_id, prefix=first_bytes)
 
         self.report_extraction(video_id)
 
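Note that the new fallback reads only the first 512 bytes before deciding, so
an endless live stream (like the radio.opus test above) is never pulled in
full just to sniff it. The heuristic itself is tiny, roughly:

    import re

    def looks_like_html(first_bytes):
        # HTML starts, after optional whitespace, with '<'; anything else
        # is treated as a direct media link and returned as such.
        return re.match(r'^\s*<', first_bytes.decode('utf-8', 'replace')) is not None
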
@@ -608,13 +728,13 @@ class GenericIE(InfoExtractor):
         if mobj:
             player_url = unescapeHTML(mobj.group('url'))
             surl = smuggle_url(player_url, {'Referer': url})
-            return self.url_result(surl, 'Vimeo')
+            return self.url_result(surl)
 
         # Look for embedded (swf embed) Vimeo player
         mobj = re.search(
-            r'<embed[^>]+?src="(https?://(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
+            r'<embed[^>]+?src="((?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
         if mobj:
-            return self.url_result(mobj.group(1), 'Vimeo')
+            return self.url_result(mobj.group(1))
 
         # Look for embedded YouTube player
         matches = re.findall(r'''(?x)
@@ -622,7 +742,8 @@ class GenericIE(InfoExtractor):
                 <iframe[^>]+?src=|
                 data-video-url=|
                 <embed[^>]+?src=|
-                embedSWF\(?:\s*
+                embedSWF\(?:\s*|
+                new\s+SWFObject\(
             )
             (["\'])
                 (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
@@ -632,6 +753,12 @@ class GenericIE(InfoExtractor):
             return _playlist_from_matches(
                 matches, lambda m: unescapeHTML(m[1]))
 
+        # Look for lazyYT YouTube embed
+        matches = re.findall(
+            r'class="lazyYT" data-youtube-id="([^"]+)"', webpage)
+        if matches:
+            return _playlist_from_matches(matches, lambda m: unescapeHTML(m))
+
         # Look for embedded Dailymotion player
         matches = re.findall(
             r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/embed/video/.+?)\1', webpage)
@@ -651,17 +778,20 @@ class GenericIE(InfoExtractor):
 
         # Look for embedded Wistia player
         match = re.search(
-            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
+            r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
         if match:
+            embed_url = self._proto_relative_url(
+                unescapeHTML(match.group('url')))
             return {
                 '_type': 'url_transparent',
-                'url': unescapeHTML(match.group('url')),
+                'url': embed_url,
                 'ie_key': 'Wistia',
                 'uploader': video_uploader,
                 'title': video_title,
                 'id': video_id,
             }
-        match = re.search(r'(?:id=["\']wistia_|data-wistiaid=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
+
+        match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
         if match:
             return {
                 '_type': 'url_transparent',
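
The widened regex now also accepts scheme-relative //fast.wistia.net/ embeds,
which is why the match goes through _proto_relative_url() first. Judging from
its use here, that helper amounts to something like this sketch (assumed, not
the actual utils code):

    def proto_relative_url(url, scheme='http:'):
        # '//fast.wistia.net/embed/iframe/x' -> 'http://fast.wistia.net/embed/iframe/x'
        if url and url.startswith('//'):
            return scheme + url
        return url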
@@ -675,7 +805,7 @@ class GenericIE(InfoExtractor):
         # Look for embedded blip.tv player
         mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
         if mobj:
-            return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
+            return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
         mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
         if mobj:
             return self.url_result(mobj.group(1), 'BlipTV')
@@ -711,7 +841,7 @@ class GenericIE(InfoExtractor):
 
         # Look for Ooyala videos
         mobj = (re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
-             re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage))
+                re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage))
         if mobj is not None:
             return OoyalaIE._build_url_result(mobj.group('ec'))
 
@@ -805,7 +935,7 @@ class GenericIE(InfoExtractor):
 
         # Look for embedded soundcloud player
         mobj = re.search(
-            r'<iframe src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
+            r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
             webpage)
         if mobj is not None:
             url = unescapeHTML(mobj.group('url'))
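
The old pattern required src to be the first attribute of the iframe; the new
one tolerates any number of attributes before it. A quick standalone check
(the embed markup is made up for illustration, reusing the track ID from the
test above):

    import re

    html = ('<iframe width="100%" height="166" scrolling="no" '
            'src="https://w.soundcloud.com/player/?url='
            'https%3A%2F%2Fapi.soundcloud.com%2Ftracks%2F174391317">')
    mobj = re.search(
        r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
        html)
    print(mobj.group('url'))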
@@ -841,12 +971,31 @@ class GenericIE(InfoExtractor):
         if mobj is not None:
             return self.url_result(mobj.group('url'), 'SBS')
 
+        # Look for embedded Cinchcast player
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
+            webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'Cinchcast')
+
         mobj = re.search(
-            r'<iframe[^>]+?src=(["\'])(?P<url>https?://m\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
+            r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
             webpage)
         if mobj is not None:
             return self.url_result(mobj.group('url'), 'MLB')
 
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
+            webpage)
+        if mobj is not None:
+            return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
+
+        mobj = re.search(
+            r'<iframe[^>]+src="(?P<url>https?://new\.livestream\.com/[^"]+/player[^"]+)"',
+            webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'Livestream')
+
         def check_video(vurl):
             vpath = compat_urlparse.urlparse(vurl).path
             vext = determine_ext(vpath)
@@ -894,7 +1043,7 @@ class GenericIE(InfoExtractor):
                 found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
         if not found:
             # HTML5 video
-            found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]+)? src="([^"]+)"', webpage)
+            found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage)
         if not found:
             found = re.search(
                 r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
@@ -940,4 +1089,3 @@ class GenericIE(InfoExtractor):
                 '_type': 'playlist',
                 'entries': entries,
             }
-
diff --git a/youtube_dl/extractor/giantbomb.py b/youtube_dl/extractor/giantbomb.py
new file mode 100644 (file)
index 0000000..87cd191
--- /dev/null
@@ -0,0 +1,81 @@
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    unescapeHTML,
+    qualities,
+    int_or_none,
+)
+
+
+class GiantBombIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?giantbomb\.com/videos/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)'
+    _TEST = {
+        'url': 'http://www.giantbomb.com/videos/quick-look-destiny-the-dark-below/2300-9782/',
+        'md5': '57badeface303ecf6b98b812de1b9018',
+        'info_dict': {
+            'id': '2300-9782',
+            'display_id': 'quick-look-destiny-the-dark-below',
+            'ext': 'mp4',
+            'title': 'Quick Look: Destiny: The Dark Below',
+            'description': 'md5:0aa3aaf2772a41b91d44c63f30dfad24',
+            'duration': 2399,
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        video = json.loads(unescapeHTML(self._search_regex(
+            r'data-video="([^"]+)"', webpage, 'data-video')))
+
+        duration = int_or_none(video.get('lengthSeconds'))
+
+        quality = qualities([
+            'f4m_low', 'progressive_low', 'f4m_high',
+            'progressive_high', 'f4m_hd', 'progressive_hd'])
+
+        formats = []
+        for format_id, video_url in video['videoStreams'].items():
+            if format_id == 'f4m_stream':
+                continue
+            if video_url.endswith('.f4m'):
+                f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.3.1', display_id)
+                if f4m_formats:
+                    f4m_formats[0]['quality'] = quality(format_id)
+                    formats.extend(f4m_formats)
+            else:
+                formats.append({
+                    'url': video_url,
+                    'format_id': format_id,
+                    'quality': quality(format_id),
+                })
+
+        if not formats:
+            youtube_id = video.get('youtubeID')
+            if youtube_id:
+                return self.url_result(youtube_id, 'Youtube')
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'formats': formats,
+        }
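
GiantBomb ranks its stream variants with the qualities() helper from ..utils.
If it works the way this call site suggests, it maps a preference-ordered list
of format IDs onto comparable integers, along these lines (a stand-in, not the
real helper):

    def qualities(quality_ids):
        # Later entries rank higher; unknown IDs sort below everything.
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    # q = qualities(['f4m_low', 'progressive_low', 'f4m_high'])
    # q('f4m_high') == 2; q('f4m_low') == 0; q('bogus') == -1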
diff --git a/youtube_dl/extractor/glide.py b/youtube_dl/extractor/glide.py
new file mode 100644 (file)
index 0000000..9561ed5
--- /dev/null
@@ -0,0 +1,40 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class GlideIE(InfoExtractor):
+    IE_DESC = 'Glide mobile video messages (glide.me)'
+    _VALID_URL = r'https?://share\.glide\.me/(?P<id>[A-Za-z0-9\-=_+]+)'
+    _TEST = {
+        'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==',
+        'md5': '4466372687352851af2d131cfaa8a4c7',
+        'info_dict': {
+            'id': 'UZF8zlmuQbe4mr+7dCiQ0w==',
+            'ext': 'mp4',
+            'title': 'Damon Timm\'s Glide message',
+            'thumbnail': 're:^https?://.*?\.cloudfront\.net/.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        title = self._html_search_regex(
+            r'<title>(.*?)</title>', webpage, 'title')
+        video_url = self.http_scheme() + self._search_regex(
+            r'<source src="(.*?)" type="video/mp4">', webpage, 'video URL')
+        thumbnail_url = self._search_regex(
+            r'<img id="video-thumbnail" src="(.*?)"',
+            webpage, 'thumbnail url', fatal=False)
+        thumbnail = (
+            None if thumbnail_url is None
+            else self.http_scheme() + thumbnail_url)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'thumbnail': thumbnail,
+        }
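
Glide's page markup carries scheme-relative media URLs, so the extractor
completes them with self.http_scheme(). A guess at that helper's shape,
assuming it honours a --prefer-insecure style switch:

    def http_scheme(prefer_insecure=False):
        # Scheme used to complete '//host/path' style URLs from the page.
        return 'http:' if prefer_insecure else 'https:'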
index 77c3ad4fc8b882fe92c91574eca1b5cbabc265cd..6949a57c70dd9b378c4879dad8afd4f3b18e558a 100644 (file)
@@ -5,13 +5,15 @@ import random
 import math
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    float_or_none,
+from ..compat import (
     compat_str,
     compat_chr,
     compat_ord,
 )
+from ..utils import (
+    ExtractorError,
+    float_or_none,
+)
 
 
 class GloboIE(InfoExtractor):
@@ -395,4 +397,4 @@ class GloboIE(InfoExtractor):
             'uploader_id': uploader_id,
             'like_count': like_count,
             'formats': formats
-        }
\ No newline at end of file
+        }
diff --git a/youtube_dl/extractor/goldenmoustache.py b/youtube_dl/extractor/goldenmoustache.py
new file mode 100644 (file)
index 0000000..0fb5097
--- /dev/null
@@ -0,0 +1,48 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class GoldenMoustacheIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?goldenmoustache\.com/(?P<display_id>[\w-]+)-(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'http://www.goldenmoustache.com/suricate-le-poker-3700/',
+        'md5': '0f904432fa07da5054d6c8beb5efb51a',
+        'info_dict': {
+            'id': '3700',
+            'ext': 'mp4',
+            'title': 'Suricate - Le Poker',
+            'description': 'md5:3d1f242f44f8c8cb0a106f1fd08e5dc9',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }, {
+        'url': 'http://www.goldenmoustache.com/le-lab-tout-effacer-mc-fly-et-carlito-55249/',
+        'md5': '27f0c50fb4dd5f01dc9082fc67cd5700',
+        'info_dict': {
+            'id': '55249',
+            'ext': 'mp4',
+            'title': 'Le LAB - Tout Effacer (Mc Fly et Carlito)',
+            'description': 'md5:9b7fbf11023fb2250bd4b185e3de3b2a',
+            'thumbnail': 're:^https?://.*\.(?:png|jpg)$',
+        }
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(
+            r'data-src-type="mp4" data-src="([^"]+)"', webpage, 'video URL')
+        title = self._html_search_regex(
+            r'<title>(.*?)(?: - Golden Moustache)?</title>', webpage, 'title')
+        thumbnail = self._og_search_thumbnail(webpage)
+        description = self._og_search_description(webpage)
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+        }
index 53714f47f1a0a8cd1abb8aab0ec09cdbd283d51b..2bfb9904022c6a3830901baa2ee380b6f4f14714 100644 (file)
@@ -2,8 +2,10 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urlparse,
+)
+from ..utils import (
     determine_ext,
 )
 
index 469e1f9357eaf66ef48b3bed6c3d464c48f2b5dc..498304cb2bd9b605d44e67291a2f38bf4481a6f8 100644 (file)
@@ -4,7 +4,7 @@ import itertools
 import re
 
 from .common import SearchInfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
 )
 
index 45cca1d24e858edd7a1afa3352b2457058163fd7..ae24aff84fd85c6796c7a4374964f70629175f43 100644 (file)
@@ -4,19 +4,21 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    determine_ext,
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
 )
+from ..utils import (
+    ExtractorError,
+    int_or_none,
+)
 
 
 class GorillaVidIE(InfoExtractor):
-    IE_DESC = 'GorillaVid.in, daclips.in and movpod.in'
+    IE_DESC = 'GorillaVid.in, daclips.in, movpod.in and fastvideo.in'
     _VALID_URL = r'''(?x)
         https?://(?P<host>(?:www\.)?
-            (?:daclips\.in|gorillavid\.in|movpod\.in))/
+            (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in))/
         (?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
     '''
 
@@ -46,7 +48,17 @@ class GorillaVidIE(InfoExtractor):
         'info_dict': {
             'id': '3rso4kdn6f9m',
             'ext': 'mp4',
-            'title': 'Micro Pig piglets ready on 16th July 2009',
+            'title': 'Micro Pig piglets ready on 16th July 2009-bG0PdrCdxUc',
+            'thumbnail': 're:http://.*\.jpg',
+        }
+    }, {
+        # video with countdown timeout
+        'url': 'http://fastvideo.in/1qmdn1lmsmbw',
+        'md5': '8b87ec3f6564a3108a0e8e66594842ba',
+        'info_dict': {
+            'id': '1qmdn1lmsmbw',
+            'ext': 'mp4',
+            'title': 'Man of Steel - Trailer',
             'thumbnail': 're:http://.*\.jpg',
         },
     }, {
@@ -69,8 +81,14 @@ class GorillaVidIE(InfoExtractor):
             (?:id="[^"]+"\s+)?
             value="([^"]*)"
             ''', webpage))
-        
+
         if fields['op'] == 'download1':
+            countdown = int_or_none(self._search_regex(
+                r'<span id="countdown_str">(?:[Ww]ait)?\s*<span id="cxc">(\d+)</span>\s*(?:seconds?)?</span>',
+                webpage, 'countdown', default=None))
+            if countdown:
+                self._sleep(countdown, video_id)
+
             post = compat_urllib_parse.urlencode(fields)
 
             req = compat_urllib_request.Request(url, post)
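
The fastvideo.in support hinges on that countdown handling: the site embeds a
wait timer and rejects the form if it is posted too early. Stripped of the
InfoExtractor plumbing, the step is roughly:

    import re
    import time

    def wait_for_countdown(webpage):
        # Matches '<span id="countdown_str">Wait <span id="cxc">3</span> seconds</span>'
        mobj = re.search(
            r'<span id="countdown_str">(?:[Ww]ait)?\s*<span id="cxc">(\d+)</span>\s*(?:seconds?)?</span>',
            webpage)
        if mobj:
            time.sleep(int(mobj.group(1)))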
@@ -78,14 +96,17 @@ class GorillaVidIE(InfoExtractor):
 
             webpage = self._download_webpage(req, video_id, 'Downloading video page')
 
-        title = self._search_regex(r'style="z-index: [0-9]+;">([^<]+)</span>', webpage, 'title')
-        video_url = self._search_regex(r'file\s*:\s*\'(http[^\']+)\',', webpage, 'file url')
-        thumbnail = self._search_regex(r'image\s*:\s*\'(http[^\']+)\',', webpage, 'thumbnail', fatal=False)
+        title = self._search_regex(
+            r'style="z-index: [0-9]+;">([^<]+)</span>',
+            webpage, 'title', default=None) or self._og_search_title(webpage)
+        video_url = self._search_regex(
+            r'file\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'file url')
+        thumbnail = self._search_regex(
+            r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', fatal=False)
 
         formats = [{
             'format_id': 'sd',
             'url': video_url,
-            'ext': determine_ext(video_url),
             'quality': 1,
         }]
 
index 7bca21ad0fe81c71444bfe64fb23ea3ad62c6c5b..b116d251d5d3f30c6affc852454e7e326d14f660 100644 (file)
@@ -1,73 +1,53 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
+from ..compat import (
+    compat_parse_qs,
+)
 from ..utils import (
-    compat_urlparse,
-    str_to_int,
-    ExtractorError,
+    parse_duration,
 )
-import json
 
 
 class GoshgayIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)www.goshgay.com/video(?P<id>\d+?)($|/)'
+    _VALID_URL = r'https?://www\.goshgay\.com/video(?P<id>\d+?)($|/)'
     _TEST = {
-        'url': 'http://www.goshgay.com/video4116282',
-        'md5': '268b9f3c3229105c57859e166dd72b03',
+        'url': 'http://www.goshgay.com/video299069/diesel_sfw_xxx_video',
+        'md5': '027fcc54459dff0feb0bc06a7aeda680',
         'info_dict': {
-            'id': '4116282',
+            'id': '299069',
             'ext': 'flv',
-            'title': 'md5:089833a4790b5e103285a07337f245bf',
-            'thumbnail': 're:http://.*\.jpg',
+            'title': 'DIESEL SFW XXX Video',
+            'thumbnail': 're:^http://.*\.jpg$',
+            'duration': 79,
             'age_limit': 18,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        title = self._search_regex(r'class="video-title"><h1>(.+?)<', webpage, 'title')
 
-        player_config = self._search_regex(
-            r'(?s)jwplayer\("player"\)\.setup\(({.+?})\)', webpage, 'config settings')
-        player_vars = json.loads(player_config.replace("'", '"'))
-        width = str_to_int(player_vars.get('width'))
-        height = str_to_int(player_vars.get('height'))
-        config_uri = player_vars.get('config')
+        title = self._html_search_regex(
+            r'<h2>(.*?)<', webpage, 'title')
+        duration = parse_duration(self._html_search_regex(
+            r'<span class="duration">\s*-?\s*(.*?)</span>',
+            webpage, 'duration', fatal=False))
+        family_friendly = self._html_search_meta(
+            'isFamilyFriendly', webpage, default='false')
 
-        if config_uri is None:
-            raise ExtractorError('Missing config URI')
-        node = self._download_xml(config_uri, video_id, 'Downloading player config XML',
-                                  errnote='Unable to download XML')
-        if node is None:
-            raise ExtractorError('Missing config XML')
-        if node.tag != 'config':
-            raise ExtractorError('Missing config attribute')
-        fns = node.findall('file')
-        imgs = node.findall('image')
-        if len(fns) != 1:
-            raise ExtractorError('Missing media URI')
-        video_url = fns[0].text
-        if len(imgs) < 1:
-            thumbnail = None
-        else:
-            thumbnail = imgs[0].text
-
-        url_comp = compat_urlparse.urlparse(url)
-        ref = "%s://%s%s" % (url_comp[0], url_comp[1], url_comp[2])
+        flashvars = compat_parse_qs(self._html_search_regex(
+            r'<embed.+?id="flash-player-embed".+?flashvars="([^"]+)"',
+            webpage, 'flashvars'))
+        thumbnail = flashvars.get('url_bigthumb', [None])[0]
+        video_url = flashvars['flv_url'][0]
 
         return {
             'id': video_id,
             'url': video_url,
             'title': title,
-            'width': width,
-            'height': height,
             'thumbnail': thumbnail,
-            'http_referer': ref,
-            'age_limit': 18,
+            'duration': duration,
+            'age_limit': 0 if family_friendly == 'true' else 18,
         }
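
The rewritten extractor reads everything from the player's flashvars
attribute, which is an ordinary query string; compat_parse_qs maps every key
to a list of values, hence the [0] indexing above. A standalone illustration
with made-up URLs:

    try:
        from urllib.parse import parse_qs  # Python 3
    except ImportError:
        from urlparse import parse_qs  # Python 2

    flashvars = parse_qs(
        'flv_url=http%3A%2F%2Fexample.com%2Fv.flv'
        '&url_bigthumb=http%3A%2F%2Fexample.com%2Ft.jpg')
    video_url = flashvars['flv_url'][0]                   # required
    thumbnail = flashvars.get('url_bigthumb', [None])[0]  # optional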
index 726adff773305844a5e8950b4467509646b79d1d..fff74a70a891fc163ff488408e4df348564b8a29 100644 (file)
@@ -8,12 +8,13 @@ import re
 
 
 from .common import InfoExtractor
-from ..utils import ExtractorError, compat_urllib_request, compat_html_parser
-
-from ..utils import (
+from ..compat import (
+    compat_html_parser,
     compat_urllib_parse,
+    compat_urllib_request,
     compat_urlparse,
 )
+from ..utils import ExtractorError
 
 
 class GroovesharkHtmlParser(compat_html_parser.HTMLParser):
diff --git a/youtube_dl/extractor/groupon.py b/youtube_dl/extractor/groupon.py
new file mode 100644 (file)
index 0000000..8b9e0e2
--- /dev/null
@@ -0,0 +1,50 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class GrouponIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.groupon\.com/deals/(?P<id>[^?#]+)'
+
+    _TEST = {
+        'url': 'https://www.groupon.com/deals/bikram-yoga-huntington-beach-2#ooid=tubGNycTo_9Uxg82uESj4i61EYX8nyuf',
+        'info_dict': {
+            'id': 'bikram-yoga-huntington-beach-2',
+            'title': '$49 for 10 Yoga Classes or One Month of Unlimited Classes at Bikram Yoga Huntington Beach ($180 Value)',
+            'description': 'Studio kept at 105 degrees and 40% humidity with anti-microbial and anti-slip Flotex flooring; certified instructors',
+        },
+        'playlist': [{
+            'info_dict': {
+                'id': 'tubGNycTo_9Uxg82uESj4i61EYX8nyuf',
+                'ext': 'mp4',
+                'title': 'Bikram Yoga Huntington Beach | Orange County',
+            },
+        }],
+        'params': {
+            'skip_download': 'HLS',
+        }
+    }
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+        webpage = self._download_webpage(url, playlist_id)
+
+        payload = self._parse_json(self._search_regex(
+            r'var\s+payload\s*=\s*(.*?);\n', webpage, 'payload'), playlist_id)
+        videos = payload['carousel'].get('dealVideos', [])
+        entries = []
+        for v in videos:
+            if v.get('provider') != 'OOYALA':
+                self.report_warning(
+                    '%s: Unsupported video provider %s, skipping video' %
+                    (playlist_id, v.get('provider')))
+                continue
+            entries.append(self.url_result('ooyala:%s' % v['media']))
+
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'entries': entries,
+            'title': self._og_search_title(webpage),
+            'description': self._og_search_description(webpage),
+        }
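
The only scraping step here is pulling the inline 'var payload = {...};'
assignment out of the page, which _parse_json then decodes. In isolation
(payload contents taken from the test case above):

    import json
    import re

    webpage = ('var payload = {"carousel": {"dealVideos": [{"provider": '
               '"OOYALA", "media": "tubGNycTo_9Uxg82uESj4i61EYX8nyuf"}]}};\n')
    payload = json.loads(
        re.search(r'var\s+payload\s*=\s*(.*?);\n', webpage).group(1))
    videos = payload['carousel'].get('dealVideos', [])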
index 5bdd08afabd27474fc2f5b1ed2afb371efdf8d9d..b6cc15b6fbad25c43fe0699668bd3ec452ed944d 100644 (file)
@@ -1,37 +1,33 @@
 # -*- coding: utf-8 -*-
-
-import re
-import json
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import determine_ext
+
 
 class HarkIE(InfoExtractor):
-    _VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+'
+    _VALID_URL = r'https?://www\.hark\.com/clips/(?P<id>.+?)-.+'
     _TEST = {
-        u'url': u'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
-        u'file': u'mmbzyhkgny.mp3',
-        u'md5': u'6783a58491b47b92c7c1af5a77d4cbee',
-        u'info_dict': {
-            u'title': u"Obama: 'Beyond The Afghan Theater, We Only Target Al Qaeda' on May 23, 2013",
-            u'description': u'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
-            u'duration': 11,
+        'url': 'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
+        'md5': '6783a58491b47b92c7c1af5a77d4cbee',
+        'info_dict': {
+            'id': 'mmbzyhkgny',
+            'ext': 'mp3',
+            'title': 'Obama: \'Beyond The Afghan Theater, We Only Target Al Qaeda\' on May 23, 2013',
+            'description': 'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
+            'duration': 11,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
-        json_url = "http://www.hark.com/clips/%s.json" %(video_id)
-        info_json = self._download_webpage(json_url, video_id)
-        info = json.loads(info_json)
-        final_url = info['url']
+        video_id = self._match_id(url)
+        data = self._download_json(
+            'http://www.hark.com/clips/%s.json' % video_id, video_id)
 
-        return {'id': video_id,
-                'url' : final_url,
-                'title': info['name'],
-                'ext': determine_ext(final_url),
-                'description': info['description'],
-                'thumbnail': info['image_original'],
-                'duration': info['duration'],
-                }
+        return {
+            'id': video_id,
+            'url': data['url'],
+            'title': data['name'],
+            'description': data.get('description'),
+            'thumbnail': data.get('image_original'),
+            'duration': data.get('duration'),
+        }
index f97b1e0854e0655d4cab866ace1dccd58a13f7ec..278d9f527fd41c8e1e2c180a9ae455a23fbef1fc 100644 (file)
@@ -3,7 +3,8 @@ from __future__ import unicode_literals
 
 from .common import InfoExtractor
 from ..utils import (
-    get_meta_content,
+    determine_ext,
+    int_or_none,
     parse_iso8601,
 )
 
@@ -24,57 +25,54 @@ class HeiseIE(InfoExtractor):
             'title': (
                 "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone"
             ),
-            'format_id': 'mp4_720',
+            'format_id': 'mp4_720p',
             'timestamp': 1411812600,
             'upload_date': '20140927',
             'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
+            'thumbnail': 're:^https?://.*\.jpe?g$',
         }
     }
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-
         webpage = self._download_webpage(url, video_id)
-        json_url = self._search_regex(
-            r'json_url:\s*"([^"]+)"', webpage, 'json URL')
-        config = self._download_json(json_url, video_id)
+
+        container_id = self._search_regex(
+            r'<div class="videoplayerjw".*?data-container="([0-9]+)"',
+            webpage, 'container ID')
+        sequenz_id = self._search_regex(
+            r'<div class="videoplayerjw".*?data-sequenz="([0-9]+)"',
+            webpage, 'sequenz ID')
+        data_url = 'http://www.heise.de/videout/feed?container=%s&sequenz=%s' % (container_id, sequenz_id)
+        doc = self._download_xml(data_url, video_id)
 
         info = {
             'id': video_id,
-            'thumbnail': config.get('poster'),
-            'timestamp': parse_iso8601(get_meta_content('date', webpage)),
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'timestamp': parse_iso8601(
+                self._html_search_meta('date', webpage)),
             'description': self._og_search_description(webpage),
         }
 
-        title = get_meta_content('fulltitle', webpage)
+        title = self._html_search_meta('fulltitle', webpage)
         if title:
             info['title'] = title
-        elif config.get('title'):
-            info['title'] = config['title']
         else:
             info['title'] = self._og_search_title(webpage)
 
         formats = []
-        for t, rs in config['formats'].items():
-            if not rs or not hasattr(rs, 'items'):
-                self._downloader.report_warning(
-                    'formats: {0}: no resolutions'.format(t))
-                continue
-
-            for height_str, obj in rs.items():
-                format_id = '{0}_{1}'.format(t, height_str)
-
-                if not obj or not obj.get('url'):
-                    self._downloader.report_warning(
-                        'formats: {0}: no url'.format(format_id))
-                    continue
-
-                formats.append({
-                    'url': obj['url'],
-                    'format_id': format_id,
-                    'height': self._int(height_str, 'height'),
-                })
-
+        for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'):
+            label = source_node.attrib['label']
+            height = int_or_none(self._search_regex(
+                r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))
+            video_url = source_node.attrib['file']
+            ext = determine_ext(video_url, '')
+            formats.append({
+                'url': video_url,
+                'format_note': label,
+                'format_id': '%s_%s' % (ext, label),
+                'height': height,
+            })
         self._sort_formats(formats)
         info['formats'] = formats
 
index 5268efa49433ca8c7cb0c2288df0705165e876a3..93107b3064ebfba513b3aa208556b5822f6cf979 100644 (file)
@@ -2,9 +2,8 @@
 
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
+from ..utils import js_to_json
 
 
 class HelsinkiIE(InfoExtractor):
@@ -24,39 +23,21 @@ class HelsinkiIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        formats = []
-
-        mobj = re.search(r'file=((\w+):[^&]+)', webpage)
-        if mobj:
-            formats.append({
-                'ext': mobj.group(2),
-                'play_path': mobj.group(1),
-                'url': 'rtmp://flashvideo.it.helsinki.fi/vod/',
-                'player_url': 'http://video.helsinki.fi/player.swf',
-                'format_note': 'sd',
-                'quality': 0,
-            })
-
-        mobj = re.search(r'hd\.file=((\w+):[^&]+)', webpage)
-        if mobj:
-            formats.append({
-                'ext': mobj.group(2),
-                'play_path': mobj.group(1),
-                'url': 'rtmp://flashvideo.it.helsinki.fi/vod/',
-                'player_url': 'http://video.helsinki.fi/player.swf',
-                'format_note': 'hd',
-                'quality': 1,
-            })
 
+        params = self._parse_json(self._html_search_regex(
+            r'(?s)jwplayer\("player"\).setup\((\{.*?\})\);',
+            webpage, 'player code'), video_id, transform_source=js_to_json)
+        formats = [{
+            'url': s['file'],
+            'ext': 'mp4',
+        } for s in params['sources']]
         self._sort_formats(formats)
 
         return {
             'id': video_id,
             'title': self._og_search_title(webpage).replace('Video: ', ''),
             'description': self._og_search_description(webpage),
-            'thumbnail': self._og_search_thumbnail(webpage),
             'formats': formats,
         }
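
The player config here is a JavaScript object literal rather than strict JSON,
hence the js_to_json transform before parsing. A very rough stand-in that only
handles the quoting difference (the real helper certainly covers more cases):

    import json
    import re

    def js_to_json_sketch(code):
        # Swap single-quoted strings for double-quoted ones.
        return re.sub(r"'([^']*)'", lambda m: '"%s"' % m.group(1), code)

    params = json.loads(js_to_json_sketch(
        "{'sources': [{'file': 'http://video.helsinki.fi/x.mp4'}]}"))
    formats = [{'url': s['file'], 'ext': 'mp4'} for s in params['sources']]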
index 7e7714438ce9099e4d7f8d6efe9f1204dc6f2690..5b6efb27eedfe0097cc47d96f3f287ab1858e9e8 100644 (file)
@@ -37,7 +37,7 @@ class HornBunnyIE(InfoExtractor):
         webpage2 = self._download_webpage(redirect_url, video_id)
         video_url = self._html_search_regex(
             r'flvMask:(.*?);', webpage2, 'video_url')
-        
+
         duration = parse_duration(self._search_regex(
             r'<strong>Runtime:</strong>\s*([0-9:]+)</div>',
             webpage, 'duration', fatal=False))
index 8e812b66976e31e43ad594dbee6344c5e34629cf..704d0285d3e1c2ce10e8f3929543c6c66b0fd58a 100644 (file)
@@ -4,9 +4,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_request,
+)
 from ..utils import (
     ExtractorError,
-    compat_urllib_request,
     int_or_none,
     urlencode_postdata,
 )
@@ -30,9 +32,7 @@ class HostingBulkIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         url = 'http://hostingbulk.com/{0:}.html'.format(video_id)
 
         # Custom request with cookie to set language to English, so our file
index 80b48b1b3605a18fa11547ab890897b6b47ccaa5..651784b73940032fd65c9675043f045b0bcf4ff2 100644 (file)
@@ -1,12 +1,13 @@
 from __future__ import unicode_literals
 
-import re
 import base64
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
     HEADRequest,
 )
@@ -16,25 +17,24 @@ class HotNewHipHopIE(InfoExtractor):
     _VALID_URL = r'http://www\.hotnewhiphop\.com/.*\.(?P<id>.*)\.html'
     _TEST = {
         'url': 'http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html',
-        'file': '1435540.mp3',
         'md5': '2c2cd2f76ef11a9b3b581e8b232f3d96',
         'info_dict': {
+            'id': '1435540',
+            'ext': 'mp3',
             'title': 'Freddie Gibbs - Lay It Down'
         }
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
-
-        webpage_src = self._download_webpage(url, video_id)
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
 
         video_url_base64 = self._search_regex(
-            r'data-path="(.*?)"', webpage_src, u'video URL', fatal=False)
+            r'data-path="(.*?)"', webpage, 'video URL', default=None)
 
         if video_url_base64 is None:
             video_url = self._search_regex(
-                r'"contentUrl" content="(.*?)"', webpage_src, u'video URL')
+                r'"contentUrl" content="(.*?)"', webpage, 'content URL')
             return self.url_result(video_url, ie='Youtube')
 
         reqdata = compat_urllib_parse.urlencode([
@@ -59,11 +59,11 @@ class HotNewHipHopIE(InfoExtractor):
         if video_url.endswith('.html'):
             raise ExtractorError('Redirect failed')
 
-        video_title = self._og_search_title(webpage_src).strip()
+        video_title = self._og_search_title(webpage).strip()
 
         return {
             'id': video_id,
             'url': video_url,
             'title': video_title,
-            'thumbnail': self._og_search_thumbnail(webpage_src),
+            'thumbnail': self._og_search_thumbnail(webpage),
         }
index 6ae04782c1aabb27ee6973819810f1e6f763b8b0..3f7d6666c0810e545c0f285dcf70689806c4dac7 100644 (file)
@@ -13,7 +13,7 @@ class HowcastIE(InfoExtractor):
         'info_dict': {
             'id': '390161',
             'ext': 'mp4',
-            'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.', 
+            'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
             'title': 'How to Tie a Square Knot Properly',
         }
     }
@@ -27,10 +27,10 @@ class HowcastIE(InfoExtractor):
         self.report_extraction(video_id)
 
         video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
-            webpage, 'video URL')
+                                       webpage, 'video URL')
 
         video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
-            webpage, 'description', fatal=False)
+                                                    webpage, 'description', fatal=False)
 
         return {
             'id': video_id,
index fccc238840887fd70ed56b2f642c47ea6aa4f43e..e9733912132798d99be18bb935dcd3c3b190525d 100644 (file)
@@ -1,12 +1,12 @@
 from __future__ import unicode_literals
 
-import re
-import json
-import random
-import string
-
 from .common import InfoExtractor
-from ..utils import find_xpath_attr
+from ..utils import (
+    find_xpath_attr,
+    int_or_none,
+    js_to_json,
+    unescapeHTML,
+)
 
 
 class HowStuffWorksIE(InfoExtractor):
@@ -16,98 +16,74 @@ class HowStuffWorksIE(InfoExtractor):
             'url': 'http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm',
             'info_dict': {
                 'id': '450221',
-                'display_id': 'cool-jobs-iditarod-musher',
                 'ext': 'flv',
                 'title': 'Cool Jobs - Iditarod Musher',
-                'description': 'md5:82bb58438a88027b8186a1fccb365f90',
+                'description': 'Cold sleds, freezing temps and warm dog breath... an Iditarod musher\'s dream. Kasey-Dee Gardner jumps on a sled to find out what the big deal is.',
+                'display_id': 'cool-jobs-iditarod-musher',
                 'thumbnail': 're:^https?://.*\.jpg$',
+                'duration': 161,
             },
-            'params': {
-                # md5 is not consistent
-                'skip_download': True
-            }
         },
         {
             'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
             'info_dict': {
                 'id': '453464',
-                'display_id': 'survival-zone-food-and-water-in-the-savanna',
                 'ext': 'mp4',
                 'title': 'Survival Zone: Food and Water In the Savanna',
-                'description': 'md5:7e1c89f6411434970c15fa094170c371',
+                'description': 'Learn how to find both food and water while trekking in the African savannah. In this video from the Discovery Channel.',
+                'display_id': 'survival-zone-food-and-water-in-the-savanna',
                 'thumbnail': 're:^https?://.*\.jpg$',
             },
-            'params': {
-                # md5 is not consistent
-                'skip_download': True
-            }
         },
         {
             'url': 'http://entertainment.howstuffworks.com/arts/2706-sword-swallowing-1-by-dan-meyer-video.htm',
             'info_dict': {
                 'id': '440011',
-                'display_id': 'sword-swallowing-1-by-dan-meyer',
                 'ext': 'flv',
                 'title': 'Sword Swallowing #1 by Dan Meyer',
-                'description': 'md5:b2409e88172913e2e7d3d1159b0ef735',
+                'description': 'Video footage (1 of 3) used by permission of the owner Dan Meyer through Sword Swallowers Association International <www.swordswallow.org>',
+                'display_id': 'sword-swallowing-1-by-dan-meyer',
                 'thumbnail': 're:^https?://.*\.jpg$',
             },
-            'params': {
-                # md5 is not consistent
-                'skip_download': True
-            }
         },
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('id')
+        display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
+        clip_js = self._search_regex(
+            r'(?s)var clip = ({.*?});', webpage, 'clip info')
+        clip_info = self._parse_json(
+            clip_js, display_id, transform_source=js_to_json)
 
-        content_id = self._search_regex(r'var siteSectionId="(\d+)";', webpage, 'content id')
-
-        mp4 = self._search_regex(
-            r'''(?xs)var\s+clip\s*=\s*{\s*
-                .+?\s*
-                content_id\s*:\s*%s\s*,\s*
-                .+?\s*
-                mp4\s*:\s*\[(.*?),?\]\s*
-                };\s*
-                videoData\.push\(clip\);''' % content_id,
-            webpage, 'mp4', fatal=False, default=None)
-
-        smil = self._download_xml(
-            'http://services.media.howstuffworks.com/videos/%s/smil-service.smil' % content_id,
-            content_id, 'Downloading video SMIL')
-
-        http_base = find_xpath_attr(
-            smil,
-            './{0}head/{0}meta'.format('{http://www.w3.org/2001/SMIL20/Language}'),
-            'name',
-            'httpBase').get('content')
-
-        def random_string(str_len=0):
-            return ''.join([random.choice(string.ascii_uppercase) for _ in range(str_len)])
-
-        URL_SUFFIX = '?v=2.11.3&fp=LNX 11,2,202,356&r=%s&g=%s' % (random_string(5), random_string(12))
-
+        video_id = clip_info['content_id']
         formats = []
+        m3u8_url = clip_info.get('m3u8')
+        if m3u8_url:
+            formats += self._extract_m3u8_formats(m3u8_url, video_id, 'mp4')
+        for video in clip_info.get('mp4', []):
+            formats.append({
+                'url': video['src'],
+                'format_id': video['bitrate'],
+                'vbr': int(video['bitrate'].rstrip('k')),
+            })
+
+        if not formats:
+            smil = self._download_xml(
+                'http://services.media.howstuffworks.com/videos/%s/smil-service.smil' % video_id,
+                video_id, 'Downloading video SMIL')
+
+            http_base = find_xpath_attr(
+                smil,
+                './{0}head/{0}meta'.format('{http://www.w3.org/2001/SMIL20/Language}'),
+                'name',
+                'httpBase').get('content')
+
+            URL_SUFFIX = '?v=2.11.3&fp=LNX 11,2,202,356&r=A&g=A'
 
-        if mp4:
-            for video in json.loads('[%s]' % mp4):
-                bitrate = video['bitrate']
-                fmt = {
-                    'url': video['src'].replace('http://pmd.video.howstuffworks.com', http_base) + URL_SUFFIX,
-                    'format_id': bitrate,
-                }
-                m = re.search(r'(?P<vbr>\d+)[Kk]', bitrate)
-                if m:
-                    fmt['vbr'] = int(m.group('vbr'))
-                formats.append(fmt)
-        else:
             for video in smil.findall(
-                    './/{0}body/{0}switch/{0}video'.format('{http://www.w3.org/2001/SMIL20/Language}')):
-                vbr = int(video.attrib['system-bitrate']) / 1000
+                    './{0}body/{0}switch/{0}video'.format('{http://www.w3.org/2001/SMIL20/Language}')):
+                vbr = int_or_none(video.attrib['system-bitrate'], scale=1000)
                 formats.append({
                     'url': '%s/%s%s' % (http_base, video.attrib['src'], URL_SUFFIX),
                     'format_id': '%dk' % vbr,
@@ -116,19 +92,12 @@ class HowStuffWorksIE(InfoExtractor):
 
         self._sort_formats(formats)
 
-        title = self._og_search_title(webpage)
-        TITLE_SUFFIX = ' : HowStuffWorks'
-        if title.endswith(TITLE_SUFFIX):
-            title = title[:-len(TITLE_SUFFIX)]
-
-        description = self._og_search_description(webpage)
-        thumbnail = self._og_search_thumbnail(webpage)
-
         return {
-            'id': content_id,
+            'id': '%s' % video_id,
             'display_id': display_id,
-            'title': title,
-            'description': description,
-            'thumbnail': thumbnail,
+            'title': unescapeHTML(clip_info['clip_title']),
+            'description': unescapeHTML(clip_info.get('caption')),
+            'thumbnail': clip_info.get('video_still_url'),
+            'duration': clip_info.get('duration'),
             'formats': formats,
         }
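
In the SMIL fallback, int_or_none(..., scale=1000) replaces the bare division,
so a missing or malformed system-bitrate now yields None instead of a crash. A
stand-in consistent with this call site (the real utils function may take more
parameters):

    def int_or_none(v, scale=1):
        if v is None:
            return None
        try:
            return int(v) // scale  # e.g. bits/s -> kbit/s for scale=1000
        except (TypeError, ValueError):
            return None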
index 94e7cf79008aa0b2426f70a26ba70218f916d731..4ccf6b9b8a82c3ef28c1d9d04dcc6f26ce2a8f8d 100644 (file)
@@ -33,8 +33,7 @@ class HuffPostIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id
         data = self._download_json(api_url, video_id)['data']
index 6d0d847c6d3461a02c6eab71b24848247e9678ab..aa0724a02353840e5f5533a1eedbc7005aa63008 100644 (file)
@@ -1,20 +1,20 @@
 from __future__ import unicode_literals
 
 import json
-import re
 import time
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
-
+)
+from ..utils import (
     ExtractorError,
 )
 
 
 class HypemIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'
+    _VALID_URL = r'http://(?:www\.)?hypem\.com/track/(?P<id>[^/]+)/'
     _TEST = {
         'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
         'md5': 'b9cc91b5af8995e9f0c1cee04c575828',
@@ -27,8 +27,7 @@ class HypemIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        track_id = mobj.group(1)
+        track_id = self._match_id(url)
 
         data = {'ax': 1, 'ts': time.time()}
         data_encoded = compat_urllib_parse.urlencode(data)
index 1d5a10a3b6349d95387aee00d4be5e6deac618b8..370e86e5ac7ce497c8b3c658805374246fb9690a 100644 (file)
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -20,13 +18,11 @@ class IconosquareIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        html_title = self._html_search_regex(
-            r'<title>(.+?)</title>',
+        title = self._html_search_regex(
+            r'<title>(.+?)(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)</title>',
             webpage, 'title')
-        title = re.sub(r'(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)$', '', html_title)
         uploader_id = self._html_search_regex(
             r'@([^ ]+)', title, 'uploader name', fatal=False)
 
index c80185b535b8d9853adf886fb8b5582284d12a03..3db668cd0297ea0ff3c0168c2b3f5db1491a0db4 100644 (file)
@@ -63,8 +63,10 @@ class IGNIE(InfoExtractor):
                 'id': '078fdd005f6d3c02f63d795faa1b984f',
                 'ext': 'mp4',
                 'title': 'Rewind Theater - Wild Trailer Gamescom 2014',
-                'description': 'Giant skeletons, bloody hunts, and captivating'
-                    ' natural beauty take our breath away.',
+                'description': (
+                    'Giant skeletons, bloody hunts, and captivating'
+                    ' natural beauty take our breath away.'
+                ),
             },
         },
     ]
@@ -99,7 +101,7 @@ class IGNIE(InfoExtractor):
         video_id = self._find_video_id(webpage)
         result = self._get_video_info(video_id)
         description = self._html_search_regex(self._DESCRIPTION_RE,
-            webpage, 'video description', flags=re.DOTALL)
+                                              webpage, 'video description', flags=re.DOTALL)
         result['description'] = description
         return result
 
index 4536db3bfca1e1244e70089bea30de9687d923f0..13a53a0cb39f70ed1aaf1713283852cdc3cebeb4 100644 (file)
@@ -4,9 +4,8 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urlparse,
-    get_element_by_attribute,
 )
 
 
@@ -27,10 +26,11 @@ class ImdbIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage('http://www.imdb.com/video/imdb/vi%s' % video_id, video_id)
-        descr = get_element_by_attribute('itemprop', 'description', webpage)
+        descr = self._html_search_regex(
+            r'(?s)<span itemprop="description">(.*?)</span>',
+            webpage, 'description', fatal=False)
         available_formats = re.findall(
             r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,
             flags=re.MULTILINE)
@@ -71,11 +71,9 @@ class ImdbListIE(InfoExtractor):
         },
         'playlist_count': 7,
     }
-    
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        list_id = mobj.group('id')
 
+    def _real_extract(self, url):
+        list_id = self._match_id(url)
         webpage = self._download_webpage(url, list_id)
         entries = [
             self.url_result('http://www.imdb.com' + m, 'Imdb')
index e76dd222d1ee81dc0e0b2d5b1b3c28ef22e1bd83..f25f43664e262b25473557c5f11dae91e697e3f6 100644 (file)
@@ -1,10 +1,9 @@
 from __future__ import unicode_literals
 
 import base64
-import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
 )
 
@@ -24,9 +23,7 @@ class InfoQIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
index 5109f26ce860edc0675eaba6350e0ab820e7fe27..b020e2621a5cc3c8d7ef6a1bc2cb6aaea989f779 100644 (file)
@@ -27,9 +27,9 @@ class InstagramIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
         uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
-            webpage, 'uploader id', fatal=False)
+                                         webpage, 'uploader id', fatal=False)
         desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
-            fatal=False)
+                                  fatal=False)
 
         return {
             'id': video_id,
index 53f9a5f7587bcf36d9d4a63f6cfa36d90496dd28..c813d4b82921b7598ad0981e70a1241a8f9bda32 100644 (file)
@@ -3,9 +3,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urlparse,
     compat_urllib_parse,
+)
+from ..utils import (
     xpath_with_ns,
 )
 
@@ -32,7 +34,7 @@ class InternetVideoArchiveIE(InfoExtractor):
     def _clean_query(query):
         NEEDED_ARGS = ['publishedid', 'customerid']
         query_dic = compat_urlparse.parse_qs(query)
-        cleaned_dic = dict((k,v[0]) for (k,v) in query_dic.items() if k in NEEDED_ARGS)
+        cleaned_dic = dict((k, v[0]) for (k, v) in query_dic.items() if k in NEEDED_ARGS)
         # Other player ids return m3u8 urls
         cleaned_dic['playerid'] = '247'
         cleaned_dic['videokbrate'] = '100000'
@@ -45,22 +47,26 @@ class InternetVideoArchiveIE(InfoExtractor):
         url = self._build_url(query)
 
         flashconfiguration = self._download_xml(url, video_id,
-            'Downloading flash configuration')
+                                                'Downloading flash configuration')
         file_url = flashconfiguration.find('file').text
         file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
         # Replace some of the parameters in the query to get the best quality
         # and http links (no m3u8 manifests)
         file_url = re.sub(r'(?<=\?)(.+)$',
-            lambda m: self._clean_query(m.group()),
-            file_url)
+                          lambda m: self._clean_query(m.group()),
+                          file_url)
         info = self._download_xml(file_url, video_id,
-            'Downloading video info')
+                                  'Downloading video info')
         item = info.find('channel/item')
 
         def _bp(p):
-            return xpath_with_ns(p,
-                {'media': 'http://search.yahoo.com/mrss/',
-                'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'})
+            return xpath_with_ns(
+                p,
+                {
+                    'media': 'http://search.yahoo.com/mrss/',
+                    'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats',
+                }
+            )
         formats = []
         for content in item.findall(_bp('media:group/media:content')):
             attr = content.attrib
index d1defd363c5fe9c86330236f53b8aa21bfe65a38..8529bedfc0ab283790e74144bc9d570df19dc4b3 100644 (file)
@@ -6,8 +6,10 @@ from random import random
 from math import floor
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -54,7 +56,7 @@ class IPrimaIE(InfoExtractor):
 
         player_url = (
             'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
-            (floor(random()*1073741824), floor(random()*1073741824))
+            (floor(random() * 1073741824), floor(random() * 1073741824))
         )
 
         req = compat_urllib_request.Request(player_url)
index 75b543b7cf8ed443bb98f3cd5c492e1c629c28a3..7a400323dc4df3807057a77b25f7401ce5e2a3b8 100644 (file)
@@ -5,8 +5,10 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -43,7 +45,7 @@ class IviIE(InfoExtractor):
                 'thumbnail': 'http://thumbs.ivi.ru/f15.vcp.digitalaccess.ru/contents/8/4/0068dc0677041f3336b7c2baad8fc0.jpg',
             },
             'skip': 'Only works from Russia',
-         }
+        }
     ]
 
     # Sorted by quality
@@ -102,7 +104,7 @@ class IviIE(InfoExtractor):
         compilation = result['compilation']
         title = result['title']
 
-        title = '%s - %s' % (compilation, title) if compilation is not None else title  
+        title = '%s - %s' % (compilation, title) if compilation is not None else title
 
         previews = result['preview']
         previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
@@ -152,17 +154,17 @@ class IviCompilationIE(InfoExtractor):
         compilation_id = mobj.group('compilationid')
         season_id = mobj.group('seasonid')
 
-        if season_id is not None: # Season link
+        if season_id is not None:  # Season link
             season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
             playlist_id = '%s/season%s' % (compilation_id, season_id)
             playlist_title = self._html_search_meta('title', season_page, 'title')
             entries = self._extract_entries(season_page, compilation_id)
-        else: # Compilation link            
+        else:  # Compilation link
             compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
             playlist_id = compilation_id
             playlist_title = self._html_search_meta('title', compilation_page, 'title')
             seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
-            if len(seasons) == 0: # No seasons in this compilation
+            if len(seasons) == 0:  # No seasons in this compilation
                 entries = self._extract_entries(compilation_page, compilation_id)
             else:
                 entries = []
@@ -172,4 +174,4 @@ class IviCompilationIE(InfoExtractor):
                         compilation_id, 'Downloading season %s web page' % season_id)
                     entries.extend(self._extract_entries(season_page, compilation_id))
 
-        return self.playlist_result(entries, playlist_id, playlist_title)
\ No newline at end of file
+        return self.playlist_result(entries, playlist_id, playlist_title)
index 07ef682ee38052088d07f3f232c245ded77b2193..d16d483eeb0d533debe041b7cd6c7b4826d41dde 100644 (file)
@@ -5,11 +5,11 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    get_element_by_id,
-    parse_iso8601,
     determine_ext,
-    int_or_none,
     float_or_none,
+    get_element_by_id,
+    int_or_none,
+    parse_iso8601,
     str_to_int,
 )
 
@@ -30,7 +30,7 @@ class IzleseneIE(InfoExtractor):
                 'description': 'md5:253753e2655dde93f59f74b572454f6d',
                 'thumbnail': 're:^http://.*\.jpg',
                 'uploader_id': 'pelikzzle',
-                'timestamp': 1404298698,
+                'timestamp': 1404302298,
                 'upload_date': '20140702',
                 'duration': 95.395,
                 'age_limit': 0,
@@ -46,7 +46,7 @@ class IzleseneIE(InfoExtractor):
                 'description': 'Tarkan Dortmund 2006 Konseri',
                 'thumbnail': 're:^http://.*\.jpg',
                 'uploader_id': 'parlayankiz',
-                'timestamp': 1163318593,
+                'timestamp': 1163322193,
                 'upload_date': '20061112',
                 'duration': 253.666,
                 'age_limit': 0,
@@ -55,10 +55,9 @@ class IzleseneIE(InfoExtractor):
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        url = 'http://www.izlesene.com/video/%s' % video_id
+        video_id = self._match_id(url)
 
+        url = 'http://www.izlesene.com/video/%s' % video_id
         webpage = self._download_webpage(url, video_id)
 
         title = self._og_search_title(webpage)
index ace08769bd7671619db448696a016d08fd453e67..063e86de46c896c94be505ae916fd6f3fbdedc02 100644 (file)
@@ -45,4 +45,3 @@ class JadoreCettePubIE(InfoExtractor):
             'title': title,
             'description': description,
         }
-
index 1881659665b415714d1315f6d52bc93dbe324a2c..8094cc2e487f2880a66178d0a07c97a2ef9432f5 100644 (file)
@@ -29,7 +29,7 @@ class JeuxVideoIE(InfoExtractor):
         xml_link = self._html_search_regex(
             r'<param name="flashvars" value="config=(.*?)" />',
             webpage, 'config URL')
-        
+
         video_id = self._search_regex(
             r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
             xml_link, 'video ID')
@@ -38,7 +38,7 @@ class JeuxVideoIE(InfoExtractor):
             xml_link, title, 'Downloading XML config')
         info_json = config.find('format.json').text
         info = json.loads(info_json)['versions'][0]
-        
+
         video_url = 'http://video720.jeuxvideo.com/' + info['file']
 
         return {
index 5aa32bf092d8bfae17fd302fe399acf5d5264164..da8068efcba914a14788bc7b39872b8b5cd01c7c 100644 (file)
@@ -36,7 +36,7 @@ class JukeboxIE(InfoExtractor):
 
         try:
             video_url = self._search_regex(r'"config":{"file":"(?P<video_url>http:[^"]+\?mdtk=[0-9]+)"',
-                iframe_html, 'video url')
+                                           iframe_html, 'video url')
             video_url = unescapeHTML(video_url).replace('\/', '/')
         except RegexNotFoundError:
             youtube_url = self._search_regex(
@@ -47,9 +47,9 @@ class JukeboxIE(InfoExtractor):
             return self.url_result(youtube_url, ie='Youtube')
 
         title = self._html_search_regex(r'<h1 class="inline">([^<]+)</h1>',
-            html, 'title')
+                                        html, 'title')
         artist = self._html_search_regex(r'<span id="infos_article_artist">([^<]+)</span>',
-            html, 'artist')
+                                         html, 'artist')
 
         return {
             'id': video_id,
diff --git a/youtube_dl/extractor/justintv.py b/youtube_dl/extractor/justintv.py
deleted file mode 100644 (file)
index 27017e8..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-from __future__ import unicode_literals
-
-import itertools
-import json
-import os
-import re
-
-from .common import InfoExtractor
-from ..utils import (
-    compat_str,
-    ExtractorError,
-    formatSeconds,
-)
-
-
-class JustinTVIE(InfoExtractor):
-    """Information extractor for justin.tv and twitch.tv"""
-    # TODO: One broadcast may be split into multiple videos. The key
-    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
-    # starts at 1 and increases. Can we treat all parts as one video?
-
-    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
-        (?:
-            (?P<channelid>[^/]+)|
-            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
-            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
-        )
-        /?(?:\#.*)?$
-        """
-    _JUSTIN_PAGE_LIMIT = 100
-    IE_NAME = 'justin.tv'
-    IE_DESC = 'justin.tv and twitch.tv'
-    _TEST = {
-        'url': 'http://www.twitch.tv/thegamedevhub/b/296128360',
-        'md5': 'ecaa8a790c22a40770901460af191c9a',
-        'info_dict': {
-            'id': '296128360',
-            'ext': 'flv',
-            'upload_date': '20110927',
-            'uploader_id': 25114803,
-            'uploader': 'thegamedevhub',
-            'title': 'Beginner Series - Scripting With Python Pt.1'
-        }
-    }
-
-    # Return count of items, list of *valid* items
-    def _parse_page(self, url, video_id, counter):
-        info_json = self._download_webpage(
-            url, video_id,
-            'Downloading video info JSON on page %d' % counter,
-            'Unable to download video info JSON %d' % counter)
-
-        response = json.loads(info_json)
-        if type(response) != list:
-            error_text = response.get('error', 'unknown error')
-            raise ExtractorError('Justin.tv API: %s' % error_text)
-        info = []
-        for clip in response:
-            video_url = clip['video_file_url']
-            if video_url:
-                video_extension = os.path.splitext(video_url)[1][1:]
-                video_date = re.sub('-', '', clip['start_time'][:10])
-                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
-                video_id = clip['id']
-                video_title = clip.get('title', video_id)
-                info.append({
-                    'id': compat_str(video_id),
-                    'url': video_url,
-                    'title': video_title,
-                    'uploader': clip.get('channel_name', video_uploader_id),
-                    'uploader_id': video_uploader_id,
-                    'upload_date': video_date,
-                    'ext': video_extension,
-                })
-        return (len(response), info)
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        api_base = 'http://api.justin.tv'
-        paged = False
-        if mobj.group('channelid'):
-            paged = True
-            video_id = mobj.group('channelid')
-            api = api_base + '/channel/archives/%s.json' % video_id
-        elif mobj.group('chapterid'):
-            chapter_id = mobj.group('chapterid')
-
-            webpage = self._download_webpage(url, chapter_id)
-            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
-            if not m:
-                raise ExtractorError('Cannot find archive of a chapter')
-            archive_id = m.group(1)
-
-            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
-            doc = self._download_xml(
-                api, chapter_id,
-                note='Downloading chapter information',
-                errnote='Chapter information download failed')
-            for a in doc.findall('.//archive'):
-                if archive_id == a.find('./id').text:
-                    break
-            else:
-                raise ExtractorError('Could not find chapter in chapter information')
-
-            video_url = a.find('./video_file_url').text
-            video_ext = video_url.rpartition('.')[2] or 'flv'
-
-            chapter_api_url = 'https://api.twitch.tv/kraken/videos/c' + chapter_id
-            chapter_info = self._download_json(
-                chapter_api_url, 'c' + chapter_id,
-                note='Downloading chapter metadata',
-                errnote='Download of chapter metadata failed')
-
-            bracket_start = int(doc.find('.//bracket_start').text)
-            bracket_end = int(doc.find('.//bracket_end').text)
-
-            # TODO determine start (and probably fix up file)
-            #  youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
-            #video_url += '?start=' + TODO:start_timestamp
-            # bracket_start is 13290, but we want 51670615
-            self._downloader.report_warning('Chapter detected, but we can just download the whole file. '
-                                            'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
-
-            info = {
-                'id': 'c' + chapter_id,
-                'url': video_url,
-                'ext': video_ext,
-                'title': chapter_info['title'],
-                'thumbnail': chapter_info['preview'],
-                'description': chapter_info['description'],
-                'uploader': chapter_info['channel']['display_name'],
-                'uploader_id': chapter_info['channel']['name'],
-            }
-            return info
-        else:
-            video_id = mobj.group('videoid')
-            api = api_base + '/broadcast/by_archive/%s.json' % video_id
-
-        entries = []
-        offset = 0
-        limit = self._JUSTIN_PAGE_LIMIT
-        for counter in itertools.count(1):
-            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
-            page_count, page_info = self._parse_page(
-                page_url, video_id, counter)
-            entries.extend(page_info)
-            if not paged or page_count != limit:
-                break
-            offset += limit
-        return {
-            '_type': 'playlist',
-            'id': video_id,
-            'entries': entries,
-        }
index 23103b163fea1ed6a27cb44dadcf231b478edcdb..dbfe4cc03fd8c569ed2f05d5ae9c86c36bb9e278 100644 (file)
@@ -10,7 +10,7 @@ _md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
 
 class KankanIE(InfoExtractor):
     _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'
-    
+
     _TEST = {
         'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
         'file': '48863.flv',
index 5d679e88d811c6ad55c9fe475267c2842a641f83..e94e2f8ade275a6733561948a5712efcc9a314f9 100644 (file)
@@ -6,29 +6,36 @@ from .common import InfoExtractor
 
 
 class KeekIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
+    _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<id>\w+)'
     IE_NAME = 'keek'
     _TEST = {
         'url': 'https://www.keek.com/ytdl/keeks/NODfbab',
-        'file': 'NODfbab.mp4',
-        'md5': '9b0636f8c0f7614afa4ea5e4c6e57e83',
+        'md5': '09c5c109067536c1cec8bac8c21fea05',
         'info_dict': {
-            'uploader': 'ytdl',
+            'id': 'NODfbab',
+            'ext': 'mp4',
+            'uploader': 'youtube-dl project',
+            'uploader_id': 'ytdl',
             'title': 'test chars: "\'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de .',
         },
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('videoID')
+        video_id = self._match_id(url)
 
         video_url = 'http://cdn.keek.com/keek/video/%s' % video_id
         thumbnail = 'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
         webpage = self._download_webpage(url, video_id)
 
-        uploader = self._html_search_regex(
-            r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>',
-            webpage, 'uploader', fatal=False)
+        raw_desc = self._html_search_meta('description', webpage)
+        if raw_desc:
+            uploader = self._html_search_regex(
+                r'Watch (.*?)\s+\(', raw_desc, 'uploader', fatal=False)
+            uploader_id = self._html_search_regex(
+                r'Watch .*?\(@(.+?)\)', raw_desc, 'uploader_id', fatal=False)
+        else:
+            uploader = None
+            uploader_id = None
 
         return {
             'id': video_id,
@@ -36,5 +43,6 @@ class KeekIE(InfoExtractor):
             'ext': 'mp4',
             'title': self._og_search_title(webpage),
             'thumbnail': thumbnail,
-            'uploader': uploader
+            'uploader': uploader,
+            'uploader_id': uploader_id,
         }
index 75b63cffb5961f33ea2d2f5ae37803dfb0fe37fc..97dcb518a3587406bc93a44c39344630cafe7119 100644 (file)
@@ -4,7 +4,7 @@ import os
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse_urlparse,
     compat_urllib_request,
     compat_urllib_parse,
@@ -15,7 +15,7 @@ from ..aes import (
 
 
 class KeezMoviesIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:www\.)?keezmovies\.com/video/.+?(?P<videoid>[0-9]+)(?:[/?&]|$)'
+    _VALID_URL = r'https?://(?:www\.)?keezmovies\.com/video/.+?(?P<id>[0-9]+)(?:[/?&]|$)'
     _TEST = {
         'url': 'http://www.keezmovies.com/video/petite-asian-lady-mai-playing-in-bathtub-1214711',
         'file': '1214711.mp4',
@@ -27,8 +27,7 @@ class KeezMoviesIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
+        video_id = self._match_id(url)
 
         req = compat_urllib_request.Request(url)
         req.add_header('Cookie', 'age_verified=1')
index 56a76380cad6f45cb4a0a33581803f1371b2543b..7d4b57056509383fdc082a68c1650f38dc258763 100644 (file)
@@ -1,8 +1,6 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -15,28 +13,25 @@ class KickStarterIE(InfoExtractor):
             'id': '1404461844',
             'ext': 'mp4',
             'title': 'Intersection: The Story of Josh Grant by Kyle Cowling',
-            'description': 'A unique motocross documentary that examines the '
-                'life and mind of one of sports most elite athletes: Josh Grant.',
+            'description': (
+                'A unique motocross documentary that examines the '
+                'life and mind of one of sports most elite athletes: Josh Grant.'
+            ),
         },
     }, {
         'note': 'Embedded video (not using the native kickstarter video service)',
         'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
-        'playlist': [
-            {
-                'info_dict': {
-                    'id': '78704821',
-                    'ext': 'mp4',
-                    'uploader_id': 'pebble',
-                    'uploader': 'Pebble Technology',
-                    'title': 'Pebble iOS Notifications',
-                }
-            }
-        ],
+        'info_dict': {
+            'id': '78704821',
+            'ext': 'mp4',
+            'uploader_id': 'pebble',
+            'uploader': 'Pebble Technology',
+            'title': 'Pebble iOS Notifications',
+        }
     }]
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         title = self._html_search_regex(
index 5341ac773f79fe237626bdfe3243bd1561d8003d..41fd62009ac16100c1e6bb776028020cd12e9ff2 100644 (file)
@@ -34,7 +34,7 @@ class KontrTubeIE(InfoExtractor):
         video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
         thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
         title = self._html_search_regex(
-            r'<title>(.+?) - Труба зовёт - Интересный видеохостинг</title>', webpage, 'video title')
+            r'<title>(.+?)</title>', webpage, 'video title')
         description = self._html_search_meta('description', webpage, 'video description')
 
         mobj = re.search(
@@ -63,4 +63,4 @@ class KontrTubeIE(InfoExtractor):
             'duration': duration,
             'view_count': int_or_none(view_count),
             'comment_count': int_or_none(comment_count),
-        }
\ No newline at end of file
+        }
index 484239b19e9b20436eaa40e15263d7dd72c11445..a602980a141f3f8ccce026eaddc8b383e7894352 100644 (file)
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -18,11 +16,11 @@ class Ku6IE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        title = self._search_regex(r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
+
+        title = self._html_search_regex(
+            r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
         dataUrl = 'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id
         jsonData = self._download_json(dataUrl, video_id)
         downloadUrl = jsonData['data']['f']
@@ -32,4 +30,3 @@ class Ku6IE(InfoExtractor):
             'title': title,
             'url': downloadUrl
         }
-
diff --git a/youtube_dl/extractor/laola1tv.py b/youtube_dl/extractor/laola1tv.py
new file mode 100644 (file)
index 0000000..2fd3b46
--- /dev/null
@@ -0,0 +1,77 @@
+from __future__ import unicode_literals
+
+import random
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class Laola1TvIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<id>[0-9]+)\.html'
+    _TEST = {
+        'url': 'http://www.laola1.tv/de-de/live/bwf-bitburger-open-grand-prix-gold-court-1/250019.html',
+        'info_dict': {
+            'id': '250019',
+            'ext': 'mp4',
+            'title': 'Bitburger Open Grand Prix Gold - Court 1',
+            'categories': ['Badminton'],
+            'uploader': 'BWF - Badminton World Federation',
+            'is_live': True,
+        },
+        'params': {
+            'skip_download': True,
+        }
+    }
+
+    _BROKEN = True  # Not really - extractor works fine, but f4m downloader does not support live streams yet.
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        lang = mobj.group('lang')
+        portal = mobj.group('portal')
+
+        webpage = self._download_webpage(url, video_id)
+        iframe_url = self._search_regex(
+            r'<iframe[^>]*?class="main_tv_player"[^>]*?src="([^"]+)"',
+            webpage, 'iframe URL')
+
+        iframe = self._download_webpage(
+            iframe_url, video_id, note='Downloading iframe')
+        flashvars_m = re.findall(
+            r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe)
+        flashvars = dict((m[0], m[1]) for m in flashvars_m)
+
+        xml_url = ('http://www.laola1.tv/server/hd_video.php?' +
+                   'play=%s&partner=1&portal=%s&v5ident=&lang=%s' % (
+                       video_id, portal, lang))
+        hd_doc = self._download_xml(xml_url, video_id)
+
+        title = hd_doc.find('.//video/title').text
+        flash_url = hd_doc.find('.//video/url').text
+        categories = hd_doc.find('.//video/meta_sports').text.split(',')
+        uploader = hd_doc.find('.//video/meta_organistation').text
+
+        ident = random.randint(10000000, 99999999)
+        token_url = '%s&ident=%s&klub=0&unikey=0&timestamp=%s&auth=%s' % (
+            flash_url, ident, flashvars['timestamp'], flashvars['auth'])
+
+        token_doc = self._download_xml(
+            token_url, video_id, note='Downloading token')
+        token_attrib = token_doc.find('.//token').attrib
+        if token_attrib.get('auth') == 'blocked':
+            raise ExtractorError('Token error: %s' % token_attrib.get('comment'))
+
+        video_url = '%s?hdnea=%s&hdcore=3.2.0' % (
+            token_attrib['url'], token_attrib['auth'])
+
+        return {
+            'id': video_id,
+            'is_live': True,
+            'title': title,
+            'url': video_url,
+            'uploader': uploader,
+            'categories': categories,
+            'ext': 'mp4',
+        }
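
With the extractor flagged _BROKEN until the f4m downloader handles live streams, metadata extraction is the part that can be exercised today. A minimal sketch of driving it through the public API (the URL comes from the test above; skip_download mirrors the test's params):

    from youtube_dl import YoutubeDL

    url = ('http://www.laola1.tv/de-de/live/'
           'bwf-bitburger-open-grand-prix-gold-court-1/250019.html')
    ydl = YoutubeDL({'skip_download': True})
    info = ydl.extract_info(url, download=False)
    print(info['title'], info['categories'], info['is_live'])
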
index 8d9491f233bf578bc9274a18fe022a1538effad6..1dfe7f77f4ccdefa0b076f71f6467644e465cb52 100644 (file)
@@ -52,7 +52,7 @@ class LifeNewsIE(InfoExtractor):
             r'<div class=\'comments\'>\s*<span class=\'counter\'>(\d+)</span>', webpage, 'comment count', fatal=False)
 
         upload_date = self._html_search_regex(
-            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
+            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date', fatal=False)
         if upload_date is not None:
             upload_date = unified_strdate(upload_date)
 
@@ -71,4 +71,4 @@ class LifeNewsIE(InfoExtractor):
         if len(videos) == 1:
             return make_entry(video_id, videos[0])
         else:
-            return [make_entry(video_id, media, video_number+1) for video_number, media in enumerate(videos)]
\ No newline at end of file
+            return [make_entry(video_id, media, video_number + 1) for video_number, media in enumerate(videos)]
index 8e50e8f79adee2e21c7801da13da60897fcb61ec..b04be1e8cfda94addca26a1d1e3731ce61519dc1 100644 (file)
@@ -19,8 +19,7 @@ class LiveLeakIE(InfoExtractor):
             'uploader': 'ljfriel2',
             'title': 'Most unlucky car accident'
         }
-    },
-    {
+    }, {
         'url': 'http://www.liveleak.com/view?i=f93_1390833151',
         'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf',
         'info_dict': {
@@ -30,8 +29,7 @@ class LiveLeakIE(InfoExtractor):
             'uploader': 'ARD_Stinkt',
             'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
         }
-    },
-    {
+    }, {
         'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
         'md5': '42c6d97d54f1db107958760788c5f48f',
         'info_dict': {
index 5161474171b2a6a53389477275f54480a02d1240..5247c6f58500e301dab50ed48039df0c070b493a 100644 (file)
@@ -4,10 +4,12 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
     compat_urllib_parse_urlparse,
     compat_urlparse,
+)
+from ..utils import (
     ExtractorError,
     find_xpath_attr,
     int_or_none,
@@ -18,7 +20,7 @@ from ..utils import (
 
 class LivestreamIE(InfoExtractor):
     IE_NAME = 'livestream'
-    _VALID_URL = r'http://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$'
+    _VALID_URL = r'https?://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
     _TESTS = [{
         'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
         'md5': '53274c76ba7754fb0e8d072716f2292b',
@@ -37,6 +39,9 @@ class LivestreamIE(InfoExtractor):
             'title': 'TEDCity2.0 (English)',
         },
         'playlist_mincount': 4,
+    }, {
+        'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640',
+        'only_matching': True,
     }]
 
     def _parse_smil(self, video_id, smil_url):
@@ -190,7 +195,8 @@ class LivestreamOriginalIE(InfoExtractor):
             'id': video_id,
             'title': item.find('title').text,
             'url': 'rtmp://extondemand.livestream.com/ondemand',
-            'play_path': 'mp4:trans/dv15/mogulus-{0}.mp4'.format(path),
+            'play_path': 'trans/dv15/mogulus-{0}'.format(path),
+            'player_url': 'http://static.livestream.com/chromelessPlayer/v21/playerapi.swf?hash=5uetk&v=0803&classid=D27CDB6E-AE6D-11cf-96B8-444553540000&jsEnabled=false&wmode=opaque',
             'ext': 'flv',
             'thumbnail': thumbnail_url,
         }
index fca0bfef0726b472e484d7be2fd890fb5ce50f8c..d72d470aa8dbb532f80f44796354dabd3de92261 100644 (file)
@@ -22,7 +22,7 @@ class LRTIE(InfoExtractor):
             'id': '54391',
             'ext': 'mp4',
             'title': 'Septynios Kauno dienos',
-            'description': 'Kauno miesto ir apskrities naujienos',
+            'description': 'md5:24d84534c7dc76581e59f5689462411a',
             'duration': 1783,
         },
         'params': {
@@ -32,9 +32,7 @@ class LRTIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         title = remove_end(self._og_search_title(webpage), ' - LRT')
index 33f34f4e9bdda2aa034dd4f46ef3299478f181ec..26e84970d49463068f032dcf05afbc03e485e859 100644 (file)
@@ -5,12 +5,14 @@ import json
 
 from .subtitles import SubtitlesInfoExtractor
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
+    compat_str,
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
     int_or_none,
-    compat_str,
 )
 
 
@@ -45,7 +47,7 @@ class LyndaIE(SubtitlesInfoExtractor):
         video_id = mobj.group(1)
 
         page = self._download_webpage('http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id, video_id,
-            'Downloading video JSON')
+                                      'Downloading video JSON')
         video_json = json.loads(page)
 
         if 'Status' in video_json:
@@ -109,7 +111,7 @@ class LyndaIE(SubtitlesInfoExtractor):
             'password': password,
             'remember': 'false',
             'stayPut': 'false'
-        }        
+        }
         request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
         login_page = self._download_webpage(request, None, 'Logging in as %s' % username)
 
@@ -117,7 +119,7 @@ class LyndaIE(SubtitlesInfoExtractor):
         m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
         if m is not None:
             response = m.group('json')
-            response_json = json.loads(response)            
+            response_json = json.loads(response)
             state = response_json['state']
 
             if state == 'notlogged':
@@ -187,7 +189,7 @@ class LyndaCourseIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         course_path = mobj.group('coursepath')
         course_id = mobj.group('courseid')
-        
+
         page = self._download_webpage('http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
                                       course_id, 'Downloading course JSON')
         course_json = json.loads(page)
@@ -221,4 +223,4 @@ class LyndaCourseIE(InfoExtractor):
 
         course_title = course_json['Title']
 
-        return self.playlist_result(entries, course_id, course_title)
\ No newline at end of file
+        return self.playlist_result(entries, course_id, course_title)
index 1a26b5d572852eab85449af0a6ffc2fd7c98aeda..7e025831b51d611f00e248bda637b4ae8f35efb6 100644 (file)
@@ -27,7 +27,7 @@ class M6IE(InfoExtractor):
         video_id = mobj.group('id')
 
         rss = self._download_xml('http://ws.m6.fr/v1/video/info/m6/bonus/%s' % video_id, video_id,
-            'Downloading video RSS')
+                                 'Downloading video RSS')
 
         title = rss.find('./channel/item/title').text
         description = rss.find('./channel/item/description').text
@@ -53,4 +53,4 @@ class M6IE(InfoExtractor):
             'duration': duration,
             'view_count': view_count,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 7460d81cd501b8c52dcce3caae8313f6854b571a..54a14cb94c93dad587a83c58d58ec3d262f0eed8 100644 (file)
@@ -16,7 +16,7 @@ class MailRuIE(InfoExtractor):
             'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76',
             'md5': 'dea205f03120046894db4ebb6159879a',
             'info_dict': {
-                'id': '46301138',
+                'id': '46301138_76',
                 'ext': 'mp4',
                 'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
                 'timestamp': 1393232740,
@@ -30,7 +30,7 @@ class MailRuIE(InfoExtractor):
             'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html',
             'md5': '00a91a58c3402204dcced523777b475f',
             'info_dict': {
-                'id': '46843144',
+                'id': '46843144_1263',
                 'ext': 'mp4',
                 'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion',
                 'timestamp': 1397217632,
@@ -54,33 +54,36 @@ class MailRuIE(InfoExtractor):
 
         author = video_data['author']
         uploader = author['name']
-        uploader_id = author['id']
+        uploader_id = author.get('id') or author.get('email')
+        view_count = video_data.get('views_count')
 
-        movie = video_data['movie']
-        content_id = str(movie['contentId'])
-        title = movie['title']
+        meta_data = video_data['meta']
+        content_id = '%s_%s' % (
+            meta_data.get('accId', ''), meta_data['itemId'])
+        title = meta_data['title']
         if title.endswith('.mp4'):
             title = title[:-4]
-        thumbnail = movie['poster']
-        duration = movie['duration']
-
-        view_count = video_data['views_count']
+        thumbnail = meta_data['poster']
+        duration = meta_data['duration']
+        timestamp = meta_data['timestamp']
 
         formats = [
             {
                 'url': video['url'],
-                'format_id': video['name'],
+                'format_id': video['key'],
+                'height': int(video['key'].rstrip('p'))
             } for video in video_data['videos']
         ]
+        self._sort_formats(formats)
 
         return {
             'id': content_id,
             'title': title,
             'thumbnail': thumbnail,
-            'timestamp': video_data['timestamp'],
+            'timestamp': timestamp,
             'uploader': uploader,
             'uploader_id': uploader_id,
             'duration': duration,
             'view_count': view_count,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 8c1966ab25e5215ab96286822d50507aa25ea58d..0b85a59d1c644d7d04e573aae0bdd03ebd4f6c80 100644 (file)
@@ -1,42 +1,33 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
 )
 
+
 class MalemotionIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
+    _VALID_URL = r'https?://malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
     _TEST = {
-        'url': 'http://malemotion.com/video/bien-dur.10ew',
-        'file': '10ew.mp4',
-        'md5': 'b3cc49f953b107e4a363cdff07d100ce',
+        'url': 'http://malemotion.com/video/bete-de-concours.ltc',
+        'md5': '3013e53a0afbde2878bc39998c33e8a5',
         'info_dict': {
-            "title": "Bien dur",
-            "age_limit": 18,
+            'id': 'ltc',
+            'ext': 'mp4',
+            'title': 'Bête de Concours',
+            'age_limit': 18,
         },
-        'skip': 'This video has been deleted.'
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group("id")
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        self.report_extraction(video_id)
-
-        # Extract video URL
-        video_url = compat_urllib_parse.unquote(
-            self._search_regex(r'<source type="video/mp4" src="(.+?)"', webpage, 'video URL'))
-
-        # Extract title
+        video_url = compat_urllib_parse.unquote(self._search_regex(
+            r'<source type="video/mp4" src="(.+?)"', webpage, 'video URL'))
         video_title = self._html_search_regex(
             r'<title>(.*?)</title', webpage, 'title')
-
-        # Extract video thumbnail
         video_thumbnail = self._search_regex(
             r'<video .+?poster="(.+?)"', webpage, 'thumbnail', fatal=False)
 
@@ -46,14 +37,12 @@ class MalemotionIE(InfoExtractor):
             'format_id': 'mp4',
             'preference': 1,
         }]
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
             'formats': formats,
-            'uploader': None,
-            'upload_date': None,
             'title': video_title,
             'thumbnail': video_thumbnail,
-            'description': None,
             'age_limit': 18,
         }
index 1b8c4a32edf5d269b1e9bb9db366487ef2fba981..5fdd19027db3ccad0265601b8d88452a0eaac525 100644 (file)
@@ -7,7 +7,7 @@ from .common import InfoExtractor
 
 class MDRIE(InfoExtractor):
     _VALID_URL = r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)'
-    
+
     # No tests, MDR regularly deletes its videos
     _TEST = {
         'url': 'http://www.mdr.de/fakt/video189002.html',
index 1a896b536dd813a561cea8f870258bf73519e00b..8bc333b0277e27e6fd8f3d4f11b3c9c7eabdd7d7 100644 (file)
@@ -3,10 +3,12 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_parse_qs,
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
@@ -22,7 +24,7 @@ class MetacafeIE(InfoExtractor):
         # Youtube video
         {
             'add_ie': ['Youtube'],
-            'url':  'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/',
+            'url': 'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/',
             'info_dict': {
                 'id': '_aUehQsCQtM',
                 'ext': 'mp4',
@@ -219,8 +221,8 @@ class MetacafeIE(InfoExtractor):
         description = self._og_search_description(webpage)
         thumbnail = self._og_search_thumbnail(webpage)
         video_uploader = self._html_search_regex(
-                r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
-                webpage, 'uploader nickname', fatal=False)
+            r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
+            webpage, 'uploader nickname', fatal=False)
         duration = int_or_none(
             self._html_search_meta('video:duration', webpage))
 
index 07f072924a6dadb2838230fd29a6a830ff99bb64..e30320569805aedaa6694ae54f9086909593f7a4 100644 (file)
@@ -28,7 +28,7 @@ class MetacriticIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         # The XML is not well-formed: it contains raw '&'
         info = self._download_xml('http://www.metacritic.com/video_data?video=' + video_id,
-            video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)
+                                  video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)
 
         clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id)
         formats = []
@@ -44,7 +44,7 @@ class MetacriticIE(InfoExtractor):
         self._sort_formats(formats)
 
         description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>',
-            webpage, 'description', flags=re.DOTALL)
+                                              webpage, 'description', flags=re.DOTALL)
 
         return {
             'id': video_id,
diff --git a/youtube_dl/extractor/minhateca.py b/youtube_dl/extractor/minhateca.py
new file mode 100644 (file)
index 0000000..14934b7
--- /dev/null
@@ -0,0 +1,72 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_parse,
+    compat_urllib_request,
+)
+from ..utils import (
+    int_or_none,
+    parse_duration,
+    parse_filesize,
+)
+
+
+class MinhatecaIE(InfoExtractor):
+    _VALID_URL = r'https?://minhateca\.com\.br/[^?#]+,(?P<id>[0-9]+)\.'
+    _TEST = {
+        'url': 'http://minhateca.com.br/pereba/misc/youtube-dl+test+video,125848331.mp4(video)',
+        'info_dict': {
+            'id': '125848331',
+            'ext': 'mp4',
+            'title': 'youtube-dl test video',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'filesize_approx': 1530000,
+            'duration': 9,
+            'view_count': int,
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        token = self._html_search_regex(
+            r'<input name="__RequestVerificationToken".*?value="([^"]+)"',
+            webpage, 'request token')
+        token_data = [
+            ('fileId', video_id),
+            ('__RequestVerificationToken', token),
+        ]
+        req = compat_urllib_request.Request(
+            'http://minhateca.com.br/action/License/Download',
+            data=compat_urllib_parse.urlencode(token_data))
+        req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        data = self._download_json(
+            req, video_id, note='Downloading metadata')
+
+        video_url = data['redirectUrl']
+        title_str = self._html_search_regex(
+            r'<h1.*?>(.*?)</h1>', webpage, 'title')
+        title, _, ext = title_str.rpartition('.')
+        filesize_approx = parse_filesize(self._html_search_regex(
+            r'<p class="fileSize">(.*?)</p>',
+            webpage, 'file size approximation', fatal=False))
+        duration = parse_duration(self._html_search_regex(
+            r'(?s)<p class="fileLeng[ht][th]">.*?class="bold">(.*?)<',
+            webpage, 'duration', fatal=False))
+        view_count = int_or_none(self._html_search_regex(
+            r'<p class="downloadsCounter">([0-9]+)</p>',
+            webpage, 'view count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'ext': ext,
+            'filesize_approx': filesize_approx,
+            'duration': duration,
+            'view_count': view_count,
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
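
The extractor above leans on parse_filesize, a utils helper that appears to arrive together with it; the test expects 'filesize_approx': 1530000 for a size the page shows as human-readable text. A hedged illustration of the conversion, assuming the usual SI/binary unit split (the exact unit table and rounding live in youtube_dl/utils.py):

    from youtube_dl.utils import parse_filesize

    # decimal (SI) suffixes scale by powers of 1000,
    # binary suffixes by powers of 1024
    print(parse_filesize('1.53 MB'))   # 1530000, the filesize_approx above
    print(parse_filesize('1.53 MiB'))  # 1604321 (1.53 * 1024 ** 2, truncated)
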
index 807b1dc89b608333e06c1fbab2e9d806fb7d090f..d354702d1d5753a1cee6976c497edc9177e91ef6 100644 (file)
@@ -5,8 +5,10 @@ import json
 
 from .common import InfoExtractor
 from .youtube import YoutubeIE
-from ..utils import (
+from ..compat import (
     compat_urlparse,
+)
+from ..utils import (
     clean_html,
     ExtractorError,
     get_element_by_id,
index 979f3d692a0707fdf2a6a6617b75581e047679dd..2567583235617e52b6420419863dbc8d319c8201 100644 (file)
@@ -1,11 +1,13 @@
 from __future__ import unicode_literals
 
-import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
+    compat_urlparse,
+)
+from ..utils import (
     get_element_by_attribute,
     parse_duration,
     strip_jsonp,
@@ -14,7 +16,7 @@ from ..utils import (
 
 class MiTeleIE(InfoExtractor):
     IE_NAME = 'mitele.es'
-    _VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<episode>[^/]+)/'
+    _VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/'
 
     _TEST = {
         'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
@@ -30,22 +32,28 @@ class MiTeleIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        episode = mobj.group('episode')
+        episode = self._match_id(url)
         webpage = self._download_webpage(url, episode)
         embed_data_json = self._search_regex(
-            r'MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
-            flags=re.DOTALL
+            r'(?s)MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
         ).replace('\'', '"')
         embed_data = json.loads(embed_data_json)
 
-        info_url = embed_data['flashvars']['host']
+        domain = embed_data['mediaUrl']
+        if not domain.startswith('http'):
+            # only happens in telecinco.es videos
+            domain = 'http://' + domain
+        info_url = compat_urlparse.urljoin(
+            domain,
+            compat_urllib_parse.unquote(embed_data['flashvars']['host'])
+        )
         info_el = self._download_xml(info_url, episode).find('./video/info')
 
         video_link = info_el.find('videoUrl/link').text
         token_query = compat_urllib_parse.urlencode({'id': video_link})
         token_info = self._download_json(
-            'http://token.mitele.es/?' + token_query, episode,
+            embed_data['flashvars']['ov_tk'] + '?' + token_query,
+            episode,
             transform_source=strip_jsonp
         )
 
index 520f27fca14a3ed819b452281ede6f4aa86fe4a5..07d194562e77044a8d8d87138ed32205842a1a25 100644 (file)
@@ -3,8 +3,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
+)
+from ..utils import (
     ExtractorError,
     HEADRequest,
     int_or_none,
@@ -33,22 +35,22 @@ class MixcloudIE(InfoExtractor):
         },
     }
 
-    def check_urls(self, url_list):
-        """Returns 1st active url from list"""
-        for url in url_list:
+    def _get_url(self, track_id, template_url):
+        server_count = 30
+        for i in range(server_count):
+            url = template_url % i
             try:
                 # We only want to know whether the request succeeds;
                 # don't download the whole file
-                self._request_webpage(HEADRequest(url), None, False)
+                self._request_webpage(
+                    HEADRequest(url), track_id,
+                    'Checking URL %d/%d ...' % (i + 1, server_count))
                 return url
             except ExtractorError:
-                url = None
+                pass
 
         return None
 
-    def _get_url(self, template_url):
-        return self.check_urls(template_url % i for i in range(30))
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         uploader = mobj.group(1)
@@ -61,16 +63,16 @@ class MixcloudIE(InfoExtractor):
             r'\s(?:data-preview-url|m-preview)="(.+?)"', webpage, 'preview url')
         song_url = preview_url.replace('/previews/', '/c/originals/')
         template_url = re.sub(r'(stream\d*)', 'stream%d', song_url)
-        final_song_url = self._get_url(template_url)
+        final_song_url = self._get_url(track_id, template_url)
         if final_song_url is None:
             self.to_screen('Trying with m4a extension')
             template_url = template_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/')
-            final_song_url = self._get_url(template_url)
+            final_song_url = self._get_url(track_id, template_url)
         if final_song_url is None:
             raise ExtractorError('Unable to extract track url')
 
         PREFIX = (
-            r'<div class="cloudcast-play-button-container"'
+            r'<span class="play-button[^"]*?"'
             r'(?:\s+[a-zA-Z0-9-]+(?:="[^"]+")?)*?\s+')
         title = self._html_search_regex(
             PREFIX + r'm-title="([^"]+)"', webpage, 'title')
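
The reworked _get_url above is a linear server probe: substitute indices into the stream%d template derived from the preview URL, issue a HEAD request per candidate, and keep the first URL that answers. The same idea in isolation, as a self-contained sketch (Python 3 stdlib for brevity, whereas the extractor itself goes through HEADRequest and the compat shims; the host name is illustrative):

    import urllib.error
    import urllib.request

    def first_live_url(template_url, server_count=30):
        """Return the first URL whose HEAD request succeeds, else None."""
        for i in range(server_count):
            url = template_url % i
            try:
                urllib.request.urlopen(
                    urllib.request.Request(url, method='HEAD'))
                return url
            except urllib.error.URLError:
                pass
        return None

    # first_live_url('http://stream%d.example.com/c/originals/track.mp3')
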
index 42aa2e227dcb1360af535c92a17edcbdf99f259c..1a241aca77983ac9626a53e59bf85ad4394cc8fd 100644 (file)
@@ -10,7 +10,7 @@ from ..utils import (
 
 
 class MLBIE(InfoExtractor):
-    _VALID_URL = r'https?://m\.mlb\.com/(?:(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v|shared/video/embed/embed\.html\?.*?\bcontent_id=)(?P<id>n?\d+)'
+    _VALID_URL = r'https?://m(?:lb)?\.mlb\.com/(?:(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v|(?:shared/video/embed/embed\.html|[^/]+/video/play\.jsp)\?.*?\bcontent_id=)(?P<id>n?\d+)'
     _TESTS = [
         {
             'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea',
@@ -72,6 +72,14 @@ class MLBIE(InfoExtractor):
             'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',
             'only_matching': True,
         },
+        {
+            'url': 'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553',
+            'only_matching': True,
+        },
+        {
+            'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553',
+            'only_matching': True,
+        },
     ]
 
     def _real_extract(self, url):
index 2ff79b9b88590e87f1aecf9fdfd32f242bd98420..184f9c2c9e216a47b635680ba928f1c3c403619d 100644 (file)
@@ -5,10 +5,12 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
+    ExtractorError,
     int_or_none,
 )
 
index d658647e6ca6d9b7675dd76ea55c58f52887374d..2cec12d35ec1797dd7612ad49c5739e87f77e6c9 100644 (file)
@@ -4,7 +4,7 @@ import os
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse_urlparse,
     compat_urllib_request,
     compat_urllib_parse,
@@ -12,7 +12,7 @@ from ..utils import (
 
 
 class MofosexIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:www\.)?(?P<url>mofosex\.com/videos/(?P<videoid>[0-9]+)/.*?\.html)'
+    _VALID_URL = r'https?://(?:www\.)?(?P<url>mofosex\.com/videos/(?P<id>[0-9]+)/.*?\.html)'
     _TEST = {
         'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
         'md5': '1b2eb47ac33cc75d4a80e3026b613c5a',
@@ -26,7 +26,7 @@ class MofosexIE(InfoExtractor):
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
+        video_id = mobj.group('id')
         url = 'http://www.' + mobj.group('url')
 
         req = compat_urllib_request.Request(url)
index 90b460d65c2ca90e252dba6ecc3adcb93f86ab1e..0ba435dc5597219e9b569c441e58e3a196e1bfbe 100644 (file)
@@ -55,4 +55,4 @@ class MojvideoIE(InfoExtractor):
             'title': title,
             'thumbnail': thumbnail,
             'duration': duration,
-        }
\ No newline at end of file
+        }
index 79bb2ca5935cb7ad888081540696e256c31faa98..5de719bdc41d2af56d6133a85b998c4ed85af726 100644 (file)
@@ -5,7 +5,7 @@ import os.path
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
 )
@@ -37,10 +37,9 @@ class MonikerIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         orig_webpage = self._download_webpage(url, video_id)
+
         fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
         data = dict(fields)
 
@@ -54,7 +53,7 @@ class MonikerIE(InfoExtractor):
 
         title = os.path.splitext(data['fname'])[0]
 
-        #Could be several links with different quality
+        # Could be several links with different quality
         links = re.findall(r'"file" : "?(.+?)",', webpage)
         # Assume the links are ordered in quality
         formats = [{
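
The findall over hidden inputs turns the page's form into a dict that can be re-posted. A toy run with made-up markup (the real page fields differ):

    import re

    orig_webpage = '''
    <input type="hidden" name="op" value="download1">
    <input type="hidden" name="id" value="abc123">
    '''
    fields = re.findall(
        r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
    print(dict(fields))  # {'op': 'download1', 'id': 'abc123'}
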
index 7d21ea18f1bec57a83a49478d43f000b8039041f..9f2853fa32a031bae5f4d5fab4ee9cab93dbafc8 100644 (file)
@@ -4,11 +4,13 @@ import re
 import time
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
+from ..compat import (
     compat_urllib_request,
     compat_urllib_parse,
 )
+from ..utils import (
+    ExtractorError,
+)
 
 
 class MooshareIE(InfoExtractor):
@@ -43,13 +45,11 @@ class MooshareIE(InfoExtractor):
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         page = self._download_webpage(url, video_id, 'Downloading page')
 
         if re.search(r'>Video Not Found or Deleted<', page) is not None:
-            raise ExtractorError(u'Video %s does not exist' % video_id, expected=True)
+            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
 
         hash_key = self._html_search_regex(r'<input type="hidden" name="hash" value="([^"]+)">', page, 'hash')
         title = self._html_search_regex(r'(?m)<div class="blockTitle">\s*<h2>Watch ([^<]+)</h2>', page, 'title')
@@ -111,4 +111,4 @@ class MooshareIE(InfoExtractor):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 6229b21732b70525b832ab2f3370594736cae8df..97d5da626a7a5d2555ac3107eb89d1a4fd11b510 100644 (file)
@@ -5,20 +5,20 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    int_or_none,
+    str_to_int,
     unified_strdate,
 )
 
 
 class MotherlessIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?motherless\.com/(?P<id>[A-Z0-9]+)'
+    _VALID_URL = r'http://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
     _TESTS = [
         {
             'url': 'http://motherless.com/AC3FFE1',
-            'md5': '5527fef81d2e529215dad3c2d744a7d9',
+            'md5': '310f62e325a9fafe64f68c0bccb6e75f',
             'info_dict': {
                 'id': 'AC3FFE1',
-                'ext': 'flv',
+                'ext': 'mp4',
                 'title': 'Fucked in the ass while playing PS3',
                 'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
                 'upload_date': '20100913',
@@ -40,33 +40,51 @@ class MotherlessIE(InfoExtractor):
                 'thumbnail': 're:http://.*\.jpg',
                 'age_limit': 18,
             }
+        },
+        {
+            'url': 'http://motherless.com/g/cosplay/633979F',
+            'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
+            'info_dict': {
+                'id': '633979F',
+                'ext': 'mp4',
+                'title': 'Turtlette',
+                'categories': ['superheroine heroine  superher'],
+                'upload_date': '20140827',
+                'uploader_id': 'shade0230',
+                'thumbnail': 're:http://.*\.jpg',
+                'age_limit': 18,
+            }
         }
     ]
 
-    def _real_extract(self,url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        title = self._html_search_regex(r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
-        
-        video_url = self._html_search_regex(r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video_url')
+        title = self._html_search_regex(
+            r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
+        video_url = self._html_search_regex(
+            r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video URL')
         age_limit = self._rta_search(webpage)
+        view_count = str_to_int(self._html_search_regex(
+            r'<strong>Views</strong>\s+([^<]+)<',
+            webpage, 'view count', fatal=False))
+        like_count = str_to_int(self._html_search_regex(
+            r'<strong>Favorited</strong>\s+([^<]+)<',
+            webpage, 'like count', fatal=False))
 
-        view_count = self._html_search_regex(r'<strong>Views</strong>\s+([^<]+)<', webpage, 'view_count')
-        upload_date = self._html_search_regex(r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload_date')
+        upload_date = self._html_search_regex(
+            r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
         if 'Ago' in upload_date:
             days = int(re.search(r'([0-9]+)', upload_date).group(1))
             upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')
         else:
             upload_date = unified_strdate(upload_date)
 
-        like_count = self._html_search_regex(r'<strong>Favorited</strong>\s+([^<]+)<', webpage, 'like_count')
-
         comment_count = webpage.count('class="media-comment-contents"')
-        uploader_id = self._html_search_regex(r'"thumb-member-username">\s+<a href="/m/([^"]+)"', webpage, 'uploader_id')
+        uploader_id = self._html_search_regex(
+            r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
+            webpage, 'uploader_id')
 
         categories = self._html_search_meta('keywords', webpage)
         if categories:
@@ -79,8 +97,8 @@ class MotherlessIE(InfoExtractor):
             'uploader_id': uploader_id,
             'thumbnail': self._og_search_thumbnail(webpage),
             'categories': categories,
-            'view_count': int_or_none(view_count.replace(',', '')),
-            'like_count': int_or_none(like_count.replace(',', '')),
+            'view_count': view_count,
+            'like_count': like_count,
             'comment_count': comment_count,
             'age_limit': age_limit,
             'url': video_url,
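
Switching from int_or_none plus a manual replace(',', '') to str_to_int moves the separator stripping into the helper and tolerates the fatal=False regexes handing it None. A sketch of what str_to_int presumably does:

    import re

    def str_to_int(int_str):
        # assumed shape of youtube_dl.utils.str_to_int
        if int_str is None:
            return None
        return int(re.sub(r'[,\.]', '', int_str))

    print(str_to_int('12,345'))  # 12345
    print(str_to_int(None))      # None
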
index 7c0ec6a127e97dca1068db9db740954e851a8447..f5ca74e976bc10ff896bf7e6134a14332c7b131c 100644 (file)
@@ -3,13 +3,14 @@ from __future__ import unicode_literals
 
 import hashlib
 import json
-import re
 import time
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_parse_qs,
     compat_str,
+)
+from ..utils import (
     int_or_none,
 )
 
@@ -32,10 +33,9 @@ class MotorsportIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('id')
-
+        display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
+
         flashvars_code = self._html_search_regex(
             r'<embed id="player".*?flashvars="([^"]+)"', webpage, 'flashvars')
         flashvars = compat_parse_qs(flashvars_code)
index 456807dd1c4487332a4e0006448074010e86117b..04e17d0551c7a46feff1822c4dc4be38d00cc520 100644 (file)
@@ -3,9 +3,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_str,
+)
 from ..utils import (
     ExtractorError,
-    compat_str,
     clean_html,
 )
 
index 43146180ad2b272104cb2234de3f4482e7c330fb..f130b75c416ad3fe2e8d4ac3221799d1eb4aa1b9 100644 (file)
@@ -27,7 +27,7 @@ class MoviezineIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         jsplayer = self._download_webpage('http://www.moviezine.se/api/player.js?video=%s' % video_id, video_id, 'Downloading js api player')
 
-        formats =[{
+        formats = [{
             'format_id': 'sd',
             'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'),
             'quality': 0,
index 4191cf7a0c808034edf37aa07fa8338174a8d7e3..6101063f2ef9695dd86bbabedec134a4387a9332 100644 (file)
@@ -24,4 +24,4 @@ class MovShareIE(NovaMovIE):
             'title': 'dissapeared image',
             'description': 'optical illusion  dissapeared image  magic illusion',
         }
-    }
\ No newline at end of file
+    }
index 387935d4db784641377b72f5be9ec5e7649f5908..88c9501cd4e34492003a1fe67923d1d84a9e2d2b 100644 (file)
@@ -44,7 +44,7 @@ class MporaIE(InfoExtractor):
                     r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'],
                     False, default=None)
                 vcodec = src['type'].partition('/')[2]
-                
+
                 formats.append({
                     'format_id': encoding_id + '-' + vcodec,
                     'url': src['src'],
index 228b42d2b940d8eadd0fa3d5e61d0836fd19b7b7..5ebc78033a4abbb98310096c279fe11459b4a791 100644 (file)
@@ -3,9 +3,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
     find_xpath_attr,
     fix_xml_ampersands,
@@ -33,7 +35,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url)
         if not m:
             return rtmp_video_url
-        base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
+        base = 'http://viacommtvstrmfs.fplive.net/'
         return base + m.group('finalid')
 
     def _get_feed_url(self, uri):
@@ -53,23 +55,23 @@ class MTVServicesInfoExtractor(InfoExtractor):
         # Otherwise we get a webpage that would execute some javascript
         req.add_header('Youtubedl-user-agent', 'curl/7')
         webpage = self._download_webpage(req, mtvn_id,
-            'Downloading mobile page')
+                                         'Downloading mobile page')
         metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))
         req = HEADRequest(metrics_url)
         response = self._request_webpage(req, mtvn_id, 'Resolving url')
         url = response.geturl()
         # Transform the url to get the best quality:
         url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
-        return [{'url': url,'ext': 'mp4'}]
+        return [{'url': url, 'ext': 'mp4'}]
 
     def _extract_video_formats(self, mdoc, mtvn_id):
         if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None:
             if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
                 self.to_screen('The normal version is not available from your '
-                    'country, trying with the mobile version')
+                               'country, trying with the mobile version')
                 return self._extract_mobile_video_formats(mtvn_id)
             raise ExtractorError('This video is not available from your country.',
-                expected=True)
+                                 expected=True)
 
         formats = []
         for rendition in mdoc.findall('.//rendition'):
@@ -98,7 +100,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
             mediagen_url += '&acceptMethods=fms'
 
         mediagen_doc = self._download_xml(mediagen_url, video_id,
-            'Downloading video urls')
+                                          'Downloading video urls')
 
         description_node = itemdoc.find('description')
         if description_node is not None:
@@ -126,7 +128,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         # This a short id that's used in the webpage urls
         mtvn_id = None
         mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category',
-                'scheme', 'urn:mtvn:id')
+                                       'scheme', 'urn:mtvn:id')
         if mtvn_id_node is not None:
             mtvn_id = mtvn_id_node.text
 
@@ -145,7 +147,8 @@ class MTVServicesInfoExtractor(InfoExtractor):
         idoc = self._download_xml(
             feed_url + '?' + data, video_id,
             'Downloading info', transform_source=fix_xml_ampersands)
-        return [self._get_video_info(item) for item in idoc.findall('.//item')]
+        return self.playlist_result(
+            [self._get_video_info(item) for item in idoc.findall('.//item')])
 
     def _real_extract(self, url):
         title = url_basename(url)
@@ -163,7 +166,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         if mgid is None or ':' not in mgid:
             mgid = self._search_regex(
                 [r'data-mgid="(.*?)"', r'swfobject.embedSWF\(".*?(mgid:.*?)"'],
-                webpage, u'mgid')
+                webpage, 'mgid')
         return self._get_videos_info(mgid)
 
 
@@ -186,7 +189,8 @@ class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
     def _get_feed_url(self, uri):
         video_id = self._id_from_uri(uri)
         site_id = uri.replace(video_id, '')
-        config_url = 'http://media.mtvnservices.com/pmt/e1/players/{0}/config.xml'.format(site_id)
+        config_url = ('http://media.mtvnservices.com/pmt/e1/players/{0}/'
+                      'context4/context5/config.xml'.format(site_id))
         config_doc = self._download_xml(config_url, video_id)
         feed_node = config_doc.find('.//feed')
         feed_url = feed_node.text.strip().split('?')[0]
@@ -238,15 +242,15 @@ class MTVIE(MTVServicesInfoExtractor):
         uri = mobj.groupdict().get('mgid')
         if uri is None:
             webpage = self._download_webpage(url, video_id)
-    
+
             # Some videos come from Vevo.com
             m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";',
                                webpage, re.DOTALL)
             if m_vevo:
-                vevo_id = m_vevo.group(1);
+                vevo_id = m_vevo.group(1)
                 self.to_screen('Vevo video detected: %s' % vevo_id)
                 return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
-    
+
             uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
         return self._get_videos_info(uri)
 
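
Returning playlist_result(...) instead of a bare list gives callers a proper playlist dict. Presumably the common.py helper amounts to something like this sketch:

    def playlist_result(entries, playlist_id=None, playlist_title=None):
        # assumed shape of InfoExtractor.playlist_result
        result = {'_type': 'playlist', 'entries': entries}
        if playlist_id:
            result['id'] = playlist_id
        if playlist_title:
            result['title'] = playlist_title
        return result

    print(playlist_result([{'url': 'http://example.com/1'}]))
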
index c7f6beb9c703de0434a8ab4e9a41df3df16c2d81..b4e8ad17e9003940e5753349e30b4857c4c8aa6a 100644 (file)
@@ -73,4 +73,3 @@ class MuenchenTVIE(InfoExtractor):
             'is_live': True,
             'thumbnail': thumbnail,
         }
-
index 42d7a82a5dcc50fe0f84c48cb96fdf4b4a794c37..50d92b50ae5ec2fa49e45cc64aea7f08cc21ccea 100644 (file)
@@ -72,4 +72,4 @@ class MusicPlayOnIE(InfoExtractor):
             'duration': int_or_none(duration),
             'view_count': int_or_none(view_count),
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 1772b7f9ae43c2eaef57a15a5b3df5d9e7244213..1e9cf8de9174e086dd7c19525a7dc94025075683 100644 (file)
@@ -1,64 +1,65 @@
-import re
-import json
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
-    determine_ext,
 )
 
 
 class MuzuTVIE(InfoExtractor):
     _VALID_URL = r'https?://www\.muzu\.tv/(.+?)/(.+?)/(?P<id>\d+)'
-    IE_NAME = u'muzu.tv'
+    IE_NAME = 'muzu.tv'
 
     _TEST = {
-        u'url': u'http://www.muzu.tv/defected/marcashken-featuring-sos-cat-walk-original-mix-music-video/1981454/',
-        u'file': u'1981454.mp4',
-        u'md5': u'98f8b2c7bc50578d6a0364fff2bfb000',
-        u'info_dict': {
-            u'title': u'Cat Walk (Original Mix)',
-            u'description': u'md5:90e868994de201b2570e4e5854e19420',
-            u'uploader': u'MarcAshken featuring SOS',
+        'url': 'http://www.muzu.tv/defected/marcashken-featuring-sos-cat-walk-original-mix-music-video/1981454/',
+        'md5': '98f8b2c7bc50578d6a0364fff2bfb000',
+        'info_dict': {
+            'id': '1981454',
+            'ext': 'mp4',
+            'title': 'Cat Walk (Original Mix)',
+            'description': 'md5:90e868994de201b2570e4e5854e19420',
+            'uploader': 'MarcAshken featuring SOS',
         },
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
-        info_data = compat_urllib_parse.urlencode({'format': 'json',
-                                                   'url': url,
-                                                   })
-        video_info_page = self._download_webpage('http://www.muzu.tv/api/oembed/?%s' % info_data,
-                                                 video_id, u'Downloading video info')
-        info = json.loads(video_info_page)
+        info_data = compat_urllib_parse.urlencode({
+            'format': 'json',
+            'url': url,
+        })
+        info = self._download_json(
+            'http://www.muzu.tv/api/oembed/?%s' % info_data,
+            video_id, 'Downloading video info')
 
-        player_info_page = self._download_webpage('http://player.muzu.tv/player/playerInit?ai=%s' % video_id,
-                                                  video_id, u'Downloading player info')
-        video_info = json.loads(player_info_page)['videos'][0]
-        for quality in ['1080' , '720', '480', '360']:
+        player_info = self._download_json(
+            'http://player.muzu.tv/player/playerInit?ai=%s' % video_id,
+            video_id, 'Downloading player info')
+        video_info = player_info['videos'][0]
+        for quality in ['1080', '720', '480', '360']:
             if video_info.get('v%s' % quality):
                 break
 
-        data = compat_urllib_parse.urlencode({'ai': video_id,
-                                              # Even if each time you watch a video the hash changes,
-                                              # it seems to work for different videos, and it will work
-                                              # even if you use any non empty string as a hash
-                                              'viewhash': 'VBNff6djeV4HV5TRPW5kOHub2k',
-                                              'device': 'web',
-                                              'qv': quality,
-                                              })
-        video_url_page = self._download_webpage('http://player.muzu.tv/player/requestVideo?%s' % data,
-                                                video_id, u'Downloading video url')
-        video_url_info = json.loads(video_url_page)
+        data = compat_urllib_parse.urlencode({
+            'ai': video_id,
+            # Even if each time you watch a video the hash changes,
+            # it seems to work for different videos, and it will work
+            # even if you use any non empty string as a hash
+            'viewhash': 'VBNff6djeV4HV5TRPW5kOHub2k',
+            'device': 'web',
+            'qv': quality,
+        })
+        video_url_info = self._download_json(
+            'http://player.muzu.tv/player/requestVideo?%s' % data,
+            video_id, 'Downloading video url')
         video_url = video_url_info['url']
 
-        return {'id': video_id,
-                'title': info['title'],
-                'url': video_url,
-                'ext': determine_ext(video_url),
-                'thumbnail': info['thumbnail_url'],
-                'description': info['description'],
-                'uploader': info['author_name'],
-                }
+        return {
+            'id': video_id,
+            'title': info['title'],
+            'url': video_url,
+            'thumbnail': info['thumbnail_url'],
+            'description': info['description'],
+            'uploader': info['author_name'],
+        }
index c16939f5437cf55b6d3e51e9d51ab45d1d12392f..83414a2325586d7319c06247fa037c42bb2b199a 100644 (file)
@@ -1,12 +1,14 @@
+# encoding: utf-8
 from __future__ import unicode_literals
 
 import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
 )
+from ..utils import ExtractorError
 
 
 class MySpaceIE(InfoExtractor):
@@ -14,33 +16,58 @@ class MySpaceIE(InfoExtractor):
 
     _TESTS = [
         {
-            'url': 'https://myspace.com/coldplay/video/viva-la-vida/100008689',
+            'url': 'https://myspace.com/fiveminutestothestage/video/little-big-town/109594919',
             'info_dict': {
-                'id': '100008689',
+                'id': '109594919',
                 'ext': 'flv',
-                'title': 'Viva La Vida',
-                'description': 'The official Viva La Vida video, directed by Hype Williams',
-                'uploader': 'Coldplay',
-                'uploader_id': 'coldplay',
+                'title': 'Little Big Town',
+                'description': 'This country quartet was all smiles while playing a sold out show at the Pacific Amphitheatre in Orange County, California.',
+                'uploader': 'Five Minutes to the Stage',
+                'uploader_id': 'fiveminutestothestage',
             },
             'params': {
                 # rtmp download
                 'skip_download': True,
             },
         },
-        # song
+        # songs
         {
-            'url': 'https://myspace.com/spiderbags/music/song/darkness-in-my-heart-39008454-27041242',
+            'url': 'https://myspace.com/killsorrow/music/song/of-weakened-soul...-93388656-103880681',
             'info_dict': {
-                'id': '39008454',
+                'id': '93388656',
                 'ext': 'flv',
-                'title': 'Darkness In My Heart',
-                'uploader_id': 'spiderbags',
+                'title': 'Of weakened soul...',
+                'uploader': 'Killsorrow',
+                'uploader_id': 'killsorrow',
             },
             'params': {
                 # rtmp download
                 'skip_download': True,
             },
+        }, {
+            'add_ie': ['Vevo'],
+            'url': 'https://myspace.com/threedaysgrace/music/song/animal-i-have-become-28400208-28218041',
+            'info_dict': {
+                'id': 'USZM20600099',
+                'ext': 'mp4',
+                'title': 'Animal I Have Become',
+                'uploader': 'Three Days Grace',
+                'timestamp': int,
+                'upload_date': '20060502',
+            },
+            'skip': 'VEVO is only available in some countries',
+        }, {
+            'add_ie': ['Youtube'],
+            'url': 'https://myspace.com/starset2/music/song/first-light-95799905-106964426',
+            'info_dict': {
+                'id': 'ypWvQgnJrSU',
+                'ext': 'mp4',
+                'title': 'Starset - First Light',
+                'description': 'md5:2d5db6c9d11d527683bcda818d332414',
+                'uploader': 'Jacob Soren',
+                'uploader_id': 'SorenPromotions',
+                'upload_date': '20140725',
+            }
         },
     ]
 
@@ -48,22 +75,47 @@ class MySpaceIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
+        player_url = self._search_regex(
+            r'playerSwf":"([^"?]*)', webpage, 'player URL')
 
         if mobj.group('mediatype').startswith('music/song'):
             # songs don't store any useful info in the 'context' variable
+            song_data = self._search_regex(
+                r'''<button.*data-song-id=(["\'])%s\1.*''' % video_id,
+                webpage, 'song_data', default=None, group=0)
+            if song_data is None:
+                # some songs in an album are not playable
+                self.report_warning(
+                    '%s: No downloadable song on this page' % video_id)
+                return
+
             def search_data(name):
-                return self._search_regex(r'data-%s="(.*?)"' % name, webpage,
-                    name)
+                return self._search_regex(
+                    r'''data-%s=([\'"])(?P<data>.*?)\1''' % name,
+                    song_data, name, default='', group='data')
             streamUrl = search_data('stream-url')
+            if not streamUrl:
+                vevo_id = search_data('vevo-id')
+                youtube_id = search_data('youtube-id')
+                if vevo_id:
+                    self.to_screen('Vevo video detected: %s' % vevo_id)
+                    return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
+                elif youtube_id:
+                    self.to_screen('Youtube video detected: %s' % youtube_id)
+                    return self.url_result(youtube_id, ie='Youtube')
+                else:
+                    raise ExtractorError(
+                        'Found song but don\'t know how to download it')
             info = {
                 'id': video_id,
                 'title': self._og_search_title(webpage),
+                'uploader': search_data('artist-name'),
                 'uploader_id': search_data('artist-username'),
                 'thumbnail': self._og_search_thumbnail(webpage),
             }
         else:
-            context = json.loads(self._search_regex(r'context = ({.*?});', webpage,
-                u'context'))
+            context = json.loads(self._search_regex(
+                r'context = ({.*?});', webpage, 'context'))
             video = context['video']
             streamUrl = video['streamUrl']
             info = {
@@ -79,6 +131,50 @@ class MySpaceIE(InfoExtractor):
         info.update({
             'url': rtmp_url,
             'play_path': play_path,
+            'player_url': player_url,
             'ext': 'flv',
         })
         return info
+
+
+class MySpaceAlbumIE(InfoExtractor):
+    IE_NAME = 'MySpace:album'
+    _VALID_URL = r'https?://myspace\.com/([^/]+)/music/album/(?P<title>.*-)(?P<id>\d+)'
+
+    _TESTS = [{
+        'url': 'https://myspace.com/starset2/music/album/transmissions-19455773',
+        'info_dict': {
+            'title': 'Transmissions',
+            'id': '19455773',
+        },
+        'playlist_count': 14,
+        'skip': 'this album is only available in some countries',
+    }, {
+        'url': 'https://myspace.com/killsorrow/music/album/the-demo-18596029',
+        'info_dict': {
+            'title': 'The Demo',
+            'id': '18596029',
+        },
+        'playlist_count': 5,
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        playlist_id = mobj.group('id')
+        display_id = mobj.group('title') + playlist_id
+        webpage = self._download_webpage(url, display_id)
+        tracks_paths = re.findall(r'"music:song" content="(.*?)"', webpage)
+        if not tracks_paths:
+            raise ExtractorError(
+                '%s: No songs found, try using proxy' % display_id,
+                expected=True)
+        entries = [
+            self.url_result(t_path, ie=MySpaceIE.ie_key())
+            for t_path in tracks_paths]
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'display_id': display_id,
+            'title': self._og_search_title(webpage),
+            'entries': entries,
+        }
index 4fa0575f8a282aa6f8f561a7f18bc0129fceea8c..5b9b9fbcd0844897d6d63305ed00729e70c7f4fb 100644 (file)
@@ -2,9 +2,10 @@ from __future__ import unicode_literals
 import os.path
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse_urlparse,
-
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -13,9 +14,10 @@ class MySpassIE(InfoExtractor):
     _VALID_URL = r'http://www\.myspass\.de/.*'
     _TEST = {
         'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
-        'file': '11741.mp4',
         'md5': '0b49f4844a068f8b33f4b7c88405862b',
         'info_dict': {
+            'id': '11741',
+            'ext': 'mp4',
             "description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
             "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
         },
index ccb5959c4046e73f1263ecee19e829b418c6d506..5e754fcffb6403cbd359b9b358ad3879ef279f53 100644 (file)
@@ -7,11 +7,12 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_ord,
     compat_urllib_parse,
     compat_urllib_request,
-
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -32,7 +33,7 @@ class MyVideoIE(InfoExtractor):
     # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
     # Released into the Public Domain by Tristan Fischer on 2013-05-19
     # https://github.com/rg3/youtube-dl/pull/842
-    def __rc4crypt(self,data, key):
+    def __rc4crypt(self, data, key):
         x = 0
         box = list(range(256))
         for i in list(range(256)):
@@ -48,17 +49,17 @@ class MyVideoIE(InfoExtractor):
             out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
         return out
 
-    def __md5(self,s):
+    def __md5(self, s):
         return hashlib.md5(s).hexdigest().encode()
 
-    def _real_extract(self,url):
+    def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
         GK = (
-          b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
-          b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
-          b'TnpsbA0KTVRkbU1tSTRNdz09'
+            b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
+            b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
+            b'TnpsbA0KTVRkbU1tSTRNdz09'
         )
 
         # Get video webpage
@@ -71,7 +72,7 @@ class MyVideoIE(InfoExtractor):
             video_url = mobj.group(1) + '.flv'
 
             video_title = self._html_search_regex('<title>([^<]+)</title>',
-                webpage, 'title')
+                                                  webpage, 'title')
 
             return {
                 'id': video_id,
@@ -161,7 +162,7 @@ class MyVideoIE(InfoExtractor):
         video_swfobj = compat_urllib_parse.unquote(video_swfobj)
 
         video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>",
-            webpage, 'title')
+                                              webpage, 'title')
 
         return {
             'id': video_id,
@@ -172,4 +173,3 @@ class MyVideoIE(InfoExtractor):
             'play_path': video_playpath,
             'player_url': video_swfobj,
         }
-
diff --git a/youtube_dl/extractor/myvidster.py b/youtube_dl/extractor/myvidster.py
new file mode 100644 (file)
index 0000000..a94ab83
--- /dev/null
@@ -0,0 +1,29 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class MyVidsterIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?myvidster\.com/video/(?P<id>\d+)/'
+
+    _TEST = {
+        'url': 'http://www.myvidster.com/video/32059805/Hot_chemistry_with_raw_love_making',
+        'md5': '95296d0231c1363222c3441af62dc4ca',
+        'info_dict': {
+            'id': '3685814',
+            'title': 'md5:7d8427d6d02c4fbcef50fe269980c749',
+            'upload_date': '20141027',
+            'uploader_id': 'utkualp',
+            'ext': 'mp4',
+            'age_limit': 18,
+        },
+        'add_ie': ['XHamster'],
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        return self.url_result(self._html_search_regex(
+            r'rel="videolink" href="(?P<real_url>.*)">',
+            webpage, 'real video url'))
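
MyVidster pages just wrap videos hosted elsewhere, so the whole extractor is a url_result delegation: scrape the rel="videolink" href and let youtube-dl re-dispatch it (to XHamster in the test above). A sketch of what url_result presumably returns:

    def url_result(url, ie=None):
        # assumed shape of InfoExtractor.url_result
        result = {'_type': 'url', 'url': url}
        if ie is not None:
            result['ie_key'] = ie
        return result

    print(url_result('http://example.com/some/video', ie='XHamster'))
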
index c0231c197b12b86c669e9cff4b34a5c2ac1639bf..c10405f04d3cc1b3e89004029b7502112e9baa29 100644 (file)
@@ -4,9 +4,12 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
+)
+from ..utils import (
     ExtractorError,
+    clean_html,
 )
 
 
@@ -25,16 +28,21 @@ class NaverIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
+
         m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
-            webpage)
+                         webpage)
         if m_id is None:
+            m_error = re.search(
+                r'(?s)<div class="nation_error">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
+                webpage)
+            if m_error:
+                raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
             raise ExtractorError('couldn\'t extract vid and key')
         vid = m_id.group(1)
         key = m_id.group(2)
-        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key,})
+        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
         query_urls = compat_urllib_parse.urlencode({
             'masterVid': vid,
             'protocol': 'p2p',
@@ -59,7 +67,7 @@ class NaverIE(InfoExtractor):
             if domain.startswith('rtmp'):
                 f.update({
                     'ext': 'flv',
-                    'rtmp_protocol': '1', # rtmpt
+                    'rtmp_protocol': '1',  # rtmpt
                 })
             formats.append(f)
         self._sort_formats(formats)
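
When the player variable is missing, the page usually carries a human-readable error block, and surfacing it as an expected ExtractorError beats the generic message. The new regex against sample markup (shaped to fit the pattern, not captured from naver.com):

    import re

    webpage = '''<div class="nation_error">
      <!-- error -->
      <p class="error_msg">This video is not available in your region.</p>
    </div>'''
    m_error = re.search(
        r'(?s)<div class="nation_error">\s*(?:<!--.*?-->)?\s*'
        r'<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
        webpage)
    print(m_error.group('msg'))
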
index 78e650b2d01a87d3772a1f40459171bd7cce5cf4..862b706bf96719aa071f1f89c73f2a4ef45a20b1 100644 (file)
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     remove_end,
@@ -10,8 +8,8 @@ from ..utils import (
 
 
 class NBAIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)(?:/index\.html)?(?:\?.*)?$'
-    _TEST = {
+    _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)/?(?:/index\.html)?(?:\?.*)?$'
+    _TESTS = [{
         'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
         'md5': 'c0edcfc37607344e2ff8f13c378c88a4',
         'info_dict': {
@@ -21,12 +19,13 @@ class NBAIE(InfoExtractor):
             'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
             'duration': 181,
         },
-    }
+    }, {
+        'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         video_url = 'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
@@ -37,8 +36,7 @@ class NBAIE(InfoExtractor):
 
         description = self._og_search_description(webpage)
         duration = parse_duration(
-            self._html_search_meta('duration', webpage, 'duration', fatal=False))
-
+            self._html_search_meta('duration', webpage, 'duration'))
 
         return {
             'id': shortened_video_id,
index e75ab7c398604451db54bc9d3afe66e4df074871..690c46b6a57be11edf36899b959318af5e482119 100644 (file)
@@ -4,32 +4,47 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
+)
+from ..utils import (
     ExtractorError,
     find_xpath_attr,
 )
 
 
 class NBCIE(InfoExtractor):
-    _VALID_URL = r'http://www\.nbc\.com/[^/]+/video/[^/]+/(?P<id>n?\d+)'
-
-    _TEST = {
-        'url': 'http://www.nbc.com/chicago-fire/video/i-am-a-firefighter/2734188',
-        # md5 checksum is not stable
-        'info_dict': {
-            'id': 'bTmnLCvIbaaH',
-            'ext': 'flv',
-            'title': 'I Am a Firefighter',
-            'description': 'An emergency puts Dawson\'sf irefighter skills to the ultimate test in this four-part digital series.',
+    _VALID_URL = r'http://www\.nbc\.com/(?:[^/]+/)+(?P<id>n?\d+)'
+
+    _TESTS = [
+        {
+            'url': 'http://www.nbc.com/chicago-fire/video/i-am-a-firefighter/2734188',
+            # md5 checksum is not stable
+            'info_dict': {
+                'id': 'bTmnLCvIbaaH',
+                'ext': 'flv',
+                'title': 'I Am a Firefighter',
+                'description': 'An emergency puts Dawson\'s firefighter skills to the ultimate test in this four-part digital series.',
+            },
         },
-    }
+        {
+            'url': 'http://www.nbc.com/the-tonight-show/episodes/176',
+            'info_dict': {
+                'id': 'XwU9KZkp98TH',
+                'ext': 'flv',
+                'title': 'Ricky Gervais, Steven Van Zandt, ILoveMakonnen',
+                'description': 'A brand new episode of The Tonight Show welcomes Ricky Gervais, Steven Van Zandt and ILoveMakonnen.',
+            },
+            'skip': 'Only works from US',
+        },
+    ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        theplatform_url = self._search_regex('class="video-player video-player-full" data-mpx-url="(.*?)"', webpage, 'theplatform url')
+        theplatform_url = self._search_regex(
+            '(?:class="video-player video-player-full" data-mpx-url|class="player" src)="(.*?)"',
+            webpage, 'theplatform url').replace('_no_endcard', '')
         if theplatform_url.startswith('//'):
             theplatform_url = 'http:' + theplatform_url
         return self.url_result(theplatform_url)
@@ -57,7 +72,7 @@ class NBCNewsIE(InfoExtractor):
             'md5': 'b2421750c9f260783721d898f4c42063',
             'info_dict': {
                 'id': 'I1wpAI_zmhsQ',
-                'ext': 'flv',
+                'ext': 'mp4',
                 'title': 'How Twitter Reacted To The Snowden Interview',
                 'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64',
             },
@@ -97,6 +112,8 @@ class NBCNewsIE(InfoExtractor):
             ]
 
             for base_url in base_urls:
+                if not base_url:
+                    continue
                 playlist_url = base_url + '?form=MPXNBCNewsAPI'
                 all_videos = self._download_json(playlist_url, title)['videos']
 
index cf7f0309b6fa76e8ad4d35e865db22a14e0251b5..f49c666909a270ad18e36a2d1177ef681adc3121 100644 (file)
@@ -25,7 +25,7 @@ class NDRIE(InfoExtractor):
                 'id': '25866',
                 'ext': 'mp4',
                 'title': 'Kartoffeltage in der Lewitz',
-                'description': 'md5:16d4d66907f541692e8182c33270f29a',
+                'description': 'md5:48c4c04dde604c8a9971b3d4e3b9eaa8',
                 'duration': 166,
             }
         },
@@ -67,7 +67,7 @@ class NDRIE(InfoExtractor):
 
         thumbnail = None
 
-        video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.hi\.mp4', type:"video/mp4"},''', page)
+        video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.(lo|hi|hq)\.mp4', type:"video/mp4"},''', page)
         if video_url:
             thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
             if thumbnails:
@@ -91,4 +91,4 @@ class NDRIE(InfoExtractor):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
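
The broadened pattern accepts whichever rendition the page happens to embed (lo, hi or hq) and captures the base URL before the quality suffix. Against a made-up src line:

    import re

    page = """3: {src:'http://example.org/clips/12345.hq.mp4', type:"video/mp4"},"""
    m = re.search(
        r'''3: \{src:'(?P<video>.+?)\.(lo|hi|hq)\.mp4', type:"video/mp4"},''', page)
    print(m.group('video'))  # http://example.org/clips/12345
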
index 2e72e8915aab601b6916fd18e4f64090a613986e..cd117b04edeff88d90842f2ed8e15a8c43bde714 100644 (file)
@@ -23,12 +23,12 @@ class NewgroundsIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         music_id = mobj.group('id')
         webpage = self._download_webpage(url, music_id)
-        
+
         title = self._html_search_regex(
             r',"name":"([^"]+)",', webpage, 'music title')
         uploader = self._html_search_regex(
             r',"artist":"([^"]+)",', webpage, 'music uploader')
-        
+
         music_url_json_string = self._html_search_regex(
             r'({"url":"[^"]+"),', webpage, 'music url') + '}'
         music_url_json = json.loads(music_url_json_string)
index 551bd4d7a511c51f3d5809cf488d7e454b6ed6b5..85fcad06b51dc9ce87bdd563043c92a126cc8eea 100644 (file)
@@ -89,4 +89,4 @@ class NewstubeIE(InfoExtractor):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index ba7b77a467e8e7ba969fb9d4555e47fad6ed7196..ea077254b4320fe18e59eb9b67461b13c146b873 100644 (file)
@@ -1,9 +1,7 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
     compat_urllib_parse,
 )
@@ -12,7 +10,7 @@ from ..utils import (
 class NFBIE(InfoExtractor):
     IE_NAME = 'nfb'
     IE_DESC = 'National Film Board of Canada'
-    _VALID_URL = r'https?://(?:www\.)?(nfb|onf)\.ca/film/(?P<id>[\da-z_-]+)'
+    _VALID_URL = r'https?://(?:www\.)?(?:nfb|onf)\.ca/film/(?P<id>[\da-z_-]+)'
 
     _TEST = {
         'url': 'https://www.nfb.ca/film/qallunaat_why_white_people_are_funny',
@@ -32,18 +30,18 @@ class NFBIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        page = self._download_webpage('https://www.nfb.ca/film/%s' % video_id, video_id, 'Downloading film page')
+        video_id = self._match_id(url)
+        page = self._download_webpage(
+            'https://www.nfb.ca/film/%s' % video_id, video_id,
+            'Downloading film page')
 
         uploader_id = self._html_search_regex(r'<a class="director-link" href="/explore-all-directors/([^/]+)/"',
-            page, 'director id', fatal=False)
+                                              page, 'director id', fatal=False)
         uploader = self._html_search_regex(r'<em class="director-name" itemprop="name">([^<]+)</em>',
-            page, 'director name', fatal=False)
+                                           page, 'director name', fatal=False)
 
         request = compat_urllib_request.Request('https://www.nfb.ca/film/%s/player_config' % video_id,
-            compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
+                                                compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')
 
@@ -93,4 +91,4 @@ class NFBIE(InfoExtractor):
             'uploader': uploader,
             'uploader_id': uploader_id,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index cc7c921c364d64ee504fa6d31265d13a96565e8d..606e2294efb716cfe755d1b9564357dbda7f9039 100644 (file)
@@ -4,9 +4,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_parse_urlparse,
+)
 from ..utils import (
     ExtractorError,
-    compat_urllib_parse_urlparse,
     int_or_none,
     remove_end,
 )
index 072d9cf8e49438f5688a9eef6ef43d0efd0cf8c2..b2f40344f59d75caf94028167dcf5db7ce0f83fd 100644 (file)
@@ -2,12 +2,15 @@ from __future__ import unicode_literals
 
 import re
 import json
+import os
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urlparse,
     compat_urllib_parse,
-    determine_ext,
+    compat_urllib_parse_urlparse
+)
+from ..utils import (
     unified_strdate,
 )
 
@@ -22,21 +25,26 @@ class NHLBaseInfoExtractor(InfoExtractor):
         self.report_extraction(video_id)
 
         initial_video_url = info['publishPoint']
-        data = compat_urllib_parse.urlencode({
-            'type': 'fvod',
-            'path': initial_video_url.replace('.mp4', '_sd.mp4'),
-        })
-        path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
-        path_doc = self._download_xml(
-            path_url, video_id, 'Downloading final video url')
-        video_url = path_doc.find('path').text
+        if info['formats'] == '1':
+            parsed_url = compat_urllib_parse_urlparse(initial_video_url)
+            filename, ext = os.path.splitext(parsed_url.path)
+            path = '%s_sd%s' % (filename, ext)
+            data = compat_urllib_parse.urlencode({
+                'type': 'fvod',
+                'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])
+            })
+            path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
+            path_doc = self._download_xml(
+                path_url, video_id, 'Downloading final video url')
+            video_url = path_doc.find('path').text
+        else:
+            video_url = initial_video_url
 
         join = compat_urlparse.urljoin
         return {
             'id': video_id,
             'title': info['name'],
             'url': video_url,
-            'ext': determine_ext(video_url),
             'description': info['description'],
             'duration': int(info['duration']),
             'thumbnail': join(join(video_url, '/u/'), info['bigImage']),
@@ -46,10 +54,11 @@ class NHLBaseInfoExtractor(InfoExtractor):
 
 class NHLIE(NHLBaseInfoExtractor):
     IE_NAME = 'nhl.com'
-    _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/console(?:\?(?:.*?[?&])?)id=(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/console(?:\?(?:.*?[?&])?)id=(?P<id>[0-9a-z-]+)'
 
     _TESTS = [{
         'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
+        'md5': 'db704a4ea09e8d3988c85e36cc892d09',
         'info_dict': {
             'id': '453614',
             'ext': 'mp4',
@@ -58,6 +67,28 @@ class NHLIE(NHLBaseInfoExtractor):
             'duration': 18,
             'upload_date': '20131006',
         },
+    }, {
+        'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h',
+        'md5': 'd22e82bc592f52d37d24b03531ee9696',
+        'info_dict': {
+            'id': '2014020024-628-h',
+            'ext': 'mp4',
+            'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)',
+            'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014',
+            'duration': 0,
+            'upload_date': '20141011',
+        },
+    }, {
+        'url': 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802',
+        'md5': 'c78fc64ea01777e426cfc202b746c825',
+        'info_dict': {
+            'id': '58665',
+            'ext': 'flv',
+            'title': 'Classic Game In Six - April 22, 1979',
+            'description': 'It was the last playoff game for the Leafs in the decade, and the last time the Leafs and Habs played in the playoffs. Great game, not a great ending.',
+            'duration': 400,
+            'upload_date': '20100129'
+        },
     }, {
         'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',
         'only_matching': True,
@@ -75,7 +106,7 @@ class NHLIE(NHLBaseInfoExtractor):
 class NHLVideocenterIE(NHLBaseInfoExtractor):
     IE_NAME = 'nhl.com:videocenter'
     IE_DESC = 'NHL videocenter category'
-    _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?.*?catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'
+    _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?[^(id=)]*catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'
     _TEST = {
         'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999',
         'info_dict': {
@@ -109,10 +140,10 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
         response = self._download_webpage(request_url, playlist_title)
         response = self._fix_json(response)
         if not response.strip():
-            self._downloader.report_warning(u'Got an empty reponse, trying '
+            self._downloader.report_warning('Got an empty response, trying '
                                             'adding the "newvideos" parameter')
             response = self._download_webpage(request_url + '&newvideos=true',
-                playlist_title)
+                                              playlist_title)
             response = self._fix_json(response)
         videos = json.loads(response)
 
index c0c139b5df16ce900ba6920a1a004bc433eab4e9..4c18904169d3f69a0bf7e95fb21d98218bca7e91 100644 (file)
@@ -2,15 +2,19 @@
 from __future__ import unicode_literals
 
 import re
+import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
     compat_urlparse,
-    unified_strdate,
-    parse_duration,
+)
+from ..utils import (
+    ExtractorError,
     int_or_none,
+    parse_duration,
+    unified_strdate,
 )
 
 
@@ -39,18 +43,17 @@ class NiconicoIE(InfoExtractor):
 
     _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/((?:[a-z]{2})?[0-9]+)'
     _NETRC_MACHINE = 'niconico'
-    # Determine whether the downloader uses authentication to download video
-    _AUTHENTICATE = False
+    # Determine whether the downloader used authentication to download video
+    _AUTHENTICATED = False
 
     def _real_initialize(self):
-        if self._downloader.params.get('username', None) is not None:
-            self._AUTHENTICATE = True
-
-        if self._AUTHENTICATE:
-            self._login()
+        self._login()
 
     def _login(self):
         (username, password) = self._get_login_info()
+        # No authentication to be performed
+        if not username:
+            return True
 
         # Log in
         login_form_strs = {
@@ -68,6 +71,8 @@ class NiconicoIE(InfoExtractor):
         if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
             self._downloader.report_warning('unable to log in: bad username or password')
             return False
+        # Successful login
+        self._AUTHENTICATED = True
         return True
 
     def _real_extract(self, url):
@@ -82,7 +87,7 @@ class NiconicoIE(InfoExtractor):
             'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
             note='Downloading video info page')
 
-        if self._AUTHENTICATE:
+        if self._AUTHENTICATED:
             # Get flv info
             flv_info_webpage = self._download_webpage(
                 'http://flapi.nicovideo.jp/api/getflv?v=' + video_id,
@@ -106,6 +111,9 @@ class NiconicoIE(InfoExtractor):
                 flv_info_request, video_id,
                 note='Downloading flv info', errnote='Unable to download flv info')
 
+        if 'deleted=' in flv_info_webpage:
+            raise ExtractorError('The video has been deleted.',
+                                 expected=True)
         video_real_url = compat_urlparse.parse_qs(flv_info_webpage)['url'][0]
 
         # Start extracting information
@@ -145,3 +153,37 @@ class NiconicoIE(InfoExtractor):
             'duration': duration,
             'webpage_url': webpage_url,
         }
+
+
+class NiconicoPlaylistIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://www.nicovideo.jp/mylist/27411728',
+        'info_dict': {
+            'id': '27411728',
+            'title': 'AKB48のオールナイトニッポン',
+        },
+        'playlist_mincount': 225,
+    }
+
+    def _real_extract(self, url):
+        list_id = self._match_id(url)
+        webpage = self._download_webpage(url, list_id)
+
+        entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
+                                          webpage, 'entries')
+        entries = json.loads(entries_json)
+        entries = [{
+            '_type': 'url',
+            'ie_key': NiconicoIE.ie_key(),
+            'url': ('http://www.nicovideo.jp/watch/%s' %
+                    entry['item_data']['video_id']),
+        } for entry in entries]
+
+        return {
+            '_type': 'playlist',
+            'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
+            'id': list_id,
+            'entries': entries,
+        }
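
The new playlist extractor reads its entries straight out of the page's Mylist.preload(...) call and defers each video to NiconicoIE. The scrape in isolation (the script line is an assumption shaped after the regex, not a captured page):

    import json
    import re

    webpage = 'Mylist.preload(27411728, [{"item_data": {"video_id": "sm9"}}]);'
    entries_json = re.search(
        r'Mylist\.preload\(\d+, (\[.*\])\);', webpage).group(1)
    entries = json.loads(entries_json)
    print(['http://www.nicovideo.jp/watch/%s' % e['item_data']['video_id']
           for e in entries])  # ['http://www.nicovideo.jp/watch/sm9']
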
index 33daa0dec327dea3f691f5dccab5b6b312d79e4f..7f842b5c2560211cc88280e2b97cf107af588bfe 100644 (file)
@@ -23,12 +23,14 @@ class NineGagIE(InfoExtractor):
             "ext": "mp4",
             "description": "This 3-minute video will make you smile and then make you feel untalented and insignificant. Anyway, you should share this awesomeness. (Thanks, Dino!)",
             "title": "\"People Are Awesome 2013\" Is Absolutely Awesome",
+            'uploader_id': 'UCdEH6EjDKwtTe-sO2f0_1XA',
+            'uploader': 'CompilationChannel',
+            'upload_date': '20131110',
             "view_count": int,
             "thumbnail": "re:^https?://",
         },
         'add_ie': ['Youtube']
-    },
-    {
+    }, {
         'url': 'http://9gag.tv/p/KklwM/alternate-banned-opening-scene-of-gravity?ref=fsidebar',
         'info_dict': {
             'id': 'KklwM',
@@ -36,6 +38,9 @@ class NineGagIE(InfoExtractor):
             'display_id': 'alternate-banned-opening-scene-of-gravity',
             "description": "While Gravity was a pretty awesome movie already, YouTuber Krishna Shenoi came up with a way to improve upon it, introducing a much better solution to Sandra Bullock's seemingly endless tumble in space. The ending is priceless.",
             'title': "Banned Opening Scene Of \"Gravity\" That Changes The Whole Movie",
+            'uploader': 'Krishna Shenoi',
+            'upload_date': '20140401',
+            'uploader_id': 'krishnashenoi93',
         },
     }]
 
index 7f1bc6377a42e99d853a38dd406a60dda929e49f..251e6da07457b7e7be6b5703b5769214ae299c3d 100644 (file)
@@ -6,13 +6,15 @@ import time
 import hashlib
 
 from .common import InfoExtractor
-from ..utils import (
-    compat_urllib_request,
+from ..compat import (
+    compat_str,
     compat_urllib_parse,
-    ExtractorError,
+    compat_urllib_request,
+)
+from ..utils import (
     clean_html,
+    ExtractorError,
     unified_strdate,
-    compat_str,
 )
 
 
@@ -20,6 +22,7 @@ class NocoIE(InfoExtractor):
     _VALID_URL = r'http://(?:(?:www\.)?noco\.tv/emission/|player\.noco\.tv/\?idvideo=)(?P<id>\d+)'
     _LOGIN_URL = 'http://noco.tv/do.php'
     _API_URL_TEMPLATE = 'https://api.noco.tv/1.1/%s?ts=%s&tk=%s'
+    _SUB_LANG_TEMPLATE = '&sub_lang=%s'
     _NETRC_MACHINE = 'noco'
 
     _TEST = {
@@ -60,10 +63,12 @@ class NocoIE(InfoExtractor):
         if 'erreur' in login:
             raise ExtractorError('Unable to login: %s' % clean_html(login['erreur']), expected=True)
 
-    def _call_api(self, path, video_id, note):
+    def _call_api(self, path, video_id, note, sub_lang=None):
         ts = compat_str(int(time.time() * 1000))
         tk = hashlib.md5((hashlib.md5(ts.encode('ascii')).hexdigest() + '#8S?uCraTedap6a').encode('ascii')).hexdigest()
         url = self._API_URL_TEMPLATE % (path, ts, tk)
+        if sub_lang:
+            url += self._SUB_LANG_TEMPLATE % sub_lang
 
         resp = self._download_json(url, video_id, note)
 
@@ -91,31 +96,34 @@ class NocoIE(InfoExtractor):
 
         formats = []
 
-        for format_id, fmt in medias['fr']['video_list']['none']['quality_list'].items():
-
-            video = self._call_api(
-                'shows/%s/video/%s/fr' % (video_id, format_id.lower()),
-                video_id, 'Downloading %s video JSON' % format_id)
-
-            file_url = video['file']
-            if not file_url:
-                continue
-
-            if file_url in ['forbidden', 'not found']:
-                popmessage = video['popmessage']
-                self._raise_error(popmessage['title'], popmessage['message'])
-
-            formats.append({
-                'url': file_url,
-                'format_id': format_id,
-                'width': fmt['res_width'],
-                'height': fmt['res_lines'],
-                'abr': fmt['audiobitrate'],
-                'vbr': fmt['videobitrate'],
-                'filesize': fmt['filesize'],
-                'format_note': qualities[format_id]['quality_name'],
-                'preference': qualities[format_id]['priority'],
-            })
+        for lang, lang_dict in medias['fr']['video_list'].items():
+            for format_id, fmt in lang_dict['quality_list'].items():
+                format_id_extended = '%s-%s' % (lang, format_id) if lang != 'none' else format_id
+
+                video = self._call_api(
+                    'shows/%s/video/%s/fr' % (video_id, format_id.lower()),
+                    video_id, 'Downloading %s video JSON' % format_id_extended,
+                    lang if lang != 'none' else None)
+
+                file_url = video['file']
+                if not file_url:
+                    continue
+
+                if file_url in ['forbidden', 'not found']:
+                    popmessage = video['popmessage']
+                    self._raise_error(popmessage['title'], popmessage['message'])
+
+                formats.append({
+                    'url': file_url,
+                    'format_id': format_id_extended,
+                    'width': fmt['res_width'],
+                    'height': fmt['res_lines'],
+                    'abr': fmt['audiobitrate'],
+                    'vbr': fmt['videobitrate'],
+                    'filesize': fmt['filesize'],
+                    'format_note': qualities[format_id]['quality_name'],
+                    'preference': qualities[format_id]['priority'],
+                })
 
         self._sort_formats(formats)
 
@@ -163,4 +171,4 @@ class NocoIE(InfoExtractor):
             'uploader_id': uploader_id,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
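
The `_call_api` signing in the hunks above is self-contained: each request carries the millisecond timestamp and an MD5-of-MD5 over that timestamp plus a static secret (the secret string appears verbatim in the diff). A minimal standalone sketch, with a hypothetical API path for illustration:

    import hashlib
    import time

    def noco_sign():
        # ts: millisecond timestamp as ASCII; tk: md5(md5(ts) + secret)
        ts = str(int(time.time() * 1000))
        inner = hashlib.md5(ts.encode('ascii')).hexdigest()
        tk = hashlib.md5((inner + '#8S?uCraTedap6a').encode('ascii')).hexdigest()
        return ts, tk

    ts, tk = noco_sign()
    # 'shows/123/medias' is a made-up path for illustration
    url = 'https://api.noco.tv/1.1/%s?ts=%s&tk=%s' % ('shows/123/medias', ts, tk)
    url += '&sub_lang=fr'  # appended only when a language variant is requested
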
index 25e71a56e196d9cf7f9d2423c47293b01e46cd24..3d35b11ac81286a359a06620d0e08e8fae18043b 100644 (file)
@@ -31,9 +31,9 @@ class NormalbootsIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
         video_uploader = self._html_search_regex(r'Posted\sby\s<a\shref="[A-Za-z0-9/]*">(?P<uploader>[A-Za-z]*)\s</a>',
-            webpage, 'uploader')
+                                                 webpage, 'uploader')
         raw_upload_date = self._html_search_regex('<span style="text-transform:uppercase; font-size:inherit;">[A-Za-z]+, (?P<date>.*)</span>',
-            webpage, 'date')
+                                                  webpage, 'date')
         video_upload_date = unified_strdate(raw_upload_date)
 
         player_url = self._html_search_regex(r'<iframe\swidth="[0-9]+"\sheight="[0-9]+"\ssrc="(?P<url>[\S]+)"', webpage, 'url')
index f3be8f552c3764995057acf18b74514537960d4e..f5ef856db0155dd84f10d5db4a8cef8e6c08213c 100644 (file)
@@ -4,9 +4,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_request,
+)
 from ..utils import (
     ExtractorError,
-    compat_urllib_request,
     urlencode_postdata,
     xpath_text,
     xpath_with_ns,
@@ -32,8 +34,7 @@ class NosVideoIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         fields = {
             'id': video_id,
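
`_match_id(url)` here (and in several hunks below) replaces the recurring `re.match(self._VALID_URL, url)` / `mobj.group('id')` boilerplate. The helper lives in `youtube_dl/extractor/common.py` and amounts to roughly this sketch:

    import re

    class InfoExtractor(object):
        _VALID_URL = None  # each concrete extractor sets this

        def _match_id(self, url):
            # Only works when _VALID_URL names its capture group 'id'
            m = re.match(self._VALID_URL, url)
            assert m
            return m.group('id')

That group-name requirement is why other hunks in this commit rename groups like `(?P<videoid>...)` and `(?P<video_id>...)` to `(?P<id>...)` while switching to the helper.
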
index 2e7ab1e4f9ce23c422fddf478b23b1497aac02ae..04d779890af1960d65b070d0b2f80e429db21d07 100644 (file)
@@ -3,9 +3,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_urlparse,
+)
 from ..utils import (
     ExtractorError,
-    compat_urlparse
 )
 
 
@@ -66,4 +68,4 @@ class NovaMovIE(InfoExtractor):
             'url': video_url,
             'title': title,
             'description': description
-        }
\ No newline at end of file
+        }
index bfba184184c09bfd429698229efc1375c334e617..dec09cdfef0087ee3400394b82750daf434ed29d 100644 (file)
@@ -7,7 +7,7 @@ class NowVideoIE(NovaMovIE):
     IE_NAME = 'nowvideo'
     IE_DESC = 'NowVideo'
 
-    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|sx|eu|at|ag|co)'}
+    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|sx|eu|at|ag|co|li)'}
 
     _HOST = 'www.nowvideo.ch'
 
@@ -25,4 +25,4 @@ class NowVideoIE(NovaMovIE):
             'title': 'youtubedl test video _BaW_jenozKc.mp4',
             'description': 'Description',
         }
-    }
\ No newline at end of file
+    }
index f36d446d2dd398ac48168f2181299bdfc920797f..ce31694a506a99d1ae21460f0c458ef3100c1850 100644 (file)
@@ -7,6 +7,7 @@ from ..utils import (
     unified_strdate,
     parse_duration,
     qualities,
+    strip_jsonp,
     url_basename,
 )
 
@@ -63,7 +64,7 @@ class NPOIE(InfoExtractor):
             'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
             video_id,
             # We have to remove the javascript callback
-            transform_source=lambda j: re.sub(r'parseMetadata\((.*?)\);\n//.*$', r'\1', j)
+            transform_source=strip_jsonp,
         )
         token_page = self._download_webpage(
             'http://ida.omroep.nl/npoplayer/i.js',
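
`strip_jsonp` generalizes the one-off lambda it replaces: it unwraps a `callback(...);` JSONP envelope so the payload can go straight to the JSON parser. A simplified sketch of the idea (the real helper in `youtube_dl/utils.py` also tolerates the trailing `// ...` comment that this metadata endpoint appends):

    import json
    import re

    def strip_jsonp(code):
        # Keep only the argument of the outermost callback(...) wrapper
        return re.sub(r'(?s)^[a-zA-Z0-9_.]+\s*\(\s*(.*)\)\s*;?\s*$', r'\1', code)

    metadata = json.loads(strip_jsonp('parseMetadata({"id": 1});'))
    assert metadata == {'id': 1}
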
index ed60314eca4f918392da7fe2637e47ed3cdf5ee9..ee740cd9c0fe71a48b79aee00c40ea610e81ea99 100644 (file)
@@ -130,7 +130,7 @@ class NTVIE(InfoExtractor):
                 'rtmp_conn': 'B:1',
                 'player_url': 'http://www.ntv.ru/swf/vps1.swf?update=20131128',
                 'page_url': 'http://www.ntv.ru',
-                'flash_ver': 'LNX 11,2,202,341',
+                'flash_version': 'LNX 11,2,202,341',
                 'rtmp_live': True,
                 'ext': 'flv',
                 'filesize': int(size.text),
@@ -145,4 +145,4 @@ class NTVIE(InfoExtractor):
             'duration': duration,
             'view_count': view_count,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 58ec81f91115b9df146f7570f5ef508f86a35fde..57928f2aedcc0acfa5ba71d6e9f0a62af9d67b71 100644 (file)
@@ -3,15 +3,17 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_request,
+)
 from ..utils import (
     parse_duration,
     unified_strdate,
-    compat_urllib_request,
 )
 
 
 class NuvidIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:www|m)\.nuvid\.com/video/(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://(?:www|m)\.nuvid\.com/video/(?P<id>[0-9]+)'
     _TEST = {
         'url': 'http://m.nuvid.com/video/1310741/',
         'md5': 'eab207b7ac4fccfb4e23c86201f11277',
@@ -26,8 +28,7 @@ class NuvidIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         formats = []
 
@@ -71,4 +72,4 @@ class NuvidIE(InfoExtractor):
             'upload_date': upload_date,
             'age_limit': 18,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 7bf105d38178a5d59339d81262649b40fa57a46e..56e1cad3b0021431721b59df2162feaf7e0c357b 100644 (file)
@@ -74,4 +74,4 @@ class NYTimesIE(InfoExtractor):
             'duration': duration,
             'formats': formats,
             'thumbnails': thumbnails,
-        }
\ No newline at end of file
+        }
index 2044e107eba9808bbde802e8468bf6b009841fb8..d5b05c18febb580a448263b4f7b2876ef3234957 100644 (file)
@@ -16,7 +16,6 @@ class OoyalaIE(InfoExtractor):
         {
             # From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video
             'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
-            'md5': '3f5cceb3a7bf461d6c29dc466cf8033c',
             'info_dict': {
                 'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
                 'ext': 'mp4',
@@ -26,7 +25,6 @@ class OoyalaIE(InfoExtractor):
         }, {
             # Only available for ipad
             'url': 'http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
-            'md5': '4b9754921fddb68106e48c142e2a01e6',
             'info_dict': {
                 'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
                 'ext': 'mp4',
@@ -43,7 +41,7 @@ class OoyalaIE(InfoExtractor):
     @classmethod
     def _build_url_result(cls, embed_code):
         return cls.url_result(cls._url_for_embed_code(embed_code),
-            ie=cls.ie_key())
+                              ie=cls.ie_key())
 
     def _extract_result(self, info, more_info):
         return {
@@ -97,4 +95,3 @@ class OoyalaIE(InfoExtractor):
             }
         else:
             return self._extract_result(videos_info[0], videos_more_info)
-        
index 011e6be13e63562dad8def87ea264a7e1b6783af..572a234ad8c2514e5704d936fb98a19035662f40 100644 (file)
@@ -178,4 +178,4 @@ class ORFFM4IE(InfoExtractor):
             'title': data['title'],
             'description': data['subtitle'],
             'entries': entries
-        }
\ No newline at end of file
+        }
index 8f140d62660b896f5a6f819d621a762d13fbdb69..6118ed5c2021492ee91e22dccd642d564918604c 100644 (file)
@@ -80,8 +80,14 @@ class PBSIE(InfoExtractor):
                 'thumbnail': 're:^https?://.*\.jpg$',
                 'upload_date': '20140122',
             }
+        },
+        {
+            'url': 'http://www.pbs.org/wgbh/pages/frontline/united-states-of-secrets/',
+            'info_dict': {
+                'id': 'united-states-of-secrets',
+            },
+            'playlist_count': 2,
         }
-
     ]
 
     def _extract_webpage(self, url):
@@ -96,6 +102,12 @@ class PBSIE(InfoExtractor):
                 r'<input type="hidden" id="air_date_[0-9]+" value="([^"]+)"',
                 webpage, 'upload date', default=None))
 
+            # tabbed frontline videos
+            tabbed_videos = re.findall(
+                r'<div[^>]+class="videotab[^"]*"[^>]+vid="(\d+)"', webpage)
+            if tabbed_videos:
+                return tabbed_videos, presumptive_id, upload_date
+
             MEDIA_ID_REGEXES = [
                 r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'",  # frontline video embed
                 r'class="coveplayerid">([^<]+)<',                       # coveplayer
@@ -130,6 +142,12 @@ class PBSIE(InfoExtractor):
     def _real_extract(self, url):
         video_id, display_id, upload_date = self._extract_webpage(url)
 
+        if isinstance(video_id, list):
+            entries = [self.url_result(
+                'http://video.pbs.org/video/%s' % vid_id, 'PBS', vid_id)
+                for vid_id in video_id]
+            return self.playlist_result(entries, display_id)
+
         info_url = 'http://video.pbs.org/videoInfo/%s?format=json' % video_id
         info = self._download_json(info_url, display_id)
 
diff --git a/youtube_dl/extractor/phoenix.py b/youtube_dl/extractor/phoenix.py
new file mode 100644 (file)
index 0000000..a20672c
--- /dev/null
@@ -0,0 +1,31 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from .zdf import extract_from_xml_url
+
+
+class PhoenixIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?phoenix\.de/content/(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://www.phoenix.de/content/884301',
+        'md5': 'ed249f045256150c92e72dbb70eadec6',
+        'info_dict': {
+            'id': '884301',
+            'ext': 'mp4',
+            'title': 'Michael Krons mit Hans-Werner Sinn',
+            'description': 'Im Dialog - Sa. 25.10.14, 00.00 - 00.35 Uhr',
+            'upload_date': '20141025',
+            'uploader': 'Im Dialog',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        internal_id = self._search_regex(
+            r'<div class="phx_vod" id="phx_vod_([0-9]+)"',
+            webpage, 'internal video ID')
+
+        api_url = 'http://www.phoenix.de/php/zdfplayer-v1.3/data/beitragsDetails.php?ak=web&id=%s' % internal_id
+        return extract_from_xml_url(self, video_id, api_url)
index 8aa69c46eb75e9ccfe6fab5b7bff2c9a5778009e..c66db3cdc84e55a6a3a904ddf3ff7c09aaac9573 100644 (file)
@@ -4,16 +4,17 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..utils import compat_urllib_parse
+from ..compat import compat_urllib_parse
 
 
 class PhotobucketIE(InfoExtractor):
     _VALID_URL = r'http://(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))'
     _TEST = {
         'url': 'http://media.photobucket.com/user/rachaneronas/media/TiredofLinkBuildingTryBacklinkMyDomaincom_zpsc0c3b9fa.mp4.html?filters[term]=search&filters[primary]=videos&filters[secondary]=images&sort=1&o=0',
-        'file': 'zpsc0c3b9fa.mp4',
         'md5': '7dabfb92b0a31f6c16cebc0f8e60ff99',
         'info_dict': {
+            'id': 'zpsc0c3b9fa',
+            'ext': 'mp4',
             'timestamp': 1367669341,
             'upload_date': '20130504',
             'uploader': 'rachaneronas',
@@ -31,7 +32,7 @@ class PhotobucketIE(InfoExtractor):
         # Extract URL, uploader, and title from webpage
         self.report_extraction(video_id)
         info_json = self._search_regex(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (.*?)\);',
-            webpage, 'info json')
+                                       webpage, 'info json')
         info = json.loads(info_json)
         url = compat_urllib_parse.unquote(self._html_search_regex(r'file=(.+\.mp4)', info['linkcodes']['html'], 'url'))
         return {
index 645a1e06dc8f3678b30d4e41b38ea13396c7a3e0..449d4836c3f7c0c8ce220cbcbd447e76cb17ea0f 100644 (file)
@@ -5,10 +5,13 @@ import re
 import os.path
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
 )
+from ..utils import (
+    ExtractorError,
+)
 
 
 class PlayedIE(InfoExtractor):
@@ -27,8 +30,13 @@ class PlayedIE(InfoExtractor):
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-
         orig_webpage = self._download_webpage(url, video_id)
+
+        m_error = re.search(
+            r'(?s)Reason for deletion:.*?<b class="err"[^>]*>(?P<msg>[^<]+)</b>', orig_webpage)
+        if m_error:
+            raise ExtractorError(m_error.group('msg'), expected=True)
+
         fields = re.findall(
             r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', orig_webpage)
         data = dict(fields)
index ebc0468042a22c2bccdfb5b7e45861c0bc45f61c..9576aed0e6668189c1959df3166b1e550facc7b0 100644 (file)
@@ -4,9 +4,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
     float_or_none,
     int_or_none,
index b1322f13f8b62a4618b1f857dadf0829b9752238..c3e667e9e72ea0aaf6e5db731f630816e6a2861d 100644 (file)
@@ -3,31 +3,38 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
 )
+from ..utils import (
+    clean_html,
+    ExtractorError,
+)
 
 
 class PlayvidIE(InfoExtractor):
-    _VALID_URL = r'^https?://www\.playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)'
+    _VALID_URL = r'https?://www\.playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)'
     _TEST = {
-        'url': 'http://www.playvid.com/watch/agbDDi7WZTV',
-        'md5': '44930f8afa616efdf9482daf4fe53e1e',
+        'url': 'http://www.playvid.com/watch/RnmBNgtrrJu',
+        'md5': 'ffa2f6b2119af359f544388d8c01eb6c',
         'info_dict': {
-            'id': 'agbDDi7WZTV',
+            'id': 'RnmBNgtrrJu',
             'ext': 'mp4',
-            'title': 'Michelle Lewin in Miami Beach',
-            'duration': 240,
+            'title': 'md5:9256d01c6317e3f703848b5906880dc8',
+            'duration': 82,
             'age_limit': 18,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
+        m_error = re.search(
+            r'<div class="block-error">\s*<div class="heading">\s*<div>(?P<msg>.+?)</div>\s*</div>', webpage)
+        if m_error:
+            raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
+
         video_title = None
         duration = None
         video_thumbnail = None
index ffafd23800f49de2bc7987f3d63c9b5ab2104b58..f20946a2bd0616d8d90cebf360570a3f0bc40e94 100644 (file)
@@ -6,6 +6,7 @@ import re
 from .common import InfoExtractor
 from ..utils import int_or_none
 
+
 class PodomaticIE(InfoExtractor):
     IE_NAME = 'podomatic'
     _VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)'
index bac484c67dbb01bbafa319c117b7c6d152b7dd5d..954dfccb75954d50a9a46bc14bdb1d0dcbd5588c 100644 (file)
@@ -8,7 +8,6 @@ from ..utils import (
     int_or_none,
     js_to_json,
     qualities,
-    determine_ext,
 )
 
 
@@ -45,13 +44,18 @@ class PornHdIE(InfoExtractor):
         thumbnail = self._search_regex(
             r"'poster'\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)
 
-        quality = qualities(['SD', 'HD'])
-        formats = [{
-            'url': source['file'],
-            'format_id': '%s-%s' % (source['label'], determine_ext(source['file'])),
-            'quality': quality(source['label']),
-        } for source in json.loads(js_to_json(self._search_regex(
-            r"(?s)'sources'\s*:\s*(\[.+?\])", webpage, 'sources')))]
+        quality = qualities(['sd', 'hd'])
+        sources = json.loads(js_to_json(self._search_regex(
+            r"(?s)'sources'\s*:\s*(\{.+?\})\s*\}\);", webpage, 'sources')))
+        formats = []
+        for container, s in sources.items():
+            for qname, video_url in s.items():
+                formats.append({
+                    'url': video_url,
+                    'container': container,
+                    'format_id': '%s-%s' % (container, qname),
+                    'quality': quality(qname),
+                })
         self._sort_formats(formats)
 
         return {
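
The rewritten loop walks a two-level mapping (container → quality name → URL) and ranks entries with the `qualities()` utility, which turns an ordered list of names into a scoring function. A self-contained sketch with invented sample data:

    def qualities(quality_ids):
        # Position in the list is the score; unknown names sort last (-1)
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    quality = qualities(['sd', 'hd'])
    sources = {  # invented sample of the page's JSON shape
        'mp4': {'sd': 'http://example.com/v-sd.mp4',
                'hd': 'http://example.com/v-hd.mp4'},
    }
    formats = []
    for container, s in sources.items():
        for qname, video_url in s.items():
            formats.append({
                'url': video_url,
                'container': container,
                'format_id': '%s-%s' % (container, qname),
                'quality': quality(qname),
            })
    formats.sort(key=lambda f: f['quality'])  # stand-in for _sort_formats()
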
index 4118ee9560e03d2fa1eea171766ef4893e274aa5..634142d0d27300eb82ea2f460fd2163a20208709 100644 (file)
@@ -4,10 +4,12 @@ import os
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
+    compat_urllib_parse,
     compat_urllib_parse_urlparse,
     compat_urllib_request,
-    compat_urllib_parse,
+)
+from ..utils import (
     str_to_int,
 )
 from ..aes import (
@@ -16,13 +18,14 @@ from ..aes import (
 
 
 class PornHubIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>pornhub\.com/view_video\.php\?viewkey=(?P<videoid>[0-9a-f]+))'
+    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=(?P<id>[0-9a-f]+)'
     _TEST = {
         'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
-        'file': '648719015.mp4',
         'md5': '882f488fa1f0026f023f33576004a2ed',
         'info_dict': {
-            "uploader": "BABES-COM",
+            'id': '648719015',
+            'ext': 'mp4',
+            "uploader": "Babes",
             "title": "Seductive Indian beauty strips down and fingers her pink pussy",
             "age_limit": 18
         }
@@ -35,9 +38,7 @@ class PornHubIE(InfoExtractor):
         return count
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
-        url = 'http://www.' + mobj.group('url')
+        video_id = self._match_id(url)
 
         req = compat_urllib_request.Request(url)
         req.add_header('Cookie', 'age_verified=1')
@@ -45,7 +46,7 @@ class PornHubIE(InfoExtractor):
 
         video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
         video_uploader = self._html_search_regex(
-            r'(?s)From:&nbsp;.+?<(?:a href="/users/|<span class="username)[^>]+>(.+?)<',
+            r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|<span class="username)[^>]+>(.+?)<',
             webpage, 'uploader', fatal=False)
         thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
         if thumbnail:
@@ -57,7 +58,7 @@ class PornHubIE(InfoExtractor):
         comment_count = self._extract_count(
             r'All comments \(<var class="videoCommentCount">([\d,\.]+)</var>', webpage, 'comment')
 
-        video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
+        video_urls = list(map(compat_urllib_parse.unquote, re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
         if webpage.find('"encrypted":true') != -1:
             password = compat_urllib_parse.unquote_plus(self._html_search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
             video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
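
When the page flags `"encrypted":true`, each percent-decoded URL is AES-decrypted with the page's `video_title` as password and a 32-byte key, via `aes_decrypt_text` from `youtube_dl/aes.py`. A hedged sketch of the call shape (the helper function name here is made up):

    from youtube_dl.aes import aes_decrypt_text

    def decrypt_video_urls(encrypted_urls, video_title):
        # Mirrors the hunk above: the title doubles as the AES password,
        # with a 32-byte key schedule
        return [aes_decrypt_text(s, video_title, 32).decode('utf-8')
                for s in encrypted_urls]
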
index 04bd3d9793c0424c6dded7d727e1f8cac629377c..34735c51e19c7dbbb1c07f2fc4a203df4dda70a9 100644 (file)
@@ -1,56 +1,94 @@
 from __future__ import unicode_literals
 
-import re
+import json
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_request,
+)
 from ..utils import (
-    compat_urllib_parse,
-
-    unified_strdate,
+    int_or_none,
 )
 
 
 class PornotubeIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
+    _VALID_URL = r'https?://(?:\w+\.)?pornotube\.com/(?:[^?#]*?)/video/(?P<id>[0-9]+)'
     _TEST = {
-        'url': 'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
-        'md5': '374dd6dcedd24234453b295209aa69b6',
+        'url': 'http://www.pornotube.com/orientation/straight/video/4964/title/weird-hot-and-wet-science',
+        'md5': '60fc5a4f0d93a97968fc7999d98260c9',
         'info_dict': {
-            'id': '1689755',
-            'ext': 'flv',
-            'upload_date': '20090708',
-            'title': 'Marilyn-Monroe-Bathing',
-            'age_limit': 18
+            'id': '4964',
+            'ext': 'mp4',
+            'upload_date': '20141203',
+            'title': 'Weird Hot and Wet Science',
+            'description': 'md5:a8304bef7ef06cb4ab476ca6029b01b0',
+            'categories': ['Adult Humor', 'Blondes'],
+            'uploader': 'Alpha Blue Archives',
+            'thumbnail': 're:^https?://.*\\.jpg$',
+            'timestamp': 1417582800,
+            'age_limit': 18,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        video_id = self._match_id(url)
 
-        video_id = mobj.group('videoid')
-        video_title = mobj.group('title')
+        # Fetch origin token
+        js_config = self._download_webpage(
+            'http://www.pornotube.com/assets/src/app/config.js', video_id,
+            note='Downloading JS config')
+        originAuthenticationSpaceKey = self._search_regex(
+            r"constant\('originAuthenticationSpaceKey',\s*'([^']+)'",
+            js_config, 'originAuthenticationSpaceKey')
+
+        # Fetch actual token
+        token_req_data = {
+            'authenticationSpaceKey': originAuthenticationSpaceKey,
+            'credentials': 'Clip Application',
+        }
+        token_req = compat_urllib_request.Request(
+            'https://api.aebn.net/auth/v1/token/primal',
+            data=json.dumps(token_req_data).encode('utf-8'))
+        token_req.add_header('Content-Type', 'application/json')
+        token_req.add_header('Origin', 'http://www.pornotube.com')
+        token_answer = self._download_json(
+            token_req, video_id, note='Requesting primal token')
+        token = token_answer['tokenKey']
 
-        # Get webpage content
-        webpage = self._download_webpage(url, video_id)
+        # Get video URL
+        delivery_req = compat_urllib_request.Request(
+            'https://api.aebn.net/delivery/v1/clips/%s/MP4' % video_id)
+        delivery_req.add_header('Authorization', token)
+        delivery_info = self._download_json(
+            delivery_req, video_id, note='Downloading delivery information')
+        video_url = delivery_info['mediaUrl']
 
-        # Get the video URL
-        VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
-        video_url = self._search_regex(VIDEO_URL_RE, webpage, 'video url')
-        video_url = compat_urllib_parse.unquote(video_url)
+        # Get additional info (title etc.)
+        info_req = compat_urllib_request.Request(
+            'https://api.aebn.net/content/v1/clips/%s?expand='
+            'title,description,primaryImageNumber,startSecond,endSecond,'
+            'movie.title,movie.MovieId,movie.boxCoverFront,movie.stars,'
+            'movie.studios,stars.name,studios.name,categories.name,'
+            'clipActive,movieActive,publishDate,orientations' % video_id)
+        info_req.add_header('Authorization', token)
+        info = self._download_json(
+            info_req, video_id, note='Downloading metadata')
 
-        #Get the uploaded date
-        VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
-        upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, 'upload date', fatal=False)
-        if upload_date:
-            upload_date = unified_strdate(upload_date)
-        age_limit = self._rta_search(webpage)
+        timestamp = int_or_none(info.get('publishDate'), scale=1000)
+        uploader = info.get('studios', [{}])[0].get('name')
+        movie_id = info['movie']['movieId']
+        thumbnail = 'http://pic.aebn.net/dis/t/%s/%s_%08d.jpg' % (
+            movie_id, movie_id, info['primaryImageNumber'])
+        categories = [c['name'] for c in info.get('categories', [])]
 
         return {
             'id': video_id,
             'url': video_url,
-            'upload_date': upload_date,
-            'title': video_title,
-            'ext': 'flv',
-            'format': 'flv',
-            'age_limit': age_limit,
+            'title': info['title'],
+            'description': info.get('description'),
+            'timestamp': timestamp,
+            'uploader': uploader,
+            'thumbnail': thumbnail,
+            'categories': categories,
+            'age_limit': 18,
         }
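
The rewrite swaps page scraping for a three-step API flow: read a space key out of the site's JS config, trade it for a token at `api.aebn.net`, then send the token in an `Authorization` header for the delivery and metadata calls. A standalone sketch with stdlib Python 3 names (the extractor itself goes through the `compat_*` shims):

    import json
    from urllib.request import Request, urlopen

    def fetch_primal_token(space_key):
        # POST the space key as JSON; the response carries 'tokenKey'
        req = Request(
            'https://api.aebn.net/auth/v1/token/primal',
            data=json.dumps({
                'authenticationSpaceKey': space_key,
                'credentials': 'Clip Application',
            }).encode('utf-8'),
            headers={'Content-Type': 'application/json',
                     'Origin': 'http://www.pornotube.com'})
        return json.loads(urlopen(req).read().decode('utf-8'))['tokenKey']

    def fetch_clip_url(token, video_id):
        # The token authorizes the delivery call that yields 'mediaUrl'
        req = Request('https://api.aebn.net/delivery/v1/clips/%s/MP4' % video_id,
                      headers={'Authorization': token})
        return json.loads(urlopen(req).read().decode('utf-8'))['mediaUrl']
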
index 463e855014fada90cce969f8dccc0b9f1e80cde7..f536e6e6cdfb3d71e21c98614e2baf117387493b 100644 (file)
@@ -4,17 +4,18 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    determine_ext,
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
 )
+from ..utils import (
+    determine_ext,
+    ExtractorError,
+)
 
 
 class PromptFileIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?promptfile\.com/l/(?P<id>[0-9A-Z\-]+)'
-    _FILE_NOT_FOUND_REGEX = r'<div.+id="not_found_msg".+>.+</div>[^-]'
     _TEST = {
         'url': 'http://www.promptfile.com/l/D21B4746E9-F01462F0FF',
         'md5': 'd1451b6302da7215485837aaea882c4c',
@@ -27,11 +28,10 @@ class PromptFileIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
+        if re.search(r'<div.+id="not_found_msg".+>(?!We are).+</div>[^-]', webpage) is not None:
             raise ExtractorError('Video %s does not exist' % video_id,
                                  expected=True)
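
The inlined regex sharpens the old not-found check with a negative lookahead: a `not_found_msg` div only counts as an error when its text does not start with 'We are' (presumably a benign notice the site shows in that div on healthy pages; that reading is an inference). A tiny check with invented HTML:

    import re

    NOT_FOUND = r'<div.+id="not_found_msg".+>(?!We are).+</div>[^-]'

    assert re.search(
        NOT_FOUND,
        '<div class="x" id="not_found_msg" style="display:block">File was deleted</div> ')
    assert not re.search(
        NOT_FOUND,
        '<div class="x" id="not_found_msg" style="display:block">We are processing</div> ')
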
 
index 619496de7a57f9ab297b708bbffb3005c85e0dd8..385681d06e3dda356193d9f89c7ccbdd4cbde453 100644 (file)
@@ -5,8 +5,10 @@ import re
 
 from hashlib import sha1
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
+)
+from ..utils import (
     unified_strdate,
 )
 
@@ -85,7 +87,7 @@ class ProSiebenSat1IE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Im Interview: Kai Wiesinger',
                 'description': 'md5:e4e5370652ec63b95023e914190b4eb9',
-                'upload_date': '20140225',
+                'upload_date': '20140203',
                 'duration': 522.56,
             },
             'params': {
@@ -100,7 +102,7 @@ class ProSiebenSat1IE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Jagd auf Fertigkost im Elsthal - Teil 2',
                 'description': 'md5:2669cde3febe9bce13904f701e774eb6',
-                'upload_date': '20140225',
+                'upload_date': '20141014',
                 'duration': 2410.44,
             },
             'params': {
@@ -152,12 +154,22 @@ class ProSiebenSat1IE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        {
+            'url': 'http://www.prosieben.de/tv/joko-gegen-klaas/videos/playlists/episode-8-ganze-folge-playlist',
+            'info_dict': {
+                'id': '439664',
+                'title': 'Episode 8 - Ganze Folge - Playlist',
+                'description': 'md5:63b8963e71f481782aeea877658dec84',
+            },
+            'playlist_count': 2,
+        },
     ]
 
     _CLIPID_REGEXES = [
         r'"clip_id"\s*:\s+"(\d+)"',
         r'clipid: "(\d+)"',
         r'clip[iI]d=(\d+)',
+        r"'itemImageUrl'\s*:\s*'/dynamic/thumbnails/full/\d+/(\d+)",
     ]
     _TITLE_REGEXES = [
         r'<h2 class="subtitle" itemprop="name">\s*(.+?)</h2>',
@@ -178,11 +190,19 @@ class ProSiebenSat1IE(InfoExtractor):
         r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
         r'(\d{2}\.\d{2}\.\d{4}) \| \d{2}:\d{2} Min<br/>',
     ]
+    _PAGE_TYPE_REGEXES = [
+        r'<meta name="page_type" content="([^"]+)">',
+        r"'itemType'\s*:\s*'([^']*)'",
+    ]
+    _PLAYLIST_ID_REGEXES = [
+        r'content[iI]d=(\d+)',
+        r"'itemId'\s*:\s*'([^']*)'",
+    ]
+    _PLAYLIST_CLIP_REGEXES = [
+        r'(?s)data-qvt=.+?<a href="([^"]+)"',
+    ]
 
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-
+    def _extract_clip(self, url, webpage):
         clip_id = self._html_search_regex(self._CLIPID_REGEXES, webpage, 'clip id')
 
         access_token = 'testclient'
@@ -280,4 +300,32 @@ class ProSiebenSat1IE(InfoExtractor):
             'upload_date': upload_date,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
+
+    def _extract_playlist(self, url, webpage):
+        playlist_id = self._html_search_regex(
+            self._PLAYLIST_ID_REGEXES, webpage, 'playlist id')
+        for regex in self._PLAYLIST_CLIP_REGEXES:
+            playlist_clips = re.findall(regex, webpage)
+            if playlist_clips:
+                title = self._html_search_regex(
+                    self._TITLE_REGEXES, webpage, 'title')
+                description = self._html_search_regex(
+                    self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
+                entries = [
+                    self.url_result(
+                        re.match('(.+?//.+?)/', url).group(1) + clip_path,
+                        'ProSiebenSat1')
+                    for clip_path in playlist_clips]
+                return self.playlist_result(entries, playlist_id, title, description)
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        page_type = self._search_regex(
+            self._PAGE_TYPE_REGEXES, webpage,
+            'page type', default='clip').lower()
+        if page_type == 'clip':
+            return self._extract_clip(url, webpage)
+        elif page_type == 'playlist':
+            return self._extract_playlist(url, webpage)
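
`_real_extract` is now a thin dispatcher: sniff a page type out of the HTML, default to 'clip', and route accordingly. The detection pattern, reduced to a standalone function (regex list shortened, sample HTML invented):

    import re

    def detect_page_type(webpage, default='clip'):
        # First matching regex wins, like _search_regex over a list
        for pattern in [r'<meta name="page_type" content="([^"]+)">',
                        r"'itemType'\s*:\s*'([^']*)'"]:
            m = re.search(pattern, webpage)
            if m:
                return m.group(1).lower()
        return default

    assert detect_page_type('<meta name="page_type" content="CLIP">') == 'clip'
    assert detect_page_type('<body></body>') == 'clip'  # falls back
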
diff --git a/youtube_dl/extractor/quickvid.py b/youtube_dl/extractor/quickvid.py
new file mode 100644 (file)
index 0000000..af7d76c
--- /dev/null
@@ -0,0 +1,53 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_urlparse,
+)
+from ..utils import (
+    determine_ext,
+    int_or_none,
+)
+
+
+class QuickVidIE(InfoExtractor):
+    _VALID_URL = r'https?://(www\.)?quickvid\.org/watch\.php\?v=(?P<id>[a-zA-Z_0-9-]+)'
+    _TEST = {
+        'url': 'http://quickvid.org/watch.php?v=sUQT3RCG8dx',
+        'md5': 'c0c72dd473f260c06c808a05d19acdc5',
+        'info_dict': {
+            'id': 'sUQT3RCG8dx',
+            'ext': 'mp4',
+            'title': 'Nick Offerman\'s Summer Reading Recap',
+            'thumbnail': 're:^https?://.*\.(?:png|jpg|gif)$',
+            'view_count': int,
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(r'<h2>(.*?)</h2>', webpage, 'title')
+        view_count = int_or_none(self._html_search_regex(
+            r'(?s)<div id="views">(.*?)</div>',
+            webpage, 'view count', fatal=False))
+        video_code = self._search_regex(
+            r'(?s)<video id="video"[^>]*>(.*?)</video>', webpage, 'video code')
+        formats = [
+            {
+                'url': compat_urlparse.urljoin(url, src),
+                'format_id': determine_ext(src, None),
+            } for src in re.findall('<source\s+src="([^"]+)"', video_code)
+        ]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'view_count': view_count,
+        }
diff --git a/youtube_dl/extractor/radiode.py b/youtube_dl/extractor/radiode.py
new file mode 100644 (file)
index 0000000..f95bc94
--- /dev/null
@@ -0,0 +1,55 @@
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+
+
+class RadioDeIE(InfoExtractor):
+    IE_NAME = 'radio.de'
+    _VALID_URL = r'https?://(?P<id>.+?)\.(?:radio\.(?:de|at|fr|pt|es|pl|it)|rad\.io)'
+    _TEST = {
+        'url': 'http://ndr2.radio.de/',
+        'md5': '3b4cdd011bc59174596b6145cda474a4',
+        'info_dict': {
+            'id': 'ndr2',
+            'ext': 'mp3',
+            'title': 're:^NDR 2 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+            'description': 'md5:591c49c702db1a33751625ebfb67f273',
+            'thumbnail': 're:^https?://.*\.png',
+        },
+        'params': {
+            'skip_download': True,
+        }
+    }
+
+    def _real_extract(self, url):
+        radio_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, radio_id)
+
+        broadcast = json.loads(self._search_regex(
+            r'_getBroadcast\s*=\s*function\(\s*\)\s*{\s*return\s+({.+?})\s*;\s*}',
+            webpage, 'broadcast'))
+
+        title = self._live_title(broadcast['name'])
+        description = broadcast.get('description') or broadcast.get('shortDescription')
+        thumbnail = broadcast.get('picture4Url') or broadcast.get('picture4TransUrl')
+
+        formats = [{
+            'url': stream['streamUrl'],
+            'ext': stream['streamContentFormat'].lower(),
+            'acodec': stream['streamContentFormat'],
+            'abr': stream['bitRate'],
+            'asr': stream['sampleRate']
+        } for stream in broadcast['streamUrls']]
+        self._sort_formats(formats)
+
+        return {
+            'id': radio_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'is_live': True,
+            'formats': formats,
+        }
index ba3dd707f8b5d38363dacce62e9fe684265c6a96..aa26b7e0bb0f4f0a489ad4cfdef330c704747680 100644 (file)
@@ -3,10 +3,12 @@ from __future__ import unicode_literals
 import re
 
 from .subtitles import SubtitlesInfoExtractor
+from ..compat import (
+    compat_urllib_parse,
+)
 from ..utils import (
     parse_duration,
     unified_strdate,
-    compat_urllib_parse,
 )
 
 
@@ -119,4 +121,4 @@ class RaiIE(SubtitlesInfoExtractor):
             if captions.endswith(STL_EXT):
                 captions = captions[:-len(STL_EXT)] + SRT_EXT
             subtitles['it'] = 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions)
-        return subtitles
\ No newline at end of file
+        return subtitles
index 2c53ed2e1147a50248a4294c838ceef67688a356..0f8f3ebde0999e8599eaa86516dd2b52524c9b40 100644 (file)
@@ -33,7 +33,7 @@ class RBMARadioIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         json_data = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$',
-            webpage, 'json data', flags=re.MULTILINE)
+                                       webpage, 'json data', flags=re.MULTILINE)
 
         try:
             data = json.loads(json_data)
index d1e12dd8d5a6699ba3caa8d041c8bc039e996fc7..846b76c81528431c0faf8ea3fc9bbd6b017db099 100644 (file)
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -9,32 +7,23 @@ class RedTubeIE(InfoExtractor):
     _VALID_URL = r'http://(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
     _TEST = {
         'url': 'http://www.redtube.com/66418',
-        'file': '66418.mp4',
-        # md5 varies from time to time, as in
-        # https://travis-ci.org/rg3/youtube-dl/jobs/14052463#L295
-        #'md5': u'7b8c22b5e7098a3e1c09709df1126d2d',
         'info_dict': {
+            'id': '66418',
+            'ext': 'mp4',
             "title": "Sucked on a toilet",
             "age_limit": 18,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
-        video_extension = 'mp4'
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        self.report_extraction(video_id)
-
         video_url = self._html_search_regex(
-            r'<source src="(.+?)" type="video/mp4">', webpage, u'video URL')
-
+            r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
         video_title = self._html_search_regex(
             r'<h1 class="videoTitle[^"]*">(.+?)</h1>',
-            webpage, u'title')
-
+            webpage, 'title')
         video_thumbnail = self._og_search_thumbnail(webpage)
 
         # No self-labeling, but they describe themselves as
@@ -44,7 +33,7 @@ class RedTubeIE(InfoExtractor):
         return {
             'id': video_id,
             'url': video_url,
-            'ext': video_extension,
+            'ext': 'mp4',
             'title': video_title,
             'thumbnail': video_thumbnail,
             'age_limit': age_limit,
index 9fbdb9fcbbbc8fc5ef1847a2df8e8f7f929d3b75..59dc137cc225889feb9428dd70f42a91451a951d 100644 (file)
@@ -41,4 +41,3 @@ class RingTVIE(InfoExtractor):
             'thumbnail': thumbnail_url,
             'description': description,
         }
-
index a6ad594659250254b6cee9efd31cdcb8364da89b..962b524e94d2bddd12781b8dace06a1b28bc2c71 100644 (file)
@@ -1,43 +1,43 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
-    clean_html,
-    compat_parse_qs,
-)
+from ..compat import compat_urllib_parse_unquote
 
 
 class Ro220IE(InfoExtractor):
     IE_NAME = '220.ro'
-    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<video_id>[^/]+)'
+    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
     _TEST = {
-        "url": "http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/",
-        'file': 'LYV6doKo7f.mp4',
+        'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
         'md5': '03af18b73a07b4088753930db7a34add',
         'info_dict': {
-            "title": "Luati-le Banii sez 4 ep 1",
-            "description": "re:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$",
+            'id': 'LYV6doKo7f',
+            'ext': 'mp4',
+            'title': 'Luati-le Banii sez 4 ep 1',
+            'description': 're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
-        flashVars_str = self._search_regex(
-            r'<param name="flashVars" value="([^"]+)"',
-            webpage, 'flashVars')
-        flashVars = compat_parse_qs(flashVars_str)
+        url = compat_urllib_parse_unquote(self._search_regex(
+            r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        formats = [{
+            'format_id': 'sd',
+            'url': url,
+            'ext': 'mp4',
+        }]
 
         return {
-            '_type': 'video',
             'id': video_id,
-            'ext': 'mp4',
-            'url': flashVars['videoURL'][0],
-            'title': flashVars['title'][0],
-            'description': clean_html(flashVars['desc'][0]),
-            'thumbnail': flashVars['preview'][0],
+            'formats': formats,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
         }
index 0ab1eb69c8c82ae0bc04ec135971cd919b17a155..d029b0ec525fc2e1186aba7e1c3dadf9bed981b0 100644 (file)
@@ -38,12 +38,13 @@ class RtlXlIE(InfoExtractor):
         progname = info['abstracts'][0]['name']
         subtitle = material['title'] or info['episodes'][0]['name']
 
-        videopath = material['videopath']
-        f4m_url = 'http://manifest.us.rtl.nl' + videopath
+        # Use unencrypted m3u8 streams (See https://github.com/rg3/youtube-dl/issues/4118)
+        videopath = material['videopath'].replace('.f4m', '.m3u8')
+        m3u8_url = 'http://manifest.us.rtl.nl' + videopath
 
-        formats = self._extract_f4m_formats(f4m_url, uuid)
+        formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4')
 
-        video_urlpart = videopath.split('/flash/')[1][:-4]
+        video_urlpart = videopath.split('/flash/')[1][:-5]
         PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4'
 
         formats.extend([
@@ -54,9 +55,12 @@ class RtlXlIE(InfoExtractor):
             {
                 'url': PG_URL_TEMPLATE % ('a3m', video_urlpart),
                 'format_id': 'pg-hd',
+                'quality': 0,
             }
         ])
 
+        self._sort_formats(formats)
+
         return {
             'id': uuid,
             'title': '%s - %s' % (progname, subtitle),
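
Two details in this hunk travel together: the manifest suffix swap ('.f4m' → '.m3u8') and the widened slice ([:-4] → [:-5]), since '.m3u8' is one character longer than '.f4m'. A worked example with a made-up videopath:

    # Hypothetical path in the layout the extractor expects
    videopath = '/rtlxl/network/a2t/flash/components/uuid-1234.f4m'

    videopath = videopath.replace('.f4m', '.m3u8')
    video_urlpart = videopath.split('/flash/')[1][:-5]  # len('.m3u8') == 5

    assert video_urlpart == 'components/uuid-1234'
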
index a45884b251fa355e04c65de554f0e9cbfb5406bb..285c3c4bebf8ec7c2cd793d2b40739222c18a5ca 100644 (file)
@@ -81,7 +81,7 @@ class RTLnowIE(InfoExtractor):
                 'id': '99205',
                 'ext': 'flv',
                 'title': 'Medicopter 117 - Angst!',
-                'description': 'md5:895b1df01639b5f61a04fc305a5cb94d',
+                'description': 're:^Im Therapiezentrum \'Sonnalm\' kommen durch eine Unachtsamkeit die für die B.handlung mit Phobikern gehaltenen Voglespinnen frei\. Eine Ausreißerin',
                 'thumbnail': 'http://autoimg.static-fra.de/superrtlnow/287529/1500x1500/image2.jpg',
                 'upload_date': '20080928',
                 'duration': 2691,
@@ -122,7 +122,7 @@ class RTLnowIE(InfoExtractor):
         playerdata = self._download_xml(playerdata_url, video_id, 'Downloading player data XML')
 
         videoinfo = playerdata.find('./playlist/videoinfo')
-        
+
         formats = []
         for filename in videoinfo.findall('filename'):
             mobj = re.search(r'(?P<url>rtmpe://(?:[^/]+/){2})(?P<play_path>.+)', filename.text)
@@ -153,4 +153,4 @@ class RTLnowIE(InfoExtractor):
             'upload_date': upload_date,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
diff --git a/youtube_dl/extractor/rtp.py b/youtube_dl/extractor/rtp.py
new file mode 100644 (file)
index 0000000..0bed1f1
--- /dev/null
@@ -0,0 +1,57 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import js_to_json
+
+
+class RTPIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?rtp\.pt/play/p(?P<program_id>[0-9]+)/e(?P<id>[0-9]+)/?'
+    _TEST = {
+        'url': 'http://www.rtp.pt/play/p405/e174042/paixoes-cruzadas',
+        'info_dict': {
+            'id': '174042',
+            'ext': 'mp3',
+            'title': 'Paixões Cruzadas',
+            'description': 'As paixões musicais de António Cartaxo e António Macedo',
+            'thumbnail': 're:^https?://.*\.jpg',
+        },
+        'params': {
+            'skip_download': True,  # RTMP download
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+        title = self._html_search_meta(
+            'twitter:title', webpage, display_name='title', fatal=True)
+        description = self._html_search_meta('description', webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        player_config = self._search_regex(
+            r'(?s)RTPPLAY\.player\.newPlayer\(\s*(\{.*?\})\s*\)', webpage, 'player config')
+        config = json.loads(js_to_json(player_config))
+
+        path, ext = config.get('file').rsplit('.', 1)
+        formats = [{
+            'app': config.get('application'),
+            'play_path': '{ext:s}:{path:s}'.format(ext=ext, path=path),
+            'page_url': url,
+            'url': 'rtmp://{streamer:s}/{application:s}'.format(**config),
+            'rtmp_live': config.get('live', False),
+            'ext': ext,
+            'vcodec': config.get('type') == 'audio' and 'none' or None,
+            'player_url': 'http://programas.rtp.pt/play/player.swf?v3',
+        }]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'description': description,
+            'thumbnail': thumbnail,
+        }
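
Note how the RTMP format above is assembled straight out of the player config via keyword expansion, with the `and/or` idiom marking audio-only items as video-less. A quick demo with an invented config:

    config = {  # invented sample mirroring the keys the extractor reads
        'file': 'paixoes_cruzadas.mp3',
        'application': 'vod',
        'streamer': 'streaming1.rtp.pt',
        'type': 'audio',
        'live': False,
    }

    path, ext = config['file'].rsplit('.', 1)
    rtmp_url = 'rtmp://{streamer:s}/{application:s}'.format(**config)
    play_path = '{ext:s}:{path:s}'.format(ext=ext, path=path)
    vcodec = config.get('type') == 'audio' and 'none' or None

    assert rtmp_url == 'rtmp://streaming1.rtp.pt/vod'
    assert play_path == 'mp3:paixoes_cruzadas'
    assert vcodec == 'none'
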
index e8199b11446e503897500e0e423b3cbd5d52b441..5e84c109802e34ce8f57496ee3b7e2cd409c0788 100644 (file)
@@ -4,18 +4,20 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_str,
+)
 from ..utils import (
     int_or_none,
     parse_duration,
     parse_iso8601,
     unescapeHTML,
-    compat_str,
 )
 
 
 class RTSIE(InfoExtractor):
     IE_DESC = 'RTS.ch'
-    _VALID_URL = r'^https?://(?:www\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-.*?\.html'
+    _VALID_URL = r'https?://(?:www\.)?rts\.ch/(?:(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+))'
 
     _TESTS = [
         {
@@ -23,6 +25,7 @@ class RTSIE(InfoExtractor):
             'md5': '753b877968ad8afaeddccc374d4256a5',
             'info_dict': {
                 'id': '3449373',
+                'display_id': 'les-enfants-terribles',
                 'ext': 'mp4',
                 'duration': 1488,
                 'title': 'Les Enfants Terribles',
@@ -30,7 +33,8 @@ class RTSIE(InfoExtractor):
                 'uploader': 'Divers',
                 'upload_date': '19680921',
                 'timestamp': -40280400,
-                'thumbnail': 're:^https?://.*\.image'
+                'thumbnail': 're:^https?://.*\.image',
+                'view_count': int,
             },
         },
         {
@@ -38,6 +42,7 @@ class RTSIE(InfoExtractor):
             'md5': 'c148457a27bdc9e5b1ffe081a7a8337b',
             'info_dict': {
                 'id': '5624067',
+                'display_id': 'entre-ciel-et-mer',
                 'ext': 'mp4',
                 'duration': 3720,
                 'title': 'Les yeux dans les cieux - Mon homard au Canada',
@@ -45,7 +50,8 @@ class RTSIE(InfoExtractor):
                 'uploader': 'Passe-moi les jumelles',
                 'upload_date': '20140404',
                 'timestamp': 1396635300,
-                'thumbnail': 're:^https?://.*\.image'
+                'thumbnail': 're:^https?://.*\.image',
+                'view_count': int,
             },
         },
         {
@@ -53,6 +59,7 @@ class RTSIE(InfoExtractor):
             'md5': 'b4326fecd3eb64a458ba73c73e91299d',
             'info_dict': {
                 'id': '5745975',
+                'display_id': '1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski',
                 'ext': 'mp4',
                 'duration': 48,
                 'title': '1/2, Kloten - Fribourg (5-2): second but pour Gottéron par Kwiatowski',
@@ -60,7 +67,8 @@ class RTSIE(InfoExtractor):
                 'uploader': 'Hockey',
                 'upload_date': '20140403',
                 'timestamp': 1396556882,
-                'thumbnail': 're:^https?://.*\.image'
+                'thumbnail': 're:^https?://.*\.image',
+                'view_count': int,
             },
             'skip': 'Blocked outside Switzerland',
         },
@@ -69,6 +77,7 @@ class RTSIE(InfoExtractor):
             'md5': '9bb06503773c07ce83d3cbd793cebb91',
             'info_dict': {
                 'id': '5745356',
+                'display_id': 'londres-cachee-par-un-epais-smog',
                 'ext': 'mp4',
                 'duration': 33,
                 'title': 'Londres cachée par un épais smog',
@@ -76,7 +85,8 @@ class RTSIE(InfoExtractor):
                 'uploader': 'Le Journal en continu',
                 'upload_date': '20140403',
                 'timestamp': 1396537322,
-                'thumbnail': 're:^https?://.*\.image'
+                'thumbnail': 're:^https?://.*\.image',
+                'view_count': int,
             },
         },
         {
@@ -84,6 +94,7 @@ class RTSIE(InfoExtractor):
             'md5': 'dd8ef6a22dff163d063e2a52bc8adcae',
             'info_dict': {
                 'id': '5706148',
+                'display_id': 'urban-hippie-de-damien-krisl-03-04-2014',
                 'ext': 'mp3',
                 'duration': 123,
                 'title': '"Urban Hippie", de Damien Krisl',
@@ -92,22 +103,44 @@ class RTSIE(InfoExtractor):
                 'timestamp': 1396551600,
             },
         },
+        {
+            'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
+            'md5': '968777c8779e5aa2434be96c54e19743',
+            'info_dict': {
+                'id': '6348260',
+                'display_id': 'le-19h30',
+                'ext': 'mp4',
+                'duration': 1796,
+                'title': 'Le 19h30',
+                'description': '',
+                'uploader': 'Le 19h30',
+                'upload_date': '20141201',
+                'timestamp': 1417458600,
+                'thumbnail': 're:^https?://.*\.image',
+                'view_count': int,
+            },
+        },
+        {
+            'url': 'http://www.rts.ch/play/tv/le-19h30/video/le-chantier-du-nouveau-parlement-vaudois-a-permis-une-trouvaille-historique?id=6348280',
+            'only_matching': True,
+        }
     ]
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = m.group('id') or m.group('id_new')
+        display_id = m.group('display_id') or m.group('display_id_new')
 
         def download_json(internal_id):
             return self._download_json(
                 'http://www.rts.ch/a/%s.html?f=json/article' % internal_id,
-                video_id)
+                display_id)
 
         all_info = download_json(video_id)
 
         # video_id extracted out of URL is not always a real id
         if 'video' not in all_info and 'audio' not in all_info:
-            page = self._download_webpage(url, video_id)
+            page = self._download_webpage(url, display_id)
             internal_id = self._html_search_regex(
                 r'<(?:video|audio) data-id="([0-9]+)"', page,
                 'internal video id')
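
The widened `_VALID_URL` matches two URL shapes with differently named groups; only one pair is non-None per match, so the code picks with `or`. A self-contained check against the play-URL test added in this file's diff:

    import re

    VALID_URL = (r'https?://(?:www\.)?rts\.ch/'
                 r'(?:(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html'
                 r'|play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+))')

    m = re.match(VALID_URL, 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260')
    assert (m.group('id') or m.group('id_new')) == '6348260'
    assert (m.group('display_id') or m.group('display_id_new')) == 'le-19h30'
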
@@ -143,6 +176,7 @@ class RTSIE(InfoExtractor):
 
         return {
             'id': video_id,
+            'display_id': display_id,
             'formats': formats,
             'title': info['title'],
             'description': info.get('intro'),
index 4dd35a47b35b5341139aa2d7f27886b52ddad5ce..0ce22d60c7fa995980e3f70159583f156672d76b 100644 (file)
@@ -54,7 +54,6 @@ def _decrypt_url(png):
     return url
 
 
-
 class RTVEALaCartaIE(InfoExtractor):
     IE_NAME = 'rtve.es:alacarta'
     IE_DESC = 'RTVE a la carta'
index 55b58e5e6c09af2d211edaba256ad2ebce2a958d..0e470e73f538fd60d7ed34cbe515042f6abc078b 100644 (file)
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -21,19 +19,20 @@ class RUHDIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         video_url = self._html_search_regex(
             r'<param name="src" value="([^"]+)"', webpage, 'video url')
         title = self._html_search_regex(
-            r'<title>([^<]+)&nbsp;&nbsp; RUHD.ru - Видео Высокого качества №1 в России!</title>', webpage, 'title')
+            r'<title>([^<]+)&nbsp;&nbsp; RUHD.ru - Видео Высокого качества №1 в России!</title>',
+            webpage, 'title')
         description = self._html_search_regex(
-            r'(?s)<div id="longdesc">(.+?)<span id="showlink">', webpage, 'description', fatal=False)
+            r'(?s)<div id="longdesc">(.+?)<span id="showlink">',
+            webpage, 'description', fatal=False)
         thumbnail = self._html_search_regex(
-            r'<param name="previewImage" value="([^"]+)"', webpage, 'thumbnail', fatal=False)
+            r'<param name="previewImage" value="([^"]+)"',
+            webpage, 'thumbnail', fatal=False)
         if thumbnail:
             thumbnail = 'http://www.ruhd.ru' + thumbnail
 
index 0c8790da28c4b06cfbc941bdff7ad4e64b47ac74..b72b5a5869ae3ae6044ac210d55eca9df8a5ccf8 100644 (file)
@@ -5,10 +5,12 @@ import re
 import itertools
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
-    unified_strdate,
+)
+from ..utils import (
     ExtractorError,
+    unified_strdate,
 )
 
 
@@ -36,9 +38,7 @@ class RutubeIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         video = self._download_json(
             'http://rutube.ru/api/video/%s/?format=json' % video_id,
             video_id, 'Downloading video JSON')
@@ -53,6 +53,7 @@ class RutubeIE(InfoExtractor):
         m3u8_url = options['video_balancer'].get('m3u8')
         if m3u8_url is None:
             raise ExtractorError('Couldn\'t find m3u8 manifest url')
+        formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
 
         return {
             'id': video['id'],
@@ -60,8 +61,7 @@ class RutubeIE(InfoExtractor):
             'description': video['description'],
             'duration': video['duration'],
             'view_count': video['hits'],
-            'url': m3u8_url,
-            'ext': 'mp4',
+            'formats': formats,
             'thumbnail': video['thumbnail_url'],
             'uploader': author.get('name'),
             'uploader_id': compat_str(author['id']) if author else None,
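
Returning `formats` from `_extract_m3u8_formats` instead of a bare m3u8 URL means the variant playlist gets expanded into per-bitrate entries, so `-f` format selection works. Roughly what that expansion does with a master playlist (a simplified sketch, not the real implementation):

    import re

    def parse_master_playlist(m3u8_doc, base_url):
        # Pair each #EXT-X-STREAM-INF line with the URI line that follows it
        formats, last_info = [], None
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_info = line
            elif line and not line.startswith('#'):
                m = re.search(r'BANDWIDTH=(\d+)', last_info or '')
                formats.append({
                    'url': line if line.startswith('http') else base_url + line,
                    'tbr': int(m.group(1)) // 1000 if m else None,
                    'ext': 'mp4',
                })
                last_info = None
        return sorted(formats, key=lambda f: f['tbr'] or 0)

    master = '#EXTM3U\n#EXT-X-STREAM-INF:BANDWIDTH=700000\nvideo_700.m3u8\n'
    assert parse_master_playlist(master, 'http://example.com/')[0]['tbr'] == 700
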
@@ -114,8 +114,7 @@ class RutubeMovieIE(RutubeChannelIE):
     _PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        movie_id = mobj.group('id')
+        movie_id = self._match_id(url)
         movie = self._download_json(
             self._MOVIE_TEMPLATE % movie_id, movie_id,
             'Downloading movie JSON')
index f737b4e5fad8cd86e28b094abcb4226927083d2a..a73e6f331fc02a8977863a412227681b3838b91a 100644 (file)
@@ -191,4 +191,4 @@ class RUTVIE(InfoExtractor):
             'view_count': view_count,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 409f8540a0b2e2ef9db1ab3c5746d7779a2e5db3..b8775c2f99f4a105ae35f1b04a919e64c987df0f 100644 (file)
@@ -27,8 +27,7 @@ class SBSIE(InfoExtractor):
             'thumbnail': 're:http://.*\.jpg',
         },
         'add_ie': ['Generic'],
-    },
-    {
+    }, {
         'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed',
         'only_matching': True,
     }]
index 55a481cc0ed41fa0bef33f3af1bce941fa7659a1..3bf93c870b2bc30c3baf9567a64d06171558f06b 100644 (file)
@@ -53,4 +53,4 @@ class SciVeeIE(InfoExtractor):
             'description': description,
             'thumbnail': 'http://www.scivee.tv/assets/videothumb/%s' % video_id,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 306869e6af109cf6971a72df14f47fe99a7b6885..dfd897ba3a3f0a7297164fb315e4543bb597d678 100644 (file)
@@ -1,14 +1,14 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
+from ..compat import (
     compat_parse_qs,
     compat_urllib_request,
 )
+from ..utils import (
+    ExtractorError,
+)
 
 
 class ScreencastIE(InfoExtractor):
@@ -57,8 +57,7 @@ class ScreencastIE(InfoExtractor):
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         video_url = self._html_search_regex(
@@ -96,7 +95,7 @@ class ScreencastIE(InfoExtractor):
         if title is None:
             title = self._html_search_regex(
                 [r'<b>Title:</b> ([^<]*)</div>',
-                r'class="tabSeperator">></span><span class="tabText">(.*?)<'],
+                 r'class="tabSeperator">></span><span class="tabText">(.*?)<'],
                 webpage, 'title')
         thumbnail = self._og_search_thumbnail(webpage)
         description = self._og_search_description(webpage, default=None)
diff --git a/youtube_dl/extractor/screenwavemedia.py b/youtube_dl/extractor/screenwavemedia.py
new file mode 100644 (file)
index 0000000..6c9fdb7
--- /dev/null
@@ -0,0 +1,178 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    unified_strdate,
+)
+
+
+class ScreenwaveMediaIE(InfoExtractor):
+    _VALID_URL = r'http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=(?P<id>.+)'
+
+    _TESTS = [{
+        'url': 'http://player.screenwavemedia.com/play/play.php?playerdiv=videoarea&companiondiv=squareAd&id=Cinemassacre-19911',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        playerdata = self._download_webpage(url, video_id, 'Downloading player webpage')
+
+        vidtitle = self._search_regex(
+            r'\'vidtitle\'\s*:\s*"([^"]+)"', playerdata, 'vidtitle').replace('\\/', '/')
+        vidurl = self._search_regex(
+            r'\'vidurl\'\s*:\s*"([^"]+)"', playerdata, 'vidurl').replace('\\/', '/')
+
+        videolist_url = None
+
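+        # The player config points at the SMIL playlist in one of two ways:
+        # either a 'videoserver' host plus a 'vidid', or a direct
+        # jwplayer.smil URL in its 'file' property.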
+        mobj = re.search(r"'videoserver'\s*:\s*'(?P<videoserver>[^']+)'", playerdata)
+        if mobj:
+            videoserver = mobj.group('videoserver')
+            mobj = re.search(r'\'vidid\'\s*:\s*"(?P<vidid>[^\']+)"', playerdata)
+            vidid = mobj.group('vidid') if mobj else video_id
+            videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
+        else:
+            mobj = re.search(r"file\s*:\s*'(?P<smil>http.+?/jwplayer\.smil)'", playerdata)
+            if mobj:
+                videolist_url = mobj.group('smil')
+
+        if videolist_url:
+            videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
+            formats = []
+            baseurl = vidurl[:vidurl.rfind('/') + 1]
+            for video in videolist.findall('.//video'):
+                src = video.get('src')
+                if not src:
+                    continue
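+                # Strip the streaming protocol prefix (e.g. 'mp4:') from the
+                # SMIL src; the remainder is resolved against vidurl's directory.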
+                file_ = src.partition(':')[-1]
+                width = int_or_none(video.get('width'))
+                height = int_or_none(video.get('height'))
+                bitrate = int_or_none(video.get('system-bitrate'), scale=1000)
+                format = {
+                    'url': baseurl + file_,
+                    'format_id': src.rpartition('.')[0].rpartition('_')[-1],
+                }
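+                # Entries that carry dimensions are video renditions; the
+                # rest are treated as audio-only streams.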
+                if width or height:
+                    format.update({
+                        'tbr': bitrate,
+                        'width': width,
+                        'height': height,
+                    })
+                else:
+                    format.update({
+                        'abr': bitrate,
+                        'vcodec': 'none',
+                    })
+                formats.append(format)
+        else:
+            formats = [{
+                'url': vidurl,
+            }]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': vidtitle,
+            'formats': formats,
+        }
+
+
+class CinemassacreIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?cinemassacre\.com/(?P<date_y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/(?P<display_id>[^?#/]+)'
+    _TESTS = [
+        {
+            'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
+            'md5': 'fde81fbafaee331785f58cd6c0d46190',
+            'info_dict': {
+                'id': 'Cinemassacre-19911',
+                'ext': 'mp4',
+                'upload_date': '20121110',
+                'title': '“Angry Video Game Nerd: The Movie” – Trailer',
+                'description': 'md5:fb87405fcb42a331742a0dce2708560b',
+            },
+        },
+        {
+            'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
+            'md5': 'd72f10cd39eac4215048f62ab477a511',
+            'info_dict': {
+                'id': 'Cinemassacre-521be8ef82b16',
+                'ext': 'mp4',
+                'upload_date': '20131002',
+                'title': 'The Mummy’s Hand (1940)',
+            },
+        }
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('display_id')
+        video_date = mobj.group('date_y') + mobj.group('date_m') + mobj.group('date_d')
+
+        webpage = self._download_webpage(url, display_id)
+
+        playerdata_url = self._search_regex(
+            r'src="(http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=.+?)"',
+            webpage, 'player data URL')
+        video_title = self._html_search_regex(
+            r'<title>(?P<title>.+?)\|', webpage, 'title')
+        video_description = self._html_search_regex(
+            r'<div class="entry-content">(?P<description>.+?)</div>',
+            webpage, 'description', flags=re.DOTALL, fatal=False)
+        video_thumbnail = self._og_search_thumbnail(webpage)
+
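+        # Delegate to ScreenwaveMediaIE via url_transparent: the player page
+        # supplies the formats while this page supplies the metadata.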
+        return {
+            '_type': 'url_transparent',
+            'display_id': display_id,
+            'title': video_title,
+            'description': video_description,
+            'upload_date': video_date,
+            'thumbnail': video_thumbnail,
+            'url': playerdata_url,
+        }
+
+
+class TeamFourIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?teamfourstar\.com/video/(?P<id>[a-z0-9\-]+)/?'
+    _TEST = {
+        'url': 'http://teamfourstar.com/video/a-moment-with-tfs-episode-4/',
+        'info_dict': {
+            'id': 'TeamFourStar-5292a02f20bfa',
+            'ext': 'mp4',
+            'upload_date': '20130401',
+            'description': 'Check out this and more on our website: http://teamfourstar.com\nTFS Store: http://sharkrobot.com/team-four-star\nFollow on Twitter: http://twitter.com/teamfourstar\nLike on FB: http://facebook.com/teamfourstar',
+            'title': 'A Moment With TFS Episode 4',
+        }
+    }
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+
+        playerdata_url = self._search_regex(
+            r'src="(http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=.+?)"',
+            webpage, 'player data URL')
+
+        video_title = self._html_search_regex(
+            r'<div class="heroheadingtitle">(?P<title>.+?)</div>',
+            webpage, 'title')
+        video_date = unified_strdate(self._html_search_regex(
+            r'<div class="heroheadingdate">(?P<date>.+?)</div>',
+            webpage, 'date', fatal=False))
+        video_description = self._html_search_regex(
+            r'(?s)<div class="postcontent">(?P<description>.+?)</div>',
+            webpage, 'description', fatal=False)
+        video_thumbnail = self._og_search_thumbnail(webpage)
+
+        return {
+            '_type': 'url_transparent',
+            'display_id': display_id,
+            'title': video_title,
+            'description': video_description,
+            'upload_date': video_date,
+            'thumbnail': video_thumbnail,
+            'url': playerdata_url,
+        }
index 1dc551d5c7f274b717369952824050812c21fee3..16dc3736b48bfb15a94b98713beef4757446b642 100644 (file)
@@ -67,5 +67,3 @@ class ServingSysIE(InfoExtractor):
             'title': title,
             'entries': entries,
         }
-
\ No newline at end of file
diff --git a/youtube_dl/extractor/sexu.py b/youtube_dl/extractor/sexu.py
new file mode 100644 (file)
index 0000000..6365a87
--- /dev/null
@@ -0,0 +1,61 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class SexuIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?sexu\.com/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://sexu.com/961791/',
+        'md5': 'ff615aca9691053c94f8f10d96cd7884',
+        'info_dict': {
+            'id': '961791',
+            'ext': 'mp4',
+            'title': 'md5:4d05a19a5fc049a63dbbaf05fb71d91b',
+            'description': 'md5:c5ed8625eb386855d5a7967bd7b77a54',
+            'categories': list,  # NSFW
+            'thumbnail': 're:https?://.*\.jpg$',
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        quality_arr = self._search_regex(
+            r'sources:\s*\[([^\]]+)\]', webpage, 'format string')
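+        # The sources array holds JWPlayer-style {"file", "label"} pairs;
+        # the label (e.g. "720p") encodes the frame height.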
+        formats = [{
+            'url': fmt[0].replace('\\', ''),
+            'format_id': fmt[1],
+            'height': int(fmt[1][:3]),
+        } for fmt in re.findall(r'"file":"([^"]+)","label":"([^"]+)"', quality_arr)]
+        self._sort_formats(formats)
+
+        title = self._html_search_regex(
+            r'<title>([^<]+)\s*-\s*Sexu\.Com</title>', webpage, 'title')
+
+        description = self._html_search_meta(
+            'description', webpage, 'description')
+
+        thumbnail = self._html_search_regex(
+            r'image:\s*"([^"]+)"',
+            webpage, 'thumbnail', fatal=False)
+
+        categories_str = self._html_search_meta(
+            'keywords', webpage, 'categories')
+        categories = (
+            None if categories_str is None
+            else categories_str.split(','))
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'categories': categories,
+            'formats': formats,
+            'age_limit': 18,
+        }
diff --git a/youtube_dl/extractor/sexykarma.py b/youtube_dl/extractor/sexykarma.py
new file mode 100644 (file)
index 0000000..c833fc8
--- /dev/null
@@ -0,0 +1,117 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    unified_strdate,
+    parse_duration,
+    int_or_none,
+)
+
+
+class SexyKarmaIE(InfoExtractor):
+    IE_DESC = 'Sexy Karma and Watch Indian Porn'
+    _VALID_URL = r'https?://(?:www\.)?(?:sexykarma\.com|watchindianporn\.net)/(?:[^/]+/)*video/(?P<display_id>[^/]+)-(?P<id>[a-zA-Z0-9]+)\.html'
+    _TESTS = [{
+        'url': 'http://www.sexykarma.com/gonewild/video/taking-a-quick-pee-yHI70cOyIHt.html',
+        'md5': 'b9798e7d1ef1765116a8f516c8091dbd',
+        'info_dict': {
+            'id': 'yHI70cOyIHt',
+            'display_id': 'taking-a-quick-pee',
+            'ext': 'mp4',
+            'title': 'Taking a quick pee.',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'uploader': 'wildginger7',
+            'upload_date': '20141007',
+            'duration': 22,
+            'view_count': int,
+            'comment_count': int,
+            'categories': list,
+        }
+    }, {
+        'url': 'http://www.sexykarma.com/gonewild/video/pot-pixie-tribute-8Id6EZPbuHf.html',
+        'md5': 'dd216c68d29b49b12842b9babe762a5d',
+        'info_dict': {
+            'id': '8Id6EZPbuHf',
+            'display_id': 'pot-pixie-tribute',
+            'ext': 'mp4',
+            'title': 'pot_pixie tribute',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'uploader': 'banffite',
+            'upload_date': '20141013',
+            'duration': 16,
+            'view_count': int,
+            'comment_count': int,
+            'categories': list,
+        }
+    }, {
+        'url': 'http://www.watchindianporn.net/video/desi-dancer-namrata-stripping-completely-nude-and-dancing-on-a-hot-number-dW2mtctxJfs.html',
+        'md5': '9afb80675550406ed9a63ac2819ef69d',
+        'info_dict': {
+            'id': 'dW2mtctxJfs',
+            'display_id': 'desi-dancer-namrata-stripping-completely-nude-and-dancing-on-a-hot-number',
+            'ext': 'mp4',
+            'title': 'Desi dancer namrata stripping completely nude and dancing on a hot number',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'uploader': 'Don',
+            'upload_date': '20140213',
+            'duration': 83,
+            'view_count': int,
+            'comment_count': int,
+            'categories': list,
+        }
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
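+        # The raw video URL is embedded as the argument of a JavaScript
+        # escape() call in the player setup.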
+        video_url = self._html_search_regex(
+            r"url: escape\('([^']+)'\)", webpage, 'url')
+
+        title = self._html_search_regex(
+            r'<h2 class="he2"><span>(.*?)</span>',
+            webpage, 'title')
+        thumbnail = self._html_search_regex(
+            r'<span id="container"><img\s+src="([^"]+)"',
+            webpage, 'thumbnail', fatal=False)
+
+        uploader = self._html_search_regex(
+            r'class="aupa">\s*(.*?)</a>',
+            webpage, 'uploader')
+        upload_date = unified_strdate(self._html_search_regex(
+            r'Added: <strong>(.+?)</strong>', webpage, 'upload date', fatal=False))
+
+        duration = parse_duration(self._search_regex(
+            r'<td>Time:\s*</td>\s*<td align="right"><span>\s*(.+?)\s*</span>',
+            webpage, 'duration', fatal=False))
+
+        view_count = int_or_none(self._search_regex(
+            r'<td>Views:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
+            webpage, 'view count', fatal=False))
+        comment_count = int_or_none(self._search_regex(
+            r'<td>Comments:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
+            webpage, 'comment count', fatal=False))
+
+        categories = re.findall(
+            r'<a href="[^"]+/search/video/desi"><span>([^<]+)</span></a>',
+            webpage)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'url': video_url,
+            'title': title,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'upload_date': upload_date,
+            'duration': duration,
+            'view_count': view_count,
+            'comment_count': comment_count,
+            'categories': categories,
+        }
index badba2ac61ca5f46db325e5064f6e75425c60cbb..26ced716e8a875f1c4c5c9527b856475dce83f9e 100644 (file)
@@ -4,10 +4,12 @@ import re
 import base64
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_parse,
+    compat_urllib_request,
+)
 from ..utils import (
     ExtractorError,
-    compat_urllib_request,
-    compat_urllib_parse,
     int_or_none,
 )
 
@@ -26,26 +28,30 @@ class SharedIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        page = self._download_webpage(url, video_id)
-
-        if re.search(r'>File does not exist<', page) is not None:
-            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
 
-        download_form = dict(re.findall(r'<input type="hidden" name="([^"]+)" value="([^"]*)"', page))
+        if '>File does not exist<' in webpage:
+            raise ExtractorError(
+                'Video %s does not exist' % video_id, expected=True)
 
-        request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(download_form))
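+        # Collect the page's hidden <input> fields and POST them back to
+        # the same URL to unlock the actual video page.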
+        download_form = dict(re.findall(
+            r'<input type="hidden" name="([^"]+)" value="([^"]*)"', webpage))
+        request = compat_urllib_request.Request(
+            url, compat_urllib_parse.urlencode(download_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 
-        video_page = self._download_webpage(request, video_id, 'Downloading video page')
+        video_page = self._download_webpage(
+            request, video_id, 'Downloading video page')
 
-        video_url = self._html_search_regex(r'data-url="([^"]+)"', video_page, 'video URL')
-        title = base64.b64decode(self._html_search_meta('full:title', page, 'title')).decode('utf-8')
-        filesize = int_or_none(self._html_search_meta('full:size', page, 'file size', fatal=False))
+        video_url = self._html_search_regex(
+            r'data-url="([^"]+)"', video_page, 'video URL')
+        title = base64.b64decode(self._html_search_meta(
+            'full:title', webpage, 'title')).decode('utf-8')
+        filesize = int_or_none(self._html_search_meta(
+            'full:size', webpage, 'file size', fatal=False))
         thumbnail = self._html_search_regex(
-            r'data-poster="([^"]+)"', video_page, 'thumbnail', fatal=False, default=None)
+            r'data-poster="([^"]+)"', video_page, 'thumbnail', default=None)
 
         return {
             'id': video_id,
@@ -54,4 +60,4 @@ class SharedIE(InfoExtractor):
             'filesize': filesize,
             'title': title,
             'thumbnail': thumbnail,
-        }
\ No newline at end of file
+        }
index 7531e8325bf88e3d89958dca1107334c41b78c6c..ac3e3adf22ad194a8af3e833ae4d8acf7484e8b4 100644 (file)
@@ -4,9 +4,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     parse_duration,
 )
 
index 2909ef18b51a5ac6dadc4eec33bd05522443da8c..a63d126d4560dda83133fa6280116ca517e71bdc 100644 (file)
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
     compat_urllib_parse,
 )
@@ -46,7 +46,7 @@ class SinaIE(InfoExtractor):
     def _extract_video(self, video_id):
         data = compat_urllib_parse.urlencode({'vid': video_id})
         url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data,
-            video_id, 'Downloading video url')
+                                     video_id, 'Downloading video url')
         image_page = self._download_webpage(
             'http://interface.video.sina.com.cn/interface/common/getVideoImage.php?%s' % data,
             video_id, 'Downloading thumbnail info')
index 53c3c9220374737b88dc516ec810ecb6865b74f7..e7d776e7bd8bd3334ff0da1203cd91d52508a6ef 100644 (file)
@@ -4,8 +4,10 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urlparse,
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -39,7 +41,7 @@ class SlideshareIE(InfoExtractor):
         ext = info['jsplayer']['video_extension']
         video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext)
         description = self._html_search_regex(
-            r'<p\s+(?:style="[^"]*"\s+)?class="description.*?"[^>]*>(.*?)</p>', webpage,
+            r'<p\s+(?:style="[^"]*"\s+)?class=".*?description.*?"[^>]*>(.*?)</p>', webpage,
             'description', fatal=False)
 
         return {
index e6e7d086503a04a3fda862f601e109003169b9d7..3df71304dafc9c9e353923f6769c88e1fcf8c5ff 100644 (file)
@@ -26,7 +26,7 @@ class SlutloadIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         video_title = self._html_search_regex(r'<h1><strong>([^<]+)</strong>',
-            webpage, 'title').strip()
+                                              webpage, 'title').strip()
 
         video_url = self._html_search_regex(
             r'(?s)<div id="vidPlayer"\s+data-url="([^"]+)"',
index 9bd5defa7ac5e171904eb681015e9bcf1661acb9..d031fe40167f7ff704c26fde3972d4fa45f1b3b5 100644 (file)
@@ -1,26 +1,27 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import os.path
 import re
 import json
 import hashlib
 import uuid
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
-    url_basename,
     int_or_none,
+    unified_strdate,
 )
 
 
 class SmotriIE(InfoExtractor):
     IE_DESC = 'Smotri.com'
     IE_NAME = 'smotri'
-    _VALID_URL = r'^https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<videoid>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})'
+    _VALID_URL = r'^https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<id>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})'
     _NETRC_MACHINE = 'smotri'
 
     _TESTS = [
@@ -35,7 +36,6 @@ class SmotriIE(InfoExtractor):
                 'uploader': 'rbc2008',
                 'uploader_id': 'rbc08',
                 'upload_date': '20131118',
-                'description': 'катастрофа с камер видеонаблюдения, видео катастрофа с камер видеонаблюдения',
                 'thumbnail': 'http://frame6.loadup.ru/8b/a9/2610366.3.3.jpg',
             },
         },
@@ -50,7 +50,6 @@ class SmotriIE(InfoExtractor):
                 'uploader': 'Support Photofile@photofile',
                 'uploader_id': 'support-photofile',
                 'upload_date': '20070704',
-                'description': 'test, видео test',
                 'thumbnail': 'http://frame4.loadup.ru/03/ed/57591.2.3.jpg',
             },
         },
@@ -66,7 +65,6 @@ class SmotriIE(InfoExtractor):
                 'uploader_id': 'timoxa40',
                 'upload_date': '20100404',
                 'thumbnail': 'http://frame7.loadup.ru/af/3f/1390466.3.3.jpg',
-                'description': 'TOCCA_A_NOI_-_LE_COSE_NON_VANNO_CAMBIAMOLE_ORA-1, видео TOCCA_A_NOI_-_LE_COSE_NON_VANNO_CAMBIAMOLE_ORA-1',
             },
             'params': {
                 'videopassword': 'qwerty',
@@ -85,7 +83,6 @@ class SmotriIE(InfoExtractor):
                 'upload_date': '20101001',
                 'thumbnail': 'http://frame3.loadup.ru/75/75/1540889.1.3.jpg',
                 'age_limit': 18,
-                'description': 'этот ролик не покажут по ТВ, видео этот ролик не покажут по ТВ',
             },
             'params': {
                 'videopassword': '333'
@@ -102,17 +99,11 @@ class SmotriIE(InfoExtractor):
                 'uploader': 'HannahL',
                 'uploader_id': 'lisaha95',
                 'upload_date': '20090331',
-                'description': 'Shakira - Don\'t Bother, видео Shakira - Don\'t Bother',
                 'thumbnail': 'http://frame8.loadup.ru/44/0b/918809.7.3.jpg',
             },
         },
     ]
 
-    _SUCCESS = 0
-    _PASSWORD_NOT_VERIFIED = 1
-    _PASSWORD_DETECTED = 2
-    _VIDEO_NOT_FOUND = 3
-
     @classmethod
     def _extract_url(cls, webpage):
         mobj = re.search(
@@ -137,44 +128,44 @@ class SmotriIE(InfoExtractor):
         return self._html_search_meta(name, html, display_name)
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
-        real_video_id = mobj.group('realvideoid')
+        video_id = self._match_id(url)
+
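+        # Query Smotri's bot endpoint directly; it returns the full video
+        # JSON in one request (the old code assembled it from vt.php).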
+        video_form = {
+            'ticket': video_id,
+            'video_url': '1',
+            'frame_url': '1',
+            'devid': 'LoadupFlashPlayer',
+            'getvideoinfo': '1',
+        }
+
+        request = compat_urllib_request.Request(
+            'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form))
+        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+
+        video = self._download_json(request, video_id, 'Downloading video JSON')
 
-        # Download video JSON data
-        video_json_url = 'http://smotri.com/vt.php?id=%s' % real_video_id
-        video_json_page = self._download_webpage(video_json_url, video_id, 'Downloading video JSON')
-        video_json = json.loads(video_json_page)
+        if video.get('_moderate_no') or not video.get('moderated'):
+            raise ExtractorError('Video %s has not been approved by moderator' % video_id, expected=True)
 
-        status = video_json['status']
-        if status == self._VIDEO_NOT_FOUND:
+        if video.get('error'):
             raise ExtractorError('Video %s does not exist' % video_id, expected=True)
-        elif status == self._PASSWORD_DETECTED: # The video is protected by a password, retry with
-                                                # video-password set
-            video_password = self._downloader.params.get('videopassword', None)
-            if not video_password:
-                raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
-            video_json_url += '&md5pass=%s' % hashlib.md5(video_password.encode('utf-8')).hexdigest()
-            video_json_page = self._download_webpage(video_json_url, video_id, 'Downloading video JSON (video-password set)')
-            video_json = json.loads(video_json_page)
-            status = video_json['status']
-            if status == self._PASSWORD_NOT_VERIFIED:
-                raise ExtractorError('Video password is invalid', expected=True)
-
-        if status != self._SUCCESS:
-            raise ExtractorError('Unexpected status value %s' % status)
-
-        # Extract the URL of the video
-        video_url = video_json['file_data']
+
+        video_url = video.get('_vidURL') or video.get('_vidURL_mp4')
+        title = video['title']
+        thumbnail = video['_imgURL']
+        upload_date = unified_strdate(video['added'])
+        uploader = video['userNick']
+        uploader_id = video['userLogin']
+        duration = int_or_none(video['duration'])
 
         # Video JSON does not provide enough meta data
         # We will extract some from the video web page instead
-        video_page_url = 'http://smotri.com/video/view/?id=%s' % video_id
-        video_page = self._download_webpage(video_page_url, video_id, 'Downloading video page')
+        webpage_url = 'http://smotri.com/video/view/?id=%s' % video_id
+        webpage = self._download_webpage(webpage_url, video_id, 'Downloading video page')
 
         # Warning if video is unavailable
         warning = self._html_search_regex(
-            r'<div class="videoUnModer">(.*?)</div>', video_page,
+            r'<div class="videoUnModer">(.*?)</div>', webpage,
             'warning message', default=None)
         if warning is not None:
             self._downloader.report_warning(
@@ -182,84 +173,32 @@ class SmotriIE(InfoExtractor):
                 (video_id, warning))
 
         # Adult content
-        if re.search('EroConfirmText">', video_page) is not None:
+        if re.search('EroConfirmText">', webpage) is not None:
             self.report_age_confirmation()
             confirm_string = self._html_search_regex(
                 r'<a href="/video/view/\?id=%s&confirm=([^"]+)" title="[^"]+">' % video_id,
-                video_page, 'confirm string')
-            confirm_url = video_page_url + '&confirm=%s' % confirm_string
-            video_page = self._download_webpage(confirm_url, video_id, 'Downloading video page (age confirmed)')
+                webpage, 'confirm string')
+            confirm_url = webpage_url + '&confirm=%s' % confirm_string
+            webpage = self._download_webpage(confirm_url, video_id, 'Downloading video page (age confirmed)')
             adult_content = True
         else:
             adult_content = False
 
-        # Extract the rest of meta data
-        video_title = self._search_meta('name', video_page, 'title')
-        if not video_title:
-            video_title = os.path.splitext(url_basename(video_url))[0]
-
-        video_description = self._search_meta('description', video_page)
-        END_TEXT = ' на сайте Smotri.com'
-        if video_description and video_description.endswith(END_TEXT):
-            video_description = video_description[:-len(END_TEXT)]
-        START_TEXT = 'Смотреть онлайн ролик '
-        if video_description and video_description.startswith(START_TEXT):
-            video_description = video_description[len(START_TEXT):]
-        video_thumbnail = self._search_meta('thumbnail', video_page)
-
-        upload_date_str = self._search_meta('uploadDate', video_page, 'upload date')
-        if upload_date_str:
-            upload_date_m = re.search(r'(?P<year>\d{4})\.(?P<month>\d{2})\.(?P<day>\d{2})T', upload_date_str)
-            video_upload_date = (
-                (
-                    upload_date_m.group('year') +
-                    upload_date_m.group('month') +
-                    upload_date_m.group('day')
-                )
-                if upload_date_m else None
-            )
-        else:
-            video_upload_date = None
-
-        duration_str = self._search_meta('duration', video_page)
-        if duration_str:
-            duration_m = re.search(r'T(?P<hours>[0-9]{2})H(?P<minutes>[0-9]{2})M(?P<seconds>[0-9]{2})S', duration_str)
-            video_duration = (
-                (
-                    (int(duration_m.group('hours')) * 60 * 60) +
-                    (int(duration_m.group('minutes')) * 60) +
-                    int(duration_m.group('seconds'))
-                )
-                if duration_m else None
-            )
-        else:
-            video_duration = None
-
-        video_uploader = self._html_search_regex(
-            '<div class="DescrUser"><div>Автор.*?onmouseover="popup_user_info[^"]+">(.*?)</a>',
-            video_page, 'uploader', fatal=False, flags=re.MULTILINE|re.DOTALL)
-
-        video_uploader_id = self._html_search_regex(
-            '<div class="DescrUser"><div>Автор.*?onmouseover="popup_user_info\\(.*?\'([^\']+)\'\\);">',
-            video_page, 'uploader id', fatal=False, flags=re.MULTILINE|re.DOTALL)
-
-        video_view_count = self._html_search_regex(
+        view_count = self._html_search_regex(
             'Общее количество просмотров.*?<span class="Number">(\\d+)</span>',
-            video_page, 'view count', fatal=False, flags=re.MULTILINE|re.DOTALL)
+            webpage, 'view count', fatal=False, flags=re.MULTILINE | re.DOTALL)
 
         return {
             'id': video_id,
             'url': video_url,
-            'title': video_title,
-            'thumbnail': video_thumbnail,
-            'description': video_description,
-            'uploader': video_uploader,
-            'upload_date': video_upload_date,
-            'uploader_id': video_uploader_id,
-            'duration': video_duration,
-            'view_count': int_or_none(video_view_count),
+            'title': title,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'upload_date': upload_date,
+            'uploader_id': uploader_id,
+            'duration': duration,
+            'view_count': int_or_none(view_count),
             'age_limit': 18 if adult_content else 0,
-            'video_page_url': video_page_url
         }
 
 
@@ -275,7 +214,7 @@ class SmotriCommunityIE(InfoExtractor):
         },
         'playlist_mincount': 4,
     }
-    
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         community_id = mobj.group('communityid')
@@ -337,15 +276,18 @@ class SmotriBroadcastIE(InfoExtractor):
         broadcast_page = self._download_webpage(broadcast_url, broadcast_id, 'Downloading broadcast page')
 
         if re.search('>Режиссер с логином <br/>"%s"<br/> <span>не существует<' % broadcast_id, broadcast_page) is not None:
-            raise ExtractorError('Broadcast %s does not exist' % broadcast_id, expected=True)
+            raise ExtractorError(
+                'Broadcast %s does not exist' % broadcast_id, expected=True)
 
         # Adult content
         if re.search('EroConfirmText">', broadcast_page) is not None:
 
             (username, password) = self._get_login_info()
             if username is None:
-                raise ExtractorError('Erotic broadcasts allowed only for registered users, '
-                    'use --username and --password options to provide account credentials.', expected=True)
+                raise ExtractorError(
+                    'Erotic broadcasts are allowed only for registered users; '
+                    'use the --username and --password options to provide account credentials.',
+                    expected=True)
 
             login_form = {
                 'login-hint53': '1',
@@ -354,9 +296,11 @@ class SmotriBroadcastIE(InfoExtractor):
                 'password': password,
             }
 
-            request = compat_urllib_request.Request(broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form))
+            request = compat_urllib_request.Request(
+                broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form))
             request.add_header('Content-Type', 'application/x-www-form-urlencoded')
-            broadcast_page = self._download_webpage(request, broadcast_id, 'Logging in and confirming age')
+            broadcast_page = self._download_webpage(
+                request, broadcast_id, 'Logging in and confirming age')
 
             if re.search('>Неверный логин или пароль<', broadcast_page) is not None:
                 raise ExtractorError('Unable to log in: bad username or password', expected=True)
@@ -366,7 +310,7 @@ class SmotriBroadcastIE(InfoExtractor):
             adult_content = False
 
         ticket = self._html_search_regex(
-            'window\.broadcast_control\.addFlashVar\\(\'file\', \'([^\']+)\'\\);',
+            r"window\.broadcast_control\.addFlashVar\('file'\s*,\s*'([^']+)'\)",
             broadcast_page, 'broadcast ticket')
 
         url = 'http://smotri.com/broadcast/view/url/?ticket=%s' % ticket
@@ -375,26 +319,31 @@ class SmotriBroadcastIE(InfoExtractor):
         if broadcast_password:
             url += '&pass=%s' % hashlib.md5(broadcast_password.encode('utf-8')).hexdigest()
 
-        broadcast_json_page = self._download_webpage(url, broadcast_id, 'Downloading broadcast JSON')
+        broadcast_json_page = self._download_webpage(
+            url, broadcast_id, 'Downloading broadcast JSON')
 
         try:
             broadcast_json = json.loads(broadcast_json_page)
 
             protected_broadcast = broadcast_json['_pass_protected'] == 1
             if protected_broadcast and not broadcast_password:
-                raise ExtractorError('This broadcast is protected by a password, use the --video-password option', expected=True)
+                raise ExtractorError(
+                    'This broadcast is protected by a password; use the --video-password option',
+                    expected=True)
 
             broadcast_offline = broadcast_json['is_play'] == 0
             if broadcast_offline:
                 raise ExtractorError('Broadcast %s is offline' % broadcast_id, expected=True)
 
             rtmp_url = broadcast_json['_server']
-            if not rtmp_url.startswith('rtmp://'):
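+            # Derive the rtmp app path from the server URL; it is combined
+            # with '_vidURL' and handed to the rtmp downloader below.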
+            mobj = re.search(r'^rtmp://[^/]+/(?P<app>.+)/?$', rtmp_url)
+            if not mobj:
                 raise ExtractorError('Unexpected broadcast rtmp URL')
 
             broadcast_playpath = broadcast_json['_streamName']
+            broadcast_app = '%s/%s' % (mobj.group('app'), broadcast_json['_vidURL'])
             broadcast_thumbnail = broadcast_json['_imgURL']
-            broadcast_title = broadcast_json['title']
+            broadcast_title = self._live_title(broadcast_json['title'])
             broadcast_description = broadcast_json['description']
             broadcaster_nick = broadcast_json['nick']
             broadcaster_login = broadcast_json['login']
@@ -415,6 +364,9 @@ class SmotriBroadcastIE(InfoExtractor):
             'age_limit': 18 if adult_content else 0,
             'ext': 'flv',
             'play_path': broadcast_playpath,
+            'player_url': 'http://pics.smotri.com/broadcast_play.swf',
+            'app': broadcast_app,
             'rtmp_live': True,
-            'rtmp_conn': rtmp_conn
+            'rtmp_conn': rtmp_conn,
+            'is_live': True,
         }
index c663e56d42ed02645313637cd7866a9071d10ae7..7d3c0e93783afeac3d8e939e0cf317177df4ca9f 100644 (file)
@@ -1,13 +1,16 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-from ..utils import (
-    ExtractorError,
+import re
+
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     determine_ext,
+    ExtractorError,
 )
-import re
 
 from .common import InfoExtractor
 
@@ -27,9 +30,7 @@ class SockshareIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         url = 'http://sockshare.com/file/%s' % video_id
         webpage = self._download_webpage(url, video_id)
 
index bebcafb62bea3fe2e180818256475262177f902f..07f514a46246657206ae3fd31e19b1d2932e6f15 100644 (file)
@@ -1,4 +1,5 @@
 # encoding: utf-8
+from __future__ import unicode_literals
 
 import json
 import re
@@ -11,13 +12,14 @@ class SohuIE(InfoExtractor):
     _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'
 
     _TEST = {
-        u'url': u'http://tv.sohu.com/20130724/n382479172.shtml#super',
-        u'file': u'382479172.mp4',
-        u'md5': u'bde8d9a6ffd82c63a1eefaef4eeefec7',
-        u'info_dict': {
-            u'title': u'MV:Far East Movement《The Illest》',
+        'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
+        'md5': 'bde8d9a6ffd82c63a1eefaef4eeefec7',
+        'info_dict': {
+            'id': '382479172',
+            'ext': 'mp4',
+            'title': 'MV:Far East Movement《The Illest》',
         },
-        u'skip': u'Only available from China',
+        'skip': 'Only available from China',
     }
 
     def _real_extract(self, url):
@@ -26,11 +28,11 @@ class SohuIE(InfoExtractor):
             if mytv:
                 base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
             else:
-                base_data_url = u'http://hot.vrs.sohu.com/vrs_flash.action?vid='
+                base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
             data_url = base_data_url + str(vid_id)
             data_json = self._download_webpage(
                 data_url, video_id,
-                note=u'Downloading JSON data for ' + str(vid_id))
+                note='Downloading JSON data for ' + str(vid_id))
             return json.loads(data_json)
 
         mobj = re.match(self._VALID_URL, url)
@@ -39,11 +41,11 @@ class SohuIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
         raw_title = self._html_search_regex(r'(?s)<title>(.+?)</title>',
-                                            webpage, u'video title')
+                                            webpage, 'video title')
         title = raw_title.partition('-')[0].strip()
 
         vid = self._html_search_regex(r'var vid ?= ?["\'](\d+)["\']', webpage,
-                                      u'video path')
+                                      'video path')
         data = _fetch_data(vid, mytv)
 
         QUALITIES = ('ori', 'super', 'high', 'nor')
@@ -51,7 +53,7 @@ class SohuIE(InfoExtractor):
                    for q in QUALITIES
                    if data['data'][q + 'Vid'] != 0]
         if not vid_ids:
-            raise ExtractorError(u'No formats available for this video')
+            raise ExtractorError('No formats available for this video')
 
         # For now, we just pick the highest available quality
         vid_id = vid_ids[-1]
@@ -69,7 +71,7 @@ class SohuIE(InfoExtractor):
                         (allot, prot, clipsURL[i], su[i]))
             part_str = self._download_webpage(
                 part_url, video_id,
-                note=u'Downloading part %d of %d' % (i+1, part_count))
+                note='Downloading part %d of %d' % (i + 1, part_count))
 
             part_info = part_str.split('|')
             video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
index 4719ba45c5f754338f11da9a038c506ad001023c..5d60c4939588ad543840b501ef0e552ad0b1e673 100644 (file)
@@ -5,11 +5,12 @@ import re
 import itertools
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
     compat_urlparse,
     compat_urllib_parse,
-
+)
+from ..utils import (
     ExtractorError,
     int_or_none,
     unified_strdate,
@@ -32,7 +33,7 @@ class SoundcloudIE(InfoExtractor):
                             (?P<title>[\w\d-]+)/?
                             (?P<token>[^?]+?)?(?:[?].*)?$)
                        |(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+)
-                          (?:/?\?secret_token=(?P<secret_token>[^&]+?))?$)
+                          (?:/?\?secret_token=(?P<secret_token>[^&]+))?)
                        |(?P<player>(?:w|player|p.)\.soundcloud\.com/player/?.*?url=.*)
                     )
                     '''
@@ -40,14 +41,15 @@ class SoundcloudIE(InfoExtractor):
     _TESTS = [
         {
             'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
-            'file': '62986583.mp3',
             'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
             'info_dict': {
-                "upload_date": "20121011",
-                "description": "No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o'd",
-                "uploader": "E.T. ExTerrestrial Music",
-                "title": "Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1",
-                "duration": 143,
+                'id': '62986583',
+                'ext': 'mp3',
+                'upload_date': '20121011',
+                'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
+                'uploader': 'E.T. ExTerrestrial Music',
+                'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
+                'duration': 143,
             }
         },
         # not streamable song
@@ -103,7 +105,7 @@ class SoundcloudIE(InfoExtractor):
                 'id': '128590877',
                 'ext': 'mp3',
                 'title': 'Bus Brakes',
-                'description': 'md5:0170be75dd395c96025d210d261c784e',
+                'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
                 'uploader': 'oddsamples',
                 'upload_date': '20140109',
                 'duration': 17,
@@ -140,6 +142,7 @@ class SoundcloudIE(InfoExtractor):
             'description': info['description'],
             'thumbnail': thumbnail,
             'duration': int_or_none(info.get('duration'), 1000),
+            'webpage_url': info.get('permalink_url'),
         }
         formats = []
         if info.get('downloadable', False):
@@ -157,7 +160,7 @@ class SoundcloudIE(InfoExtractor):
 
         # We have to retrieve the url
         streams_url = ('http://api.soundcloud.com/i1/tracks/{0}/streams?'
-            'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
+                       'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
         format_dict = self._download_json(
             streams_url,
             track_id, 'Downloading track url')
@@ -222,14 +225,14 @@ class SoundcloudIE(InfoExtractor):
             # extract uploader (which is in the url)
             uploader = mobj.group('uploader')
             # extract simple title (uploader + slug of song title)
-            slug_title =  mobj.group('title')
+            slug_title = mobj.group('title')
             token = mobj.group('token')
             full_title = resolve_title = '%s/%s' % (uploader, slug_title)
             if token:
                 resolve_title += '/%s' % token
-    
+
             self.report_resolve(full_title)
-    
+
             url = 'http://soundcloud.com/%s' % resolve_title
             info_json_url = self._resolv_url(url)
         info = self._download_json(info_json_url, full_title, 'Downloading info JSON')
@@ -369,7 +372,7 @@ class SoundcloudPlaylistIE(SoundcloudIE):
 
         entries = [
             self._extract_info_dict(t, quiet=True, secret_token=token)
-                for t in data['tracks']]
+            for t in data['tracks']]
 
         return {
             '_type': 'playlist',
index d34aefeaa24a2b8b307005e9164ad9349b638e88..c2d0d36a6935c40553419621678ce8987c4f2dbd 100644 (file)
@@ -33,5 +33,6 @@ class SpaceIE(InfoExtractor):
             # Other videos works fine with the info from the object
             brightcove_url = BrightcoveIE._extract_brightcove_url(webpage)
         if brightcove_url is None:
-            raise ExtractorError(u'The webpage does not contain a video', expected=True)
+            raise ExtractorError(
+                'The webpage does not contain a video', expected=True)
         return self.url_result(brightcove_url, BrightcoveIE.ie_key())
index 94602e89e56549243ed38ecb107ef842cd8ebd46..b936202f6f3005fe9ae085724566d709c6a484cc 100644 (file)
@@ -3,12 +3,14 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
+    compat_urllib_parse,
     compat_urllib_parse_urlparse,
     compat_urllib_request,
-    compat_urllib_parse,
-    unified_strdate,
+)
+from ..utils import (
     str_to_int,
+    unified_strdate,
 )
 from ..aes import aes_decrypt_text
 
index 9ed7d3b39e227806971fe98f43e1c1018b84ad3c..1e55a9ffb5748b70969de11886c13720ff936be7 100644 (file)
@@ -4,11 +4,12 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import compat_urlparse
+from ..compat import compat_urlparse
+from .spiegeltv import SpiegeltvIE
 
 
 class SpiegelIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
+    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed)?(?:\.html)?(?:#.*)?$'
     _TESTS = [{
         'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
         'md5': '2c2754212136f35fb4b19767d242f66e',
@@ -29,16 +30,28 @@ class SpiegelIE(InfoExtractor):
             'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
             'duration': 983,
         },
+    }, {
+        'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html',
+        'md5': 'd8eeca6bfc8f1cd6f490eb1f44695d51',
+        'info_dict': {
+            'id': '1519126',
+            'ext': 'mp4',
+            'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
+            'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
+        }
     }]
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('videoID')
+        video_id = self._match_id(url)
+        webpage, handle = self._download_webpage_handle(url, video_id)
 
-        webpage = self._download_webpage(url, video_id)
+        # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html
+        if SpiegeltvIE.suitable(handle.geturl()):
+            return self.url_result(handle.geturl(), 'Spiegeltv')
 
-        title = self._html_search_regex(
-            r'<div class="module-title">(.*?)</div>', webpage, 'title')
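+        # The module title may sit in an <h1> or a <div> and span several
+        # lines; collapse the whitespace afterwards.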
+        title = re.sub(r'\s+', ' ', self._html_search_regex(
+            r'(?s)<(?:h1|div) class="module-title"[^>]*>(.*?)</(?:h1|div)>',
+            webpage, 'title'))
         description = self._html_search_meta('description', webpage, 'description')
 
         base_url = self._search_regex(
@@ -79,7 +92,7 @@ class SpiegelArticleIE(InfoExtractor):
     _VALID_URL = 'https?://www\.spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
     IE_NAME = 'Spiegel:Article'
     IE_DESC = 'Articles on spiegel.de'
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
         'info_dict': {
             'id': '1516455',
@@ -87,20 +100,34 @@ class SpiegelArticleIE(InfoExtractor):
             'title': 'Faszination Badminton: Nennt es bloß nicht Federball',
             'description': 're:^Patrick Kämnitz gehört.{100,}',
         },
-    }
+    }, {
+        'url': 'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html',
+        'info_dict': {},
-    def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        'playlist_count': 6,
+    }]
 
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
+
+        # Single video on top of the page
         video_link = self._search_regex(
             r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
-            'video page URL')
-        video_url = compat_urlparse.urljoin(
-            self.http_scheme() + '//spiegel.de/', video_link)
-
-        return {
-            '_type': 'url',
-            'url': video_url,
-        }
+            'video page URL', default=None)
+        if video_link:
+            video_url = compat_urlparse.urljoin(
+                self.http_scheme() + '//spiegel.de/', video_link)
+            return self.url_result(video_url)
+
+        # Multiple embedded videos
+        embeds = re.findall(
+            r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"',
+            webpage)
+        entries = [
+            self.url_result(compat_urlparse.urljoin(
+                self.http_scheme() + '//spiegel.de/', embed_path))
+            for embed_path in embeds
+        ]
+        return self.playlist_result(entries)
index 7f388aced0800ebc1881de06b1ddded61afba926..98cf92d89a1151edfd11b8f15a86eeaa6a83178d 100644 (file)
@@ -1,13 +1,13 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
 from .common import InfoExtractor
+from ..utils import float_or_none
 
 
 class SpiegeltvIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/filme/(?P<id>[\-a-z0-9]+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/(?:#/)?filme/(?P<id>[\-a-z0-9]+)'
+    _TESTS = [{
         'url': 'http://www.spiegel.tv/filme/flug-mh370/',
         'info_dict': {
             'id': 'flug-mh370',
@@ -20,12 +20,15 @@ class SpiegeltvIE(InfoExtractor):
             # rtmp download
             'skip_download': True,
         }
-    }
+    }, {
+        'url': 'http://www.spiegel.tv/#/filme/alleskino-die-wahrheit-ueber-maenner/',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
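+        # spiegel.tv serves the same page under an AJAX-style '/#/' route;
+        # normalize it away before extracting the id.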
+        if '/#/' in url:
+            url = url.replace('/#/', '/')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_regex(r'<h1.*?>(.*?)</h1>', webpage, 'title')
 
@@ -61,12 +64,8 @@ class SpiegeltvIE(InfoExtractor):
             })
 
         description = media_json['subtitle']
-        duration = media_json['duration_in_ms'] / 1000.
-
-        if is_wide:
-            format = '16x9'
-        else:
-            format = '4x3'
+        duration = float_or_none(media_json.get('duration_in_ms'), scale=1000)
+        format = '16x9' if is_wide else '4x3'
 
         url = server + 'mp4:' + uuid + '_spiegeltv_0500_' + format + '.m4v'
 
@@ -78,4 +77,4 @@ class SpiegeltvIE(InfoExtractor):
             'description': description,
             'duration': duration,
             'thumbnails': thumbnails
-        }
\ No newline at end of file
+        }
index 3f680bfc6322d4f4190351e5fae9ca7c178accfd..dfe50ed4585b0fe876b8a300edd00a453ae4b690 100644 (file)
@@ -89,4 +89,4 @@ class Sport5IE(InfoExtractor):
             'duration': duration,
             'categories': categories,
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 19cc976e3d3c9c7cff875f26e63e25a827fb8073..becdf658f6e0ce8b209dffc0ce4c96a2857099dc 100644 (file)
@@ -7,7 +7,6 @@ from .common import InfoExtractor
 from ..utils import (
     parse_duration,
     parse_iso8601,
-    int_or_none,
 )
 
 
@@ -26,7 +25,6 @@ class SportBoxIE(InfoExtractor):
                 'timestamp': 1411896237,
                 'upload_date': '20140928',
                 'duration': 4846,
-                'view_count': int,
             },
             'params': {
                 # m3u8 download
@@ -65,8 +63,6 @@ class SportBoxIE(InfoExtractor):
             r'<span itemprop="uploadDate">([^<]+)</span>', webpage, 'timestamp', fatal=False))
         duration = parse_duration(self._html_search_regex(
             r'<meta itemprop="duration" content="PT([^"]+)">', webpage, 'duration', fatal=False))
-        view_count = int_or_none(self._html_search_regex(
-            r'<span>Просмотров: (\d+)</span>', player, 'view count', fatal=False))
 
         return {
             'id': video_id,
@@ -76,6 +72,5 @@ class SportBoxIE(InfoExtractor):
             'thumbnail': thumbnail,
             'timestamp': timestamp,
             'duration': duration,
-            'view_count': view_count,
             'formats': formats,
         }
index abb82778325fd74f55a2ae1ce00f8f98316ad0a1..2f57f5b7c76944589b3b8f091849604d9d4e3bf0 100644 (file)
@@ -4,8 +4,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
+)
+from ..utils import (
     parse_iso8601,
 )
 
@@ -93,4 +95,3 @@ class SportDeutschlandIE(InfoExtractor):
             'rtmp_live': asset.get('live'),
             'timestamp': parse_iso8601(asset.get('date')),
         }
-
diff --git a/youtube_dl/extractor/srmediathek.py b/youtube_dl/extractor/srmediathek.py
new file mode 100644 (file)
index 0000000..666a7dc
--- /dev/null
@@ -0,0 +1,43 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import js_to_json
+
+
+class SRMediathekIE(InfoExtractor):
+    IE_DESC = 'Saarländischer Rundfunk'
+    _VALID_URL = r'https?://sr-mediathek\.sr-online\.de/index\.php\?.*?&id=(?P<id>[0-9]+)'
+
+    _TEST = {
+        'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455',
+        'info_dict': {
+            'id': '28455',
+            'ext': 'mp4',
+            'title': 'sportarena (26.10.2014)',
+            'description': 'Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        murls = json.loads(js_to_json(self._search_regex(
+            r'var mediaURLs\s*=\s*(.*?);\n', webpage, 'video URLs')))
+        formats = [{'url': murl} for murl in murls]
+        self._sort_formats(formats)
+
+        title = json.loads(js_to_json(self._search_regex(
+            r'var mediaTitles\s*=\s*(.*?);\n', webpage, 'title')))[0]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
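
The new extractor pulls mediaURLs and mediaTitles out of inline JavaScript, so the literals must go through js_to_json before json.loads will accept them. A rough sketch of the round-trip on a hypothetical snippet (the real helper in youtube_dl.utils also copes with unquoted keys):

    import json
    from youtube_dl.utils import js_to_json

    # Single-quoted JS array literals are not valid JSON as-is.
    snippet = "['http://example.com/a.mp4', 'http://example.com/b.mp4']"
    murls = json.loads(js_to_json(snippet))
    formats = [{'url': murl} for murl in murls]
    print(formats[0]['url'])  # http://example.com/a.mp4
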
index 44c52c718e2090cca8f28c6542d67ae4585332e6..4a3d8bb8f267b588c59e2f16b208955a70d362d9 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -9,24 +11,23 @@ from ..utils import (
 
 
 class StanfordOpenClassroomIE(InfoExtractor):
-    IE_NAME = u'stanfordoc'
-    IE_DESC = u'Stanford Open ClassRoom'
-    _VALID_URL = r'^(?:https?://)?openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
+    IE_NAME = 'stanfordoc'
+    IE_DESC = 'Stanford Open ClassRoom'
+    _VALID_URL = r'https?://openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
     _TEST = {
-        u'url': u'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
-        u'file': u'PracticalUnix_intro-environment.mp4',
-        u'md5': u'544a9468546059d4e80d76265b0443b8',
-        u'info_dict': {
-            u"title": u"Intro Environment"
+        'url': 'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
+        'md5': '544a9468546059d4e80d76265b0443b8',
+        'info_dict': {
+            'id': 'PracticalUnix_intro-environment',
+            'ext': 'mp4',
+            'title': 'Intro Environment',
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
 
-        if mobj.group('course') and mobj.group('video'): # A specific video
+        if mobj.group('course') and mobj.group('video'):  # A specific video
             course = mobj.group('course')
             video = mobj.group('video')
             info = {
@@ -35,7 +36,6 @@ class StanfordOpenClassroomIE(InfoExtractor):
                 'upload_date': None,
             }
 
-            self.report_extraction(info['id'])
             baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
             xmlUrl = baseUrl + video + '.xml'
             mdoc = self._download_xml(xmlUrl, info['id'])
@@ -43,63 +43,49 @@ class StanfordOpenClassroomIE(InfoExtractor):
                 info['title'] = mdoc.findall('./title')[0].text
                 info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
             except IndexError:
-                raise ExtractorError(u'Invalid metadata XML file')
-            info['ext'] = info['url'].rpartition('.')[2]
-            return [info]
-        elif mobj.group('course'): # A course page
+                raise ExtractorError('Invalid metadata XML file')
+            return info
+        elif mobj.group('course'):  # A course page
             course = mobj.group('course')
             info = {
                 'id': course,
-                'type': 'playlist',
+                '_type': 'playlist',
                 'uploader': None,
                 'upload_date': None,
             }
 
-            coursepage = self._download_webpage(url, info['id'],
-                                        note='Downloading course info page',
-                                        errnote='Unable to download course info page')
+            coursepage = self._download_webpage(
+                url, info['id'],
+                note='Downloading course info page',
+                errnote='Unable to download course info page')
 
-            info['title'] = self._html_search_regex('<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])
+            info['title'] = self._html_search_regex(
+                r'<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])
 
-            info['description'] = self._html_search_regex('<description>([^<]+)</description>',
-                coursepage, u'description', fatal=False)
+            info['description'] = self._html_search_regex(
+                r'(?s)<description>([^<]+)</description>',
+                coursepage, 'description', fatal=False)
 
             links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
-            info['list'] = [
-                {
-                    'type': 'reference',
-                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
-                }
-                    for vpage in links]
-            results = []
-            for entry in info['list']:
-                assert entry['type'] == 'reference'
-                results += self.extract(entry['url'])
-            return results
-        else: # Root page
+            info['entries'] = [self.url_result(
+                'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
+            ) for l in links]
+            return info
+        else:  # Root page
             info = {
                 'id': 'Stanford OpenClassroom',
-                'type': 'playlist',
+                '_type': 'playlist',
                 'uploader': None,
                 'upload_date': None,
             }
+            info['title'] = info['id']
 
             rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
             rootpage = self._download_webpage(rootURL, info['id'],
-                errnote=u'Unable to download course info page')
-
-            info['title'] = info['id']
+                                              errnote='Unable to download course info page')
 
             links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
-            info['list'] = [
-                {
-                    'type': 'reference',
-                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
-                }
-                    for cpage in links]
-
-            results = []
-            for entry in info['list']:
-                assert entry['type'] == 'reference'
-                results += self.extract(entry['url'])
-            return results
+            info['entries'] = [self.url_result(
+                'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
+            ) for l in links]
+            return info
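
Instead of recursively calling self.extract on every reference, the course and root branches now return plain playlist entries; url_result builds the minimal dict that lets the core dispatch each URL later. A sketch of the helper, assuming its usual form in common.py:

    def url_result(url, ie=None, video_id=None):
        # A bare reference; YoutubeDL re-resolves it with the right extractor.
        video_info = {'_type': 'url', 'url': url}
        if ie is not None:
            video_info['ie_key'] = ie
        if video_id is not None:
            video_info['id'] = video_id
        return video_info

    entry = url_result('http://openclassroom.stanford.edu/MainFolder/HomePage.php')
    assert entry == {'_type': 'url',
                     'url': 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'}
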
index 172def221e1277298dc355a2cfdbea3ae4f9fdce..38176498dd05aeea99e45379984f31fdec994d5a 100644 (file)
@@ -5,7 +5,7 @@ import re
 import time
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
 )
@@ -13,7 +13,7 @@ from ..utils import (
 
 class StreamcloudIE(InfoExtractor):
     IE_NAME = 'streamcloud.eu'
-    _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)/(?P<fname>[^#?]*)\.html'
+    _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)(?:/(?P<fname>[^#?]*)\.html)?'
 
     _TEST = {
         'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html',
@@ -27,8 +27,8 @@ class StreamcloudIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
+        url = 'http://streamcloud.eu/%s' % video_id
 
         orig_webpage = self._download_webpage(url, video_id)
 
index 73efe95420ff7b83412864de02d8d5601690b537..c3ceb5f76d450001affda86e79466607b677e8f5 100644 (file)
@@ -1,18 +1,14 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-import re
-import json
-
 from .common import InfoExtractor
 from ..utils import (
     int_or_none,
-    compat_str,
 )
 
 
 class StreamCZIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?stream\.cz/.+/(?P<videoid>.+)'
+    _VALID_URL = r'https?://(?:www\.)?stream\.cz/.+/(?P<id>[0-9]+)'
 
     _TESTS = [{
         'url': 'http://www.stream.cz/peklonataliri/765767-ecka-pro-deti',
@@ -21,61 +17,63 @@ class StreamCZIE(InfoExtractor):
             'id': '765767',
             'ext': 'mp4',
             'title': 'Peklo na talíři: Éčka pro děti',
-            'description': 'md5:49ace0df986e95e331d0fe239d421519',
-            'thumbnail': 'http://im.stream.cz/episode/52961d7e19d423f8f06f0100',
+            'description': 'Taška s grónskou pomazánkou a další pekelnosti ZDE',
+            'thumbnail': 're:^http://im.stream.cz/episode/52961d7e19d423f8f06f0100',
             'duration': 256,
         },
     }, {
         'url': 'http://www.stream.cz/blanik/10002447-tri-roky-pro-mazanka',
-        'md5': '246272e753e26bbace7fcd9deca0650c',
+        'md5': 'e54a254fb8b871968fd8403255f28589',
         'info_dict': {
             'id': '10002447',
             'ext': 'mp4',
             'title': 'Kancelář Blaník: Tři roky pro Mazánka',
-            'description': 'md5:9177695a8b756a0a8ab160de4043b392',
-            'thumbnail': 'http://im.stream.cz/episode/537f838c50c11f8d21320000',
+            'description': 'md5:3862a00ba7bf0b3e44806b544032c859',
+            'thumbnail': 're:^http://im.stream.cz/episode/537f838c50c11f8d21320000',
             'duration': 368,
         },
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
-
-        webpage = self._download_webpage(url, video_id)
-
-        data = self._html_search_regex(r'Stream\.Data\.Episode\((.+?)\);', webpage, 'stream data')
-
-        jsonData = json.loads(data)
+        video_id = self._match_id(url)
+        data = self._download_json(
+            'http://www.stream.cz/API/episode/%s' % video_id, video_id)
 
         formats = []
-        for video in jsonData['instances']:
-            for video_format in video['instances']:
-                format_id = video_format['quality']
-
-                if format_id == '240p':
-                    quality = 0
-                elif format_id == '360p':
-                    quality = 1
-                elif format_id == '480p':
-                    quality = 2
-                elif format_id == '720p':
-                    quality = 3
-
+        for quality, video in enumerate(data['video_qualities']):
+            for f in video['formats']:
+                typ = f['type'].partition('/')[2]
+                qlabel = video.get('quality_label')
                 formats.append({
-                    'format_id': '%s-%s' % (video_format['type'].split('/')[1], format_id),
-                    'url': video_format['source'],
+                    'format_note': '%s-%s' % (qlabel, typ) if qlabel else typ,
+                    'format_id': '%s-%s' % (typ, f['quality']),
+                    'url': f['source'],
+                    'height': int_or_none(f['quality'].rstrip('p')),
                     'quality': quality,
                 })
-
         self._sort_formats(formats)
 
+        image = data.get('image')
+        if image:
+            thumbnail = self._proto_relative_url(
+                image.replace('{width}', '1240').replace('{height}', '697'),
+                scheme='http:',
+            )
+        else:
+            thumbnail = None
+
+        stream = data.get('_embedded', {}).get('stream:show', {}).get('name')
+        if stream:
+            title = '%s: %s' % (stream, data['name'])
+        else:
+            title = data['name']
+
         return {
-            'id': compat_str(jsonData['episode_id']),
-            'title': self._og_search_title(webpage),
-            'thumbnail': jsonData['episode_image_original_url'].replace('//', 'http://'),
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
             'formats': formats,
-            'description': self._og_search_description(webpage),
-            'duration': int_or_none(jsonData['duration']),
-            'view_count': int_or_none(jsonData['stats_total']),
+            'description': data.get('web_site_text'),
+            'duration': int_or_none(data.get('duration')),
+            'view_count': int_or_none(data.get('views')),
         }
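
The rewritten format loop leans on the order of the API response: enumerate supplies a monotonically rising quality rank, and int_or_none turns labels such as '720p' into a numeric height. A toy run under the assumption that video_qualities is sorted worst-to-best (the payload below is invented for illustration):

    def int_or_none(v, default=None):
        # Simplified from youtube_dl.utils: None-safe int().
        try:
            return int(v)
        except (TypeError, ValueError):
            return default

    video_qualities = [{'quality': '240p'}, {'quality': '720p'}]  # hypothetical payload
    for quality, f in enumerate(video_qualities):
        print(quality, int_or_none(f['quality'].rstrip('p')))
    # 0 240
    # 1 720
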
index db33745c14472f7f3e7749978585f0b2b2c53af2..59a51268d25ed4b0718c67ae91a5e8f07fcfe42c 100644 (file)
@@ -1,7 +1,8 @@
+from __future__ import unicode_literals
 from .common import InfoExtractor
 
+from ..compat import compat_str
 from ..utils import (
-    compat_str,
     ExtractorError,
 )
 
@@ -17,10 +18,10 @@ class SubtitlesInfoExtractor(InfoExtractor):
         sub_lang_list = self._get_available_subtitles(video_id, webpage)
         auto_captions_list = self._get_available_automatic_caption(video_id, webpage)
         sub_lang = ",".join(list(sub_lang_list.keys()))
-        self.to_screen(u'%s: Available subtitles for video: %s' %
+        self.to_screen('%s: Available subtitles for video: %s' %
                        (video_id, sub_lang))
         auto_lang = ",".join(auto_captions_list.keys())
-        self.to_screen(u'%s: Available automatic captions for video: %s' %
+        self.to_screen('%s: Available automatic captions for video: %s' %
                        (video_id, auto_lang))
 
     def extract_subtitles(self, video_id, webpage):
@@ -50,8 +51,8 @@ class SubtitlesInfoExtractor(InfoExtractor):
 
             sub_lang_list = {}
             for sub_lang in requested_langs:
-                if not sub_lang in available_subs_list:
-                    self._downloader.report_warning(u'no closed captions found in the specified language "%s"' % sub_lang)
+                if sub_lang not in available_subs_list:
+                    self._downloader.report_warning('no closed captions found in the specified language "%s"' % sub_lang)
                     continue
                 sub_lang_list[sub_lang] = available_subs_list[sub_lang]
 
@@ -70,10 +71,10 @@ class SubtitlesInfoExtractor(InfoExtractor):
         try:
             sub = self._download_subtitle_url(sub_lang, url)
         except ExtractorError as err:
-            self._downloader.report_warning(u'unable to download video subtitles for %s: %s' % (sub_lang, compat_str(err)))
+            self._downloader.report_warning('unable to download video subtitles for %s: %s' % (sub_lang, compat_str(err)))
             return
         if not sub:
-            self._downloader.report_warning(u'Did not fetch video subtitles')
+            self._downloader.report_warning('Did not fetch video subtitles')
             return
         return sub
 
@@ -94,5 +95,5 @@ class SubtitlesInfoExtractor(InfoExtractor):
         Must be redefined by the subclasses that support automatic captions,
         otherwise it will return {}
         """
-        self._downloader.report_warning(u'Automatic Captions not supported by this server')
+        self._downloader.report_warning('Automatic Captions not supported by this server')
         return {}
index 13c6ea67728d040a9e1f17111031952492d921b5..58073eefeffc0f3ebc244a6087cad36662940228 100644 (file)
@@ -80,7 +80,7 @@ class SWRMediathekIE(InfoExtractor):
 
             if media_type == 'Video':
                 fmt.update({
-                    'format_note': ['144p', '288p', '544p'][quality-1],
+                    'format_note': ['144p', '288p', '544p', '720p'][quality - 1],
                     'vcodec': codec,
                 })
             elif media_type == 'Audio':
@@ -101,4 +101,4 @@ class SWRMediathekIE(InfoExtractor):
             'uploader': attr['channel_title'],
             'uploader_id': attr['channel_idkey'],
             'formats': formats,
-        }
\ No newline at end of file
+        }
index f76b6e2b22c7fa391664218f9e59fa62908c4c08..5ca079f880717933a4216de6399046a44970d29b 100644 (file)
@@ -10,7 +10,6 @@ class SyfyIE(InfoExtractor):
 
     _TESTS = [{
         'url': 'http://www.syfy.com/videos/Robot%20Combat%20League/Behind%20the%20Scenes/vid:2631458',
-        'md5': 'e07de1d52c7278adbb9b9b1c93a66849',
         'info_dict': {
             'id': 'NmqMrGnXvmO1',
             'ext': 'flv',
index c9359fafb5c5989923c6320e3e684673b80057d6..aa5964acb6b3f40b0d663bd2169ac6aec0c210ae 100644 (file)
@@ -1,27 +1,24 @@
 # -*- coding: utf-8 -*-
-
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import determine_ext
 
 
 class SztvHuIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
+    _VALID_URL = r'http://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
     _TEST = {
-        u'url': u'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
-        u'file': u'20130909.mp4',
-        u'md5': u'a6df607b11fb07d0e9f2ad94613375cb',
-        u'info_dict': {
-            u"title": u"Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren",
-            u"description": u'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
+        'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
+        'md5': 'a6df607b11fb07d0e9f2ad94613375cb',
+        'info_dict': {
+            'id': '20130909',
+            'ext': 'mp4',
+            'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren',
+            'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
         },
-        u'skip': u'Service temporarily disabled as of 2013-11-20'
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         video_file = self._search_regex(
             r'file: "...:(.*?)",', webpage, 'video file')
@@ -39,7 +36,6 @@ class SztvHuIE(InfoExtractor):
             'id': video_id,
             'url': video_url,
             'title': title,
-            'ext': determine_ext(video_url),
             'description': description,
             'thumbnail': thumbnail,
         }
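
Dropping the explicit 'ext' key works because YoutubeDL falls back to guessing the extension from the URL itself; determine_ext is that guess. A sketch matching the behavior I'd expect from youtube_dl.utils.determine_ext:

    import re

    def determine_ext(url, default_ext='unknown_video'):
        # Strip the query string, then take whatever follows the last dot,
        # provided it looks like a plain alphanumeric extension.
        if url is None:
            return default_ext
        guess = url.partition('?')[0].rpartition('.')[2]
        if re.match(r'^[A-Za-z0-9]+$', guess):
            return guess
        return default_ext

    assert determine_ext('http://example.com/video.mp4?token=abc') == 'mp4'
    assert determine_ext('http://example.com/stream') == 'unknown_video'
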
index b870474515ba61ee33641c86554d53d68a6bf46d..bfe07b02417a2a44f23a09c10c25d48ec18b5535 100644 (file)
@@ -4,10 +4,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..utils import parse_filesize
 
 
 class TagesschauIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/video/video(?P<id>-?[0-9]+)\.html'
+    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?:sendung/ts|video/video)(?P<id>-?[0-9]+)\.html'
 
     _TESTS = [{
         'url': 'http://www.tagesschau.de/multimedia/video/video1399128.html',
@@ -19,6 +20,16 @@ class TagesschauIE(InfoExtractor):
             'description': 'md5:69da3c61275b426426d711bde96463ab',
             'thumbnail': 're:^http:.*\.jpg$',
         },
+    }, {
+        'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html',
+        'md5': '3c54c1f6243d279b706bde660ceec633',
+        'info_dict': {
+            'id': '5727',
+            'ext': 'mp4',
+            'description': 'md5:695c01bfd98b7e313c501386327aea59',
+            'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr',
+            'thumbnail': 're:^http:.*\.jpg$',
+        }
     }]
 
     _FORMATS = {
@@ -28,42 +39,82 @@ class TagesschauIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        if video_id.startswith('-'):
-            display_id = video_id.strip('-')
-        else:
-            display_id = video_id
-
+        video_id = self._match_id(url)
+        display_id = video_id.lstrip('-')
         webpage = self._download_webpage(url, display_id)
 
-        playerpage = self._download_webpage(
-            'http://www.tagesschau.de/multimedia/video/video%s~player_autoplay-true.html' % video_id,
-            display_id, 'Downloading player page')
-
-        medias = re.findall(
-            r'"(http://media.+?)", type:"video/(.+?)", quality:"(.+?)"',
-            playerpage)
+        player_url = self._html_search_meta(
+            'twitter:player', webpage, 'player URL', default=None)
+        if player_url:
+            playerpage = self._download_webpage(
+                player_url, display_id, 'Downloading player page')
 
-        formats = []
-        for url, ext, res in medias:
-            f = {
-                'format_id': res + '_' + ext,
-                'url': url,
-                'ext': ext,
-            }
-            f.update(self._FORMATS.get(res, {}))
-            formats.append(f)
+            medias = re.findall(
+                r'"(http://media.+?)", type:"video/(.+?)", quality:"(.+?)"',
+                playerpage)
+            formats = []
+            for url, ext, res in medias:
+                f = {
+                    'format_id': res + '_' + ext,
+                    'url': url,
+                    'ext': ext,
+                }
+                f.update(self._FORMATS.get(res, {}))
+                formats.append(f)
+            thumbnail_fn = re.findall(r'"(/multimedia/.+?\.jpg)"', playerpage)[-1]
+            title = self._og_search_title(webpage).strip()
+            description = self._og_search_description(webpage).strip()
+        else:
+            download_text = self._search_regex(
+                r'(?s)<p>Wir bieten dieses Video in folgenden Formaten zum Download an:</p>\s*<div class="controls">(.*?)</div>\s*<p>',
+                webpage, 'download links')
+            links = re.finditer(
+                r'<div class="button" title="(?P<title>[^"]*)"><a href="(?P<url>[^"]+)">(?P<name>.+?)</a></div>',
+                download_text)
+            formats = []
+            for l in links:
+                format_id = self._search_regex(
+                    r'.*/[^/.]+\.([^/]+)\.[^/.]+', l.group('url'), 'format ID')
+                format = {
+                    'format_id': format_id,
+                    'url': l.group('url'),
+                    'format_name': l.group('name'),
+                }
+                m = re.match(
+                    r'''(?x)
+                        Video:\s*(?P<vcodec>[a-zA-Z0-9/._-]+)\s*&\#10;
+                        (?P<width>[0-9]+)x(?P<height>[0-9]+)px&\#10;
+                        (?P<vbr>[0-9]+)kbps&\#10;
+                        Audio:\s*(?P<abr>[0-9]+)kbps,\s*(?P<audio_desc>[A-Za-z\.0-9]+)&\#10;
+                        Gr&ouml;&szlig;e:\s*(?P<filesize_approx>[0-9.,]+\s+[a-zA-Z]*B)''',
+                    l.group('title'))
+                if m:
+                    format.update({
+                        'format_note': m.group('audio_desc'),
+                        'vcodec': m.group('vcodec'),
+                        'width': int(m.group('width')),
+                        'height': int(m.group('height')),
+                        'abr': int(m.group('abr')),
+                        'vbr': int(m.group('vbr')),
+                        'filesize_approx': parse_filesize(m.group('filesize_approx')),
+                    })
+                formats.append(format)
+            thumbnail_fn = self._search_regex(
+                r'(?s)<img alt="Sendungsbild".*?src="([^"]+)"',
+                webpage, 'thumbnail', fatal=False)
+            description = self._html_search_regex(
+                r'(?s)<p class="teasertext">(.*?)</p>',
+                webpage, 'description', fatal=False)
+            title = self._html_search_regex(
+                r'<span class="headline".*?>(.*?)</span>', webpage, 'title')
 
         self._sort_formats(formats)
-
-        thumbnail = re.findall(r'"(/multimedia/.+?\.jpg)"', playerpage)[-1]
+        thumbnail = 'http://www.tagesschau.de' + thumbnail_fn
 
         return {
             'id': display_id,
-            'title': self._og_search_title(webpage).strip(),
-            'thumbnail': 'http://www.tagesschau.de' + thumbnail,
+            'title': title,
+            'thumbnail': thumbnail,
             'formats': formats,
-            'description': self._og_search_description(webpage).strip(),
+            'description': description,
         }
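
The download-link branch parses the tooltip metadata, including a human-readable size that parse_filesize converts to bytes. A simplified sketch, assuming comma-as-decimal-separator input like the German "Größe: 24,8 MB" labels (the real helper in youtube_dl.utils knows more units):

    import re

    def parse_filesize(s):
        # Simplified: '24,8 MB' or '1.5 GiB' -> byte count, else None.
        units = {'B': 1, 'KB': 1000, 'MB': 1000 ** 2, 'GB': 1000 ** 3,
                 'KiB': 1024, 'MiB': 1024 ** 2, 'GiB': 1024 ** 3}
        m = re.match(r'\s*([0-9]+(?:[,.][0-9]*)?)\s*([A-Za-z]+)\s*$', s or '')
        if m is None or m.group(2) not in units:
            return None
        return int(float(m.group(1).replace(',', '.')) * units[m.group(2)])

    assert parse_filesize('24,8 MB') == 24800000
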
index 77e0562425f0577869c01cb10e690fa9bef7d230..f1f43d0a7113cbf40e5dfd3ffb71af5e900fab78 100644 (file)
@@ -4,10 +4,12 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_request,
+)
 from ..utils import (
-    ExtractorError,
     clean_html,
-    compat_urllib_request,
+    ExtractorError,
     float_or_none,
     parse_iso8601,
 )
@@ -50,6 +52,7 @@ class TapelyIE(InfoExtractor):
         request = compat_urllib_request.Request(playlist_url)
         request.add_header('X-Requested-With', 'XMLHttpRequest')
         request.add_header('Accept', 'application/json')
+        request.add_header('Referer', url)
 
         playlist = self._download_json(request, display_id)
 
diff --git a/youtube_dl/extractor/tass.py b/youtube_dl/extractor/tass.py
new file mode 100644 (file)
index 0000000..c4ef707
--- /dev/null
@@ -0,0 +1,62 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    js_to_json,
+    qualities,
+)
+
+
+class TassIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:tass\.ru|itar-tass\.com)/[^/]+/(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://tass.ru/obschestvo/1586870',
+            'md5': '3b4cdd011bc59174596b6145cda474a4',
+            'info_dict': {
+                'id': '1586870',
+                'ext': 'mp4',
+                'title': 'Посетителям московского зоопарка показали красную панду',
+                'description': 'Приехавшую из Дублина Зейну можно увидеть в павильоне "Кошки тропиков"',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        {
+            'url': 'http://itar-tass.com/obschestvo/1600009',
+            'only_matching': True,
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        sources = json.loads(js_to_json(self._search_regex(
+            r'(?s)sources\s*:\s*(\[.+?\])', webpage, 'sources')))
+
+        quality = qualities(['sd', 'hd'])
+
+        formats = []
+        for source in sources:
+            video_url = source.get('file')
+            if not video_url or not video_url.startswith('http') or not video_url.endswith('.mp4'):
+                continue
+            label = source.get('label')
+            formats.append({
+                'url': video_url,
+                'format_id': label,
+                'quality': quality(label),
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': self._og_search_title(webpage),
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'formats': formats,
+        }
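
Ranking formats goes through the qualities helper: given an ordered list of known labels, it returns a scorer that maps each label to its index and anything unknown to -1. A minimal sketch, assuming that utils implementation:

    def qualities(quality_ids):
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    quality = qualities(['sd', 'hd'])
    assert quality('hd') > quality('sd') > quality(None)
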
index 8a95fd6563999f1f59808e2b2090ede9ee312f7a..6c3445d792206395b7a36d016b8a42ad255ea9cc 100644 (file)
@@ -121,7 +121,7 @@ class TeacherTubeUserIE(InfoExtractor):
         urls = []
         webpage = self._download_webpage(url, user_id)
         urls.extend(re.findall(self._MEDIA_RE, webpage))
-        
+
         pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[:-1]
         for p in pages:
             more = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, p)
index fa796ce72126610cda53db5378d926b44d72e526..5fa67eb8d4441d62c1591289551171cdbcbcf45b 100644 (file)
@@ -8,24 +8,23 @@ from .common import InfoExtractor
 class TeamcocoIE(InfoExtractor):
     _VALID_URL = r'http://teamcoco\.com/video/(?P<video_id>[0-9]+)?/?(?P<display_id>.*)'
     _TESTS = [
-    {
-        'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant',
-        'file': '80187.mp4',
-        'md5': '3f7746aa0dc86de18df7539903d399ea',
-        'info_dict': {
-            'title': 'Conan Becomes A Mary Kay Beauty Consultant',
-            'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.'
+        {
+            'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant',
+            'file': '80187.mp4',
+            'md5': '3f7746aa0dc86de18df7539903d399ea',
+            'info_dict': {
+                'title': 'Conan Becomes A Mary Kay Beauty Consultant',
+                'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.'
+            }
+        }, {
+            'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
+            'file': '19705.mp4',
+            'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
+            'info_dict': {
+                "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.",
+                "title": "Louis C.K. Interview Pt. 1 11/3/11"
+            }
         }
-    },
-    {
-        'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
-        'file': '19705.mp4',
-        'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
-        'info_dict': {
-            "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.",
-            "title": "Louis C.K. Interview Pt. 1 11/3/11"
-        }
-    }
     ]
 
     def _real_extract(self, url):
@@ -33,7 +32,7 @@ class TeamcocoIE(InfoExtractor):
 
         display_id = mobj.group('display_id')
         webpage = self._download_webpage(url, display_id)
-        
+
         video_id = mobj.group("video_id")
         if not video_id:
             video_id = self._html_search_regex(
index d5e28efada55a91a480ce031df0bc2774de2ccc6..944177426d5d719d152d3474f5b059410cb27955 100644 (file)
@@ -5,7 +5,7 @@ import re
 
 from .subtitles import SubtitlesInfoExtractor
 
-from ..utils import (
+from ..compat import (
     compat_str,
 )
 
@@ -33,11 +33,12 @@ class TEDIE(SubtitlesInfoExtractor):
             'ext': 'mp4',
             'title': 'The illusion of consciousness',
             'description': ('Philosopher Dan Dennett makes a compelling '
-                'argument that not only don\'t we understand our own '
-                'consciousness, but that half the time our brains are '
-                'actively fooling us.'),
+                            'argument that not only don\'t we understand our own '
+                            'consciousness, but that half the time our brains are '
+                            'actively fooling us.'),
             'uploader': 'Dan Dennett',
             'width': 854,
+            'duration': 1308,
         }
     }, {
         'url': 'http://www.ted.com/watch/ted-institute/ted-bcg/vishal-sikka-the-beauty-and-power-of-algorithms',
@@ -57,6 +58,7 @@ class TEDIE(SubtitlesInfoExtractor):
             'title': 'Be passionate. Be courageous. Be your best.',
             'uploader': 'Gabby Giffords and Mark Kelly',
             'description': 'md5:5174aed4d0f16021b704120360f72b92',
+            'duration': 1128,
         },
     }, {
         'url': 'http://www.ted.com/playlists/who_are_the_hackers',
@@ -65,6 +67,22 @@ class TEDIE(SubtitlesInfoExtractor):
             'title': 'Who are the hackers?',
         },
         'playlist_mincount': 6,
+    }, {
+        # contains a youtube video
+        'url': 'https://www.ted.com/talks/douglas_adams_parrots_the_universe_and_everything',
+        'add_ie': ['Youtube'],
+        'info_dict': {
+            'id': '_ZG8HBuDjgc',
+            'ext': 'mp4',
+            'title': 'Douglas Adams: Parrots the Universe and Everything',
+            'description': 'md5:01ad1e199c49ac640cb1196c0e9016af',
+            'uploader': 'University of California Television (UCTV)',
+            'uploader_id': 'UCtelevision',
+            'upload_date': '20080522',
+        },
+        'params': {
+            'skip_download': True,
+        },
     }]
 
     _NATIVE_FORMATS = {
@@ -75,7 +93,7 @@ class TEDIE(SubtitlesInfoExtractor):
 
     def _extract_info(self, webpage):
         info_json = self._search_regex(r'q\("\w+.init",({.+})\)</script>',
-            webpage, 'info json')
+                                       webpage, 'info json')
         return json.loads(info_json)
 
     def _real_extract(self, url):
@@ -95,7 +113,7 @@ class TEDIE(SubtitlesInfoExtractor):
         '''Returns the videos of the playlist'''
 
         webpage = self._download_webpage(url, name,
-            'Downloading playlist webpage')
+                                         'Downloading playlist webpage')
         info = self._extract_info(webpage)
         playlist_info = info['playlist']
 
@@ -114,6 +132,13 @@ class TEDIE(SubtitlesInfoExtractor):
 
         talk_info = self._extract_info(webpage)['talks'][0]
 
+        if talk_info.get('external') is not None:
+            self.to_screen('Found video from %s' % talk_info['external']['service'])
+            return {
+                '_type': 'url',
+                'url': talk_info['external']['uri'],
+            }
+
         formats = [{
             'url': format_url,
             'format_id': format_id,
@@ -155,6 +180,7 @@ class TEDIE(SubtitlesInfoExtractor):
             'description': self._og_search_description(webpage),
             'subtitles': video_subtitles,
             'formats': formats,
+            'duration': talk_info.get('duration'),
         }
 
     def _get_available_subtitles(self, video_id, talk_info):
@@ -173,8 +199,9 @@ class TEDIE(SubtitlesInfoExtractor):
         webpage = self._download_webpage(url, name)
 
         config_json = self._html_search_regex(
-            r"data-config='([^']+)", webpage, 'config')
-        config = json.loads(config_json)
+            r'"pages\.jwplayer"\s*,\s*({.+?})\s*\)\s*</script>',
+            webpage, 'config')
+        config = json.loads(config_json)['config']
         video_url = config['video']['url']
         thumbnail = config.get('image', {}).get('url')
 
diff --git a/youtube_dl/extractor/telebruxelles.py b/youtube_dl/extractor/telebruxelles.py
new file mode 100644 (file)
index 0000000..a3d05f9
--- /dev/null
@@ -0,0 +1,60 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class TeleBruxellesIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?telebruxelles\.be/(news|sport|dernier-jt)/?(?P<id>[^/#?]+)'
+    _TESTS = [{
+        'url': 'http://www.telebruxelles.be/news/auditions-devant-parlement-francken-galant-tres-attendus/',
+        'md5': '59439e568c9ee42fb77588b2096b214f',
+        'info_dict': {
+            'id': '11942',
+            'display_id': 'auditions-devant-parlement-francken-galant-tres-attendus',
+            'ext': 'flv',
+            'title': 'Parlement : Francken et Galant répondent aux interpellations de l’opposition',
+            'description': 're:Les auditions des ministres se poursuivent*'
+        },
+        'params': {
+            'skip_download': 'requires rtmpdump'
+        },
+    }, {
+        'url': 'http://www.telebruxelles.be/sport/basket-brussels-bat-mons-80-74/',
+        'md5': '181d3fbdcf20b909309e5aef5c6c6047',
+        'info_dict': {
+            'id': '10091',
+            'display_id': 'basket-brussels-bat-mons-80-74',
+            'ext': 'flv',
+            'title': 'Basket : le Brussels bat Mons 80-74',
+            'description': 're:^Ils l\u2019on fait ! En basket, le B*',
+        },
+        'params': {
+            'skip_download': 'requires rtmpdump'
+        },
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+
+        article_id = self._html_search_regex(
+            r"<article id=\"post-(\d+)\"", webpage, 'article ID')
+        title = self._html_search_regex(
+            r'<h1 class=\"entry-title\">(.*?)</h1>', webpage, 'title')
+        description = self._og_search_description(webpage)
+
+        rtmp_url = self._html_search_regex(
+            r"file: \"(rtmp://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}/vod/mp4:\" \+ \"\w+\" \+ \".mp4)\"",
+            webpage, 'RTMP url')
+        rtmp_url = rtmp_url.replace("\" + \"", "")
+
+        return {
+            'id': article_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'url': rtmp_url,
+            'ext': 'flv',
+            'rtmp_live': True  # if rtmpdump is not called with "--live" argument, the download is blocked and cannot be completed
+        }
diff --git a/youtube_dl/extractor/telecinco.py b/youtube_dl/extractor/telecinco.py
new file mode 100644 (file)
index 0000000..2a2fff5
--- /dev/null
@@ -0,0 +1,19 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .mitele import MiTeleIE
+
+
+class TelecincoIE(MiTeleIE):
+    IE_NAME = 'telecinco.es'
+    _VALID_URL = r'https?://www\.telecinco\.es/[^/]+/[^/]+/[^/]+/(?P<episode>.*?)\.html'
+
+    _TEST = {
+        'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html',
+        'info_dict': {
+            'id': 'MDSVID20141015_0058',
+            'ext': 'mp4',
+            'title': 'Con Martín Berasategui, hacer un bacalao al ...',
+            'duration': 662,
+        },
+    }
index fdae17b1b817efd2a7666d44cc2cc38de1ccfa22..6e61cc9e2ecf621b19fe13924456970b091bce1c 100644 (file)
@@ -30,7 +30,7 @@ class TF1IE(InfoExtractor):
         embed_url = self._html_search_regex(
             r'"(https://www.wat.tv/embedframe/.*?)"', webpage, 'embed url')
         embed_page = self._download_webpage(embed_url, video_id,
-            'Downloading embed player page')
+                                            'Downloading embed player page')
         wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id')
         wat_info = self._download_json(
             'http://www.wat.tv/interface/contentv3/%s' % wat_id, video_id)
diff --git a/youtube_dl/extractor/theonion.py b/youtube_dl/extractor/theonion.py
new file mode 100644 (file)
index 0000000..b65d8e0
--- /dev/null
@@ -0,0 +1,70 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class TheOnionIE(InfoExtractor):
+    _VALID_URL = r'(?x)https?://(?:www\.)?theonion\.com/video/[^,]+,(?P<article_id>[0-9]+)/?'
+    _TEST = {
+        'url': 'http://www.theonion.com/video/man-wearing-mm-jacket-gods-image,36918/',
+        'md5': '19eaa9a39cf9b9804d982e654dc791ee',
+        'info_dict': {
+            'id': '2133',
+            'ext': 'mp4',
+            'title': 'Man Wearing M&M Jacket Apparently Made In God\'s Image',
+            'description': 'md5:cc12448686b5600baae9261d3e180910',
+            'thumbnail': 're:^https?://.*\.jpg\?\d+$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        article_id = mobj.group('article_id')
+
+        webpage = self._download_webpage(url, article_id)
+
+        video_id = self._search_regex(
+            r'"videoId":\s(\d+),', webpage, 'video ID')
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        sources = re.findall(r'<source src="([^"]+)" type="([^"]+)"', webpage)
+        if not sources:
+            raise ExtractorError(
+                'No sources found for video %s' % video_id, expected=True)
+
+        formats = []
+        for src, type_ in sources:
+            if type_ == 'video/mp4':
+                formats.append({
+                    'format_id': 'mp4_sd',
+                    'preference': 1,
+                    'url': src,
+                })
+            elif type_ == 'video/webm':
+                formats.append({
+                    'format_id': 'webm_sd',
+                    'preference': 0,
+                    'url': src,
+                })
+            elif type_ == 'application/x-mpegURL':
+                formats.extend(
+                    self._extract_m3u8_formats(src, video_id, preference=-1))
+            else:
+                self.report_warning(
+                    'Encountered unexpected format: %s' % type_)
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': thumbnail,
+            'description': description,
+        }
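
The explicit preference values encode a deliberate ranking: MP4 above WebM, with the HLS variants demoted below both. _sort_formats uses preference as a leading key and orders worst-to-best, so a toy sort shows which format "-f best" would pick:

    formats = [
        {'format_id': 'hls', 'preference': -1},
        {'format_id': 'webm_sd', 'preference': 0},
        {'format_id': 'mp4_sd', 'preference': 1},
    ]
    # Worst-to-best, as _sort_formats does; the last entry wins by default.
    formats.sort(key=lambda f: f['preference'])
    assert formats[-1]['format_id'] == 'mp4_sd'
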
index 0be793b1c262ed6c951fa6695de3cf22680d5720..af6ef0033af061713d81785734381a6beffc2c6d 100644 (file)
@@ -4,8 +4,11 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_str,
+)
+from ..utils import (
+    determine_ext,
     ExtractorError,
     xpath_with_ns,
 )
@@ -35,9 +38,20 @@ class ThePlatformIE(InfoExtractor):
         },
     }
 
-    def _get_info(self, video_id, smil_url):
-        meta = self._download_xml(smil_url, video_id)
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        if mobj.group('config'):
+            config_url = url + '&form=json'
+            config_url = config_url.replace('swf/', 'config/')
+            config_url = config_url.replace('onsite/', 'onsite/config/')
+            config = self._download_json(config_url, video_id, 'Downloading config')
+            smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
+        else:
+            smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
+                        'format=smil&mbr=true'.format(video_id))
 
+        meta = self._download_xml(smil_url, video_id)
         try:
             error_msg = next(
                 n.attrib['abstract']
@@ -89,10 +103,14 @@ class ThePlatformIE(InfoExtractor):
                 for f in switch.findall(_x('smil:video')):
                     attr = f.attrib
                     vbr = int(attr['system-bitrate']) // 1000
+                    ext = determine_ext(attr['src'])
+                    if ext == 'once':
+                        ext = 'mp4'
                     formats.append({
                         'format_id': compat_str(vbr),
                         'url': attr['src'],
                         'vbr': vbr,
+                        'ext': ext,
                     })
             self._sort_formats(formats)
 
@@ -102,19 +120,5 @@ class ThePlatformIE(InfoExtractor):
             'formats': formats,
             'description': info['description'],
             'thumbnail': info['defaultThumbnailUrl'],
-            'duration': info['duration']//1000,
+            'duration': info['duration'] // 1000,
         }
-        
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        if mobj.group('config'):
-            config_url = url+ '&form=json'
-            config_url = config_url.replace('swf/', 'config/')
-            config_url = config_url.replace('onsite/', 'onsite/config/')
-            config = self._download_json(config_url, video_id, 'Downloading config')
-            smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
-        else:
-            smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
-                'format=smil&mbr=true'.format(video_id))
-        return self._get_info(video_id, smil_url)
index bfb9d2fc9f3cef23466806519c6ac0d226efa945..7f323c938762f6ec1337b6dbf6b9c64ec2993dd8 100644 (file)
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -36,12 +36,12 @@ class ThisAVIE(InfoExtractor):
             r': <a href="http://www.thisav.com/user/[0-9]+/([^"]+)">(?:[^<]+)</a>',
             webpage, 'uploader id', fatal=False)
         ext = determine_ext(video_url)
-        
+
         return {
-            'id':          video_id,
-            'url':         video_url,
-            'uploader':    uploader,
+            'id': video_id,
+            'url': video_url,
+            'uploader': uploader,
             'uploader_id': uploader_id,
-            'title':       title,
-            'ext':         ext,
+            'title': title,
+            'ext': ext,
         }
index a4aa25f661223301b9d16c7ac87b6c502aa0e0ff..4fe89dbe516f8e25eb1f84239bc9cbc9f26bd648 100644 (file)
@@ -26,9 +26,9 @@ class TinyPicIE(InfoExtractor):
         video_id = mobj.group('id')
 
         webpage = self._download_webpage(url, video_id, 'Downloading page')
-        
+
         mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n'
-            '\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
+                         '\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
         if mobj is None:
             raise ExtractorError('Video %s does not exist' % video_id, expected=True)
 
@@ -47,4 +47,4 @@ class TinyPicIE(InfoExtractor):
             'url': video_url,
             'thumbnail': thumbnail,
             'title': title
-        }
\ No newline at end of file
+        }
index d848ee1863252dbc155652c997210476c42479e4..9f9e388c50948d658d1022f8514122643b623a03 100644 (file)
@@ -5,7 +5,7 @@ import re
 from .common import InfoExtractor
 from .brightcove import BrightcoveIE
 from .discovery import DiscoveryIE
-from ..utils import compat_urlparse
+from ..compat import compat_urlparse
 
 
 class TlcIE(DiscoveryIE):
@@ -36,9 +36,10 @@ class TlcDeIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Breaking Amish: Die Welt da draußen',
             'uploader': 'Discovery Networks - Germany',
-            'description': 'Vier Amische und eine Mennonitin wagen in New York'
+            'description': (
+                'Vier Amische und eine Mennonitin wagen in New York'
                 '  den Sprung in ein komplett anderes Leben. Begleitet sie auf'
-                ' ihrem spannenden Weg.',
+                ' ihrem spannenden Weg.'),
         },
     }
 
diff --git a/youtube_dl/extractor/tmz.py b/youtube_dl/extractor/tmz.py
new file mode 100644 (file)
index 0000000..827aa08
--- /dev/null
@@ -0,0 +1,32 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class TMZIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?tmz\.com/videos/(?P<id>[^/]+)/?'
+    _TEST = {
+        'url': 'http://www.tmz.com/videos/0_okj015ty/',
+        'md5': '791204e3bf790b1426cb2db0706184c0',
+        'info_dict': {
+            'id': '0_okj015ty',
+            'url': 'http://tmz.vo.llnwd.net/o28/2014-03/13/0_okj015ty_0_rt8ro3si_2.mp4',
+            'ext': 'mp4',
+            'title': 'Kim Kardashian\'s Boobs Unlock a Mystery!',
+            'description': 'Did Kim Kardasain try to one-up Khloe by one-upping Kylie???  Or is she just showing off her amazing boobs?',
+            'thumbnail': 'http://cdnbakmi.kaltura.com/p/591531/sp/59153100/thumbnail/entry_id/0_okj015ty/version/100002/acv/182/width/640',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        return {
+            'id': video_id,
+            'url': self._html_search_meta('VideoURL', webpage, fatal=True),
+            'title': self._og_search_title(webpage),
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._html_search_meta('ThumbURL', webpage),
+        }
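
Like most files in this changeset, the TMZ extractor skips the re.match boilerplate via self._match_id. A minimal sketch of the helper, assuming _VALID_URL names an id group (which common.py requires for this shortcut):

    import re

    class SketchIE(object):
        _VALID_URL = r'https?://(?:www\.)?tmz\.com/videos/(?P<id>[^/]+)/?'

        def _match_id(self, url):
            # suitable() already vetted the URL, so the match cannot fail here.
            return re.match(self._VALID_URL, url).group('id')

    print(SketchIE()._match_id('http://www.tmz.com/videos/0_okj015ty/'))  # 0_okj015ty
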
index 4956f857766eb6ea24638355c327742556f0e55f..0ecd695f85e6a40929501fb6adc40bace8784d42 100644 (file)
@@ -71,7 +71,7 @@ class TNAFlixIE(InfoExtractor):
                 fmt['height'] = int(m.group(1))
             formats.append(fmt)
         self._sort_formats(formats)
-        
+
         return {
             'id': video_id,
             'display_id': display_id,
index 11407428b56cb779ee9411ee1ccc7736fa2fb718..1c53a3fd09459f31fdf188dc852141ef000af4a6 100644 (file)
@@ -25,7 +25,7 @@ class TrailerAddictIE(InfoExtractor):
         webpage = self._download_webpage(url, name)
 
         title = self._search_regex(r'<title>(.+?)</title>',
-                webpage, 'video title').replace(' - Trailer Addict','')
+                                   webpage, 'video title').replace(' - Trailer Addict', '')
         view_count_str = self._search_regex(
             r'<span class="views_n">([0-9,.]+)</span>',
             webpage, 'view count', fatal=False)
@@ -43,12 +43,12 @@ class TrailerAddictIE(InfoExtractor):
             fvar = "fvar"
 
         info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id))
-        info_webpage = self._download_webpage(info_url, video_id , "Downloading the info webpage")
+        info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")
 
         final_url = self._search_regex(r'&fileurl=(.+)',
-                info_webpage, 'Download url').replace('%3F','?')
+                                       info_webpage, 'Download url').replace('%3F', '?')
         thumbnail_url = self._search_regex(r'&image=(.+?)&',
-                info_webpage, 'thumbnail url')
+                                           info_webpage, 'thumbnail url')
 
         description = self._html_search_regex(
             r'(?s)<div class="synopsis">.*?<div class="movie_label_info"[^>]*>(.*?)</div>',
index d64aaa41f690956b08211ed4fe07e1bc27267641..220a05b7b493fb728f3cd3c6dab74208a8f587eb 100644 (file)
@@ -1,28 +1,28 @@
+from __future__ import unicode_literals
+
 import json
-import re
 
 from .common import InfoExtractor
 
 
 class TriluliluIE(InfoExtractor):
-    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?trilulilu\.ro/video-(?P<category>[^/]+)/(?P<video_id>[^/]+)'
+    _VALID_URL = r'https?://(?:www\.)?trilulilu\.ro/video-[^/]+/(?P<id>[^/]+)'
     _TEST = {
-        u"url": u"http://www.trilulilu.ro/video-animatie/big-buck-bunny-1",
-        u'file': u"big-buck-bunny-1.mp4",
-        u'info_dict': {
-            u"title": u"Big Buck Bunny",
-            u"description": u":) pentru copilul din noi",
+        'url': 'http://www.trilulilu.ro/video-animatie/big-buck-bunny-1',
+        'info_dict': {
+            'id': 'big-buck-bunny-1',
+            'ext': 'mp4',
+            'title': 'Big Buck Bunny',
+            'description': ':) pentru copilul din noi',
         },
         # Server ignores Range headers (--test)
-        u"params": {
-            u"skip_download": True
+        'params': {
+            'skip_download': True
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         title = self._og_search_title(webpage)
@@ -30,20 +30,20 @@ class TriluliluIE(InfoExtractor):
         description = self._og_search_description(webpage)
 
         log_str = self._search_regex(
-            r'block_flash_vars[ ]=[ ]({[^}]+})', webpage, u'log info')
+            r'block_flash_vars[ ]=[ ]({[^}]+})', webpage, 'log info')
         log = json.loads(log_str)
 
-        format_url = (u'http://fs%(server)s.trilulilu.ro/%(hash)s/'
-                      u'video-formats2' % log)
+        format_url = ('http://fs%(server)s.trilulilu.ro/%(hash)s/'
+                      'video-formats2' % log)
         format_doc = self._download_xml(
             format_url, video_id,
-            note=u'Downloading formats',
-            errnote=u'Error while downloading formats')
+            note='Downloading formats',
+            errnote='Error while downloading formats')
+
         video_url_template = (
-            u'http://fs%(server)s.trilulilu.ro/stream.php?type=video'
-            u'&source=site&hash=%(hash)s&username=%(userid)s&'
-            u'key=ministhebest&format=%%s&sig=&exp=' %
+            'http://fs%(server)s.trilulilu.ro/stream.php?type=video'
+            '&source=site&hash=%(hash)s&username=%(userid)s&'
+            'key=ministhebest&format=%%s&sig=&exp=' %
             log)
         formats = [
             {
@@ -63,4 +63,3 @@ class TriluliluIE(InfoExtractor):
             'description': description,
             'thumbnail': thumbnail,
         }
-
index 57f9566832401ff8eb2d1a90aaf91d3e68cdf873..e7b79243a8fb9f091087f5c452c8192c49c81af2 100644 (file)
@@ -1,13 +1,12 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
+from ..utils import xpath_text
 
 
 class TruTubeIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?trutube\.tv/video/(?P<id>[0-9]+)/.*'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?trutube\.tv/(?:video/|nuevo/player/embed\.php\?v=)(?P<id>[0-9]+)'
+    _TESTS = [{
         'url': 'http://trutube.tv/video/14880/Ramses-II-Proven-To-Be-A-Red-Headed-Caucasoid-',
         'md5': 'c5b6e301b0a2040b074746cbeaa26ca1',
         'info_dict': {
@@ -16,29 +15,26 @@ class TruTubeIE(InfoExtractor):
             'title': 'Ramses II - Proven To Be A Red Headed Caucasoid',
             'thumbnail': 're:^http:.*\.jpg$',
         }
-    }
+    }, {
+        'url': 'https://trutube.tv/nuevo/player/embed.php?v=14880',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
-        webpage = self._download_webpage(url, video_id)
-        video_title = self._og_search_title(webpage).strip()
-        thumbnail = self._search_regex(
-            r"var splash_img = '([^']+)';", webpage, 'thumbnail', fatal=False)
+        config = self._download_xml(
+            'https://trutube.tv/nuevo/player/config.php?v=%s' % video_id,
+            video_id, transform_source=lambda s: s.strip())
 
-        all_formats = re.finditer(
-            r"var (?P<key>[a-z]+)_video_file\s*=\s*'(?P<url>[^']+)';", webpage)
-        formats = [{
-            'format_id': m.group('key'),
-            'quality': -i,
-            'url': m.group('url'),
-        } for i, m in enumerate(all_formats)]
-        self._sort_formats(formats)
+        # filehd is always 404
+        video_url = xpath_text(config, './file', 'video URL', fatal=True)
+        title = xpath_text(config, './title', 'title').strip()
+        thumbnail = xpath_text(config, './image', 'thumbnail')
 
         return {
             'id': video_id,
-            'title': video_title,
-            'formats': formats,
+            'url': video_url,
+            'title': title,
             'thumbnail': thumbnail,
         }
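
The player config is a tiny XML document, and xpath_text reads each field without blowing up on absent nodes unless fatal=True. A sketch of the helper's expected semantics (the real one raises ExtractorError rather than a bare ValueError):

    import xml.etree.ElementTree as etree

    def xpath_text(node, xpath, name=None, fatal=False):
        n = node.find(xpath)
        if n is None or n.text is None:
            if fatal:
                raise ValueError('Could not find XML element %s' % (name or xpath))
            return None
        return n.text

    config = etree.fromstring(
        '<config><file>http://example.com/v.mp4</file><title> A title </title></config>')
    assert xpath_text(config, './file', 'video URL', fatal=True) == 'http://example.com/v.mp4'
    assert xpath_text(config, './title', 'title').strip() == 'A title'
    assert xpath_text(config, './image', 'thumbnail') is None
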
index 64a1e903022a78fa3a2b15eeff5eed20afce568d..d73ad3762a1b455cfd4bc384c27e2dd85e776dde 100644 (file)
@@ -4,9 +4,11 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse_urlparse,
     compat_urllib_request,
+)
+from ..utils import (
     int_or_none,
     str_to_int,
 )
index dcd823d0838dca23b27298cbf05ad47cc4261637..161e47624b383dfe76ea1e2ea6ec73395a97db53 100644 (file)
@@ -36,12 +36,12 @@ class TudouIE(InfoExtractor):
         'skip': 'Only works from China'
     }]
 
-    def _url_for_id(self, id, quality = None):
-        info_url = "http://v2.tudou.com/f?id="+str(id)
+    def _url_for_id(self, id, quality=None):
+        info_url = "http://v2.tudou.com/f?id=" + str(id)
         if quality:
             info_url += '&hd' + quality
         webpage = self._download_webpage(info_url, id, "Opening the info webpage")
-        final_url = self._html_search_regex('>(.+?)</f>',webpage, 'video url')
+        final_url = self._html_search_regex('>(.+?)</f>', webpage, 'video url')
         return final_url
 
     def _real_extract(self, url):
@@ -73,7 +73,7 @@ class TudouIE(InfoExtractor):
         result = []
         len_parts = len(parts)
         if len_parts > 1:
-            self.to_screen(u'%s: found %s parts' % (video_id, len_parts))
+            self.to_screen('%s: found %s parts' % (video_id, len_parts))
         for part in parts:
             part_id = part['k']
             final_url = self._url_for_id(part_id, quality)
index 306fe89741cce8b3c281c94349be266f221028b3..2a1ae5a717cf7b2af16bf5a1ce3ef7494e28a7a6 100644 (file)
@@ -4,9 +4,6 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-)
 
 
 class TumblrIE(InfoExtractor):
@@ -18,7 +15,7 @@ class TumblrIE(InfoExtractor):
             'id': '54196191430',
             'ext': 'mp4',
             'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...',
-            'description': 'md5:dfac39636969fe6bf1caa2d50405f069',
+            'description': 'md5:37db8211e40b50c7c44e95da14f630b7',
             'thumbnail': 're:http://.*\.jpg',
         }
     }, {
@@ -27,7 +24,7 @@ class TumblrIE(InfoExtractor):
         'info_dict': {
             'id': '90208453769',
             'ext': 'mp4',
-            'title': '5SOS STRUM ;)',
+            'title': '5SOS STRUM ;]',
             'description': 'md5:dba62ac8639482759c8eb10ce474586a',
             'thumbnail': 're:http://.*\.jpg',
         }
@@ -41,18 +38,12 @@ class TumblrIE(InfoExtractor):
         url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
         webpage = self._download_webpage(url, video_id)
 
-        re_video = r'src=\\x22(?P<video_url>http://%s\.tumblr\.com/video_file/%s/(.*?))\\x22 type=\\x22video/(?P<ext>.*?)\\x22' % (blog, video_id)
-        video = re.search(re_video, webpage)
-        if video is None:
-            raise ExtractorError('Unable to extract video')
-        video_url = video.group('video_url')
-        ext = video.group('ext')
-
-        video_thumbnail = self._search_regex(
-            r'posters.*?\[\\x22(.*?)\\x22',
-            webpage, 'thumbnail', fatal=False)  # We pick the first poster
-        if video_thumbnail:
-            video_thumbnail = video_thumbnail.replace('\\\\/', '/')
+        iframe_url = self._search_regex(
+            r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
+            webpage, 'iframe url')
+        iframe = self._download_webpage(iframe_url, video_id)
+        video_url = self._search_regex(r'<source src="([^"]+)"',
+                                       iframe, 'video url')
 
         # The only place where you can get a title, it's not complete,
         # but searching in other places doesn't work for all videos
@@ -62,9 +53,9 @@ class TumblrIE(InfoExtractor):
 
         return {
             'id': video_id,
-             'url': video_url,
-             'title': video_title,
-             'description': self._html_search_meta('description', webpage),
-             'thumbnail': video_thumbnail,
-             'ext': ext,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': video_title,
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
         }
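
The Tumblr rewrite replaces scraping of escaped inline JavaScript with a two-hop fetch: locate the tumblr.com/video iframe on the post page, then read the `<source src=...>` out of the iframe markup. The regex pair in isolation, against invented HTML snippets:

    import re

    post_html = "<iframe src='https://www.tumblr.com/video/blog/54196191430/500/'></iframe>"
    iframe_url = re.search(
        r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'', post_html).group(1)

    iframe_html = '<video><source src="https://vt.example.com/video.mp4" type="video/mp4">'
    video_url = re.search(r'<source src="([^"]+)"', iframe_html).group(1)
    assert video_url.endswith('.mp4')
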
diff --git a/youtube_dl/extractor/tunein.py b/youtube_dl/extractor/tunein.py
new file mode 100644 (file)
index 0000000..4ce5aee
--- /dev/null
@@ -0,0 +1,99 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class TuneInIE(InfoExtractor):
+    _VALID_URL = r'''(?x)https?://(?:www\.)?
+    (?:
+        tunein\.com/
+        (?:
+            radio/.*?-s|
+            station/.*?StationId\=
+        )(?P<id>[0-9]+)
+        |tun\.in/(?P<redirect_id>[A-Za-z0-9]+)
+    )
+    '''
+    _API_URL_TEMPLATE = 'http://tunein.com/tuner/tune/?stationId={0:}&tuneType=Station'
+
+    _INFO_DICT = {
+        'id': '34682',
+        'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2',
+        'ext': 'AAC',
+        'thumbnail': 're:^https?://.*\.png$',
+        'location': 'Tacoma, WA',
+    }
+    _TESTS = [
+        {
+            'url': 'http://tunein.com/radio/Jazz24-885-s34682/',
+            'info_dict': _INFO_DICT,
+            'params': {
+                'skip_download': True,  # live stream
+            },
+        },
+        {  # test redirection
+            'url': 'http://tun.in/ser7s',
+            'info_dict': _INFO_DICT,
+            'params': {
+                'skip_download': True,  # live stream
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        redirect_id = mobj.group('redirect_id')
+        if redirect_id:
+            # The server doesn't support HEAD requests
+            urlh = self._request_webpage(
+                url, redirect_id, note='Downloading redirect page')
+            url = urlh.geturl()
+            self.to_screen('Following redirect: %s' % url)
+            mobj = re.match(self._VALID_URL, url)
+        station_id = mobj.group('id')
+
+        station_info = self._download_json(
+            self._API_URL_TEMPLATE.format(station_id),
+            station_id, note='Downloading station JSON')
+
+        title = station_info['Title']
+        thumbnail = station_info.get('Logo')
+        location = station_info.get('Location')
+        streams_url = station_info.get('StreamUrl')
+        if not streams_url:
+            raise ExtractorError('No downloadable streams found',
+                                 expected=True)
+        stream_data = self._download_webpage(
+            streams_url, station_id, note='Downloading stream data')
+        streams = json.loads(self._search_regex(
+            r'\((.*)\);', stream_data, 'stream info'))['Streams']
+
+        is_live = None
+        formats = []
+        for stream in streams:
+            if stream.get('Type') == 'Live':
+                is_live = True
+            formats.append({
+                'abr': stream.get('Bandwidth'),
+                'ext': stream.get('MediaType'),
+                'acodec': stream.get('MediaType'),
+                'vcodec': 'none',
+                'url': stream.get('Url'),
+                # Sometimes streams with the highest quality do not exist
+                'preference': stream.get('Reliability'),
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': station_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': thumbnail,
+            'location': location,
+            'is_live': is_live,
+        }
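
TuneIn's stream list comes back JSONP-wrapped, which is why the extractor pulls the parenthesised payload out with a regex before `json.loads`. The unwrap step on its own, with an invented callback name and payload:

    import json
    import re

    jsonp = 'someCallback({"Streams": [{"Type": "Live", "MediaType": "mp3"}]});'
    payload = re.search(r'\((.*)\);', jsonp).group(1)
    streams = json.loads(payload)['Streams']
    assert streams[0]['Type'] == 'Live'
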
index d516b6427bd271fa8f7e1129cdbbcd9dda692ae1..4de0aac523313eced334aab38a9a20c7bf08dfc7 100644 (file)
@@ -1,10 +1,9 @@
 from __future__ import unicode_literals
 
 import base64
-import re
 
 from .common import InfoExtractor
-from ..utils import compat_parse_qs
+from ..compat import compat_parse_qs
 
 
 class TutvIE(InfoExtractor):
@@ -20,10 +19,9 @@ class TutvIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
+
         internal_id = self._search_regex(r'codVideo=([0-9]+)', webpage, 'internal video ID')
 
         data_content = self._download_webpage(
index 27962b5fe146dd16e85f46e341725b4e30bf24e1..ba65996dc01646e019cfd5820aa36c1934365d9b 100644 (file)
@@ -1,32 +1,30 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     float_or_none,
-    str_to_int,
+    parse_age_limit,
 )
 
 
 class TvigleIE(InfoExtractor):
     IE_NAME = 'tvigle'
     IE_DESC = 'Интернет-телевидение Tvigle.ru'
-    _VALID_URL = r'http://(?:www\.)?tvigle\.ru/(?:[^/]+/)+(?P<display_id>[^/]+)/$'
+    _VALID_URL = r'http://(?:www\.)?tvigle\.ru/(?:[^/]+/)+(?P<id>[^/]+)/$'
 
     _TESTS = [
         {
-            'url': 'http://www.tvigle.ru/video/brat/',
-            'md5': 'ff4344a4894b0524441fb6f8218dc716',
+            'url': 'http://www.tvigle.ru/video/sokrat/',
+            'md5': '36514aed3657d4f70b4b2cef8eb520cd',
             'info_dict': {
-                'id': '5118490',
-                'display_id': 'brat',
-                'ext': 'mp4',
-                'title': 'Брат',
-                'description': 'md5:d16ac7c0b47052ea51fddb92c4e413eb',
-                'duration': 5722.6,
-                'age_limit': 16,
+                'id': '1848932',
+                'display_id': 'sokrat',
+                'ext': 'flv',
+                'title': 'Сократ',
+                'description': 'md5:a05bd01be310074d5833efc6743be95e',
+                'duration': 6586,
+                'age_limit': 0,
             },
         },
         {
@@ -44,8 +42,7 @@ class TvigleIE(InfoExtractor):
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('display_id')
+        display_id = self._match_id(url)
 
         webpage = self._download_webpage(url, display_id)
 
@@ -60,8 +57,8 @@ class TvigleIE(InfoExtractor):
         title = item['title']
         description = item['description']
         thumbnail = item['thumbnail']
-        duration = float_or_none(item['durationMilliseconds'], 1000)
-        age_limit = str_to_int(item['ageRestrictions'])
+        duration = float_or_none(item.get('durationMilliseconds'), 1000)
+        age_limit = parse_age_limit(item.get('ageRestrictions'))
 
         formats = []
         for vcodec, fmts in item['videos'].items():
@@ -84,4 +81,4 @@ class TvigleIE(InfoExtractor):
             'duration': duration,
             'age_limit': age_limit,
             'formats': formats,
-        }
\ No newline at end of file
+        }
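
The Tvigle change swaps hard indexing and `str_to_int` for `item.get(...)` plus tolerant helpers, so missing metadata degrades to `None` instead of raising. Rough approximations of the two helpers (simplified, not the library source):

    import re

    def float_or_none(v, scale=1):
        # None propagates instead of raising TypeError.
        return float(v) / scale if v is not None else None

    def parse_age_limit(s):
        # Accepts None or strings like '16+'; unparseable input yields None.
        if s is None:
            return None
        m = re.match(r'^(?P<age>\d{1,2})\+?$', str(s).strip())
        return int(m.group('age')) if m else None

    assert float_or_none(6586000, 1000) == 6586.0
    assert float_or_none(None, 1000) is None
    assert parse_age_limit('16+') == 16
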
index bfed9dd042bf3e170f1af394954d64dbdadea0ac..a645800057fc6dc88850885cba7737243c95574e 100644 (file)
@@ -1,40 +1,35 @@
-import json
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
 
 
 class TvpIE(InfoExtractor):
-    IE_NAME = u'tvp.pl'
+    IE_NAME = 'tvp.pl'
     _VALID_URL = r'https?://www\.tvp\.pl/.*?wideo/(?P<date>\d+)/(?P<id>\d+)'
 
     _TEST = {
-        u'url': u'http://www.tvp.pl/warszawa/magazyny/campusnews/wideo/31102013/12878238',
-        u'md5': u'148408967a6a468953c0a75cbdaf0d7a',
-        u'file': u'12878238.wmv',
-        u'info_dict': {
-            u'title': u'31.10.2013 - Odcinek 2',
-            u'description': u'31.10.2013 - Odcinek 2',
+        'url': 'http://www.tvp.pl/warszawa/magazyny/campusnews/wideo/31102013/12878238',
+        'md5': '148408967a6a468953c0a75cbdaf0d7a',
+        'info_dict': {
+            'id': '12878238',
+            'ext': 'wmv',
+            'title': '31.10.2013 - Odcinek 2',
+            'description': '31.10.2013 - Odcinek 2',
         },
-        u'skip': u'Download has to use same server IP as extraction. Therefore, a good (load-balancing) DNS resolver will make the download fail.'
+        'skip': 'Download has to use same server IP as extraction. Therefore, a good (load-balancing) DNS resolver will make the download fail.'
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         json_url = 'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id
-        json_params = self._download_webpage(
-            json_url, video_id, u"Downloading video metadata")
-
-        params = json.loads(json_params)
-        self.report_extraction(video_id)
+        params = self._download_json(
+            json_url, video_id, "Downloading video metadata")
         video_url = params['video_url']
 
-        title = self._og_search_title(webpage, fatal=True)
         return {
             'id': video_id,
-            'title': title,
+            'title': self._og_search_title(webpage),
             'ext': 'wmv',
             'url': video_url,
             'description': self._og_search_description(webpage),
index 445e0ec419ccc7eb2e23e522f6f3eba6010dcd69..9a53a3c74143d72a14842ea70ce4063a8d28a30c 100644 (file)
@@ -4,9 +4,8 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
-    ExtractorError,
-    compat_str,
     parse_iso8601,
     qualities,
 )
@@ -176,15 +175,14 @@ class TVPlayIE(InfoExtractor):
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         video = self._download_json(
             'http://playapi.mtgx.tv/v1/videos/%s' % video_id, video_id, 'Downloading video JSON')
 
         if video['is_geo_blocked']:
-            raise ExtractorError(
-                'This content is not available in your country due to copyright reasons', expected=True)
+            self.report_warning(
+                'This content might not be available in your country due to copyright reasons')
 
         streams = self._download_json(
             'http://playapi.mtgx.tv/v1/videos/stream/%s' % video_id, video_id, 'Downloading streams JSON')
@@ -208,6 +206,10 @@ class TVPlayIE(InfoExtractor):
                     'app': m.group('app'),
                     'play_path': m.group('playpath'),
                 })
+            elif video_url.endswith('.f4m'):
+                formats.extend(self._extract_f4m_formats(
+                    video_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id))
+                continue
             else:
                 fmt.update({
                     'url': video_url,
diff --git a/youtube_dl/extractor/twentyfourvideo.py b/youtube_dl/extractor/twentyfourvideo.py
new file mode 100644 (file)
index 0000000..67e8bfe
--- /dev/null
@@ -0,0 +1,109 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_iso8601,
+    int_or_none,
+)
+
+
+class TwentyFourVideoIE(InfoExtractor):
+    IE_NAME = '24video'
+    _VALID_URL = r'https?://(?:www\.)?24video\.net/(?:video/(?:view|xml)/|player/new24_play\.swf\?id=)(?P<id>\d+)'
+
+    _TESTS = [
+        {
+            'url': 'http://www.24video.net/video/view/1044982',
+            'md5': '48dd7646775690a80447a8dca6a2df76',
+            'info_dict': {
+                'id': '1044982',
+                'ext': 'mp4',
+                'title': 'Эротика каменного века',
+                'description': 'Как смотрели порно в каменном веке.',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'uploader': 'SUPERTELO',
+                'duration': 31,
+                'timestamp': 1275937857,
+                'upload_date': '20100607',
+                'age_limit': 18,
+                'like_count': int,
+                'dislike_count': int,
+            },
+        },
+        {
+            'url': 'http://www.24video.net/player/new24_play.swf?id=1044982',
+            'only_matching': True,
+        }
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(
+            'http://www.24video.net/video/view/%s' % video_id, video_id)
+
+        title = self._og_search_title(webpage)
+        description = self._html_search_regex(
+            r'<span itemprop="description">([^<]+)</span>', webpage, 'description', fatal=False)
+        thumbnail = self._og_search_thumbnail(webpage)
+        duration = int_or_none(self._og_search_property(
+            'duration', webpage, 'duration', fatal=False))
+        timestamp = parse_iso8601(self._search_regex(
+            r'<time id="video-timeago" datetime="([^"]+)" itemprop="uploadDate">',
+            webpage, 'upload date'))
+
+        uploader = self._html_search_regex(
+            r'Загрузил\s*<a href="/jsecUser/movies/[^"]+" class="link">([^<]+)</a>',
+            webpage, 'uploader', fatal=False)
+
+        view_count = int_or_none(self._html_search_regex(
+            r'<span class="video-views">(\d+) просмотр',
+            webpage, 'view count', fatal=False))
+        comment_count = int_or_none(self._html_search_regex(
+            r'<div class="comments-title" id="comments-count">(\d+) комментари',
+            webpage, 'comment count', fatal=False))
+
+        formats = []
+
+        pc_video = self._download_xml(
+            'http://www.24video.net/video/xml/%s?mode=play' % video_id,
+            video_id, 'Downloading PC video URL').find('.//video')
+
+        formats.append({
+            'url': pc_video.attrib['url'],
+            'format_id': 'pc',
+            'quality': 1,
+        })
+
+        like_count = int_or_none(pc_video.get('ratingPlus'))
+        dislike_count = int_or_none(pc_video.get('ratingMinus'))
+        age_limit = 18 if pc_video.get('adult') == 'true' else 0
+
+        mobile_video = self._download_xml(
+            'http://www.24video.net/video/xml/%s' % video_id,
+            video_id, 'Downloading mobile video URL').find('.//video')
+
+        formats.append({
+            'url': mobile_video.attrib['url'],
+            'format_id': 'mobile',
+            'quality': 0,
+        })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'duration': duration,
+            'timestamp': timestamp,
+            'view_count': view_count,
+            'comment_count': comment_count,
+            'like_count': like_count,
+            'dislike_count': dislike_count,
+            'age_limit': age_limit,
+            'formats': formats,
+        }
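
The 24video extractor builds exactly two formats, 'pc' and 'mobile', and ranks them with the integer `quality` key; youtube-dl orders format lists worst-first, so a higher `quality` sorts later. The ordering rule in isolation, assuming `quality` is the only differing key as in the code above:

    formats = [
        {'url': 'http://example.com/pc.mp4', 'format_id': 'pc', 'quality': 1},
        {'url': 'http://example.com/mobile.mp4', 'format_id': 'mobile', 'quality': 0},
    ]
    formats.sort(key=lambda f: f['quality'])  # worst first, as _sort_formats does
    assert [f['format_id'] for f in formats] == ['mobile', 'pc']
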
diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
new file mode 100644 (file)
index 0000000..715f993
--- /dev/null
@@ -0,0 +1,231 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import itertools
+import re
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_parse,
+    compat_urllib_request,
+)
+from ..utils import (
+    ExtractorError,
+    parse_iso8601,
+)
+
+
+class TwitchIE(InfoExtractor):
+    # TODO: One broadcast may be split into multiple videos. The key
+    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
+    # starts at 1 and increases. Can we treat all parts as one video?
+    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?twitch\.tv/
+        (?:
+            (?P<channelid>[^/]+)|
+            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
+            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
+        )
+        /?(?:\#.*)?$
+        """
+    _PAGE_LIMIT = 100
+    _API_BASE = 'https://api.twitch.tv'
+    _LOGIN_URL = 'https://secure.twitch.tv/user/login'
+    _TESTS = [{
+        'url': 'http://www.twitch.tv/riotgames/b/577357806',
+        'info_dict': {
+            'id': 'a577357806',
+            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
+        },
+        'playlist_mincount': 12,
+    }, {
+        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
+        'info_dict': {
+            'id': 'c5285812',
+            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
+        },
+        'playlist_mincount': 3,
+    }, {
+        'url': 'http://www.twitch.tv/vanillatv',
+        'info_dict': {
+            'id': 'vanillatv',
+            'title': 'VanillaTV',
+        },
+        'playlist_mincount': 412,
+    }]
+
+    def _handle_error(self, response):
+        if not isinstance(response, dict):
+            return
+        error = response.get('error')
+        if error:
+            raise ExtractorError(
+                '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
+                expected=True)
+
+    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
+        response = super(TwitchIE, self)._download_json(url, video_id, note)
+        self._handle_error(response)
+        return response
+
+    def _extract_media(self, item, item_id):
+        ITEMS = {
+            'a': 'video',
+            'c': 'chapter',
+        }
+        info = self._extract_info(self._download_json(
+            '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
+            'Downloading %s info JSON' % ITEMS[item]))
+        response = self._download_json(
+            '%s/api/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
+            'Downloading %s playlist JSON' % ITEMS[item])
+        entries = []
+        chunks = response['chunks']
+        qualities = list(chunks.keys())
+        for num, fragment in enumerate(zip(*chunks.values()), start=1):
+            formats = []
+            for fmt_num, fragment_fmt in enumerate(fragment):
+                format_id = qualities[fmt_num]
+                fmt = {
+                    'url': fragment_fmt['url'],
+                    'format_id': format_id,
+                    'quality': 1 if format_id == 'live' else 0,
+                }
+                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
+                if m:
+                    fmt['height'] = int(m.group('height'))
+                formats.append(fmt)
+            self._sort_formats(formats)
+            entry = dict(info)
+            entry['id'] = '%s_%d' % (entry['id'], num)
+            entry['title'] = '%s part %d' % (entry['title'], num)
+            entry['formats'] = formats
+            entries.append(entry)
+        return self.playlist_result(entries, info['id'], info['title'])
+
+    def _extract_info(self, info):
+        return {
+            'id': info['_id'],
+            'title': info['title'],
+            'description': info['description'],
+            'duration': info['length'],
+            'thumbnail': info['preview'],
+            'uploader': info['channel']['display_name'],
+            'uploader_id': info['channel']['name'],
+            'timestamp': parse_iso8601(info['recorded_at']),
+            'view_count': info['views'],
+        }
+
+    def _real_initialize(self):
+        self._login()
+
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+
+        login_page = self._download_webpage(
+            self._LOGIN_URL, None, 'Downloading login page')
+
+        authenticity_token = self._search_regex(
+            r'<input name="authenticity_token" type="hidden" value="([^"]+)"',
+            login_page, 'authenticity token')
+
+        login_form = {
+            'utf8': '✓'.encode('utf-8'),
+            'authenticity_token': authenticity_token,
+            'redirect_on_login': '',
+            'embed_form': 'false',
+            'mp_source_action': '',
+            'follow': '',
+            'user[login]': username,
+            'user[password]': password,
+        }
+
+        request = compat_urllib_request.Request(
+            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+        request.add_header('Referer', self._LOGIN_URL)
+        response = self._download_webpage(
+            request, None, 'Logging in as %s' % username)
+
+        m = re.search(
+            r"id=([\"'])login_error_message\1[^>]*>(?P<msg>[^<]+)", response)
+        if m:
+            raise ExtractorError(
+                'Unable to login: %s' % m.group('msg').strip(), expected=True)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj.group('chapterid'):
+            return self._extract_media('c', mobj.group('chapterid'))
+
+            """
+            webpage = self._download_webpage(url, chapter_id)
+            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
+            if not m:
+                raise ExtractorError('Cannot find archive of a chapter')
+            archive_id = m.group(1)
+
+            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
+            doc = self._download_xml(
+                api, chapter_id,
+                note='Downloading chapter information',
+                errnote='Chapter information download failed')
+            for a in doc.findall('.//archive'):
+                if archive_id == a.find('./id').text:
+                    break
+            else:
+                raise ExtractorError('Could not find chapter in chapter information')
+
+            video_url = a.find('./video_file_url').text
+            video_ext = video_url.rpartition('.')[2] or 'flv'
+
+            chapter_api_url = 'https://api.twitch.tv/kraken/videos/c' + chapter_id
+            chapter_info = self._download_json(
+                chapter_api_url, 'c' + chapter_id,
+                note='Downloading chapter metadata',
+                errnote='Download of chapter metadata failed')
+
+            bracket_start = int(doc.find('.//bracket_start').text)
+            bracket_end = int(doc.find('.//bracket_end').text)
+
+            # TODO determine start (and probably fix up file)
+            #  youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
+            #video_url += '?start=' + TODO:start_timestamp
+            # bracket_start is 13290, but we want 51670615
+            self._downloader.report_warning('Chapter detected, but we can just download the whole file. '
+                                            'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
+
+            info = {
+                'id': 'c' + chapter_id,
+                'url': video_url,
+                'ext': video_ext,
+                'title': chapter_info['title'],
+                'thumbnail': chapter_info['preview'],
+                'description': chapter_info['description'],
+                'uploader': chapter_info['channel']['display_name'],
+                'uploader_id': chapter_info['channel']['name'],
+            }
+            return info
+            """
+        elif mobj.group('videoid'):
+            return self._extract_media('a', mobj.group('videoid'))
+        elif mobj.group('channelid'):
+            channel_id = mobj.group('channelid')
+            info = self._download_json(
+                '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
+                channel_id, 'Downloading channel info JSON')
+            channel_name = info.get('display_name') or info.get('name')
+            entries = []
+            offset = 0
+            limit = self._PAGE_LIMIT
+            for counter in itertools.count(1):
+                response = self._download_json(
+                    '%s/kraken/channels/%s/videos/?offset=%d&limit=%d'
+                    % (self._API_BASE, channel_id, offset, limit),
+                    channel_id, 'Downloading channel videos JSON page %d' % counter)
+                videos = response['videos']
+                if not videos:
+                    break
+                entries.extend([self.url_result(video['url'], 'Twitch') for video in videos])
+                offset += limit
+            return self.playlist_result(entries, channel_id, channel_name)
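
The channel branch pages through `/videos/?offset=...&limit=...` until the API returns an empty page. The same loop shape with the HTTP layer replaced by a toy `fetch` (250 pretend videos):

    import itertools

    PAGE_LIMIT = 100

    def fetch(offset, limit):
        # Stand-in for _download_json against the kraken videos endpoint.
        videos = ['video-%d' % i for i in range(250)]
        return {'videos': videos[offset:offset + limit]}

    entries = []
    offset = 0
    for counter in itertools.count(1):
        page = fetch(offset, PAGE_LIMIT)
        if not page['videos']:
            break
        entries.extend(page['videos'])
        offset += PAGE_LIMIT
    assert len(entries) == 250
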
index 054f427252341306edd7698e6d150bcb619b64fc..4667ed83b71f4aec5f081741834e2c9cca010e82 100644 (file)
@@ -3,9 +3,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -40,8 +42,24 @@ class UdemyIE(InfoExtractor):
                 error_str += ' - %s' % error_data.get('formErrors')
             raise ExtractorError(error_str, expected=True)
 
-    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
-        response = super(UdemyIE, self)._download_json(url, video_id, note)
+    def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata'):
+        headers = {
+            'X-Udemy-Snail-Case': 'true',
+            'X-Requested-With': 'XMLHttpRequest',
+        }
+        for cookie in self._downloader.cookiejar:
+            if cookie.name == 'client_id':
+                headers['X-Udemy-Client-Id'] = cookie.value
+            elif cookie.name == 'access_token':
+                headers['X-Udemy-Bearer-Token'] = cookie.value
+
+        if isinstance(url_or_request, compat_urllib_request.Request):
+            for header, value in headers.items():
+                url_or_request.add_header(header, value)
+        else:
+            url_or_request = compat_urllib_request.Request(url_or_request, headers=headers)
+
+        response = super(UdemyIE, self)._download_json(url_or_request, video_id, note)
         self._handle_error(response)
         return response
 
@@ -62,7 +80,9 @@ class UdemyIE(InfoExtractor):
         if login_popup == '<div class="run-command close-popup redirect" data-url="https://www.udemy.com/"></div>':
             return
 
-        csrf = self._html_search_regex(r'<input type="hidden" name="csrf" value="(.+?)"', login_popup, 'csrf token')
+        csrf = self._html_search_regex(
+            r'<input type="hidden" name="csrf" value="(.+?)"',
+            login_popup, 'csrf token')
 
         login_form = {
             'email': username,
@@ -71,42 +91,49 @@ class UdemyIE(InfoExtractor):
             'displayType': 'json',
             'isSubmitted': '1',
         }
-        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
-        response = self._download_json(request, None, 'Logging in as %s' % username)
+        request = compat_urllib_request.Request(
+            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+        response = self._download_json(
+            request, None, 'Logging in as %s' % username)
 
         if 'returnUrl' not in response:
             raise ExtractorError('Unable to log in')
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        lecture_id = mobj.group('id')
+        lecture_id = self._match_id(url)
 
         lecture = self._download_json(
-            'https://www.udemy.com/api-1.1/lectures/%s' % lecture_id, lecture_id, 'Downloading lecture JSON')
+            'https://www.udemy.com/api-1.1/lectures/%s' % lecture_id,
+            lecture_id, 'Downloading lecture JSON')
 
-        if lecture['assetType'] != 'Video':
-            raise ExtractorError('Lecture %s is not a video' % lecture_id, expected=True)
+        asset_type = lecture.get('assetType') or lecture.get('asset_type')
+        if asset_type != 'Video':
+            raise ExtractorError(
+                'Lecture %s is not a video' % lecture_id, expected=True)
 
         asset = lecture['asset']
 
-        stream_url = asset['streamUrl']
+        stream_url = asset.get('streamUrl') or asset.get('stream_url')
         mobj = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url)
         if mobj:
             return self.url_result(mobj.group(1), 'Youtube')
 
         video_id = asset['id']
-        thumbnail = asset['thumbnailUrl']
+        thumbnail = asset.get('thumbnailUrl') or asset.get('thumbnail_url')
         duration = asset['data']['duration']
 
-        download_url = asset['downloadUrl']
+        download_url = asset.get('downloadUrl') or asset.get('download_url')
+
+        video = download_url.get('Video') or download_url.get('video')
+        video_480p = download_url.get('Video480p') or download_url.get('video_480p')
 
         formats = [
             {
-                'url': download_url['Video480p'][0],
+                'url': video_480p[0],
                 'format_id': '360p',
             },
             {
-                'url': download_url['Video'][0],
+                'url': video[0],
                 'format_id': '720p',
             },
         ]
@@ -140,25 +167,29 @@ class UdemyCourseIE(UdemyIE):
         course_path = mobj.group('coursepath')
 
         response = self._download_json(
-            'https://www.udemy.com/api-1.1/courses/%s' % course_path, course_path, 'Downloading course JSON')
+            'https://www.udemy.com/api-1.1/courses/%s' % course_path,
+            course_path, 'Downloading course JSON')
 
         course_id = int(response['id'])
         course_title = response['title']
 
         webpage = self._download_webpage(
-            'https://www.udemy.com/course/subscribe/?courseId=%s' % course_id, course_id, 'Enrolling in the course')
+            'https://www.udemy.com/course/subscribe/?courseId=%s' % course_id,
+            course_id, 'Enrolling in the course')
 
         if self._SUCCESSFULLY_ENROLLED in webpage:
             self.to_screen('%s: Successfully enrolled in' % course_id)
         elif self._ALREADY_ENROLLED in webpage:
             self.to_screen('%s: Already enrolled in' % course_id)
 
-        response = self._download_json('https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id,
+        response = self._download_json(
+            'https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id,
             course_id, 'Downloading course curriculum')
 
         entries = [
-            self.url_result('https://www.udemy.com/%s/#/lecture/%s' % (course_path, asset['id']), 'Udemy')
-            for asset in response if asset.get('assetType') == 'Video'
+            self.url_result(
+                'https://www.udemy.com/%s/#/lecture/%s' % (course_path, asset['id']), 'Udemy')
+            for asset in response if (asset.get('assetType') or asset.get('asset_type')) == 'Video'
         ]
 
-        return self.playlist_result(entries, course_id, course_title)
\ No newline at end of file
+        return self.playlist_result(entries, course_id, course_title)
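
Since the `X-Udemy-Snail-Case` header can flip the API between camelCase and snake_case responses, every field above is read through an either/or `.get()` chain. One way to express the same fallback generically (a hypothetical helper, not part of the codebase):

    def get_either(d, *keys):
        # First non-empty value among several candidate key spellings.
        for k in keys:
            v = d.get(k)
            if v:
                return v
        return None

    asset = {'stream_url': 'http://example.com/s.m3u8'}
    assert get_either(asset, 'streamUrl', 'stream_url') == 'http://example.com/s.m3u8'

Note that the bare chain needs parentheses as soon as it is compared against something, which is why the curriculum filter above groups `(asset.get('assetType') or asset.get('asset_type')) == 'Video'`.
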
index 5d06fcc9e65690ae7f9a474e274e7f8c14ce4a73..8872cfcb2795ab0bfb9db1ad5418eb61dd0dffc6 100644 (file)
@@ -1,11 +1,11 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
+)
+from ..utils import (
     unified_strdate,
 )
 
@@ -18,11 +18,10 @@ class UrortIE(InfoExtractor):
         'url': 'https://urort.p3.no/#!/Band/Gerilja',
         'md5': '5ed31a924be8a05e47812678a86e127b',
         'info_dict': {
-            'id': '33124-4',
+            'id': '33124-24',
             'ext': 'mp3',
             'title': 'The Bomb',
             'thumbnail': 're:^https?://.+\.jpg',
-            'like_count': int,
             'uploader': 'Gerilja',
             'uploader_id': 'Gerilja',
             'upload_date': '20100323',
@@ -33,25 +32,31 @@ class UrortIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        playlist_id = mobj.group('id')
+        playlist_id = self._match_id(url)
 
         fstr = compat_urllib_parse.quote("InternalBandUrl eq '%s'" % playlist_id)
-        json_url = 'http://urort.p3.no/breeze/urort/TrackDtos?$filter=' + fstr
+        json_url = 'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter=%s&$orderby=Released%%20desc&$expand=Tags%%2CFiles' % fstr
         songs = self._download_json(json_url, playlist_id)
-        print(songs[0])
-
-        entries = [{
-            'id': '%d-%s' % (s['BandId'], s['$id']),
-            'title': s['Title'],
-            'url': s['TrackUrl'],
-            'ext': 'mp3',
-            'uploader_id': playlist_id,
-            'uploader': s.get('BandName', playlist_id),
-            'like_count': s.get('LikeCount'),
-            'thumbnail': 'http://urort.p3.no/cloud/images/%s' % s['Image'],
-            'upload_date': unified_strdate(s.get('Released')),
-        } for s in songs]
+        entries = []
+        for s in songs:
+            formats = [{
+                'tbr': f.get('Quality'),
+                'ext': f['FileType'],
+                'format_id': '%s-%s' % (f['FileType'], f.get('Quality', '')),
+                'url': 'http://p3urort.blob.core.windows.net/tracks/%s' % f['FileRef'],
+                'preference': 3 if f['FileType'] == 'mp3' else 2,
+            } for f in s['Files']]
+            self._sort_formats(formats)
+            e = {
+                'id': '%d-%s' % (s['BandId'], s['$id']),
+                'title': s['Title'],
+                'uploader_id': playlist_id,
+                'uploader': s.get('BandName', playlist_id),
+                'thumbnail': 'http://urort.p3.no/cloud/images/%s' % s['Image'],
+                'upload_date': unified_strdate(s.get('Released')),
+                'formats': formats,
+            }
+            entries.append(e)
 
         return {
             '_type': 'playlist',
index 994b60a76b88ef4d7ff8be630c2bafbd989e2c96..68d03b99905cce848eb38fde8b6d8e643c548105 100644 (file)
@@ -3,9 +3,8 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urlparse,
-    get_meta_content,
 )
 
 
@@ -46,13 +45,13 @@ class UstreamIE(InfoExtractor):
         self.report_extraction(video_id)
 
         video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
-            webpage, 'title')
+                                              webpage, 'title')
 
         uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
-            webpage, 'uploader', fatal=False, flags=re.DOTALL)
+                                           webpage, 'uploader', fatal=False, flags=re.DOTALL)
 
         thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
-            webpage, 'thumbnail', fatal=False)
+                                            webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
@@ -72,14 +71,14 @@ class UstreamChannelIE(InfoExtractor):
         'info_dict': {
             'id': '10874166',
         },
-        'playlist_mincount': 54,
+        'playlist_mincount': 17,
     }
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
         display_id = m.group('slug')
         webpage = self._download_webpage(url, display_id)
-        channel_id = get_meta_content('ustream:channel_id', webpage)
+        channel_id = self._html_search_meta('ustream:channel_id', webpage)
 
         BASE = 'http://www.ustream.tv'
         next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
index ebd64f0f54df23fca1d243e32dc8071fcfcbfb1d..dd026748dcbb536f9f49181b0d211bf0a9157777 100644 (file)
@@ -1,19 +1,18 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
-
+)
+from ..utils import (
     ExtractorError,
 )
 
 
 class Vbox7IE(InfoExtractor):
-    _VALID_URL = r'http://(www\.)?vbox7\.com/play:(?P<id>[^/]+)'
+    _VALID_URL = r'http://(?:www\.)?vbox7\.com/play:(?P<id>[^/]+)'
     _TEST = {
         'url': 'http://vbox7.com/play:249bb972c2',
         'md5': '99f65c0c9ef9b682b97313e052734c3f',
@@ -25,18 +24,17 @@ class Vbox7IE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         redirect_page, urlh = self._download_webpage_handle(url, video_id)
         new_location = self._search_regex(r'window\.location = \'(.*)\';',
-            redirect_page, 'redirect location')
+                                          redirect_page, 'redirect location')
         redirect_url = urlh.geturl() + new_location
         webpage = self._download_webpage(redirect_url, video_id,
-            'Downloading redirect page')
+                                         'Downloading redirect page')
 
         title = self._html_search_regex(r'<title>(.*)</title>',
-            webpage, 'title').split('/')[0].strip()
+                                        webpage, 'title').split('/')[0].strip()
 
         info_url = "http://vbox7.com/play/magare.do"
         data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
index 77b1f91ce3636cbb8f805f60ff06d95852cf4940..815f5846804a3cebcf716d8d20b6373e44fd14fd 100644 (file)
@@ -4,10 +4,12 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urlparse,
-    get_element_by_id,
+)
+from ..utils import (
     clean_html,
+    get_element_by_id,
 )
 
 
@@ -26,8 +28,7 @@ class VeeHDIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         # VeeHD seems to send garbage on the first request.
         # See https://github.com/rg3/youtube-dl/issues/2102
@@ -48,11 +49,11 @@ class VeeHDIE(InfoExtractor):
         video_url = compat_urlparse.unquote(config['clip']['url'])
         title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0])
         uploader_id = self._html_search_regex(r'<a href="/profile/\d+">(.+?)</a>',
-            webpage, 'uploader')
+                                              webpage, 'uploader')
         thumbnail = self._search_regex(r'<img id="veehdpreview" src="(.+?)"',
-            webpage, 'thumbnail')
+                                       webpage, 'thumbnail')
         description = self._html_search_regex(r'<td class="infodropdown".*?<div>(.*?)<ul',
-            webpage, 'description', flags=re.DOTALL)
+                                              webpage, 'description', flags=re.DOTALL)
 
         return {
             '_type': 'video',
index a7953a7e7c5d33b154435cd7b4afa354994f4bf5..01e258e32218c227c5de3caf60588baab56e9045 100644 (file)
@@ -4,8 +4,10 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
+)
+from ..utils import (
     int_or_none,
     ExtractorError,
 )
index 27f9acb670b1b10ffa2ee0220f62dc411e49d8d7..a0c59a2e0e1cb8fca2e0e3eb3ec2e4edce2918bb 100644 (file)
@@ -112,10 +112,10 @@ class VestiIE(InfoExtractor):
         if mobj:
             video_id = mobj.group('id')
             page = self._download_webpage('http://www.vesti.ru/only_video.html?vid=%s' % video_id, video_id,
-                'Downloading video page')
+                                          'Downloading video page')
 
         rutv_url = RUTVIE._extract_url(page)
         if rutv_url:
             return self.url_result(rutv_url, 'RUTV')
 
-        raise ExtractorError('No video found', expected=True)
\ No newline at end of file
+        raise ExtractorError('No video found', expected=True)
index 5b1a3ec787ac6c99c0db4d37d5ff2c33c6950ef6..43f6b029da8ff5df7fe808c11a85f8a8120f8ca5 100644 (file)
@@ -4,8 +4,10 @@ import re
 import xml.etree.ElementTree
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
 )
 
@@ -13,7 +15,7 @@ from ..utils import (
 class VevoIE(InfoExtractor):
     """
     Accepts urls from vevo.com or in the format 'vevo:{id}'
-    (currently used by MTVIE)
+    (currently used by MTVIE and MySpaceIE)
     """
     _VALID_URL = r'''(?x)
         (?:https?://www\.vevo\.com/watch/(?:[^/]+/(?:[^/]+/)?)?|
index 96447007021f054fc155906511833aab5ac551c0..2f111bf7ee042de1fce790a3f0d0f13be7f1feff 100644 (file)
@@ -17,7 +17,7 @@ class VGTVIE(InfoExtractor):
             'info_dict': {
                 'id': '84196',
                 'ext': 'mp4',
-                'title': 'Hevnen er søt episode 10: Abu',
+                'title': 'Hevnen er søt: Episode 10 - Abu',
                 'description': 'md5:e25e4badb5f544b04341e14abdc72234',
                 'thumbnail': 're:^https?://.*\.jpg',
                 'duration': 648.000,
@@ -35,7 +35,7 @@ class VGTVIE(InfoExtractor):
                 'title': 'OPPTAK: VGTV følger EM-kvalifiseringen',
                 'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3',
                 'thumbnail': 're:^https?://.*\.jpg',
-                'duration': 9056.000,
+                'duration': 9103.0,
                 'timestamp': 1410113864,
                 'upload_date': '20140907',
                 'view_count': int,
@@ -67,9 +67,7 @@ class VGTVIE(InfoExtractor):
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         data = self._download_json(
             'http://svp.vg.no/svp/api/v1/vgtv/assets/%s?appName=vgtv-website' % video_id,
             video_id, 'Downloading media JSON')
@@ -116,4 +114,4 @@ class VGTVIE(InfoExtractor):
             'duration': float_or_none(data['duration'], 1000),
             'view_count': data['displays'],
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 2f77e38981c3607b6dceb00cac3bca514bf2fcd8..6be3774b7aa7367e9a067b417d7a749fece55d05 100644 (file)
@@ -121,4 +121,7 @@ class VH1IE(MTVIE):
         idoc = self._download_xml(
             doc_url, video_id,
             'Downloading info', transform_source=fix_xml_ampersands)
-        return [self._get_video_info(item) for item in idoc.findall('.//item')]
+        return self.playlist_result(
+            [self._get_video_info(item) for item in idoc.findall('.//item')],
+            playlist_id=video_id,
+        )
diff --git a/youtube_dl/extractor/vice.py b/youtube_dl/extractor/vice.py
new file mode 100644 (file)
index 0000000..71f520f
--- /dev/null
@@ -0,0 +1,37 @@
+from __future__ import unicode_literals
+import re
+
+from .common import InfoExtractor
+from .ooyala import OoyalaIE
+from ..utils import ExtractorError
+
+
+class ViceIE(InfoExtractor):
+    _VALID_URL = r'http://www\.vice\.com/.*?/(?P<name>.+)'
+
+    _TEST = {
+        'url': 'http://www.vice.com/Fringes/cowboy-capitalists-part-1',
+        'info_dict': {
+            'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp',
+            'ext': 'mp4',
+            'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
+        },
+        'params': {
+            # Requires ffmpeg (m3u8 manifest)
+            'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        name = mobj.group('name')
+        webpage = self._download_webpage(url, name)
+        try:
+            embed_code = self._search_regex(
+                r'embedCode=([^&\'"]+)', webpage,
+                'ooyala embed code')
+            ooyala_url = OoyalaIE._url_for_embed_code(embed_code)
+        except ExtractorError:
+            raise ExtractorError('The page doesn\'t contain a video', expected=True)
+        return self.url_result(ooyala_url, ie='Ooyala')
index 9328ef4a2121f091c256e9324d0de0e8b7dcbecd..0faa729c60f916d69b885cfc76580104b226f84b 100644 (file)
@@ -1,55 +1,85 @@
-import json
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
+from ..utils import (
+    float_or_none,
+    int_or_none,
+)
 
 
 class ViddlerIE(InfoExtractor):
-    _VALID_URL = r'(?P<domain>https?://(?:www\.)?viddler\.com)/(?:v|embed|player)/(?P<id>[a-z0-9]+)'
+    _VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)'
     _TEST = {
-        u"url": u"http://www.viddler.com/v/43903784",
-        u'file': u'43903784.mp4',
-        u'md5': u'fbbaedf7813e514eb7ca30410f439ac9',
-        u'info_dict': {
-            u"title": u"Video Made Easy",
-            u"uploader": u"viddler",
-            u"duration": 100.89,
+        "url": "http://www.viddler.com/v/43903784",
+        'md5': 'ae43ad7cb59431ce043f0ff7fa13cbf4',
+        'info_dict': {
+            'id': '43903784',
+            'ext': 'mp4',
+            "title": "Video Made Easy",
+            'description': 'You don\'t need to be a professional to make high-quality video content. Viddler provides some quick and easy tips on how to produce great video content with limited resources. ',
+            "uploader": "viddler",
+            'timestamp': 1335371429,
+            'upload_date': '20120425',
+            "duration": 100.89,
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'view_count': int,
+            'categories': ['video content', 'high quality video', 'video made easy', 'how to produce video with limited resources', 'viddler'],
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        embed_url = mobj.group('domain') + u'/embed/' + video_id
-        webpage = self._download_webpage(embed_url, video_id)
-
-        video_sources_code = self._search_regex(
-            r"(?ms)sources\s*:\s*(\{.*?\})", webpage, u'video URLs')
-        video_sources = json.loads(video_sources_code.replace("'", '"'))
-
-        formats = [{
-            'url': video_url,
-            'format': format_id,
-        } for video_url, format_id in video_sources.items()]
-
-        title = self._html_search_regex(
-            r"title\s*:\s*'([^']*)'", webpage, u'title')
-        uploader = self._html_search_regex(
-            r"authorName\s*:\s*'([^']*)'", webpage, u'uploader', fatal=False)
-        duration_s = self._html_search_regex(
-            r"duration\s*:\s*([0-9.]*)", webpage, u'duration', fatal=False)
-        duration = float(duration_s) if duration_s else None
-        thumbnail = self._html_search_regex(
-            r"thumbnail\s*:\s*'([^']*)'",
-            webpage, u'thumbnail', fatal=False)
+        video_id = self._match_id(url)
+
+        json_url = (
+            'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?video_id=%s&key=v0vhrt7bg2xq1vyxhkct' %
+            video_id)
+        data = self._download_json(json_url, video_id)['video']
+
+        formats = []
+        for filed in data['files']:
+            if filed.get('status', 'ready') != 'ready':
+                continue
+            f = {
+                'format_id': filed['profile_id'],
+                'format_note': filed['profile_name'],
+                'url': self._proto_relative_url(filed['url']),
+                'width': int_or_none(filed.get('width')),
+                'height': int_or_none(filed.get('height')),
+                'filesize': int_or_none(filed.get('size')),
+                'ext': filed.get('ext'),
+                'source_preference': -1,
+            }
+            formats.append(f)
+
+            if filed.get('cdn_url'):
+                f = f.copy()
+                f['url'] = self._proto_relative_url(filed['cdn_url'])
+                f['format_id'] = filed['profile_id'] + '-cdn'
+                f['source_preference'] = 1
+                formats.append(f)
+
+            if filed.get('html5_video_source'):
+                f = f.copy()
+                f['url'] = self._proto_relative_url(
+                    filed['html5_video_source'])
+                f['format_id'] = filed['profile_id'] + '-html5'
+                f['source_preference'] = 0
+                formats.append(f)
+        self._sort_formats(formats)
+
+        categories = [
+            t.get('text') for t in data.get('tags', []) if 'text' in t]
 
         return {
             '_type': 'video',
             'id': video_id,
-            'title': title,
-            'thumbnail': thumbnail,
-            'uploader': uploader,
-            'duration': duration,
+            'title': data['title'],
             'formats': formats,
+            'description': data.get('description'),
+            'timestamp': int_or_none(data.get('upload_time')),
+            'thumbnail': self._proto_relative_url(data.get('thumbnail_url')),
+            'uploader': data.get('author'),
+            'duration': float_or_none(data.get('length')),
+            'view_count': int_or_none(data.get('view_count')),
+            'categories': categories,
         }
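
Each Viddler file entry can spawn up to three formats (direct, `-cdn`, `-html5`) that share width, height and filesize, hence the `f.copy()`-then-override pattern with `source_preference` breaking the tie between otherwise identical variants. In isolation:

    base = {'format_id': '1', 'url': '//cdn.example.com/v.mp4', 'height': 720,
            'source_preference': -1}
    formats = [base]

    cdn = base.copy()  # shared fields such as height survive the copy
    cdn.update(format_id='1-cdn', url='//edge.example.com/v.mp4', source_preference=1)
    formats.append(cdn)

    # Same resolution, so source_preference alone orders the variants.
    formats.sort(key=lambda f: f['source_preference'])
    assert formats[-1]['format_id'] == '1-cdn'
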
index fed95ef71120a7137b47d953dcb830e6ede59f23..0eb3d9414ea339e0854949732f40ab9975ce8500 100644 (file)
@@ -78,4 +78,4 @@ class VideoBamIE(InfoExtractor):
             'view_count': view_count,
             'formats': formats,
             'age_limit': 18,
-        }
\ No newline at end of file
+        }
index ac6c255376442d132948eb5f54e0517bca5a66f4..0ffc7ff7dc9185a3a3ec5c0fd14d302872662dda 100644 (file)
@@ -1,10 +1,8 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
+from ..compat import compat_urlparse
 from .internetvideoarchive import InternetVideoArchiveIE
-from ..utils import compat_urlparse
 
 
 class VideoDetectiveIE(InfoExtractor):
@@ -17,13 +15,12 @@ class VideoDetectiveIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'KICK-ASS 2',
             'description': 'md5:65ba37ad619165afac7d432eaded6013',
-            'duration': 135,
+            'duration': 138,
         },
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         og_video = self._og_search_video_url(webpage)
         query = compat_urlparse.urlparse(og_video).query
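
VideoDetective only needs the query string of the og:video URL, which it forwards to InternetVideoArchiveIE. The extraction step with the stdlib, on an invented URL:

    try:
        from urllib.parse import urlparse  # Python 3
    except ImportError:
        from urlparse import urlparse      # Python 2, as compat_urlparse resolves to

    og_video = 'http://video.example.com/player.aspx?customerid=123&publishedid=456'
    query = urlparse(og_video).query
    assert query == 'customerid=123&publishedid=456'
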
index f75169041b4f958b9f345daba99a4a1ba575cf4e..94f9e9be94f9a420fd0207339085cbc35aba6805 100644 (file)
@@ -1,46 +1,50 @@
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
 from ..utils import (
     find_xpath_attr,
-    determine_ext,
+    int_or_none,
 )
 
+
 class VideofyMeIE(InfoExtractor):
-    _VALID_URL = r'https?://(www\.videofy\.me/.+?|p\.videofy\.me/v)/(?P<id>\d+)(&|#|$)'
-    IE_NAME = u'videofy.me'
+    _VALID_URL = r'https?://(?:www\.videofy\.me/.+?|p\.videofy\.me/v)/(?P<id>\d+)(&|#|$)'
+    IE_NAME = 'videofy.me'
 
     _TEST = {
-        u'url': u'http://www.videofy.me/thisisvideofyme/1100701',
-        u'file':  u'1100701.mp4',
-        u'md5': u'c77d700bdc16ae2e9f3c26019bd96143',
-        u'info_dict': {
-            u'title': u'This is VideofyMe',
-            u'description': None,
-            u'uploader': u'VideofyMe',
-            u'uploader_id': u'thisisvideofyme',
+        'url': 'http://www.videofy.me/thisisvideofyme/1100701',
+        'md5': 'c77d700bdc16ae2e9f3c26019bd96143',
+        'info_dict': {
+            'id': '1100701',
+            'ext': 'mp4',
+            'title': 'This is VideofyMe',
+            'description': None,
+            'uploader': 'VideofyMe',
+            'uploader_id': 'thisisvideofyme',
+            'view_count': int,
         },
-        
+
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         config = self._download_xml('http://sunshine.videofy.me/?videoId=%s' % video_id,
-                                            video_id)
+                                    video_id)
         video = config.find('video')
         sources = video.find('sources')
-        url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key) 
-            for key in ['on', 'av', 'off']] if node is not None)
+        url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key)
+                                          for key in ['on', 'av', 'off']] if node is not None)
         video_url = url_node.find('url').text
+        view_count = int_or_none(self._search_regex(
+            r'([0-9]+)', video.find('views').text, 'view count', fatal=False))
 
-        return {'id': video_id,
-                'title': video.find('title').text,
-                'url': video_url,
-                'ext': determine_ext(video_url),
-                'thumbnail': video.find('thumb').text,
-                'description': video.find('description').text,
-                'uploader': config.find('blog/name').text,
-                'uploader_id': video.find('identifier').text,
-                'view_count': re.search(r'\d+', video.find('views').text).group(),
-                }
+        return {
+            'id': video_id,
+            'title': video.find('title').text,
+            'url': video_url,
+            'thumbnail': video.find('thumb').text,
+            'description': video.find('description').text,
+            'uploader': config.find('blog/name').text,
+            'uploader_id': video.find('identifier').text,
+            'view_count': view_count,
+        }
index 29c4e0101ec21eb59c22de9739a516b9f96c0e0f..7a78f0d264a47fc85c53cd815242e629ee38ed30 100644 (file)
@@ -1,11 +1,11 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
+)
+from ..utils import (
     remove_start,
 )
 
@@ -27,9 +27,7 @@ class VideoMegaIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         url = 'http://videomega.tv/iframe.php?ref={0:}'.format(video_id)
         webpage = self._download_webpage(url, video_id)
 
index 65463c73324ca83ab87b45bc33d569c3fe881163..3176e3b9dda8580ca4c693343508367d12b25751 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import random
 
@@ -5,23 +7,22 @@ from .common import InfoExtractor
 
 
 class VideoPremiumIE(InfoExtractor):
-    _VALID_URL = r'(?:https?://)?(?:www\.)?videopremium\.(?:tv|me)/(?P<id>\w+)(?:/.*)?'
+    _VALID_URL = r'https?://(?:www\.)?videopremium\.(?:tv|me)/(?P<id>\w+)(?:/.*)?'
     _TEST = {
-        u'url': u'http://videopremium.tv/4w7oadjsf156',
-        u'file': u'4w7oadjsf156.f4v',
-        u'info_dict': {
-            u"title": u"youtube-dl_test_video____a_________-BaW_jenozKc.mp4.mp4"
+        'url': 'http://videopremium.tv/4w7oadjsf156',
+        'info_dict': {
+            'id': '4w7oadjsf156',
+            'ext': 'f4v',
+            'title': 'youtube-dl_test_video____a_________-BaW_jenozKc.mp4.mp4'
         },
-        u'params': {
-            u'skip_download': True,
+        'params': {
+            'skip_download': True,
         },
-        u'skip': u'Test file has been deleted.',
+        'skip': 'Test file has been deleted.',
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage_url = 'http://videopremium.tv/' + video_id
         webpage = self._download_webpage(webpage_url, video_id)
 
@@ -29,17 +30,17 @@ class VideoPremiumIE(InfoExtractor):
             # Download again, we need a cookie
             webpage = self._download_webpage(
                 webpage_url, video_id,
-                note=u'Downloading webpage again (with cookie)')
+                note='Downloading webpage again (with cookie)')
 
         video_title = self._html_search_regex(
-            r'<h2(?:.*?)>\s*(.+?)\s*<', webpage, u'video title')
+            r'<h2(?:.*?)>\s*(.+?)\s*<', webpage, 'video title')
 
         return {
-            'id':          video_id,
-            'url':         "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16),
-            'play_path':   "mp4:%s.f4v" % video_id,
-            'page_url':    "http://videopremium.tv/" + video_id,
-            'player_url':  "http://videopremium.tv/uplayer/uppod.swf",
-            'ext':         'f4v',
-            'title':       video_title,
+            'id': video_id,
+            'url': "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16),
+            'play_path': "mp4:%s.f4v" % video_id,
+            'page_url': "http://videopremium.tv/" + video_id,
+            'player_url': "http://videopremium.tv/uplayer/uppod.swf",
+            'ext': 'f4v',
+            'title': video_title,
         }
index a647807d01f8b3e5a2ed3eab99acc545533327d4..1f938838cc9247e9e80c8ce32911cb40954c1830 100644 (file)
@@ -58,4 +58,4 @@ class VideoTtIE(InfoExtractor):
             'like_count': int_or_none(video['liked']),
             'dislike_count': int_or_none(video['disliked']),
             'formats': formats,
-        }
\ No newline at end of file
+        }
index 4a08ddd4390fedb6fc0944b6f3f0034e72c8bc03..ca2e50935def482fb3d3f7f5e7594d24b3dba2f1 100644 (file)
@@ -23,4 +23,4 @@ class VideoWeedIE(NovaMovIE):
             'title': 'optical illusion  dissapeared image magic illusion',
             'description': ''
         },
-    }
\ No newline at end of file
+    }
diff --git a/youtube_dl/extractor/vidzi.py b/youtube_dl/extractor/vidzi.py
new file mode 100644 (file)
index 0000000..08a5a7b
--- /dev/null
@@ -0,0 +1,32 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class VidziIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?vidzi\.tv/(?P<id>\w+)'
+    _TEST = {
+        'url': 'http://vidzi.tv/cghql9yq6emu.html',
+        'md5': '4f16c71ca0c8c8635ab6932b5f3f1660',
+        'info_dict': {
+            'id': 'cghql9yq6emu',
+            'ext': 'mp4',
+            'title': 'youtube-dl test video  1\\\\2\'3/4<5\\\\6ä7↭',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
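+        # the stream URL is embedded in an inline player config as {file:"..."}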
+        video_url = self._html_search_regex(
+            r'{\s*file\s*:\s*"([^"]+)"\s*}', webpage, 'video url')
+        title = self._html_search_regex(
+            r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title')
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+        }
index e6a86f18ec5018615344e547cd332892c81fc665..06b0bed41e68401a8667cbabdca0d9796ea8ca3d 100644 (file)
@@ -7,14 +7,14 @@ import itertools
 
 from .common import InfoExtractor
 from .subtitles import SubtitlesInfoExtractor
-from ..utils import (
-    clean_html,
+from ..compat import (
     compat_HTTPError,
     compat_urllib_parse,
     compat_urllib_request,
     compat_urlparse,
+)
+from ..utils import (
     ExtractorError,
-    get_element_by_attribute,
     InAdvancePagedList,
     int_or_none,
     RegexNotFoundError,
@@ -157,6 +157,18 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
                 'duration': 62,
             }
         },
+        {
+            # from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
+            'url': 'https://player.vimeo.com/video/98044508',
+            'note': 'The js code contains assignments to the same variable as the config',
+            'info_dict': {
+                'id': '98044508',
+                'ext': 'mp4',
+                'title': 'Pier Solar OUYA Official Trailer',
+                'uploader': 'Tulio Gonçalves',
+                'uploader_id': 'user28849593',
+            },
+        },
     ]
 
     def _verify_video_password(self, url, video_id, webpage):
@@ -244,11 +256,11 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
                 # We try to find out which variable the config dict is assigned to
                 m_variable_name = re.search('(\w)\.video\.id', webpage)
                 if m_variable_name is not None:
-                    config_re = r'%s=({.+?});' % re.escape(m_variable_name.group(1))
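+                    # require a non-empty object ([^}]) so that empty
+                    # assignments to the same variable (e.g. "a={};") earlier
+                    # in the page are skipped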
+                    config_re = r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))
                 else:
                     config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                 config = self._search_regex(config_re, webpage, 'info section',
-                    flags=re.DOTALL)
+                                            flags=re.DOTALL)
                 config = json.loads(config)
         except Exception as e:
             if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
@@ -502,7 +514,7 @@ class VimeoReviewIE(InfoExtractor):
         'info_dict': {
             'id': '91613211',
             'ext': 'mp4',
-            'title': 'Death by dogma versus assembling agile - Sander Hoogendoorn',
+            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
             'uploader': 'DevWeek Events',
             'duration': 2773,
             'thumbnail': 're:^https?://.*\.jpg$',
index e7754158dcde7c44fe5f975f21e17648e445fd2b..0b58fe0fe0b5188e9c9865e56ce064e94dbc45e5 100644 (file)
@@ -17,6 +17,7 @@ class VineIE(InfoExtractor):
             'id': 'b9KOOWX7HUx',
             'ext': 'mp4',
             'title': 'Chicken.',
+            'alt_title': 'Vine by Jack Dorsey',
             'description': 'Chicken.',
             'upload_date': '20130519',
             'uploader': 'Jack Dorsey',
@@ -25,30 +26,26 @@ class VineIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage('https://vine.co/v/' + video_id, video_id)
 
         data = json.loads(self._html_search_regex(
             r'window\.POST_DATA = { %s: ({.+?}) }' % video_id, webpage, 'vine data'))
 
-        formats = [
-            {
-                'url': data['videoLowURL'],
-                'ext': 'mp4',
-                'format_id': 'low',
-            },
-            {
-                'url': data['videoUrl'],
-                'ext': 'mp4',
-                'format_id': 'standard',
-            }
-        ]
+        formats = [{
+            'url': data['videoLowURL'],
+            'ext': 'mp4',
+            'format_id': 'low',
+        }, {
+            'url': data['videoUrl'],
+            'ext': 'mp4',
+            'format_id': 'standard',
+        }]
 
         return {
             'id': video_id,
             'title': self._og_search_title(webpage),
+            'alt_title': self._og_search_description(webpage),
             'description': data['description'],
             'thumbnail': data['thumbnailUrl'],
             'upload_date': unified_strdate(data['created']),
@@ -63,29 +60,36 @@ class VineIE(InfoExtractor):
 
 class VineUserIE(InfoExtractor):
     IE_NAME = 'vine:user'
-    _VALID_URL = r'(?:https?://)?vine\.co/(?P<user>[^/]+)/?(\?.*)?$'
+    _VALID_URL = r'(?:https?://)?vine\.co/(?P<u>u/)?(?P<user>[^/]+)/?(\?.*)?$'
     _VINE_BASE_URL = "https://vine.co/"
-    _TEST = {
-        'url': 'https://vine.co/Visa',
-        'info_dict': {
-            'id': 'Visa',
+    _TESTS = [
+        {
+            'url': 'https://vine.co/Visa',
+            'info_dict': {
+                'id': 'Visa',
+            },
+            'playlist_mincount': 46,
         },
-        'playlist_mincount': 47,
-    }
+        {
+            'url': 'https://vine.co/u/941705360593584128',
+            'only_matching': True,
+        },
+    ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         user = mobj.group('user')
+        u = mobj.group('u')
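+        # /u/<numeric id> URLs query the profile API directly, while vanity
+        # names need the extra 'vanity/' path segment (see profile_url below)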
 
-        profile_url = "%sapi/users/profiles/vanity/%s" % (
-            self._VINE_BASE_URL, user)
+        profile_url = "%sapi/users/profiles/%s%s" % (
+            self._VINE_BASE_URL, 'vanity/' if not u else '', user)
         profile_data = self._download_json(
             profile_url, user, note='Downloading user profile data')
 
         user_id = profile_data['data']['userId']
         timeline_data = []
         for pagenum in itertools.count(1):
-            timeline_url = "%sapi/timelines/users/%s?page=%s" % (
+            timeline_url = "%sapi/timelines/users/%s?page=%s&size=100" % (
                 self._VINE_BASE_URL, user_id, pagenum)
             timeline_page = self._download_json(
                 timeline_url, user, note='Downloading page %d' % pagenum)
index 918bd10988a2c49fac2fc76bb7481e1a3ed7fdf1..542e9198ac0e5a9470ad5e16717bf9ca006050af 100644 (file)
@@ -5,18 +5,22 @@ import re
 import json
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_str,
+    compat_urllib_parse,
+    compat_urllib_request,
+)
 from ..utils import (
     ExtractorError,
-    compat_urllib_request,
-    compat_urllib_parse,
-    compat_str,
+    orderedSet,
     unescapeHTML,
+    unified_strdate,
 )
 
 
 class VKIE(InfoExtractor):
     IE_NAME = 'vk.com'
-    _VALID_URL = r'https?://(?:m\.)?vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+)|(?:.+?\?.*?z=)?video(?P<videoid>.*?)(?:\?|%2F|$))'
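+    # [^s] after "video" keeps /videos<id> user pages from matching here;
+    # those are handled by VKUserVideosIE below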
+    _VALID_URL = r'https?://(?:m\.)?vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+)|(?:.+?\?.*?z=)?video(?P<videoid>[^s].*?)(?:\?|%2F|$))'
     _NETRC_MACHINE = 'vk'
 
     _TESTS = [
@@ -29,17 +33,19 @@ class VKIE(InfoExtractor):
                 'title': 'ProtivoGunz - Хуёвая песня',
                 'uploader': 're:Noize MC.*',
                 'duration': 195,
+                'upload_date': '20120212',
             },
         },
         {
-            'url': 'http://vk.com/video4643923_163339118',
-            'md5': 'f79bccb5cd182b1f43502ca5685b2b36',
+            'url': 'http://vk.com/video205387401_165548505',
+            'md5': '6c0aeb2e90396ba97035b9cbde548700',
             'info_dict': {
-                'id': '163339118',
+                'id': '165548505',
                 'ext': 'mp4',
-                'uploader': 'Elya Iskhakova',
-                'title': 'Dream Theater - Hollow Years Live at Budokan 720*',
-                'duration': 558,
+                'uploader': 'Tom Cruise',
+                'title': 'No name',
+                'duration': 9,
+                'upload_date': '20130721',
             }
         },
         {
@@ -52,9 +58,12 @@ class VKIE(InfoExtractor):
                 'uploader': 'Vladimir Gavrin',
                 'title': 'Lin Dan',
                 'duration': 101,
+                'upload_date': '20120730',
             }
         },
         {
+            # VIDEO NOW REMOVED
+            # please update if you find a video whose URL follows the same pattern
             'url': 'http://vk.com/video-8871596_164049491',
             'md5': 'a590bcaf3d543576c9bd162812387666',
             'note': 'Only available for registered users',
@@ -64,18 +73,7 @@ class VKIE(InfoExtractor):
                 'uploader': 'Триллеры',
                 'title': '► Бойцовский клуб / Fight Club 1999 [HD 720]',
                 'duration': 8352,
-            },
-            'skip': 'Requires vk account credentials',
-        },
-        {
-            'url': 'http://vk.com/feed?z=video-43215063_166094326%2Fbb50cacd3177146d7a',
-            'md5': 'd82c22e449f036282d1d3f7f4d276869',
-            'info_dict': {
-                'id': '166094326',
-                'ext': 'mp4',
-                'uploader': 'Киномания - лучшее из мира кино',
-                'title': 'Запах женщины (1992)',
-                'duration': 9392,
+                'upload_date': '20121218',
             },
             'skip': 'Requires vk account credentials',
         },
@@ -88,6 +86,7 @@ class VKIE(InfoExtractor):
                 'uploader': 'Киномания - лучшее из мира кино',
                 'title': ' ',
                 'duration': 7291,
+                'upload_date': '20140328',
             },
             'skip': 'Requires vk account credentials',
         },
@@ -100,9 +99,15 @@ class VKIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Книга Илая',
                 'duration': 6771,
+                'upload_date': '20140626',
             },
             'skip': 'Only works from Russia',
         },
+        {
+            # removed video, just testing that we match the pattern
+            'url': 'http://vk.com/feed?z=video-43215063_166094326%2Fbb50cacd3177146d7a',
+            'only_matching': True,
+        },
     ]
 
     def _login(self):
@@ -119,7 +124,7 @@ class VKIE(InfoExtractor):
         }
 
         request = compat_urllib_request.Request('https://login.vk.com/?act=login',
-            compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+                                                compat_urllib_parse.urlencode(login_form).encode('utf-8'))
         login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
 
         if re.search(r'onLoginFailed', login_page):
@@ -138,9 +143,21 @@ class VKIE(InfoExtractor):
         info_url = 'http://vk.com/al_video.php?act=show&al=1&video=%s' % video_id
         info_page = self._download_webpage(info_url, video_id)
 
-        if re.search(r'<!>Please log in or <', info_page):
-            raise ExtractorError('This video is only available for registered users, '
-                'use --username and --password options to provide account credentials.', expected=True)
+        ERRORS = {
+            r'>Видеозапись .*? была изъята из публичного доступа в связи с обращением правообладателя.<':
+            'Video %s has been removed from public access due to rightholder complaint.',
+
+            r'<!>Please log in or <':
+            'Video %s is only available for registered users, '
+            'use --username and --password options to provide account credentials.',
+
+            r'<!>Unknown error':
+            'Video %s does not exist.'
+        }
+
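+        # fail fast with a specific, user-facing message instead of a generic
+        # "unable to extract" error later on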
+        for error_re, error_msg in ERRORS.items():
+            if re.search(error_re, info_page):
+                raise ExtractorError(error_msg % video_id, expected=True)
 
         m_yt = re.search(r'src="(http://www.youtube.com/.*?)"', info_page)
         if m_yt is not None:
@@ -159,6 +176,13 @@ class VKIE(InfoExtractor):
         data_json = self._search_regex(r'var vars = ({.*?});', info_page, 'vars')
         data = json.loads(data_json)
 
+        # Extract upload date
+        upload_date = None
+        mobj = re.search(r'id="mv_date_wrap".*?Added ([a-zA-Z]+ [0-9]+), ([0-9]+) at', info_page)
+        if mobj is not None:
+            upload_date = unified_strdate(mobj.group(1) + ' ' + mobj.group(2))
+
         formats = [{
             'format_id': k,
             'url': v,
@@ -173,5 +197,28 @@ class VKIE(InfoExtractor):
             'title': unescapeHTML(data['md_title']),
             'thumbnail': data.get('jpg'),
             'uploader': data.get('md_author'),
-            'duration': data.get('duration')
+            'duration': data.get('duration'),
+            'upload_date': upload_date,
         }
+
+
+class VKUserVideosIE(InfoExtractor):
+    IE_NAME = 'vk.com:user-videos'
+    IE_DESC = 'All of a vk.com user\'s videos'
+    _VALID_URL = r'https?://vk\.com/videos(?P<id>[0-9]+)(?:m\?.*)?'
+    _TEMPLATE_URL = 'https://vk.com/videos'
+    _TEST = {
+        'url': 'http://vk.com/videos205387401',
+        'playlist_mincount': 4,
+    }
+
+    def _real_extract(self, url):
+        page_id = self._match_id(url)
+        page = self._download_webpage(url, page_id)
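+        # orderedSet keeps the first occurrence of each id, so duplicate links
+        # to the same video are dropped while page order is preserved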
+        video_ids = orderedSet(
+            m.group(1) for m in re.finditer(r'href="/video([0-9_]+)"', page))
+        url_entries = [
+            self.url_result(
+                'http://vk.com/video' + video_id, 'VK', video_id=video_id)
+            for video_id in video_ids]
+        return self.playlist_result(url_entries, page_id)
index affef650726d716b7e80aaab5c66dab3bc3ddc28..1c0966a793511a2ec3a9d147bd75ff22e8fb7209 100644 (file)
@@ -2,8 +2,9 @@
 from __future__ import unicode_literals
 
 import re
+
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urllib_request,
 )
@@ -24,8 +25,7 @@ class VodlockerIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         fields = dict(re.findall(r'''(?x)<input\s+
diff --git a/youtube_dl/extractor/vrt.py b/youtube_dl/extractor/vrt.py
new file mode 100644 (file)
index 0000000..bbd3bbf
--- /dev/null
@@ -0,0 +1,95 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import float_or_none
+
+
+class VRTIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:deredactie|sporza|cobra)\.be/cm/(?:[^/]+/)+(?P<id>[^/]+)/*'
+    _TESTS = [
+        # deredactie.be
+        {
+            'url': 'http://deredactie.be/cm/vrtnieuws/videozone/programmas/journaal/EP_141025_JOL',
+            'md5': '4cebde1eb60a53782d4f3992cbd46ec8',
+            'info_dict': {
+                'id': '2129880',
+                'ext': 'flv',
+                'title': 'Het journaal L - 25/10/14',
+                'description': None,
+                'timestamp': 1414271750.949,
+                'upload_date': '20141025',
+                'duration': 929,
+            }
+        },
+        # sporza.be
+        {
+            'url': 'http://sporza.be/cm/sporza/videozone/programmas/extratime/EP_141020_Extra_time',
+            'md5': '11f53088da9bf8e7cfc42456697953ff',
+            'info_dict': {
+                'id': '2124639',
+                'ext': 'flv',
+                'title': 'Bekijk Extra Time van 20 oktober',
+                'description': 'md5:83ac5415a4f1816c6a93f8138aef2426',
+                'timestamp': 1413835980.560,
+                'upload_date': '20141020',
+                'duration': 3238,
+            }
+        },
+        # cobra.be
+        {
+            'url': 'http://cobra.be/cm/cobra/videozone/rubriek/film-videozone/141022-mv-ellis-cafecorsari',
+            'md5': '78a2b060a5083c4f055449a72477409d',
+            'info_dict': {
+                'id': '2126050',
+                'ext': 'flv',
+                'title': 'Bret Easton Ellis in Café Corsari',
+                'description': 'md5:f699986e823f32fd6036c1855a724ee9',
+                'timestamp': 1413967500.494,
+                'upload_date': '20141022',
+                'duration': 661,
+            }
+        },
+    ]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_id = self._search_regex(
+            r'data-video-id="([^"]+)_[^"]+"', webpage, 'video id', fatal=False)
+
+        formats = []
+        mobj = re.search(
+            r'data-video-iphone-server="(?P<server>[^"]+)"\s+data-video-iphone-path="(?P<path>[^"]+)"',
+            webpage)
+        if mobj:
+            formats.extend(self._extract_m3u8_formats(
+                '%s/%s' % (mobj.group('server'), mobj.group('path')),
+                video_id, 'mp4'))
+        mobj = re.search(r'data-video-src="(?P<src>[^"]+)"', webpage)
+        if mobj:
+            formats.extend(self._extract_f4m_formats(
+                '%s/manifest.f4m' % mobj.group('src'), video_id))
+        self._sort_formats(formats)
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage, default=None)
+        thumbnail = self._og_search_thumbnail(webpage)
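+        # data-video-sitestat-pubdate and data-video-duration are expressed in
+        # milliseconds, hence the scale factor of 1000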
+        timestamp = float_or_none(self._search_regex(
+            r'data-video-sitestat-pubdate="(\d+)"', webpage, 'timestamp', fatal=False), 1000)
+        duration = float_or_none(self._search_regex(
+            r'data-video-duration="(\d+)"', webpage, 'duration', fatal=False), 1000)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'duration': duration,
+            'formats': formats,
+        }
index 1b2f731e932a63fbc7722251c0b4e57f0963c34c..405cb9db49f41a144a4c842d8f99aeb1c2023da9 100644 (file)
@@ -3,9 +3,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_str,
+)
 from ..utils import (
     int_or_none,
-    compat_str,
     ExtractorError,
 )
 
index ec3c010ad7e151bfc304315cdc5fd32bc21e8f43..c3fde53f5ef06a56b54e94b20b72a7e98c1992a5 100644 (file)
@@ -3,8 +3,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse_urlparse,
+)
+from ..utils import (
     ExtractorError,
     parse_duration,
     qualities,
@@ -25,10 +27,9 @@ class VuClipIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
+
         ad_m = re.search(
             r'''value="No.*?" onClick="location.href='([^"']+)'"''', webpage)
         if ad_m:
index 54d37da618960d46d977ff4b97682ca0c7b2a99c..8e25ecf280769166a49d18cfd2508bd6d90caa74 100644 (file)
@@ -4,9 +4,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_parse_qs,
     compat_urlparse,
+)
+from ..utils import (
     determine_ext,
     unified_strdate,
 )
@@ -141,7 +143,6 @@ class WDRMobileIE(InfoExtractor):
             'title': mobj.group('title'),
             'age_limit': int(mobj.group('age_limit')),
             'url': url,
-            'ext': determine_ext(url),
             'user_agent': 'mobile',
         }
 
@@ -224,4 +225,4 @@ class WDRMausIE(InfoExtractor):
             'upload_date': upload_date,
         }
 
-# TODO test _1
\ No newline at end of file
+# TODO test _1
index b24297a409911c79433cca404dc94206009aefe5..20bb039d38dd9c62ff6c38135f67f9aa41f484aa 100644 (file)
@@ -41,7 +41,7 @@ class WeiboIE(InfoExtractor):
         videos_urls = sorted(videos_urls, key=lambda u: 'video.sina.com' in u)
         player_url = videos_urls[-1]
         m_sina = re.match(r'https?://video\.sina\.com\.cn/v/b/(\d+)-\d+\.html',
-            player_url)
+                          player_url)
         if m_sina is not None:
             self.to_screen('Sina video detected')
             sina_id = m_sina.group(1)
index c27dda9440e62274e13b9359f24c2a909516b4bc..d6dec25ca9e7bb9de539e89c147e22b7381e3719 100644 (file)
@@ -37,7 +37,7 @@ class WimpIE(InfoExtractor):
         video_id = mobj.group(1)
         webpage = self._download_webpage(url, video_id)
         video_url = self._search_regex(
-            r's1\.addVariable\("file",\s*"([^"]+)"\);', webpage, 'video URL')
+            r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", webpage, 'video URL')
         if YoutubeIE.suitable(video_url):
             self.to_screen('Found YouTube video')
             return {
index 748443f811f184d4276d4628cd13ed1e2bf92d9c..13a079151c9c879561e3e538c49f3122f85b349b 100644 (file)
@@ -1,9 +1,8 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import ExtractorError, compat_urllib_request
+from ..compat import compat_urllib_request
+from ..utils import ExtractorError
 
 
 class WistiaIE(InfoExtractor):
@@ -22,8 +21,7 @@ class WistiaIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         request = compat_urllib_request.Request(self._API_URL.format(video_id))
         request.add_header('Referer', url)  # Some videos require this.
index bda3870db9f16e12c721c361696656df8be8a1b3..d5c26a032bcf28a9c8ae79e1d083d67ed29b2726 100644 (file)
@@ -51,4 +51,3 @@ class WorldStarHipHopIE(InfoExtractor):
             'title': video_title,
             'thumbnail': thumbnail,
         }
-
index 34dd6d9528ee9d4746b798e10cfeecf19e3c8277..c427649211079715a5510eef3eaf35981bdb1034 100644 (file)
@@ -27,15 +27,15 @@ class WrzutaIE(InfoExtractor):
             'description': 'md5:7fb5ef3c21c5893375fda51d9b15d9cd',
         },
     }, {
-        'url': 'http://w729.wrzuta.pl/audio/9oXJqdcndqv/david_guetta_amp_showtek_ft._vassy_-_bad',
-        'md5': '1e546a18e1c22ac6e9adce17b8961ff5',
+        'url': 'http://jolka85.wrzuta.pl/audio/063jOPX5ue2/liber_natalia_szroeder_-_teraz_ty',
+        'md5': 'bc78077859bea7bcfe4295d7d7fc9025',
         'info_dict': {
-            'id': '9oXJqdcndqv',
+            'id': '063jOPX5ue2',
             'ext': 'ogg',
-            'title': 'David Guetta & Showtek ft. Vassy - Bad',
-            'duration': 270,
-            'uploader_id': 'w729',
-            'description': 'md5:4628f01c666bbaaecefa83476cfa794a',
+            'title': 'Liber & Natalia Szroeder - Teraz Ty',
+            'duration': 203,
+            'uploader_id': 'jolka85',
+            'description': 'md5:2d2b6340f9188c8c4cd891580e481096',
         },
     }]
 
@@ -49,16 +49,17 @@ class WrzutaIE(InfoExtractor):
 
         quality = qualities(['SD', 'MQ', 'HQ', 'HD'])
 
-        audio_table = {'flv': 'mp3', 'webm': 'ogg'}
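+        # the embed API occasionally reports '???' as the container type;
+        # such tracks appear to be plain mp3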
+        audio_table = {'flv': 'mp3', 'webm': 'ogg', '???': 'mp3'}
 
         embedpage = self._download_json('http://www.wrzuta.pl/npp/embed/%s/%s' % (uploader, video_id), video_id)
 
         formats = []
         for media in embedpage['url']:
+            fmt = media['type'].split('@')[0]
             if typ == 'audio':
-                ext = audio_table[media['type'].split('@')[0]]
+                ext = audio_table.get(fmt, fmt)
             else:
-                ext = media['type'].split('@')[0]
+                ext = fmt
 
             formats.append({
                 'format_id': '%s_%s' % (ext, media['quality'].lower()),
index 71bd7c463595a549786d3156f33f114b97b54226..80c48c37d32c0849e689d626811ee34c5b414ee0 100644 (file)
@@ -1,9 +1,7 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
 )
 
@@ -23,10 +21,9 @@ class XBefIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
+
         title = self._html_search_regex(
             r'<h1[^>]*>(.*?)</h1>', webpage, 'title')
 
@@ -47,4 +44,3 @@ class XBefIE(InfoExtractor):
             'thumbnail': thumbnail,
             'age_limit': 18,
         }
-
index 4e8fbde8d6bbb072e7fc3475288c6c2e93360993..6b37bcbc959a8e8b83fee052da18728ca9a9c298 100644 (file)
@@ -42,7 +42,7 @@ class XHamsterIE(InfoExtractor):
         }
     ]
 
-    def _real_extract(self,url):
+    def _real_extract(self, url):
         def extract_video_url(webpage):
             mp4 = re.search(r'<video\s+.*?file="([^"]+)".*?>', webpage)
             if mp4 is None:
@@ -67,17 +67,17 @@ class XHamsterIE(InfoExtractor):
         description = mobj.group(1) if mobj else None
 
         upload_date = self._html_search_regex(r'hint=\'(\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}\'',
-            webpage, 'upload date', fatal=False)
+                                              webpage, 'upload date', fatal=False)
         if upload_date:
             upload_date = unified_strdate(upload_date)
 
         uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)',
-            webpage, 'uploader id', default='anonymous')
+                                              webpage, 'uploader id', default='anonymous')
 
         thumbnail = self._html_search_regex(r'<video\s+.*?poster="([^"]+)".*?>', webpage, 'thumbnail', fatal=False)
 
         duration = parse_duration(self._html_search_regex(r'<span>Runtime:</span> (\d+:\d+)</div>',
-            webpage, 'duration', fatal=False))
+                                                          webpage, 'duration', fatal=False))
 
         view_count = self._html_search_regex(r'<span>Views:</span> ([^<]+)</div>', webpage, 'view count', fatal=False)
         if view_count:
diff --git a/youtube_dl/extractor/xminus.py b/youtube_dl/extractor/xminus.py
new file mode 100644 (file)
index 0000000..8c6241a
--- /dev/null
@@ -0,0 +1,76 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_chr,
+    compat_ord,
+)
+from ..utils import (
+    int_or_none,
+    parse_filesize,
+)
+
+
+class XMinusIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?x-minus\.org/track/(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://x-minus.org/track/4542/%D0%BF%D0%B5%D1%81%D0%B5%D0%BD%D0%BA%D0%B0-%D1%88%D0%BE%D1%84%D0%B5%D1%80%D0%B0.html',
+        'md5': '401a15f2d2dcf6d592cb95528d72a2a8',
+        'info_dict': {
+            'id': '4542',
+            'ext': 'mp3',
+            'title': 'Леонид Агутин-Песенка шофера',
+            'duration': 156,
+            'tbr': 320,
+            'filesize_approx': 5900000,
+            'view_count': int,
+            'description': 'md5:03238c5b663810bc79cf42ef3c03e371',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        artist = self._html_search_regex(
+            r'minus_track\.artist="(.+?)"', webpage, 'artist')
+        title = artist + '-' + self._html_search_regex(
+            r'minus_track\.title="(.+?)"', webpage, 'title')
+        duration = int_or_none(self._html_search_regex(
+            r'minus_track\.dur_sec=\'([0-9]*?)\'',
+            webpage, 'duration', fatal=False))
+        filesize_approx = parse_filesize(self._html_search_regex(
+            r'<div class="filesize[^"]*"></div>\s*([0-9.]+\s*[a-zA-Z][bB])',
+            webpage, 'approximate filesize', fatal=False))
+        tbr = int_or_none(self._html_search_regex(
+            r'<div class="quality[^"]*"></div>\s*([0-9]+)\s*kbps',
+            webpage, 'bitrate', fatal=False))
+        view_count = int_or_none(self._html_search_regex(
+            r'<div class="quality.*?► ([0-9]+)',
+            webpage, 'view count', fatal=False))
+        description = self._html_search_regex(
+            r'(?s)<div id="song_texts">(.*?)</div><br',
+            webpage, 'song lyrics', fatal=False)
+        if description:
+            description = re.sub(' *\r *', '\n', description)
+
+        enc_token = self._html_search_regex(
+            r'minus_track\.tkn="(.+?)"', webpage, 'enc_token')
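+        # the token is obfuscated: decode by reversing the string and shifting
+        # every character down one code point, except the character at index 3
+        # of the reversed string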
+        token = ''.join(
+            c if pos == 3 else compat_chr(compat_ord(c) - 1)
+            for pos, c in enumerate(reversed(enc_token)))
+        video_url = 'http://x-minus.org/dwlf/%s/%s.mp3' % (video_id, token)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'duration': duration,
+            'filesize_approx': filesize_approx,
+            'tbr': tbr,
+            'view_count': view_count,
+            'description': description,
+        }
index 7a73b243080406b29b6c4a17d50a3e4d1ac023cb..79ed6c744242bf132afd033ae35949cc1e2263b5 100644 (file)
@@ -1,10 +1,8 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
 )
 
@@ -23,21 +21,18 @@ class XNXXIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        # Get webpage content
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         video_url = self._search_regex(r'flv_url=(.*?)&amp;',
-            webpage, 'video URL')
+                                       webpage, 'video URL')
         video_url = compat_urllib_parse.unquote(video_url)
 
         video_title = self._html_search_regex(r'<title>(.*?)\s+-\s+XNXX.COM',
-            webpage, 'title')
+                                              webpage, 'title')
 
         video_thumbnail = self._search_regex(r'url_bigthumb=(.*?)&amp;',
-            webpage, 'thumbnail', fatal=False)
+                                             webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
index 273d93d9ee544b74f22daa20a195b8e4bc7b05a2..f9d98b83fb47aef2f927c21c102ced7943c11e53 100644 (file)
@@ -4,15 +4,17 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
+)
+from ..utils import (
     parse_duration,
     str_to_int,
 )
 
 
 class XTubeIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?(?P<url>xtube\.com/watch\.php\?v=(?P<videoid>[^/?&]+))'
+    _VALID_URL = r'https?://(?:www\.)?(?P<url>xtube\.com/watch\.php\?v=(?P<id>[^/?&]+))'
     _TEST = {
         'url': 'http://www.xtube.com/watch.php?v=kVTUy_G222_',
         'md5': '092fbdd3cbe292c920ef6fc6a8a9cdab',
@@ -20,7 +22,7 @@ class XTubeIE(InfoExtractor):
             'id': 'kVTUy_G222_',
             'ext': 'mp4',
             'title': 'strange erotica',
-            'description': 'surreal gay themed erotica...almost an ET kind of thing',
+            'description': 'http://www.xtube.com an ET kind of thing',
             'uploader': 'greenshowers',
             'duration': 450,
             'age_limit': 18,
@@ -29,7 +31,7 @@ class XTubeIE(InfoExtractor):
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
+        video_id = mobj.group('id')
         url = 'http://www.' + mobj.group('url')
 
         req = compat_urllib_request.Request(url)
@@ -97,7 +99,7 @@ class XTubeUserIE(InfoExtractor):
             url, username, note='Retrieving profile page')
 
         video_count = int(self._search_regex(
-            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>'%username, profile_page,
+            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' % username, profile_page,
             'video count'))
 
         PAGE_SIZE = 25
index 7e00448246beb9ab9b7c25f33b05e6f4f1bb8283..2a45dc574263f7e651020e591fcc40bdf987367d 100644 (file)
@@ -3,15 +3,17 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
-    ExtractorError,
+)
+from ..utils import (
     clean_html,
+    ExtractorError,
 )
 
 
 class XVideosIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
+    _VALID_URL = r'https?://(?:www\.)?xvideos\.com/video(?P<id>[0-9]+)(?:.*)'
     _TEST = {
         'url': 'http://www.xvideos.com/video4588838/biker_takes_his_girl',
         'md5': '4b46ae6ea5e6e9086e714d883313c0c9',
@@ -24,37 +26,25 @@ class XVideosIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        self.report_extraction(video_id)
-
         mobj = re.search(r'<h1 class="inlineError">(.+?)</h1>', webpage)
         if mobj:
             raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(mobj.group(1))), expected=True)
 
-        # Extract video URL
         video_url = compat_urllib_parse.unquote(
             self._search_regex(r'flv_url=(.+?)&', webpage, 'video URL'))
-
-        # Extract title
         video_title = self._html_search_regex(
             r'<title>(.*?)\s+-\s+XVID', webpage, 'title')
-
-        # Extract video thumbnail
         video_thumbnail = self._search_regex(
             r'url_bigthumb=(.+?)&amp', webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
             'url': video_url,
-            'uploader': None,
-            'upload_date': None,
             'title': video_title,
             'ext': 'flv',
             'thumbnail': video_thumbnail,
-            'description': None,
             'age_limit': 18,
         }
index 117f0856a261e8f520bd6ac593ca75f843f6c7a7..031226f2764032b9f1af16f5c9d74188a89cdc05 100644 (file)
@@ -6,11 +6,13 @@ import json
 import re
 
 from .common import InfoExtractor, SearchInfoExtractor
-from ..utils import (
-    ExtractorError,
+from ..compat import (
     compat_urllib_parse,
     compat_urlparse,
+)
+from ..utils import (
     clean_html,
+    ExtractorError,
     int_or_none,
 )
 
@@ -229,7 +231,7 @@ class YahooSearchIE(SearchInfoExtractor):
         for pagenum in itertools.count(0):
             result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query,
-                note='Downloading results page '+str(pagenum+1))
+                                       note='Downloading results page ' + str(pagenum + 1))
             m = info['m']
             results = info['results']
 
index 944d7da380d668809f511243f3f192a41beba3b6..894678a23dac9d1b03e07f0cd9b2eecc7e690e18 100644 (file)
@@ -5,7 +5,7 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import compat_urllib_parse
+from ..compat import compat_urllib_parse
 
 
 class YnetIE(InfoExtractor):
@@ -13,7 +13,6 @@ class YnetIE(InfoExtractor):
     _TESTS = [
         {
             'url': 'http://hot.ynet.co.il/home/0,7340,L-11659-99244,00.html',
-            'md5': '4b29cb57c3dddd57642b3f051f535b07',
             'info_dict': {
                 'id': 'L-11659-99244',
                 'ext': 'flv',
@@ -22,7 +21,6 @@ class YnetIE(InfoExtractor):
             }
         }, {
             'url': 'http://hot.ynet.co.il/home/0,7340,L-8859-84418,00.html',
-            'md5': '8194c2ea221e9a639cac96b6b0753dc5',
             'info_dict': {
                 'id': 'L-8859-84418',
                 'ext': 'flv',
@@ -49,4 +47,4 @@ class YnetIE(InfoExtractor):
             'title': title,
             'formats': self._extract_f4m_formats(f4m_url, video_id),
             'thumbnail': self._og_search_thumbnail(webpage),
-        }
\ No newline at end of file
+        }
index b86331e3cfa39ec8d3f287e829900b414892beee..c642075dcfabbfb025d64b92e392d614578f42b1 100644 (file)
@@ -9,40 +9,30 @@ from ..utils import (
 
 
 class YouJizzIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+)\.html$'
+    _VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'
     _TEST = {
         'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
-        'file': '2189178.flv',
         'md5': '07e15fa469ba384c7693fd246905547c',
         'info_dict': {
+            'id': '2189178',
+            'ext': 'flv',
             "title": "Zeichentrick 1",
             "age_limit": 18,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('videoid')
-
-        # Get webpage content
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-
         age_limit = self._rta_search(webpage)
-
-        # Get the video title
-        video_title = self._html_search_regex(r'<title>(?P<title>.*)</title>',
-            webpage, 'title').strip()
-
-        # Get the embed page
-        result = re.search(r'https?://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
-        if result is None:
-            raise ExtractorError('ERROR: unable to extract embed page')
-
-        embed_page_url = result.group(0).strip()
-        video_id = result.group('videoid')
-
-        webpage = self._download_webpage(embed_page_url, video_id)
+        video_title = self._html_search_regex(
+            r'<title>\s*(.*)\s*</title>', webpage, 'title')
+
+        embed_page_url = self._search_regex(
+            r'(https?://www.youjizz.com/videos/embed/[0-9]+)',
+            webpage, 'embed page')
+        webpage = self._download_webpage(
+            embed_page_url, video_id, note='Downloading embed page')
 
         # Get the video URL
         m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P<playlist>.+?)"\);', webpage)
index 48d47a24556666df12f3a1ea4c0d1f1d6a51a06b..97b98bbe88715f644da6bec1709d697af2c8e0e0 100644 (file)
@@ -35,21 +35,21 @@ class YoukuIE(InfoExtractor):
 
     def _gen_sid(self):
         nowTime = int(time.time() * 1000)
-        random1 = random.randint(1000,1998)
-        random2 = random.randint(1000,9999)
+        random1 = random.randint(1000, 1998)
+        random2 = random.randint(1000, 9999)
 
-        return "%d%d%d" %(nowTime,random1,random2)
+        return "%d%d%d" % (nowTime, random1, random2)
 
     def _get_file_ID_mix_string(self, seed):
         mixed = []
         source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
         seed = float(seed)
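+        # a small linear congruential generator (seed -> 211*seed + 30031
+        # mod 65536) drives a deterministic shuffle of the source alphabet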
         for i in range(len(source)):
-            seed  =  (seed * 211 + 30031) % 65536
-            index  =  math.floor(seed / 65536 * len(source))
+            seed = (seed * 211 + 30031) % 65536
+            index = math.floor(seed / 65536 * len(source))
             mixed.append(source[int(index)])
             source.remove(source[int(index)])
-        #return ''.join(mixed)
+        # return ''.join(mixed)
         return mixed
 
     def _get_file_id(self, fileId, seed):
@@ -74,7 +74,7 @@ class YoukuIE(InfoExtractor):
             # -8 means blocked outside China.
             error = config['data'][0].get('error')  # Chinese and English, separated by newline.
             raise ExtractorError(error or 'Server reported error %i' % error_code,
-                expected=True)
+                                 expected=True)
 
         video_title = config['data'][0]['title']
         seed = config['data'][0]['seed']
@@ -100,12 +100,12 @@ class YoukuIE(InfoExtractor):
         keys = [s['k'] for s in config['data'][0]['segs'][format]]
         # segs is usually a dictionary, but an empty *list* if an error occurred.
 
-        files_info=[]
+        files_info = []
         sid = self._gen_sid()
         fileid = self._get_file_id(fileid, seed)
 
-        #column 8,9 of fileid represent the segment number
-        #fileid[7:9] should be changed
+        # characters 8-9 of fileid (fileid[7:9]) encode the segment number
+        # and are replaced for each segment below
         for index, key in enumerate(keys):
             temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
             download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
index 7bfda45e76e0d4ca3b6bfd6c3a8ec9f38de453e1..107c9ac36e4f4f48bd768567e4399af15fd07743 100644 (file)
@@ -6,10 +6,11 @@ import re
 import sys
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_parse_urlparse,
     compat_urllib_request,
-
+)
+from ..utils import (
     ExtractorError,
     unescapeHTML,
     unified_strdate,
@@ -45,11 +46,13 @@ class YouPornIE(InfoExtractor):
         age_limit = self._rta_search(webpage)
 
         # Get JSON parameters
-        json_params = self._search_regex(r'var currentVideo = new Video\((.*)\);', webpage, 'JSON parameters')
+        json_params = self._search_regex(
+            r'var currentVideo = new Video\((.*)\)[,;]',
+            webpage, 'JSON parameters')
         try:
             params = json.loads(json_params)
         except:
-            raise ExtractorError(u'Invalid JSON')
+            raise ExtractorError('Invalid JSON')
 
         self.report_extraction(video_id)
         try:
@@ -64,7 +67,7 @@ class YouPornIE(InfoExtractor):
         # Get all of the links from the page
         DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
         download_list_html = self._search_regex(DOWNLOAD_LIST_RE,
-            webpage, 'download list').strip()
+                                                webpage, 'download list').strip()
         LINK_RE = r'<a href="([^"]+)">'
         links = re.findall(LINK_RE, download_list_html)
 
@@ -73,7 +76,7 @@ class YouPornIE(InfoExtractor):
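+        # hidden download links are AES-encrypted using a key derived from the
+        # video title (32-byte key, i.e. AES-256)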
         for encrypted_link in encrypted_links:
             link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8')
             links.append(link)
-        
+
         formats = []
         for link in links:
             # A link looks like this:
@@ -103,8 +106,8 @@ class YouPornIE(InfoExtractor):
         self._sort_formats(formats)
 
         if not formats:
-            raise ExtractorError(u'ERROR: no known formats available for video')
-        
+            raise ExtractorError('ERROR: no known formats available for video')
+
         return {
             'id': video_id,
             'uploader': video_uploader,
index 9041cfa8770897851d06026942517a103df2a639..823d6aaf3a4f38ad5696e67a23b14e6fe690fabd 100644 (file)
@@ -7,47 +7,48 @@ import itertools
 import json
 import os.path
 import re
+import time
 import traceback
 
 from .common import InfoExtractor, SearchInfoExtractor
 from .subtitles import SubtitlesInfoExtractor
 from ..jsinterp import JSInterpreter
 from ..swfinterp import SWFInterpreter
-from ..utils import (
+from ..compat import (
     compat_chr,
     compat_parse_qs,
     compat_urllib_parse,
     compat_urllib_request,
     compat_urlparse,
     compat_str,
-
+)
+from ..utils import (
     clean_html,
-    get_element_by_id,
-    get_element_by_attribute,
     ExtractorError,
+    get_element_by_attribute,
+    get_element_by_id,
     int_or_none,
     OnDemandPagedList,
+    orderedSet,
     unescapeHTML,
     unified_strdate,
-    orderedSet,
     uppercase_escape,
 )
 
+
 class YoutubeBaseInfoExtractor(InfoExtractor):
     """Provide base functions for Youtube extractors"""
     _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
     _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
-    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
-    _AGE_URL = 'https://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
     _NETRC_MACHINE = 'youtube'
     # If True it will raise an error if no login info is provided
     _LOGIN_REQUIRED = False
 
     def _set_language(self):
-        return bool(self._download_webpage(
-            self._LANG_URL, None,
-            note='Setting language', errnote='unable to set language',
-            fatal=False))
+        self._set_cookie(
+            '.youtube.com', 'PREF', 'f1=50000000&hl=en',
+            # YouTube sets the expire time to about two months
+            expire_time=time.time() + 2 * 30 * 24 * 3600)
 
     def _login(self):
         """
@@ -76,30 +77,30 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
 
         # Log in
         login_form_strs = {
-                'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
-                'Email': username,
-                'GALX': galx,
-                'Passwd': password,
-
-                'PersistentCookie': 'yes',
-                '_utf8': '霱',
-                'bgresponse': 'js_disabled',
-                'checkConnection': '',
-                'checkedDomains': 'youtube',
-                'dnConn': '',
-                'pstMsg': '0',
-                'rmShown': '1',
-                'secTok': '',
-                'signIn': 'Sign in',
-                'timeStmp': '',
-                'service': 'youtube',
-                'uilel': '3',
-                'hl': 'en_US',
+            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+            'Email': username,
+            'GALX': galx,
+            'Passwd': password,
+
+            'PersistentCookie': 'yes',
+            '_utf8': '霱',
+            'bgresponse': 'js_disabled',
+            'checkConnection': '',
+            'checkedDomains': 'youtube',
+            'dnConn': '',
+            'pstMsg': '0',
+            'rmShown': '1',
+            'secTok': '',
+            'signIn': 'Sign in',
+            'timeStmp': '',
+            'service': 'youtube',
+            'uilel': '3',
+            'hl': 'en_US',
         }
 
         # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
         # chokes on unicode
-        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
+        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
         login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
 
         req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
@@ -149,7 +150,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                 'service': 'youtube',
                 'hl': 'en_US',
             }
-            tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in tfa_form_strs.items())
+            tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
             tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')
 
             tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
@@ -175,27 +176,12 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             return False
         return True
 
-    def _confirm_age(self):
-        age_form = {
-            'next_url': '/',
-            'action_confirm': 'Confirm',
-        }
-        req = compat_urllib_request.Request(self._AGE_URL,
-            compat_urllib_parse.urlencode(age_form).encode('ascii'))
-
-        self._download_webpage(
-            req, None,
-            note='Confirming age', errnote='Unable to confirm age')
-        return True
-
     def _real_initialize(self):
         if self._downloader is None:
             return
-        if not self._set_language():
-            return
+        self._set_language()
         if not self._login():
             return
-        self._confirm_age()
 
 
 class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
@@ -273,6 +259,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '138': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
+        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
+        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
+        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},
 
         # Dash mp4 audio
         '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
@@ -286,6 +275,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
         '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
         '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
+        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'},
         '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
@@ -295,11 +285,19 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
+        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
+        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
+        '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'VP9'},
 
         # Dash webm audio
         '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
         '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
 
+        # Dash webm audio with opus codec
+        '249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
+        '250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
+        '251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
+
         # RTMP (unnamed)
         '_rtmp': {'protocol': 'rtmp'},
     }
@@ -383,8 +381,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             'info_dict': {
                 'id': 'IB3lcPjvWLA',
                 'ext': 'm4a',
-                'title': 'Afrojack - The Spark ft. Spree Wilson',
-                'description': 'md5:9717375db5a9a3992be4668bbf3bc0a8',
+                'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
+                'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
                 'uploader': 'AfrojackVEVO',
                 'uploader_id': 'AfrojackVEVO',
                 'upload_date': '20131011',
@@ -394,6 +392,64 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'format': '141',
             },
         },
+        # Controversy video
+        {
+            'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
+            'info_dict': {
+                'id': 'T4XJQO3qol8',
+                'ext': 'mp4',
+                'upload_date': '20100909',
+                'uploader': 'The Amazing Atheist',
+                'uploader_id': 'TheAmazingAtheist',
+                'title': 'Burning Everyone\'s Koran',
+                'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
+            }
+        },
+        # Normal age-gate video (No vevo, embed allowed)
+        {
+            'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
+            'info_dict': {
+                'id': 'HtVdAasjOgU',
+                'ext': 'mp4',
+                'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
+                'description': 'md5:eca57043abae25130f58f655ad9a7771',
+                'uploader': 'The Witcher',
+                'uploader_id': 'WitcherGame',
+                'upload_date': '20140605',
+            },
+        },
+        # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
+        {
+            'url': '__2ABJjxzNo',
+            'info_dict': {
+                'id': '__2ABJjxzNo',
+                'ext': 'mp4',
+                'upload_date': '20100430',
+                'uploader_id': 'deadmau5',
+                'description': 'md5:12c56784b8032162bb936a5f76d55360',
+                'uploader': 'deadmau5',
+                'title': 'Deadmau5 - Some Chords (HD)',
+            },
+            'expected_warnings': [
+                'DASH manifest missing',
+            ]
+        },
+        # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
+        {
+            'url': 'lqQg6PlCWgI',
+            'info_dict': {
+                'id': 'lqQg6PlCWgI',
+                'ext': 'mp4',
+                'upload_date': '20120731',
+                'uploader_id': 'olympic',
+                'description': 'HO09  - Women -  GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
+                'uploader': 'Olympics',
+                'title': 'Hockey - Women -  GER-AUS - London 2012 Olympic Games',
+            },
+            'params': {
+                'skip_download': 'requires avconv',
+            }
+        },
     ]
 
     def __init__(self, *args, **kwargs):
@@ -466,7 +522,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         def gen_sig_code(idxs):
             def _genslice(start, end, step):
                 starts = '' if start == 0 else str(start)
-                ends = (':%d' % (end+step)) if end + step >= 0 else ':'
+                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                 steps = '' if step == 1 else (':%d' % step)
                 return 's[%s%s%s]' % (starts, ends, steps)
 
@@ -503,8 +559,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
     def _parse_sig_js(self, jscode):
         funcname = self._search_regex(
-            r'signature=([$a-zA-Z]+)', jscode,
-             'Initial JS player signature function name')
+            r'\.sig\|\|([a-zA-Z0-9]+)\(', jscode,
+            'Initial JS player signature function name')
 
         jsi = JSInterpreter(jscode)
         initial_function = jsi.extract_function(funcname)
@@ -581,9 +637,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             return {}
         player_config = json.loads(mobj.group(1))
         try:
-            args = player_config[u'args']
-            caption_url = args[u'ttsurl']
-            timestamp = args[u'timestamp']
+            args = player_config['args']
+            caption_url = args['ttsurl']
+            timestamp = args['timestamp']
             # We get the available subtitles
             list_params = compat_urllib_parse.urlencode({
                 'type': 'list',
@@ -593,7 +649,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             list_url = caption_url + '&' + list_params
             caption_list = self._download_xml(list_url, video_id)
             original_lang_node = caption_list.find('track')
-            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
+            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr':
                 self._downloader.report_warning('Video doesn\'t have automatic captions')
                 return {}
             original_lang = original_lang_node.attrib['lang_code']
@@ -626,10 +682,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
     def _extract_from_m3u8(self, manifest_url, video_id):
         url_map = {}
+
         def _get_urls(_manifest):
             lines = _manifest.split('\n')
             urls = filter(lambda l: l and not l.startswith('#'),
-                            lines)
+                          lines)
             return urls
         manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
         formats_urls = _get_urls(manifest)
@@ -642,6 +699,46 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
         return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
 
+    def _parse_dash_manifest(
+            self, video_id, dash_manifest_url, player_url, age_gate):
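+        # Fetch the MPD manifest and turn each <Representation> into a
+        # format dict; an encrypted signature embedded in the manifest URL
+        # as /s/<sig> is rewritten to /signature/<sig> first.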
+        def decrypt_sig(mobj):
+            s = mobj.group(1)
+            dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
+            return '/signature/%s' % dec_s
+        dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
+        dash_doc = self._download_xml(
+            dash_manifest_url, video_id,
+            note='Downloading DASH manifest',
+            errnote='Could not download DASH manifest')
+
+        formats = []
+        for r in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
+            url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
+            if url_el is None:
+                continue
+            format_id = r.attrib['id']
+            video_url = url_el.text
+            filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
+            f = {
+                'format_id': format_id,
+                'url': video_url,
+                'width': int_or_none(r.attrib.get('width')),
+                'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
+                'asr': int_or_none(r.attrib.get('audioSamplingRate')),
+                'filesize': filesize,
+                'fps': int_or_none(r.attrib.get('frameRate')),
+            }
+            try:
+                existing_format = next(
+                    fo for fo in formats
+                    if fo['format_id'] == format_id)
+            except StopIteration:
+                f.update(self._formats.get(format_id, {}))
+                formats.append(f)
+            else:
+                existing_format.update(f)
+        return formats
+
     def _real_extract(self, url):
         proto = (
             'http' if self._downloader.params.get('prefer_insecure', False)
@@ -654,17 +751,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         video_id = self.extract_id(url)
 
         # Get video webpage
-        url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
-        pref_cookies = [
-            c for c in self._downloader.cookiejar
-            if c.domain == '.youtube.com' and c.name == 'PREF']
-        for pc in pref_cookies:
-            if 'hl=' in pc.value:
-                pc.value = re.sub(r'hl=[^&]+', 'hl=en', pc.value)
-            else:
-                if pc.value:
-                    pc.value += '&'
-                pc.value += 'hl=en'
+        url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
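+        # The far-future bpctr value bypasses the content warning page
+        # otherwise shown in place of flagged ("controversy") videos.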
         video_webpage = self._download_webpage(url, video_id)
 
         # Attempt to extract SWF player URL
@@ -675,9 +762,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             player_url = None
 
         # Get video info
-        self.report_video_info_webpage_download(video_id)
         if re.search(r'player-age-gate-content">', video_webpage) is not None:
-            self.report_age_confirmation()
             age_gate = True
             # We simulate the access to the video from www.youtube.com/v/{video_id}
             # this can be viewed without logging into Youtube
@@ -685,24 +770,42 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'video_id': video_id,
                 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                 'sts': self._search_regex(
-                    r'"sts"\s*:\s*(\d+)', video_webpage, 'sts'),
+                    r'"sts"\s*:\s*(\d+)', video_webpage, 'sts', default=''),
             })
             video_info_url = proto + '://www.youtube.com/get_video_info?' + data
-            video_info_webpage = self._download_webpage(video_info_url, video_id,
-                                    note=False,
-                                    errnote='unable to download video info webpage')
+            video_info_webpage = self._download_webpage(
+                video_info_url, video_id,
+                note='Refetching age-gated info webpage',
+                errnote='unable to download video info webpage')
             video_info = compat_parse_qs(video_info_webpage)
         else:
             age_gate = False
-            for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
-                video_info_url = (proto + '://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
-                        % (video_id, el_type))
-                video_info_webpage = self._download_webpage(video_info_url, video_id,
-                                        note=False,
-                                        errnote='unable to download video info webpage')
-                video_info = compat_parse_qs(video_info_webpage)
-                if 'token' in video_info:
-                    break
+            try:
+                # Try looking directly into the video webpage
+                mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
+                if not mobj:
+                    raise ValueError('Could not find ytplayer.config')  # caught below
+                json_code = uppercase_escape(mobj.group(1))
+                ytplayer_config = json.loads(json_code)
+                args = ytplayer_config['args']
+                # Convert to the same format returned by compat_parse_qs
+                video_info = dict((k, [v]) for k, v in args.items())
+                if 'url_encoded_fmt_stream_map' not in args:
+                    raise ValueError('No stream_map present')  # caught below
+            except ValueError:
+                # We fall back to the get_video_info pages (used by the embed page)
+                self.report_video_info_webpage_download(video_id)
+                for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
+                    video_info_url = (
+                        '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+                        % (proto, video_id, el_type))
+                    video_info_webpage = self._download_webpage(
+                        video_info_url,
+                        video_id, note=False,
+                        errnote='unable to download video info webpage')
+                    video_info = compat_parse_qs(video_info_webpage)
+                    if 'token' in video_info:
+                        break
         if 'token' not in video_info:
             if 'reason' in video_info:
                 raise ExtractorError(
@@ -770,7 +873,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
         m_cat_container = self._search_regex(
             r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
-            video_webpage, 'categories', fatal=False)
+            video_webpage, 'categories', default=None)
         if m_cat_container:
             category = self._html_search_regex(
                 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
@@ -825,33 +928,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         # annotations
         video_annotations = None
         if self._downloader.params.get('writeannotations', False):
-                video_annotations = self._extract_annotations(video_id)
-
-        # Decide which formats to download
-        try:
-            mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
-            if not mobj:
-                raise ValueError('Could not find vevo ID')
-            json_code = uppercase_escape(mobj.group(1))
-            ytplayer_config = json.loads(json_code)
-            args = ytplayer_config['args']
-            # Easy way to know if the 's' value is in url_encoded_fmt_stream_map
-            # this signatures are encrypted
-            if 'url_encoded_fmt_stream_map' not in args:
-                raise ValueError('No stream_map present')  # caught below
-            re_signature = re.compile(r'[&,]s=')
-            m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
-            if m_s is not None:
-                self.to_screen('%s: Encrypted signatures detected.' % video_id)
-                video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
-            m_s = re_signature.search(args.get('adaptive_fmts', ''))
-            if m_s is not None:
-                if 'adaptive_fmts' in video_info:
-                    video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
-                else:
-                    video_info['adaptive_fmts'] = [args['adaptive_fmts']]
-        except ValueError:
-            pass
+            video_annotations = self._extract_annotations(video_id)
 
         def _map_to_format_list(urlmap):
             formats = []
@@ -874,8 +951,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'url': video_info['conn'][0],
                 'player_url': player_url,
             }]
-        elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
-            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
+        elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
+            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
             if 'rtmpe%3Dyes' in encoded_url_map:
                 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
             url_map = {}
@@ -921,7 +998,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
                         parts_sizes = self._signature_cache_id(encrypted_sig)
                         self.to_screen('{%s} signature length %s, %s' %
-                            (format_id, parts_sizes, player_desc))
+                                       (format_id, parts_sizes, player_desc))
 
                     signature = self._decrypt_signature(
                         encrypted_sig, video_id, player_url, age_gate)
@@ -938,76 +1015,42 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
 
         # Look for the DASH manifest
-        if (self._downloader.params.get('youtube_include_dash_manifest', False)):
-            try:
-                # The DASH manifest used needs to be the one from the original video_webpage.
-                # The one found in get_video_info seems to be using different signatures.
-                # However, in the case of an age restriction there won't be any embedded dashmpd in the video_webpage.
-                # Luckily, it seems, this case uses some kind of default signature (len == 86), so the
-                # combination of get_video_info and the _static_decrypt_signature() decryption fallback will work here.
-                if age_gate:
-                    dash_manifest_url = video_info.get('dashmpd')[0]
+        if self._downloader.params.get('youtube_include_dash_manifest', True):
+            dash_mpd = video_info.get('dashmpd')
+            if dash_mpd:
+                dash_manifest_url = dash_mpd[0]
+                try:
+                    dash_formats = self._parse_dash_manifest(
+                        video_id, dash_manifest_url, player_url, age_gate)
+                except (ExtractorError, KeyError) as e:
+                    self.report_warning(
+                        'Skipping DASH manifest: %r' % e, video_id)
                 else:
-                    dash_manifest_url = ytplayer_config['args']['dashmpd']
-                def decrypt_sig(mobj):
-                    s = mobj.group(1)
-                    dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
-                    return '/signature/%s' % dec_s
-                dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
-                dash_doc = self._download_xml(
-                    dash_manifest_url, video_id,
-                    note='Downloading DASH manifest',
-                    errnote='Could not download DASH manifest')
-                for r in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
-                    url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
-                    if url_el is None:
-                        continue
-                    format_id = r.attrib['id']
-                    video_url = url_el.text
-                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
-                    f = {
-                        'format_id': format_id,
-                        'url': video_url,
-                        'width': int_or_none(r.attrib.get('width')),
-                        'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
-                        'asr': int_or_none(r.attrib.get('audioSamplingRate')),
-                        'filesize': filesize,
-                    }
-                    try:
-                        existing_format = next(
-                            fo for fo in formats
-                            if fo['format_id'] == format_id)
-                    except StopIteration:
-                        f.update(self._formats.get(format_id, {}))
-                        formats.append(f)
-                    else:
-                        existing_format.update(f)
-
-            except (ExtractorError, KeyError) as e:
-                self.report_warning('Skipping DASH manifest: %s' % e, video_id)
+                    formats.extend(dash_formats)
 
         self._sort_formats(formats)
 
         return {
-            'id':           video_id,
-            'uploader':     video_uploader,
-            'uploader_id':  video_uploader_id,
-            'upload_date':  upload_date,
-            'title':        video_title,
-            'thumbnail':    video_thumbnail,
-            'description':  video_description,
-            'categories':   video_categories,
-            'subtitles':    video_subtitles,
-            'duration':     video_duration,
-            'age_limit':    18 if age_gate else 0,
-            'annotations':  video_annotations,
+            'id': video_id,
+            'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
+            'upload_date': upload_date,
+            'title': video_title,
+            'thumbnail': video_thumbnail,
+            'description': video_description,
+            'categories': video_categories,
+            'subtitles': video_subtitles,
+            'duration': video_duration,
+            'age_limit': 18 if age_gate else 0,
+            'annotations': video_annotations,
             'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
-            'view_count':   view_count,
+            'view_count': view_count,
             'like_count': like_count,
             'dislike_count': dislike_count,
-            'formats':      formats,
+            'formats': formats,
         }
 
+
 class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
     IE_DESC = 'YouTube.com playlists'
     _VALID_URL = r"""(?x)(?:
@@ -1021,7 +1064,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
                         )
                         (
                             (?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
-                            # Top tracks, they can also include dots 
+                            # Top tracks, they can also include dots
                             |(?:MC)[\w\.]*
                         )
                         .*
@@ -1029,13 +1072,13 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
                         ((?:PL|LL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
                      )"""
     _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
-    _MORE_PAGES_INDICATOR = r'data-link-type="next"'
     _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
     IE_NAME = 'youtube:playlist'
     _TESTS = [{
         'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
         'info_dict': {
             'title': 'ytdl test PL',
+            'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
         },
         'playlist_count': 3,
     }, {
@@ -1055,7 +1098,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
         'note': 'issue #673',
         'url': 'PLBB231211A4F62143',
         'info_dict': {
-            'title': 'Team Fortress 2 (Class-based LP)',
+            'title': '[OLD]Team Fortress 2 (Class-based LP)',
         },
         'playlist_mincount': 26,
     }, {
@@ -1137,7 +1180,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
             return self._extract_mix(playlist_id)
         if playlist_id.startswith('TL'):
             raise ExtractorError('For downloading YouTube.com top lists, use '
-                'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)
+                                 'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)
 
         url = self._TEMPLATE_URL % playlist_id
         page = self._download_webpage(url, playlist_id)
@@ -1182,7 +1225,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
 class YoutubeTopListIE(YoutubePlaylistIE):
     IE_NAME = 'youtube:toplist'
     IE_DESC = ('YouTube.com top lists, "yttoplist:{channel}:{list title}"'
-        ' (Example: "yttoplist:music:Top Tracks")')
+               ' (Example: "yttoplist:music:Top Tracks")')
     _VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'
     _TESTS = [{
         'url': 'yttoplist:music:Trending',
@@ -1204,7 +1247,7 @@ class YoutubeTopListIE(YoutubePlaylistIE):
                 <span[^>]*>.*?%s.*?</span>''' % re.escape(query),
             channel_page, 'list')
         url = compat_urlparse.urljoin('https://www.youtube.com/', link)
-        
+
         video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
         ids = []
         # sometimes the webpage doesn't contain the videos
@@ -1224,9 +1267,7 @@ class YoutubeTopListIE(YoutubePlaylistIE):
 
 class YoutubeChannelIE(InfoExtractor):
     IE_DESC = 'YouTube.com channels'
-    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
-    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
-    _MORE_PAGES_URL = 'https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
+    _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
     IE_NAME = 'youtube:channel'
     _TESTS = [{
         'note': 'paginated channel',
@@ -1242,13 +1283,8 @@ class YoutubeChannelIE(InfoExtractor):
         return ids_in_page
 
     def _real_extract(self, url):
-        # Extract channel id
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError('Invalid URL: %s' % url)
+        channel_id = self._match_id(url)
 
-        # Download channel page
-        channel_id = mobj.group(1)
         video_ids = []
         url = 'https://www.youtube.com/channel/%s/videos' % channel_id
         channel_page = self._download_webpage(url, channel_id)
@@ -1262,30 +1298,39 @@ class YoutubeChannelIE(InfoExtractor):
             # The videos are contained in a single page
             # the ajax pages can't be used, they are empty
             video_ids = self.extract_videos_from_page(channel_page)
-        else:
-            # Download all channel pages using the json-based channel_ajax query
+            entries = [
+                self.url_result(video_id, 'Youtube', video_id=video_id)
+                for video_id in video_ids]
+            return self.playlist_result(entries, channel_id)
+
+        def _entries():
+            more_widget_html = content_html = channel_page
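+            # Each "load more" AJAX response carries the next batch of video
+            # HTML plus the widget HTML that links to the following page.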
             for pagenum in itertools.count(1):
-                url = self._MORE_PAGES_URL % (pagenum, channel_id)
-                page = self._download_json(
-                    url, channel_id, note='Downloading page #%s' % pagenum,
-                    transform_source=uppercase_escape)
 
-                ids_in_page = self.extract_videos_from_page(page['content_html'])
-                video_ids.extend(ids_in_page)
-    
-                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
+                ids_in_page = self.extract_videos_from_page(content_html)
+                for video_id in ids_in_page:
+                    yield self.url_result(
+                        video_id, 'Youtube', video_id=video_id)
+
+                mobj = re.search(
+                    r'data-uix-load-more-href="/?(?P<more>[^"]+)"',
+                    more_widget_html)
+                if not mobj:
                     break
 
-        self._downloader.to_screen('[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
+                more = self._download_json(
+                    'https://youtube.com/%s' % mobj.group('more'), channel_id,
+                    'Downloading page #%s' % (pagenum + 1),
+                    transform_source=uppercase_escape)
+                content_html = more['content_html']
+                more_widget_html = more['load_more_widget_html']
 
-        url_entries = [self.url_result(video_id, 'Youtube', video_id=video_id)
-                       for video_id in video_ids]
-        return self.playlist_result(url_entries, channel_id)
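+        # _entries() is a generator, so further pages are downloaded only as
+        # the resulting playlist is consumed.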
+        return self.playlist_result(_entries(), channel_id)
 
 
 class YoutubeUserIE(InfoExtractor):
     IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
-    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
+    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
     _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
     _GDATA_PAGE_SIZE = 50
     _GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
@@ -1307,16 +1352,13 @@ class YoutubeUserIE(InfoExtractor):
         # Don't return True if the url can be extracted with another youtube
         # extractor, the regex is too permissive and it would match.
         other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
-        if any(ie.suitable(url) for ie in other_ies): return False
-        else: return super(YoutubeUserIE, cls).suitable(url)
+        if any(ie.suitable(url) for ie in other_ies):
+            return False
+        else:
+            return super(YoutubeUserIE, cls).suitable(url)
 
     def _real_extract(self, url):
-        # Extract username
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError('Invalid URL: %s' % url)
-
-        username = mobj.group(1)
+        username = self._match_id(url)
 
         # Download video ids using YouTube Data API. Result size per
         # query is limited (currently to 50 videos) so we need to query
@@ -1514,8 +1556,8 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
         paging = 0
         for i in itertools.count(1):
             info = self._download_json(self._FEED_TEMPLATE % paging,
-                                          '%s feed' % self._FEED_NAME,
-                                          'Downloading page %s' % i)
+                                       '%s feed' % self._FEED_NAME,
+                                       'Downloading page %s' % i)
             feed_html = info.get('feed_html') or info.get('content_html')
             load_more_widget_html = info.get('load_more_widget_html') or feed_html
             m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
@@ -1531,29 +1573,33 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
             paging = mobj.group('paging')
         return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
 
+
 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
-    IE_DESC = 'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
+    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
     _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
     _FEED_NAME = 'recommended'
     _PLAYLIST_TITLE = 'Youtube Recommended videos'
 
+
 class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
-    IE_DESC = 'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
+    IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
     _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
     _FEED_NAME = 'watch_later'
     _PLAYLIST_TITLE = 'Youtube Watch Later'
     _PERSONAL_FEED = True
 
+
 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
-    IE_DESC = 'Youtube watch history, "ythistory" keyword (requires authentication)'
+    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
     _VALID_URL = 'https?://www\.youtube\.com/feed/history|:ythistory'
     _FEED_NAME = 'history'
     _PERSONAL_FEED = True
     _PLAYLIST_TITLE = 'Youtube Watch History'
 
+
 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
     IE_NAME = 'youtube:favorites'
-    IE_DESC = 'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
+    IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
     _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
     _LOGIN_REQUIRED = True
 
index 3b1ac4e9f5246e268e0c0b49d64249196270e9d4..74c76a9a0446482c303f3b4182f3ef2bd4942c0d 100644 (file)
@@ -1,17 +1,95 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import functools
 import re
 
 from .common import InfoExtractor
 from ..utils import (
     int_or_none,
     unified_strdate,
+    OnDemandPagedList,
 )
 
 
+def extract_from_xml_url(ie, video_id, xml_url):
+    doc = ie._download_xml(
+        xml_url, video_id,
+        note='Downloading video info',
+        errnote='Failed to download video info')
+
+    title = doc.find('.//information/title').text
+    description = doc.find('.//information/detail').text
+    duration = int(doc.find('.//details/lengthSec').text)
+    uploader_node = doc.find('.//details/originChannelTitle')
+    uploader = None if uploader_node is None else uploader_node.text
+    uploader_id_node = doc.find('.//details/originChannelId')
+    uploader_id = None if uploader_id_node is None else uploader_id_node.text
+    upload_date = unified_strdate(doc.find('.//details/airtime').text)
+
+    def xml_to_format(fnode):
+        video_url = fnode.find('url').text
+        is_available = 'http://www.metafilegenerator' not in video_url
+
+        format_id = fnode.attrib['basetype']
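+        # basetype packs codec, container and transport into one
+        # underscore-separated string; an illustrative (unverified) value
+        # would be "h264_aac_mp4_http_na_na".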
+        format_m = re.match(r'''(?x)
+            (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
+            (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
+        ''', format_id)
+
+        ext = format_m.group('container')
+        proto = format_m.group('proto').lower()
+
+        quality = fnode.find('./quality').text
+        abr = int(fnode.find('./audioBitrate').text) // 1000
+        vbr_node = fnode.find('./videoBitrate')
+        vbr = None if vbr_node is None else int(vbr_node.text) // 1000
+
+        width_node = fnode.find('./width')
+        width = None if width_node is None else int_or_none(width_node.text)
+        height_node = fnode.find('./height')
+        height = None if height_node is None else int_or_none(height_node.text)
+
+        format_note = ''
+        if not format_note:
+            format_note = None
+
+        return {
+            'format_id': format_id + '-' + quality,
+            'url': video_url,
+            'ext': ext,
+            'acodec': format_m.group('acodec'),
+            'vcodec': format_m.group('vcodec'),
+            'abr': abr,
+            'vbr': vbr,
+            'width': width,
+            'height': height,
+            'filesize': int_or_none(fnode.find('./filesize').text),
+            'format_note': format_note,
+            'protocol': proto,
+            '_available': is_available,
+        }
+
+    format_nodes = doc.findall('.//formitaeten/formitaet')
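+    # xml_to_format marks entries whose URL points at www.metafilegenerator
+    # as unavailable; those entries are filtered out before sorting.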
+    formats = list(filter(
+        lambda f: f['_available'],
+        map(xml_to_format, format_nodes)))
+    ie._sort_formats(formats)
+
+    return {
+        'id': video_id,
+        'title': title,
+        'description': description,
+        'duration': duration,
+        'uploader': uploader,
+        'uploader_id': uploader_id,
+        'upload_date': upload_date,
+        'formats': formats,
+    }
+
+
 class ZDFIE(InfoExtractor):
-    _VALID_URL = r'^https?://www\.zdf\.de/ZDFmediathek(?P<hash>#)?/(.*beitrag/(?:video/)?)(?P<video_id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'
+    _VALID_URL = r'(?:zdf:|zdf:video:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/(.*beitrag/(?:video/)?))(?P<id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'
 
     _TEST = {
         'url': 'http://www.zdf.de/ZDFmediathek/beitrag/video/2037704/ZDFspezial---Ende-des-Machtpokers--?bc=sts;stt',
@@ -29,81 +107,53 @@ class ZDFIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
-
+        video_id = self._match_id(url)
         xml_url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
+        return extract_from_xml_url(self, video_id, xml_url)
+
+
+class ZDFChannelIE(InfoExtractor):
+    _VALID_URL = r'(?:zdf:topic:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/.*kanaluebersicht/)(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://www.zdf.de/ZDFmediathek#/kanaluebersicht/1586442/sendung/Titanic',
+        'info_dict': {
+            'id': '1586442',
+        },
+        'playlist_count': 4,
+    }
+    _PAGE_SIZE = 50
+
+    def _fetch_page(self, channel_id, page):
+        offset = page * self._PAGE_SIZE
+        xml_url = (
+            'http://www.zdf.de/ZDFmediathek/xmlservice/web/aktuellste?ak=web&offset=%d&maxLength=%d&id=%s'
+            % (offset, self._PAGE_SIZE, channel_id))
         doc = self._download_xml(
-            xml_url, video_id,
-            note='Downloading video info',
-            errnote='Failed to download video info')
+            xml_url, channel_id,
+            note='Downloading channel info',
+            errnote='Failed to download channel info')
 
         title = doc.find('.//information/title').text
         description = doc.find('.//information/detail').text
-        duration = int(doc.find('.//details/lengthSec').text)
-        uploader_node = doc.find('.//details/originChannelTitle')
-        uploader = None if uploader_node is None else uploader_node.text
-        uploader_id_node = doc.find('.//details/originChannelId')
-        uploader_id = None if uploader_id_node is None else uploader_id_node.text
-        upload_date = unified_strdate(doc.find('.//details/airtime').text)
-
-        def xml_to_format(fnode):
-            video_url = fnode.find('url').text
-            is_available = 'http://www.metafilegenerator' not in video_url
-
-            format_id = fnode.attrib['basetype']
-            format_m = re.match(r'''(?x)
-                (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
-                (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
-            ''', format_id)
-
-            ext = format_m.group('container')
-            proto = format_m.group('proto').lower()
-
-            quality = fnode.find('./quality').text
-            abr = int(fnode.find('./audioBitrate').text) // 1000
-            vbr_node = fnode.find('./videoBitrate')
-            vbr = None if vbr_node is None else int(vbr_node.text) // 1000
-
-            width_node = fnode.find('./width')
-            width = None if width_node is None else int_or_none(width_node.text)
-            height_node = fnode.find('./height')
-            height = None if height_node is None else int_or_none(height_node.text)
-
-            format_note = ''
-            if not format_note:
-                format_note = None
-
-            return {
-                'format_id': format_id + '-' + quality,
-                'url': video_url,
-                'ext': ext,
-                'acodec': format_m.group('acodec'),
-                'vcodec': format_m.group('vcodec'),
-                'abr': abr,
-                'vbr': vbr,
-                'width': width,
-                'height': height,
-                'filesize': int_or_none(fnode.find('./filesize').text),
-                'format_note': format_note,
-                'protocol': proto,
-                '_available': is_available,
+        for asset in doc.findall('.//teasers/teaser'):
+            a_type = asset.find('./type').text
+            a_id = asset.find('./details/assetId').text
+            if a_type not in ('video', 'topic'):
+                continue
+            yield {
+                '_type': 'url',
+                'playlist_title': title,
+                'playlist_description': description,
+                'url': 'zdf:%s:%s' % (a_type, a_id),
             }
 
-        format_nodes = doc.findall('.//formitaeten/formitaet')
-        formats = list(filter(
-            lambda f: f['_available'],
-            map(xml_to_format, format_nodes)))
-
-        self._sort_formats(formats)
+    def _real_extract(self, url):
+        channel_id = self._match_id(url)
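+        # OnDemandPagedList fetches the 50-entry pages lazily, so slicing
+        # the playlist (e.g. --playlist-end) downloads only the pages needed.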
+        entries = OnDemandPagedList(
+            functools.partial(self._fetch_page, channel_id), self._PAGE_SIZE)
 
         return {
-            'id': video_id,
-            'title': title,
-            'description': description,
-            'duration': duration,
-            'uploader': uploader,
-            'uploader_id': uploader_id,
-            'upload_date': upload_date,
-            'formats': formats,
-        }
\ No newline at end of file
+            '_type': 'playlist',
+            'id': channel_id,
+            'entries': entries,
+        }
diff --git a/youtube_dl/extractor/zingmp3.py b/youtube_dl/extractor/zingmp3.py
new file mode 100644 (file)
index 0000000..1afbe68
--- /dev/null
@@ -0,0 +1,107 @@
+# coding=utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class ZingMp3BaseInfoExtractor(InfoExtractor):
+
+    @staticmethod
+    def _extract_item(item):
+        title = item.find('./title').text.strip()
+        source = item.find('./source').text
+        extension = item.attrib['type']
+        thumbnail = item.find('./backimage').text
+
+        return {
+            'title': title,
+            'url': source,
+            'ext': extension,
+            'thumbnail': thumbnail,
+        }
+
+    def _extract_player_xml(self, player_xml_url, id, playlist_title=None):
+        player_xml = self._download_xml(player_xml_url, id, 'Downloading Player XML')
+        items = player_xml.findall('./item')
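+        # A single <item> is one song; several items form an album playlist
+        # whose entries are numbered <id>-1, <id>-2, ...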
+
+        if len(items) == 1:
+            # one single song
+            data = self._extract_item(items[0])
+            data['id'] = id
+
+            return data
+        else:
+            # playlist of songs
+            entries = []
+
+            for i, item in enumerate(items, 1):
+                entry = self._extract_item(item)
+                entry['id'] = '%s-%d' % (id, i)
+                entries.append(entry)
+
+            return {
+                '_type': 'playlist',
+                'id': id,
+                'title': playlist_title,
+                'entries': entries,
+            }
+
+
+class ZingMp3SongIE(ZingMp3BaseInfoExtractor):
+    _VALID_URL = r'https?://mp3\.zing\.vn/bai-hat/(?P<slug>[^/]+)/(?P<song_id>\w+)\.html'
+    _TESTS = [{
+        'url': 'http://mp3.zing.vn/bai-hat/Xa-Mai-Xa-Bao-Thy/ZWZB9WAB.html',
+        'md5': 'ead7ae13693b3205cbc89536a077daed',
+        'info_dict': {
+            'id': 'ZWZB9WAB',
+            'title': 'Xa Mãi Xa',
+            'ext': 'mp3',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        },
+    }]
+    IE_NAME = 'zingmp3:song'
+    IE_DESC = 'mp3.zing.vn songs'
+
+    def _real_extract(self, url):
+        matched = re.match(self._VALID_URL, url)
+        slug = matched.group('slug')
+        song_id = matched.group('song_id')
+
+        webpage = self._download_webpage(
+            'http://mp3.zing.vn/bai-hat/%s/%s.html' % (slug, song_id), song_id)
+
+        player_xml_url = self._search_regex(
+            r'&amp;xmlURL=(?P<xml_url>[^&]+)&', webpage, 'player xml url')
+
+        return self._extract_player_xml(player_xml_url, song_id)
+
+
+class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor):
+    _VALID_URL = r'https?://mp3\.zing\.vn/album/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html'
+    _TESTS = [{
+        'url': 'http://mp3.zing.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html',
+        'info_dict': {
+            '_type': 'playlist',
+            'id': 'ZWZBWDAF',
+            'title': 'Lâu Đài Tình Ái - Bằng Kiều ft. Minh Tuyết | Album 320 lossless',
+        },
+        'playlist_count': 10,
+    }]
+    IE_NAME = 'zingmp3:album'
+    IE_DESC = 'mp3.zing.vn albums'
+
+    def _real_extract(self, url):
+        matched = re.match(self._VALID_URL, url)
+        slug = matched.group('slug')
+        album_id = matched.group('album_id')
+
+        webpage = self._download_webpage(
+            'http://mp3.zing.vn/album/%s/%s.html' % (slug, album_id), album_id)
+        player_xml_url = self._search_regex(
+            r'&amp;xmlURL=(?P<xml_url>[^&]+)&', webpage, 'player xml url')
+
+        return self._extract_player_xml(
+            player_xml_url, album_id,
+            playlist_title=self._og_search_title(webpage))
index c40cd376d120f2063bb4cf6958ca4cf701db1f00..b4617fbad0fc40323a129ce1218f9f97590c89bb 100644 (file)
@@ -61,7 +61,7 @@ class JSInterpreter(object):
             pass
 
         m = re.match(
-            r'^(?P<var>[a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$',
+            r'^(?P<var>[$a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$',
             expr)
         if m:
             variable = m.group('var')
index f651337adbedf1b58460d7fa147dec79664b0f27..21c452141479a492f23c1f7f20a916c347f0a3a4 100644 (file)
@@ -5,6 +5,11 @@ import optparse
 import shlex
 import sys
 
+from .compat import (
+    compat_expanduser,
+    compat_getenv,
+    compat_kwargs,
+)
 from .utils import (
     get_term_width,
     write_string,
@@ -27,19 +32,19 @@ def parseOpts(overrideArguments=None):
         return res
 
     def _readUserConf():
-        xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
+        xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
         if xdg_config_home:
             userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
             if not os.path.isfile(userConfFile):
                 userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
         else:
-            userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
+            userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
             if not os.path.isfile(userConfFile):
-                userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
+                userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
         userConf = _readOptions(userConfFile, None)
 
         if userConf is None:
-            appdata_dir = os.environ.get('appdata')
+            appdata_dir = compat_getenv('appdata')
             if appdata_dir:
                 userConf = _readOptions(
                     os.path.join(appdata_dir, 'youtube-dl', 'config'),
@@ -51,11 +56,11 @@ def parseOpts(overrideArguments=None):
 
         if userConf is None:
             userConf = _readOptions(
-                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'),
+                os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
                 default=None)
         if userConf is None:
             userConf = _readOptions(
-                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'),
+                os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
                 default=None)
 
         if userConf is None:
@@ -75,7 +80,8 @@ def parseOpts(overrideArguments=None):
         if len(opts) > 1:
             opts.insert(1, ', ')
 
-        if option.takes_value(): opts.append(' %s' % option.metavar)
+        if option.takes_value():
+            opts.append(' %s' % option.metavar)
 
         return "".join(opts)
 
@@ -87,68 +93,69 @@ def parseOpts(overrideArguments=None):
         for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
             try:
                 i = opts.index(private_opt)
-                opts[i+1] = 'PRIVATE'
+                opts[i + 1] = 'PRIVATE'
             except ValueError:
                 pass
         return opts
 
-    max_width = 80
-    max_help_position = 80
-
     # No need to wrap help messages if we're on a wide console
     columns = get_term_width()
-    if columns: max_width = columns
+    max_width = columns if columns else 80
+    max_help_position = 80
 
     fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
     fmt.format_option_strings = _format_option_string
 
     kw = {
-        'version'   : __version__,
-        'formatter' : fmt,
-        'usage' : '%prog [options] url [url...]',
-        'conflict_handler' : 'resolve',
+        'version': __version__,
+        'formatter': fmt,
+        'usage': '%prog [options] url [url...]',
+        'conflict_handler': 'resolve',
     }
 
-    parser = optparse.OptionParser(**kw)
+    parser = optparse.OptionParser(**compat_kwargs(kw))
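+    # compat_kwargs converts the keyword names (unicode here because of
+    # unicode_literals) back to str, which Python 2's OptionParser requires.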
 
-    # option groups
-    general        = optparse.OptionGroup(parser, 'General Options')
-    selection      = optparse.OptionGroup(parser, 'Video Selection')
-    authentication = optparse.OptionGroup(parser, 'Authentication Options')
-    video_format   = optparse.OptionGroup(parser, 'Video Format Options')
-    subtitles      = optparse.OptionGroup(parser, 'Subtitle Options')
-    downloader     = optparse.OptionGroup(parser, 'Download Options')
-    postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
-    filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
-    workarounds    = optparse.OptionGroup(parser, 'Workarounds')
-    verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
-
-    general.add_option('-h', '--help',
-            action='help', help='print this help text and exit')
-    general.add_option('-v', '--version',
-            action='version', help='print program version and exit')
-    general.add_option('-U', '--update',
-            action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
-    general.add_option('-i', '--ignore-errors',
-            action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
-    general.add_option('--abort-on-error',
-            action='store_false', dest='ignoreerrors',
-            help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
-    general.add_option('--dump-user-agent',
-            action='store_true', dest='dump_user_agent',
-            help='display the current browser identification', default=False)
-    general.add_option('--list-extractors',
-            action='store_true', dest='list_extractors',
-            help='List all supported extractors and the URLs they would handle', default=False)
-    general.add_option('--extractor-descriptions',
-            action='store_true', dest='list_extractor_descriptions',
-            help='Output descriptions of all supported extractors', default=False)
+    general = optparse.OptionGroup(parser, 'General Options')
+    general.add_option(
+        '-h', '--help',
+        action='help',
+        help='print this help text and exit')
+    general.add_option(
+        '-v', '--version',
+        action='version',
+        help='print program version and exit')
+    general.add_option(
+        '-U', '--update',
+        action='store_true', dest='update_self',
+        help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
+    general.add_option(
+        '-i', '--ignore-errors',
+        action='store_true', dest='ignoreerrors', default=False,
+        help='continue on download errors, for example to skip unavailable videos in a playlist')
+    general.add_option(
+        '--abort-on-error',
+        action='store_false', dest='ignoreerrors',
+        help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
+    general.add_option(
+        '--dump-user-agent',
+        action='store_true', dest='dump_user_agent', default=False,
+        help='display the current browser identification')
     general.add_option(
-        '--proxy', dest='proxy', default=None, metavar='URL',
+        '--list-extractors',
+        action='store_true', dest='list_extractors', default=False,
+        help='List all supported extractors and the URLs they would handle')
+    general.add_option(
+        '--extractor-descriptions',
+        action='store_true', dest='list_extractor_descriptions', default=False,
+        help='Output descriptions of all supported extractors')
+    general.add_option(
+        '--proxy', dest='proxy',
+        default=None, metavar='URL',
         help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
     general.add_option(
-        '--socket-timeout', dest='socket_timeout',
-        type=float, default=None, help=u'Time to wait before giving up, in seconds')
+        '--socket-timeout',
+        dest='socket_timeout', type=float, default=None,
+        help='Time to wait before giving up, in seconds')
     general.add_option(
         '--default-search',
         dest='default_search', metavar='PREFIX',
@@ -156,8 +163,17 @@ def parseOpts(overrideArguments=None):
     general.add_option(
         '--ignore-config',
         action='store_true',
-        help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
+        help='Do not read configuration files. '
+        'When given in the global configuration file /etc/youtube-dl.conf: '
+        'Do not read the user configuration in ~/.config/youtube-dl/config '
+        '(%APPDATA%/youtube-dl/config.txt on Windows)')
+    general.add_option(
+        '--flat-playlist',
+        action='store_const', dest='extract_flat', const='in_playlist',
+        default=False,
+        help='Do not extract the videos of a playlist, only list them.')
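+    # const='in_playlist' flattens only entries discovered inside playlists;
+    # a URL given directly on the command line is still fully extracted.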
 
+    selection = optparse.OptionGroup(parser, 'Video Selection')
     selection.add_option(
         '--playlist-start',
         dest='playliststart', metavar='NUMBER', default=1, type=int,
@@ -166,245 +182,391 @@ def parseOpts(overrideArguments=None):
         '--playlist-end',
         dest='playlistend', metavar='NUMBER', default=None, type=int,
         help='playlist video to end at (default is last)')
-    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
-    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
-    selection.add_option('--max-downloads', metavar='NUMBER',
-                         dest='max_downloads', type=int, default=None,
-                         help='Abort after downloading NUMBER files')
-    selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
-    selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
-    selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
     selection.add_option(
-        '--datebefore', metavar='DATE', dest='datebefore', default=None,
+        '--match-title',
+        dest='matchtitle', metavar='REGEX',
+        help='download only matching titles (regex or caseless sub-string)')
+    selection.add_option(
+        '--reject-title',
+        dest='rejecttitle', metavar='REGEX',
+        help='skip download for matching titles (regex or caseless sub-string)')
+    selection.add_option(
+        '--max-downloads',
+        dest='max_downloads', metavar='NUMBER', type=int, default=None,
+        help='Abort after downloading NUMBER files')
+    selection.add_option(
+        '--min-filesize',
+        metavar='SIZE', dest='min_filesize', default=None,
+        help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
+    selection.add_option(
+        '--max-filesize',
+        metavar='SIZE', dest='max_filesize', default=None,
+        help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
+    selection.add_option(
+        '--date',
+        metavar='DATE', dest='date', default=None,
+        help='download only videos uploaded on this date')
+    selection.add_option(
+        '--datebefore',
+        metavar='DATE', dest='datebefore', default=None,
         help='download only videos uploaded on or before this date (i.e. inclusive)')
     selection.add_option(
-        '--dateafter', metavar='DATE', dest='dateafter', default=None,
+        '--dateafter',
+        metavar='DATE', dest='dateafter', default=None,
         help='download only videos uploaded on or after this date (i.e. inclusive)')
     selection.add_option(
-        '--min-views', metavar='COUNT', dest='min_views',
-        default=None, type=int,
-        help="Do not download any videos with less than COUNT views",)
+        '--min-views',
+        metavar='COUNT', dest='min_views', default=None, type=int,
+        help='Do not download any videos with less than COUNT views')
     selection.add_option(
-        '--max-views', metavar='COUNT', dest='max_views',
-        default=None, type=int,
-        help="Do not download any videos with more than COUNT views",)
-    selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
-    selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
-                         help='download only videos suitable for the given age',
-                         default=None, type=int)
-    selection.add_option('--download-archive', metavar='FILE',
-                         dest='download_archive',
-                         help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
+        '--max-views',
+        metavar='COUNT', dest='max_views', default=None, type=int,
+        help='Do not download any videos with more than COUNT views')
     selection.add_option(
-        '--include-ads', dest='include_ads',
-        action='store_true',
-        help='Download advertisements as well (experimental)')
+        '--no-playlist',
+        action='store_true', dest='noplaylist', default=False,
+        help='If the URL refers to a video and a playlist, download only the video.')
+    selection.add_option(
+        '--age-limit',
+        metavar='YEARS', dest='age_limit', default=None, type=int,
+        help='download only videos suitable for the given age')
+    selection.add_option(
+        '--download-archive', metavar='FILE',
+        dest='download_archive',
+        help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
     selection.add_option(
-        '--youtube-include-dash-manifest', action='store_true',
-        dest='youtube_include_dash_manifest', default=False,
-        help='Try to download the DASH manifest on YouTube videos (experimental)')
-
-    authentication.add_option('-u', '--username',
-            dest='username', metavar='USERNAME', help='account username')
-    authentication.add_option('-p', '--password',
-            dest='password', metavar='PASSWORD', help='account password')
-    authentication.add_option('-2', '--twofactor',
-            dest='twofactor', metavar='TWOFACTOR', help='two-factor auth code')
-    authentication.add_option('-n', '--netrc',
-            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
-    authentication.add_option('--video-password',
-            dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
-
-
-    video_format.add_option('-f', '--format',
-            action='store', dest='format', metavar='FORMAT', default=None,
-            help='video format code, specify the order of preference using slashes: -f 22/17/18 .  -f mp4 , -f m4a and  -f flv  are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality. Use commas to download multiple audio formats, such as  -f  136/137/mp4/bestvideo,140/m4a/bestaudio')
-    video_format.add_option('--all-formats',
-            action='store_const', dest='format', help='download all available video formats', const='all')
-    video_format.add_option('--prefer-free-formats',
-            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
-    video_format.add_option('--max-quality',
-            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
-    video_format.add_option('-F', '--list-formats',
-            action='store_true', dest='listformats', help='list all available formats')
-
-    subtitles.add_option('--write-sub', '--write-srt',
-            action='store_true', dest='writesubtitles',
-            help='write subtitle file', default=False)
-    subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
-            action='store_true', dest='writeautomaticsub',
-            help='write automatic subtitle file (youtube only)', default=False)
-    subtitles.add_option('--all-subs',
-            action='store_true', dest='allsubtitles',
-            help='downloads all the available subtitles of the video', default=False)
-    subtitles.add_option('--list-subs',
-            action='store_true', dest='listsubtitles',
-            help='lists all available subtitles for the video', default=False)
-    subtitles.add_option('--sub-format',
-            action='store', dest='subtitlesformat', metavar='FORMAT',
-            help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
-    subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
-            action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
-            default=[], callback=_comma_separated_values_options_callback,
-            help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
-
-    downloader.add_option('-r', '--rate-limit',
-            dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
-    downloader.add_option('-R', '--retries',
-            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
-    downloader.add_option('--buffer-size',
-            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
-    downloader.add_option('--no-resize-buffer',
-            action='store_true', dest='noresizebuffer',
-            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
-    downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
+        '--include-ads',
+        dest='include_ads', action='store_true',
+        help='Download advertisements as well (experimental)')
+
+    authentication = optparse.OptionGroup(parser, 'Authentication Options')
+    authentication.add_option(
+        '-u', '--username',
+        dest='username', metavar='USERNAME',
+        help='login with this account ID')
+    authentication.add_option(
+        '-p', '--password',
+        dest='password', metavar='PASSWORD',
+        help='account password')
+    authentication.add_option(
+        '-2', '--twofactor',
+        dest='twofactor', metavar='TWOFACTOR',
+        help='two-factor auth code')
+    authentication.add_option(
+        '-n', '--netrc',
+        action='store_true', dest='usenetrc', default=False,
+        help='use .netrc authentication data')
+    authentication.add_option(
+        '--video-password',
+        dest='videopassword', metavar='PASSWORD',
+        help='video password (vimeo, smotri)')
+
+    video_format = optparse.OptionGroup(parser, 'Video Format Options')
+    video_format.add_option(
+        '-f', '--format',
+        action='store', dest='format', metavar='FORMAT', default=None,
+        help=(
+            'video format code, specify the order of preference using'
+            ' slashes: -f 22/17/18 .  -f mp4 , -f m4a and  -f flv  are also'
+            ' supported. You can also use the special names "best",'
+            ' "bestvideo", "bestaudio", "worst", "worstvideo" and'
+            ' "worstaudio". By default, youtube-dl will pick the best quality.'
+            ' Use commas to download multiple audio formats, such as'
+            ' -f  136/137/mp4/bestvideo,140/m4a/bestaudio.'
+            ' You can merge the video and audio of two formats into a single'
+            ' file using -f <video-format>+<audio-format> (requires ffmpeg or'
+            ' avconv), for example -f bestvideo+bestaudio.'))
+    video_format.add_option(
+        '--all-formats',
+        action='store_const', dest='format', const='all',
+        help='download all available video formats')
+    video_format.add_option(
+        '--prefer-free-formats',
+        action='store_true', dest='prefer_free_formats', default=False,
+        help='prefer free video formats unless a specific one is requested')
+    video_format.add_option(
+        '--max-quality',
+        action='store', dest='format_limit', metavar='FORMAT',
+        help='highest quality format to download')
+    video_format.add_option(
+        '-F', '--list-formats',
+        action='store_true', dest='listformats',
+        help='list all available formats')
+    video_format.add_option(
+        '--youtube-include-dash-manifest',
+        action='store_true', dest='youtube_include_dash_manifest', default=True,
+        help=optparse.SUPPRESS_HELP)
+    video_format.add_option(
+        '--youtube-skip-dash-manifest',
+        action='store_false', dest='youtube_include_dash_manifest',
+        help='Do not download the DASH manifest on YouTube videos')
+
+    subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
+    subtitles.add_option(
+        '--write-sub', '--write-srt',
+        action='store_true', dest='writesubtitles', default=False,
+        help='write subtitle file')
+    subtitles.add_option(
+        '--write-auto-sub', '--write-automatic-sub',
+        action='store_true', dest='writeautomaticsub', default=False,
+        help='write automatic subtitle file (youtube only)')
+    subtitles.add_option(
+        '--all-subs',
+        action='store_true', dest='allsubtitles', default=False,
+        help='downloads all the available subtitles of the video')
+    subtitles.add_option(
+        '--list-subs',
+        action='store_true', dest='listsubtitles', default=False,
+        help='lists all available subtitles for the video')
+    subtitles.add_option(
+        '--sub-format',
+        action='store', dest='subtitlesformat', metavar='FORMAT', default='srt',
+        help='subtitle format (default=srt) ([sbv/vtt] youtube only)')
+    subtitles.add_option(
+        '--sub-lang', '--sub-langs', '--srt-lang',
+        action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
+        default=[], callback=_comma_separated_values_options_callback,
+        help='languages of the subtitles to download (optional), separated by commas; use IETF language tags like \'en,pt\'')
+
+    downloader = optparse.OptionGroup(parser, 'Download Options')
+    downloader.add_option(
+        '-r', '--rate-limit',
+        dest='ratelimit', metavar='LIMIT',
+        help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
+    downloader.add_option(
+        '-R', '--retries',
+        dest='retries', metavar='RETRIES', default=10,
+        help='number of retries (default is %default)')
+    downloader.add_option(
+        '--buffer-size',
+        dest='buffersize', metavar='SIZE', default='1024',
+        help='size of download buffer (e.g. 1024 or 16K) (default is %default)')
+    downloader.add_option(
+        '--no-resize-buffer',
+        action='store_true', dest='noresizebuffer', default=False,
+        help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
+    downloader.add_option(
+        '--test',
+        action='store_true', dest='test', default=False,
+        help=optparse.SUPPRESS_HELP)
+    downloader.add_option(
+        '--playlist-reverse',
+        action='store_true',
+        help='Download playlist videos in reverse order')
 
+    workarounds = optparse.OptionGroup(parser, 'Workarounds')
     workarounds.add_option(
-        '--encoding', dest='encoding', metavar='ENCODING',
+        '--encoding',
+        dest='encoding', metavar='ENCODING',
         help='Force the specified encoding (experimental)')
     workarounds.add_option(
-        '--no-check-certificate', action='store_true',
-        dest='no_check_certificate', default=False,
+        '--no-check-certificate',
+        action='store_true', dest='no_check_certificate', default=False,
         help='Suppress HTTPS certificate validation.')
     workarounds.add_option(
-        '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
+        '--prefer-insecure',
+        '--prefer-unsecure', action='store_true', dest='prefer_insecure',
         help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
     workarounds.add_option(
-        '--user-agent', metavar='UA',
-        dest='user_agent', help='specify a custom user agent')
+        '--user-agent',
+        metavar='UA', dest='user_agent',
+        help='specify a custom user agent')
     workarounds.add_option(
-        '--referer', metavar='REF',
-        dest='referer', default=None,
+        '--referer',
+        metavar='URL', dest='referer', default=None,
         help='specify a custom referer, use if the video access is restricted to one domain',
     )
     workarounds.add_option(
-        '--add-header', metavar='FIELD:VALUE',
-        dest='headers', action='append',
+        '--add-header',
+        metavar='FIELD:VALUE', dest='headers', action='append',
         help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
     )
     workarounds.add_option(
-        '--bidi-workaround', dest='bidi_workaround', action='store_true',
-        help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
+        '--bidi-workaround',
+        dest='bidi_workaround', action='store_true',
+        help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
 
-    verbosity.add_option('-q', '--quiet',
-            action='store_true', dest='quiet', help='activates quiet mode', default=False)
+    verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
+    verbosity.add_option(
+        '-q', '--quiet',
+        action='store_true', dest='quiet', default=False,
+        help='activates quiet mode')
     verbosity.add_option(
         '--no-warnings',
         dest='no_warnings', action='store_true', default=False,
         help='Ignore warnings')
-    verbosity.add_option('-s', '--simulate',
-            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
-    verbosity.add_option('--skip-download',
-            action='store_true', dest='skip_download', help='do not download the video', default=False)
-    verbosity.add_option('-g', '--get-url',
-            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
-    verbosity.add_option('-e', '--get-title',
-            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
-    verbosity.add_option('--get-id',
-            action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
-    verbosity.add_option('--get-thumbnail',
-            action='store_true', dest='getthumbnail',
-            help='simulate, quiet but print thumbnail URL', default=False)
-    verbosity.add_option('--get-description',
-            action='store_true', dest='getdescription',
-            help='simulate, quiet but print video description', default=False)
-    verbosity.add_option('--get-duration',
-            action='store_true', dest='getduration',
-            help='simulate, quiet but print video length', default=False)
-    verbosity.add_option('--get-filename',
-            action='store_true', dest='getfilename',
-            help='simulate, quiet but print output filename', default=False)
-    verbosity.add_option('--get-format',
-            action='store_true', dest='getformat',
-            help='simulate, quiet but print output format', default=False)
-    verbosity.add_option('-j', '--dump-json',
-            action='store_true', dest='dumpjson',
-            help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
-    verbosity.add_option('--newline',
-            action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
-    verbosity.add_option('--no-progress',
-            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
-    verbosity.add_option('--console-title',
-            action='store_true', dest='consoletitle',
-            help='display progress in console titlebar', default=False)
-    verbosity.add_option('-v', '--verbose',
-            action='store_true', dest='verbose', help='print various debugging information', default=False)
-    verbosity.add_option('--dump-intermediate-pages',
-            action='store_true', dest='dump_intermediate_pages', default=False,
-            help='print downloaded pages to debug problems (very verbose)')
-    verbosity.add_option('--write-pages',
-            action='store_true', dest='write_pages', default=False,
-            help='Write downloaded intermediary pages to files in the current directory to debug problems')
-    verbosity.add_option('--youtube-print-sig-code',
-            action='store_true', dest='youtube_print_sig_code', default=False,
-            help=optparse.SUPPRESS_HELP)
-    verbosity.add_option('--print-traffic',
-            dest='debug_printtraffic', action='store_true', default=False,
-            help='Display sent and read HTTP traffic')
-
-
-    filesystem.add_option('-a', '--batch-file',
-            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
-    filesystem.add_option('--id',
-            action='store_true', dest='useid', help='use only video ID in file name', default=False)
-    filesystem.add_option('-A', '--auto-number',
-            action='store_true', dest='autonumber',
-            help='number downloaded files starting from 00000', default=False)
-    filesystem.add_option('-o', '--output',
-            dest='outtmpl', metavar='TEMPLATE',
-            help=('output filename template. Use %(title)s to get the title, '
-                  '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
-                  '%(autonumber)s to get an automatically incremented number, '
-                  '%(ext)s for the filename extension, '
-                  '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
-                  '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
-                  '%(upload_date)s for the upload date (YYYYMMDD), '
-                  '%(extractor)s for the provider (youtube, metacafe, etc), '
-                  '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
-                  '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
-                  '%(height)s and %(width)s for the width and height of the video format. '
-                  '%(resolution)s for a textual description of the resolution of the video format. '
-                  'Use - to output to stdout. Can also be used to download to a different directory, '
-                  'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
-    filesystem.add_option('--autonumber-size',
-            dest='autonumber_size', metavar='NUMBER',
-            help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
-    filesystem.add_option('--restrict-filenames',
-            action='store_true', dest='restrictfilenames',
-            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
-    filesystem.add_option('-t', '--title',
-            action='store_true', dest='usetitle', help='[deprecated] use title in file name (default)', default=False)
-    filesystem.add_option('-l', '--literal',
-            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
-    filesystem.add_option('-w', '--no-overwrites',
-            action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
-    filesystem.add_option('-c', '--continue',
-            action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
-    filesystem.add_option('--no-continue',
-            action='store_false', dest='continue_dl',
-            help='do not resume partially downloaded files (restart from beginning)')
-    filesystem.add_option('--no-part',
-            action='store_true', dest='nopart', help='do not use .part files', default=False)
-    filesystem.add_option('--no-mtime',
-            action='store_false', dest='updatetime',
-            help='do not use the Last-modified header to set the file modification time', default=True)
-    filesystem.add_option('--write-description',
-            action='store_true', dest='writedescription',
-            help='write video description to a .description file', default=False)
-    filesystem.add_option('--write-info-json',
-            action='store_true', dest='writeinfojson',
-            help='write video metadata to a .info.json file', default=False)
-    filesystem.add_option('--write-annotations',
-            action='store_true', dest='writeannotations',
-            help='write video annotations to a .annotation file', default=False)
-    filesystem.add_option('--write-thumbnail',
-            action='store_true', dest='writethumbnail',
-            help='write thumbnail image to disk', default=False)
-    filesystem.add_option('--load-info',
-            dest='load_info_filename', metavar='FILE',
-            help='json file containing the video information (created with the "--write-json" option)')
-    filesystem.add_option('--cookies',
-            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
+    verbosity.add_option(
+        '-s', '--simulate',
+        action='store_true', dest='simulate', default=False,
+        help='do not download the video and do not write anything to disk')
+    verbosity.add_option(
+        '--skip-download',
+        action='store_true', dest='skip_download', default=False,
+        help='do not download the video')
+    verbosity.add_option(
+        '-g', '--get-url',
+        action='store_true', dest='geturl', default=False,
+        help='simulate, quiet but print URL')
+    verbosity.add_option(
+        '-e', '--get-title',
+        action='store_true', dest='gettitle', default=False,
+        help='simulate, quiet but print title')
+    verbosity.add_option(
+        '--get-id',
+        action='store_true', dest='getid', default=False,
+        help='simulate, quiet but print id')
+    verbosity.add_option(
+        '--get-thumbnail',
+        action='store_true', dest='getthumbnail', default=False,
+        help='simulate, quiet but print thumbnail URL')
+    verbosity.add_option(
+        '--get-description',
+        action='store_true', dest='getdescription', default=False,
+        help='simulate, quiet but print video description')
+    verbosity.add_option(
+        '--get-duration',
+        action='store_true', dest='getduration', default=False,
+        help='simulate, quiet but print video length')
+    verbosity.add_option(
+        '--get-filename',
+        action='store_true', dest='getfilename', default=False,
+        help='simulate, quiet but print output filename')
+    verbosity.add_option(
+        '--get-format',
+        action='store_true', dest='getformat', default=False,
+        help='simulate, quiet but print output format')
+    verbosity.add_option(
+        '-j', '--dump-json',
+        action='store_true', dest='dumpjson', default=False,
+        help='simulate, quiet but print JSON information. See --output for a description of available keys.')
+    verbosity.add_option(
+        '-J', '--dump-single-json',
+        action='store_true', dest='dump_single_json', default=False,
+        help='simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
+    verbosity.add_option(
+        '--newline',
+        action='store_true', dest='progress_with_newline', default=False,
+        help='output progress bar as new lines')
+    verbosity.add_option(
+        '--no-progress',
+        action='store_true', dest='noprogress', default=False,
+        help='do not print progress bar')
+    verbosity.add_option(
+        '--console-title',
+        action='store_true', dest='consoletitle', default=False,
+        help='display progress in console titlebar')
+    verbosity.add_option(
+        '-v', '--verbose',
+        action='store_true', dest='verbose', default=False,
+        help='print various debugging information')
+    verbosity.add_option(
+        '--dump-intermediate-pages',
+        action='store_true', dest='dump_intermediate_pages', default=False,
+        help='print downloaded pages to debug problems (very verbose)')
+    verbosity.add_option(
+        '--write-pages',
+        action='store_true', dest='write_pages', default=False,
+        help='Write downloaded intermediary pages to files in the current directory to debug problems')
+    verbosity.add_option(
+        '--youtube-print-sig-code',
+        action='store_true', dest='youtube_print_sig_code', default=False,
+        help=optparse.SUPPRESS_HELP)
+    verbosity.add_option(
+        '--print-traffic',
+        dest='debug_printtraffic', action='store_true', default=False,
+        help='Display sent and read HTTP traffic')
+
+    filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
+    filesystem.add_option(
+        '-a', '--batch-file',
+        dest='batchfile', metavar='FILE',
+        help='file containing URLs to download (\'-\' for stdin)')
+    filesystem.add_option(
+        '--id', default=False,
+        action='store_true', dest='useid', help='use only video ID in file name')
+    filesystem.add_option(
+        '-o', '--output',
+        dest='outtmpl', metavar='TEMPLATE',
+        help=('output filename template. Use %(title)s to get the title, '
+              '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
+              '%(autonumber)s to get an automatically incremented number, '
+              '%(ext)s for the filename extension, '
+              '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
+              '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
+              '%(upload_date)s for the upload date (YYYYMMDD), '
+              '%(extractor)s for the provider (youtube, metacafe, etc), '
+              '%(id)s for the video id, '
+              '%(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in, '
+              '%(playlist_index)s for the position in the playlist. '
+              '%(height)s and %(width)s for the width and height of the video format. '
+              '%(resolution)s for a textual description of the resolution of the video format. '
+              '%% for a literal percent. '
+              'Use - to output to stdout. Can also be used to download to a different directory, '
+              'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
+    filesystem.add_option(
+        '--autonumber-size',
+        dest='autonumber_size', metavar='NUMBER',
+        help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
+    filesystem.add_option(
+        '--restrict-filenames',
+        action='store_true', dest='restrictfilenames', default=False,
+        help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
+    filesystem.add_option(
+        '-A', '--auto-number',
+        action='store_true', dest='autonumber', default=False,
+        help='[deprecated; use  -o "%(autonumber)s-%(title)s.%(ext)s" ] number downloaded files starting from 00000')
+    filesystem.add_option(
+        '-t', '--title',
+        action='store_true', dest='usetitle', default=False,
+        help='[deprecated] use title in file name (default)')
+    filesystem.add_option(
+        '-l', '--literal', default=False,
+        action='store_true', dest='usetitle',
+        help='[deprecated] alias of --title')
+    filesystem.add_option(
+        '-w', '--no-overwrites',
+        action='store_true', dest='nooverwrites', default=False,
+        help='do not overwrite files')
+    filesystem.add_option(
+        '-c', '--continue',
+        action='store_true', dest='continue_dl', default=True,
+        help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
+    filesystem.add_option(
+        '--no-continue',
+        action='store_false', dest='continue_dl',
+        help='do not resume partially downloaded files (restart from beginning)')
+    filesystem.add_option(
+        '--no-part',
+        action='store_true', dest='nopart', default=False,
+        help='do not use .part files - write directly into output file')
+    filesystem.add_option(
+        '--no-mtime',
+        action='store_false', dest='updatetime', default=True,
+        help='do not use the Last-modified header to set the file modification time')
+    filesystem.add_option(
+        '--write-description',
+        action='store_true', dest='writedescription', default=False,
+        help='write video description to a .description file')
+    filesystem.add_option(
+        '--write-info-json',
+        action='store_true', dest='writeinfojson', default=False,
+        help='write video metadata to a .info.json file')
+    filesystem.add_option(
+        '--write-annotations',
+        action='store_true', dest='writeannotations', default=False,
+        help='write video annotations to a .annotation file')
+    filesystem.add_option(
+        '--write-thumbnail',
+        action='store_true', dest='writethumbnail', default=False,
+        help='write thumbnail image to disk')
+    filesystem.add_option(
+        '--load-info',
+        dest='load_info_filename', metavar='FILE',
+        help='json file containing the video information (created with the "--write-json" option)')
+    filesystem.add_option(
+        '--cookies',
+        dest='cookiefile', metavar='FILE',
+        help='file to read cookies from and dump cookie jar in')
     filesystem.add_option(
         '--cache-dir', dest='cachedir', default=None, metavar='DIR',
         help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
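
The -o/--output template documented above expands through Python %-formatting against the extracted info dict (the real YoutubeDL code adds filename sanitization, autonumber padding, and missing-field handling on top). A toy expansion with made-up field values:

    template = '%(playlist_index)s - %(title)s.%(ext)s'
    info = {'playlist_index': 3, 'title': 'Example', 'ext': 'mp4'}
    assert template % info == '3 - Example.mp4'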
@@ -412,37 +574,62 @@ def parseOpts(overrideArguments=None):
         '--no-cache-dir', action='store_const', const=False, dest='cachedir',
         help='Disable filesystem caching')
     filesystem.add_option(
-        '--rm-cache-dir', action='store_true', dest='rm_cachedir',
+        '--rm-cache-dir',
+        action='store_true', dest='rm_cachedir',
         help='Delete all filesystem cache files')
 
-
-    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
-            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
-    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
-            help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
-    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
-            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
-    postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
-            help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
-    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
-            help='keeps the video file on disk after the post-processing; the video is erased by default')
-    postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
-            help='do not overwrite post-processed files; the post-processed files are overwritten by default')
-    postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
-            help='embed subtitles in the video (only for mp4 videos)')
-    postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
-            help='embed thumbnail in the audio as cover art')
-    postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
-            help='write metadata to the video file')
-    postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
-            help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
-    postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
+    postproc = optparse.OptionGroup(parser, 'Post-processing Options')
+    postproc.add_option(
+        '-x', '--extract-audio',
+        action='store_true', dest='extractaudio', default=False,
+        help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
+    postproc.add_option(
+        '--audio-format', metavar='FORMAT', dest='audioformat', default='best',
+        help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
+    postproc.add_option(
+        '--audio-quality', metavar='QUALITY',
+        dest='audioquality', default='5',
+        help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
+    postproc.add_option(
+        '--recode-video',
+        metavar='FORMAT', dest='recodevideo', default=None,
+        help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
+    postproc.add_option(
+        '-k', '--keep-video',
+        action='store_true', dest='keepvideo', default=False,
+        help='keeps the video file on disk after the post-processing; the video is erased by default')
+    postproc.add_option(
+        '--no-post-overwrites',
+        action='store_true', dest='nopostoverwrites', default=False,
+        help='do not overwrite post-processed files; the post-processed files are overwritten by default')
+    postproc.add_option(
+        '--embed-subs',
+        action='store_true', dest='embedsubtitles', default=False,
+        help='embed subtitles in the video (only for mp4 videos)')
+    postproc.add_option(
+        '--embed-thumbnail',
+        action='store_true', dest='embedthumbnail', default=False,
+        help='embed thumbnail in the audio as cover art')
+    postproc.add_option(
+        '--add-metadata',
+        action='store_true', dest='addmetadata', default=False,
+        help='write metadata to the video file')
+    postproc.add_option(
+        '--xattrs',
+        action='store_true', dest='xattrs', default=False,
+        help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
+    postproc.add_option(
+        '--prefer-avconv',
+        action='store_false', dest='prefer_ffmpeg',
         help='Prefer avconv over ffmpeg for running the postprocessors (default)')
-    postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
+    postproc.add_option(
+        '--prefer-ffmpeg',
+        action='store_true', dest='prefer_ffmpeg',
         help='Prefer ffmpeg over avconv for running the postprocessors')
     postproc.add_option(
-        '--exec', metavar='CMD', dest='exec_cmd',
-        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'' )
+        '--exec',
+        metavar='CMD', dest='exec_cmd',
+        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
 
     parser.add_option_group(general)
     parser.add_option_group(selection)
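
The --sub-lang option registered above points at _comma_separated_values_options_callback, which this diff does not show. A sketch of what such an optparse callback looks like (an assumption about the helper, not its verbatim source):

    import optparse

    def comma_separated_callback(option, opt_str, value, parser):
        # Store 'en,pt' as ['en', 'pt'] on the option's destination.
        setattr(parser.values, option.dest, value.split(','))

    parser = optparse.OptionParser()
    parser.add_option(
        '--sub-lang', dest='subtitleslangs', metavar='LANGS', type='str',
        action='callback', callback=comma_separated_callback, default=[])
    opts, _ = parser.parse_args(['--sub-lang', 'en,pt'])
    assert opts.subtitleslangs == ['en', 'pt']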
@@ -458,7 +645,7 @@ def parseOpts(overrideArguments=None):
     if overrideArguments is not None:
         opts, args = parser.parse_args(overrideArguments)
         if opts.verbose:
-            write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
+            write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
     else:
         commandLineConf = sys.argv[1:]
         if '--ignore-config' in commandLineConf:
@@ -474,8 +661,8 @@ def parseOpts(overrideArguments=None):
 
         opts, args = parser.parse_args(argv)
         if opts.verbose:
-            write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
-            write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
-            write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
+            write_string('[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
+            write_string('[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
+            write_string('[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
 
     return parser, opts, args
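
In the else-branch above, the system config, user config, and command-line arguments are concatenated (the concatenation itself sits outside these hunks) and parsed in a single pass, so optparse's last-value-wins rule gives the command line precedence. Toy demonstration:

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('-r', '--rate-limit', dest='ratelimit')

    system_conf = ['--rate-limit', '50K']
    command_line = ['--rate-limit', '4.2M']
    opts, _ = parser.parse_args(system_conf + command_line)
    assert opts.ratelimit == '4.2M'  # the later, command-line value wins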
index 15aa0daa9b7b69b5710096ecb099f62e6ab51f3d..fb367ebe4474063a279fcd096b21461d11deafe8 100644 (file)
@@ -1,24 +1,27 @@
+from __future__ import unicode_literals
 
 from .atomicparsley import AtomicParsleyPP
 from .ffmpeg import (
+    FFmpegPostProcessor,
     FFmpegAudioFixPP,
+    FFmpegEmbedSubtitlePP,
+    FFmpegExtractAudioPP,
     FFmpegMergerPP,
     FFmpegMetadataPP,
     FFmpegVideoConvertor,
-    FFmpegExtractAudioPP,
-    FFmpegEmbedSubtitlePP,
 )
 from .xattrpp import XAttrMetadataPP
 from .execafterdownload import ExecAfterDownloadPP
 
 __all__ = [
     'AtomicParsleyPP',
+    'ExecAfterDownloadPP',
     'FFmpegAudioFixPP',
+    'FFmpegEmbedSubtitlePP',
+    'FFmpegExtractAudioPP',
     'FFmpegMergerPP',
     'FFmpegMetadataPP',
+    'FFmpegPostProcessor',
     'FFmpegVideoConvertor',
-    'FFmpegExtractAudioPP',
-    'FFmpegEmbedSubtitlePP',
     'XAttrMetadataPP',
-    'ExecAfterDownloadPP',
 ]
index 765b2d9ee7c834ee1d03be6fc4e20d7628ad0f76..448ccc5f342e42959aae0619854fa80d1e1cd978 100644 (file)
@@ -6,10 +6,11 @@ import os
 import subprocess
 
 from .common import PostProcessor
-
+from ..compat import (
+    compat_urlretrieve,
+)
 from ..utils import (
     check_executable,
-    compat_urlretrieve,
     encodeFilename,
     PostProcessingError,
     prepend_extension,
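
This hunk is part of the commit-wide move of Python 2/3 shims out of utils into the new compat module. A shim like compat_urlretrieve conventionally looks as follows (a sketch of the pattern, not necessarily the exact compat.py source):

    try:
        from urllib.request import urlretrieve as compat_urlretrieve  # Python 3
    except ImportError:
        from urllib import urlretrieve as compat_urlretrieve  # Python 2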
index 788f94d021fa71648c3c5f521f8344f94d981e61..e54ae678da17bef5c5848bc7165d42d5eec912a4 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 from ..utils import PostProcessingError
 
 
index 08419a3d4b6990c83cd0d0c844a61c2abe5c2f0e..75c0f7bbe86ef8e19f41fd61e1bbd58678474d8a 100644 (file)
@@ -3,10 +3,8 @@ from __future__ import unicode_literals
 import subprocess
 
 from .common import PostProcessor
-from ..utils import (
-    shlex_quote,
-    PostProcessingError,
-)
+from ..compat import shlex_quote
+from ..utils import PostProcessingError
 
 
 class ExecAfterDownloadPP(PostProcessor):
@@ -16,7 +14,7 @@ class ExecAfterDownloadPP(PostProcessor):
 
     def run(self, information):
         cmd = self.exec_cmd
-        if not '{}' in cmd:
+        if '{}' not in cmd:
             cmd += ' {}'
 
         cmd = cmd.replace('{}', shlex_quote(information['filepath']))
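
For context on the replace above: shlex_quote escapes the downloaded file's path so the shell treats it as a single word. Sketch with a stand-in import (compat's shlex_quote is assumed to match the stdlib behaviour):

    try:
        from shlex import quote as shlex_quote  # Python 3.3+
    except ImportError:
        from pipes import quote as shlex_quote  # Python 2

    cmd = 'adb push {} /sdcard/Music/ && rm {}'
    path = "My Song (live).mp3"
    print(cmd.replace('{}', shlex_quote(path)))
    # adb push 'My Song (live).mp3' /sdcard/Music/ && rm 'My Song (live).mp3'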
@@ -28,4 +26,3 @@ class ExecAfterDownloadPP(PostProcessor):
                 'Command returned error code %d' % retCode)
 
         return None, information  # by default, keep file and do nothing
-
index 8c5f7c43b75b17466a91a2b54c928ec8ca2298f3..965ded4c1590eb3cccfebcfc3fd460f5a83d1c90 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import subprocess
 import sys
@@ -6,11 +8,14 @@ import time
 
 from .common import AudioConversionError, PostProcessor
 
-from ..utils import (
-    check_executable,
+from ..compat import (
     compat_subprocess_get_DEVNULL,
+)
+from ..utils import (
     encodeArgument,
     encodeFilename,
+    get_exe_version,
+    is_outdated_version,
     PostProcessingError,
     prepend_extension,
     shell_quote,
@@ -25,36 +30,63 @@ class FFmpegPostProcessorError(PostProcessingError):
 class FFmpegPostProcessor(PostProcessor):
     def __init__(self, downloader=None, deletetempfiles=False):
         PostProcessor.__init__(self, downloader)
-        self._exes = self.detect_executables()
+        self._versions = self.get_versions()
         self._deletetempfiles = deletetempfiles
 
+    def check_version(self):
+        if not self._executable:
+            raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.')
+
+        required_version = '10-0' if self._uses_avconv() else '1.0'
+        if is_outdated_version(
+                self._versions[self._executable], required_version):
+            warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
+                self._executable, self._executable, required_version)
+            if self._downloader:
+                self._downloader.report_warning(warning)
+
     @staticmethod
-    def detect_executables():
+    def get_versions():
         programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
-        return dict((program, check_executable(program, ['-version'])) for program in programs)
+        return dict((p, get_exe_version(p, args=['-version'])) for p in programs)
 
-    def _get_executable(self):
+    @property
+    def _executable(self):
         if self._downloader.params.get('prefer_ffmpeg', False):
-            return self._exes['ffmpeg'] or self._exes['avconv']
+            prefs = ('ffmpeg', 'avconv')
         else:
-            return self._exes['avconv'] or self._exes['ffmpeg']
+            prefs = ('avconv', 'ffmpeg')
+        for p in prefs:
+            if self._versions[p]:
+                return p
+        return None
+
+    @property
+    def _probe_executable(self):
+        if self._downloader.params.get('prefer_ffmpeg', False):
+            prefs = ('ffprobe', 'avprobe')
+        else:
+            prefs = ('avprobe', 'ffprobe')
+        for p in prefs:
+            if self._versions[p]:
+                return p
+        return None
 
     def _uses_avconv(self):
-        return self._get_executable() == self._exes['avconv']
+        return self._executable == 'avconv'
 
     def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
-        if not self._get_executable():
-            raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
+        self.check_version()
 
         files_cmd = []
         for path in input_paths:
             files_cmd.extend(['-i', encodeFilename(path, True)])
-        cmd = ([self._get_executable(), '-y'] + files_cmd
+        cmd = ([self._executable, '-y'] + files_cmd
                + [encodeArgument(o) for o in opts] +
                [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
 
         if self._downloader.params.get('verbose', False):
-            self._downloader.to_screen(u'[debug] ffmpeg command line: %s' % shell_quote(cmd))
+            self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
         p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         stdout, stderr = p.communicate()
         if p.returncode != 0:
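
check_version above leans on utils.is_outdated_version. Roughly, it compares release components numerically; the sketch below is an assumption about that behaviour, not the real youtube_dl.utils implementation:

    def is_outdated_version(version, limit):
        # Assumption: split on dots/dashes and compare components numerically.
        def parse(v):
            return [int(p) for p in v.replace('-', '.').split('.') if p.isdigit()]
        return parse(version) < parse(limit)

    assert is_outdated_version('0.8.10', '1.0')
    assert not is_outdated_version('2.4.2', '1.0')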
@@ -70,8 +102,8 @@ class FFmpegPostProcessor(PostProcessor):
 
     def _ffmpeg_filename_argument(self, fn):
         # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
-        if fn.startswith(u'-'):
-            return u'./' + fn
+        if fn.startswith('-'):
+            return './' + fn
         return fn
 
 
@@ -85,11 +117,12 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
         self._nopostoverwrites = nopostoverwrites
 
     def get_audio_codec(self, path):
-        if not self._exes['ffprobe'] and not self._exes['avprobe']:
-            raise PostProcessingError(u'ffprobe or avprobe not found. Please install one.')
+
+        if not self._probe_executable:
+            raise PostProcessingError('ffprobe or avprobe not found. Please install one.')
         try:
             cmd = [
-                self._exes['avprobe'] or self._exes['ffprobe'],
+                self._probe_executable,
                 '-show_streams',
                 encodeFilename(self._ffmpeg_filename_argument(path), True)]
             handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
@@ -122,7 +155,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
 
         filecodec = self.get_audio_codec(path)
         if filecodec is None:
-            raise PostProcessingError(u'WARNING: unable to obtain file audio codec with ffprobe')
+            raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
 
         uses_avconv = self._uses_avconv()
         more_opts = []
@@ -171,7 +204,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
                 extension = 'wav'
                 more_opts += ['-f', 'wav']
 
-        prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
+        prefix, sep, ext = path.rpartition('.')  # not os.path.splitext, since the latter does not work on unicode in all setups
         new_path = prefix + sep + extension
 
         # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
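
The rpartition idiom kept above (see the inline comment) splits on the last dot without going through os.path, which misbehaved on unicode paths under some Python 2 setups:

    path = 'My.Mix.2014.webm'
    prefix, sep, ext = path.rpartition('.')
    assert prefix + sep + 'mp3' == 'My.Mix.2014.mp3'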
@@ -180,16 +213,16 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
 
         try:
             if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
-                self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
+                self._downloader.to_screen('[youtube] Post-process file %s exists, skipping' % new_path)
             else:
-                self._downloader.to_screen(u'[' + self._get_executable() + '] Destination: ' + new_path)
+                self._downloader.to_screen('[' + self._executable + '] Destination: ' + new_path)
                 self.run_ffmpeg(path, new_path, acodec, more_opts)
         except:
-            etype,e,tb = sys.exc_info()
+            etype, e, tb = sys.exc_info()
             if isinstance(e, AudioConversionError):
-                msg = u'audio conversion failed: ' + e.msg
+                msg = 'audio conversion failed: ' + e.msg
             else:
-                msg = u'error running ' + self._get_executable()
+                msg = 'error running ' + self._executable
             raise PostProcessingError(msg)
 
         # Try to update the date time for extracted audio file.
@@ -197,30 +230,30 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
             try:
                 os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
             except:
-                self._downloader.report_warning(u'Cannot update utime of audio file')
+                self._downloader.report_warning('Cannot update utime of audio file')
 
         information['filepath'] = new_path
-        return self._nopostoverwrites,information
+        return self._nopostoverwrites, information
 
 
 class FFmpegVideoConvertor(FFmpegPostProcessor):
-    def __init__(self, downloader=None,preferedformat=None):
+    def __init__(self, downloader=None, preferedformat=None):
         super(FFmpegVideoConvertor, self).__init__(downloader)
-        self._preferedformat=preferedformat
+        self._preferedformat = preferedformat
 
     def run(self, information):
         path = information['filepath']
-        prefix, sep, ext = path.rpartition(u'.')
+        prefix, sep, ext = path.rpartition('.')
         outpath = prefix + sep + self._preferedformat
         if information['ext'] == self._preferedformat:
-            self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
-            return True,information
-        self._downloader.to_screen(u'['+'ffmpeg'+'] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) +outpath)
+            self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
+            return True, information
+        self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
         self.run_ffmpeg(path, outpath, [])
         information['filepath'] = outpath
         information['format'] = self._preferedformat
         information['ext'] = self._preferedformat
-        return False,information
+        return False, information
 
 
 class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
@@ -422,11 +455,11 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
         return cls._lang_map.get(code[:2])
 
     def run(self, information):
-        if information['ext'] != u'mp4':
-            self._downloader.to_screen(u'[ffmpeg] Subtitles can only be embedded in mp4 files')
+        if information['ext'] != 'mp4':
+            self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4 files')
             return True, information
         if not information.get('subtitles'):
-            self._downloader.to_screen(u'[ffmpeg] There aren\'t any subtitles to embed') 
+            self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed')
             return True, information
 
         sub_langs = [key for key in information['subtitles']]
@@ -435,14 +468,14 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
 
         opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
         for (i, lang) in enumerate(sub_langs):
-            opts.extend(['-map', '%d:0' % (i+1), '-c:s:%d' % i, 'mov_text'])
+            opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text'])
             lang_code = self._conver_lang_code(lang)
             if lang_code is not None:
                 opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
         opts.extend(['-f', 'mp4'])
 
-        temp_filename = filename + u'.temp'
-        self._downloader.to_screen(u'[ffmpeg] Embedding subtitles in \'%s\'' % filename)
+        temp_filename = filename + '.temp'
+        self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
         self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
         os.remove(encodeFilename(filename))
         os.rename(encodeFilename(temp_filename), encodeFilename(filename))
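
The mapping loop above treats input 0 as the video (its first two streams are copied) and inputs 1..n as one subtitle file per language, so subtitle i maps from input i + 1. Standalone sketch of the opts it builds for two languages:

    sub_langs = ['en', 'pt']
    opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
    for i, lang in enumerate(sub_langs):
        opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text'])
    opts.extend(['-f', 'mp4'])
    # ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy',
    #  '-map', '1:0', '-c:s:0', 'mov_text',
    #  '-map', '2:0', '-c:s:1', 'mov_text', '-f', 'mp4']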
@@ -463,13 +496,13 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
             metadata['artist'] = info['uploader_id']
 
         if not metadata:
-            self._downloader.to_screen(u'[ffmpeg] There isn\'t any metadata to add')
+            self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
             return True, info
 
         filename = info['filepath']
         temp_filename = prepend_extension(filename, 'temp')
 
-        if info['ext'] == u'm4a':
+        if info['ext'] == 'm4a':
             options = ['-vn', '-acodec', 'copy']
         else:
             options = ['-c', 'copy']
@@ -477,7 +510,7 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
         for (name, value) in metadata.items():
             options.extend(['-metadata', '%s=%s' % (name, value)])
 
-        self._downloader.to_screen(u'[ffmpeg] Adding metadata to \'%s\'' % filename)
+        self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
         self.run_ffmpeg(filename, temp_filename, options)
         os.remove(encodeFilename(filename))
         os.rename(encodeFilename(temp_filename), encodeFilename(filename))
@@ -487,8 +520,8 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
 class FFmpegMergerPP(FFmpegPostProcessor):
     def run(self, info):
         filename = info['filepath']
-        args = ['-c', 'copy']
-        self._downloader.to_screen(u'[ffmpeg] Merging formats into "%s"' % filename)
+        args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest']
+        self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename)
         self.run_ffmpeg_multiple_files(info['__files_to_merge'], filename, args)
         return True, info
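
The new merger args pin the video stream to the first input and the audio stream to the second, and -shortest stops the mux at the shorter of the two. With hypothetical filenames, the command that run_ffmpeg_multiple_files would assemble looks like:

    files_to_merge = ['clip.f137.mp4', 'clip.f140.m4a']  # hypothetical inputs
    args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest']
    cmd = (['ffmpeg', '-y', '-i', files_to_merge[0], '-i', files_to_merge[1]]
           + args + ['clip.mp4'])
    print(' '.join(cmd))
    # ffmpeg -y -i clip.f137.mp4 -i clip.f140.m4a -c copy -map 0:v:0 -map 1:a:0 -shortest clip.mp4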
 
@@ -499,7 +532,7 @@ class FFmpegAudioFixPP(FFmpegPostProcessor):
         temp_filename = prepend_extension(filename, 'temp')
 
         options = ['-vn', '-acodec', 'copy']
-        self._downloader.to_screen(u'[ffmpeg] Fixing audio file "%s"' % filename)
+        self._downloader.to_screen('[ffmpeg] Fixing audio file "%s"' % filename)
         self.run_ffmpeg(filename, temp_filename, options)
 
         os.remove(encodeFilename(filename))
index f6940940b340dea5c23e5ce118b8fcc4d7ee8574..f6c63fe97545d86947ef1ef4bf2d70e9ea7144be 100644 (file)
@@ -1,12 +1,16 @@
+from __future__ import unicode_literals
+
 import os
 import subprocess
 import sys
 
 from .common import PostProcessor
+from ..compat import (
+    subprocess_check_output
+)
 from ..utils import (
     check_executable,
     hyphenate_date,
-    subprocess_check_output
 )
 
 
@@ -106,4 +110,3 @@ class XAttrMetadataPP(PostProcessor):
         except (subprocess.CalledProcessError, OSError):
             self._downloader.report_error("This filesystem doesn't support extended attributes. (You may have to enable them in your /etc/fstab)")
             return False, info
-
index b63c65b201ec2b34c6482e8526694431041f75fe..e60505ace8b8451666f2aeebea3277bc58cb6297 100644 (file)
@@ -4,8 +4,8 @@ import collections
 import io
 import zlib
 
+from .compat import compat_str
 from .utils import (
-    compat_str,
     ExtractorError,
     struct_unpack,
 )
@@ -62,15 +62,17 @@ class _ScopeDict(dict):
 
 
 class _AVMClass(object):
-    def __init__(self, name_idx, name):
+    def __init__(self, name_idx, name, static_properties=None):
         self.name_idx = name_idx
         self.name = name
         self.method_names = {}
         self.method_idxs = {}
         self.methods = {}
         self.method_pyfunctions = {}
+        self.static_properties = static_properties if static_properties else {}
 
         self.variables = _ScopeDict(self)
+        self.constants = {}
 
     def make_object(self):
         return _AVMClass_Object(self)
@@ -148,8 +150,38 @@ def _read_byte(reader):
     return res
 
 
+StringClass = _AVMClass('(no name idx)', 'String')
+ByteArrayClass = _AVMClass('(no name idx)', 'ByteArray')
+TimerClass = _AVMClass('(no name idx)', 'Timer')
+TimerEventClass = _AVMClass('(no name idx)', 'TimerEvent', {'TIMER': 'timer'})
+_builtin_classes = {
+    StringClass.name: StringClass,
+    ByteArrayClass.name: ByteArrayClass,
+    TimerClass.name: TimerClass,
+    TimerEventClass.name: TimerEventClass,
+}
+
+
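+# Sentinel standing in for ActionScript's "undefined" value: falsy,
+# hashable and printable, so interpreted bytecode can pass it around
+# like any other object.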
+class _Undefined(object):
+    def __bool__(self):
+        return False
+    __nonzero__ = __bool__
+
+    def __hash__(self):
+        return 0
+
+    def __str__(self):
+        return 'undefined'
+    __repr__ = __str__
+
+undefined = _Undefined()
+
+
 class SWFInterpreter(object):
     def __init__(self, file_contents):
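+        # Hooks for overriding builtin methods the interpreter cannot run;
+        # Timer.addEventListener is stubbed to a no-op returning undefined.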
+        self._patched_functions = {
+            (TimerClass, 'addEventListener'): lambda params: undefined,
+        }
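+        # Tag code 82 is DoABC, the SWF tag that carries the ActionScript 3
+        # bytecode this interpreter executes.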
         code_tag = next(tag
                         for tag_code, tag in _extract_tags(file_contents)
                         if tag_code == 82)
@@ -170,11 +202,13 @@ class SWFInterpreter(object):
 
         # Constant pool
         int_count = u30()
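+        # ABC constant pools are 1-indexed: entry 0 implicitly holds zero,
+        # so each list is seeded with 0 and reading starts at index 1.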
+        self.constant_ints = [0]
         for _c in range(1, int_count):
-            s32()
+            self.constant_ints.append(s32())
+        self.constant_uints = [0]
         uint_count = u30()
         for _c in range(1, uint_count):
-            u32()
+            self.constant_uints.append(u32())
         double_count = u30()
         read_bytes(max(0, (double_count - 1)) * 8)
         string_count = u30()
@@ -212,6 +246,10 @@ class SWFInterpreter(object):
                 u30()  # namespace_idx
                 name_idx = u30()
                 self.multinames.append(self.constant_strings[name_idx])
+            elif kind == 0x09:
+                name_idx = u30()
+                u30()
+                self.multinames.append(self.constant_strings[name_idx])
             else:
                 self.multinames.append(_Multiname(kind))
                 for _c2 in range(MULTINAME_SIZES[kind]):
@@ -258,13 +296,28 @@ class SWFInterpreter(object):
             kind = kind_full & 0x0f
             attrs = kind_full >> 4
             methods = {}
-            if kind in [0x00, 0x06]:  # Slot or Const
+            constants = None
+            if kind == 0x00:  # Slot
                 u30()  # Slot id
                 u30()  # type_name_idx
                 vindex = u30()
                 if vindex != 0:
                     read_byte()  # vkind
-            elif kind in [0x01, 0x02, 0x03]:  # Method / Getter / Setter
+            elif kind == 0x06:  # Const
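+                # A Const trait's value lives in the constant pool; only the
+                # integer kinds (Constant_Int/Constant_UInt) are resolved here.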
+                u30()  # Slot id
+                u30()  # type_name_idx
+                vindex = u30()
+                vkind = 'any'
+                if vindex != 0:
+                    vkind = read_byte()
+                if vkind == 0x03:  # Constant_Int
+                    value = self.constant_ints[vindex]
+                elif vkind == 0x04:  # Constant_UInt
+                    value = self.constant_uints[vindex]
+                else:
+                    return {}, None  # Ignore silently for now
+                constants = {self.multinames[trait_name_idx]: value}
+            elif kind in (0x01, 0x02, 0x03):  # Method / Getter / Setter
                 u30()  # disp_id
                 method_idx = u30()
                 methods[self.multinames[trait_name_idx]] = method_idx
@@ -283,7 +336,7 @@ class SWFInterpreter(object):
                 for _c3 in range(metadata_count):
                     u30()  # metadata index
 
-            return methods
+            return methods, constants
 
         # Classes
         class_count = u30()
@@ -305,18 +358,22 @@ class SWFInterpreter(object):
             u30()  # iinit
             trait_count = u30()
             for _c2 in range(trait_count):
-                trait_methods = parse_traits_info()
+                trait_methods, trait_constants = parse_traits_info()
                 avm_class.register_methods(trait_methods)
+                if trait_constants:
+                    avm_class.constants.update(trait_constants)
 
         assert len(classes) == class_count
         self._classes_by_name = dict((c.name, c) for c in classes)
 
         for avm_class in classes:
-            u30()  # cinit
+            avm_class.cinit_idx = u30()
             trait_count = u30()
             for _c2 in range(trait_count):
-                trait_methods = parse_traits_info()
+                trait_methods, trait_constants = parse_traits_info()
                 avm_class.register_methods(trait_methods)
+                if trait_constants:
+                    avm_class.constants.update(trait_constants)
 
         # Scripts
         script_count = u30()
@@ -329,6 +386,7 @@ class SWFInterpreter(object):
         # Method bodies
         method_body_count = u30()
         Method = collections.namedtuple('Method', ['code', 'local_count'])
+        self._all_methods = []
         for _c in range(method_body_count):
             method_idx = u30()
             u30()  # max_stack
@@ -337,9 +395,10 @@ class SWFInterpreter(object):
             u30()  # max_scope_depth
             code_length = u30()
             code = read_bytes(code_length)
+            m = Method(code, local_count)
+            self._all_methods.append(m)
             for avm_class in classes:
                 if method_idx in avm_class.method_idxs:
-                    m = Method(code, local_count)
                     avm_class.methods[avm_class.method_idxs[method_idx]] = m
             exception_count = u30()
             for _c2 in range(exception_count):
@@ -354,13 +413,27 @@ class SWFInterpreter(object):
 
         assert p + code_reader.tell() == len(code_tag)
 
-    def extract_class(self, class_name):
+    def patch_function(self, avm_class, func_name, f):
+        self._patched_functions[(avm_class, func_name)] = f
+
+    def extract_class(self, class_name, call_cinit=True):
         try:
-            return self._classes_by_name[class_name]
+            res = self._classes_by_name[class_name]
         except KeyError:
             raise ExtractorError('Class %r not found' % class_name)
 
+        if call_cinit and hasattr(res, 'cinit_idx'):
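+            # $cinit is the class's static initializer; running it once
+            # populates Const traits and static variables before first use.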
+            res.register_methods({'$cinit': res.cinit_idx})
+            res.methods['$cinit'] = self._all_methods[res.cinit_idx]
+            cinit = self.extract_function(res, '$cinit')
+            cinit([])
+
+        return res
+
     def extract_function(self, avm_class, func_name):
+        p = self._patched_functions.get((avm_class, func_name))
+        if p:
+            return p
         if func_name in avm_class.method_pyfunctions:
             return avm_class.method_pyfunctions[func_name]
         if func_name in self._classes_by_name:
@@ -379,10 +452,15 @@ class SWFInterpreter(object):
             registers = [avm_class.variables] + list(args) + [None] * m.local_count
             stack = []
             scopes = collections.deque([
-                self._classes_by_name, avm_class.variables])
+                self._classes_by_name, avm_class.constants, avm_class.variables])
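+            # avm_class.constants now sits in the scope chain between the
+            # global class table and the instance variables, so lookups can
+            # resolve Const trait names like ordinary properties.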
             while True:
                 opcode = _read_byte(coder)
-                if opcode == 17:  # iftrue
+                if opcode == 9:  # label
+                    pass  # Spec says: "Do nothing."
+                elif opcode == 16:  # jump
+                    offset = s24()
+                    coder.seek(coder.tell() + offset)
+                elif opcode == 17:  # iftrue
                     offset = s24()
                     value = stack.pop()
                     if value:
@@ -392,9 +470,40 @@ class SWFInterpreter(object):
                     value = stack.pop()
                     if not value:
                         coder.seek(coder.tell() + offset)
+                elif opcode == 19:  # ifeq
+                    offset = s24()
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    if value2 == value1:
+                        coder.seek(coder.tell() + offset)
+                elif opcode == 20:  # ifne
+                    offset = s24()
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    if value2 != value1:
+                        coder.seek(coder.tell() + offset)
+                elif opcode == 21:  # iflt
+                    offset = s24()
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    if value1 < value2:
+                        coder.seek(coder.tell() + offset)
+                elif opcode == 32:  # pushnull
+                    stack.append(None)
+                elif opcode == 33:  # pushundefined
+                    stack.append(undefined)
                 elif opcode == 36:  # pushbyte
                     v = _read_byte(coder)
                     stack.append(v)
+                elif opcode == 37:  # pushshort
+                    v = u30()
+                    stack.append(v)
+                elif opcode == 38:  # pushtrue
+                    stack.append(True)
+                elif opcode == 39:  # pushfalse
+                    stack.append(False)
+                elif opcode == 40:  # pushnan
+                    stack.append(float('NaN'))
                 elif opcode == 42:  # dup
                     value = stack[-1]
                     stack.append(value)
@@ -419,11 +528,31 @@ class SWFInterpreter(object):
                         [stack.pop() for _ in range(arg_count)]))
                     obj = stack.pop()
 
-                    if isinstance(obj, _AVMClass_Object):
+                    if obj == StringClass:
+                        if mname == 'String':
+                            assert len(args) == 1
+                            assert isinstance(args[0], (
+                                int, compat_str, _Undefined))
+                            if args[0] == undefined:
+                                res = 'undefined'
+                            else:
+                                res = compat_str(args[0])
+                            stack.append(res)
+                            continue
+                        else:
+                            raise NotImplementedError(
+                                'Function String.%s is not yet implemented'
+                                % mname)
+                    elif isinstance(obj, _AVMClass_Object):
                         func = self.extract_function(obj.avm_class, mname)
                         res = func(args)
                         stack.append(res)
                         continue
+                    elif isinstance(obj, _AVMClass):
+                        func = self.extract_function(obj, mname)
+                        res = func(args)
+                        stack.append(res)
+                        continue
                     elif isinstance(obj, _ScopeDict):
                         if mname in obj.avm_class.method_names:
                             func = self.extract_function(obj.avm_class, mname)
@@ -442,6 +571,13 @@ class SWFInterpreter(object):
                                 res = obj.split(args[0])
                             stack.append(res)
                             continue
+                        elif mname == 'charCodeAt':
+                            assert len(args) <= 1
+                            idx = 0 if len(args) == 0 else args[0]
+                            assert isinstance(idx, int)
+                            res = ord(obj[idx])
+                            stack.append(res)
+                            continue
                     elif isinstance(obj, list):
                         if mname == 'slice':
                             assert len(args) == 1
@@ -458,9 +594,18 @@ class SWFInterpreter(object):
                     raise NotImplementedError(
                         'Unsupported property %r on %r'
                         % (mname, obj))
+                elif opcode == 71:  # returnvoid
+                    res = undefined
+                    return res
                 elif opcode == 72:  # returnvalue
                     res = stack.pop()
                     return res
+                elif opcode == 73:  # constructsuper
+                    # Not yet implemented, just hope it works without it
+                    arg_count = u30()
+                    args = list(reversed(
+                        [stack.pop() for _ in range(arg_count)]))
+                    obj = stack.pop()
                 elif opcode == 74:  # constructproperty
                     index = u30()
                     arg_count = u30()
@@ -481,6 +626,17 @@ class SWFInterpreter(object):
                     args = list(reversed(
                         [stack.pop() for _ in range(arg_count)]))
                     obj = stack.pop()
+                    if isinstance(obj, _AVMClass_Object):
+                        func = self.extract_function(obj.avm_class, mname)
+                        res = func(args)
+                        assert res is undefined
+                        continue
+                    if isinstance(obj, _ScopeDict):
+                        assert mname in obj.avm_class.method_names
+                        func = self.extract_function(obj.avm_class, mname)
+                        res = func(args)
+                        assert res is undefined
+                        continue
                     if mname == 'reverse':
                         assert isinstance(obj, list)
                         obj.reverse()
@@ -504,7 +660,10 @@ class SWFInterpreter(object):
                             break
                     else:
                         res = scopes[0]
-                    stack.append(res[mname])
+                    if mname not in res and mname in _builtin_classes:
+                        stack.append(_builtin_classes[mname])
+                    else:
+                        stack.append(res[mname])
                 elif opcode == 94:  # findproperty
                     index = u30()
                     mname = self.multinames[index]
@@ -524,9 +683,15 @@ class SWFInterpreter(object):
                             break
                     else:
                         scope = avm_class.variables
-                    # I cannot find where static variables are initialized
-                    # so let's just return None
-                    res = scope.get(mname)
+
+                    if mname in scope:
+                        res = scope[mname]
+                    elif mname in _builtin_classes:
+                        res = _builtin_classes[mname]
+                    else:
+                        # Assume uninitialized
+                        # TODO warn here
+                        res = undefined
                     stack.append(res)
                 elif opcode == 97:  # setproperty
                     index = u30()
@@ -548,22 +713,57 @@ class SWFInterpreter(object):
                     pname = self.multinames[index]
                     if pname == 'length':
                         obj = stack.pop()
-                        assert isinstance(obj, list)
+                        assert isinstance(obj, (compat_str, list))
                         stack.append(len(obj))
+                    elif isinstance(pname, compat_str):  # Member access
+                        obj = stack.pop()
+                        if isinstance(obj, _AVMClass):
+                            res = obj.static_properties[pname]
+                            stack.append(res)
+                            continue
+
+                        assert isinstance(obj, (dict, _ScopeDict)), (
+                            'Accessing member %r on %r' % (pname, obj))
+                        res = obj.get(pname, undefined)
+                        stack.append(res)
                     else:  # Assume attribute access
                         idx = stack.pop()
                         assert isinstance(idx, int)
                         obj = stack.pop()
                         assert isinstance(obj, list)
                         stack.append(obj[idx])
+                elif opcode == 104:  # initproperty
+                    index = u30()
+                    value = stack.pop()
+                    idx = self.multinames[index]
+                    if isinstance(idx, _Multiname):
+                        idx = stack.pop()
+                    obj = stack.pop()
+                    obj[idx] = value
                 elif opcode == 115:  # convert_i
                     value = stack.pop()
                     intvalue = int(value)
                     stack.append(intvalue)
                 elif opcode == 128:  # coerce
                     u30()
+                elif opcode == 130:  # coerce_a
+                    value = stack.pop()
+                    # "coerce to any" leaves the value unchanged
+                    stack.append(value)
                 elif opcode == 133:  # coerce_s
                     assert isinstance(stack[-1], (type(None), compat_str))
+                elif opcode == 147:  # decrement
+                    value = stack.pop()
+                    assert isinstance(value, int)
+                    stack.append(value - 1)
+                elif opcode == 149:  # typeof
+                    value = stack.pop()
+                    # typeof pushes the type name onto the stack rather
+                    # than returning it from the interpreted function.
+                    stack.append({
+                        _Undefined: 'undefined',
+                        compat_str: 'String',
+                        int: 'Number',
+                        float: 'Number',
+                    }[type(value)])
                 elif opcode == 160:  # add
                     value2 = stack.pop()
                     value1 = stack.pop()
@@ -574,16 +774,37 @@ class SWFInterpreter(object):
                     value1 = stack.pop()
                     res = value1 - value2
                     stack.append(res)
+                elif opcode == 162:  # multiply
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    res = value1 * value2
+                    stack.append(res)
                 elif opcode == 164:  # modulo
                     value2 = stack.pop()
                     value1 = stack.pop()
                     res = value1 % value2
                     stack.append(res)
+                elif opcode == 168:  # bitand
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    assert isinstance(value1, int)
+                    assert isinstance(value2, int)
+                    res = value1 & value2
+                    stack.append(res)
+                elif opcode == 171:  # equals
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    result = value1 == value2
+                    stack.append(result)
                 elif opcode == 175:  # greaterequals
                     value2 = stack.pop()
                     value1 = stack.pop()
                     result = value1 >= value2
                     stack.append(result)
+                elif opcode == 192:  # increment_i
+                    value = stack.pop()
+                    assert isinstance(value, int)
+                    stack.append(value + 1)
                 elif opcode == 208:  # getlocal_0
                     stack.append(registers[0])
                 elif opcode == 209:  # getlocal_1
@@ -606,4 +827,3 @@ class SWFInterpreter(object):
 
         avm_class.method_pyfunctions[func_name] = resfunc
         return resfunc
-
index 27308376130b97cc9cf6059b1f9dd4a64753ef42..2d2703368d8c2974e665985a10d1b6dcace8b235 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import io
 import json
 import traceback
@@ -7,20 +9,18 @@ import subprocess
 import sys
 from zipimport import zipimporter
 
-from .utils import (
+from .compat import (
     compat_str,
     compat_urllib_request,
 )
 from .version import __version__
 
+
 def rsa_verify(message, signature, key):
     from struct import pack
     from hashlib import sha256
-    from sys import version_info
-    def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
+
+    assert isinstance(message, bytes)
     block_size = 0
     n = key[0]
     while n:
@@ -31,14 +31,18 @@ def rsa_verify(message, signature, key):
     while signature:
         raw_bytes.insert(0, pack("B", signature & 0xFF))
         signature >>= 8
-    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
+    signature = (block_size - len(raw_bytes)) * b'\x00' + b''.join(raw_bytes)
+    if signature[0:2] != b'\x00\x01':
+        return False
     signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    if b'\x00' not in signature:
+        return False
+    signature = signature[signature.index(b'\x00') + 1:]
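+    # This byte prefix is the DER-encoded DigestInfo header identifying
+    # SHA-256, as mandated by PKCS#1 v1.5 signature padding.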
+    if not signature.startswith(b'\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'):
+        return False
     signature = signature[19:]
-    if signature != sha256(message).digest(): return False
+    if signature != sha256(message).digest():
+        return False
     return True
 
 
@@ -51,18 +55,19 @@ def update_self(to_screen, verbose):
     UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
 
     if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
-        to_screen(u'It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
+        to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
         return
 
     # Check if there is a new version
     try:
         newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
     except:
-        if verbose: to_screen(compat_str(traceback.format_exc()))
-        to_screen(u'ERROR: can\'t find the current version. Please try again later.')
+        if verbose:
+            to_screen(compat_str(traceback.format_exc()))
+        to_screen('ERROR: can\'t find the current version. Please try again later.')
         return
     if newversion == __version__:
-        to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
+        to_screen('youtube-dl is up-to-date (' + __version__ + ')')
         return
 
     # Download and check versions info
@@ -70,16 +75,17 @@ def update_self(to_screen, verbose):
         versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
         versions_info = json.loads(versions_info)
     except:
-        if verbose: to_screen(compat_str(traceback.format_exc()))
-        to_screen(u'ERROR: can\'t obtain versions info. Please try again later.')
+        if verbose:
+            to_screen(compat_str(traceback.format_exc()))
+        to_screen('ERROR: can\'t obtain versions info. Please try again later.')
         return
-    if not 'signature' in versions_info:
-        to_screen(u'ERROR: the versions file is not signed or corrupted. Aborting.')
+    if 'signature' not in versions_info:
+        to_screen('ERROR: the versions file is not signed or corrupted. Aborting.')
         return
     signature = versions_info['signature']
     del versions_info['signature']
     if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
-        to_screen(u'ERROR: the versions file signature is invalid. Aborting.')
+        to_screen('ERROR: the versions file signature is invalid. Aborting.')
         return
 
     version_id = versions_info['latest']
@@ -87,10 +93,10 @@ def update_self(to_screen, verbose):
     def version_tuple(version_str):
         return tuple(map(int, version_str.split('.')))
     if version_tuple(__version__) >= version_tuple(version_id):
-        to_screen(u'youtube-dl is up to date (%s)' % __version__)
+        to_screen('youtube-dl is up to date (%s)' % __version__)
         return
 
-    to_screen(u'Updating to version ' + version_id + ' ...')
+    to_screen('Updating to version ' + version_id + ' ...')
     version = versions_info['versions'][version_id]
 
     print_notes(to_screen, versions_info['versions'])
@@ -98,11 +104,11 @@ def update_self(to_screen, verbose):
     filename = sys.argv[0]
     # Py2EXE: Filename could be different
     if hasattr(sys, "frozen") and not os.path.isfile(filename):
-        if os.path.isfile(filename + u'.exe'):
-            filename += u'.exe'
+        if os.path.isfile(filename + '.exe'):
+            filename += '.exe'
 
     if not os.access(filename, os.W_OK):
-        to_screen(u'ERROR: no write permissions on %s' % filename)
+        to_screen('ERROR: no write permissions on %s' % filename)
         return
 
     # Py2EXE
@@ -110,7 +116,7 @@ def update_self(to_screen, verbose):
         exe = os.path.abspath(filename)
         directory = os.path.dirname(exe)
         if not os.access(directory, os.W_OK):
-            to_screen(u'ERROR: no write permissions on %s' % directory)
+            to_screen('ERROR: no write permissions on %s' % directory)
             return
 
         try:
@@ -118,40 +124,43 @@ def update_self(to_screen, verbose):
             newcontent = urlh.read()
             urlh.close()
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
-            to_screen(u'ERROR: unable to download latest version')
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
+            to_screen('ERROR: unable to download latest version')
             return
 
         newcontent_hash = hashlib.sha256(newcontent).hexdigest()
         if newcontent_hash != version['exe'][1]:
-            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
+            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
             return
 
         try:
             with open(exe + '.new', 'wb') as outf:
                 outf.write(newcontent)
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
-            to_screen(u'ERROR: unable to write the new version')
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
+            to_screen('ERROR: unable to write the new version')
             return
 
         try:
             bat = os.path.join(directory, 'youtube-dl-updater.bat')
             with io.open(bat, 'w') as batfile:
-                batfile.write(u"""
+                batfile.write('''
 @echo off
 echo Waiting for file handle to be closed ...
 ping 127.0.0.1 -n 5 -w 1000 > NUL
 move /Y "%s.new" "%s" > NUL
 echo Updated youtube-dl to version %s.
 start /b "" cmd /c del "%%~f0"&exit /b"
-                \n""" % (exe, exe, version_id))
+                \n''' % (exe, exe, version_id))
 
             subprocess.Popen([bat])  # Continues to run in the background
             return  # Do not show premature success messages
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
-            to_screen(u'ERROR: unable to overwrite current version')
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
+            to_screen('ERROR: unable to overwrite current version')
             return
 
     # Zip unix package
@@ -161,35 +170,39 @@ start /b "" cmd /c del "%%~f0"&exit /b"
             newcontent = urlh.read()
             urlh.close()
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
-            to_screen(u'ERROR: unable to download latest version')
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
+            to_screen('ERROR: unable to download latest version')
             return
 
         newcontent_hash = hashlib.sha256(newcontent).hexdigest()
         if newcontent_hash != version['bin'][1]:
-            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
+            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
             return
 
         try:
             with open(filename, 'wb') as outf:
                 outf.write(newcontent)
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
-            to_screen(u'ERROR: unable to overwrite current version')
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
+            to_screen('ERROR: unable to overwrite current version')
             return
 
-    to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
+    to_screen('Updated youtube-dl. Restart youtube-dl to use the new version.')
+
 
 def get_notes(versions, fromVersion):
     notes = []
-    for v,vdata in sorted(versions.items()):
+    for v, vdata in sorted(versions.items()):
         if v > fromVersion:
             notes.extend(vdata.get('notes', []))
     return notes
 
+
 def print_notes(to_screen, versions, fromVersion=__version__):
     notes = get_notes(versions, fromVersion)
     if notes:
-        to_screen(u'PLEASE NOTE:')
+        to_screen('PLEASE NOTE:')
         for note in notes:
             to_screen(note)
index d7ae5a90a8480f6c825303039b357ff53e7f8d56..ec34dcef935240ef8cfb43d00e8589c4fd0040a1 100644 (file)
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+from __future__ import unicode_literals
+
 import calendar
 import codecs
 import contextlib
@@ -8,7 +10,6 @@ import ctypes
 import datetime
 import email.utils
 import errno
-import getpass
 import gzip
 import itertools
 import io
@@ -29,179 +30,20 @@ import traceback
 import xml.etree.ElementTree
 import zlib
 
-try:
-    import urllib.request as compat_urllib_request
-except ImportError: # Python 2
-    import urllib2 as compat_urllib_request
-
-try:
-    import urllib.error as compat_urllib_error
-except ImportError: # Python 2
-    import urllib2 as compat_urllib_error
-
-try:
-    import urllib.parse as compat_urllib_parse
-except ImportError: # Python 2
-    import urllib as compat_urllib_parse
-
-try:
-    from urllib.parse import urlparse as compat_urllib_parse_urlparse
-except ImportError: # Python 2
-    from urlparse import urlparse as compat_urllib_parse_urlparse
-
-try:
-    import urllib.parse as compat_urlparse
-except ImportError: # Python 2
-    import urlparse as compat_urlparse
-
-try:
-    import http.cookiejar as compat_cookiejar
-except ImportError: # Python 2
-    import cookielib as compat_cookiejar
-
-try:
-    import html.entities as compat_html_entities
-except ImportError: # Python 2
-    import htmlentitydefs as compat_html_entities
-
-try:
-    import html.parser as compat_html_parser
-except ImportError: # Python 2
-    import HTMLParser as compat_html_parser
-
-try:
-    import http.client as compat_http_client
-except ImportError: # Python 2
-    import httplib as compat_http_client
-
-try:
-    from urllib.error import HTTPError as compat_HTTPError
-except ImportError:  # Python 2
-    from urllib2 import HTTPError as compat_HTTPError
-
-try:
-    from urllib.request import urlretrieve as compat_urlretrieve
-except ImportError:  # Python 2
-    from urllib import urlretrieve as compat_urlretrieve
-
-
-try:
-    from subprocess import DEVNULL
-    compat_subprocess_get_DEVNULL = lambda: DEVNULL
-except ImportError:
-    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
-
-try:
-    from urllib.parse import unquote as compat_urllib_parse_unquote
-except ImportError:
-    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
-        if string == '':
-            return string
-        res = string.split('%')
-        if len(res) == 1:
-            return string
-        if encoding is None:
-            encoding = 'utf-8'
-        if errors is None:
-            errors = 'replace'
-        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
-        pct_sequence = b''
-        string = res[0]
-        for item in res[1:]:
-            try:
-                if not item:
-                    raise ValueError
-                pct_sequence += item[:2].decode('hex')
-                rest = item[2:]
-                if not rest:
-                    # This segment was just a single percent-encoded character.
-                    # May be part of a sequence of code units, so delay decoding.
-                    # (Stored in pct_sequence).
-                    continue
-            except ValueError:
-                rest = '%' + item
-            # Encountered non-percent-encoded characters. Flush the current
-            # pct_sequence.
-            string += pct_sequence.decode(encoding, errors) + rest
-            pct_sequence = b''
-        if pct_sequence:
-            # Flush the final pct_sequence
-            string += pct_sequence.decode(encoding, errors)
-        return string
-
+from .compat import (
+    compat_chr,
+    compat_getenv,
+    compat_html_entities,
+    compat_parse_qs,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_parse,
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urlparse,
+    shlex_quote,
+)
 
-try:
-    from urllib.parse import parse_qs as compat_parse_qs
-except ImportError: # Python 2
-    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
-    # Python 2's version is apparently totally broken
-
-    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
-                encoding='utf-8', errors='replace'):
-        qs, _coerce_result = qs, unicode
-        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
-        r = []
-        for name_value in pairs:
-            if not name_value and not strict_parsing:
-                continue
-            nv = name_value.split('=', 1)
-            if len(nv) != 2:
-                if strict_parsing:
-                    raise ValueError("bad query field: %r" % (name_value,))
-                # Handle case of a control-name with no equal sign
-                if keep_blank_values:
-                    nv.append('')
-                else:
-                    continue
-            if len(nv[1]) or keep_blank_values:
-                name = nv[0].replace('+', ' ')
-                name = compat_urllib_parse_unquote(
-                    name, encoding=encoding, errors=errors)
-                name = _coerce_result(name)
-                value = nv[1].replace('+', ' ')
-                value = compat_urllib_parse_unquote(
-                    value, encoding=encoding, errors=errors)
-                value = _coerce_result(value)
-                r.append((name, value))
-        return r
-
-    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
-                encoding='utf-8', errors='replace'):
-        parsed_result = {}
-        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
-                        encoding=encoding, errors=errors)
-        for name, value in pairs:
-            if name in parsed_result:
-                parsed_result[name].append(value)
-            else:
-                parsed_result[name] = [value]
-        return parsed_result
-
-try:
-    compat_str = unicode # Python 2
-except NameError:
-    compat_str = str
-
-try:
-    compat_chr = unichr # Python 2
-except NameError:
-    compat_chr = chr
-
-try:
-    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
-except ImportError:  # Python 2.6
-    from xml.parsers.expat import ExpatError as compat_xml_parse_error
-
-try:
-    from shlex import quote as shlex_quote
-except ImportError:  # Python < 3.3
-    def shlex_quote(s):
-        return "'" + s.replace("'", "'\"'\"'") + "'"
-
-
-def compat_ord(c):
-    if type(c) is int: return c
-    else: return ord(c)
 
 # This is not clearly defined otherwise
 compiled_regex_type = type(re.compile(''))
@@ -214,6 +56,7 @@ std_headers = {
     'Accept-Language': 'en-us,en;q=0.5',
 }
 
+
 def preferredencoding():
     """Get preferred encoding.
 
@@ -222,28 +65,33 @@ def preferredencoding():
     """
     try:
         pref = locale.getpreferredencoding()
-        u'TEST'.encode(pref)
+        'TEST'.encode(pref)
     except:
         pref = 'UTF-8'
 
     return pref
 
-if sys.version_info < (3,0):
-    def compat_print(s):
-        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
-else:
-    def compat_print(s):
-        assert type(s) == type(u'')
-        print(s)
-
 
 def write_json_file(obj, fn):
-    """ Encode obj as JSON and write it to fn, atomically """
+    """ Encode obj as JSON and write it to fn, atomically if possible """
+
+    fn = encodeFilename(fn)
+    if sys.version_info < (3, 0) and sys.platform != 'win32':
+        encoding = get_filesystem_encoding()
+        # os.path.basename returns a bytes object, but NamedTemporaryFile
+        # will fail if the filename contains non-ASCII characters unless we
+        # use a unicode object
+        path_basename = lambda f: os.path.basename(f).decode(encoding)
+        # the same for os.path.dirname
+        path_dirname = lambda f: os.path.dirname(f).decode(encoding)
+    else:
+        path_basename = os.path.basename
+        path_dirname = os.path.dirname
 
     args = {
         'suffix': '.tmp',
-        'prefix': os.path.basename(fn) + '.',
-        'dir': os.path.dirname(fn),
+        'prefix': path_basename(fn) + '.',
+        'dir': path_dirname(fn),
         'delete': False,
     }
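+    # The dict configures a NamedTemporaryFile created next to fn; writing
+    # there and renaming over fn is what makes the update atomic on POSIX.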
 
@@ -262,6 +110,13 @@ def write_json_file(obj, fn):
     try:
         with tf:
             json.dump(obj, tf)
+        if sys.platform == 'win32':
+            # Need to remove existing file on Windows, else os.rename raises
+            # WindowsError or FileExistsError.
+            try:
+                os.unlink(fn)
+            except OSError:
+                pass
         os.rename(tf.name, fn)
     except:
         try:
@@ -276,7 +131,7 @@ if sys.version_info >= (2, 7):
         """ Find the xpath xpath[@key=val] """
         assert re.match(r'^[a-zA-Z-]+$', key)
         assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
-        expr = xpath + u"[@%s='%s']" % (key, val)
+        expr = xpath + "[@%s='%s']" % (key, val)
         return node.find(expr)
 else:
     def find_xpath_attr(node, xpath, key, val):
@@ -292,6 +147,8 @@ else:
 
 # On python2.6 the xml.etree.ElementTree.Element methods don't support
 # the namespace parameter
+
+
 def xpath_with_ns(path, ns_map):
     components = [c.split(':') for c in path.split('/')]
     replaced = []
@@ -309,7 +166,7 @@ def xpath_text(node, xpath, name=None, fatal=False):
         xpath = xpath.encode('ascii')
 
     n = node.find(xpath)
-    if n is None:
+    if n is None or n.text is None:
         if fatal:
             name = xpath if name is None else name
             raise ExtractorError('Could not find XML element %s' % name)
@@ -318,127 +175,32 @@ def xpath_text(node, xpath, name=None, fatal=False):
     return n.text
 
 
-compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
-class BaseHTMLParser(compat_html_parser.HTMLParser):
-    def __init(self):
-        compat_html_parser.HTMLParser.__init__(self)
-        self.html = None
-
-    def loads(self, html):
-        self.html = html
-        self.feed(html)
-        self.close()
-
-class AttrParser(BaseHTMLParser):
-    """Modified HTMLParser that isolates a tag with the specified attribute"""
-    def __init__(self, attribute, value):
-        self.attribute = attribute
-        self.value = value
-        self.result = None
-        self.started = False
-        self.depth = {}
-        self.watch_startpos = False
-        self.error_count = 0
-        BaseHTMLParser.__init__(self)
-
-    def error(self, message):
-        if self.error_count > 10 or self.started:
-            raise compat_html_parser.HTMLParseError(message, self.getpos())
-        self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
-        self.error_count += 1
-        self.goahead(1)
-
-    def handle_starttag(self, tag, attrs):
-        attrs = dict(attrs)
-        if self.started:
-            self.find_startpos(None)
-        if self.attribute in attrs and attrs[self.attribute] == self.value:
-            self.result = [tag]
-            self.started = True
-            self.watch_startpos = True
-        if self.started:
-            if not tag in self.depth: self.depth[tag] = 0
-            self.depth[tag] += 1
-
-    def handle_endtag(self, tag):
-        if self.started:
-            if tag in self.depth: self.depth[tag] -= 1
-            if self.depth[self.result[0]] == 0:
-                self.started = False
-                self.result.append(self.getpos())
-
-    def find_startpos(self, x):
-        """Needed to put the start position of the result (self.result[1])
-        after the opening tag with the requested id"""
-        if self.watch_startpos:
-            self.watch_startpos = False
-            self.result.append(self.getpos())
-    handle_entityref = handle_charref = handle_data = handle_comment = \
-    handle_decl = handle_pi = unknown_decl = find_startpos
-
-    def get_result(self):
-        if self.result is None:
-            return None
-        if len(self.result) != 3:
-            return None
-        lines = self.html.split('\n')
-        lines = lines[self.result[1][0]-1:self.result[2][0]]
-        lines[0] = lines[0][self.result[1][1]:]
-        if len(lines) == 1:
-            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
-        lines[-1] = lines[-1][:self.result[2][1]]
-        return '\n'.join(lines).strip()
-# Hack for https://github.com/rg3/youtube-dl/issues/662
-if sys.version_info < (2, 7, 3):
-    AttrParser.parse_endtag = (lambda self, i:
-        i + len("</scr'+'ipt>")
-        if self.rawdata[i:].startswith("</scr'+'ipt>")
-        else compat_html_parser.HTMLParser.parse_endtag(self, i))
-
 def get_element_by_id(id, html):
     """Return the content of the tag with the specified ID in the passed HTML document"""
     return get_element_by_attribute("id", id, html)
 
+
 def get_element_by_attribute(attribute, value, html):
     """Return the content of the tag with the specified attribute in the passed HTML document"""
-    parser = AttrParser(attribute, value)
-    try:
-        parser.loads(html)
-    except compat_html_parser.HTMLParseError:
-        pass
-    return parser.get_result()
 
-class MetaParser(BaseHTMLParser):
-    """
-    Modified HTMLParser that isolates a meta tag with the specified name 
-    attribute.
-    """
-    def __init__(self, name):
-        BaseHTMLParser.__init__(self)
-        self.name = name
-        self.content = None
-        self.result = None
-
-    def handle_starttag(self, tag, attrs):
-        if tag != 'meta':
-            return
-        attrs = dict(attrs)
-        if attrs.get('name') == self.name:
-            self.result = attrs.get('content')
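+    # A single verbose regex replaces the old HTMLParser machinery: it finds
+    # the first tag carrying attribute=value and captures everything up to
+    # the matching close tag (nested same-name tags are not handled).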
+    m = re.search(r'''(?xs)
+        <([a-zA-Z0-9:._-]+)
+         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
+         \s+%s=['"]?%s['"]?
+         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
+        \s*>
+        (?P<content>.*?)
+        </\1>
+    ''' % (re.escape(attribute), re.escape(value)), html)
 
-    def get_result(self):
-        return self.result
+    if not m:
+        return None
+    res = m.group('content')
 
-def get_meta_content(name, html):
-    """
-    Return the content attribute from the meta tag with the given name attribute.
-    """
-    parser = MetaParser(name)
-    try:
-        parser.loads(html)
-    except compat_html_parser.HTMLParseError:
-        pass
-    return parser.get_result()
+    if res.startswith('"') or res.startswith("'"):
+        res = res[1:-1]
+
+    return unescapeHTML(res)
 
 
 def clean_html(html):
@@ -465,7 +227,7 @@ def sanitize_open(filename, open_mode):
     It returns the tuple (stream, definitive_file_name).
     """
     try:
-        if filename == u'-':
+        if filename == '-':
             if sys.platform == 'win32':
                 import msvcrt
                 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
@@ -478,9 +240,9 @@ def sanitize_open(filename, open_mode):
 
         # In case of error, try to remove win32 forbidden chars
         alt_filename = os.path.join(
-                        re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part)
-                        for path_part in os.path.split(filename)
-                       )
+            # Unpack the components: os.path.join expects separate
+            # arguments, not a single generator.
+            *(re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part)
+              for path_part in os.path.split(filename))
+        )
         if alt_filename == filename:
             raise
         else:
@@ -497,6 +259,7 @@ def timeconvert(timestr):
         timestamp = email.utils.mktime_tz(timetuple)
     return timestamp
 
+
 def sanitize_filename(s, restricted=False, is_id=False):
     """Sanitizes a string so it could be used as part of a filename.
     If restricted is set, use a stricter subset of allowed characters.
@@ -517,7 +280,7 @@ def sanitize_filename(s, restricted=False, is_id=False):
             return '_'
         return char
 
-    result = u''.join(map(replace_insane, s))
+    result = ''.join(map(replace_insane, s))
     if not is_id:
         while '__' in result:
             result = result.replace('__', '_')
@@ -529,6 +292,7 @@ def sanitize_filename(s, restricted=False, is_id=False):
             result = '_'
     return result
 
+
 def orderedSet(iterable):
     """ Remove all duplicates from the input iterable """
     res = []
@@ -547,15 +311,15 @@ def _htmlentity_transform(entity):
     mobj = re.match(r'#(x?[0-9]+)', entity)
     if mobj is not None:
         numstr = mobj.group(1)
-        if numstr.startswith(u'x'):
+        if numstr.startswith('x'):
             base = 16
-            numstr = u'0%s' % numstr
+            numstr = '0%s' % numstr
         else:
             base = 10
         return compat_chr(int(numstr, base))
 
     # Unknown entity in name, return its literal representation
-    return (u'&%s;' % entity)
+    return ('&%s;' % entity)
 
 
 def unescapeHTML(s):
@@ -579,7 +343,7 @@ def encodeFilename(s, for_subprocess=False):
         return s
 
     if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
-        # Pass u'' directly to use Unicode APIs on Windows 2000 and up
+        # Pass '' directly to use Unicode APIs on Windows 2000 and up
         # (Detecting Windows NT 4 is tricky because 'major >= 4' would
         # match Windows 9x series as well. Besides, NT 4 is obsolete.)
         if not for_subprocess:
@@ -613,6 +377,7 @@ def decodeOption(optval):
     assert isinstance(optval, compat_str)
     return optval
 
+
 def formatSeconds(secs):
     if secs > 3600:
         return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
@@ -623,7 +388,13 @@ def formatSeconds(secs):
 
 
 def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
-    if sys.version_info < (3, 2):
+    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
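+        # Feature-detect rather than version-check first: Python 2.7.9
+        # backported ssl.create_default_context, so the version tests
+        # below would misclassify it.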
+        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+        context.options &= ~ssl.OP_NO_SSLv3  # Allow older, not-as-secure SSLv3
+        if opts_no_check_certificate:
+            context.verify_mode = ssl.CERT_NONE
+        return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
+    elif sys.version_info < (3, 2):
         import httplib
 
         class HTTPSConnectionV3(httplib.HTTPSConnection):
@@ -644,26 +415,18 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
             def https_open(self, req):
                 return self.do_open(HTTPSConnectionV3, req)
         return HTTPSHandlerV3(**kwargs)
-    elif hasattr(ssl, 'create_default_context'):  # Python >= 3.4
-        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
-        context.options &= ~ssl.OP_NO_SSLv3  # Allow older, not-as-secure SSLv3
-        if opts_no_check_certificate:
-            context.verify_mode = ssl.CERT_NONE
-        return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
     else:  # Python < 3.4
         context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
         context.verify_mode = (ssl.CERT_NONE
                                if opts_no_check_certificate
                                else ssl.CERT_REQUIRED)
         context.set_default_verify_paths()
-        try:
-            context.load_default_certs()
-        except AttributeError:
-            pass  # Python < 3.4
         return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
 
+
 class ExtractorError(Exception):
     """Error during info extraction."""
+
     def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
         """ tb, if given, is the original traceback (so that it can be printed out).
         If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
@@ -674,9 +437,15 @@ class ExtractorError(Exception):
         if video_id is not None:
             msg = video_id + ': ' + msg
         if cause:
-            msg += u' (caused by %r)' % cause
+            msg += ' (caused by %r)' % cause
         if not expected:
-            msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type  youtube-dl -U  to update.'
+            if ytdl_is_updateable():
+                update_cmd = 'type  youtube-dl -U  to update'
+            else:
+                update_cmd = 'see  https://yt-dl.org/update  on how to update'
+            msg += '; please report this issue on https://yt-dl.org/bug .'
+            msg += ' Make sure you are using the latest version; %s.' % update_cmd
+            msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
         super(ExtractorError, self).__init__(msg)
 
         self.traceback = tb
@@ -687,7 +456,7 @@ class ExtractorError(Exception):
     def format_traceback(self):
         if self.traceback is None:
             return None
-        return u''.join(traceback.format_tb(self.traceback))
+        return ''.join(traceback.format_tb(self.traceback))
 
 
 class RegexNotFoundError(ExtractorError):
@@ -702,6 +471,7 @@ class DownloadError(Exception):
     configured to continue on errors. They will contain the appropriate
     error message.
     """
+
     def __init__(self, msg, exc_info=None):
         """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
         super(DownloadError, self).__init__(msg)
@@ -723,9 +493,11 @@ class PostProcessingError(Exception):
     This exception may be raised by PostProcessor's .run() method to
     indicate an error in the postprocessing task.
     """
+
     def __init__(self, msg):
         self.msg = msg
 
+
 class MaxDownloadsReached(Exception):
     """ --max-downloads limit has been reached. """
     pass
@@ -755,6 +527,7 @@ class ContentTooShortError(Exception):
         self.downloaded = downloaded
         self.expected = expected
 
+
 class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
     """Handler for HTTP requests and responses.
 
@@ -849,7 +622,7 @@ def parse_iso8601(date_str, delimiter='T'):
         return None
 
     m = re.search(
-        r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$',
+        r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
         date_str)
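+    # The optional (\.[0-9]+) group lets timestamps carrying fractional
+    # seconds still match when the timezone designator is located.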
     if not m:
         timezone = datetime.timedelta()
@@ -862,22 +635,24 @@ def parse_iso8601(date_str, delimiter='T'):
             timezone = datetime.timedelta(
                 hours=sign * int(m.group('hours')),
                 minutes=sign * int(m.group('minutes')))
-    date_format =  '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
+    date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
     dt = datetime.datetime.strptime(date_str, date_format) - timezone
     return calendar.timegm(dt.timetuple())
 
 
-def unified_strdate(date_str):
+def unified_strdate(date_str, day_first=True):
     """Return a string with the date in the format YYYYMMDD"""
 
     if date_str is None:
         return None
-
     upload_date = None
-    #Replace commas
+    # Replace commas
     date_str = date_str.replace(',', ' ')
     # %z (UTC offset) is only supported in python>=3.2
     date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
+    # Remove AM/PM + timezone
+    date_str = re.sub(r'(?i)\s*(?:AM|PM)\s+[A-Z]+', '', date_str)
+
     format_expressions = [
         '%d %B %Y',
         '%d %b %Y',
@@ -892,7 +667,6 @@ def unified_strdate(date_str):
         '%d/%m/%Y',
         '%d/%m/%y',
         '%Y/%m/%d %H:%M:%S',
-        '%d/%m/%Y %H:%M:%S',
         '%Y-%m-%d %H:%M:%S',
         '%Y-%m-%d %H:%M:%S.%f',
         '%d.%m.%Y %H:%M',
@@ -904,6 +678,14 @@ def unified_strdate(date_str):
         '%Y-%m-%dT%H:%M:%S.%f',
         '%Y-%m-%dT%H:%M',
     ]
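+    # Ambiguous all-numeric dates such as 01/02/2014 are disambiguated by
+    # day_first: %d/%m/%Y when True (the default), %m/%d/%Y otherwise.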
+    if day_first:
+        format_expressions.extend([
+            '%d/%m/%Y %H:%M:%S',
+        ])
+    else:
+        format_expressions.extend([
+            '%m/%d/%Y %H:%M:%S',
+        ])
     for expression in format_expressions:
         try:
             upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
@@ -915,25 +697,30 @@ def unified_strdate(date_str):
             upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
     return upload_date
 
-def determine_ext(url, default_ext=u'unknown_video'):
+
+def determine_ext(url, default_ext='unknown_video'):
     if url is None:
         return default_ext
-    guess = url.partition(u'?')[0].rpartition(u'.')[2]
+    guess = url.partition('?')[0].rpartition('.')[2]
     if re.match(r'^[A-Za-z0-9]+$', guess):
         return guess
     else:
         return default_ext
 
+
 def subtitles_filename(filename, sub_lang, sub_format):
-    return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
+
 
 def date_from_str(date_str):
     """
     Return a datetime object from a string in the format YYYYMMDD or
     (now|today)[+-][0-9](day|week|month|year)(s)?"""
     today = datetime.date.today()
-    if date_str == 'now'or date_str == 'today':
+    if date_str in ('now', 'today'):
         return today
+    if date_str == 'yesterday':
+        return today - datetime.timedelta(days=1)
     match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
     if match is not None:
         sign = match.group('sign')
@@ -941,7 +728,7 @@ def date_from_str(date_str):
         if sign == '-':
             time = -time
         unit = match.group('unit')
-        #A bad aproximation?
+        # A bad approximation?
         if unit == 'month':
             unit = 'day'
             time *= 30
@@ -952,7 +739,8 @@ def date_from_str(date_str):
         delta = datetime.timedelta(**{unit: time})
         return today + delta
     return datetime.datetime.strptime(date_str, "%Y%m%d").date()
-    
+
+
 def hyphenate_date(date_str):
     """
     Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
@@ -962,8 +750,10 @@ def hyphenate_date(date_str):
     else:
         return date_str
 
+
 class DateRange(object):
     """Represents a time interval between two dates"""
+
     def __init__(self, start=None, end=None):
         """start and end must be strings in the format accepted by date"""
         if start is not None:
@@ -976,17 +766,20 @@ class DateRange(object):
             self.end = datetime.datetime.max.date()
         if self.start > self.end:
             raise ValueError('Date range: "%s", the start date must be before the end date' % self)
+
     @classmethod
     def day(cls, day):
         """Returns a range that only contains the given day"""
-        return cls(day,day)
+        return cls(day, day)
+
     def __contains__(self, date):
         """Check if the date is in the range"""
         if not isinstance(date, datetime.date):
             date = date_from_str(date)
         return self.start <= date <= self.end
+
     def __str__(self):
-        return '%s - %s' % ( self.start.isoformat(), self.end.isoformat())
+        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
 
 
 def platform_name():
@@ -1022,22 +815,22 @@ def _windows_write_string(s, out):
 
     GetStdHandle = ctypes.WINFUNCTYPE(
         ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
-        ("GetStdHandle", ctypes.windll.kernel32))
+        (b"GetStdHandle", ctypes.windll.kernel32))
     h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
 
     WriteConsoleW = ctypes.WINFUNCTYPE(
         ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
         ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
-        ctypes.wintypes.LPVOID)(("WriteConsoleW", ctypes.windll.kernel32))
+        ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
     written = ctypes.wintypes.DWORD(0)
 
-    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(("GetFileType", ctypes.windll.kernel32))
+    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
     FILE_TYPE_CHAR = 0x0002
     FILE_TYPE_REMOTE = 0x8000
     GetConsoleMode = ctypes.WINFUNCTYPE(
         ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
         ctypes.POINTER(ctypes.wintypes.DWORD))(
-        ("GetConsoleMode", ctypes.windll.kernel32))
+        (b"GetConsoleMode", ctypes.windll.kernel32))
     INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
 
     def not_a_console(handle):
@@ -1105,10 +898,7 @@ def bytes_to_intlist(bs):
 def intlist_to_bytes(xs):
     if not xs:
         return b''
-    if isinstance(chr(0), bytes):  # Python 2
-        return ''.join([chr(x) for x in xs])
-    else:
-        return bytes(xs)
+    return struct_pack('%dB' % len(xs), *xs)
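
Since struct_pack yields the same byte string on Python 2 and 3, the explicit interpreter check is no longer needed; a minimal sketch of the round trip with bytes_to_intlist:

    from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes

    assert intlist_to_bytes([0, 1, 127, 255]) == b'\x00\x01\x7f\xff'
    assert bytes_to_intlist(b'\x00\xff') == [0, 255]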
 
 
 # Cross-platform file locking
@@ -1207,17 +997,20 @@ class locked_file(object):
         return self.f.read(*args)
 
 
+def get_filesystem_encoding():
+    encoding = sys.getfilesystemencoding()
+    return encoding if encoding is not None else 'utf-8'
+
+
 def shell_quote(args):
     quoted_args = []
-    encoding = sys.getfilesystemencoding()
-    if encoding is None:
-        encoding = 'utf-8'
+    encoding = get_filesystem_encoding()
     for a in args:
         if isinstance(a, bytes):
             # We may get a filename encoded with 'encodeFilename'
             a = a.decode(encoding)
         quoted_args.append(pipes.quote(a))
-    return u' '.join(quoted_args)
+    return ' '.join(quoted_args)
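
The encoding lookup is factored out into get_filesystem_encoding() so other callers can reuse it; shell_quote itself behaves as before (made-up file name):

    from youtube_dl.utils import shell_quote

    print(shell_quote(['ffmpeg', '-i', "spam's.mp4"]))
    # ffmpeg -i 'spam'"'"'s.mp4'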
 
 
 def takewhile_inclusive(pred, seq):
@@ -1233,35 +1026,89 @@ def smuggle_url(url, data):
     """ Pass additional data in a URL for internal use. """
 
     sdata = compat_urllib_parse.urlencode(
-        {u'__youtubedl_smuggle': json.dumps(data)})
-    return url + u'#' + sdata
+        {'__youtubedl_smuggle': json.dumps(data)})
+    return url + '#' + sdata
 
 
 def unsmuggle_url(smug_url, default=None):
-    if not '#__youtubedl_smuggle' in smug_url:
+    if '#__youtubedl_smuggle' not in smug_url:
         return smug_url, default
-    url, _, sdata = smug_url.rpartition(u'#')
-    jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0]
+    url, _, sdata = smug_url.rpartition('#')
+    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
     data = json.loads(jsond)
     return url, data
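
The two helpers are inverses of each other; a round-trip sketch with a made-up URL:

    from youtube_dl.utils import smuggle_url, unsmuggle_url

    url = smuggle_url('http://example.com/v/123', {'source': 'embed'})
    assert unsmuggle_url(url) == ('http://example.com/v/123', {'source': 'embed'})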
 
 
 def format_bytes(bytes):
     if bytes is None:
-        return u'N/A'
+        return 'N/A'
     if type(bytes) is str:
         bytes = float(bytes)
     if bytes == 0.0:
         exponent = 0
     else:
         exponent = int(math.log(bytes, 1024.0))
-    suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent]
+    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
     converted = float(bytes) / float(1024 ** exponent)
-    return u'%.2f%s' % (converted, suffix)
+    return '%.2f%s' % (converted, suffix)
+
+
+def parse_filesize(s):
+    if s is None:
+        return None
+
+    # The lower-case forms are of course incorrect and unofficial,
+    # but we support those too
+    _UNIT_TABLE = {
+        'B': 1,
+        'b': 1,
+        'KiB': 1024,
+        'KB': 1000,
+        'kB': 1024,
+        'Kb': 1000,
+        'MiB': 1024 ** 2,
+        'MB': 1000 ** 2,
+        'mB': 1024 ** 2,
+        'Mb': 1000 ** 2,
+        'GiB': 1024 ** 3,
+        'GB': 1000 ** 3,
+        'gB': 1024 ** 3,
+        'Gb': 1000 ** 3,
+        'TiB': 1024 ** 4,
+        'TB': 1000 ** 4,
+        'tB': 1024 ** 4,
+        'Tb': 1000 ** 4,
+        'PiB': 1024 ** 5,
+        'PB': 1000 ** 5,
+        'pB': 1024 ** 5,
+        'Pb': 1000 ** 5,
+        'EiB': 1024 ** 6,
+        'EB': 1000 ** 6,
+        'eB': 1024 ** 6,
+        'Eb': 1000 ** 6,
+        'ZiB': 1024 ** 7,
+        'ZB': 1000 ** 7,
+        'zB': 1024 ** 7,
+        'Zb': 1000 ** 7,
+        'YiB': 1024 ** 8,
+        'YB': 1000 ** 8,
+        'yB': 1024 ** 8,
+        'Yb': 1000 ** 8,
+    }
+
+    units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE)
+    m = re.match(
+        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s)
+    if not m:
+        return None
+
+    num_str = m.group('num').replace(',', '.')
+    mult = _UNIT_TABLE[m.group('unit')]
+    return int(float(num_str) * mult)
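
SI suffixes multiply by powers of 1000, binary (IEC) suffixes by powers of 1024, and a comma is accepted as the decimal separator; illustrative values:

    from youtube_dl.utils import parse_filesize

    assert parse_filesize('5 MB') == 5000000
    assert parse_filesize('5 MiB') == 5 * 1024 ** 2
    assert parse_filesize('1,5GB') == 1500000000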
 
 
 def get_term_width():
-    columns = os.environ.get('COLUMNS', None)
+    columns = compat_getenv('COLUMNS', None)
     if columns:
         return int(columns)
 
@@ -1280,8 +1127,8 @@ def month_by_name(name):
     """ Return the number of a month by (locale-independently) English name """
 
     ENGLISH_NAMES = [
-        u'January', u'February', u'March', u'April', u'May', u'June',
-        u'July', u'August', u'September', u'October', u'November', u'December']
+        'January', 'February', 'March', 'April', 'May', 'June',
+        'July', 'August', 'September', 'October', 'November', 'December']
     try:
         return ENGLISH_NAMES.index(name) + 1
     except ValueError:
@@ -1292,7 +1139,7 @@ def fix_xml_ampersands(xml_str):
     """Replace all the '&' by '&amp;' in XML"""
     return re.sub(
         r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
-        u'&amp;',
+        '&amp;',
         xml_str)
 
 
@@ -1325,7 +1172,7 @@ def remove_end(s, end):
 
 def url_basename(url):
     path = compat_urlparse.urlparse(url).path
-    return path.strip(u'/').split(u'/')[-1]
+    return path.strip('/').split('/')[-1]
 
 
 class HEADRequest(compat_urllib_request.Request):
@@ -1350,7 +1197,7 @@ def str_to_int(int_str):
     """ A more relaxed version of int_or_none """
     if int_str is None:
         return None
-    int_str = re.sub(r'[,\.\+]', u'', int_str)
+    int_str = re.sub(r'[,\.\+]', '', int_str)
     return int(int_str)
 
 
@@ -1365,22 +1212,38 @@ def parse_duration(s):
     s = s.strip()
 
     m = re.match(
-        r'(?i)(?:(?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?(?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?$', s)
+        r'''(?ix)T?
+        (?:
+            (?P<only_mins>[0-9.]+)\s*(?:mins?|minutes?)\s*|
+            (?P<only_hours>[0-9.]+)\s*(?:hours?)|
+
+            (?:
+                (?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?
+                (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
+            )?
+            (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?
+        )$''', s)
     if not m:
         return None
-    res = int(m.group('secs'))
+    res = 0
+    if m.group('only_mins'):
+        return float_or_none(m.group('only_mins'), invscale=60)
+    if m.group('only_hours'):
+        return float_or_none(m.group('only_hours'), invscale=60 * 60)
+    if m.group('secs'):
+        res += int(m.group('secs'))
     if m.group('mins'):
         res += int(m.group('mins')) * 60
-        if m.group('hours'):
-            res += int(m.group('hours')) * 60 * 60
+    if m.group('hours'):
+        res += int(m.group('hours')) * 60 * 60
     if m.group('ms'):
         res += float(m.group('ms'))
     return res
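
In addition to the H:M:S forms, the rewritten matcher accepts bare minute and hour expressions, which return floats via float_or_none; a short sketch:

    from youtube_dl.utils import parse_duration

    assert parse_duration('1:30:45') == 5445
    assert parse_duration('3 min') == 180
    assert parse_duration('2.5 hours') == 9000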
 
 
 def prepend_extension(filename, ext):
-    name, real_ext = os.path.splitext(filename) 
-    return u'{0}.{1}{2}'.format(name, ext, real_ext)
+    name, real_ext = os.path.splitext(filename)
+    return '{0}.{1}{2}'.format(name, ext, real_ext)
 
 
 def check_executable(exe, args=[]):
@@ -1393,6 +1256,25 @@ def check_executable(exe, args=[]):
     return exe
 
 
+def get_exe_version(exe, args=['--version'],
+                    version_re=r'version\s+([0-9._a-zA-Z-]+)',
+                    unrecognized='present'):
+    """ Returns the version of the specified executable,
+    or False if the executable is not present """
+    try:
+        out, err = subprocess.Popen(
+            [exe] + args,
+            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
+    except OSError:
+        return False
+    firstline = out.partition(b'\n')[0].decode('ascii', 'ignore')
+    m = re.search(version_re, firstline)
+    if m:
+        return m.group(1)
+    else:
+        return unrecognized
+
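The helper runs the executable once and scrapes the first line of its combined output: False means the binary is absent, and 'present' is returned when it runs but no version can be parsed. A usage sketch (results depend on what is installed locally):

    from youtube_dl.utils import get_exe_version

    get_exe_version('ffmpeg')          # e.g. '2.4.3', or False if not installed
    get_exe_version('no-such-binary')  # False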
+
 class PagedList(object):
     def __len__(self):
         # This is only useful for tests
@@ -1483,7 +1365,7 @@ def escape_rfc3986(s):
     """Escape non-ASCII characters as suggested by RFC 3986"""
     if sys.version_info < (3, 0) and isinstance(s, unicode):
         s = s.encode('utf-8')
-    return compat_urllib_parse.quote(s, "%/;:@&=+$,!~*'()?#[]")
+    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
 
 
 def escape_url(url):
@@ -1497,7 +1379,7 @@ def escape_url(url):
     ).geturl()
 
 try:
-    struct.pack(u'!I', 0)
+    struct.pack('!I', 0)
 except TypeError:
     # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
     def struct_pack(spec, *args):
@@ -1518,7 +1400,7 @@ def read_batch_urls(batch_fd):
     def fixup(url):
         if not isinstance(url, compat_str):
             url = url.decode('utf-8', 'replace')
-        BOM_UTF8 = u'\xef\xbb\xbf'
+        BOM_UTF8 = '\xef\xbb\xbf'
         if url.startswith(BOM_UTF8):
             url = url[len(BOM_UTF8):]
         url = url.strip()
@@ -1557,15 +1439,6 @@ def parse_xml(s):
     return tree
 
 
-if sys.version_info < (3, 0) and sys.platform == 'win32':
-    def compat_getpass(prompt, *args, **kwargs):
-        if isinstance(prompt, compat_str):
-            prompt = prompt.encode(preferredencoding())
-        return getpass.getpass(prompt, *args, **kwargs)
-else:
-    compat_getpass = getpass.getpass
-
-
 US_RATINGS = {
     'G': 0,
     'PG': 10,
@@ -1583,7 +1456,8 @@ def parse_age_limit(s):
 
 
 def strip_jsonp(code):
-    return re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code)
+    return re.sub(
+        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
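
The JSONP wrapper regex now also tolerates a trailing '//' comment after the closing parenthesis, e.g.:

    from youtube_dl.utils import strip_jsonp

    assert strip_jsonp('callback({"status": "ok"}); // done') == '{"status": "ok"}'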
 
 
 def js_to_json(code):
@@ -1623,18 +1497,6 @@ def qualities(quality_ids):
 
 DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
 
-try:
-    subprocess_check_output = subprocess.check_output
-except AttributeError:
-    def subprocess_check_output(*args, **kwargs):
-        assert 'input' not in kwargs
-        p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
-        output, _ = p.communicate()
-        ret = p.poll()
-        if ret:
-            raise subprocess.CalledProcessError(ret, p.args, output=output)
-        return output
-
 
 def limit_length(s, length):
     """ Add ellipses to overly long strings """
@@ -1644,3 +1506,28 @@ def limit_length(s, length):
     if len(s) > length:
         return s[:length - len(ELLIPSES)] + ELLIPSES
     return s
+
+
+def version_tuple(v):
+    return tuple(int(e) for e in re.split(r'[-.]', v))
+
+
+def is_outdated_version(version, limit, assume_new=True):
+    if not version:
+        return not assume_new
+    try:
+        return version_tuple(version) < version_tuple(limit)
+    except ValueError:
+        return not assume_new
+
+
+def ytdl_is_updateable():
+    """ Returns if youtube-dl can be updated with -U """
+    from zipimport import zipimporter
+
+    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
+
+
+def args_to_str(args):
+    # Get a short string representation for a subprocess command
+    return ' '.join(shlex_quote(a) for a in args)
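
version_tuple() splits on dots and hyphens so that date-based version strings compare numerically, which is what is_outdated_version() relies on; a sketch using the version strings from this very diff:

    from youtube_dl.utils import is_outdated_version, version_tuple

    assert version_tuple('2014.12.12.7') == (2014, 12, 12, 7)
    assert is_outdated_version('2014.10.05.2', '2014.12.12.7')
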
index 4f0d486b90eea895adb8a2677e8ab213f846b0ca..da959ea85880292e402cf194ec9c36a8da271944 100644 (file)
@@ -1,2 +1,3 @@
+from __future__ import unicode_literals
 
-__version__ = '2014.10.05.2'
+__version__ = '2014.12.12.7'