Merge branch '5tv' of https://github.com/hlintala/youtube-dl into hlintala-5tv
author    Sergey M․ <dstftw@gmail.com>
Fri, 12 Jun 2015 10:34:28 +0000 (16:34 +0600)
committer Sergey M․ <dstftw@gmail.com>
Fri, 12 Jun 2015 10:34:28 +0000 (16:34 +0600)
31 files changed:
youtube_dl/YoutubeDL.py
youtube_dl/extractor/__init__.py
youtube_dl/extractor/bilibili.py
youtube_dl/extractor/brightcove.py
youtube_dl/extractor/cbs.py
youtube_dl/extractor/cnet.py
youtube_dl/extractor/common.py
youtube_dl/extractor/discovery.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/instagram.py
youtube_dl/extractor/iqiyi.py [new file with mode: 0644]
youtube_dl/extractor/izlesene.py
youtube_dl/extractor/kickstarter.py
youtube_dl/extractor/nfl.py
youtube_dl/extractor/noco.py
youtube_dl/extractor/pornovoisines.py
youtube_dl/extractor/qqmusic.py
youtube_dl/extractor/rtbf.py
youtube_dl/extractor/rtlnl.py
youtube_dl/extractor/ruutu.py [new file with mode: 0644]
youtube_dl/extractor/spiegeltv.py
youtube_dl/extractor/sunporno.py
youtube_dl/extractor/teamcoco.py
youtube_dl/extractor/theplatform.py
youtube_dl/extractor/tlc.py
youtube_dl/extractor/tube8.py
youtube_dl/extractor/turbo.py
youtube_dl/extractor/tvc.py [new file with mode: 0644]
youtube_dl/extractor/youtube.py
youtube_dl/options.py
youtube_dl/update.py

diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index aa6ec9d9a0721efbd5257750fda37b86fe024d8b..b1f792d4ef8815dc8cfb4471cdd6f0e47a93e8f7 100755 (executable)
@@ -1016,13 +1016,13 @@ class YoutubeDL(object):
             info_dict['display_id'] = info_dict['id']
 
         if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
-            # Working around negative timestamps in Windows
-            # (see http://bugs.python.org/issue1646728)
-            if info_dict['timestamp'] < 0 and os.name == 'nt':
-                info_dict['timestamp'] = 0
-            upload_date = datetime.datetime.utcfromtimestamp(
-                info_dict['timestamp'])
-            info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
+            # Working around out-of-range timestamp values (e.g. negative ones on Windows,
+            # see http://bugs.python.org/issue1646728)
+            try:
+                upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
+                info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
+            except (ValueError, OverflowError, OSError):
+                pass
 
         if self.params.get('listsubtitles', False):
             if 'automatic_captions' in info_dict:
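
The try/except added above guards the same failure as the removed Windows-only clamp, plus any other out-of-range value. A minimal standalone sketch of the behaviour being handled (not part of the patch; the exact exception type varies by platform and Python version):

    import datetime

    def upload_date_or_none(timestamp):
        # Negative timestamps raise OSError/OverflowError on Windows
        # (http://bugs.python.org/issue1646728); values far outside the
        # representable range raise ValueError/OverflowError everywhere.
        try:
            return datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y%m%d')
        except (ValueError, OverflowError, OSError):
            return None

    print(upload_date_or_none(1303099200))  # '20110418'
    print(upload_date_or_none(10 ** 18))    # None (out of range)
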
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index d10275d03d0373178c1b2ffbf5fc5540d3567bd1..3bc62e9d5b222c20ecbc6239f6f3f05199bc4eba 100644 (file)
@@ -230,6 +230,7 @@ from .infoq import InfoQIE
 from .instagram import InstagramIE, InstagramUserIE
 from .internetvideoarchive import InternetVideoArchiveIE
 from .iprima import IPrimaIE
+from .iqiyi import IqiyiIE
 from .ivi import (
     IviIE,
     IviCompilationIE
@@ -454,6 +455,7 @@ from .rutube import (
     RutubePersonIE,
 )
 from .rutv import RUTVIE
+from .ruutu import RuutuIE
 from .sandia import SandiaIE
 from .safari import (
     SafariIE,
@@ -583,6 +585,10 @@ from .tv2 import (
     TV2ArticleIE,
 )
 from .tv4 import TV4IE
+from .tvc import (
+    TVCIE,
+    TVCArticleIE,
+)
 from .tvigle import TvigleIE
 from .tvp import TvpIE, TvpSeriesIE
 from .tvplay import TVPlayIE
diff --git a/youtube_dl/extractor/bilibili.py b/youtube_dl/extractor/bilibili.py
index 2103ed73aad860738bba5108ee27a86fd921d29c..bf60450c2f3753aa64d5ae11069bec98b67f23dc 100644 (file)
@@ -105,7 +105,7 @@ class BiliBiliIE(InfoExtractor):
                 'filesize': int_or_none(
                     lq_durl.find('./size'), get_attr='text'),
             }]
-            if hq_durl:
+            if hq_durl is not None:
                 formats.append({
                     'format_id': 'hq',
                     'quality': 2,
diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 4f60d53660fa7777b9e1b6152967ce2e7e567ec9..c1d4320e1ce73b6d9c5a9313eee61a7e05f9daf9 100644 (file)
@@ -172,7 +172,7 @@ class BrightcoveIE(InfoExtractor):
         """Return a list of all Brightcove URLs from the webpage """
 
         url_m = re.search(
-            r'<meta\s+property="og:video"\s+content="(https?://(?:secure|c)\.brightcove.com/[^"]+)"',
+            r'<meta\s+property=[\'"]og:video[\'"]\s+content=[\'"](https?://(?:secure|c)\.brightcove.com/[^\'"]+)[\'"]',
             webpage)
         if url_m:
             url = unescapeHTML(url_m.group(1))
diff --git a/youtube_dl/extractor/cbs.py b/youtube_dl/extractor/cbs.py
index 1ceb9d8d9df6c0268e33de5e34c01a245e134e05..75fffb1563ae9f95bf862ad156111b6962a8429e 100644 (file)
@@ -4,12 +4,13 @@ from .common import InfoExtractor
 
 
 class CBSIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?cbs\.com/shows/[^/]+/(?:video|artist)/(?P<id>[^/]+)/.*'
+    _VALID_URL = r'https?://(?:www\.)?(?:cbs\.com/shows/[^/]+/(?:video|artist)|colbertlateshow\.com/(?:video|podcasts))/[^/]+/(?P<id>[^/]+)'
 
     _TESTS = [{
         'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
         'info_dict': {
             'id': '4JUVEwq3wUT7',
+            'display_id': 'connect-chat-feat-garth-brooks',
             'ext': 'flv',
             'title': 'Connect Chat feat. Garth Brooks',
             'description': 'Connect with country music singer Garth Brooks, as he chats with fans on Wednesday November 27, 2013. Be sure to tune in to Garth Brooks: Live from Las Vegas, Friday November 29, at 9/8c on CBS!',
@@ -24,6 +25,7 @@ class CBSIE(InfoExtractor):
         'url': 'http://www.cbs.com/shows/liveonletterman/artist/221752/st-vincent/',
         'info_dict': {
             'id': 'WWF_5KqY3PK1',
+            'display_id': 'st-vincent',
             'ext': 'flv',
             'title': 'Live on Letterman - St. Vincent',
             'description': 'Live On Letterman: St. Vincent in concert from New York\'s Ed Sullivan Theater on Tuesday, July 16, 2014.',
@@ -34,12 +36,23 @@ class CBSIE(InfoExtractor):
             'skip_download': True,
         },
         '_skip': 'Blocked outside the US',
+    }, {
+        'url': 'http://colbertlateshow.com/video/8GmB0oY0McANFvp2aEffk9jZZZ2YyXxy/the-colbeard/',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.colbertlateshow.com/podcasts/dYSwjqPs_X1tvbV_P2FcPWRa_qT6akTC/in-the-bad-room-with-stephen/',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
         real_id = self._search_regex(
-            r"video\.settings\.pid\s*=\s*'([^']+)';",
+            [r"video\.settings\.pid\s*=\s*'([^']+)';", r"cbsplayer\.pid\s*=\s*'([^']+)';"],
             webpage, 'real video ID')
-        return self.url_result('theplatform:%s' % real_id)
+        return {
+            '_type': 'url_transparent',
+            'ie_key': 'ThePlatform',
+            'url': 'theplatform:%s' % real_id,
+            'display_id': display_id,
+        }
diff --git a/youtube_dl/extractor/cnet.py b/youtube_dl/extractor/cnet.py
index 3145b30514ea2a075f92077b9f87b64c9e8820a7..5dd69bff7ac73bcc0adc4d91c614045ddf116a9c 100644 (file)
@@ -11,7 +11,7 @@ from ..utils import (
 
 class CNETIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?cnet\.com/videos/(?P<id>[^/]+)/'
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/',
         'info_dict': {
             'id': '56f4ea68-bd21-4852-b08c-4de5b8354c60',
@@ -25,7 +25,20 @@ class CNETIE(InfoExtractor):
         'params': {
             'skip_download': 'requires rtmpdump',
         }
-    }
+    }, {
+        'url': 'http://www.cnet.com/videos/whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187/',
+        'info_dict': {
+            'id': '56527b93-d25d-44e3-b738-f989ce2e49ba',
+            'ext': 'flv',
+            'description': 'Khail and Ashley wonder what other civic woes can be solved by self-tweeting objects, investigate a new kind of VR camera and watch an origami robot self-assemble, walk, climb, dig and dissolve. #TDPothole',
+            'uploader_id': 'b163284d-6b73-44fc-b3e6-3da66c392d40',
+            'uploader': 'Ashley Esqueda',
+            'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)',
+        },
+        'params': {
+            'skip_download': True,  # requires rtmpdump
+        },
+    }]
 
     def _real_extract(self, url):
         display_id = self._match_id(url)
@@ -42,7 +55,7 @@ class CNETIE(InfoExtractor):
             raise ExtractorError('Cannot find video data')
 
         mpx_account = data['config']['players']['default']['mpx_account']
-        vid = vdata['files']['rtmp']
+        vid = vdata['files'].get('rtmp', vdata['files']['hds'])
         tp_link = 'http://link.theplatform.com/s/%s/%s' % (mpx_account, vid)
 
         video_id = vdata['id']
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index cecf917ffb67040739bbacf573297ba891ec9ea1..49e4dc7109e151ae124ed1aac15a9762d00eac21 100644 (file)
@@ -846,7 +846,7 @@ class InfoExtractor(object):
 
     def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                               entry_protocol='m3u8', preference=None,
-                              m3u8_id=None):
+                              m3u8_id=None, note=None, errnote=None):
 
         formats = [{
             'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
@@ -865,8 +865,8 @@ class InfoExtractor(object):
 
         m3u8_doc = self._download_webpage(
             m3u8_url, video_id,
-            note='Downloading m3u8 information',
-            errnote='Failed to download m3u8 information')
+            note=note or 'Downloading m3u8 information',
+            errnote=errnote or 'Failed to download m3u8 information')
         last_info = None
         last_media = None
         kv_rex = re.compile(
diff --git a/youtube_dl/extractor/discovery.py b/youtube_dl/extractor/discovery.py
index d3e6675283cddcb8f6a6dfffbfbd1e1ea3da11bc..d6723ecf26ea67356b288df6e5f3bf612141b91a 100644 (file)
@@ -2,19 +2,19 @@ from __future__ import unicode_literals
 
 from .common import InfoExtractor
 from ..utils import (
+    parse_duration,
     parse_iso8601,
-    int_or_none,
 )
+from ..compat import compat_str
 
 
 class DiscoveryIE(InfoExtractor):
     _VALID_URL = r'http://www\.discovery\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9_\-]*)(?:\.htm)?'
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm',
-        'md5': '3c69d77d9b0d82bfd5e5932a60f26504',
         'info_dict': {
-            'id': 'mission-impossible-outtakes',
-            'ext': 'flv',
+            'id': '20769',
+            'ext': 'mp4',
             'title': 'Mission Impossible Outtakes',
             'description': ('Watch Jamie Hyneman and Adam Savage practice being'
                             ' each other -- to the point of confusing Jamie\'s dog -- and '
@@ -24,22 +24,36 @@ class DiscoveryIE(InfoExtractor):
             'timestamp': 1303099200,
             'upload_date': '20110418',
         },
-    }
+        'params': {
+            'skip_download': True,  # requires ffmpeg
+        }
+    }, {
+        'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mythbusters-the-simpsons',
+        'info_dict': {
+            'id': 'mythbusters-the-simpsons',
+            'title': 'MythBusters: The Simpsons',
+        },
+        'playlist_count': 9,
+    }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
+        info = self._download_json(url + '?flat=1', video_id)
 
-        info = self._parse_json(self._search_regex(
-            r'(?s)<script type="application/ld\+json">(.*?)</script>',
-            webpage, 'video info'), video_id)
+        video_title = info.get('playlist_title') or info.get('video_title')
 
-        return {
-            'id': video_id,
-            'title': info['name'],
-            'url': info['contentURL'],
-            'description': info.get('description'),
-            'thumbnail': info.get('thumbnailUrl'),
-            'timestamp': parse_iso8601(info.get('uploadDate')),
-            'duration': int_or_none(info.get('duration')),
-        }
+        entries = [{
+            'id': compat_str(video_info['id']),
+            'formats': self._extract_m3u8_formats(
+                video_info['src'], video_id, ext='mp4',
+                note='Download m3u8 information for video %d' % (idx + 1)),
+            'title': video_info['title'],
+            'description': video_info.get('description'),
+            'duration': parse_duration(video_info.get('video_length')),
+            'webpage_url': video_info.get('href'),
+            'thumbnail': video_info.get('thumbnailURL'),
+            'alt_title': video_info.get('secondary_title'),
+            'timestamp': parse_iso8601(video_info.get('publishedDate')),
+        } for idx, video_info in enumerate(info['playlist'])]
+
+        return self.playlist_result(entries, video_id, video_title)
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 96ca398de7ce46599e532a5d1adb4a05ac2fb939..6be9e632923b157667b18db7a607561cc24bd59b 100644 (file)
@@ -34,6 +34,7 @@ from .brightcove import BrightcoveIE
 from .nbc import NBCSportsVPlayerIE
 from .ooyala import OoyalaIE
 from .rutv import RUTVIE
+from .tvc import TVCIE
 from .sportbox import SportBoxEmbedIE
 from .smotri import SmotriIE
 from .condenast import CondeNastIE
@@ -291,6 +292,15 @@ class GenericIE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        # TVC embed
+        {
+            'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
+            'info_dict': {
+                'id': '55304',
+                'ext': 'mp4',
+                'title': 'Дошкольное воспитание',
+            },
+        },
         # SportBox embed
         {
             'url': 'http://www.vestifinance.ru/articles/25753',
@@ -789,6 +799,18 @@ class GenericIE(InfoExtractor):
                 # rtmpe downloads
                 'skip_download': True,
             }
+        },
+        # Brightcove URL in single quotes
+        {
+            'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
+            'md5': '4ae374f1f8b91c889c4b9203c8c752af',
+            'info_dict': {
+                'id': '4255764656001',
+                'ext': 'mp4',
+                'title': 'SN Presents: Russell Martin, World Citizen',
+                'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
+                'uploader': 'Rogers Sportsnet',
+            },
         }
     ]
 
@@ -1061,7 +1083,7 @@ class GenericIE(InfoExtractor):
 
         # Look for embedded rtl.nl player
         matches = re.findall(
-            r'<iframe\s+(?:[a-zA-Z-]+="[^"]+"\s+)*?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+video_embed[^"]+)"',
+            r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"',
             webpage)
         if matches:
             return _playlist_from_matches(matches, ie='RtlNl')
@@ -1289,6 +1311,11 @@ class GenericIE(InfoExtractor):
         if rutv_url:
             return self.url_result(rutv_url, 'RUTV')
 
+        # Look for embedded TVC player
+        rutv_url = TVCIE._extract_url(webpage)
+        if rutv_url:
+            return self.url_result(rutv_url, 'TVC')
+
         # Look for embedded SportBox player
         sportbox_urls = SportBoxEmbedIE._extract_urls(webpage)
         if sportbox_urls:
diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py
index b107557880345157fcf66cb7e6e1b4a92b25af2e..3d78f78c46d1ad004339bc33ebcb09d1286e5092 100644 (file)
@@ -3,7 +3,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+    int_or_none,
+    limit_length,
+)
 
 
 class InstagramIE(InfoExtractor):
@@ -100,11 +103,13 @@ class InstagramUserIE(InfoExtractor):
                 thumbnails_el = it.get('images', {})
                 thumbnail = thumbnails_el.get('thumbnail', {}).get('url')
 
-                title = it.get('caption', {}).get('text', it['id'])
+                # In some cases caption is null, which corresponds to None in
+                # Python, so it.get('caption', {}) gives None rather than {}.
+                title = (it.get('caption') or {}).get('text', it['id'])
 
                 entries.append({
                     'id': it['id'],
-                    'title': title,
+                    'title': limit_length(title, 80),
                     'formats': formats,
                     'thumbnail': thumbnail,
                     'webpage_url': it.get('link'),
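
The `or {}` fallback above is needed because dict.get() only uses its default when the key is missing; a JSON null caption arrives as an existing key with value None. A tiny standalone sketch (the dict contents are made up):

    it = {'id': '123456', 'caption': None}  # caption was null in the API response

    # it.get('caption', {}) returns None here (the key exists), so chaining
    # .get('text', ...) onto it would raise AttributeError; `or {}` substitutes
    # an empty dict first and the title falls back to the id.
    title = (it.get('caption') or {}).get('text', it['id'])
    print(title)  # '123456'
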
diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py
new file mode 100644 (file)
index 0000000..9106dd0
--- /dev/null
@@ -0,0 +1,296 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import hashlib
+import math
+import os.path
+import random
+import re
+import time
+import uuid
+import zlib
+
+from .common import InfoExtractor
+from ..compat import compat_urllib_parse
+from ..utils import (
+    ExtractorError,
+    url_basename,
+)
+
+
+class IqiyiIE(InfoExtractor):
+    IE_NAME = 'iqiyi'
+
+    _VALID_URL = r'http://(?:www\.)?iqiyi\.com/v_.+?\.html'
+
+    _TESTS = [{
+        'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
+        'md5': '2cb594dc2781e6c941a110d8f358118b',
+        'info_dict': {
+            'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
+            'title': '美国德州空中惊现奇异云团 酷似UFO',
+            'ext': 'f4v',
+        }
+    }, {
+        'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
+        'info_dict': {
+            'id': 'e3f585b550a280af23c98b6cb2be19fb',
+            'title': '名侦探柯南第752集',
+        },
+        'playlist': [{
+            'md5': '7e49376fecaffa115d951634917fe105',
+            'info_dict': {
+                'id': 'e3f585b550a280af23c98b6cb2be19fb_part1',
+                'ext': 'f4v',
+                'title': '名侦探柯南第752集',
+            },
+        }, {
+            'md5': '41b75ba13bb7ac0e411131f92bc4f6ca',
+            'info_dict': {
+                'id': 'e3f585b550a280af23c98b6cb2be19fb_part2',
+                'ext': 'f4v',
+                'title': '名侦探柯南第752集',
+            },
+        }, {
+            'md5': '0cee1dd0a3d46a83e71e2badeae2aab0',
+            'info_dict': {
+                'id': 'e3f585b550a280af23c98b6cb2be19fb_part3',
+                'ext': 'f4v',
+                'title': '名侦探柯南第752集',
+            },
+        }, {
+            'md5': '4f8ad72373b0c491b582e7c196b0b1f9',
+            'info_dict': {
+                'id': 'e3f585b550a280af23c98b6cb2be19fb_part4',
+                'ext': 'f4v',
+                'title': '名侦探柯南第752集',
+            },
+        }, {
+            'md5': 'd89ad028bcfad282918e8098e811711d',
+            'info_dict': {
+                'id': 'e3f585b550a280af23c98b6cb2be19fb_part5',
+                'ext': 'f4v',
+                'title': '名侦探柯南第752集',
+            },
+        }, {
+            'md5': '9cb1e5c95da25dff0660c32ae50903b7',
+            'info_dict': {
+                'id': 'e3f585b550a280af23c98b6cb2be19fb_part6',
+                'ext': 'f4v',
+                'title': '名侦探柯南第752集',
+            },
+        }, {
+            'md5': '155116e0ff1867bbc9b98df294faabc9',
+            'info_dict': {
+                'id': 'e3f585b550a280af23c98b6cb2be19fb_part7',
+                'ext': 'f4v',
+                'title': '名侦探柯南第752集',
+            },
+        }, {
+            'md5': '53f5db77622ae14fa493ed2a278a082b',
+            'info_dict': {
+                'id': 'e3f585b550a280af23c98b6cb2be19fb_part8',
+                'ext': 'f4v',
+                'title': '名侦探柯南第752集',
+            },
+        }],
+    }]
+
+    _FORMATS_MAP = [
+        ('1', 'h6'),
+        ('2', 'h5'),
+        ('3', 'h4'),
+        ('4', 'h3'),
+        ('5', 'h2'),
+        ('10', 'h1'),
+    ]
+
+    def construct_video_urls(self, data, video_id, _uuid):
+        def do_xor(x, y):
+            a = y % 3
+            if a == 1:
+                return x ^ 121
+            if a == 2:
+                return x ^ 72
+            return x ^ 103
+
+        def get_encode_code(l):
+            a = 0
+            b = l.split('-')
+            c = len(b)
+            s = ''
+            for i in range(c - 1, -1, -1):
+                a = do_xor(int(b[c - i - 1], 16), i)
+                s += chr(a)
+            return s[::-1]
+
+        def get_path_key(x, format_id, segment_index):
+            mg = ')(*&^flash@#$%a'
+            tm = self._download_json(
+                'http://data.video.qiyi.com/t?tn=' + str(random.random()), video_id,
+                note='Download path key of segment %d for format %s' % (segment_index + 1, format_id)
+            )['t']
+            t = str(int(math.floor(int(tm) / (600.0))))
+            return hashlib.md5((t + mg + x).encode('utf8')).hexdigest()
+
+        video_urls_dict = {}
+        for format_item in data['vp']['tkl'][0]['vs']:
+            if 0 < int(format_item['bid']) <= 10:
+                format_id = self.get_format(format_item['bid'])
+            else:
+                continue
+
+            video_urls = []
+
+            video_urls_info = format_item['fs']
+            if not format_item['fs'][0]['l'].startswith('/'):
+                t = get_encode_code(format_item['fs'][0]['l'])
+                if t.endswith('mp4'):
+                    video_urls_info = format_item['flvs']
+
+            for segment_index, segment in enumerate(video_urls_info):
+                vl = segment['l']
+                if not vl.startswith('/'):
+                    vl = get_encode_code(vl)
+                key = get_path_key(
+                    vl.split('/')[-1].split('.')[0], format_id, segment_index)
+                filesize = segment['b']
+                base_url = data['vp']['du'].split('/')
+                base_url.insert(-1, key)
+                base_url = '/'.join(base_url)
+                param = {
+                    'su': _uuid,
+                    'qyid': uuid.uuid4().hex,
+                    'client': '',
+                    'z': '',
+                    'bt': '',
+                    'ct': '',
+                    'tn': str(int(time.time()))
+                }
+                api_video_url = base_url + vl + '?' + \
+                    compat_urllib_parse.urlencode(param)
+                js = self._download_json(
+                    api_video_url, video_id,
+                    note='Download video info of segment %d for format %s' % (segment_index + 1, format_id))
+                video_url = js['l']
+                video_urls.append(
+                    (video_url, filesize))
+
+            video_urls_dict[format_id] = video_urls
+        return video_urls_dict
+
+    def get_format(self, bid):
+        matched_format_ids = [_format_id for _bid, _format_id in self._FORMATS_MAP if _bid == str(bid)]
+        return matched_format_ids[0] if len(matched_format_ids) else None
+
+    def get_bid(self, format_id):
+        matched_bids = [_bid for _bid, _format_id in self._FORMATS_MAP if _format_id == format_id]
+        return matched_bids[0] if len(matched_bids) else None
+
+    def get_raw_data(self, tvid, video_id, enc_key, _uuid):
+        tm = str(int(time.time()))
+        param = {
+            'key': 'fvip',
+            'src': hashlib.md5(b'youtube-dl').hexdigest(),
+            'tvId': tvid,
+            'vid': video_id,
+            'vinfo': 1,
+            'tm': tm,
+            'enc': hashlib.md5(
+                (enc_key + tm + tvid).encode('utf8')).hexdigest(),
+            'qyid': _uuid,
+            'tn': random.random(),
+            'um': 0,
+            'authkey': hashlib.md5(
+                (tm + tvid).encode('utf8')).hexdigest()
+        }
+
+        api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
+            compat_urllib_parse.urlencode(param)
+        raw_data = self._download_json(api_url, video_id)
+        return raw_data
+
+    def get_enc_key(self, swf_url, video_id):
+        filename, _ = os.path.splitext(url_basename(swf_url))
+        enc_key_json = self._downloader.cache.load('iqiyi-enc-key', filename)
+        if enc_key_json is not None:
+            return enc_key_json[0]
+
+        req = self._request_webpage(
+            swf_url, video_id, note='download swf content')
+        cn = req.read()
+        cn = zlib.decompress(cn[8:])
+        pt = re.compile(b'MixerRemote\x08(?P<enc_key>.+?)\$&vv')
+        enc_key = self._search_regex(pt, cn, 'enc_key').decode('utf8')
+
+        self._downloader.cache.store('iqiyi-enc-key', filename, [enc_key])
+
+        return enc_key
+
+    def _real_extract(self, url):
+        webpage = self._download_webpage(
+            url, 'temp_id', note='download video page')
+        tvid = self._search_regex(
+            r'data-player-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid')
+        video_id = self._search_regex(
+            r'data-player-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id')
+        swf_url = self._search_regex(
+            r'(http://[^\'"]+MainPlayer[^.]+\.swf)', webpage, 'swf player URL')
+        _uuid = uuid.uuid4().hex
+
+        enc_key = self.get_enc_key(swf_url, video_id)
+
+        raw_data = self.get_raw_data(tvid, video_id, enc_key, _uuid)
+
+        if raw_data['code'] != 'A000000':
+            raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])
+
+        if not raw_data['data']['vp']['tkl']:
+            raise ExtractorError('iQiyi VIP videos are not supported')
+
+        data = raw_data['data']
+
+        title = data['vi']['vn']
+
+        # generate video_urls_dict
+        video_urls_dict = self.construct_video_urls(
+            data, video_id, _uuid)
+
+        # construct info
+        entries = []
+        for format_id in video_urls_dict:
+            video_urls = video_urls_dict[format_id]
+            for i, video_url_info in enumerate(video_urls):
+                if len(entries) < i + 1:
+                    entries.append({'formats': []})
+                entries[i]['formats'].append(
+                    {
+                        'url': video_url_info[0],
+                        'filesize': video_url_info[-1],
+                        'format_id': format_id,
+                        'preference': int(self.get_bid(format_id))
+                    }
+                )
+
+        for i in range(len(entries)):
+            self._sort_formats(entries[i]['formats'])
+            entries[i].update(
+                {
+                    'id': '%s_part%d' % (video_id, i + 1),
+                    'title': title,
+                }
+            )
+
+        if len(entries) > 1:
+            info = {
+                '_type': 'multi_video',
+                'id': video_id,
+                'title': title,
+                'entries': entries,
+            }
+        else:
+            info = entries[0]
+            info['id'] = video_id
+            info['title'] = title
+
+        return info
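
For reference, the path key used in construct_video_urls() above boils down to an md5 over a 10-minute bucket of the server time, the magic constant and the segment file name stem. A standalone sketch with made-up inputs (the timestamp and file stem are hypothetical):

    import hashlib
    import math

    def path_key(server_time, file_stem, magic=')(*&^flash@#$%a'):
        # same arithmetic as get_path_key(): 10-minute bucket of the server time
        t = str(int(math.floor(int(server_time) / 600.0)))
        return hashlib.md5((t + magic + file_stem).encode('utf8')).hexdigest()

    print(path_key(1434100000, 'abcdef0123456789'))
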
diff --git a/youtube_dl/extractor/izlesene.py b/youtube_dl/extractor/izlesene.py
index 99a1361f844c15520c842cd9fffa1e5c2e9b6974..bc226fa67c064b991674a510b1eba54d40dc67e0 100644 (file)
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_urllib_parse_unquote
 from ..utils import (
     determine_ext,
     float_or_none,
@@ -30,7 +31,7 @@ class IzleseneIE(InfoExtractor):
                 'description': 'md5:253753e2655dde93f59f74b572454f6d',
                 'thumbnail': 're:^http://.*\.jpg',
                 'uploader_id': 'pelikzzle',
-                'timestamp': 1404302298,
+                'timestamp': int,
                 'upload_date': '20140702',
                 'duration': 95.395,
                 'age_limit': 0,
@@ -46,7 +47,7 @@ class IzleseneIE(InfoExtractor):
                 'description': 'Tarkan Dortmund 2006 Konseri',
                 'thumbnail': 're:^http://.*\.jpg',
                 'uploader_id': 'parlayankiz',
-                'timestamp': 1163322193,
+                'timestamp': int,
                 'upload_date': '20061112',
                 'duration': 253.666,
                 'age_limit': 0,
@@ -67,9 +68,9 @@ class IzleseneIE(InfoExtractor):
 
         uploader = self._html_search_regex(
             r"adduserUsername\s*=\s*'([^']+)';",
-            webpage, 'uploader', fatal=False, default='')
+            webpage, 'uploader', fatal=False)
         timestamp = parse_iso8601(self._html_search_meta(
-            'uploadDate', webpage, 'upload date', fatal=False))
+            'uploadDate', webpage, 'upload date'))
 
         duration = float_or_none(self._html_search_regex(
             r'"videoduration"\s*:\s*"([^"]+)"',
@@ -86,8 +87,7 @@ class IzleseneIE(InfoExtractor):
 
         # Might be empty for some videos.
         streams = self._html_search_regex(
-            r'"qualitylevel"\s*:\s*"([^"]+)"',
-            webpage, 'streams', fatal=False, default='')
+            r'"qualitylevel"\s*:\s*"([^"]+)"', webpage, 'streams', default='')
 
         formats = []
         if streams:
@@ -95,15 +95,15 @@ class IzleseneIE(InfoExtractor):
                 quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
                 formats.append({
                     'format_id': '%sp' % quality if quality else 'sd',
-                    'url': url,
+                    'url': compat_urllib_parse_unquote(url),
                     'ext': ext,
                 })
         else:
             stream_url = self._search_regex(
-                r'"streamurl"\s?:\s?"([^"]+)"', webpage, 'stream URL')
+                r'"streamurl"\s*:\s*"([^"]+)"', webpage, 'stream URL')
             formats.append({
                 'format_id': 'sd',
-                'url': stream_url,
+                'url': compat_urllib_parse_unquote(stream_url),
                 'ext': ext,
             })
 
diff --git a/youtube_dl/extractor/kickstarter.py b/youtube_dl/extractor/kickstarter.py
index 7d4b57056509383fdc082a68c1650f38dc258763..1d391e69ff7e0aba1b78ae5e32792b2dca839943 100644 (file)
@@ -28,6 +28,14 @@ class KickStarterIE(InfoExtractor):
             'uploader': 'Pebble Technology',
             'title': 'Pebble iOS Notifications',
         }
+    }, {
+        'url': 'https://www.kickstarter.com/projects/1420158244/power-drive-2000/widget/video.html',
+        'info_dict': {
+            'id': '1420158244',
+            'ext': 'mp4',
+            'title': 'Power Drive 2000',
+        },
+        'expected_warnings': ['OpenGraph description'],
     }]
 
     def _real_extract(self, url):
@@ -48,10 +56,15 @@ class KickStarterIE(InfoExtractor):
                 'title': title,
             }
 
+        thumbnail = self._og_search_thumbnail(webpage, default=None)
+        if thumbnail is None:
+            thumbnail = self._html_search_regex(
+                r'<img[^>]+class="[^"]+\s*poster\s*[^"]+"[^>]+src="([^"]+)"',
+                webpage, 'thumbnail image', fatal=False)
         return {
             'id': video_id,
             'url': video_url,
             'title': title,
             'description': self._og_search_description(webpage),
-            'thumbnail': self._og_search_thumbnail(webpage),
+            'thumbnail': thumbnail,
         }
diff --git a/youtube_dl/extractor/nfl.py b/youtube_dl/extractor/nfl.py
index 2684dd250aa65e22903612f4a1780fc8f701296a..dc54634a58e440fc70ae9bcb3e7d5781981b2b1e 100644 (file)
@@ -19,7 +19,7 @@ class NFLIE(InfoExtractor):
     _VALID_URL = r'''(?x)https?://
         (?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/
         (?:.+?/)*
-        (?P<id>(?:\d[a-z]{2}\d{13}|\w{8}\-(?:\w{4}\-){3}\w{12}))'''
+        (?P<id>(?:[a-z0-9]{16}|\w{8}\-(?:\w{4}\-){3}\w{12}))'''
     _TESTS = [
         {
             'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
@@ -58,6 +58,10 @@ class NFLIE(InfoExtractor):
                 'upload_date': '20150202',
             },
         },
+        {
+            'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood',
+            'only_matching': True,
+        }
     ]
 
     @staticmethod
diff --git a/youtube_dl/extractor/noco.py b/youtube_dl/extractor/noco.py
index 664dc81d47ce7af613636022f4e540dffd67f8b6..5bbd2dcf66294f5f0e21b6aae000f9ddecd5c051 100644 (file)
@@ -166,6 +166,10 @@ class NocoIE(InfoExtractor):
         self._sort_formats(formats)
 
         timestamp = parse_iso8601(show.get('online_date_start_utc'), ' ')
+
+        if timestamp is not None and timestamp < 0:
+            timestamp = None
+
         uploader = show.get('partner_name')
         uploader_id = show.get('partner_key')
         duration = float_or_none(show.get('duration_ms'), 1000)
diff --git a/youtube_dl/extractor/pornovoisines.py b/youtube_dl/extractor/pornovoisines.py
index 9688ed94898de231e6c7f1c9dc28d3779da10311..eba4dfbb39576bff355b722c997dd31e07ce370f 100644 (file)
@@ -34,7 +34,7 @@ class PornoVoisinesIE(InfoExtractor):
             'duration': 120,
             'view_count': int,
             'average_rating': float,
-            'categories': ['Débutante', 'Scénario', 'Sodomie'],
+            'categories': ['Débutantes', 'Scénario', 'Sodomie'],
             'age_limit': 18,
         }
     }
@@ -71,7 +71,7 @@ class PornoVoisinesIE(InfoExtractor):
         view_count = int_or_none(self._search_regex(
             r'(\d+) vues', webpage, 'view count', fatal=False))
         average_rating = self._search_regex(
-            r'Note : (\d+,\d+)', webpage, 'average rating', fatal=False)
+            r'Note\s*:\s*(\d+(?:,\d+)?)', webpage, 'average rating', fatal=False)
         if average_rating:
             average_rating = float_or_none(average_rating.replace(',', '.'))
 
diff --git a/youtube_dl/extractor/qqmusic.py b/youtube_dl/extractor/qqmusic.py
index f773332a895b1b147ae9609229700a0c887a362f..bafa81c21c12f15ac8162f8303f56615124b260b 100644 (file)
@@ -18,10 +18,10 @@ class QQMusicIE(InfoExtractor):
     _VALID_URL = r'http://y.qq.com/#type=song&mid=(?P<id>[0-9A-Za-z]+)'
     _TESTS = [{
         'url': 'http://y.qq.com/#type=song&mid=004295Et37taLD',
-        'md5': 'bed90b6db2a7a7a7e11bc585f471f63a',
+        'md5': '9ce1c1c8445f561506d2e3cfb0255705',
         'info_dict': {
             'id': '004295Et37taLD',
-            'ext': 'm4a',
+            'ext': 'mp3',
             'title': '可惜没如果',
             'upload_date': '20141227',
             'creator': '林俊杰',
@@ -29,6 +29,12 @@ class QQMusicIE(InfoExtractor):
         }
     }]
 
+    _FORMATS = {
+        'mp3-320': {'prefix': 'M800', 'ext': 'mp3', 'preference': 40, 'abr': 320},
+        'mp3-128': {'prefix': 'M500', 'ext': 'mp3', 'preference': 30, 'abr': 128},
+        'm4a': {'prefix': 'C200', 'ext': 'm4a', 'preference': 10}
+    }
+
     # Reference: m_r_GetRUin() in top_player.js
     # http://imgcache.gtimg.cn/music/portal_v3/y/top_player.js
     @staticmethod
@@ -68,11 +74,22 @@ class QQMusicIE(InfoExtractor):
             'http://base.music.qq.com/fcgi-bin/fcg_musicexpress.fcg?json=3&guid=%s' % guid,
             mid, note='Retrieve vkey', errnote='Unable to get vkey',
             transform_source=strip_jsonp)['key']
-        song_url = 'http://cc.stream.qqmusic.qq.com/C200%s.m4a?vkey=%s&guid=%s&fromtag=0' % (mid, vkey, guid)
+
+        formats = []
+        for format_id, details in self._FORMATS.items():
+            formats.append({
+                'url': 'http://cc.stream.qqmusic.qq.com/%s%s.%s?vkey=%s&guid=%s&fromtag=0'
+                       % (details['prefix'], mid, details['ext'], vkey, guid),
+                'format': format_id,
+                'format_id': format_id,
+                'preference': details['preference'],
+                'abr': details.get('abr'),
+            })
+        self._sort_formats(formats)
 
         return {
             'id': mid,
-            'url': song_url,
+            'formats': formats,
             'title': song_name,
             'upload_date': publish_time,
             'creator': singer,
diff --git a/youtube_dl/extractor/rtbf.py b/youtube_dl/extractor/rtbf.py
index 5a381d9ced41516db44d7e17120b29948a1957cb..e4215d546219bb95fe79abfb184da149148962db 100644 (file)
@@ -21,6 +21,13 @@ class RTBFIE(InfoExtractor):
         }
     }
 
+    _QUALITIES = [
+        ('mobile', 'mobile'),
+        ('web', 'SD'),
+        ('url', 'MD'),
+        ('high', 'HD'),
+    ]
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
@@ -32,14 +39,21 @@ class RTBFIE(InfoExtractor):
                 r'data-video="([^"]+)"', webpage, 'data video')),
             video_id)
 
-        video_url = data.get('downloadUrl') or data.get('url')
-
         if data.get('provider').lower() == 'youtube':
+            video_url = data.get('downloadUrl') or data.get('url')
             return self.url_result(video_url, 'Youtube')
+        formats = []
+        for key, format_id in self._QUALITIES:
+            format_url = data['sources'].get(key)
+            if format_url:
+                formats.append({
+                    'format_id': format_id,
+                    'url': format_url,
+                })
 
         return {
             'id': video_id,
-            'url': video_url,
+            'formats': formats,
             'title': data['title'],
             'description': data.get('description') or data.get('subtitle'),
             'thumbnail': data.get('thumbnail'),
diff --git a/youtube_dl/extractor/rtlnl.py b/youtube_dl/extractor/rtlnl.py
index cfce4550ada568cfe13fae859a2bb745671074b5..41d202c289839d00de98eb0a3b1e5770eb0990f5 100644 (file)
@@ -12,10 +12,10 @@ class RtlNlIE(InfoExtractor):
     IE_NAME = 'rtl.nl'
     IE_DESC = 'rtl.nl and rtlxl.nl'
     _VALID_URL = r'''(?x)
-        https?://(www\.)?
+        https?://(?:www\.)?
         (?:
             rtlxl\.nl/\#!/[^/]+/|
-            rtl\.nl/system/videoplayer/[^?#]+?/video_embed\.html\#uuid=
+            rtl\.nl/system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html\b.+?\buuid=
         )
         (?P<id>[0-9a-f-]+)'''
 
@@ -43,6 +43,9 @@ class RtlNlIE(InfoExtractor):
             'upload_date': '20150215',
             'description': 'Er zijn nieuwe beelden vrijgegeven die vlak na de aanslag in Kopenhagen zijn gemaakt. Op de video is goed te zien hoe omstanders zich bekommeren om één van de slachtoffers, terwijl de eerste agenten ter plaatse komen.',
         }
+    }, {
+        'url': 'http://www.rtl.nl/system/videoplayer/derden/embed.html#!/uuid=bb0353b0-d6a4-1dad-90e9-18fe75b8d1f0',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
diff --git a/youtube_dl/extractor/ruutu.py b/youtube_dl/extractor/ruutu.py
new file mode 100644 (file)
index 0000000..4e22628
--- /dev/null
@@ -0,0 +1,119 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_urllib_parse_urlparse
+from ..utils import (
+    determine_ext,
+    int_or_none,
+    xpath_text,
+)
+
+
+class RuutuIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?ruutu\.fi/ohjelmat/(?:[^/?#]+/)*(?P<id>[^/?#]+)'
+    _TESTS = [
+        {
+            'url': 'http://www.ruutu.fi/ohjelmat/oletko-aina-halunnut-tietaa-mita-tapahtuu-vain-hetki-ennen-lahetysta-nyt-se-selvisi',
+            'md5': 'ab2093f39be1ca8581963451b3c0234f',
+            'info_dict': {
+                'id': '2058907',
+                'display_id': 'oletko-aina-halunnut-tietaa-mita-tapahtuu-vain-hetki-ennen-lahetysta-nyt-se-selvisi',
+                'ext': 'mp4',
+                'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!',
+                'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'duration': 114,
+                'age_limit': 0,
+            },
+        },
+        {
+            'url': 'http://www.ruutu.fi/ohjelmat/superpesis/superpesis-katso-koko-kausi-ruudussa',
+            'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9',
+            'info_dict': {
+                'id': '2057306',
+                'display_id': 'superpesis-katso-koko-kausi-ruudussa',
+                'ext': 'mp4',
+                'title': 'Superpesis: katso koko kausi Ruudussa',
+                'description': 'md5:44c44a99fdbe5b380ab74ebd75f0af77',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'duration': 40,
+                'age_limit': 0,
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, display_id)
+
+        video_id = self._search_regex(
+            r'data-media-id="(\d+)"', webpage, 'media id')
+
+        video_xml_url = None
+
+        media_data = self._search_regex(
+            r'jQuery\.extend\([^,]+,\s*(.+?)\);', webpage,
+            'media data', default=None)
+        if media_data:
+            media_json = self._parse_json(media_data, display_id, fatal=False)
+            if media_json:
+                xml_url = media_json.get('ruutuplayer', {}).get('xmlUrl')
+                if xml_url:
+                    video_xml_url = xml_url.replace('{ID}', video_id)
+
+        if not video_xml_url:
+            video_xml_url = 'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id
+
+        video_xml = self._download_xml(video_xml_url, video_id)
+
+        formats = []
+        processed_urls = []
+
+        def extract_formats(node):
+            for child in node:
+                if child.tag.endswith('Files'):
+                    extract_formats(child)
+                elif child.tag.endswith('File'):
+                    video_url = child.text
+                    if not video_url or video_url in processed_urls or 'NOT_USED' in video_url:
+                        return
+                    processed_urls.append(video_url)
+                    ext = determine_ext(video_url)
+                    if ext == 'm3u8':
+                        formats.extend(self._extract_m3u8_formats(
+                            video_url, video_id, 'mp4', m3u8_id='hls'))
+                    elif ext == 'f4m':
+                        formats.extend(self._extract_f4m_formats(
+                            video_url, video_id, f4m_id='hds'))
+                    else:
+                        proto = compat_urllib_parse_urlparse(video_url).scheme
+                        if not child.tag.startswith('HTTP') and proto != 'rtmp':
+                            continue
+                        preference = -1 if proto == 'rtmp' else 1
+                        label = child.get('label')
+                        tbr = int_or_none(child.get('bitrate'))
+                        width, height = [int_or_none(x) for x in child.get('resolution', '').split('x')]
+                        formats.append({
+                            'format_id': '%s-%s' % (proto, label if label else tbr),
+                            'url': video_url,
+                            'width': width,
+                            'height': height,
+                            'tbr': tbr,
+                            'preference': preference,
+                        })
+
+        extract_formats(video_xml.find('./Clip'))
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': self._og_search_title(webpage),
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')),
+            'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')),
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/spiegeltv.py b/youtube_dl/extractor/spiegeltv.py
index 359722ad697a7948bea484e7c8f8f77a924544f8..08a5c4314959409cd93d71fcd8f8160ee541b7c4 100644 (file)
@@ -2,7 +2,11 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import float_or_none
+from ..compat import compat_urllib_parse_urlparse
+from ..utils import (
+    determine_ext,
+    float_or_none,
+)
 
 
 class SpiegeltvIE(InfoExtractor):
@@ -17,7 +21,7 @@ class SpiegeltvIE(InfoExtractor):
             'thumbnail': 're:http://.*\.jpg$',
         },
         'params': {
-            # rtmp download
+            # m3u8 download
             'skip_download': True,
         }
     }, {
@@ -53,7 +57,35 @@ class SpiegeltvIE(InfoExtractor):
         server_json = self._download_json(
             'http://spiegeltv-prod-static.s3.amazonaws.com/projectConfigs/projectConfig.json',
             video_id, note='Downloading server information')
-        server = server_json['streamingserver'][0]['endpoint']
+
+        format = '16x9' if is_wide else '4x3'
+
+        formats = []
+        for streamingserver in server_json['streamingserver']:
+            endpoint = streamingserver.get('endpoint')
+            if not endpoint:
+                continue
+            play_path = 'mp4:%s_spiegeltv_0500_%s.m4v' % (uuid, format)
+            if endpoint.startswith('rtmp'):
+                formats.append({
+                    'url': endpoint,
+                    'format_id': 'rtmp',
+                    'app': compat_urllib_parse_urlparse(endpoint).path[1:],
+                    'play_path': play_path,
+                    'player_path': 'http://prod-static.spiegel.tv/frontend-076.swf',
+                    'ext': 'flv',
+                    'rtmp_live': True,
+                })
+            elif determine_ext(endpoint) == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    endpoint.replace('[video]', play_path),
+                    video_id, 'm4v',
+                    preference=1,  # Prefer hls since it allows working around georestriction
+                    m3u8_id='hls'))
+            else:
+                formats.append({
+                    'url': endpoint,
+                })
 
         thumbnails = []
         for image in media_json['images']:
@@ -65,17 +97,12 @@ class SpiegeltvIE(InfoExtractor):
 
         description = media_json['subtitle']
         duration = float_or_none(media_json.get('duration_in_ms'), scale=1000)
-        format = '16x9' if is_wide else '4x3'
-
-        url = server + 'mp4:' + uuid + '_spiegeltv_0500_' + format + '.m4v'
 
         return {
             'id': video_id,
             'title': title,
-            'url': url,
-            'ext': 'm4v',
             'description': description,
             'duration': duration,
             'thumbnails': thumbnails,
-            'rtmp_live': True,
+            'formats': formats,
         }
diff --git a/youtube_dl/extractor/sunporno.py b/youtube_dl/extractor/sunporno.py
index 854d01beeb5cefd1f82d7991ee2c0ce75ad33dfa..e527aa97188b1860e054f8af7c7bd7a33301729e 100644 (file)
@@ -44,7 +44,7 @@ class SunPornoIE(InfoExtractor):
             webpage, 'duration', fatal=False))
 
         view_count = int_or_none(self._html_search_regex(
-            r'class="views">\s*(\d+)\s*<',
+            r'class="views">(?:<noscript>)?\s*(\d+)\s*<',
             webpage, 'view count', fatal=False))
         comment_count = int_or_none(self._html_search_regex(
             r'(\d+)</b> Comments?',
diff --git a/youtube_dl/extractor/teamcoco.py b/youtube_dl/extractor/teamcoco.py
index b2a4b1fc05430558ad9b33a9aa3ce834107dc6e3..d1b7264b4ca4a0cb72e491da26d7f5bbc1cc66b7 100644 (file)
@@ -51,6 +51,17 @@ class TeamcocoIE(InfoExtractor):
             'params': {
                 'skip_download': True,  # m3u8 downloads
             }
+        }, {
+            'url': 'http://teamcoco.com/video/full-episode-mon-6-1-joel-mchale-jake-tapper-and-musical-guest-courtney-barnett?playlist=x;eyJ0eXBlIjoidGFnIiwiaWQiOjl9',
+            'info_dict': {
+                'id': '89341',
+                'ext': 'mp4',
+                'title': 'Full Episode - Mon. 6/1 - Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
+                'description': 'Guests: Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
+            },
+            'params': {
+                'skip_download': True,  # m3u8 downloads
+            }
         }
     ]
     _VIDEO_ID_REGEXES = (
@@ -110,9 +121,23 @@ class TeamcocoIE(InfoExtractor):
         get_quality = qualities(['500k', '480p', '1000k', '720p', '1080p'])
         for filed in data['files']:
             if determine_ext(filed['url']) == 'm3u8':
-                formats.extend(self._extract_m3u8_formats(
-                    filed['url'], video_id, ext='mp4'))
+                # compat_urllib_parse.urljoin does not work here
+                if filed['url'].startswith('/'):
+                    m3u8_url = 'http://ht.cdn.turner.com/tbs/big/teamcoco' + filed['url']
+                else:
+                    m3u8_url = filed['url']
+                m3u8_formats = self._extract_m3u8_formats(
+                    m3u8_url, video_id, ext='mp4')
+                for m3u8_format in m3u8_formats:
+                    if m3u8_format not in formats:
+                        formats.append(m3u8_format)
+            elif determine_ext(filed['url']) == 'f4m':
+                # TODO Correct f4m extraction
+                continue
             else:
+                if filed['url'].startswith('/mp4:protected/'):
+                    # TODO Correct extraction for these files
+                    continue
                 m_format = re.search(r'(\d+(k|p))\.mp4', filed['url'])
                 if m_format is not None:
                     format_id = m_format.group(1)
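
The "compat_urllib_parse.urljoin does not work here" note above refers to standard URL-joining semantics: a root-relative path replaces the whole base path, so the '/tbs/big/teamcoco' prefix would be lost, which is why the URL is concatenated by hand. A sketch using the standard library (the path value is a hypothetical shape of filed['url']):

    try:
        from urllib.parse import urljoin  # Python 3
    except ImportError:
        from urlparse import urljoin      # Python 2

    base = 'http://ht.cdn.turner.com/tbs/big/teamcoco'
    path = '/hls/video/master.m3u8'

    print(urljoin(base, path))  # http://ht.cdn.turner.com/hls/video/master.m3u8 (prefix lost)
    print(base + path)          # http://ht.cdn.turner.com/tbs/big/teamcoco/hls/video/master.m3u8
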
diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py
index 92731ad3d7e8dcc3167b50ce1a15e3b035fb7721..83d833e30dbeb60caa43aa272bfd4d35f4507a53 100644 (file)
@@ -26,7 +26,7 @@ _x = lambda p: xpath_with_ns(p, {'smil': 'http://www.w3.org/2005/SMIL21/Language
 class ThePlatformIE(InfoExtractor):
     _VALID_URL = r'''(?x)
         (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
-           (?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/)?
+           (?:(?P<media>(?:[^/]+/)+select/media/)|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
          |theplatform:)(?P<id>[^/\?&]+)'''
 
     _TESTS = [{
@@ -56,6 +56,17 @@ class ThePlatformIE(InfoExtractor):
             # rtmp download
             'skip_download': True,
         }
+    }, {
+        'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
+        'info_dict': {
+            'id': 'yMBg9E8KFxZD',
+            'ext': 'mp4',
+            'description': 'md5:644ad9188d655b742f942bf2e06b002d',
+            'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
+        }
+    }, {
+        'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
+        'only_matching': True,
     }]
 
     @staticmethod
@@ -85,6 +96,11 @@ class ThePlatformIE(InfoExtractor):
         if not provider_id:
             provider_id = 'dJ5BDC'
 
+        path = provider_id
+        if mobj.group('media'):
+            path += '/media'
+        path += '/' + video_id
+
         if smuggled_data.get('force_smil_url', False):
             smil_url = url
         elif mobj.group('config'):
@@ -94,8 +110,7 @@ class ThePlatformIE(InfoExtractor):
             config = self._download_json(config_url, video_id, 'Downloading config')
             smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
         else:
-            smil_url = ('http://link.theplatform.com/s/{0}/{1}/meta.smil?'
-                        'format=smil&mbr=true'.format(provider_id, video_id))
+            smil_url = 'http://link.theplatform.com/s/%s/meta.smil?format=smil&mbr=true' % path
 
         sig = smuggled_data.get('sig')
         if sig:
@@ -112,7 +127,7 @@ class ThePlatformIE(InfoExtractor):
         else:
             raise ExtractorError(error_msg, expected=True)
 
-        info_url = 'http://link.theplatform.com/s/{0}/{1}?format=preview'.format(provider_id, video_id)
+        info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
         info_json = self._download_webpage(info_url, video_id)
         info = json.loads(info_json)
 
diff --git a/youtube_dl/extractor/tlc.py b/youtube_dl/extractor/tlc.py
index 9f9e388c50948d658d1022f8514122643b623a03..13263614cc06b099d929ee71564899ac3620f76a 100644 (file)
@@ -12,17 +12,22 @@ class TlcIE(DiscoveryIE):
     IE_NAME = 'tlc.com'
     _VALID_URL = r'http://www\.tlc\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9\-]*)(.htm)?'
 
-    _TEST = {
+    # DiscoveryIE has _TESTS
+    _TESTS = [{
         'url': 'http://www.tlc.com/tv-shows/cake-boss/videos/too-big-to-fly.htm',
-        'md5': 'c4038f4a9b44d0b5d74caaa64ed2a01a',
         'info_dict': {
-            'id': '853232',
+            'id': '104493',
             'ext': 'mp4',
-            'title': 'Cake Boss: Too Big to Fly',
+            'title': 'Too Big to Fly',
             'description': 'Buddy has taken on a high flying task.',
             'duration': 119,
+            'timestamp': 1393365060,
+            'upload_date': '20140225',
         },
-    }
+        'params': {
+            'skip_download': True,  # requires ffmpeg
+        },
+    }]
 
 
 class TlcDeIE(InfoExtractor):
diff --git a/youtube_dl/extractor/tube8.py b/youtube_dl/extractor/tube8.py
index 6ca8840b0869f1490c06c7df0905c28e38ba1fee..c9cb69333f7da0a9f4fe009e79b06433bca83726 100644 (file)
@@ -47,7 +47,7 @@ class Tube8IE(InfoExtractor):
         webpage = self._download_webpage(req, display_id)
 
         flashvars = json.loads(self._html_search_regex(
-            r'flashvars\s*=\s*({.+?})', webpage, 'flashvars'))
+            r'flashvars\s*=\s*({.+?});\r?\n', webpage, 'flashvars'))
 
         video_url = flashvars['video_url']
         if flashvars.get('encrypted') is True:
diff --git a/youtube_dl/extractor/turbo.py b/youtube_dl/extractor/turbo.py
index 29703a8a9a6ddf0981642c28cd2f1f68cc07c7b7..7ae63a4992a74368ec8b5f6a266a298cb6776b79 100644 (file)
@@ -23,7 +23,7 @@ class TurboIE(InfoExtractor):
             'ext': 'mp4',
             'duration': 3715,
             'title': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
-            'description': 'Retrouvez dans cette rubrique toutes les vidéos de l\'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
+            'description': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia...',
             'thumbnail': 're:^https?://.*\.jpg$',
         }
     }
@@ -42,7 +42,7 @@ class TurboIE(InfoExtractor):
         title = xpath_text(item, './title', 'title')
         duration = int_or_none(xpath_text(item, './durate', 'duration'))
         thumbnail = xpath_text(item, './visuel_clip', 'thumbnail')
-        description = self._og_search_description(webpage)
+        description = self._html_search_meta('description', webpage)
 
         formats = []
         get_quality = qualities(['3g', 'sd', 'hq'])
diff --git a/youtube_dl/extractor/tvc.py b/youtube_dl/extractor/tvc.py
new file mode 100644 (file)
index 0000000..6b5d80a
--- /dev/null
@@ -0,0 +1,109 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    clean_html,
+    int_or_none,
+)
+
+
+class TVCIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?tvc\.ru/video/iframe/id/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://www.tvc.ru/video/iframe/id/74622/isPlay/false/id_stat/channel/?acc_video_id=/channel/brand/id/17/show/episodes/episode_id/39702',
+        'md5': 'bbc5ff531d1e90e856f60fc4b3afd708',
+        'info_dict': {
+            'id': '74622',
+            'ext': 'mp4',
+            'title': 'События. "События". Эфир от 22.05.2015 14:30',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'duration': 1122,
+        },
+    }
+
+    @classmethod
+    def _extract_url(cls, webpage):
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>(?:http://)?(?:www\.)?tvc\.ru/video/iframe/id/[^"]+)\1', webpage)
+        if mobj:
+            return mobj.group('url')
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        video = self._download_json(
+            'http://www.tvc.ru/video/json/id/%s' % video_id, video_id)
+
+        formats = []
+        for info in video.get('path', {}).get('quality', []):
+            video_url = info.get('url')
+            if not video_url:
+                continue
+            format_id = self._search_regex(
+                r'cdnvideo/([^/]+?)(?:-[^/]+?)?/', video_url,
+                'format id', default=None)
+            formats.append({
+                'url': video_url,
+                'format_id': format_id,
+                'width': int_or_none(info.get('width')),
+                'height': int_or_none(info.get('height')),
+                'tbr': int_or_none(info.get('bitrate')),
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': video['title'],
+            'thumbnail': video.get('picture'),
+            'duration': int_or_none(video.get('duration')),
+            'formats': formats,
+        }
+
+
+class TVCArticleIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?tvc\.ru/(?!video/iframe/id/)(?P<id>[^?#]+)'
+    _TESTS = [{
+        'url': 'http://www.tvc.ru/channel/brand/id/29/show/episodes/episode_id/39702/',
+        'info_dict': {
+            'id': '74622',
+            'ext': 'mp4',
+            'title': 'События. "События". Эфир от 22.05.2015 14:30',
+            'description': 'md5:ad7aa7db22903f983e687b8a3e98c6dd',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'duration': 1122,
+        },
+    }, {
+        'url': 'http://www.tvc.ru/news/show/id/69944',
+        'info_dict': {
+            'id': '75399',
+            'ext': 'mp4',
+            'title': 'Эксперты: в столице встал вопрос о максимально безопасных остановках',
+            'description': 'md5:f2098f71e21f309e89f69b525fd9846e',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'duration': 278,
+        },
+    }, {
+        'url': 'http://www.tvc.ru/channel/brand/id/47/show/episodes#',
+        'info_dict': {
+            'id': '2185',
+            'ext': 'mp4',
+            'title': 'Ещё не поздно. Эфир от 03.08.2013',
+            'description': 'md5:51fae9f3f8cfe67abce014e428e5b027',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'duration': 3316,
+        },
+    }]
+
+    def _real_extract(self, url):
+        webpage = self._download_webpage(url, self._match_id(url))
+        return {
+            '_type': 'url_transparent',
+            'ie_key': 'TVC',
+            'url': self._og_search_video_url(webpage),
+            'title': clean_html(self._og_search_title(webpage)),
+            'description': clean_html(self._og_search_description(webpage)),
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
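
For reference, the JSON returned by http://www.tvc.ru/video/json/id/<id> is assumed to look roughly like the dict below; the field names are inferred solely from the extractor above and the URLs are made up. The format_id comes from the cdnvideo/<quality>-... path segment of each URL.

    # Hypothetical response shape, inferred only from the fields TVCIE reads.
    video = {
        'title': 'Episode title',
        'picture': 'http://www.tvc.ru/path/to/thumbnail.jpg',
        'duration': 1122,
        'path': {
            'quality': [
                {'url': 'http://example.com/cdnvideo/hq-vids/74622.mp4',
                 'width': 1280, 'height': 720, 'bitrate': 2000},
                {'url': 'http://example.com/cdnvideo/sd-vids/74622.mp4',
                 'width': 640, 'height': 360, 'bitrate': 800},
            ],
        },
    }
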
index aacb999ce9044ba07af7597d03b34e1aa37ebddd..3448bec4fdc96b361c0b071ab12832583533c14f 100644 (file)
@@ -785,7 +785,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             s = mobj.group(1)
             dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
             return '/signature/%s' % dec_s
-        dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
+        dash_manifest_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, dash_manifest_url)
         dash_doc = self._download_xml(
             dash_manifest_url, video_id,
             note='Downloading DASH manifest',
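
For context: re.sub() accepts a callable replacement, which is what decrypt_sig relies on above. A self-contained sketch of that pattern, with a dummy reversal standing in for the real signature decryption:

    import re

    def _decrypt(sig):
        return sig[::-1]  # stand-in for the real cipher

    url = 'http://example.com/dash/s/a1b2c3.d4e5/manifest'
    url = re.sub(
        r'/s/([a-fA-F0-9\.]+)',
        lambda m: '/signature/%s' % _decrypt(m.group(1)),
        url)
    # -> 'http://example.com/dash/signature/5e4d.3c2b1a/manifest'
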
@@ -1290,7 +1290,6 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
     def _extract_playlist(self, playlist_id):
         url = self._TEMPLATE_URL % playlist_id
         page = self._download_webpage(url, playlist_id)
-        more_widget_html = content_html = page
 
         for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
             match = match.strip()
@@ -1310,36 +1309,36 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
                 self.report_warning('Youtube gives an alert message: ' + match)
 
         # Extract the video ids from the playlist pages
-        ids = []
-
-        for page_num in itertools.count(1):
-            matches = re.finditer(self._VIDEO_RE, content_html)
-            # We remove the duplicates and the link with index 0
-            # (it's not the first video of the playlist)
-            new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
-            ids.extend(new_ids)
-
-            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
-            if not mobj:
-                break
+        def _entries():
+            more_widget_html = content_html = page
+            for page_num in itertools.count(1):
+                matches = re.finditer(self._VIDEO_RE, content_html)
+                # We remove the duplicates and the link with index 0
+                # (it's not the first video of the playlist)
+                new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
+                for vid_id in new_ids:
+                    yield self.url_result(vid_id, 'Youtube', video_id=vid_id)
+
+                mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
+                if not mobj:
+                    break
 
-            more = self._download_json(
-                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
-                'Downloading page #%s' % page_num,
-                transform_source=uppercase_escape)
-            content_html = more['content_html']
-            if not content_html.strip():
-                # Some webpages show a "Load more" button but they don't
-                # have more videos
-                break
-            more_widget_html = more['load_more_widget_html']
+                more = self._download_json(
+                    'https://youtube.com/%s' % mobj.group('more'), playlist_id,
+                    'Downloading page #%s' % page_num,
+                    transform_source=uppercase_escape)
+                content_html = more['content_html']
+                if not content_html.strip():
+                    # Some webpages show a "Load more" button but they don't
+                    # have more videos
+                    break
+                more_widget_html = more['load_more_widget_html']
 
         playlist_title = self._html_search_regex(
             r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
             page, 'title')
 
-        url_results = self._ids_to_results(ids)
-        return self.playlist_result(url_results, playlist_id, playlist_title)
+        return self.playlist_result(_entries(), playlist_id, playlist_title)
 
     def _real_extract(self, url):
         # Extract playlist id
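
The rewrite above turns the eager id collection into a generator, so playlist entries are produced lazily as "load more" pages are fetched. Stripped of the YouTube specifics, the pattern looks roughly like this (a hypothetical helper, not youtube-dl code):

    import itertools

    def lazy_entries(first_page, extract_ids, get_next_url, get_page):
        # Yield ids page by page; stop when there is no "load more" link.
        page = first_page
        for page_num in itertools.count(1):
            for vid in extract_ids(page):
                yield vid
            next_url = get_next_url(page)
            if not next_url:
                break
            page = get_page(next_url, page_num)
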
@@ -1406,10 +1405,12 @@ class YoutubeChannelIE(InfoExtractor):
         channel_page = self._download_webpage(
             url + '?view=57', channel_id,
             'Downloading channel page', fatal=False)
-        channel_playlist_id = self._search_regex(
-            [r'<meta itemprop="channelId" content="([^"]+)">',
-             r'data-channel-external-id="([^"]+)"'],
-            channel_page, 'channel id', default=None)
+        channel_playlist_id = self._html_search_meta(
+            'channelId', channel_page, 'channel id', default=None)
+        if not channel_playlist_id:
+            channel_playlist_id = self._search_regex(
+                r'data-channel-external-id="([^"]+)"',
+                channel_page, 'channel id', default=None)
         if channel_playlist_id and channel_playlist_id.startswith('UC'):
             playlist_id = 'UU' + channel_playlist_id[2:]
             return self.url_result(
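
The 'UU' + channel_playlist_id[2:] step relies on the long-standing (if undocumented) YouTube convention that a channel id UCxxxx has a matching uploads playlist UUxxxx, e.g.:

    channel_id = 'UC1234567890abcdefghijkl'      # hypothetical channel id
    uploads_playlist_id = 'UU' + channel_id[2:]  # -> 'UU1234567890abcdefghijkl'
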
index 5a2315bd96ce0c6abfdf4a8bea65aa68e6fa370b..689fa75951e2fd390b93fa550807337bd6185aa2 100644 (file)
@@ -145,7 +145,7 @@ def parseOpts(overrideArguments=None):
     general.add_option(
         '--list-extractors',
         action='store_true', dest='list_extractors', default=False,
-        help='List all supported extractors and the URLs they would handle')
+        help='List all supported extractors')
     general.add_option(
         '--extractor-descriptions',
         action='store_true', dest='list_extractor_descriptions', default=False,
index de3169eef1d6ec29d82a60b2f4b6a68f49d7dd4e..fc7ac8305d71c8cce077ef3040cd0903ac9f09c5 100644 (file)
@@ -50,7 +50,7 @@ def rsa_verify(message, signature, key):
 def update_self(to_screen, verbose):
     """Update the program file with the latest version from the repository"""
 
-    UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
+    UPDATE_URL = "https://rg3.github.io/youtube-dl/update/"
     VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
     JSON_URL = UPDATE_URL + 'versions.json'
     UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
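
A minimal sketch of the version check these URLs feed, assuming LATEST_VERSION is a plain-text version string (this is not the full update_self flow, which also verifies the RSA signature over versions.json before replacing the program file):

    import json

    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen  # Python 2

    UPDATE_URL = 'https://rg3.github.io/youtube-dl/update/'
    latest = urlopen(UPDATE_URL + 'LATEST_VERSION').read().decode('ascii').strip()
    versions = json.loads(urlopen(UPDATE_URL + 'versions.json').read().decode('utf-8'))
    print('latest version: %s' % latest)
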