Merge remote-tracking branch 'peugeot/beeg'
author Philipp Hagemeister <phihag@phihag.de>
Sun, 31 Aug 2014 21:57:51 +0000 (23:57 +0200)
committer Philipp Hagemeister <phihag@phihag.de>
Sun, 31 Aug 2014 21:57:51 +0000 (23:57 +0200)
test/test_utils.py
youtube_dl/extractor/__init__.py
youtube_dl/extractor/anysex.py [new file with mode: 0644]
youtube_dl/extractor/crunchyroll.py
youtube_dl/extractor/eporner.py [new file with mode: 0644]
youtube_dl/extractor/hornbunny.py [new file with mode: 0644]
youtube_dl/extractor/sunporno.py [new file with mode: 0644]
youtube_dl/extractor/vporn.py [new file with mode: 0644]
youtube_dl/extractor/youtube.py
youtube_dl/utils.py

diff --git a/test/test_utils.py b/test/test_utils.py
index 3d14f61fbf87ec61b9d7d14dd917c020232453aa..8d89979775c75db9f54c041716f5bfc92a77150a 100644 (file)
@@ -211,6 +211,9 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_duration('00:01:01'), 61)
         self.assertEqual(parse_duration('x:y'), None)
         self.assertEqual(parse_duration('3h11m53s'), 11513)
+        self.assertEqual(parse_duration('3h 11m 53s'), 11513)
+        self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
+        self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
         self.assertEqual(parse_duration('62m45s'), 3765)
         self.assertEqual(parse_duration('6m59s'), 419)
         self.assertEqual(parse_duration('49s'), 49)
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index 01b5f19ddc5f7d940cc28ad61e2796cbdaa0c51b..9f43bb8f47c091b50fad703a0de57795458d1394 100644 (file)
@@ -4,6 +4,7 @@ from .addanime import AddAnimeIE
 from .adultswim import AdultSwimIE
 from .aftonbladet import AftonbladetIE
 from .anitube import AnitubeIE
+from .anysex import AnySexIE
 from .aol import AolIE
 from .allocine import AllocineIE
 from .aparat import AparatIE
@@ -86,6 +87,7 @@ from .ellentv import (
 from .elpais import ElPaisIE
 from .empflix import EmpflixIE
 from .engadget import EngadgetIE
+from .eporner import EpornerIE
 from .escapist import EscapistIE
 from .everyonesmixtape import EveryonesMixtapeIE
 from .exfm import ExfmIE
@@ -135,6 +137,7 @@ from .grooveshark import GroovesharkIE
 from .hark import HarkIE
 from .helsinki import HelsinkiIE
 from .hentaistigma import HentaiStigmaIE
+from .hornbunny import HornBunnyIE
 from .hotnewhiphop import HotNewHipHopIE
 from .howcast import HowcastIE
 from .howstuffworks import HowStuffWorksIE
@@ -323,6 +326,7 @@ from .stanfordoc import StanfordOpenClassroomIE
 from .steam import SteamIE
 from .streamcloud import StreamcloudIE
 from .streamcz import StreamCZIE
+from .sunporno import SunPornoIE
 from .swrmediathek import SWRMediathekIE
 from .syfy import SyfyIE
 from .sztvhu import SztvHuIE
@@ -394,6 +398,7 @@ from .vine import (
 from .viki import VikiIE
 from .vk import VKIE
 from .vodlocker import VodlockerIE
+from .vporn import VpornIE
 from .vube import VubeIE
 from .vuclip import VuClipIE
 from .vulture import VultureIE
diff --git a/youtube_dl/extractor/anysex.py b/youtube_dl/extractor/anysex.py
new file mode 100644 (file)
index 0000000..adeacba
--- /dev/null
@@ -0,0 +1,60 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    int_or_none,
+)
+
+
+class AnySexIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?anysex\.com/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://anysex.com/156592/',
+        'md5': '023e9fbb7f7987f5529a394c34ad3d3d',
+        'info_dict': {
+            'id': '156592',
+            'ext': 'mp4',
+            'title': 'Busty and sexy blondie in her bikini strips for you',
+            'description': 'md5:de9e418178e2931c10b62966474e1383',
+            'categories': ['Erotic'],
+            'duration': 270,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(r"video_url\s*:\s*'([^']+)'", webpage, 'video URL')
+
+        title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
+        description = self._html_search_regex(
+            r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
+        thumbnail = self._html_search_regex(
+            r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)
+
+        categories = re.findall(
+            r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)
+
+        duration = parse_duration(self._search_regex(
+            r'<b>Duration:</b> (\d+:\d+)', webpage, 'duration', fatal=False))
+
+        view_count = int_or_none(self._html_search_regex(
+            r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'categories': categories,
+            'duration': duration,
+            'view_count': view_count,
+        }
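
Once AnySexIE is registered in youtube_dl/extractor/__init__.py it is reachable through the normal public API. A minimal usage sketch (not part of this patch; the URL is the one from the extractor's _TEST):

    import youtube_dl

    ydl = youtube_dl.YoutubeDL({'quiet': True})
    info = ydl.extract_info('http://anysex.com/156592/', download=False)
    print(info['id'], info['duration'], info['categories'])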
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index 026a9177e754de7d606961e6e4793af86da49fe2..4903764f7008ec6f22c054cda2c73557c44b2a91 100644 (file)
@@ -5,6 +5,7 @@ import re
 import json
 import base64
 import zlib
+import xml.etree.ElementTree
 
 from hashlib import sha1
 from math import pow, sqrt, floor
@@ -17,6 +18,7 @@ from ..utils import (
     intlist_to_bytes,
     unified_strdate,
     clean_html,
+    urlencode_postdata,
 )
 from ..aes import (
     aes_cbc_decrypt,
@@ -51,6 +53,26 @@ class CrunchyrollIE(InfoExtractor):
         '1080': ('80', '108'),
     }
 
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+        self.report_login()
+        login_url = 'https://www.crunchyroll.com/?a=formhandler'
+        data = urlencode_postdata({
+            'formname': 'RpcApiUser_Login',
+            'name': username,
+            'password': password,
+        })
+        login_request = compat_urllib_request.Request(login_url, data)
+        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        self._download_webpage(login_request, None, False, 'Wrong login info')
+
+
+    def _real_initialize(self):
+        self._login()
+
+
     def _decrypt_subtitles(self, data, iv, id):
         data = bytes_to_intlist(data)
         iv = bytes_to_intlist(iv)
@@ -97,6 +119,75 @@ class CrunchyrollIE(InfoExtractor):
             output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
         return output
 
+    def _convert_subtitles_to_ass(self, subtitles):
+        output = ''
+
+        def ass_bool(strvalue):
+            assvalue = '0'
+            if strvalue == '1':
+                assvalue = '-1'
+            return assvalue
+
+        sub_root = xml.etree.ElementTree.fromstring(subtitles)
+        if not sub_root:
+            return output
+
+        output = '[Script Info]\n'
+        output += 'Title: %s\n' % sub_root.attrib["title"]
+        output += 'ScriptType: v4.00+\n'
+        output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
+        output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
+        output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
+        output += """ScaledBorderAndShadow: yes
+
+[V4+ Styles]
+Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
+"""
+        for style in sub_root.findall('./styles/style'):
+            output += 'Style: ' + style.attrib["name"]
+            output += ',' + style.attrib["font_name"]
+            output += ',' + style.attrib["font_size"]
+            output += ',' + style.attrib["primary_colour"]
+            output += ',' + style.attrib["secondary_colour"]
+            output += ',' + style.attrib["outline_colour"]
+            output += ',' + style.attrib["back_colour"]
+            output += ',' + ass_bool(style.attrib["bold"])
+            output += ',' + ass_bool(style.attrib["italic"])
+            output += ',' + ass_bool(style.attrib["underline"])
+            output += ',' + ass_bool(style.attrib["strikeout"])
+            output += ',' + style.attrib["scale_x"]
+            output += ',' + style.attrib["scale_y"]
+            output += ',' + style.attrib["spacing"]
+            output += ',' + style.attrib["angle"]
+            output += ',' + style.attrib["border_style"]
+            output += ',' + style.attrib["outline"]
+            output += ',' + style.attrib["shadow"]
+            output += ',' + style.attrib["alignment"]
+            output += ',' + style.attrib["margin_l"]
+            output += ',' + style.attrib["margin_r"]
+            output += ',' + style.attrib["margin_v"]
+            output += ',' + style.attrib["encoding"]
+            output += '\n'
+
+        output += """
+[Events]
+Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
+"""
+        for event in sub_root.findall('./events/event'):
+            output += 'Dialogue: 0'
+            output += ',' + event.attrib["start"]
+            output += ',' + event.attrib["end"]
+            output += ',' + event.attrib["style"]
+            output += ',' + event.attrib["name"]
+            output += ',' + event.attrib["margin_l"]
+            output += ',' + event.attrib["margin_r"]
+            output += ',' + event.attrib["margin_v"]
+            output += ',' + event.attrib["effect"]
+            output += ',' + event.attrib["text"]
+            output += '\n'
+
+        return output
+
     def _real_extract(self,url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('video_id')
@@ -158,6 +249,7 @@ class CrunchyrollIE(InfoExtractor):
             })
 
         subtitles = {}
+        sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
             sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
                                               video_id, note='Downloading subtitles for '+sub_name)
@@ -174,7 +266,10 @@ class CrunchyrollIE(InfoExtractor):
             lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
             if not lang_code:
                 continue
-            subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
+            if sub_format == 'ass':
+                subtitles[lang_code] = self._convert_subtitles_to_ass(subtitle)
+            else:
+                subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
 
         return {
             'id':          video_id,
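
The new _login() and _convert_subtitles_to_ass() paths are driven entirely by standard YoutubeDL options: credentials come from _get_login_info() and the target format from self._downloader.params['subtitlesformat']. A minimal sketch of driving both (credentials and URL are placeholders, not taken from this patch):

    import youtube_dl

    ydl = youtube_dl.YoutubeDL({
        'username': 'user@example.com',   # placeholder; read back via _get_login_info()
        'password': 'hunter2',            # placeholder
        'writesubtitles': True,
        'subtitlesformat': 'ass',         # picked up as sub_format in _real_extract
        'skip_download': True,
    })
    ydl.download(['http://www.crunchyroll.com/<series>/<episode>'])  # placeholder URL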
diff --git a/youtube_dl/extractor/eporner.py b/youtube_dl/extractor/eporner.py
new file mode 100644 (file)
index 0000000..4c2c074
--- /dev/null
@@ -0,0 +1,55 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    str_to_int,
+)
+
+
+class EpornerIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<title_dash>[\w-]+)/?'
+    _TEST = {
+        'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
+        'md5': '3b427ae4b9d60619106de3185c2987cd',
+        'info_dict': {
+            'id': '95008',
+            'ext': 'flv',
+            'title': 'Infamous Tiffany Teen Strip Tease Video',
+            'duration': 194,
+            'view_count': int,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+        title = self._html_search_regex(
+            r'<title>(.*?) - EPORNER', webpage, 'title')
+
+        redirect_code = self._html_search_regex(
+            r'<script type="text/javascript" src="/config5/%s/([a-f\d]+)/">' % video_id,
+            webpage, 'redirect_code')
+        redirect_url = 'http://www.eporner.com/config5/%s/%s' % (video_id, redirect_code)
+        webpage2 = self._download_webpage(redirect_url, video_id)
+        video_url = self._html_search_regex(
+            r'file: "(.*?)",', webpage2, 'video_url')
+
+        duration = parse_duration(self._search_regex(
+            r'class="mbtim">([0-9:]+)</div>', webpage, 'duration',
+            fatal=False))
+        view_count = str_to_int(self._search_regex(
+            r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
+            webpage, 'view count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'duration': duration,
+            'view_count': view_count,
+        }
diff --git a/youtube_dl/extractor/hornbunny.py b/youtube_dl/extractor/hornbunny.py
new file mode 100644 (file)
index 0000000..a42fba0
--- /dev/null
@@ -0,0 +1,44 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+class HornBunnyIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html'
+    _TEST = {
+        'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html',
+        'md5': '95e40865aedd08eff60272b704852ad7',
+        'info_dict': {
+            'id': '5227',
+            'ext': 'flv',
+            'title': 'panty slut jerk off instruction',
+            'duration': 550
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        title = self._html_search_regex(r'class="title">(.*?)</h2>', webpage, 'title')
+        redirect_url = self._html_search_regex(r'pg&settings=(.*?)\|0"\);', webpage, 'redirect url')
+        webpage2 = self._download_webpage(redirect_url, video_id)
+        video_url = self._html_search_regex(r'flvMask:(.*?);', webpage2, 'video_url')
+
+        mobj = re.search(r'<strong>Runtime:</strong> (?P<minutes>\d+):(?P<seconds>\d+)</div>', webpage)
+        duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
+
+        view_count = self._html_search_regex(r'<strong>Views:</strong>  (\d+)</div>', webpage, 'view count', fatal=False)
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'ext': 'flv',
+            'duration': duration,
+            'view_count': int_or_none(view_count),
+        }
diff --git a/youtube_dl/extractor/sunporno.py b/youtube_dl/extractor/sunporno.py
new file mode 100644 (file)
index 0000000..c7a46eb
--- /dev/null
@@ -0,0 +1,68 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    int_or_none,
+    qualities,
+    determine_ext,
+)
+
+
+class SunPornoIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?sunporno\.com/videos/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://www.sunporno.com/videos/807778/',
+        'md5': '6457d3c165fd6de062b99ef6c2ff4c86',
+        'info_dict': {
+            'id': '807778',
+            'ext': 'flv',
+            'title': 'md5:0a400058e8105d39e35c35e7c5184164',
+            'description': 'md5:a31241990e1bd3a64e72ae99afb325fb',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'duration': 302,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, 'title')
+        description = self._html_search_meta('description', webpage, 'description')
+        thumbnail = self._html_search_regex(
+            r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False)
+
+        duration = parse_duration(self._search_regex(
+            r'<span>Duration: (\d+:\d+)</span>', webpage, 'duration', fatal=False))
+
+        view_count = int_or_none(self._html_search_regex(
+            r'<span class="views">(\d+)</span>', webpage, 'view count', fatal=False))
+        comment_count = int_or_none(self._html_search_regex(
+            r'(\d+)</b> Comments?', webpage, 'comment count', fatal=False))
+
+        formats = []
+        quality = qualities(['mp4', 'flv'])
+        for video_url in re.findall(r'<source src="([^"]+)"', webpage):
+            video_ext = determine_ext(video_url)
+            formats.append({
+                'url': video_url,
+                'format_id': video_ext,
+                'quality': quality(video_ext),
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'view_count': view_count,
+            'comment_count': comment_count,
+            'formats': formats,
+        }
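
SunPornoIE ranks the <source> URLs with the qualities() helper from youtube_dl.utils, which returns a function mapping a format id to its index in the preference list (higher is preferred, unknown ids get -1), while determine_ext() supplies that id. A quick sketch (the URL is made up):

    from youtube_dl.utils import determine_ext, qualities

    quality = qualities(['mp4', 'flv'])   # same preference list as the extractor
    print(quality('mp4'))                 # 0
    print(quality('flv'))                 # 1 -> preferred by _sort_formats
    print(quality('webm'))                # -1, unknown id
    print(determine_ext('http://cdn.example.com/807778.flv'))  # 'flv'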
diff --git a/youtube_dl/extractor/vporn.py b/youtube_dl/extractor/vporn.py
new file mode 100644 (file)
index 0000000..426369c
--- /dev/null
@@ -0,0 +1,99 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    str_to_int,
+)
+
+
+class VpornIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?vporn\.com/[^/]+/(?P<display_id>[^/]+)/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://www.vporn.com/masturbation/violet-on-her-th-birthday/497944/',
+        'md5': 'facf37c1b86546fa0208058546842c55',
+        'info_dict': {
+            'id': '497944',
+            'display_id': 'violet-on-her-th-birthday',
+            'ext': 'mp4',
+            'title': 'Violet on her 19th birthday',
+            'description': 'Violet dances in front of the camera which is sure to get you horny.',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'uploader': 'kileyGrope',
+            'categories': ['Masturbation', 'Teen'],
+            'duration': 393,
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        title = self._html_search_regex(
+            r'videoname\s*=\s*\'([^\']+)\'', webpage, 'title').strip()
+        description = self._html_search_regex(
+            r'<div class="description_txt">(.*?)</div>', webpage, 'description', fatal=False)
+        thumbnail = self._html_search_regex(
+            r'flashvars\.imageUrl\s*=\s*"([^"]+)"', webpage, 'thumbnail', fatal=False, default=None)
+        if thumbnail:
+            thumbnail = 'http://www.vporn.com' + thumbnail
+
+        uploader = self._html_search_regex(
+            r'(?s)UPLOADED BY.*?<a href="/user/[^"]+">([^<]+)</a>',
+            webpage, 'uploader', fatal=False)
+
+        categories = re.findall(r'<a href="/cat/[^"]+">([^<]+)</a>', webpage)
+
+        duration = parse_duration(self._search_regex(
+            r'duration (\d+ min \d+ sec)', webpage, 'duration', fatal=False))
+
+        view_count = str_to_int(self._html_search_regex(
+            r'<span>([\d,\.]+) VIEWS</span>', webpage, 'view count', fatal=False))
+        like_count = str_to_int(self._html_search_regex(
+            r'<span id="like" class="n">([\d,\.]+)</span>', webpage, 'like count', fatal=False))
+        dislike_count = str_to_int(self._html_search_regex(
+            r'<span id="dislike" class="n">([\d,\.]+)</span>', webpage, 'dislike count', fatal=False))
+        comment_count = str_to_int(self._html_search_regex(
+            r'<h4>Comments \(<b>([\d,\.]+)</b>\)</h4>', webpage, 'comment count', fatal=False))
+
+        formats = []
+
+        for video in re.findall(r'flashvars\.videoUrl([^=]+?)\s*=\s*"([^"]+)"', webpage):
+            video_url = video[1]
+            fmt = {
+                'url': video_url,
+                'format_id': video[0],
+            }
+            m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)_(?P<vbr>\d+)k\.mp4$', video_url)
+            if m:
+                fmt.update({
+                    'width': int(m.group('width')),
+                    'height': int(m.group('height')),
+                    'vbr': int(m.group('vbr')),
+                })
+            formats.append(fmt)
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'categories': categories,
+            'duration': duration,
+            'view_count': view_count,
+            'like_count': like_count,
+            'dislike_count': dislike_count,
+            'comment_count': comment_count,
+            'age_limit': 18,
+            'formats': formats,
+        }
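
VpornIE derives width, height and vbr directly from the file-name suffix of each flashvars.videoUrl* value. A sketch of that pattern against a URL of the assumed shape (not a real link):

    import re

    video_url = 'http://cdn.example.com/videos/497944_854x480_1000k.mp4'  # assumed shape
    m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)_(?P<vbr>\d+)k\.mp4$', video_url)
    print(int(m.group('width')), int(m.group('height')), int(m.group('vbr')))  # 854 480 1000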
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 75044d71a3fd9f81fa5d89ab8283eb13e5d8191d..3417c1275a972d6cfaf98b74e72bf60bbc6f33f4 100644 (file)
@@ -316,6 +316,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 u"upload_date": u"20121002",
                 u"description": u"test chars:  \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .",
                 u"categories": [u'Science & Technology'],
+                'like_count': int,
+                'dislike_count': int,
             }
         },
         {
@@ -784,7 +786,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
             upload_date = unified_strdate(upload_date)
 
-        m_cat_container = get_element_by_id("eow-category", video_webpage)
+        m_cat_container = self._search_regex(
+            r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
+            video_webpage, 'categories', fatal=False)
         if m_cat_container:
             category = self._html_search_regex(
                 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
@@ -813,15 +817,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             else:
                 video_description = u''
 
-        def _extract_count(klass):
+        def _extract_count(count_name):
             count = self._search_regex(
-                r'class="%s">([\d,]+)</span>' % re.escape(klass),
-                video_webpage, klass, default=None)
+                r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
+                video_webpage, count_name, default=None)
             if count is not None:
                 return int(count.replace(',', ''))
             return None
-        like_count = _extract_count(u'likes-count')
-        dislike_count = _extract_count(u'dislikes-count')
+        like_count = _extract_count(u'like')
+        dislike_count = _extract_count(u'dislike')
 
         # subtitles
         video_subtitles = self.extract_subtitles(video_id, video_webpage)
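
The rewritten _extract_count() keys on the watch-page button ids ("watch-like", "watch-dislike") instead of the removed likes-count/dislikes-count classes. A small sketch of the new regex against markup of the assumed shape (not copied from youtube.com):

    import re

    snippet = '<button id="watch-like" class="..."><span class="yt-uix-button-content">1,057,602 </span></button>'
    count_name = 'like'
    m = re.search(r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name), snippet)
    print(int(m.group(1).replace(',', '')))  # 1057602, mirroring the int(count.replace(',', '')) step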
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 4f02108720d5699a017a78fb25bb30ae3c23c831..8a36e619ae7246da1f18a4d5fd6cee7b364b81b7 100644 (file)
@@ -1318,6 +1318,7 @@ def str_or_none(v, default=None):
 
 
 def str_to_int(int_str):
+    """ A more relaxed version of int_or_none """
     if int_str is None:
         return None
     int_str = re.sub(r'[,\.]', u'', int_str)
@@ -1332,8 +1333,10 @@ def parse_duration(s):
     if s is None:
         return None
 
+    s = s.strip()
+
     m = re.match(
-        r'(?:(?:(?P<hours>[0-9]+)[:h])?(?P<mins>[0-9]+)[:m])?(?P<secs>[0-9]+)s?(?::[0-9]+)?(?P<ms>\.[0-9]+)?$', s)
+        r'(?:(?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?(?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?$', s)
     if not m:
         return None
     res = int(m.group('secs'))
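
The relaxed parse_duration() pattern keeps the old colon and h/m/s forms working while also accepting spaced and spelled-out units, which is what the new test cases above and the '\d+ min \d+ sec' strings from VpornIE rely on. For example:

    from youtube_dl.utils import parse_duration

    print(parse_duration('3h11m53s'))                  # 11513
    print(parse_duration('3 hours 11 mins 53 secs'))   # 11513
    print(parse_duration('6 min 30 sec'))              # 390, the shape VpornIE feeds in
    print(parse_duration('x:y'))                       # None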