Back-port ITV extractor from yt-dlp PR #1776

Includes commit 6fbcc16, a fix by staubichsauger & pukkandan
dirkf 2021-11-24 18:56:28 +00:00
commit a72aca5667

@@ -2,11 +2,13 @@
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from .brightcove import BrightcoveNewIE
from ..compat import compat_str
from ..utils import (
base_url,
clean_html,
determine_ext,
extract_attributes,
@@ -18,6 +20,8 @@ from ..utils import (
smuggle_url,
try_get,
url_or_none,
url_basename,
urljoin,
)
@@ -33,7 +37,24 @@ class ITVIE(InfoExtractor):
'description': 'md5:4d7159af53ebd5b36e8b3ec82a41fdb4',
'series': 'Plebs',
'season_number': 1,
'episode_number': 1
'episode_number': 1,
'thumbnail': r're:https?://hubimages\.itv\.com/episode/2_1873_0002'
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'https://www.itv.com/hub/the-jonathan-ross-show/2a1166a0209',
'info_dict': {
'id': '2a1166a0209',
'ext': 'mp4',
'title': 'The Jonathan Ross Show - Series 17 - Episode 8',
'description': 'md5:3023dcdd375db1bc9967186cdb3f1399',
'series': 'The Jonathan Ross Show',
'episode_number': 8,
'season_number': 17,
'thumbnail': r're:https?://hubimages\.itv\.com/episode/2(?:_\d{4}){2}'
},
'params': {
# m3u8 download
@@ -53,38 +74,16 @@ class ITVIE(InfoExtractor):
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
params = extract_attributes(self._search_regex(
r'(?s)(<[^>]+id="video"[^>]*>)', webpage, 'params'))
variants = self._parse_json(
try_get(params, lambda x: x['data-video-variants'], str) or '{}',
video_id, fatal=False)
platform_tag_video = None
featureset_video = None
for platform_tag, featuresets in variants.items():
for featureset in featuresets:
if (isinstance(featureset, list) and len(featureset) >= 2
and featureset[0] == 'hls' and featureset[1] == 'aes'):
platform_tag_video = platform_tag
featureset_video = featureset
break
if not platform_tag_video or not featureset_video:
raise ExtractorError(
'[%s] %s: %s' % (self.IE_NAME, video_id, 'No downloads available'),
expected=True)
ios_playlist_url = params.get('data-video-playlist') or params['data-video-id']
hmac = params['data-video-hmac']
headers = self.geo_verification_headers()
headers.update({
def _generate_api_headers(self, hmac):
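# headers for the ITV VOD playlist API: version/content-type negotiation plus the page-supplied hmac, merged with geo-verification headers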
return merge_dicts({
'Accept': 'application/vnd.itv.vod.playlist.v2+json',
'Content-Type': 'application/json',
'hmac': hmac.upper(),
})
ios_playlist = self._download_json(
ios_playlist_url, video_id, data=json.dumps({
}, self.geo_verification_headers())
def _call_api(self, video_id, playlist_url, headers, platform_tag, featureset, fatal=True):
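# POST the user/device/client/variant description to the playlist endpoint and return the parsed JSON playlist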
return self._download_json(
playlist_url, video_id, data=json.dumps({
'user': {
'itvUserId': '',
'entitlements': [],
@@ -105,15 +104,62 @@ class ITVIE(InfoExtractor):
},
'variantAvailability': {
'featureset': {
'min': featureset_video,
'max': featureset_video
'min': featureset,
'max': featureset
},
'platformTag': platform_tag_video
'platformTag': platform_tag
}
}).encode(), headers=headers)
}).encode(), headers=headers, fatal=fatal)
def _get_subtitles(self, video_id, variants, ios_playlist_url, headers, *args, **kwargs):
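# invoked via extract_subtitles(); fetches an 'outband-webvtt' variant of the playlist and collects its subtitle URLs under 'en'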
subtitles = {}
# Prefer last matching featureset
# See: https://github.com/yt-dlp/yt-dlp/issues/986
platform_tag_subs, featureset_subs = next(
((platform_tag, featureset)
for platform_tag, featuresets in reversed(tuple(variants.items())) for featureset in featuresets
if try_get(featureset, lambda x: x[2]) == 'outband-webvtt'),
(None, None))
if platform_tag_subs and featureset_subs:
subs_playlist = self._call_api(
video_id, ios_playlist_url, headers, platform_tag_subs, featureset_subs, fatal=False)
subs = try_get(subs_playlist, lambda x: x['Playlist']['Video']['Subtitles'], list) or []
for sub in subs:
if not isinstance(sub, dict):
continue
href = url_or_none(sub.get('Href'))
if not href:
continue
subtitles.setdefault('en', []).append({'url': href})
return subtitles
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
params = extract_attributes(self._search_regex(
r'(?s)(<[^>]+id="video"[^>]*>)', webpage, 'params'))
variants = self._parse_json(
try_get(params, lambda x: x['data-video-variants'], compat_str) or '{}',
video_id, fatal=False)
# Prefer last matching featureset
# See: https://github.com/yt-dlp/yt-dlp/issues/986
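# i.e. take the last variant whose first two featureset entries are 'hls' and 'aes' (in either order)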
platform_tag_video, featureset_video = next(
((platform_tag, featureset)
for platform_tag, featuresets in reversed(tuple(variants.items())) for featureset in featuresets
if set(try_get(featureset, lambda x: x[:2]) or {}) == {'aes', 'hls'}),
(None, None))
if not platform_tag_video or not featureset_video:
raise ExtractorError('No downloads available', expected=True, video_id=video_id)
ios_playlist_url = params.get('data-video-playlist') or params['data-video-id']
headers = self._generate_api_headers(params['data-video-hmac'])
headers['Referer'] = url
ios_playlist = self._call_api(
video_id, ios_playlist_url, headers, platform_tag_video, featureset_video)
video_data = try_get(ios_playlist, lambda x: x['Playlist']['Video'], dict) or {}
ios_base_url = video_data.get('Base')
formats = []
for media_file in (video_data.get('MediaFiles') or []):
href = media_file.get('Href')
@@ -131,60 +177,6 @@ class ITVIE(InfoExtractor):
'url': href,
})
self._sort_formats(formats)
subtitles = {}
platform_tag_subs = None
featureset_subs = None
for platform_tag, featuresets in variants.items():
for featureset in featuresets:
if (isinstance(featureset, list) and len(featureset) >= 3
and featureset[2] == 'outband-webvtt'):
platform_tag_subs = platform_tag
featureset_subs = featureset
break
if not platform_tag_subs or not featureset_subs:
self.report_warning('%s: %s' % (video_id, 'No subtitles available'))
else:
subs_playlist = self._download_json(
ios_playlist_url, video_id, data=json.dumps({
'user': {
'itvUserId': '',
'entitlements': [],
'token': ''
},
'device': {
'manufacturer': 'Safari',
'model': '5',
'os': {
'name': 'Windows NT',
'version': '6.1',
'type': 'desktop'
}
},
'client': {
'version': '4.1',
'id': 'browser'
},
'variantAvailability': {
'featureset': {
'min': featureset_subs,
'max': featureset_subs
},
'platformTag': platform_tag_subs
}
}).encode(), headers=headers)
subs = try_get(subs_playlist, lambda x: x['Playlist']['Video']['Subtitles'], list) or []
for sub in subs:
if not isinstance(sub, dict):
continue
href = url_or_none(sub.get('Href'))
if not href:
continue
subtitles.setdefault('en', []).append({
'url': href,
'ext': determine_ext(href, 'vtt'),
})
info = self._search_json_ld(webpage, video_id, default={})
if not info:
json_ld = self._parse_json(self._search_regex(
@@ -198,36 +190,73 @@ class ITVIE(InfoExtractor):
info = self._json_ld(item, video_id, fatal=False) or {}
break
thumbnails = []
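# data-video-posterframe is a parameterised image URL: request a 1920x1080 rendition and keep the stripped-down base image as a lower-preference fallback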
thumbnail_url = try_get(params, lambda x: x['data-video-posterframe'], compat_str)
if thumbnail_url:
thumbnails.extend([{
'url': thumbnail_url.format(width=1920, height=1080, quality=100, blur=0, bg='false'),
'width': 1920,
'height': 1080,
}, {
'url': urljoin(base_url(thumbnail_url), url_basename(thumbnail_url)),
'preference': -2
}])
thumbnail_url = self._html_search_meta(['og:image', 'twitter:image'], webpage, default=None)
if thumbnail_url:
thumbnails.append({
'url': thumbnail_url,
})
self._remove_duplicate_formats(thumbnails)
return merge_dicts({
'id': video_id,
'title': self._html_search_meta(['og:title', 'twitter:title'], webpage),
'formats': formats,
'subtitles': subtitles,
'subtitles': self.extract_subtitles(video_id, variants, ios_playlist_url, headers),
'duration': parse_duration(video_data.get('Duration')),
'description': clean_html(get_element_by_class('episode-info__synopsis', webpage)),
'thumbnails': thumbnails
}, info)
class ITVBTCCIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?itv\.com/btcc/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TEST = {
'url': 'http://www.itv.com/btcc/races/btcc-2018-all-the-action-from-brands-hatch',
_VALID_URL = r'https?://(?:www\.)?itv\.com/(?:news|btcc)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.itv.com/btcc/articles/btcc-2019-brands-hatch-gp-race-action',
'info_dict': {
'id': 'btcc-2018-all-the-action-from-brands-hatch',
'title': 'BTCC 2018: All the action from Brands Hatch',
'id': 'btcc-2019-brands-hatch-gp-race-action',
'title': 'BTCC 2019: Brands Hatch GP race action',
},
'playlist_mincount': 9,
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1582188683001/HkiHLnNRx_default/index.html?videoId=%s'
'playlist_count': 12,
}, {
'url': 'https://www.itv.com/news/2021-10-27/i-have-to-protect-the-country-says-rishi-sunak-as-uk-faces-interest-rate-hike',
'info_dict': {
'id': 'i-have-to-protect-the-country-says-rishi-sunak-as-uk-faces-interest-rate-hike',
'title': 'md5:6ef054dd9f069330db3dcc66cb772d32'
},
'playlist_count': 4
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
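# Brightcove embed URL template: account ID, player ID and video ID are filled in per video below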
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result(
smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {
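# video metadata lives in the page's __NEXT_DATA__ JSON; entries whose name or type is 'Brightcove' carry the account, player and video IDs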
json_map = try_get(self._parse_json(self._html_search_regex(
'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[^>]*>([^<]+)</script>', webpage, 'json_map'), playlist_id),
lambda x: x['props']['pageProps']['article']['body']['content']) or []
entries = []
for video in json_map:
if not any(video['data'].get(attr) == 'Brightcove' for attr in ('name', 'type')):
continue
video_id = video['data']['id']
account_id = video['data']['accountId']
player_id = video['data']['playerId']
entries.append(self.url_result(
smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id), {
# ITV does not like some GB IP ranges, so here are some
# IP blocks it accepts
'geo_ip_blocks': [
@@ -235,8 +264,7 @@ class ITVBTCCIE(InfoExtractor):
],
'referrer': url,
}),
ie=BrightcoveNewIE.ie_key(), video_id=video_id)
for video_id in re.findall(r'data-video-id=["\'](\d+)', webpage)]
ie=BrightcoveNewIE.ie_key(), video_id=video_id))
title = self._og_search_title(webpage, fatal=False)