Merge branch 'master' into bandcamp
commit ba8a928844

4 changed files with 33 additions and 9 deletions
@@ -5,6 +5,7 @@ from .common import InfoExtractor
 from ..networking import Request
 from ..utils import (
     ExtractorError,
+    UserNotLive,
     js_to_json,
     traverse_obj,
     update_url_query,

@@ -205,6 +206,9 @@ class FC2LiveIE(InfoExtractor):
                 'client_app': 'browser_hls',
                 'ipv6': '',
             }), headers={'X-Requested-With': 'XMLHttpRequest'})
+        # A non-zero 'status' indicates the stream is not live, so check truthiness
+        if traverse_obj(control_server, ('status', {int})) and 'control_token' not in control_server:
+            raise UserNotLive(video_id=video_id)
         self._set_cookie('live.fc2.com', 'l_ortkn', control_server['orz_raw'])

         ws_url = update_url_query(control_server['url'], {'control_token': control_server['control_token']})
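As a quick illustration of the new liveness check (the response dicts below are invented; only the check itself comes from the diff): traverse_obj(control_server, ('status', {int})) yields the 'status' field only when it is an integer, so a non-zero status combined with a missing 'control_token' is treated as an offline stream and the extractor raises UserNotLive.

from yt_dlp.utils import traverse_obj

# Invented control-server responses, for illustration only
live_resp = {'status': 0, 'control_token': 'abc', 'orz_raw': 'xyz'}
offline_resp = {'status': 1}  # assumed: non-zero status and no token when not live

for resp in (live_resp, offline_resp):
    # Mirrors the added check; the extractor raises UserNotLive(video_id=...) here
    if traverse_obj(resp, ('status', {int})) and 'control_token' not in resp:
        print('not live -> UserNotLive')
    else:
        print('treated as live')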
@@ -598,7 +598,8 @@ class PatreonCampaignIE(PatreonBaseIE):
                 'props', 'pageProps', 'bootstrapEnvelope', 'pageBootstrap', 'campaign', 'data', 'id', {str}))
             if not campaign_id:
                 campaign_id = traverse_obj(self._search_nextjs_v13_data(webpage, vanity), (
-                    lambda _, v: v['type'] == 'campaign', 'id', {str}, any, {require('campaign ID')}))
+                    ((..., 'value', 'campaign', 'data'), lambda _, v: v['type'] == 'campaign'),
+                    'id', {str}, any, {require('campaign ID')}))

         params = {
             'json-api-use-default-includes': 'false',
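A minimal sketch of how the revised traversal reads the Next.js v13 payload; the nextjs_data value below is a made-up stand-in for what _search_nextjs_v13_data returns, and {require('campaign ID')} from the real path is omitted for brevity. The branch tuple tries both the (..., 'value', 'campaign', 'data') shape and a flat sequence of campaign dicts, then keeps the first string 'id'.

from yt_dlp.utils import traverse_obj

# Made-up stand-in for the parsed Next.js v13 flight data; the real shape may differ
nextjs_data = [
    {'type': 'data', 'value': {'campaign': {'data': {'type': 'campaign', 'id': '123456'}}}},
]

campaign_id = traverse_obj(nextjs_data, (
    ((..., 'value', 'campaign', 'data'), lambda _, v: v['type'] == 'campaign'),
    'id', {str}, any))
print(campaign_id)  # -> '123456' for this invented payload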
@@ -182,13 +182,13 @@ class TubiTvShowIE(InfoExtractor):
         webpage = self._download_webpage(show_url, playlist_id)

         data = self._search_json(
-            r'window\.__data\s*=', webpage, 'data', playlist_id,
-            transform_source=js_to_json)['video']
+            r'window\.__REACT_QUERY_STATE__\s*=', webpage, 'data', playlist_id,
+            transform_source=js_to_json)['queries'][0]['state']['data']

         # v['number'] is already a decimal string, but stringify to protect against API changes
         path = [lambda _, v: str(v['number']) == selected_season] if selected_season else [..., {dict}]

-        for season in traverse_obj(data, ('byId', lambda _, v: v['type'] == 's', 'seasons', *path)):
+        for season in traverse_obj(data, ('seasons', *path)):
             season_number = int_or_none(season.get('number'))
             for episode in traverse_obj(season, ('episodes', lambda _, v: v['id'])):
                 episode_id = episode['id']
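For reference, a sketch of the page state the reworked extractor now expects; the react_query_state dict is invented to show how ['queries'][0]['state']['data'] and the ('seasons', *path) traversal fit together, and the real window.__REACT_QUERY_STATE__ payload may carry more fields.

from yt_dlp.utils import traverse_obj

# Invented stand-in for the JSON parsed out of window.__REACT_QUERY_STATE__
react_query_state = {
    'queries': [{
        'state': {
            'data': {
                'seasons': [
                    {'number': '1', 'episodes': [{'id': '100001'}, {'id': '100002'}]},
                    {'number': '2', 'episodes': [{'id': '200001'}]},
                ],
            },
        },
    }],
}

data = react_query_state['queries'][0]['state']['data']
selected_season = '2'
path = [lambda _, v: str(v['number']) == selected_season] if selected_season else [..., {dict}]
for season in traverse_obj(data, ('seasons', *path)):
    # Only season '2' matches the filter above
    print([episode['id'] for episode in traverse_obj(season, ('episodes', lambda _, v: v['id']))])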
@@ -4029,6 +4029,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     STREAMING_DATA_CLIENT_NAME: client_name,
                 })

+        def set_audio_lang_from_orig_subs_lang(lang_code):
+            for f in formats:
+                if f.get('acodec') != 'none' and not f.get('language'):
+                    f['language'] = lang_code
+
         subtitles = {}
         skipped_subs_clients = set()

@@ -4088,7 +4093,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):

                 orig_lang = qs.get('lang', [None])[-1]
                 lang_name = self._get_text(caption_track, 'name', max_runs=1)
-                if caption_track.get('kind') != 'asr':
+                is_manual_subs = caption_track.get('kind') != 'asr'
+                if is_manual_subs:
                     if not lang_code:
                         continue
                     process_language(

@@ -4099,16 +4105,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                    if not trans_code:
                        continue
                    orig_trans_code = trans_code
-                    if caption_track.get('kind') != 'asr' and trans_code != 'und':
+                    if is_manual_subs and trans_code != 'und':
                        if not get_translated_subs:
                            continue
                        trans_code += f'-{lang_code}'
                        trans_name += format_field(lang_name, None, ' from %s')
                    if lang_code == f'a-{orig_trans_code}':
                        # Set audio language based on original subtitles
-                        for f in formats:
-                            if f.get('acodec') != 'none' and not f.get('language'):
-                                f['language'] = orig_trans_code
+                        set_audio_lang_from_orig_subs_lang(orig_trans_code)
                        # Add an "-orig" label to the original language so that it can be distinguished.
                        # The subs are returned without "-orig" as well for compatibility
                        process_language(

@@ -4119,6 +4123,21 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                        automatic_captions, base_url, trans_code, trans_name, client_name,
                        pot_params if orig_lang == orig_trans_code else {'tlang': trans_code, **pot_params})

+                # Extract automatic captions when the language is not in 'translationLanguages'
+                # e.g. Cantonese [yue], see https://github.com/yt-dlp/yt-dlp/issues/14889
+                lang_code = remove_start(lang_code, 'a-')
+                if is_manual_subs or not lang_code or lang_code in automatic_captions:
+                    continue
+                lang_name = remove_end(lang_name, ' (auto-generated)')
+                if caption_track.get('isTranslatable'):
+                    # We can assume this is the original audio language
+                    set_audio_lang_from_orig_subs_lang(lang_code)
+                    process_language(
+                        automatic_captions, base_url, f'{lang_code}-orig',
+                        f'{lang_name} (Original)', client_name, pot_params)
+                process_language(
+                    automatic_captions, base_url, lang_code, lang_name, client_name, pot_params)
+
         # Avoid duplication if we've already got everything we need
         need_subs_langs.difference_update(subtitles)
         need_caps_langs.difference_update(automatic_captions)
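A rough sketch of just the label handling in the new fallback branch for auto captions missing from 'translationLanguages' (e.g. Cantonese); the caption_track dict and the 'a-yue' code below are assumptions, and process_language/pot_params from the surrounding extractor are only referenced in comments.

from yt_dlp.utils import remove_end, remove_start

# Made-up caption track for a language absent from 'translationLanguages';
# field names follow the ones used in the diff
caption_track = {'languageCode': 'yue', 'kind': 'asr', 'isTranslatable': False}
lang_code = 'a-yue'  # assumed form of the code for ASR tracks
lang_name = 'Cantonese (auto-generated)'
automatic_captions = {}

is_manual_subs = caption_track.get('kind') != 'asr'
lang_code = remove_start(lang_code, 'a-')  # -> 'yue'
if not (is_manual_subs or not lang_code or lang_code in automatic_captions):
    lang_name = remove_end(lang_name, ' (auto-generated)')  # -> 'Cantonese'
    # In the extractor, process_language() would register 'yue' here
    # (and 'yue-orig' when the track is translatable)
    print(lang_code, lang_name)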