import re
import urllib.parse
from xml.etree import ElementTree

from .common import InfoExtractor
from ..compat import compat_HTMLParseError
from ..utils import (
    extract_attributes,
    get_element_by_attribute,
    get_element_by_class,
    get_element_html_by_class,
    get_element_html_by_id,
    get_element_text_and_html_by_tag,
    get_elements_html_by_class,
    int_or_none,
    unified_strdate,
)


class HEINetworkTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?heinetwork\.tv/(?:[\w\-/]+)'
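    # Deliberately loose: episode, season, and series pages all share this URL
    # shape, so _real_extract dispatches on page content rather than the URL.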
    _TESTS = [{
        # requires cookies
        'url': 'https://www.heinetwork.tv/on-cinema-at-the-cinema/season-2/side-effects-and-identity-thief/',
        'md5': 'd10a28af64c3c34a69baa3f38a8c760b',
        'info_dict': {
            'id': '52',
            'title': '201 ‘Side Effects’ and ‘Identity Thief’',
            'ext': 'mp4',
            'release_date': '20130207',
            'season': 'Season 2',
            'season_number': 2,
            'season_id': 'season-2',
            'series': 'On Cinema at the Cinema',
            'episode': '‘Side Effects’ and ‘Identity Thief’',
            'episode_number': 1,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.heinetwork.tv/on-cinema-at-the-cinema/season-2/',
        'playlist_mincount': 12,
        'info_dict': {
            'id': 'season-2',
            'title': 'Season 2',
        },
    }, {
        'url': 'https://www.heinetwork.tv/on-cinema-at-the-cinema/',
        'playlist_mincount': 16,
        'info_dict': {
            'id': 'on-cinema-at-the-cinema',
            'title': 'On Cinema at the Cinema',
        },
    }]

    def _real_extract(self, url):
        parts = urllib.parse.urlparse(url).path.split('/')
        # Remove empty parts; get last element.
        # This isn't necessarily a site-wide unique ID (we'll get that from the
        # page content), but it at least gives us something unique-ish to pass
        # to `_download_webpage`.
        item_id = next(filter(None, reversed(parts)))

        webpage = self._download_webpage(url, item_id)
        if not self._is_logged_in(webpage):
            self.report_warning('You are not logged in. Some videos may be unavailable.')
        if self._is_collection(webpage):
            return self._extract_collection(webpage, url)
        return self._extract_single_video(webpage, url)

    def _extract_collection(self, webpage, url):
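        # Collection pages (a series or a season) render a grid of thumbnail
        # links; each href is fed back through this extractor.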
        if grid := get_element_html_by_class('group/collection', webpage):
            if links_html := get_elements_html_by_class('group/thumb', grid):
                urls = [extract_attributes(html).get('href') for html in links_html]

                return self.playlist_from_matches(
                    urls,
                    ie=HEINetworkTVIE,
                    playlist_id=self._path_components(url)[-1],
                    playlist_title=self._breadcrumbs(webpage)[-1],
                )

    def _extract_season_name_and_number(self, webpage):
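        # Breadcrumbs on an episode page are expected to be [series, season],
        # e.g. ['On Cinema at the Cinema', 'Season 2'] -> ('Season 2', 2).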
        if (bc := self._breadcrumbs(webpage)) and len(bc) == 2:
            season_name = bc[-1]
            season_number_match = re.match(r'Season (?P<season>\d+)', season_name)
            if not season_number_match:
                return season_name, None
            return season_name, int_or_none(season_number_match.group('season'))
        return None, None

    def _extract_series_name(self, webpage):
        if bc := self._breadcrumbs(webpage):
            return bc[0]

    def _path_components(self, url):
        return [p for p in urllib.parse.urlparse(url).path.split('/') if p]

    def _extract_video_id(self, webpage):
        # The numeric site-wide ID lives on the custom <hei-video> element.
        try:
            _text, html = get_element_text_and_html_by_tag('hei-video', webpage)
        except compat_HTMLParseError:
            # The helper raises (rather than returning None) when the tag is missing.
            return None
        return extract_attributes(html).get('data-episode-id')

    def _clean_episode_title(self, video_title):
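        # Strips the leading production code from the player title, leaving the
        # bare episode name.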
        # ex: '1301 Episode Title' -> 'Episode Title'
        if match := re.match(r'\d+\s+(?P<title>.+)', video_title):
            return match.group('title')

    def _episode_number(self, video_title, season_number):
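        # Titles are prefixed with a production code: season number followed by
        # a zero-padded episode number, e.g. '201 ...' in season 2 is episode 1.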
        if season_number is None:
            return None
        # ex: 1301 -> season 13, episode 01
        match = re.match(fr'{re.escape(str(season_number))}(?P<episode_no>\d+)', video_title)
        if match:
            return int_or_none(match.group('episode_no'))

    def _extract_single_video(self, webpage, url):
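        # All metadata is scraped from page furniture: player element
        # attributes, breadcrumbs, and the episode header.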
        video_id = self._extract_video_id(webpage)
        formats, _subs = self._extract_m3u8_formats_and_subtitles(
            self._extract_video_src(webpage), video_id)
        season, season_number = self._extract_season_name_and_number(webpage)
        video_title = self._extract_video_title(webpage)

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'release_date': self._air_date(webpage),
            'season': season,
            'season_number': season_number,
            'season_id': self._path_components(url)[-2],
            'series': self._extract_series_name(webpage),
            'episode': self._clean_episode_title(video_title),
            'episode_number': self._episode_number(video_title, season_number),
        }

    # General helpers

    def _is_logged_in(self, webpage):
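        # Heuristic: logged-in pages contain a link to /my-account.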
        return get_element_by_attribute('href', '/my-account', webpage) is not None

    def _is_collection(self, webpage):
        return get_element_by_class('group/collection', webpage) is not None

    def _breadcrumbs(self, webpage):
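        # Returns the breadcrumb trail as a list of strings, or None if the
        # page has no breadcrumbs. ElementTree only accepts well-formed markup,
        # so this assumes the breadcrumb fragment parses as XML.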
        if breadcrumb_container := get_element_html_by_class('breadcrumbs', webpage):
            root = ElementTree.fromstring(breadcrumb_container)
            return [''.join(e.itertext()).strip() for e in root.findall('.//li')]

    # Single-video helpers

    def _extract_video_src(self, webpage):
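        # The <castable-video> element's src attribute holds the stream
        # manifest URL, which is fed to the m3u8 extractor.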
        try:
            _, html = get_element_text_and_html_by_tag('castable-video', webpage)
        except compat_HTMLParseError:
            # tag absent
            return None
        return extract_attributes(html).get('src')

    def _extract_video_title(self, webpage):
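        # The Mux player element carries the display title in its
        # metadata-video-title attribute.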
        try:
            _, mux_video = get_element_text_and_html_by_tag('mux-video', webpage)
        except compat_HTMLParseError:
            return None
        return extract_attributes(mux_video).get('metadata-video-title')

    def _air_date(self, webpage):
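        # The episode header contains text like 'Air Date: 2/7/2013'
        # (month first); unified_strdate normalizes it to YYYYMMDD.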
        if episode_info_container := get_element_html_by_id('hei-episode-title', webpage):
            if release_date_str := get_element_by_class('text-sm', episode_info_container):
                if matches := re.match(r'\s+Air Date: (?P<date>[\w/]+)', release_date_str):
                    return unified_strdate(matches.group('date'), day_first=False)