src/controller/timeline-controller.ts

import { Events } from '../events';
import Cea608Parser, { CaptionScreen } from '../utils/cea-608-parser';
import OutputFilter from '../utils/output-filter';
import { parseWebVTT } from '../utils/webvtt-parser';
import { logger } from '../utils/logger';
import { sendAddTrackEvent, clearCurrentCues } from '../utils/texttrack-utils';
import { parseIMSC1, IMSC1_CODEC } from '../utils/imsc1-ttml-parser';
import Fragment from '../loader/fragment';
import {
  FragParsingUserdataData,
  FragLoadedData,
  FragDecryptedData,
  MediaAttachingData,
  ManifestLoadedData,
  InitPTSFoundData,
  SubtitleTracksUpdatedData
} from '../types/events';
import type Hls from '../hls';
import type { ComponentAPI } from '../types/component-api';
import type { HlsConfig } from '../config';
import type { CuesInterface } from '../utils/cues';
import type { MediaPlaylist } from '../types/media-playlist';
import type { VTTCCs } from '../types/vtt';

type TrackProperties = {
  label: string,
  languageCode: string,
  media?: MediaPlaylist
};

type NonNativeCaptionsTrack = {
  _id?: string,
  label: string,
  kind: string,
  default: boolean,
  closedCaptions?: MediaPlaylist,
  subtitleTrack?: MediaPlaylist
};

export class TimelineController implements ComponentAPI {
  private hls: Hls;
  private media: HTMLMediaElement | null = null;
  private config: HlsConfig;
  private enabled: boolean = true;
  private Cues: CuesInterface;
  private textTracks: Array<TextTrack> = [];
  private tracks: Array<MediaPlaylist> = [];
  private initPTS: Array<number> = [];
  private timescale: Array<number> = [];
  private unparsedVttFrags: Array<FragLoadedData | FragDecryptedData> = [];
  private captionsTracks: Record<string, TextTrack> = {};
  private nonNativeCaptionsTracks: Record<string, NonNativeCaptionsTrack> = {};
  private readonly cea608Parser1!: Cea608Parser;
  private readonly cea608Parser2!: Cea608Parser;
  private lastSn: number = -1;
  private prevCC: number = -1;
  private vttCCs: VTTCCs = newVTTCCs();
  private captionsProperties: {
    textTrack1: TrackProperties
    textTrack2: TrackProperties
    textTrack3: TrackProperties
    textTrack4: TrackProperties
  };

  constructor (hls: Hls) {
    this.hls = hls;
    this.config = hls.config;
    this.Cues = hls.config.cueHandler;

    this.captionsProperties = {
      textTrack1: {
        label: this.config.captionsTextTrack1Label,
        languageCode: this.config.captionsTextTrack1LanguageCode
      },
      textTrack2: {
        label: this.config.captionsTextTrack2Label,
        languageCode: this.config.captionsTextTrack2LanguageCode
      },
      textTrack3: {
        label: this.config.captionsTextTrack3Label,
        languageCode: this.config.captionsTextTrack3LanguageCode
      },
      textTrack4: {
        label: this.config.captionsTextTrack4Label,
        languageCode: this.config.captionsTextTrack4LanguageCode
      }
    };

    if (this.config.enableCEA708Captions) {
      const channel1 = new OutputFilter(this, 'textTrack1');
      const channel2 = new OutputFilter(this, 'textTrack2');
      const channel3 = new OutputFilter(this, 'textTrack3');
      const channel4 = new OutputFilter(this, 'textTrack4');
      this.cea608Parser1 = new Cea608Parser(1, channel1, channel2);
      this.cea608Parser2 = new Cea608Parser(3, channel3, channel4);
    }

    this._registerListeners();
  }

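  // The two CEA-608 parsers split the caption stream by field: cea608Parser1
  // handles field 1 (channels CC1/CC2 -> textTrack1/textTrack2) and
  // cea608Parser2 handles field 2 (channels CC3/CC4 -> textTrack3/textTrack4);
  // extractCea608Data() below performs the per-field split.
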
  private _registerListeners (): void {
    const { hls } = this;
    hls.on(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.on(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.on(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.on(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.on(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.on(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.on(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.on(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
  }

  private _unregisterListeners (): void {
    const { hls } = this;
    hls.off(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.off(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.off(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.off(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.off(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.off(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.off(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.off(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
  }

  addCues (trackName: string, startTime: number, endTime: number, screen: CaptionScreen, cueRanges: Array<[number, number]>) {
    // skip cues which overlap more than 50% with previously parsed time ranges
    let merged = false;
    for (let i = cueRanges.length; i--;) {
      const cueRange = cueRanges[i];
      const overlap = intersection(cueRange[0], cueRange[1], startTime, endTime);
      if (overlap >= 0) {
        cueRange[0] = Math.min(cueRange[0], startTime);
        cueRange[1] = Math.max(cueRange[1], endTime);
        merged = true;
        if ((overlap / (endTime - startTime)) > 0.5) {
          return;
        }
      }
    }
    if (!merged) {
      cueRanges.push([startTime, endTime]);
    }

    if (this.config.renderTextTracksNatively) {
      this.Cues.newCue(this.captionsTracks[trackName], startTime, endTime, screen);
    } else {
      const cues = this.Cues.newCue(null, startTime, endTime, screen);
      this.hls.trigger(Events.CUES_PARSED, { type: 'captions', cues, track: trackName });
    }
  }

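  // Worked example of the merge rule above: with an existing range [0, 10] and a
  // new cue [8, 12], intersection() returns min(10, 12) - max(0, 8) = 2, so the
  // range is widened to [0, 12]; since 2 / (12 - 8) = 0.5 is not above the 0.5
  // threshold, the cue is still rendered. A new cue [8, 11] would overlap by
  // 2 / 3 > 0.5 and be skipped as a likely duplicate.
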
  // Triggered when an initial PTS is found; used for synchronisation of WebVTT.
  onInitPtsFound (event: Events.INIT_PTS_FOUND, { frag, id, initPTS, timescale }: InitPTSFoundData) {
    const { unparsedVttFrags } = this;
    if (id === 'main') {
      this.initPTS[frag.cc] = initPTS;
      this.timescale[frag.cc] = timescale;
    }

    // Due to asynchronous processing, the initial PTS may arrive after the first
    // VTT fragments have loaded. Parse any unparsed fragments upon receiving the
    // initial PTS.
    if (unparsedVttFrags.length) {
      this.unparsedVttFrags = [];
      unparsedVttFrags.forEach(frag => {
        this.onFragLoaded(Events.FRAG_LOADED, frag as FragLoadedData);
      });
    }
  }

  getExistingTrack (trackName: string): TextTrack | null {
    const { media } = this;
    if (media) {
      for (let i = 0; i < media.textTracks.length; i++) {
        const textTrack = media.textTracks[i];
        if (textTrack[trackName]) {
          return textTrack;
        }
      }
    }
    return null;
  }

  createCaptionsTrack (trackName: string) {
    if (this.config.renderTextTracksNatively) {
      this.createNativeTrack(trackName);
    } else {
      this.createNonNativeTrack(trackName);
    }
  }

  createNativeTrack (trackName: string) {
    if (this.captionsTracks[trackName]) {
      return;
    }
    const { captionsProperties, captionsTracks, media } = this;
    const { label, languageCode } = captionsProperties[trackName];
    // Enable reuse of existing text track.
    const existingTrack = this.getExistingTrack(trackName);
    if (!existingTrack) {
      const textTrack = this.createTextTrack('captions', label, languageCode);
      if (textTrack) {
        // Set a special property on the track so we know it's managed by Hls.js
        textTrack[trackName] = true;
        captionsTracks[trackName] = textTrack;
      }
    } else {
      captionsTracks[trackName] = existingTrack;
      clearCurrentCues(captionsTracks[trackName]);
      sendAddTrackEvent(captionsTracks[trackName], media as HTMLMediaElement);
    }
  }

  createNonNativeTrack (trackName: string) {
    if (this.nonNativeCaptionsTracks[trackName]) {
      return;
    }
    // Create a list of a single track for the provider to consume
    const trackProperties: TrackProperties = this.captionsProperties[trackName];
    if (!trackProperties) {
      return;
    }
    const label = trackProperties.label as string;
    const track = {
      _id: trackName,
      label,
      kind: 'captions',
      default: trackProperties.media ? !!trackProperties.media.default : false,
      closedCaptions: trackProperties.media
    };
    this.nonNativeCaptionsTracks[trackName] = track;
    this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, { tracks: [track] });
  }

  createTextTrack (kind: TextTrackKind, label: string, lang?: string): TextTrack | undefined {
    const media = this.media;
    if (!media) {
      return;
    }
    return media.addTextTrack(kind, label, lang);
  }

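  // When renderTextTracksNatively is false, no TextTrack objects are created here.
  // Instead, the application is expected to listen for NON_NATIVE_TEXT_TRACKS_FOUND
  // and CUES_PARSED and render the cues itself (see the illustrative consumer
  // sketch at the end of this file).
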
  destroy () {
    this._unregisterListeners();
  }

  onMediaAttaching (event: Events.MEDIA_ATTACHING, data: MediaAttachingData) {
    this.media = data.media;
    this._cleanTracks();
  }

  onMediaDetaching () {
    const { captionsTracks } = this;
    Object.keys(captionsTracks).forEach(trackName => {
      clearCurrentCues(captionsTracks[trackName]);
      delete captionsTracks[trackName];
    });
    this.nonNativeCaptionsTracks = {};
  }

  onManifestLoading () {
    this.lastSn = -1; // Detect discontinuity in fragment parsing
    this.prevCC = -1;
    this.vttCCs = newVTTCCs(); // Detect discontinuity in subtitle manifests
    this._cleanTracks();
    this.tracks = [];
    this.captionsTracks = {};
    this.nonNativeCaptionsTracks = {};
    this.textTracks = [];
    this.unparsedVttFrags = this.unparsedVttFrags || [];
    this.initPTS = [];
    this.timescale = [];
    if (this.cea608Parser1 && this.cea608Parser2) {
      this.cea608Parser1.reset();
      this.cea608Parser2.reset();
    }
  }

  _cleanTracks () {
    // clear outdated subtitles
    const { media } = this;
    if (!media) {
      return;
    }
    const textTracks = media.textTracks;
    if (textTracks) {
      for (let i = 0; i < textTracks.length; i++) {
        clearCurrentCues(textTracks[i]);
      }
    }
  }

  onSubtitleTracksUpdated (event: Events.SUBTITLE_TRACKS_UPDATED, data: SubtitleTracksUpdatedData) {
    this.textTracks = [];
    const tracks: Array<MediaPlaylist> = data.subtitleTracks || [];
    const hasIMSC1 = tracks.some((track) => track.textCodec === IMSC1_CODEC);
    if (this.config.enableWebVTT || (hasIMSC1 && this.config.enableIMSC1)) {
      const sameTracks = this.tracks && tracks && this.tracks.length === tracks.length;
      this.tracks = tracks || [];

      if (this.config.renderTextTracksNatively) {
        const inUseTracks = this.media ? this.media.textTracks : [];

        this.tracks.forEach((track, index) => {
          let textTrack: TextTrack | undefined;
          if (index < inUseTracks.length) {
            let inUseTrack: TextTrack | null = null;

            for (let i = 0; i < inUseTracks.length; i++) {
              if (canReuseVttTextTrack(inUseTracks[i], track)) {
                inUseTrack = inUseTracks[i];
                break;
              }
            }

            // Reuse tracks with the same label, but do not reuse 608/708 tracks
            if (inUseTrack) {
              textTrack = inUseTrack;
            }
          }
          if (!textTrack) {
            textTrack = this.createTextTrack('subtitles', track.name, track.lang);
            if (textTrack) {
              textTrack.mode = 'disabled';
            }
          }
          if (textTrack) {
            (textTrack as any).groupId = track.groupId;
            this.textTracks.push(textTrack);
          }
        });
      } else if (!sameTracks && this.tracks && this.tracks.length) {
        // Create a list of tracks for the provider to consume
        const tracksList = this.tracks.map((track) => {
          return {
            label: track.name,
            kind: track.type.toLowerCase(),
            default: track.default,
            subtitleTrack: track
          };
        });
        this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, { tracks: tracksList });
      }
    }
  }

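  // Track reuse above is keyed on the track label via canReuseVttTextTrack();
  // tracks flagged as textTrack1/textTrack2 (608/708 captions created by
  // createNativeTrack) are never reused for WebVTT subtitles.
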
  onManifestLoaded (event: Events.MANIFEST_LOADED, data: ManifestLoadedData) {
    if (this.config.enableCEA708Captions && data.captions) {
      data.captions.forEach(captionsTrack => {
        const instreamIdMatch = /(?:CC|SERVICE)([1-4])/.exec(captionsTrack.instreamId as string);
        if (!instreamIdMatch) {
          return;
        }
        const trackName = `textTrack${instreamIdMatch[1]}`;
        const trackProperties: TrackProperties = this.captionsProperties[trackName];
        if (!trackProperties) {
          return;
        }
        trackProperties.label = captionsTrack.name;
        if (captionsTrack.lang) { // optional attribute
          trackProperties.languageCode = captionsTrack.lang;
        }
        trackProperties.media = captionsTrack;
      });
    }
  }

  onFragLoaded (event: Events.FRAG_LOADED, data: FragLoadedData) {
    const { frag, payload } = data;
    const { cea608Parser1, cea608Parser2, initPTS, lastSn, unparsedVttFrags } = this;
    if (frag.type === 'main') {
      const sn = frag.sn;
      // if this frag isn't contiguous, clear the parser so cues with bad start/end times aren't added to the textTrack
      if (sn !== lastSn + 1) {
        if (cea608Parser1 && cea608Parser2) {
          cea608Parser1.reset();
          cea608Parser2.reset();
        }
      }
      this.lastSn = sn as number;
    } else if (frag.type === 'subtitle') { // If fragment is subtitle type, parse as WebVTT.
      if (payload.byteLength) {
        // We need an initial synchronisation PTS. Store fragments as long as none has arrived.
        if (!Number.isFinite(initPTS[frag.cc])) {
          unparsedVttFrags.push(data);
          if (initPTS.length) {
            // finish unsuccessfully, otherwise the subtitle-stream-controller could be blocked from loading new frags.
            this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, { success: false, frag, error: new Error('Missing initial subtitle PTS') });
          }
          return;
        }

        const decryptData = frag.decryptdata;
        // If the subtitles are not encrypted, parse VTTs now. Otherwise, we need to wait.
        if ((decryptData == null) || (decryptData.key == null) || (decryptData.method !== 'AES-128')) {
          const trackPlaylistMedia = this.tracks[frag.level];
          const vttCCs = this.vttCCs;
          if (!vttCCs[frag.cc]) {
            vttCCs[frag.cc] = { start: frag.start, prevCC: this.prevCC, new: true };
            this.prevCC = frag.cc;
          }
          if (trackPlaylistMedia && trackPlaylistMedia.textCodec === IMSC1_CODEC) {
            this._parseIMSC1(frag, payload);
          } else {
            this._parseVTTs(frag, payload, vttCCs);
          }
        }
      } else {
        // In case there is no payload, finish unsuccessfully.
        this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, { success: false, frag, error: new Error('Empty subtitle payload') });
      }
    }
  }

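  // Subtitle fragments that arrive before INIT_PTS_FOUND are buffered in
  // unparsedVttFrags and replayed from onInitPtsFound(). Signalling
  // SUBTITLE_FRAG_PROCESSED with success: false in the meantime keeps the
  // subtitle-stream-controller free to load further fragments.
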
  private _parseIMSC1 (frag: Fragment, payload: ArrayBuffer) {
    const hls = this.hls;
    parseIMSC1(payload, this.initPTS[frag.cc], this.timescale[frag.cc], (cues) => {
      this._appendCues(cues, frag.level);
      hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, { success: true, frag: frag });
    }, (error) => {
      logger.log(`Failed to parse IMSC1: ${error}`);
      hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, { success: false, frag: frag, error });
    });
  }

  private _parseVTTs (frag: Fragment, payload: ArrayBuffer, vttCCs: any) {
    const hls = this.hls;
    // Parse the WebVTT file contents.
    parseWebVTT(payload, this.initPTS[frag.cc], this.timescale[frag.cc], vttCCs, frag.cc, (cues) => {
      this._appendCues(cues, frag.level);
      hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, { success: true, frag: frag });
    }, (error) => {
      this._fallbackToIMSC1(frag, payload);
      // Something went wrong while parsing. Trigger event with success false.
      logger.log(`Failed to parse VTT cue: ${error}`);
      hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, { success: false, frag: frag, error });
    });
  }

  private _fallbackToIMSC1 (frag: Fragment, payload: ArrayBuffer) {
    // If textCodec is unknown, try parsing as IMSC1. Set textCodec based on the result
    const trackPlaylistMedia = this.tracks[frag.level];
    if (!trackPlaylistMedia.textCodec) {
      parseIMSC1(payload, this.initPTS[frag.cc], this.timescale[frag.cc], () => {
        trackPlaylistMedia.textCodec = IMSC1_CODEC;
        this._parseIMSC1(frag, payload);
      }, () => {
        trackPlaylistMedia.textCodec = 'wvtt';
      });
    }
  }

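  // The IMSC1 probe above runs at most once per track: on success the track's
  // textCodec is pinned to IMSC1_CODEC and the payload is re-parsed as IMSC1;
  // on failure it is pinned to 'wvtt', so subsequent VTT parse errors do not
  // trigger another probe.
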
  private _appendCues (cues, fragLevel) {
    const hls = this.hls;
    if (this.config.renderTextTracksNatively) {
      const textTrack = this.textTracks[fragLevel];
      // WebVTTParser.parse is an async method: if the currently selected text track's
      // mode is set to "disabled" before parsing completes, don't try to access
      // textTrack.cues.getCueById, as cues will be null and calling getCueById on it
      // would throw an exception.
      // Because we return early when the mode is disabled, `cues` can safely be
      // asserted non-null below.
      if (textTrack.mode === 'disabled') {
        return;
      }
      // Sometimes there are cue overlaps on segmented vtts so the same
      // cue can appear more than once in different vtt files.
      // This avoids showing duplicate cues with the same timecode and text.
      cues.filter(cue => !textTrack.cues!.getCueById(cue.id)).forEach(cue => {
        try {
          textTrack.addCue(cue);
          if (!textTrack.cues!.getCueById(cue.id)) {
            throw new Error(`addCue failed for: ${cue}`);
          }
        } catch (err) {
          logger.debug(`Failed to add cue: ${err}`);
          const textTrackCue = new (self.TextTrackCue as any)(cue.startTime, cue.endTime, cue.text);
          textTrackCue.id = cue.id;
          textTrack.addCue(textTrackCue);
        }
      });
    } else {
      const currentTrack = this.tracks[fragLevel];
      const track = currentTrack.default ? 'default' : 'subtitles' + fragLevel;
      hls.trigger(Events.CUES_PARSED, { type: 'subtitles', cues, track });
    }
  }

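  // The catch branch above rebuilds the cue with the generic TextTrackCue
  // constructor, presumably to work around browsers where adding the
  // parser-built VTTCue fails, or where the added cue never appears in
  // textTrack.cues.
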
  onFragDecrypted (event: Events.FRAG_DECRYPTED, data: FragDecryptedData) {
    const { frag } = data;
    if (frag.type === 'subtitle') {
      if (!Number.isFinite(this.initPTS[frag.cc])) {
        this.unparsedVttFrags.push(data as unknown as FragLoadedData);
        return;
      }
      this.onFragLoaded(Events.FRAG_LOADED, data as unknown as FragLoadedData);
    }
  }

  onSubtitleTracksCleared () {
    this.tracks = [];
    this.captionsTracks = {};
  }

  onFragParsingUserdata (event: Events.FRAG_PARSING_USERDATA, data: FragParsingUserdataData) {
    const { cea608Parser1, cea608Parser2 } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }

    // If the event contains captions (found in the bytes property), push all bytes into the parser immediately.
    // It will create the proper timestamps based on the PTS value.
    for (let i = 0; i < data.samples.length; i++) {
      const ccBytes = data.samples[i].bytes;
      if (ccBytes) {
        const ccdatas = this.extractCea608Data(ccBytes);
        cea608Parser1.addData(data.samples[i].pts, ccdatas[0]);
        cea608Parser2.addData(data.samples[i].pts, ccdatas[1]);
      }
    }
  }

  extractCea608Data (byteArray: Uint8Array): number[][] {
    const count = byteArray[0] & 31;
    let position = 2;
    const actualCCBytes: number[][] = [[], []];

    for (let j = 0; j < count; j++) {
      const tmpByte = byteArray[position++];
      const ccbyte1 = 0x7F & byteArray[position++];
      const ccbyte2 = 0x7F & byteArray[position++];
      const ccValid = (4 & tmpByte) !== 0;
      const ccType = 3 & tmpByte;

      if (ccbyte1 === 0 && ccbyte2 === 0) {
        continue;
      }

      if (ccValid) {
        if (ccType === 0 || ccType === 1) {
          actualCCBytes[ccType].push(ccbyte1);
          actualCCBytes[ccType].push(ccbyte2);
        }
      }
    }
    return actualCCBytes;
  }
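
  // cc_data layout assumed by extractCea608Data (standard CEA-708 user data):
  //   byteArray[0] & 0x1F          -> cc_count (number of 3-byte constructs)
  //   byteArray[1]                 -> skipped
  //   then, per construct:
  //     byte 0: flags              -> bit 2 = cc_valid, bits 0-1 = cc_type
  //     byte 1: cc_data_1 & 0x7F   -> parity bit stripped
  //     byte 2: cc_data_2 & 0x7F   -> parity bit stripped
  // Only cc_type 0 (NTSC field 1) and 1 (NTSC field 2) carry CEA-608 data; the
  // two fields are routed to cea608Parser1 and cea608Parser2 respectively.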
}

function canReuseVttTextTrack (inUseTrack, manifestTrack): boolean {
  return inUseTrack && inUseTrack.label === manifestTrack.name && !(inUseTrack.textTrack1 || inUseTrack.textTrack2);
}

function intersection (x1: number, x2: number, y1: number, y2: number): number {
  return Math.min(x2, y2) - Math.max(x1, y1);
}
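
// e.g. intersection(0, 10, 8, 12) === 2, while disjoint ranges yield a negative
// value; addCues() above treats a result of zero (ranges that merely touch) as
// an overlap and merges them.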

function newVTTCCs (): VTTCCs {
  return {
    ccOffset: 0,
    presentationOffset: 0,
    0: {
      start: 0,
      prevCC: -1,
      new: false
    }
  };
}
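
// --- Illustrative usage (sketch; not part of this module) ---
// A minimal non-native rendering consumer, assuming an Hls instance created with
// renderTextTracksNatively: false. Event names and payload shapes match the
// triggers above; registerTrackInUi and renderCue are hypothetical app helpers.
//
//   import Hls from 'hls.js';
//
//   const hls = new Hls({ renderTextTracksNatively: false });
//   hls.on(Hls.Events.NON_NATIVE_TEXT_TRACKS_FOUND, (event, data) => {
//     data.tracks.forEach((track) => registerTrackInUi(track));
//   });
//   hls.on(Hls.Events.CUES_PARSED, (event, data) => {
//     // data: { type: 'captions' | 'subtitles', cues, track }
//     data.cues.forEach((cue) => renderCue(data.track, cue));
//   });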