feat: Add TURN server support
All checks were successful
Queue Release Build / prepare (push) Successful in 15s
Deploy Web Apps / deploy (push) Successful in 5m35s
Queue Release Build / build-linux (push) Successful in 24m45s
Queue Release Build / build-windows (push) Successful in 13m52s
Queue Release Build / finalize (push) Successful in 23s

This commit is contained in:
2026-04-18 21:27:04 +02:00
parent 167c45ba8d
commit 44588e8789
60 changed files with 2404 additions and 365 deletions

View File

@@ -26,7 +26,7 @@ interface SeededEndpointStorageState {
}
function buildSeededEndpointStorageState(
-  endpointsOrPort: ReadonlyArray<SeededEndpointInput> | number = Number(process.env.TEST_SERVER_PORT) || 3099
+  endpointsOrPort: readonly SeededEndpointInput[] | number = Number(process.env.TEST_SERVER_PORT) || 3099
): SeededEndpointStorageState {
const endpoints = Array.isArray(endpointsOrPort)
? endpointsOrPort.map((endpoint) => ({
@@ -81,7 +81,7 @@ export async function installTestServerEndpoint(
export async function installTestServerEndpoints(
context: BrowserContext,
-  endpoints: ReadonlyArray<SeededEndpointInput>
+  endpoints: readonly SeededEndpointInput[]
): Promise<void> {
const storageState = buildSeededEndpointStorageState(endpoints);
@@ -111,7 +111,7 @@ export async function seedTestServerEndpoint(
export async function seedTestServerEndpoints(
page: Page,
-  endpoints: ReadonlyArray<SeededEndpointInput>
+  endpoints: readonly SeededEndpointInput[]
): Promise<void> {
const storageState = buildSeededEndpointStorageState(endpoints);

View File

@@ -129,6 +129,48 @@ export async function installWebRTCTracking(page: Page): Promise<void> {
/**
 * Ensure every `AudioContext` created by the page auto-resumes so that
 * the input-gain Web Audio pipeline (`source -> gain -> destination`) never
 * stalls in the "suspended" state.
 *
 * On Linux with multiple headless Chromium instances, `new AudioContext()`
 * can start suspended without a user-gesture gate, causing the media
 * pipeline to emit only a single RTP packet.
 *
 * Call once per page, BEFORE navigating, alongside `installWebRTCTracking`.
 *
 * @param page - the Playwright page whose init scripts receive the patch
 */
export async function installAutoResumeAudioContext(page: Page): Promise<void> {
  await page.addInitScript(() => {
    const OrigAudioContext = window.AudioContext;
    (window as any).AudioContext = function (this: AudioContext, ...args: any[]) {
      const ctx: AudioContext = new OrigAudioContext(...args);
      // Track all created AudioContexts for test diagnostics
      const tracked = ((window as any).__trackedAudioContexts ??= []) as AudioContext[];
      tracked.push(ctx);
      // Resume immediately when the context is born suspended.
      if (ctx.state === 'suspended') {
        ctx.resume().catch(() => { /* noop */ });
      }
      // Also catch transitions to suspended after creation
      ctx.addEventListener('statechange', () => {
        if (ctx.state === 'suspended') {
          ctx.resume().catch(() => { /* noop */ });
        }
      });
      return ctx;
    } as any;
    // Share the native prototype so `instanceof AudioContext` still holds,
    // and inherit static members from the original constructor.
    (window as any).AudioContext.prototype = OrigAudioContext.prototype;
    Object.setPrototypeOf((window as any).AudioContext, OrigAudioContext);
  });
}
/**
 * Wait until at least one RTCPeerConnection reaches the 'connected' state.
 */
export async function waitForPeerConnected(page: Page, timeout = 30_000): Promise<void> {
await page.waitForFunction(
() => (window as any).__rtcConnections?.some(
@@ -172,7 +214,7 @@ export async function waitForConnectedPeerCount(page: Page, expectedCount: numbe
/**
* Resume all suspended AudioContext instances created by the synthetic
* media patch. Uses CDP `Runtime.evaluate` with `userGesture: true` so
- * Chrome treats the call as a user-gesture this satisfies the autoplay
+ * Chrome treats the call as a user-gesture - this satisfies the autoplay
* policy that otherwise blocks `AudioContext.resume()`.
*/
export async function resumeSyntheticAudioContexts(page: Page): Promise<number> {