test: Add 8 people voice tests
This commit is contained in:
@@ -46,75 +46,6 @@ export async function installWebRTCTracking(page: Page): Promise<void> {
|
||||
(window as any).RTCPeerConnection.prototype = OriginalRTCPeerConnection.prototype;
|
||||
Object.setPrototypeOf((window as any).RTCPeerConnection, OriginalRTCPeerConnection);
|
||||
|
||||
// Patch getUserMedia to use an AudioContext oscillator for audio
// instead of the hardware capture device. Chromium's fake audio
// device intermittently fails to produce frames after renegotiation.
const origGetUserMedia = navigator.mediaDevices.getUserMedia.bind(navigator.mediaDevices);

navigator.mediaDevices.getUserMedia = async (constraints?: MediaStreamConstraints) => {
  const wantsAudio = !!constraints?.audio;

  // No audio requested: defer entirely to the real implementation.
  if (!wantsAudio) {
    return origGetUserMedia(constraints);
  }

  // Get the original stream (may include video)
  const originalStream = await origGetUserMedia(constraints);
  // Synthesize a 2-second looping white-noise buffer at the context's
  // native sample rate so the outbound track always carries frames.
  const audioCtx = new AudioContext();
  const noiseBuffer = audioCtx.createBuffer(1, audioCtx.sampleRate * 2, audioCtx.sampleRate);
  const noiseData = noiseBuffer.getChannelData(0);

  // Fill with uniform white noise in [-0.18, 0.18].
  for (let sampleIndex = 0; sampleIndex < noiseData.length; sampleIndex++) {
    noiseData[sampleIndex] = (Math.random() * 2 - 1) * 0.18;
  }

  // Route noise -> gain -> MediaStreamDestination to obtain a live audio track.
  const source = audioCtx.createBufferSource();
  const gain = audioCtx.createGain();

  source.buffer = noiseBuffer;
  source.loop = true;
  gain.gain.value = 0.12;

  const dest = audioCtx.createMediaStreamDestination();

  source.connect(gain);
  gain.connect(dest);
  source.start();

  // Autoplay policy may start the context suspended; resume is best-effort
  // here (resumeSyntheticAudioContexts handles the user-gesture case).
  if (audioCtx.state === 'suspended') {
    try {
      await audioCtx.resume();
    } catch {}
  }

  const synthAudioTrack = dest.stream.getAudioTracks()[0];
  const resultStream = new MediaStream();

  // Record the context/source so test helpers can resume or clean them up.
  syntheticMediaResources.push({ audioCtx, source });

  resultStream.addTrack(synthAudioTrack);

  // Keep any video tracks from the original stream
  for (const videoTrack of originalStream.getVideoTracks()) {
    resultStream.addTrack(videoTrack);
  }

  // Stop original audio tracks since we're not using them
  for (const track of originalStream.getAudioTracks()) {
    track.stop();
  }

  // When the synthetic track ends, tear down its audio graph to avoid
  // leaking AudioContexts across tests.
  synthAudioTrack.addEventListener('ended', () => {
    try {
      source.stop();
    } catch {}

    void audioCtx.close().catch(() => {});
  }, { once: true });

  return resultStream;
};
|
||||
|
||||
// Patch getDisplayMedia to return a synthetic screen share stream
|
||||
// (canvas-based video + 880Hz oscillator audio) so the browser
|
||||
// picker dialog is never shown.
|
||||
@@ -218,6 +149,177 @@ export async function isPeerStillConnected(page: Page): Promise<boolean> {
|
||||
);
|
||||
}
|
||||
|
||||
/** Returns the number of tracked peer connections in `connected` state. */
|
||||
export async function getConnectedPeerCount(page: Page): Promise<number> {
|
||||
return page.evaluate(
|
||||
() => ((window as any).__rtcConnections as RTCPeerConnection[] | undefined)?.filter(
|
||||
(pc) => pc.connectionState === 'connected'
|
||||
).length ?? 0
|
||||
);
|
||||
}
|
||||
|
||||
/** Wait until the expected number of peer connections are `connected`. */
|
||||
export async function waitForConnectedPeerCount(page: Page, expectedCount: number, timeout = 45_000): Promise<void> {
|
||||
await page.waitForFunction(
|
||||
(count) => ((window as any).__rtcConnections as RTCPeerConnection[] | undefined)?.filter(
|
||||
(pc) => pc.connectionState === 'connected'
|
||||
).length === count,
|
||||
expectedCount,
|
||||
{ timeout }
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Resume all suspended AudioContext instances created by the synthetic
|
||||
* media patch. Uses CDP `Runtime.evaluate` with `userGesture: true` so
|
||||
* Chrome treats the call as a user-gesture — this satisfies the autoplay
|
||||
* policy that otherwise blocks `AudioContext.resume()`.
|
||||
*/
|
||||
export async function resumeSyntheticAudioContexts(page: Page): Promise<number> {
|
||||
const cdpSession = await page.context().newCDPSession(page);
|
||||
|
||||
try {
|
||||
const result = await cdpSession.send('Runtime.evaluate', {
|
||||
expression: `(async () => {
|
||||
const resources = window.__rtcSyntheticMediaResources;
|
||||
if (!resources) return 0;
|
||||
let resumed = 0;
|
||||
for (const r of resources) {
|
||||
if (r.audioCtx.state === 'suspended') {
|
||||
await r.audioCtx.resume();
|
||||
resumed++;
|
||||
}
|
||||
}
|
||||
return resumed;
|
||||
})()`,
|
||||
awaitPromise: true,
|
||||
userGesture: true
|
||||
});
|
||||
|
||||
return result.result.value ?? 0;
|
||||
} finally {
|
||||
await cdpSession.detach();
|
||||
}
|
||||
}
|
||||
|
||||
/** Snapshot of audio RTP counters for one tracked RTCPeerConnection. */
interface PerPeerAudioStat {
  // `RTCPeerConnection.connectionState` at sampling time.
  connectionState: string;
  // Cumulative bytes received across all inbound audio RTP streams.
  inboundBytes: number;
  // Cumulative packets received across all inbound audio RTP streams.
  inboundPackets: number;
  // Cumulative bytes sent across all outbound audio RTP streams.
  outboundBytes: number;
  // Cumulative packets sent across all outbound audio RTP streams.
  outboundPackets: number;
}
|
||||
|
||||
/** Get per-peer audio stats for every tracked RTCPeerConnection. */
|
||||
export async function getPerPeerAudioStats(page: Page): Promise<PerPeerAudioStat[]> {
|
||||
return page.evaluate(async () => {
|
||||
const connections = (window as any).__rtcConnections as RTCPeerConnection[] | undefined;
|
||||
|
||||
if (!connections?.length) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const snapshots: PerPeerAudioStat[] = [];
|
||||
|
||||
for (const pc of connections) {
|
||||
let inboundBytes = 0;
|
||||
let inboundPackets = 0;
|
||||
let outboundBytes = 0;
|
||||
let outboundPackets = 0;
|
||||
|
||||
try {
|
||||
const stats = await pc.getStats();
|
||||
|
||||
stats.forEach((report: any) => {
|
||||
const kind = report.kind ?? report.mediaType;
|
||||
|
||||
if (report.type === 'outbound-rtp' && kind === 'audio') {
|
||||
outboundBytes += report.bytesSent ?? 0;
|
||||
outboundPackets += report.packetsSent ?? 0;
|
||||
}
|
||||
|
||||
if (report.type === 'inbound-rtp' && kind === 'audio') {
|
||||
inboundBytes += report.bytesReceived ?? 0;
|
||||
inboundPackets += report.packetsReceived ?? 0;
|
||||
}
|
||||
});
|
||||
} catch {
|
||||
// Closed connection.
|
||||
}
|
||||
|
||||
snapshots.push({
|
||||
connectionState: pc.connectionState,
|
||||
inboundBytes,
|
||||
inboundPackets,
|
||||
outboundBytes,
|
||||
outboundPackets
|
||||
});
|
||||
}
|
||||
|
||||
return snapshots;
|
||||
});
|
||||
}
|
||||
|
||||
/**
 * Wait until every connected peer connection shows inbound and outbound
 * audio flow.
 *
 * Polls `getPerPeerAudioStats` every `pollIntervalMs` and compares each
 * sample against the previous one: a peer counts as "flowing" once its
 * inbound AND outbound counters have both grown between two consecutive
 * samples. Confirmation is sticky per peer index, so a peer that pauses
 * briefly in a later interval is not un-confirmed.
 *
 * @param page - page whose tracked RTCPeerConnections are inspected
 * @param expectedConnectedPeers - how many connected peers must show
 *   bidirectional flow before the wait resolves
 * @param timeoutMs - overall deadline for the wait
 * @param pollIntervalMs - delay between stat samples
 * @throws Error when the deadline passes without enough confirmed peers
 */
export async function waitForAllPeerAudioFlow(
  page: Page,
  expectedConnectedPeers: number,
  timeoutMs = 45_000,
  pollIntervalMs = 1_000
): Promise<void> {
  const deadline = Date.now() + timeoutMs;
  // Track which peer indices have been confirmed flowing at least once.
  // This prevents a peer from being missed just because it briefly paused
  // during one specific poll interval.
  const confirmedFlowing = new Set<number>();

  // Baseline sample; flow is detected as counter growth relative to this.
  let previous = await getPerPeerAudioStats(page);

  while (Date.now() < deadline) {
    await page.waitForTimeout(pollIntervalMs);
    const current = await getPerPeerAudioStats(page);
    const connectedPeers = current.filter((stat) => stat.connectionState === 'connected');

    // Only evaluate flow once enough peers are connected at all.
    if (connectedPeers.length >= expectedConnectedPeers) {
      for (let index = 0; index < current.length; index++) {
        const curr = current[index];

        if (!curr || curr.connectionState !== 'connected') {
          continue;
        }

        // A peer index seen for the first time compares against an
        // all-zero baseline, so any observed counters count as growth.
        const prev = previous[index] ?? {
          connectionState: 'new',
          inboundBytes: 0,
          inboundPackets: 0,
          outboundBytes: 0,
          outboundPackets: 0
        };
        // Growth in bytes OR packets counts, per direction.
        const inboundFlowing = curr.inboundBytes > prev.inboundBytes || curr.inboundPackets > prev.inboundPackets;
        const outboundFlowing = curr.outboundBytes > prev.outboundBytes || curr.outboundPackets > prev.outboundPackets;

        if (inboundFlowing && outboundFlowing) {
          confirmedFlowing.add(index);
        }
      }

      // Check if enough peers have been confirmed across all samples
      const connectedIndices = current
        .map((stat, idx) => stat.connectionState === 'connected' ? idx : -1)
        .filter((idx) => idx >= 0);
      const confirmedCount = connectedIndices.filter((idx) => confirmedFlowing.has(idx)).length;

      if (confirmedCount >= expectedConnectedPeers) {
        return;
      }
    }

    previous = current;
  }

  throw new Error(`Timed out waiting for ${expectedConnectedPeers} peers with bidirectional audio flow`);
}
|
||||
|
||||
/**
|
||||
* Get outbound and inbound audio RTP stats aggregated across all peer
|
||||
* connections. Uses a per-connection high water mark stored on `window` so
|
||||
|
||||
Reference in New Issue
Block a user