/* eslint-disable @typescript-eslint/member-ordering, @typescript-eslint/no-unused-vars, */
|
|
/**
|
|
* Manages local voice and camera media: getUserMedia, mute, deafen,
|
|
* attaching/detaching tracks to peer connections, bitrate tuning,
|
|
* and optional RNNoise-based noise reduction.
|
|
*/
|
|
import { Subject } from 'rxjs';
|
|
import { ChatEvent } from '../../../shared-kernel';
|
|
import { LatencyProfile } from '../realtime.constants';
|
|
import { PeerData } from '../realtime.types';
|
|
import { WebRTCLogger } from '../logging/webrtc-logger';
|
|
import { NoiseReductionManager } from './noise-reduction.manager';
|
|
import {
|
|
TRACK_KIND_AUDIO,
|
|
TRACK_KIND_VIDEO,
|
|
TRANSCEIVER_SEND_RECV,
|
|
TRANSCEIVER_RECV_ONLY,
|
|
TRANSCEIVER_INACTIVE,
|
|
AUDIO_BITRATE_MIN_BPS,
|
|
AUDIO_BITRATE_MAX_BPS,
|
|
KBPS_TO_BPS,
|
|
LATENCY_PROFILE_BITRATES,
|
|
VOLUME_MIN,
|
|
VOLUME_MAX,
|
|
VOICE_HEARTBEAT_INTERVAL_MS,
|
|
DEFAULT_DISPLAY_NAME,
|
|
P2P_TYPE_CAMERA_STATE,
|
|
P2P_TYPE_VOICE_STATE
|
|
} from '../realtime.constants';
|
|
|
|
/**
|
|
* Callbacks the MediaManager needs from the owning service / peer manager.
|
|
*/
|
|
/**
 * Callbacks the MediaManager needs from the owning service / peer manager.
 *
 * Supplied at construction and replaceable via `MediaManager.setCallbacks`
 * (the managers are created in a circular order).
 */
export interface MediaManagerCallbacks {
  /** All active peer connections, keyed by peer ID (for attaching tracks). */
  getActivePeers(): Map<string, PeerData>;

  /** Trigger SDP renegotiation for a specific peer. */
  renegotiate(peerId: string): Promise<void>;

  /** Broadcast a message to all peers. */
  broadcastMessage(event: ChatEvent): void;

  /** Get identify credentials (for broadcasting presence/state events). */
  getIdentifyOderId(): string;

  /** Display name included in broadcast events. */
  getIdentifyDisplayName(): string;

  /**
   * Push the current local camera state back into service-level signals.
   * Optional: omitted when the service does not track camera state.
   */
  setCameraEnabled?(enabled: boolean): void;
}
|
|
|
|
/**
 * Manages local voice and camera media: getUserMedia capture, mute/deafen
 * state, input/output gain, attaching/detaching tracks to peer connections,
 * bitrate tuning, presence heartbeats, and optional RNNoise noise reduction.
 */
export class MediaManager {
  /** The stream sent to peers (may be raw, denoised, or gain-processed). */
  private localMediaStream: MediaStream | null = null;

  /**
   * The raw microphone stream from `getUserMedia`.
   * Kept separately so noise reduction can be toggled
   * without re-acquiring the mic.
   */
  private rawMicStream: MediaStream | null = null;

  /** The dedicated local camera stream, always captured without audio. */
  private localCameraStream: MediaStream | null = null;

  /** Remote audio output volume (0-1, up to 2 for boosted playback). */
  private remoteAudioVolume = VOLUME_MAX;

  // -- Input gain pipeline (mic volume): source → gain → destination --
  /** The stream BEFORE gain is applied (for identity checks). */
  private preGainStream: MediaStream | null = null;
  // AudioContext backing the gain graph; null when no pipeline exists.
  private inputGainCtx: AudioContext | null = null;
  // Source node wrapping `preGainStream`.
  private inputGainSourceNode: MediaStreamAudioSourceNode | null = null;
  // Gain node whose `gain.value` mirrors `inputGainVolume`.
  private inputGainNode: GainNode | null = null;
  // Destination node whose `.stream` becomes `localMediaStream`.
  private inputGainDest: MediaStreamAudioDestinationNode | null = null;
  /** Normalised 0-1 input gain (1 = 100%). */
  private inputGainVolume = 1.0;

  /** Voice-presence heartbeat timer. */
  private voicePresenceTimer: ReturnType<typeof setInterval> | null = null;

  /** Emitted when voice is successfully connected. */
  readonly voiceConnected$ = new Subject<void>();

  /** RNNoise noise-reduction processor. */
  private readonly noiseReduction: NoiseReductionManager;

  /**
   * Tracks the user's *desired* noise-reduction state, independent of
   * whether the worklet is actually running. This lets us honour the
   * preference even when it is set before the mic stream is acquired.
   */
  private _noiseReductionDesired = true;

  // State tracked locally (the service exposes these via signals)
  private isVoiceActive = false;
  private isMicMuted = false;
  private isSelfDeafened = false;
  private isCameraActive = false;

  /** Current voice channel room ID (set when joining voice). */
  private currentVoiceRoomId: string | undefined;
  /** Current voice channel server ID (set when joining voice). */
  private currentVoiceServerId: string | undefined;
  // Peers currently permitted to receive local voice/camera media.
  private allowedVoicePeerIds = new Set<string>();

  /**
   * @param logger - Structured WebRTC logger used for all diagnostics.
   * @param callbacks - Hooks into the owning service / peer manager
   *   (replaceable later via {@link setCallbacks}).
   */
  constructor(
    private readonly logger: WebRTCLogger,
    private callbacks: MediaManagerCallbacks
  ) {
    this.noiseReduction = new NoiseReductionManager(logger);
  }
|
|
|
|
  /**
   * Replace the callback set at runtime.
   * Needed because of circular initialisation between managers: the final
   * callbacks only become available after all managers are constructed.
   *
   * @param nextCallbacks - The new callback interface to wire into this manager.
   */
  setCallbacks(nextCallbacks: MediaManagerCallbacks): void {
    this.callbacks = nextCallbacks;
  }
|
|
|
|
  /** Returns the current local media stream, or `null` if voice is disabled. */
  getLocalStream(): MediaStream | null {
    return this.localMediaStream;
  }

  /** Returns the raw microphone stream before processing, if available. */
  getRawMicStream(): MediaStream | null {
    return this.rawMicStream;
  }

  /** Returns the current local camera stream, or `null` if the camera is disabled. */
  getLocalCameraStream(): MediaStream | null {
    return this.localCameraStream;
  }

  /** Whether voice is currently active (mic captured). */
  getIsVoiceActive(): boolean {
    return this.isVoiceActive;
  }

  /** Whether the local microphone is muted. */
  getIsMicMuted(): boolean {
    return this.isMicMuted;
  }

  /** Whether the user has self-deafened (incoming audio suppressed). */
  getIsSelfDeafened(): boolean {
    return this.isSelfDeafened;
  }

  /** Whether the local camera is currently active. */
  getIsCameraActive(): boolean {
    return this.isCameraActive;
  }

  /** Current remote audio output volume (normalised 0-1, up to 2). */
  getRemoteAudioVolume(): number {
    return this.remoteAudioVolume;
  }

  /** The voice channel room ID, if currently in voice. */
  getCurrentVoiceRoomId(): string | undefined {
    return this.currentVoiceRoomId;
  }

  /** The voice channel server ID, if currently in voice. */
  getCurrentVoiceServerId(): string | undefined {
    return this.currentVoiceServerId;
  }

  /** Whether the user wants noise reduction (the worklet may not be running yet). */
  getIsNoiseReductionEnabled(): boolean {
    return this._noiseReductionDesired;
  }
|
|
|
|
setAllowedVoicePeerIds(peerIds: Iterable<string>): void {
|
|
const nextAllowed = new Set(peerIds);
|
|
|
|
if (this.areSetsEqual(this.allowedVoicePeerIds, nextAllowed)) {
|
|
return;
|
|
}
|
|
|
|
this.allowedVoicePeerIds = nextAllowed;
|
|
this.syncVoiceRouting();
|
|
this.syncCameraRouting();
|
|
}
|
|
|
|
refreshVoiceRouting(): void {
|
|
this.syncVoiceRouting();
|
|
this.syncCameraRouting();
|
|
}
|
|
|
|
/**
|
|
* Request microphone access via `getUserMedia` and bind the resulting
|
|
* audio track to every active peer connection.
|
|
*
|
|
* If a local stream already exists it is stopped first.
|
|
*
|
|
* @returns The captured {@link MediaStream}.
|
|
* @throws If `getUserMedia` is unavailable (non-secure context) or the user denies access.
|
|
*/
|
|
async enableVoice(): Promise<MediaStream> {
|
|
try {
|
|
// Stop any existing stream first
|
|
if (this.localMediaStream) {
|
|
this.logger.info('Stopping existing local stream before enabling voice');
|
|
this.localMediaStream.getTracks().forEach((track) => track.stop());
|
|
this.localMediaStream = null;
|
|
}
|
|
|
|
const mediaConstraints: MediaStreamConstraints = {
|
|
audio: {
|
|
echoCancellation: true,
|
|
noiseSuppression: !this._noiseReductionDesired,
|
|
autoGainControl: true
|
|
},
|
|
video: false
|
|
};
|
|
|
|
this.logger.info('getUserMedia constraints', mediaConstraints);
|
|
|
|
if (!navigator.mediaDevices?.getUserMedia) {
|
|
throw new Error(
|
|
'navigator.mediaDevices is not available. ' +
|
|
'This requires a secure context (HTTPS or localhost). ' +
|
|
'If accessing from an external device, use HTTPS.'
|
|
);
|
|
}
|
|
|
|
const stream = await navigator.mediaDevices.getUserMedia(mediaConstraints);
|
|
|
|
this.rawMicStream = stream;
|
|
|
|
// If the user wants noise reduction, pipe through the denoiser
|
|
this.localMediaStream = this._noiseReductionDesired
|
|
? await this.noiseReduction.enable(stream)
|
|
: stream;
|
|
|
|
// Apply input gain (mic volume) before sending to peers
|
|
this.applyInputGainToCurrentStream();
|
|
|
|
this.logger.logStream('localVoice', this.localMediaStream);
|
|
|
|
this.bindLocalTracksToAllPeers();
|
|
|
|
this.isVoiceActive = true;
|
|
this.voiceConnected$.next();
|
|
return this.localMediaStream;
|
|
} catch (error) {
|
|
this.logger.error('Failed to getUserMedia', error);
|
|
throw error;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Stop all local media tracks and remove audio senders from peers.
|
|
* The peer connections themselves are kept alive.
|
|
*/
|
|
disableVoice(): void {
|
|
this.disableCamera();
|
|
this.noiseReduction.disable();
|
|
this.teardownInputGain();
|
|
|
|
// Stop the raw mic tracks (the denoised stream's tracks are
|
|
// derived nodes and will stop once their source is gone).
|
|
if (this.rawMicStream) {
|
|
this.rawMicStream.getTracks().forEach((track) => track.stop());
|
|
this.rawMicStream = null;
|
|
}
|
|
|
|
this.localMediaStream = null;
|
|
|
|
// Remove audio senders but keep connections alive
|
|
this.callbacks.getActivePeers().forEach((peerData) => {
|
|
const senders = peerData.connection.getSenders();
|
|
|
|
senders.forEach((sender) => {
|
|
if (sender.track?.kind === TRACK_KIND_AUDIO) {
|
|
peerData.connection.removeTrack(sender);
|
|
}
|
|
});
|
|
});
|
|
|
|
this.isVoiceActive = false;
|
|
this.currentVoiceRoomId = undefined;
|
|
this.currentVoiceServerId = undefined;
|
|
this.allowedVoicePeerIds.clear();
|
|
}
|
|
|
|
/**
|
|
* Set the local stream from an external source (e.g. voice-controls component).
|
|
*
|
|
* The raw stream is saved so noise reduction can be toggled on/off later.
|
|
* If noise reduction is already enabled the stream is piped through the
|
|
* denoiser before being sent to peers.
|
|
*/
|
|
async setLocalStream(stream: MediaStream): Promise<void> {
|
|
this.rawMicStream = stream;
|
|
this.logger.info('setLocalStream - noiseReductionDesired =', this._noiseReductionDesired);
|
|
|
|
// Pipe through the denoiser when the user wants noise reduction
|
|
if (this._noiseReductionDesired) {
|
|
this.logger.info('Piping new stream through noise reduction');
|
|
this.localMediaStream = await this.noiseReduction.enable(stream);
|
|
} else {
|
|
this.localMediaStream = stream;
|
|
}
|
|
|
|
// Apply input gain (mic volume) before sending to peers
|
|
this.applyInputGainToCurrentStream();
|
|
|
|
this.bindLocalTracksToAllPeers();
|
|
this.isVoiceActive = true;
|
|
this.voiceConnected$.next();
|
|
}
|
|
|
|
/**
|
|
* Request camera access and bind the resulting video track to peers in the
|
|
* active voice channel. Audio is explicitly disabled for this capture.
|
|
*/
|
|
async enableCamera(): Promise<MediaStream> {
|
|
if (!this.isVoiceActive) {
|
|
throw new Error('Voice must be active before enabling the camera.');
|
|
}
|
|
|
|
try {
|
|
this.stopLocalCameraStream();
|
|
|
|
const mediaConstraints: MediaStreamConstraints = {
|
|
audio: false,
|
|
video: true
|
|
};
|
|
|
|
this.logger.info('getUserMedia camera constraints', mediaConstraints);
|
|
|
|
if (!navigator.mediaDevices?.getUserMedia) {
|
|
throw new Error(
|
|
'navigator.mediaDevices is not available. '
|
|
+ 'This requires a secure context (HTTPS or localhost). '
|
|
+ 'If accessing from an external device, use HTTPS.'
|
|
);
|
|
}
|
|
|
|
const stream = await navigator.mediaDevices.getUserMedia(mediaConstraints);
|
|
const cameraTrack = stream.getVideoTracks()[0];
|
|
|
|
if (!cameraTrack) {
|
|
stream.getTracks().forEach((track) => track.stop());
|
|
throw new Error('Camera capture did not return a video track.');
|
|
}
|
|
|
|
cameraTrack.onended = () => {
|
|
if (this.isCameraActive) {
|
|
this.disableCamera();
|
|
}
|
|
};
|
|
|
|
this.localCameraStream = stream;
|
|
this.isCameraActive = true;
|
|
this.callbacks.setCameraEnabled?.(true);
|
|
|
|
this.logger.attachTrackDiagnostics(cameraTrack, 'localCamera');
|
|
this.logger.logStream('localCamera', stream);
|
|
|
|
this.syncCameraRouting();
|
|
this.broadcastCameraState();
|
|
|
|
return stream;
|
|
} catch (error) {
|
|
this.logger.error('Failed to get camera media', error);
|
|
throw error;
|
|
}
|
|
}
|
|
|
|
/** Stop camera capture and remove camera senders from every peer. */
|
|
disableCamera(): void {
|
|
if (!this.localCameraStream && !this.isCameraActive) {
|
|
return;
|
|
}
|
|
|
|
this.stopLocalCameraStream();
|
|
this.isCameraActive = false;
|
|
this.callbacks.setCameraEnabled?.(false);
|
|
|
|
this.syncCameraRouting();
|
|
this.broadcastCameraState();
|
|
}
|
|
|
|
/**
|
|
* Toggle the local microphone mute state.
|
|
*
|
|
* @param muted - Explicit state; if omitted, the current state is toggled.
|
|
*/
|
|
toggleMute(muted?: boolean): void {
|
|
const newMutedState = muted !== undefined ? muted : !this.isMicMuted;
|
|
|
|
this.isMicMuted = newMutedState;
|
|
this.applyCurrentMuteState();
|
|
}
|
|
|
|
/**
|
|
* Toggle self-deafen (suppress all incoming audio playback).
|
|
*
|
|
* @param deafened - Explicit state; if omitted, the current state is toggled.
|
|
*/
|
|
toggleDeafen(deafened?: boolean): void {
|
|
this.isSelfDeafened = deafened !== undefined ? deafened : !this.isSelfDeafened;
|
|
}
|
|
|
|
/**
|
|
* Toggle RNNoise noise reduction on the local microphone.
|
|
*
|
|
* When enabled the raw mic stream is routed through the RNNoise
|
|
* AudioWorklet and peer senders are updated with the denoised track.
|
|
* When disabled the original raw mic track is restored.
|
|
*
|
|
* @param enabled - Explicit state; if omitted, the current state is toggled.
|
|
*/
|
|
async toggleNoiseReduction(enabled?: boolean): Promise<void> {
|
|
const shouldEnable = enabled !== undefined ? enabled : !this._noiseReductionDesired;
|
|
|
|
// Always persist the preference
|
|
this._noiseReductionDesired = shouldEnable;
|
|
this.logger.info(
|
|
'Noise reduction desired =',
|
|
shouldEnable,
|
|
'| worklet active =',
|
|
this.noiseReduction.isEnabled
|
|
);
|
|
|
|
// Do not update the browser's built-in noiseSuppression constraint on the
|
|
// live mic track here. Chromium may share the underlying capture source,
|
|
// which can leak the constraint change into other active streams. We only
|
|
// apply the browser constraint when the microphone stream is acquired.
|
|
|
|
if (shouldEnable === this.noiseReduction.isEnabled)
|
|
return;
|
|
|
|
if (shouldEnable) {
|
|
if (!this.rawMicStream) {
|
|
this.logger.warn(
|
|
'Cannot enable noise reduction - no mic stream yet (will apply on connect)'
|
|
);
|
|
|
|
return;
|
|
}
|
|
|
|
this.logger.info('Enabling noise reduction on raw mic stream');
|
|
const cleanStream = await this.noiseReduction.enable(this.rawMicStream);
|
|
|
|
this.localMediaStream = cleanStream;
|
|
} else {
|
|
this.noiseReduction.disable();
|
|
|
|
if (this.rawMicStream) {
|
|
this.localMediaStream = this.rawMicStream;
|
|
}
|
|
}
|
|
|
|
// Re-apply input gain to the (possibly new) stream
|
|
this.applyInputGainToCurrentStream();
|
|
|
|
// Propagate the new audio track to every peer connection
|
|
this.bindLocalTracksToAllPeers();
|
|
}
|
|
|
|
/**
|
|
* Set the output volume for remote audio.
|
|
*
|
|
* @param volume - Normalized value: 0 = silent, 1 = 100%, up to 2 = 200%.
|
|
*/
|
|
setOutputVolume(volume: number): void {
|
|
this.remoteAudioVolume = Math.max(VOLUME_MIN, Math.min(2, volume));
|
|
}
|
|
|
|
/**
|
|
* Set the input microphone gain.
|
|
*
|
|
* If a local stream is already active the gain node is updated immediately.
|
|
* Otherwise the value is stored and applied the next time voice starts.
|
|
*
|
|
* @param volume - Normalized 0-1 value.
|
|
*/
|
|
setInputVolume(volume: number): void {
|
|
this.inputGainVolume = Math.max(0, Math.min(1, volume));
|
|
|
|
if (this.inputGainNode) {
|
|
this.inputGainNode.gain.value = this.inputGainVolume;
|
|
return;
|
|
}
|
|
|
|
if (this.localMediaStream) {
|
|
this.applyInputGainToCurrentStream();
|
|
this.bindLocalTracksToAllPeers();
|
|
}
|
|
}
|
|
|
|
/** Return the current input gain value. */
|
|
getInputVolume(): number {
|
|
return this.inputGainVolume;
|
|
}
|
|
|
|
/**
|
|
* Set the maximum audio bitrate on every active peer audio sender.
|
|
*
|
|
* @param kbps - Target bitrate in kilobits per second.
|
|
*/
|
|
async setAudioBitrate(kbps: number): Promise<void> {
|
|
const targetBps = Math.max(
|
|
AUDIO_BITRATE_MIN_BPS,
|
|
Math.min(AUDIO_BITRATE_MAX_BPS, Math.floor(kbps * KBPS_TO_BPS))
|
|
);
|
|
|
|
this.callbacks.getActivePeers().forEach(async (peerData) => {
|
|
const sender = peerData.audioSender
|
|
|| peerData.connection.getSenders().find((candidate) => candidate.track?.kind === TRACK_KIND_AUDIO);
|
|
|
|
if (!sender?.track) {
|
|
return;
|
|
}
|
|
|
|
if (peerData.connection.signalingState !== 'stable') {
|
|
return;
|
|
}
|
|
|
|
let params: RTCRtpSendParameters;
|
|
|
|
try {
|
|
params = sender.getParameters();
|
|
} catch (error) {
|
|
this.logger.warn('getParameters failed; skipping bitrate apply', error);
|
|
return;
|
|
}
|
|
|
|
params.encodings = params.encodings || [{}];
|
|
params.encodings[0].maxBitrate = targetBps;
|
|
|
|
try {
|
|
await sender.setParameters(params);
|
|
this.logger.info('Applied audio bitrate', { targetBps });
|
|
} catch (error) {
|
|
this.logger.warn('Failed to set audio bitrate', error);
|
|
}
|
|
});
|
|
}
|
|
|
|
  /**
   * Apply a named latency profile by mapping it to its predefined audio
   * bitrate and pushing that to every peer sender.
   *
   * @param profile - One of `low`, `balanced`, or `high`.
   */
  async setLatencyProfile(profile: LatencyProfile): Promise<void> {
    await this.setAudioBitrate(LATENCY_PROFILE_BITRATES[profile]);
  }
|
|
|
|
/**
|
|
* Start periodically broadcasting voice presence to all peers.
|
|
*
|
|
* Optionally records the voice room/server so heartbeats include them.
|
|
*
|
|
* @param roomId - The voice channel room ID.
|
|
* @param serverId - The voice channel server ID.
|
|
*/
|
|
startVoiceHeartbeat(roomId?: string, serverId?: string): void {
|
|
this.stopVoiceHeartbeat();
|
|
|
|
// Persist voice channel context so heartbeats and state snapshots include it
|
|
if (roomId !== undefined)
|
|
this.currentVoiceRoomId = roomId;
|
|
|
|
if (serverId !== undefined)
|
|
this.currentVoiceServerId = serverId;
|
|
|
|
this.voicePresenceTimer = setInterval(() => {
|
|
if (this.isVoiceActive) {
|
|
this.broadcastVoicePresence();
|
|
}
|
|
}, VOICE_HEARTBEAT_INTERVAL_MS);
|
|
|
|
// Also send an immediate heartbeat
|
|
if (this.isVoiceActive) {
|
|
this.broadcastVoicePresence();
|
|
}
|
|
}
|
|
|
|
/** Stop the voice-presence heartbeat timer. */
|
|
stopVoiceHeartbeat(): void {
|
|
if (this.voicePresenceTimer) {
|
|
clearInterval(this.voicePresenceTimer);
|
|
this.voicePresenceTimer = null;
|
|
}
|
|
}
|
|
|
|
/** Bind any active local mic/camera tracks to the current peer set. */
|
|
private bindLocalTracksToAllPeers(): void {
|
|
this.syncVoiceRouting();
|
|
this.syncCameraRouting();
|
|
}
|
|
|
|
private syncVoiceRouting(): void {
|
|
const peers = this.callbacks.getActivePeers();
|
|
const localStream = this.localMediaStream;
|
|
const localAudioTrack = localStream?.getAudioTracks()[0] || null;
|
|
|
|
peers.forEach((peerData, peerId) => {
|
|
const didChange = localStream && localAudioTrack && this.allowedVoicePeerIds.has(peerId)
|
|
? this.attachVoiceTrackToPeer(peerId, peerData, localStream, localAudioTrack)
|
|
: this.detachVoiceTrackFromPeer(peerData);
|
|
|
|
if (didChange) {
|
|
void this.callbacks.renegotiate(peerId);
|
|
}
|
|
});
|
|
}
|
|
|
|
private syncCameraRouting(): void {
|
|
const peers = this.callbacks.getActivePeers();
|
|
const localCameraStream = this.localCameraStream;
|
|
const localCameraTrack = localCameraStream?.getVideoTracks()[0] || null;
|
|
|
|
peers.forEach((peerData, peerId) => {
|
|
const didChange = localCameraStream && localCameraTrack && this.allowedVoicePeerIds.has(peerId)
|
|
? this.attachCameraTrackToPeer(peerId, peerData, localCameraStream, localCameraTrack)
|
|
: this.detachCameraTrackFromPeer(peerData, peerId);
|
|
|
|
if (didChange) {
|
|
void this.callbacks.renegotiate(peerId);
|
|
}
|
|
});
|
|
}
|
|
|
|
private attachVoiceTrackToPeer(
|
|
peerId: string,
|
|
peerData: PeerData,
|
|
localStream: MediaStream,
|
|
localAudioTrack: MediaStreamTrack
|
|
): boolean {
|
|
const audioTransceiver = this.getOrCreateReusableTransceiver(peerData, TRACK_KIND_AUDIO, {
|
|
preferredSender: peerData.audioSender,
|
|
excludedSenders: [peerData.screenAudioSender]
|
|
});
|
|
const audioSender = audioTransceiver.sender;
|
|
const needsDirectionRestore = audioTransceiver.direction === TRANSCEIVER_RECV_ONLY
|
|
|| audioTransceiver.direction === TRANSCEIVER_INACTIVE;
|
|
const needsTrackReplace = audioSender.track !== localAudioTrack;
|
|
|
|
peerData.audioSender = audioSender;
|
|
|
|
if (!needsDirectionRestore && !needsTrackReplace) {
|
|
return false;
|
|
}
|
|
|
|
if (needsDirectionRestore) {
|
|
audioTransceiver.direction = TRANSCEIVER_SEND_RECV;
|
|
}
|
|
|
|
if (typeof audioSender.setStreams === 'function') {
|
|
audioSender.setStreams(localStream);
|
|
}
|
|
|
|
if (needsTrackReplace) {
|
|
audioSender
|
|
.replaceTrack(localAudioTrack)
|
|
.then(() => this.logger.info('audio replaceTrack ok', { peerId }))
|
|
.catch((error) => this.logger.error('audio replaceTrack failed', error));
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
private detachVoiceTrackFromPeer(peerData: PeerData): boolean {
|
|
const audioSender = peerData.audioSender
|
|
?? peerData.connection.getSenders().find((sender) => sender !== peerData.screenAudioSender && sender.track?.kind === TRACK_KIND_AUDIO);
|
|
|
|
if (!audioSender?.track) {
|
|
return false;
|
|
}
|
|
|
|
peerData.connection.removeTrack(audioSender);
|
|
return true;
|
|
}
|
|
|
|
private attachCameraTrackToPeer(
|
|
peerId: string,
|
|
peerData: PeerData,
|
|
localStream: MediaStream,
|
|
localCameraTrack: MediaStreamTrack
|
|
): boolean {
|
|
const videoTransceiver = this.getOrCreateReusableTransceiver(peerData, TRACK_KIND_VIDEO, {
|
|
preferredSender: peerData.videoSender,
|
|
excludedSenders: [peerData.screenVideoSender]
|
|
});
|
|
const videoSender = videoTransceiver.sender;
|
|
const needsDirectionRestore = videoTransceiver.direction === TRANSCEIVER_RECV_ONLY
|
|
|| videoTransceiver.direction === TRANSCEIVER_INACTIVE;
|
|
const needsTrackReplace = videoSender.track !== localCameraTrack;
|
|
|
|
peerData.videoSender = videoSender;
|
|
|
|
if (!needsDirectionRestore && !needsTrackReplace) {
|
|
return false;
|
|
}
|
|
|
|
if (needsDirectionRestore) {
|
|
videoTransceiver.direction = TRANSCEIVER_SEND_RECV;
|
|
}
|
|
|
|
if (typeof videoSender.setStreams === 'function') {
|
|
videoSender.setStreams(localStream);
|
|
}
|
|
|
|
if (needsTrackReplace) {
|
|
videoSender
|
|
.replaceTrack(localCameraTrack)
|
|
.then(() => this.logger.info('camera replaceTrack ok', { peerId }))
|
|
.catch((error) => this.logger.error('camera replaceTrack failed', error));
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
private detachCameraTrackFromPeer(peerData: PeerData, peerId: string): boolean {
|
|
const videoSender = peerData.videoSender
|
|
?? peerData.connection.getSenders().find((sender) => sender !== peerData.screenVideoSender && sender.track?.kind === TRACK_KIND_VIDEO);
|
|
const videoTransceiver = videoSender
|
|
? peerData.connection.getTransceivers().find((transceiver) => transceiver.sender === videoSender)
|
|
: undefined;
|
|
|
|
if (!videoTransceiver) {
|
|
return false;
|
|
}
|
|
|
|
peerData.videoSender = videoTransceiver.sender;
|
|
|
|
const hasTrack = !!videoTransceiver.sender.track;
|
|
const needsDirectionReset = videoTransceiver.direction === TRANSCEIVER_SEND_RECV;
|
|
|
|
if (!hasTrack && !needsDirectionReset) {
|
|
return false;
|
|
}
|
|
|
|
if (hasTrack) {
|
|
videoTransceiver.sender.replaceTrack(null)
|
|
.then(() => this.logger.info('camera replaceTrack cleared', { peerId }))
|
|
.catch((error) => this.logger.error('Failed to clear camera sender track', error, { peerId }));
|
|
}
|
|
|
|
if (needsDirectionReset) {
|
|
videoTransceiver.direction = TRANSCEIVER_RECV_ONLY;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
private areSetsEqual(left: Set<string>, right: Set<string>): boolean {
|
|
if (left.size !== right.size) {
|
|
return false;
|
|
}
|
|
|
|
for (const value of left) {
|
|
if (!right.has(value)) {
|
|
return false;
|
|
}
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
private getOrCreateReusableTransceiver(
|
|
peerData: PeerData,
|
|
kind: typeof TRACK_KIND_AUDIO | typeof TRACK_KIND_VIDEO,
|
|
options: {
|
|
preferredSender?: RTCRtpSender;
|
|
excludedSenders?: (RTCRtpSender | undefined)[];
|
|
}
|
|
): RTCRtpTransceiver {
|
|
const excludedSenders = new Set(
|
|
(options.excludedSenders ?? []).filter((sender): sender is RTCRtpSender => !!sender)
|
|
);
|
|
const existingTransceivers = peerData.connection.getTransceivers();
|
|
const preferredTransceiver = options.preferredSender
|
|
? existingTransceivers.find((transceiver) => transceiver.sender === options.preferredSender)
|
|
: null;
|
|
|
|
if (preferredTransceiver) {
|
|
return preferredTransceiver;
|
|
}
|
|
|
|
const attachedSenderTransceiver = existingTransceivers.find((transceiver) =>
|
|
!excludedSenders.has(transceiver.sender)
|
|
&& transceiver.sender.track?.kind === kind
|
|
);
|
|
|
|
if (attachedSenderTransceiver) {
|
|
return attachedSenderTransceiver;
|
|
}
|
|
|
|
const reusableReceiverTransceiver = existingTransceivers.find((transceiver) =>
|
|
!excludedSenders.has(transceiver.sender)
|
|
&& !transceiver.sender.track
|
|
&& transceiver.receiver.track?.kind === kind
|
|
);
|
|
|
|
if (reusableReceiverTransceiver) {
|
|
return reusableReceiverTransceiver;
|
|
}
|
|
|
|
return peerData.connection.addTransceiver(kind, {
|
|
direction: TRANSCEIVER_SEND_RECV
|
|
});
|
|
}
|
|
|
|
/** Broadcast a voice-presence state event to all connected peers. */
|
|
private broadcastVoicePresence(): void {
|
|
const oderId = this.callbacks.getIdentifyOderId();
|
|
const displayName = this.callbacks.getIdentifyDisplayName();
|
|
|
|
this.callbacks.broadcastMessage({
|
|
type: P2P_TYPE_VOICE_STATE,
|
|
oderId,
|
|
displayName,
|
|
voiceState: {
|
|
isConnected: this.isVoiceActive,
|
|
isMuted: this.isMicMuted,
|
|
isDeafened: this.isSelfDeafened,
|
|
roomId: this.currentVoiceRoomId,
|
|
serverId: this.currentVoiceServerId
|
|
}
|
|
});
|
|
}
|
|
|
|
/** Broadcast the local camera state to all connected peers. */
|
|
private broadcastCameraState(): void {
|
|
const oderId = this.callbacks.getIdentifyOderId();
|
|
const displayName = this.callbacks.getIdentifyDisplayName();
|
|
|
|
this.callbacks.broadcastMessage({
|
|
type: P2P_TYPE_CAMERA_STATE,
|
|
oderId,
|
|
displayName,
|
|
isCameraEnabled: this.isCameraActive
|
|
});
|
|
}
|
|
|
|
// -- Input gain helpers --
|
|
|
|
/**
|
|
* Route the current `localMediaStream` through a Web Audio GainNode so
|
|
* the microphone level can be adjusted without renegotiating peers.
|
|
*
|
|
* If a gain pipeline already exists for the same source stream the gain
|
|
* value is simply updated. Otherwise a new pipeline is created.
|
|
*/
|
|
private applyInputGainToCurrentStream(): void {
|
|
const stream = this.localMediaStream;
|
|
|
|
if (!stream)
|
|
return;
|
|
|
|
// If the source stream hasn't changed, just update gain
|
|
if (this.preGainStream === stream && this.inputGainNode && this.inputGainCtx) {
|
|
this.inputGainNode.gain.value = this.inputGainVolume;
|
|
return;
|
|
}
|
|
|
|
// Tear down the old pipeline (if any)
|
|
this.teardownInputGain();
|
|
|
|
// Build new pipeline: source → gain → destination
|
|
this.preGainStream = stream;
|
|
this.inputGainCtx = new AudioContext();
|
|
this.inputGainSourceNode = this.inputGainCtx.createMediaStreamSource(stream);
|
|
this.inputGainNode = this.inputGainCtx.createGain();
|
|
this.inputGainNode.gain.value = this.inputGainVolume;
|
|
this.inputGainDest = this.inputGainCtx.createMediaStreamDestination();
|
|
|
|
this.inputGainSourceNode.connect(this.inputGainNode);
|
|
this.inputGainNode.connect(this.inputGainDest);
|
|
|
|
// Replace localMediaStream with the gained stream
|
|
this.localMediaStream = this.inputGainDest.stream;
|
|
this.applyCurrentMuteState();
|
|
}
|
|
|
|
/** Keep the active outbound track aligned with the stored mute state. */
|
|
private applyCurrentMuteState(): void {
|
|
if (!this.localMediaStream)
|
|
return;
|
|
|
|
const enabled = !this.isMicMuted;
|
|
|
|
this.localMediaStream.getAudioTracks().forEach((track) => {
|
|
track.enabled = enabled;
|
|
});
|
|
}
|
|
|
|
/** Disconnect and close the input-gain AudioContext. */
|
|
private teardownInputGain(): void {
|
|
try {
|
|
this.inputGainSourceNode?.disconnect();
|
|
this.inputGainNode?.disconnect();
|
|
} catch (error) {
|
|
this.logger.warn('Input gain nodes were already disconnected during teardown', error);
|
|
}
|
|
|
|
if (this.inputGainCtx && this.inputGainCtx.state !== 'closed') {
|
|
this.inputGainCtx.close().catch((error) => {
|
|
this.logger.warn('Failed to close input gain audio context', error);
|
|
});
|
|
}
|
|
|
|
this.inputGainCtx = null;
|
|
this.inputGainSourceNode = null;
|
|
this.inputGainNode = null;
|
|
this.inputGainDest = null;
|
|
this.preGainStream = null;
|
|
}
|
|
|
|
private stopLocalCameraStream(): void {
|
|
if (!this.localCameraStream) {
|
|
return;
|
|
}
|
|
|
|
this.localCameraStream.getTracks().forEach((track) => {
|
|
if (track.kind === TRACK_KIND_VIDEO) {
|
|
track.onended = null;
|
|
}
|
|
|
|
track.stop();
|
|
});
|
|
|
|
this.localCameraStream = null;
|
|
}
|
|
|
|
  /**
   * Clean up all resources owned by this manager: the input-gain audio
   * graph, any live mic/camera capture and peer audio senders, the
   * heartbeat timer, the noise-reduction worklet, and the connection
   * subject (completed so subscribers release their references).
   */
  destroy(): void {
    this.teardownInputGain();
    this.disableVoice();
    this.stopVoiceHeartbeat();
    this.noiseReduction.destroy();
    this.voiceConnected$.complete();
  }
}
|