/* eslint-disable @typescript-eslint/member-ordering, @typescript-eslint/no-unused-vars, id-length */
|
|
/**
|
|
* Manages local voice media: getUserMedia, mute, deafen,
|
|
* attaching/detaching audio tracks to peer connections, bitrate tuning,
|
|
* and optional RNNoise-based noise reduction.
|
|
*/
|
|
import { Subject } from 'rxjs';
|
|
import { ChatEvent } from '../../../shared-kernel';
|
|
import { LatencyProfile } from '../realtime.constants';
|
|
import { PeerData } from '../realtime.types';
|
|
import { WebRTCLogger } from '../logging/webrtc-logger';
|
|
import { NoiseReductionManager } from './noise-reduction.manager';
|
|
import {
|
|
TRACK_KIND_AUDIO,
|
|
TRACK_KIND_VIDEO,
|
|
TRANSCEIVER_SEND_RECV,
|
|
TRANSCEIVER_RECV_ONLY,
|
|
TRANSCEIVER_INACTIVE,
|
|
AUDIO_BITRATE_MIN_BPS,
|
|
AUDIO_BITRATE_MAX_BPS,
|
|
KBPS_TO_BPS,
|
|
LATENCY_PROFILE_BITRATES,
|
|
VOLUME_MIN,
|
|
VOLUME_MAX,
|
|
VOICE_HEARTBEAT_INTERVAL_MS,
|
|
DEFAULT_DISPLAY_NAME,
|
|
P2P_TYPE_VOICE_STATE
|
|
} from '../realtime.constants';
|
|
|
|
/**
 * Callbacks the MediaManager needs from the owning service / peer manager.
 *
 * Supplied at construction and replaceable via `setCallbacks` because of
 * circular initialisation between managers.
 */
export interface MediaManagerCallbacks {
  /** All active peer connections, keyed by peer ID (for attaching tracks). */
  getActivePeers(): Map<string, PeerData>;

  /** Trigger SDP renegotiation for a specific peer. */
  renegotiate(peerId: string): Promise<void>;

  /** Broadcast a message to all connected peers. */
  broadcastMessage(event: ChatEvent): void;

  /** Get identify credentials (for broadcasting voice presence). */
  getIdentifyOderId(): string;

  /** Display name from the identify credentials. */
  getIdentifyDisplayName(): string;
}
|
|
|
|
export class MediaManager {
  /** The stream sent to peers (may be raw, denoised, and/or gain-adjusted). */
  private localMediaStream: MediaStream | null = null;

  /**
   * The raw microphone stream from `getUserMedia`.
   * Kept separately so noise reduction can be toggled
   * without re-acquiring the mic.
   */
  private rawMicStream: MediaStream | null = null;

  /** Remote audio output volume (normalised; `setOutputVolume` clamps to 0-2). */
  private remoteAudioVolume = VOLUME_MAX;

  // -- Input gain pipeline (mic volume) --

  /** The stream BEFORE gain is applied (identity check to avoid rebuilding the pipeline). */
  private preGainStream: MediaStream | null = null;

  /** AudioContext backing the input-gain pipeline (source -> gain -> destination). */
  private inputGainCtx: AudioContext | null = null;

  /** Source node wrapping the (possibly denoised) mic stream. */
  private inputGainSourceNode: MediaStreamAudioSourceNode | null = null;

  /** Gain node whose value mirrors `inputGainVolume`. */
  private inputGainNode: GainNode | null = null;

  /** Destination node; its stream is what peers actually receive. */
  private inputGainDest: MediaStreamAudioDestinationNode | null = null;

  /** Normalised 0-1 input gain (1 = 100%). */
  private inputGainVolume = 1.0;

  /** Voice-presence heartbeat timer. */
  private voicePresenceTimer: ReturnType<typeof setInterval> | null = null;

  /** Emitted when voice is successfully connected. */
  readonly voiceConnected$ = new Subject<void>();

  /** RNNoise noise-reduction processor. */
  private readonly noiseReduction: NoiseReductionManager;

  /**
   * Tracks the user's *desired* noise-reduction state, independent of
   * whether the worklet is actually running. This lets us honour the
   * preference even when it is set before the mic stream is acquired.
   */
  private _noiseReductionDesired = true;

  // State tracked locally (the service exposes these via signals)

  /** Whether the microphone has been captured and voice is live. */
  private isVoiceActive = false;

  /** Whether outbound audio tracks are currently disabled (muted). */
  private isMicMuted = false;

  /** Whether the user has chosen to suppress incoming audio. */
  private isSelfDeafened = false;

  /** Current voice channel room ID (set when joining voice). */
  private currentVoiceRoomId: string | undefined;

  /** Current voice channel server ID (set when joining voice). */
  private currentVoiceServerId: string | undefined;

  constructor(
    private readonly logger: WebRTCLogger,
    private callbacks: MediaManagerCallbacks
  ) {
    // The noise-reduction manager shares this manager's logger.
    this.noiseReduction = new NoiseReductionManager(logger);
  }
|
|
|
|
  /**
   * Replace the callback set at runtime.
   * Needed because of circular initialisation between managers: the final
   * callbacks can only be wired once every manager instance exists.
   *
   * @param nextCallbacks - The new callback interface to wire into this manager.
   */
  setCallbacks(nextCallbacks: MediaManagerCallbacks): void {
    this.callbacks = nextCallbacks;
  }
|
|
|
|
  /** Returns the current local media stream, or `null` if voice is disabled. */
  getLocalStream(): MediaStream | null {
    return this.localMediaStream;
  }

  /** Returns the raw microphone stream before processing, if available. */
  getRawMicStream(): MediaStream | null {
    return this.rawMicStream;
  }

  /** Whether voice is currently active (mic captured). */
  getIsVoiceActive(): boolean {
    return this.isVoiceActive;
  }

  /** Whether the local microphone is muted. */
  getIsMicMuted(): boolean {
    return this.isMicMuted;
  }

  /** Whether the user has self-deafened (incoming audio suppressed). */
  getIsSelfDeafened(): boolean {
    return this.isSelfDeafened;
  }

  /** Current remote audio output volume (0 = silent, 1 = 100%, up to 2 = 200%). */
  getRemoteAudioVolume(): number {
    return this.remoteAudioVolume;
  }

  /** The voice channel room ID, if currently in voice. */
  getCurrentVoiceRoomId(): string | undefined {
    return this.currentVoiceRoomId;
  }

  /** The voice channel server ID, if currently in voice. */
  getCurrentVoiceServerId(): string | undefined {
    return this.currentVoiceServerId;
  }

  /** Whether the user wants noise reduction (may or may not be running yet). */
  getIsNoiseReductionEnabled(): boolean {
    return this._noiseReductionDesired;
  }
|
|
|
|
/**
|
|
* Request microphone access via `getUserMedia` and bind the resulting
|
|
* audio track to every active peer connection.
|
|
*
|
|
* If a local stream already exists it is stopped first.
|
|
*
|
|
* @returns The captured {@link MediaStream}.
|
|
* @throws If `getUserMedia` is unavailable (non-secure context) or the user denies access.
|
|
*/
|
|
async enableVoice(): Promise<MediaStream> {
|
|
try {
|
|
// Stop any existing stream first
|
|
if (this.localMediaStream) {
|
|
this.logger.info('Stopping existing local stream before enabling voice');
|
|
this.localMediaStream.getTracks().forEach((track) => track.stop());
|
|
this.localMediaStream = null;
|
|
}
|
|
|
|
const mediaConstraints: MediaStreamConstraints = {
|
|
audio: {
|
|
echoCancellation: true,
|
|
noiseSuppression: !this._noiseReductionDesired,
|
|
autoGainControl: true
|
|
},
|
|
video: false
|
|
};
|
|
|
|
this.logger.info('getUserMedia constraints', mediaConstraints);
|
|
|
|
if (!navigator.mediaDevices?.getUserMedia) {
|
|
throw new Error(
|
|
'navigator.mediaDevices is not available. ' +
|
|
'This requires a secure context (HTTPS or localhost). ' +
|
|
'If accessing from an external device, use HTTPS.'
|
|
);
|
|
}
|
|
|
|
const stream = await navigator.mediaDevices.getUserMedia(mediaConstraints);
|
|
|
|
this.rawMicStream = stream;
|
|
|
|
// If the user wants noise reduction, pipe through the denoiser
|
|
this.localMediaStream = this._noiseReductionDesired
|
|
? await this.noiseReduction.enable(stream)
|
|
: stream;
|
|
|
|
// Apply input gain (mic volume) before sending to peers
|
|
this.applyInputGainToCurrentStream();
|
|
|
|
this.logger.logStream('localVoice', this.localMediaStream);
|
|
|
|
this.bindLocalTracksToAllPeers();
|
|
|
|
this.isVoiceActive = true;
|
|
this.voiceConnected$.next();
|
|
return this.localMediaStream;
|
|
} catch (error) {
|
|
this.logger.error('Failed to getUserMedia', error);
|
|
throw error;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Stop all local media tracks and remove audio senders from peers.
|
|
* The peer connections themselves are kept alive.
|
|
*/
|
|
disableVoice(): void {
|
|
this.noiseReduction.disable();
|
|
this.teardownInputGain();
|
|
|
|
// Stop the raw mic tracks (the denoised stream's tracks are
|
|
// derived nodes and will stop once their source is gone).
|
|
if (this.rawMicStream) {
|
|
this.rawMicStream.getTracks().forEach((track) => track.stop());
|
|
this.rawMicStream = null;
|
|
}
|
|
|
|
this.localMediaStream = null;
|
|
|
|
// Remove audio senders but keep connections alive
|
|
this.callbacks.getActivePeers().forEach((peerData) => {
|
|
const senders = peerData.connection.getSenders();
|
|
|
|
senders.forEach((sender) => {
|
|
if (sender.track?.kind === TRACK_KIND_AUDIO) {
|
|
peerData.connection.removeTrack(sender);
|
|
}
|
|
});
|
|
});
|
|
|
|
this.isVoiceActive = false;
|
|
this.currentVoiceRoomId = undefined;
|
|
this.currentVoiceServerId = undefined;
|
|
}
|
|
|
|
/**
|
|
* Set the local stream from an external source (e.g. voice-controls component).
|
|
*
|
|
* The raw stream is saved so noise reduction can be toggled on/off later.
|
|
* If noise reduction is already enabled the stream is piped through the
|
|
* denoiser before being sent to peers.
|
|
*/
|
|
async setLocalStream(stream: MediaStream): Promise<void> {
|
|
this.rawMicStream = stream;
|
|
this.logger.info('setLocalStream - noiseReductionDesired =', this._noiseReductionDesired);
|
|
|
|
// Pipe through the denoiser when the user wants noise reduction
|
|
if (this._noiseReductionDesired) {
|
|
this.logger.info('Piping new stream through noise reduction');
|
|
this.localMediaStream = await this.noiseReduction.enable(stream);
|
|
} else {
|
|
this.localMediaStream = stream;
|
|
}
|
|
|
|
// Apply input gain (mic volume) before sending to peers
|
|
this.applyInputGainToCurrentStream();
|
|
|
|
this.bindLocalTracksToAllPeers();
|
|
this.isVoiceActive = true;
|
|
this.voiceConnected$.next();
|
|
}
|
|
|
|
/**
|
|
* Toggle the local microphone mute state.
|
|
*
|
|
* @param muted - Explicit state; if omitted, the current state is toggled.
|
|
*/
|
|
toggleMute(muted?: boolean): void {
|
|
const newMutedState = muted !== undefined ? muted : !this.isMicMuted;
|
|
|
|
this.isMicMuted = newMutedState;
|
|
this.applyCurrentMuteState();
|
|
}
|
|
|
|
/**
|
|
* Toggle self-deafen (suppress all incoming audio playback).
|
|
*
|
|
* @param deafened - Explicit state; if omitted, the current state is toggled.
|
|
*/
|
|
toggleDeafen(deafened?: boolean): void {
|
|
this.isSelfDeafened = deafened !== undefined ? deafened : !this.isSelfDeafened;
|
|
}
|
|
|
|
/**
|
|
* Toggle RNNoise noise reduction on the local microphone.
|
|
*
|
|
* When enabled the raw mic stream is routed through the RNNoise
|
|
* AudioWorklet and peer senders are updated with the denoised track.
|
|
* When disabled the original raw mic track is restored.
|
|
*
|
|
* @param enabled - Explicit state; if omitted, the current state is toggled.
|
|
*/
|
|
async toggleNoiseReduction(enabled?: boolean): Promise<void> {
|
|
const shouldEnable = enabled !== undefined ? enabled : !this._noiseReductionDesired;
|
|
|
|
// Always persist the preference
|
|
this._noiseReductionDesired = shouldEnable;
|
|
this.logger.info(
|
|
'Noise reduction desired =',
|
|
shouldEnable,
|
|
'| worklet active =',
|
|
this.noiseReduction.isEnabled
|
|
);
|
|
|
|
// Do not update the browser's built-in noiseSuppression constraint on the
|
|
// live mic track here. Chromium may share the underlying capture source,
|
|
// which can leak the constraint change into other active streams. We only
|
|
// apply the browser constraint when the microphone stream is acquired.
|
|
|
|
if (shouldEnable === this.noiseReduction.isEnabled)
|
|
return;
|
|
|
|
if (shouldEnable) {
|
|
if (!this.rawMicStream) {
|
|
this.logger.warn(
|
|
'Cannot enable noise reduction - no mic stream yet (will apply on connect)'
|
|
);
|
|
|
|
return;
|
|
}
|
|
|
|
this.logger.info('Enabling noise reduction on raw mic stream');
|
|
const cleanStream = await this.noiseReduction.enable(this.rawMicStream);
|
|
|
|
this.localMediaStream = cleanStream;
|
|
} else {
|
|
this.noiseReduction.disable();
|
|
|
|
if (this.rawMicStream) {
|
|
this.localMediaStream = this.rawMicStream;
|
|
}
|
|
}
|
|
|
|
// Re-apply input gain to the (possibly new) stream
|
|
this.applyInputGainToCurrentStream();
|
|
|
|
// Propagate the new audio track to every peer connection
|
|
this.bindLocalTracksToAllPeers();
|
|
}
|
|
|
|
/**
|
|
* Set the output volume for remote audio.
|
|
*
|
|
* @param volume - Normalised value: 0 = silent, 1 = 100%, up to 2 = 200%.
|
|
*/
|
|
setOutputVolume(volume: number): void {
|
|
this.remoteAudioVolume = Math.max(VOLUME_MIN, Math.min(2, volume));
|
|
}
|
|
|
|
/**
|
|
* Set the input (microphone) volume.
|
|
*
|
|
* If a local stream is active the gain node is updated in real time.
|
|
* If no stream exists yet the value is stored and applied on connect.
|
|
*
|
|
* @param volume - Normalised 0-1 (0 = silent, 1 = 100%).
|
|
*/
|
|
setInputVolume(volume: number): void {
|
|
this.inputGainVolume = Math.max(0, Math.min(1, volume));
|
|
|
|
if (this.inputGainNode) {
|
|
// Pipeline already exists - just update the gain value
|
|
this.inputGainNode.gain.value = this.inputGainVolume;
|
|
} else if (this.localMediaStream) {
|
|
// Stream is active but gain pipeline hasn't been created yet
|
|
this.applyInputGainToCurrentStream();
|
|
this.bindLocalTracksToAllPeers();
|
|
}
|
|
}
|
|
|
|
/** Get current input gain value (0-1). */
|
|
getInputVolume(): number {
|
|
return this.inputGainVolume;
|
|
}
|
|
|
|
/**
|
|
* Set the maximum audio bitrate on every active peer's audio sender.
|
|
*
|
|
* The value is clamped between {@link AUDIO_BITRATE_MIN_BPS} and
|
|
* {@link AUDIO_BITRATE_MAX_BPS}.
|
|
*
|
|
* @param kbps - Target bitrate in kilobits per second.
|
|
*/
|
|
async setAudioBitrate(kbps: number): Promise<void> {
|
|
const targetBps = Math.max(
|
|
AUDIO_BITRATE_MIN_BPS,
|
|
Math.min(AUDIO_BITRATE_MAX_BPS, Math.floor(kbps * KBPS_TO_BPS))
|
|
);
|
|
|
|
this.callbacks.getActivePeers().forEach(async (peerData) => {
|
|
const sender =
|
|
peerData.audioSender ||
|
|
peerData.connection.getSenders().find((s) => s.track?.kind === TRACK_KIND_AUDIO);
|
|
|
|
if (!sender?.track)
|
|
return;
|
|
|
|
if (peerData.connection.signalingState !== 'stable')
|
|
return;
|
|
|
|
let params: RTCRtpSendParameters;
|
|
|
|
try {
|
|
params = sender.getParameters();
|
|
} catch (error) {
|
|
this.logger.warn('getParameters failed; skipping bitrate apply', error);
|
|
return;
|
|
}
|
|
|
|
params.encodings = params.encodings || [{}];
|
|
params.encodings[0].maxBitrate = targetBps;
|
|
|
|
try {
|
|
await sender.setParameters(params);
|
|
this.logger.info('Applied audio bitrate', { targetBps });
|
|
} catch (error) {
|
|
this.logger.warn('Failed to set audio bitrate', error);
|
|
}
|
|
});
|
|
}
|
|
|
|
/**
|
|
* Apply a named latency profile that maps to a predefined bitrate.
|
|
*
|
|
* @param profile - One of `'low'`, `'balanced'`, or `'high'`.
|
|
*/
|
|
async setLatencyProfile(profile: LatencyProfile): Promise<void> {
|
|
await this.setAudioBitrate(LATENCY_PROFILE_BITRATES[profile]);
|
|
}
|
|
|
|
/**
|
|
* Start periodically broadcasting voice presence to all peers.
|
|
*
|
|
* Optionally records the voice room/server so heartbeats include them.
|
|
*
|
|
* @param roomId - The voice channel room ID.
|
|
* @param serverId - The voice channel server ID.
|
|
*/
|
|
startVoiceHeartbeat(roomId?: string, serverId?: string): void {
|
|
this.stopVoiceHeartbeat();
|
|
|
|
// Persist voice channel context so heartbeats and state snapshots include it
|
|
if (roomId !== undefined)
|
|
this.currentVoiceRoomId = roomId;
|
|
|
|
if (serverId !== undefined)
|
|
this.currentVoiceServerId = serverId;
|
|
|
|
this.voicePresenceTimer = setInterval(() => {
|
|
if (this.isVoiceActive) {
|
|
this.broadcastVoicePresence();
|
|
}
|
|
}, VOICE_HEARTBEAT_INTERVAL_MS);
|
|
|
|
// Also send an immediate heartbeat
|
|
if (this.isVoiceActive) {
|
|
this.broadcastVoicePresence();
|
|
}
|
|
}
|
|
|
|
/** Stop the voice-presence heartbeat timer. */
|
|
stopVoiceHeartbeat(): void {
|
|
if (this.voicePresenceTimer) {
|
|
clearInterval(this.voicePresenceTimer);
|
|
this.voicePresenceTimer = null;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Bind local audio/video tracks to all existing peer transceivers.
|
|
* Restores transceiver direction to sendrecv if previously set to recvonly
|
|
* (which happens when disableVoice calls removeTrack).
|
|
*/
|
|
private bindLocalTracksToAllPeers(): void {
|
|
const peers = this.callbacks.getActivePeers();
|
|
|
|
if (!this.localMediaStream)
|
|
return;
|
|
|
|
const localStream = this.localMediaStream;
|
|
const localAudioTrack = localStream.getAudioTracks()[0] || null;
|
|
const localVideoTrack = localStream.getVideoTracks()[0] || null;
|
|
|
|
peers.forEach((peerData, peerId) => {
|
|
if (localAudioTrack) {
|
|
const audioTransceiver = this.getOrCreateReusableTransceiver(peerData, TRACK_KIND_AUDIO, {
|
|
preferredSender: peerData.audioSender,
|
|
excludedSenders: [peerData.screenAudioSender]
|
|
});
|
|
const audioSender = audioTransceiver.sender;
|
|
|
|
peerData.audioSender = audioSender;
|
|
|
|
// Restore direction after removeTrack (which sets it to recvonly)
|
|
if (
|
|
audioTransceiver &&
|
|
(audioTransceiver.direction === TRANSCEIVER_RECV_ONLY ||
|
|
audioTransceiver.direction === TRANSCEIVER_INACTIVE)
|
|
) {
|
|
audioTransceiver.direction = TRANSCEIVER_SEND_RECV;
|
|
}
|
|
|
|
if (typeof audioSender.setStreams === 'function') {
|
|
audioSender.setStreams(localStream);
|
|
}
|
|
|
|
audioSender
|
|
.replaceTrack(localAudioTrack)
|
|
.then(() => this.logger.info('audio replaceTrack ok', { peerId }))
|
|
.catch((error) => this.logger.error('audio replaceTrack failed', error));
|
|
}
|
|
|
|
if (localVideoTrack) {
|
|
const videoTransceiver = this.getOrCreateReusableTransceiver(peerData, TRACK_KIND_VIDEO, {
|
|
preferredSender: peerData.videoSender,
|
|
excludedSenders: [peerData.screenVideoSender]
|
|
});
|
|
const videoSender = videoTransceiver.sender;
|
|
|
|
peerData.videoSender = videoSender;
|
|
|
|
if (
|
|
videoTransceiver &&
|
|
(videoTransceiver.direction === TRANSCEIVER_RECV_ONLY ||
|
|
videoTransceiver.direction === TRANSCEIVER_INACTIVE)
|
|
) {
|
|
videoTransceiver.direction = TRANSCEIVER_SEND_RECV;
|
|
}
|
|
|
|
if (typeof videoSender.setStreams === 'function') {
|
|
videoSender.setStreams(localStream);
|
|
}
|
|
|
|
videoSender
|
|
.replaceTrack(localVideoTrack)
|
|
.then(() => this.logger.info('video replaceTrack ok', { peerId }))
|
|
.catch((error) => this.logger.error('video replaceTrack failed', error));
|
|
}
|
|
|
|
this.callbacks.renegotiate(peerId);
|
|
});
|
|
}
|
|
|
|
private getOrCreateReusableTransceiver(
|
|
peerData: PeerData,
|
|
kind: typeof TRACK_KIND_AUDIO | typeof TRACK_KIND_VIDEO,
|
|
options: {
|
|
preferredSender?: RTCRtpSender;
|
|
excludedSenders?: (RTCRtpSender | undefined)[];
|
|
}
|
|
): RTCRtpTransceiver {
|
|
const excludedSenders = new Set(
|
|
(options.excludedSenders ?? []).filter((sender): sender is RTCRtpSender => !!sender)
|
|
);
|
|
const existingTransceivers = peerData.connection.getTransceivers();
|
|
const preferredTransceiver = options.preferredSender
|
|
? existingTransceivers.find((transceiver) => transceiver.sender === options.preferredSender)
|
|
: null;
|
|
|
|
if (preferredTransceiver) {
|
|
return preferredTransceiver;
|
|
}
|
|
|
|
const attachedSenderTransceiver = existingTransceivers.find((transceiver) =>
|
|
!excludedSenders.has(transceiver.sender)
|
|
&& transceiver.sender.track?.kind === kind
|
|
);
|
|
|
|
if (attachedSenderTransceiver) {
|
|
return attachedSenderTransceiver;
|
|
}
|
|
|
|
const reusableReceiverTransceiver = existingTransceivers.find((transceiver) =>
|
|
!excludedSenders.has(transceiver.sender)
|
|
&& !transceiver.sender.track
|
|
&& transceiver.receiver.track?.kind === kind
|
|
);
|
|
|
|
if (reusableReceiverTransceiver) {
|
|
return reusableReceiverTransceiver;
|
|
}
|
|
|
|
return peerData.connection.addTransceiver(kind, {
|
|
direction: TRANSCEIVER_SEND_RECV
|
|
});
|
|
}
|
|
|
|
/** Broadcast a voice-presence state event to all connected peers. */
|
|
private broadcastVoicePresence(): void {
|
|
const oderId = this.callbacks.getIdentifyOderId();
|
|
const displayName = this.callbacks.getIdentifyDisplayName();
|
|
|
|
this.callbacks.broadcastMessage({
|
|
type: P2P_TYPE_VOICE_STATE,
|
|
oderId,
|
|
displayName,
|
|
voiceState: {
|
|
isConnected: this.isVoiceActive,
|
|
isMuted: this.isMicMuted,
|
|
isDeafened: this.isSelfDeafened,
|
|
roomId: this.currentVoiceRoomId,
|
|
serverId: this.currentVoiceServerId
|
|
}
|
|
});
|
|
}
|
|
|
|
// -- Input gain helpers --
|
|
|
|
/**
|
|
* Route the current `localMediaStream` through a Web Audio GainNode so
|
|
* the microphone level can be adjusted without renegotiating peers.
|
|
*
|
|
* If a gain pipeline already exists for the same source stream the gain
|
|
* value is simply updated. Otherwise a new pipeline is created.
|
|
*/
|
|
private applyInputGainToCurrentStream(): void {
|
|
const stream = this.localMediaStream;
|
|
|
|
if (!stream)
|
|
return;
|
|
|
|
// If the source stream hasn't changed, just update gain
|
|
if (this.preGainStream === stream && this.inputGainNode && this.inputGainCtx) {
|
|
this.inputGainNode.gain.value = this.inputGainVolume;
|
|
return;
|
|
}
|
|
|
|
// Tear down the old pipeline (if any)
|
|
this.teardownInputGain();
|
|
|
|
// Build new pipeline: source → gain → destination
|
|
this.preGainStream = stream;
|
|
this.inputGainCtx = new AudioContext();
|
|
this.inputGainSourceNode = this.inputGainCtx.createMediaStreamSource(stream);
|
|
this.inputGainNode = this.inputGainCtx.createGain();
|
|
this.inputGainNode.gain.value = this.inputGainVolume;
|
|
this.inputGainDest = this.inputGainCtx.createMediaStreamDestination();
|
|
|
|
this.inputGainSourceNode.connect(this.inputGainNode);
|
|
this.inputGainNode.connect(this.inputGainDest);
|
|
|
|
// Replace localMediaStream with the gained stream
|
|
this.localMediaStream = this.inputGainDest.stream;
|
|
this.applyCurrentMuteState();
|
|
}
|
|
|
|
/** Keep the active outbound track aligned with the stored mute state. */
|
|
private applyCurrentMuteState(): void {
|
|
if (!this.localMediaStream)
|
|
return;
|
|
|
|
const enabled = !this.isMicMuted;
|
|
|
|
this.localMediaStream.getAudioTracks().forEach((track) => {
|
|
track.enabled = enabled;
|
|
});
|
|
}
|
|
|
|
/** Disconnect and close the input-gain AudioContext. */
|
|
private teardownInputGain(): void {
|
|
try {
|
|
this.inputGainSourceNode?.disconnect();
|
|
this.inputGainNode?.disconnect();
|
|
} catch (error) {
|
|
this.logger.warn('Input gain nodes were already disconnected during teardown', error);
|
|
}
|
|
|
|
if (this.inputGainCtx && this.inputGainCtx.state !== 'closed') {
|
|
this.inputGainCtx.close().catch((error) => {
|
|
this.logger.warn('Failed to close input gain audio context', error);
|
|
});
|
|
}
|
|
|
|
this.inputGainCtx = null;
|
|
this.inputGainSourceNode = null;
|
|
this.inputGainNode = null;
|
|
this.inputGainDest = null;
|
|
this.preGainStream = null;
|
|
}
|
|
|
|
  /**
   * Clean up all resources owned by this manager.
   *
   * Releases the gain pipeline and mic capture first (disableVoice re-runs
   * teardownInputGain, which is safe to call twice), then stops the
   * heartbeat timer and the RNNoise processor, and finally completes the
   * subject so subscribers observe shutdown last.
   */
  destroy(): void {
    this.teardownInputGain();
    this.disableVoice();
    this.stopVoiceHeartbeat();
    this.noiseReduction.destroy();
    this.voiceConnected$.complete();
  }
|
|
}
|