/* eslint-disable @typescript-eslint/member-ordering, @typescript-eslint/no-unused-vars, @typescript-eslint/no-explicit-any, id-length */

/**
 * Manages local voice media: getUserMedia, mute, deafen,
 * attaching/detaching audio tracks to peer connections, bitrate tuning,
 * and optional RNNoise-based noise reduction.
 */
import { Subject } from 'rxjs';
import { WebRTCLogger } from './webrtc-logger';
import { PeerData } from './webrtc.types';
import { NoiseReductionManager } from './noise-reduction.manager';
import {
  TRACK_KIND_AUDIO,
  TRACK_KIND_VIDEO,
  TRANSCEIVER_SEND_RECV,
  TRANSCEIVER_RECV_ONLY,
  TRANSCEIVER_INACTIVE,
  AUDIO_BITRATE_MIN_BPS,
  AUDIO_BITRATE_MAX_BPS,
  KBPS_TO_BPS,
  LATENCY_PROFILE_BITRATES,
  VOLUME_MIN,
  VOLUME_MAX,
  VOICE_HEARTBEAT_INTERVAL_MS,
  DEFAULT_DISPLAY_NAME,
  P2P_TYPE_VOICE_STATE,
  LatencyProfile
} from './webrtc.constants';
/**
|
|
* Callbacks the MediaManager needs from the owning service / peer manager.
|
|
*/
|
|
export interface MediaManagerCallbacks {
|
|
/** All active peer connections (for attaching tracks). */
|
|
getActivePeers(): Map<string, PeerData>;
|
|
/** Trigger SDP renegotiation for a specific peer. */
|
|
renegotiate(peerId: string): Promise<void>;
|
|
/** Broadcast a message to all peers. */
|
|
broadcastMessage(event: any): void;
|
|
/** Get identify credentials (for broadcasting). */
|
|
getIdentifyOderId(): string;
|
|
getIdentifyDisplayName(): string;
|
|
}
export class MediaManager {
|
|
/** The stream sent to peers (may be raw or denoised). */
|
|
private localMediaStream: MediaStream | null = null;
|
|
|
|
/**
|
|
* The raw microphone stream from `getUserMedia`.
|
|
* Kept separately so noise reduction can be toggled
|
|
* without re-acquiring the mic.
|
|
*/
|
|
private rawMicStream: MediaStream | null = null;
|
|
|
|
/** Remote audio output volume (0-1). */
|
|
private remoteAudioVolume = VOLUME_MAX;
|
|
|
|
/** Voice-presence heartbeat timer. */
|
|
private voicePresenceTimer: ReturnType<typeof setInterval> | null = null;
|
|
|
|
/** Emitted when voice is successfully connected. */
|
|
readonly voiceConnected$ = new Subject<void>();
|
|
|
|
/** RNNoise noise-reduction processor. */
|
|
private readonly noiseReduction: NoiseReductionManager;
|
|
|
|
/**
|
|
* Tracks the user's *desired* noise-reduction state, independent of
|
|
* whether the worklet is actually running. This lets us honour the
|
|
* preference even when it is set before the mic stream is acquired.
|
|
*/
|
|
private _noiseReductionDesired = false;
|
|
|
|
// State tracked locally (the service exposes these via signals)
|
|
private isVoiceActive = false;
|
|
private isMicMuted = false;
|
|
private isSelfDeafened = false;
|
|
|
|
/** Current voice channel room ID (set when joining voice). */
|
|
private currentVoiceRoomId: string | undefined;
|
|
/** Current voice channel server ID (set when joining voice). */
|
|
private currentVoiceServerId: string | undefined;
|
|
|
|
constructor(
|
|
private readonly logger: WebRTCLogger,
|
|
private callbacks: MediaManagerCallbacks
|
|
) {
|
|
this.noiseReduction = new NoiseReductionManager(logger);
|
|
}
|
|
|
|
/**
|
|
* Replace the callback set at runtime.
|
|
* Needed because of circular initialisation between managers.
|
|
*
|
|
* @param cb - The new callback interface to wire into this manager.
|
|
*/
|
|
setCallbacks(cb: MediaManagerCallbacks): void {
|
|
this.callbacks = cb;
|
|
}
|
|
|
|
/** Returns the current local media stream, or `null` if voice is disabled. */
|
|
getLocalStream(): MediaStream | null {
|
|
return this.localMediaStream;
|
|
}
|
|
/** Whether voice is currently active (mic captured). */
|
|
getIsVoiceActive(): boolean {
|
|
return this.isVoiceActive;
|
|
}
|
|
/** Whether the local microphone is muted. */
|
|
getIsMicMuted(): boolean {
|
|
return this.isMicMuted;
|
|
}
|
|
/** Whether the user has self-deafened. */
|
|
getIsSelfDeafened(): boolean {
|
|
return this.isSelfDeafened;
|
|
}
|
|
/** Current remote audio output volume (normalised 0-1). */
|
|
getRemoteAudioVolume(): number {
|
|
return this.remoteAudioVolume;
|
|
}
|
|
/** The voice channel room ID, if currently in voice. */
|
|
getCurrentVoiceRoomId(): string | undefined {
|
|
return this.currentVoiceRoomId;
|
|
}
|
|
/** The voice channel server ID, if currently in voice. */
|
|
getCurrentVoiceServerId(): string | undefined {
|
|
return this.currentVoiceServerId;
|
|
}
|
|
/** Whether the user wants noise reduction (may or may not be running yet). */
|
|
getIsNoiseReductionEnabled(): boolean {
|
|
return this._noiseReductionDesired;
|
|
}
|
|
|
|
/**
|
|
* Request microphone access via `getUserMedia` and bind the resulting
|
|
* audio track to every active peer connection.
|
|
*
|
|
* If a local stream already exists it is stopped first.
|
|
*
|
|
* @returns The captured {@link MediaStream}.
|
|
* @throws If `getUserMedia` is unavailable (non-secure context) or the user denies access.
|
|
*/
|
|
async enableVoice(): Promise<MediaStream> {
|
|
try {
|
|
// Stop any existing stream first
|
|
if (this.localMediaStream) {
|
|
this.logger.info('Stopping existing local stream before enabling voice');
|
|
this.localMediaStream.getTracks().forEach((track) => track.stop());
|
|
this.localMediaStream = null;
|
|
}
|
|
|
|
const mediaConstraints: MediaStreamConstraints = {
|
|
audio: {
|
|
echoCancellation: true,
|
|
noiseSuppression: true,
|
|
autoGainControl: true
|
|
},
|
|
video: false
|
|
};
|
|
|
|
this.logger.info('getUserMedia constraints', mediaConstraints);
|
|
|
|
if (!navigator.mediaDevices?.getUserMedia) {
|
|
throw new Error(
|
|
'navigator.mediaDevices is not available. ' +
|
|
'This requires a secure context (HTTPS or localhost). ' +
|
|
'If accessing from an external device, use HTTPS.'
|
|
);
|
|
}
|
|
|
|
const stream = await navigator.mediaDevices.getUserMedia(mediaConstraints);
|
|
|
|
this.rawMicStream = stream;
|
|
|
|
// If the user wants noise reduction, pipe through the denoiser
|
|
this.localMediaStream = this._noiseReductionDesired
|
|
? await this.noiseReduction.enable(stream)
|
|
: stream;
|
|
|
|
this.logger.logStream('localVoice', this.localMediaStream);
|
|
|
|
this.bindLocalTracksToAllPeers();
|
|
|
|
this.isVoiceActive = true;
|
|
this.voiceConnected$.next();
|
|
return this.localMediaStream;
|
|
} catch (error) {
|
|
this.logger.error('Failed to getUserMedia', error);
|
|
throw error;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Stop all local media tracks and remove audio senders from peers.
|
|
* The peer connections themselves are kept alive.
|
|
*/
|
|
disableVoice(): void {
|
|
this.noiseReduction.disable();
|
|
|
|
// Stop the raw mic tracks (the denoised stream's tracks are
|
|
// derived nodes and will stop once their source is gone).
|
|
if (this.rawMicStream) {
|
|
this.rawMicStream.getTracks().forEach((track) => track.stop());
|
|
this.rawMicStream = null;
|
|
}
|
|
|
|
this.localMediaStream = null;
|
|
|
|
// Remove audio senders but keep connections alive
|
|
this.callbacks.getActivePeers().forEach((peerData) => {
|
|
const senders = peerData.connection.getSenders();
|
|
|
|
senders.forEach((sender) => {
|
|
if (sender.track?.kind === TRACK_KIND_AUDIO) {
|
|
peerData.connection.removeTrack(sender);
|
|
}
|
|
});
|
|
});
|
|
|
|
this.isVoiceActive = false;
|
|
this.currentVoiceRoomId = undefined;
|
|
this.currentVoiceServerId = undefined;
|
|
}
|
|
|
|
/**
|
|
* Set the local stream from an external source (e.g. voice-controls component).
|
|
*
|
|
* The raw stream is saved so noise reduction can be toggled on/off later.
|
|
* If noise reduction is already enabled the stream is piped through the
|
|
* denoiser before being sent to peers.
|
|
*/
|
|
async setLocalStream(stream: MediaStream): Promise<void> {
|
|
this.rawMicStream = stream;
|
|
this.logger.info('setLocalStream - noiseReductionDesired =', this._noiseReductionDesired);
|
|
|
|
// Pipe through the denoiser when the user wants noise reduction
|
|
if (this._noiseReductionDesired) {
|
|
this.logger.info('Piping new stream through noise reduction');
|
|
this.localMediaStream = await this.noiseReduction.enable(stream);
|
|
} else {
|
|
this.localMediaStream = stream;
|
|
}
|
|
|
|
this.bindLocalTracksToAllPeers();
|
|
this.isVoiceActive = true;
|
|
this.voiceConnected$.next();
|
|
}
|
|
|
|
/**
|
|
* Toggle the local microphone mute state.
|
|
*
|
|
* @param muted - Explicit state; if omitted, the current state is toggled.
|
|
*/
|
|
toggleMute(muted?: boolean): void {
|
|
if (this.localMediaStream) {
|
|
const audioTracks = this.localMediaStream.getAudioTracks();
|
|
const newMutedState = muted !== undefined ? muted : !this.isMicMuted;
|
|
|
|
audioTracks.forEach((track) => {
|
|
track.enabled = !newMutedState;
|
|
});
|
|
|
|
this.isMicMuted = newMutedState;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Toggle self-deafen (suppress all incoming audio playback).
|
|
*
|
|
* @param deafened - Explicit state; if omitted, the current state is toggled.
|
|
*/
|
|
toggleDeafen(deafened?: boolean): void {
|
|
this.isSelfDeafened = deafened !== undefined ? deafened : !this.isSelfDeafened;
|
|
}
|
|
|
|
/**
|
|
* Toggle RNNoise noise reduction on the local microphone.
|
|
*
|
|
* When enabled the raw mic stream is routed through the RNNoise
|
|
* AudioWorklet and peer senders are updated with the denoised track.
|
|
* When disabled the original raw mic track is restored.
|
|
*
|
|
* @param enabled - Explicit state; if omitted, the current state is toggled.
|
|
*/
|
|
async toggleNoiseReduction(enabled?: boolean): Promise<void> {
|
|
const shouldEnable = enabled !== undefined ? enabled : !this._noiseReductionDesired;
|
|
|
|
// Always persist the preference
|
|
this._noiseReductionDesired = shouldEnable;
|
|
this.logger.info(
|
|
'Noise reduction desired =',
|
|
shouldEnable,
|
|
'| worklet active =',
|
|
this.noiseReduction.isEnabled
|
|
);
|
|
|
|
if (shouldEnable === this.noiseReduction.isEnabled)
|
|
return;
|
|
|
|
if (shouldEnable) {
|
|
if (!this.rawMicStream) {
|
|
this.logger.warn(
|
|
'Cannot enable noise reduction - no mic stream yet (will apply on connect)'
|
|
);
|
|
|
|
return;
|
|
}
|
|
|
|
this.logger.info('Enabling noise reduction on raw mic stream');
|
|
const cleanStream = await this.noiseReduction.enable(this.rawMicStream);
|
|
|
|
this.localMediaStream = cleanStream;
|
|
} else {
|
|
this.noiseReduction.disable();
|
|
|
|
if (this.rawMicStream) {
|
|
this.localMediaStream = this.rawMicStream;
|
|
}
|
|
}
|
|
|
|
// Propagate the new audio track to every peer connection
|
|
this.bindLocalTracksToAllPeers();
|
|
}
|
|
|
|
/**
|
|
* Set the output volume for remote audio.
|
|
*
|
|
* @param volume - A value between {@link VOLUME_MIN} (0) and {@link VOLUME_MAX} (1).
|
|
*/
|
|
setOutputVolume(volume: number): void {
|
|
this.remoteAudioVolume = Math.max(VOLUME_MIN, Math.min(VOLUME_MAX, volume));
|
|
}
|
|
|
|
/**
|
|
* Set the maximum audio bitrate on every active peer's audio sender.
|
|
*
|
|
* The value is clamped between {@link AUDIO_BITRATE_MIN_BPS} and
|
|
* {@link AUDIO_BITRATE_MAX_BPS}.
|
|
*
|
|
* @param kbps - Target bitrate in kilobits per second.
|
|
*/
|
|
async setAudioBitrate(kbps: number): Promise<void> {
|
|
const targetBps = Math.max(
|
|
AUDIO_BITRATE_MIN_BPS,
|
|
Math.min(AUDIO_BITRATE_MAX_BPS, Math.floor(kbps * KBPS_TO_BPS))
|
|
);
|
|
|
|
this.callbacks.getActivePeers().forEach(async (peerData) => {
|
|
const sender =
|
|
peerData.audioSender ||
|
|
peerData.connection.getSenders().find((s) => s.track?.kind === TRACK_KIND_AUDIO);
|
|
|
|
if (!sender?.track)
|
|
return;
|
|
|
|
if (peerData.connection.signalingState !== 'stable')
|
|
return;
|
|
|
|
let params: RTCRtpSendParameters;
|
|
|
|
try {
|
|
params = sender.getParameters();
|
|
} catch (error) {
|
|
this.logger.warn('getParameters failed; skipping bitrate apply', error as any);
|
|
return;
|
|
}
|
|
|
|
params.encodings = params.encodings || [{}];
|
|
params.encodings[0].maxBitrate = targetBps;
|
|
|
|
try {
|
|
await sender.setParameters(params);
|
|
this.logger.info('Applied audio bitrate', { targetBps });
|
|
} catch (error) {
|
|
this.logger.warn('Failed to set audio bitrate', error as any);
|
|
}
|
|
});
|
|
}
|
|
|
|
/**
|
|
* Apply a named latency profile that maps to a predefined bitrate.
|
|
*
|
|
* @param profile - One of `'low'`, `'balanced'`, or `'high'`.
|
|
*/
|
|
async setLatencyProfile(profile: LatencyProfile): Promise<void> {
|
|
await this.setAudioBitrate(LATENCY_PROFILE_BITRATES[profile]);
|
|
}
|
|
|
|
/**
|
|
* Start periodically broadcasting voice presence to all peers.
|
|
*
|
|
* Optionally records the voice room/server so heartbeats include them.
|
|
*
|
|
* @param roomId - The voice channel room ID.
|
|
* @param serverId - The voice channel server ID.
|
|
*/
|
|
startVoiceHeartbeat(roomId?: string, serverId?: string): void {
|
|
this.stopVoiceHeartbeat();
|
|
|
|
// Persist voice channel context so heartbeats and state snapshots include it
|
|
if (roomId !== undefined)
|
|
this.currentVoiceRoomId = roomId;
|
|
|
|
if (serverId !== undefined)
|
|
this.currentVoiceServerId = serverId;
|
|
|
|
this.voicePresenceTimer = setInterval(() => {
|
|
if (this.isVoiceActive) {
|
|
this.broadcastVoicePresence();
|
|
}
|
|
}, VOICE_HEARTBEAT_INTERVAL_MS);
|
|
|
|
// Also send an immediate heartbeat
|
|
if (this.isVoiceActive) {
|
|
this.broadcastVoicePresence();
|
|
}
|
|
}
|
|
|
|
/** Stop the voice-presence heartbeat timer. */
|
|
stopVoiceHeartbeat(): void {
|
|
if (this.voicePresenceTimer) {
|
|
clearInterval(this.voicePresenceTimer);
|
|
this.voicePresenceTimer = null;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Bind local audio/video tracks to all existing peer transceivers.
|
|
* Restores transceiver direction to sendrecv if previously set to recvonly
|
|
* (which happens when disableVoice calls removeTrack).
|
|
*/
|
|
private bindLocalTracksToAllPeers(): void {
|
|
const peers = this.callbacks.getActivePeers();
|
|
|
|
if (!this.localMediaStream)
|
|
return;
|
|
|
|
const localAudioTrack = this.localMediaStream.getAudioTracks()[0] || null;
|
|
const localVideoTrack = this.localMediaStream.getVideoTracks()[0] || null;
|
|
|
|
peers.forEach((peerData, peerId) => {
|
|
if (localAudioTrack) {
|
|
let audioSender =
|
|
peerData.audioSender ||
|
|
peerData.connection.getSenders().find((s) => s.track?.kind === TRACK_KIND_AUDIO);
|
|
|
|
if (!audioSender) {
|
|
audioSender = peerData.connection.addTransceiver(TRACK_KIND_AUDIO, {
|
|
direction: TRANSCEIVER_SEND_RECV
|
|
}).sender;
|
|
}
|
|
|
|
peerData.audioSender = audioSender;
|
|
|
|
// Restore direction after removeTrack (which sets it to recvonly)
|
|
const audioTransceiver = peerData.connection
|
|
.getTransceivers()
|
|
.find((t) => t.sender === audioSender);
|
|
|
|
if (
|
|
audioTransceiver &&
|
|
(audioTransceiver.direction === TRANSCEIVER_RECV_ONLY ||
|
|
audioTransceiver.direction === TRANSCEIVER_INACTIVE)
|
|
) {
|
|
audioTransceiver.direction = TRANSCEIVER_SEND_RECV;
|
|
}
|
|
|
|
audioSender
|
|
.replaceTrack(localAudioTrack)
|
|
.then(() => this.logger.info('audio replaceTrack ok', { peerId }))
|
|
.catch((e) => this.logger.error('audio replaceTrack failed', e));
|
|
}
|
|
|
|
if (localVideoTrack) {
|
|
let videoSender =
|
|
peerData.videoSender ||
|
|
peerData.connection.getSenders().find((s) => s.track?.kind === TRACK_KIND_VIDEO);
|
|
|
|
if (!videoSender) {
|
|
videoSender = peerData.connection.addTransceiver(TRACK_KIND_VIDEO, {
|
|
direction: TRANSCEIVER_SEND_RECV
|
|
}).sender;
|
|
}
|
|
|
|
peerData.videoSender = videoSender;
|
|
|
|
const videoTransceiver = peerData.connection
|
|
.getTransceivers()
|
|
.find((t) => t.sender === videoSender);
|
|
|
|
if (
|
|
videoTransceiver &&
|
|
(videoTransceiver.direction === TRANSCEIVER_RECV_ONLY ||
|
|
videoTransceiver.direction === TRANSCEIVER_INACTIVE)
|
|
) {
|
|
videoTransceiver.direction = TRANSCEIVER_SEND_RECV;
|
|
}
|
|
|
|
videoSender
|
|
.replaceTrack(localVideoTrack)
|
|
.then(() => this.logger.info('video replaceTrack ok', { peerId }))
|
|
.catch((e) => this.logger.error('video replaceTrack failed', e));
|
|
}
|
|
|
|
this.callbacks.renegotiate(peerId);
|
|
});
|
|
}
|
|
|
|
/** Broadcast a voice-presence state event to all connected peers. */
|
|
private broadcastVoicePresence(): void {
|
|
const oderId = this.callbacks.getIdentifyOderId();
|
|
const displayName = this.callbacks.getIdentifyDisplayName();
|
|
|
|
this.callbacks.broadcastMessage({
|
|
type: P2P_TYPE_VOICE_STATE,
|
|
oderId,
|
|
displayName,
|
|
voiceState: {
|
|
isConnected: this.isVoiceActive,
|
|
isMuted: this.isMicMuted,
|
|
isDeafened: this.isSelfDeafened,
|
|
roomId: this.currentVoiceRoomId,
|
|
serverId: this.currentVoiceServerId
|
|
}
|
|
});
|
|
}
|
|
|
|
/** Clean up all resources. */
|
|
destroy(): void {
|
|
this.disableVoice();
|
|
this.stopVoiceHeartbeat();
|
|
this.noiseReduction.destroy();
|
|
this.voiceConnected$.complete();
|
|
}
|
|
}