Add separation of voice channels, creation of new ones, and moving users between them

This commit is contained in:
2026-03-30 02:11:39 +02:00
parent 83694570e3
commit 727059fb52
19 changed files with 614 additions and 50 deletions

View File

@@ -200,7 +200,7 @@ When a peer connection enters `disconnected`, a 10-second grace period starts. I
## Data channel
A single ordered data channel carries all peer-to-peer messages: chat events, voice/screen state broadcasts, state requests, pings, and screen share control.
A single ordered data channel carries all peer-to-peer messages: chat events, voice/screen state broadcasts, voice-channel move control events, state requests, pings, and screen share control.
Back-pressure is handled with a high-water mark (4 MB) and low-water mark (1 MB). `sendToPeerBuffered()` waits for the buffer to drain before sending, which matters during file transfers.
@@ -229,10 +229,12 @@ graph LR
click Peers "media/media.manager.ts" "MediaManager.bindLocalTracksToAllPeers()" _blank
```
`MediaManager` grabs the mic with `getUserMedia`, optionally pipes it through the RNNoise AudioWorklet for noise reduction (48 kHz, loaded from `rnnoise-worklet.js`), optionally runs it through a `GainNode` for input volume control, and then pushes the resulting stream to every connected peer via `replaceTrack`.
`MediaManager` grabs the mic with `getUserMedia`, optionally pipes it through the RNNoise AudioWorklet for noise reduction (48 kHz, loaded from `rnnoise-worklet.js`), optionally runs it through a `GainNode` for input volume control, and then routes the resulting audio track only to peers that currently belong to the same active voice channel.
Mute just disables the audio track (`track.enabled = false`), the connection stays up. Deafen suppresses incoming audio playback on the local side.
Because peers stay connected across the server for shared state and chat, voice-channel isolation is enforced in both transport and playback: outgoing mic audio is only attached to peers whose voice membership matches the local user's current channel, and remote voice audio plus join/leave cues are only active when the remote peer's announced `voiceState.roomId` and `voiceState.serverId` match the local user's current voice channel.
### Screen share
Screen capture uses a platform-specific strategy:

View File

@@ -91,6 +91,7 @@ export class MediaManager {
private currentVoiceRoomId: string | undefined;
/** Current voice channel server ID (set when joining voice). */
private currentVoiceServerId: string | undefined;
/** Peer IDs currently allowed to receive the local mic track (set via setAllowedVoicePeerIds). */
private allowedVoicePeerIds = new Set<string>();
constructor(
private readonly logger: WebRTCLogger,
@@ -146,6 +147,21 @@ export class MediaManager {
return this._noiseReductionDesired;
}
setAllowedVoicePeerIds(peerIds: Iterable<string>): void {
const nextAllowed = new Set(peerIds);
if (this.areSetsEqual(this.allowedVoicePeerIds, nextAllowed)) {
return;
}
this.allowedVoicePeerIds = nextAllowed;
this.syncVoiceRouting();
}
/**
 * Re-evaluate outgoing voice routing against the current peer list,
 * e.g. after a new peer connects or an existing one reconnects.
 */
refreshVoiceRouting(): void {
  this.syncVoiceRouting();
}
/**
* Request microphone access via `getUserMedia` and bind the resulting
* audio track to every active peer connection.
@@ -239,6 +255,7 @@ export class MediaManager {
this.isVoiceActive = false;
this.currentVoiceRoomId = undefined;
this.currentVoiceServerId = undefined;
this.allowedVoicePeerIds.clear();
}
/**
@@ -491,31 +508,11 @@ export class MediaManager {
peers.forEach((peerData, peerId) => {
if (localAudioTrack) {
const audioTransceiver = this.getOrCreateReusableTransceiver(peerData, TRACK_KIND_AUDIO, {
preferredSender: peerData.audioSender,
excludedSenders: [peerData.screenAudioSender]
});
const audioSender = audioTransceiver.sender;
peerData.audioSender = audioSender;
// Restore direction after removeTrack (which sets it to recvonly)
if (
audioTransceiver &&
(audioTransceiver.direction === TRANSCEIVER_RECV_ONLY ||
audioTransceiver.direction === TRANSCEIVER_INACTIVE)
) {
audioTransceiver.direction = TRANSCEIVER_SEND_RECV;
if (this.allowedVoicePeerIds.has(peerId)) {
this.attachVoiceTrackToPeer(peerId, peerData, localStream, localAudioTrack);
} else {
this.detachVoiceTrackFromPeer(peerData);
}
if (typeof audioSender.setStreams === 'function') {
audioSender.setStreams(localStream);
}
audioSender
.replaceTrack(localAudioTrack)
.then(() => this.logger.info('audio replaceTrack ok', { peerId }))
.catch((error) => this.logger.error('audio replaceTrack failed', error));
}
if (localVideoTrack) {
@@ -549,6 +546,87 @@ export class MediaManager {
});
}
/**
 * Attach or detach the local mic track per peer based on the allow-list,
 * then renegotiate any connection whose senders actually changed.
 */
private syncVoiceRouting(): void {
  const stream = this.localMediaStream;
  const micTrack = stream?.getAudioTracks()[0] || null;
  this.callbacks.getActivePeers().forEach((peerData, peerId) => {
    let changed: boolean;
    if (stream && micTrack && this.allowedVoicePeerIds.has(peerId)) {
      changed = this.attachVoiceTrackToPeer(peerId, peerData, stream, micTrack);
    } else {
      changed = this.detachVoiceTrackFromPeer(peerData);
    }
    if (changed) {
      void this.callbacks.renegotiate(peerId);
    }
  });
}
/**
 * Ensure the peer's reusable audio transceiver is sending the local mic
 * track.
 *
 * @returns true when the transceiver direction or track was changed
 *   (caller should renegotiate), false when already up to date.
 */
private attachVoiceTrackToPeer(
  peerId: string,
  peerData: PeerData,
  localStream: MediaStream,
  localAudioTrack: MediaStreamTrack
): boolean {
  const transceiver = this.getOrCreateReusableTransceiver(peerData, TRACK_KIND_AUDIO, {
    preferredSender: peerData.audioSender,
    excludedSenders: [peerData.screenAudioSender]
  });
  const sender = transceiver.sender;
  peerData.audioSender = sender;
  // removeTrack() flips the direction to recvonly/inactive; detect that so
  // we can restore send-recv on re-attach.
  const mustRestoreDirection =
    transceiver.direction === TRANSCEIVER_RECV_ONLY ||
    transceiver.direction === TRANSCEIVER_INACTIVE;
  const mustReplaceTrack = sender.track !== localAudioTrack;
  if (!mustRestoreDirection && !mustReplaceTrack) {
    // Already routing the current mic track — nothing to renegotiate.
    return false;
  }
  if (mustRestoreDirection) {
    transceiver.direction = TRANSCEIVER_SEND_RECV;
  }
  // setStreams is not available on every platform — feature-detect it.
  if (typeof sender.setStreams === 'function') {
    sender.setStreams(localStream);
  }
  if (mustReplaceTrack) {
    sender
      .replaceTrack(localAudioTrack)
      .then(() => this.logger.info('audio replaceTrack ok', { peerId }))
      .catch((error) => this.logger.error('audio replaceTrack failed', error));
  }
  return true;
}
/**
 * Stop sending mic audio to this peer by removing its audio sender
 * (the screen-share audio sender is explicitly excluded).
 *
 * @returns true when a sending audio sender was found and removed
 *   (caller should renegotiate), false when nothing was being sent.
 */
private detachVoiceTrackFromPeer(peerData: PeerData): boolean {
  let sender = peerData.audioSender;
  if (sender == null) {
    sender = peerData.connection
      .getSenders()
      .find((candidate) => candidate !== peerData.screenAudioSender && candidate.track?.kind === TRACK_KIND_AUDIO);
  }
  if (!sender || !sender.track) {
    return false;
  }
  peerData.connection.removeTrack(sender);
  return true;
}
/** Structural equality check for two string sets (same size, same members). */
private areSetsEqual(left: Set<string>, right: Set<string>): boolean {
  return left.size === right.size && [...left].every((value) => right.has(value));
}
private getOrCreateReusableTransceiver(
peerData: PeerData,
kind: typeof TRACK_KIND_AUDIO | typeof TRACK_KIND_VIDEO,

View File

@@ -219,6 +219,8 @@ export class WebRTCService implements OnDestroy {
this.peerMediaFacade.syncScreenShareToPeer(peerId);
}
this.mediaManager.refreshVoiceRouting();
this.remoteScreenShareRequestController.handlePeerConnected(peerId);
});
@@ -575,6 +577,10 @@ export class WebRTCService implements OnDestroy {
this.voiceSessionController.stopVoiceHeartbeat();
}
/**
 * Restrict outgoing mic audio to the given peers (the members of the
 * local user's current voice channel). Delegates to MediaManager, which
 * attaches/detaches tracks and renegotiates as needed.
 */
syncOutgoingVoiceRouting(allowedPeerIds: string[]): void {
  this.mediaManager.setAllowedVoicePeerIds(allowedPeerIds);
}
/**
* Start sharing the screen (or a window) with all connected peers.
*