Minor refactor
This commit is contained in:
@@ -34,6 +34,7 @@ import remarkGfm from 'remark-gfm';
|
||||
import remarkBreaks from 'remark-breaks';
|
||||
import remarkParse from 'remark-parse';
|
||||
import { unified } from 'unified';
|
||||
import { ChatMarkdownService } from './services/chat-markdown.service';
|
||||
|
||||
const COMMON_EMOJIS = ['👍', '❤️', '😂', '😮', '😢', '🎉', '🔥', '👀'];
|
||||
|
||||
@@ -79,6 +80,7 @@ export class ChatMessagesComponent implements AfterViewChecked, OnInit, OnDestro
|
||||
private serverDirectory = inject(ServerDirectoryService);
|
||||
private attachmentsSvc = inject(AttachmentService);
|
||||
private cdr = inject(ChangeDetectorRef);
|
||||
private markdown = inject(ChatMarkdownService);
|
||||
|
||||
/** Remark processor with GFM (tables, strikethrough, etc.) and line-break support */
|
||||
remarkProcessor = unified()
|
||||
@@ -277,7 +279,7 @@ export class ChatMessagesComponent implements AfterViewChecked, OnInit, OnDestro
|
||||
const raw = this.messageContent.trim();
|
||||
if (!raw && this.pendingFiles.length === 0) return;
|
||||
|
||||
const content = this.appendImageMarkdown(raw);
|
||||
const content = this.markdown.appendImageMarkdown(raw);
|
||||
|
||||
this.store.dispatch(
|
||||
MessagesActions.sendMessage({
|
||||
@@ -607,109 +609,58 @@ export class ChatMessagesComponent implements AfterViewChecked, OnInit, OnDestro
|
||||
|
||||
/** Wrap selected text in an inline markdown token (bold, italic, etc.). */
|
||||
applyInline(token: string): void {
|
||||
const { start, end } = this.getSelection();
|
||||
const before = this.messageContent.slice(0, start);
|
||||
const selected = this.messageContent.slice(start, end) || 'text';
|
||||
const after = this.messageContent.slice(end);
|
||||
const newText = `${before}${token}${selected}${token}${after}`;
|
||||
this.messageContent = newText;
|
||||
const cursor = before.length + token.length + selected.length + token.length;
|
||||
this.setSelection(cursor, cursor);
|
||||
const result = this.markdown.applyInline(this.messageContent, this.getSelection(), token);
|
||||
this.messageContent = result.text;
|
||||
this.setSelection(result.selectionStart, result.selectionEnd);
|
||||
}
|
||||
|
||||
/** Prepend each selected line with a markdown prefix (e.g. `- ` for lists). */
|
||||
applyPrefix(prefix: string): void {
|
||||
const { start, end } = this.getSelection();
|
||||
const before = this.messageContent.slice(0, start);
|
||||
const selected = this.messageContent.slice(start, end) || 'text';
|
||||
const after = this.messageContent.slice(end);
|
||||
const lines = selected.split('\n').map(line => `${prefix}${line}`);
|
||||
const newSelected = lines.join('\n');
|
||||
const newText = `${before}${newSelected}${after}`;
|
||||
this.messageContent = newText;
|
||||
const cursor = before.length + newSelected.length;
|
||||
this.setSelection(cursor, cursor);
|
||||
const result = this.markdown.applyPrefix(this.messageContent, this.getSelection(), prefix);
|
||||
this.messageContent = result.text;
|
||||
this.setSelection(result.selectionStart, result.selectionEnd);
|
||||
}
|
||||
|
||||
/** Insert a markdown heading at the given level around the current selection. */
|
||||
applyHeading(level: number): void {
|
||||
const hashes = '#'.repeat(Math.max(1, Math.min(6, level)));
|
||||
const { start, end } = this.getSelection();
|
||||
const before = this.messageContent.slice(0, start);
|
||||
const selected = this.messageContent.slice(start, end) || 'Heading';
|
||||
const after = this.messageContent.slice(end);
|
||||
const needsLeadingNewline = before.length > 0 && !before.endsWith('\n');
|
||||
const needsTrailingNewline = after.length > 0 && !after.startsWith('\n');
|
||||
const block = `${needsLeadingNewline ? '\n' : ''}${hashes} ${selected}${needsTrailingNewline ? '\n' : ''}`;
|
||||
const newText = `${before}${block}${after}`;
|
||||
this.messageContent = newText;
|
||||
const cursor = before.length + block.length;
|
||||
this.setSelection(cursor, cursor);
|
||||
const result = this.markdown.applyHeading(this.messageContent, this.getSelection(), level);
|
||||
this.messageContent = result.text;
|
||||
this.setSelection(result.selectionStart, result.selectionEnd);
|
||||
}
|
||||
|
||||
/** Convert selected lines into a numbered markdown list. */
|
||||
applyOrderedList(): void {
|
||||
const { start, end } = this.getSelection();
|
||||
const before = this.messageContent.slice(0, start);
|
||||
const selected = this.messageContent.slice(start, end) || 'item\nitem';
|
||||
const after = this.messageContent.slice(end);
|
||||
const lines = selected.split('\n').map((line, index) => `${index + 1}. ${line}`);
|
||||
const newSelected = lines.join('\n');
|
||||
const newText = `${before}${newSelected}${after}`;
|
||||
this.messageContent = newText;
|
||||
const cursor = before.length + newSelected.length;
|
||||
this.setSelection(cursor, cursor);
|
||||
const result = this.markdown.applyOrderedList(this.messageContent, this.getSelection());
|
||||
this.messageContent = result.text;
|
||||
this.setSelection(result.selectionStart, result.selectionEnd);
|
||||
}
|
||||
|
||||
/** Wrap the selection in a fenced markdown code block. */
|
||||
applyCodeBlock(): void {
|
||||
const { start, end } = this.getSelection();
|
||||
const before = this.messageContent.slice(0, start);
|
||||
const selected = this.messageContent.slice(start, end) || 'code';
|
||||
const after = this.messageContent.slice(end);
|
||||
const fenced = `\n\n\`\`\`\n${selected}\n\`\`\`\n\n`;
|
||||
const newText = `${before}${fenced}${after}`;
|
||||
this.messageContent = newText;
|
||||
const cursor = before.length + fenced.length;
|
||||
this.setSelection(cursor, cursor);
|
||||
const result = this.markdown.applyCodeBlock(this.messageContent, this.getSelection());
|
||||
this.messageContent = result.text;
|
||||
this.setSelection(result.selectionStart, result.selectionEnd);
|
||||
}
|
||||
|
||||
/** Insert a markdown link around the current selection. */
|
||||
applyLink(): void {
|
||||
const { start, end } = this.getSelection();
|
||||
const before = this.messageContent.slice(0, start);
|
||||
const selected = this.messageContent.slice(start, end) || 'link';
|
||||
const after = this.messageContent.slice(end);
|
||||
const link = `[${selected}](https://)`;
|
||||
const newText = `${before}${link}${after}`;
|
||||
this.messageContent = newText;
|
||||
const cursorStart = before.length + link.length - 1; // position inside url
|
||||
this.setSelection(cursorStart - 8, cursorStart - 1);
|
||||
const result = this.markdown.applyLink(this.messageContent, this.getSelection());
|
||||
this.messageContent = result.text;
|
||||
this.setSelection(result.selectionStart, result.selectionEnd);
|
||||
}
|
||||
|
||||
/** Insert a markdown image embed around the current selection. */
|
||||
applyImage(): void {
|
||||
const { start, end } = this.getSelection();
|
||||
const before = this.messageContent.slice(0, start);
|
||||
const selected = this.messageContent.slice(start, end) || 'alt';
|
||||
const after = this.messageContent.slice(end);
|
||||
const img = ``;
|
||||
const newText = `${before}${img}${after}`;
|
||||
this.messageContent = newText;
|
||||
const cursorStart = before.length + img.length - 1;
|
||||
this.setSelection(cursorStart - 8, cursorStart - 1);
|
||||
const result = this.markdown.applyImage(this.messageContent, this.getSelection());
|
||||
this.messageContent = result.text;
|
||||
this.setSelection(result.selectionStart, result.selectionEnd);
|
||||
}
|
||||
|
||||
/** Insert a horizontal rule at the cursor position. */
|
||||
applyHorizontalRule(): void {
|
||||
const { start, end } = this.getSelection();
|
||||
const before = this.messageContent.slice(0, start);
|
||||
const after = this.messageContent.slice(end);
|
||||
const hr = `\n\n---\n\n`;
|
||||
const newText = `${before}${hr}${after}`;
|
||||
this.messageContent = newText;
|
||||
const cursor = before.length + hr.length;
|
||||
this.setSelection(cursor, cursor);
|
||||
const result = this.markdown.applyHorizontalRule(this.messageContent, this.getSelection());
|
||||
this.messageContent = result.text;
|
||||
this.setSelection(result.selectionStart, result.selectionEnd);
|
||||
}
|
||||
|
||||
/** Handle drag-enter to activate the drop zone overlay. */
|
||||
@@ -902,34 +853,6 @@ export class ChatMessagesComponent implements AfterViewChecked, OnInit, OnDestro
|
||||
this.pendingFiles = [];
|
||||
}
|
||||
|
||||
// Detect image URLs and append Markdown embeds at the end
|
||||
private appendImageMarkdown(content: string): string {
|
||||
const imageUrlRegex = /(https?:\/\/[^\s)]+?\.(?:png|jpe?g|gif|webp|svg|bmp|tiff)(?:\?[^\s)]*)?)/ig;
|
||||
const urls = new Set<string>();
|
||||
let match: RegExpExecArray | null;
|
||||
const text = content;
|
||||
while ((match = imageUrlRegex.exec(text)) !== null) {
|
||||
urls.add(match[1]);
|
||||
}
|
||||
|
||||
if (urls.size === 0) return content;
|
||||
|
||||
let append = '';
|
||||
for (const url of urls) {
|
||||
// Skip if already embedded as a Markdown image
|
||||
const alreadyEmbedded = new RegExp(`!\\[[^\\]]*\\\\]\\(\s*${this.escapeRegex(url)}\s*\\)`, 'i').test(text);
|
||||
if (!alreadyEmbedded) {
|
||||
append += `\n`;
|
||||
}
|
||||
}
|
||||
|
||||
return append ? content + append : content;
|
||||
}
|
||||
|
||||
private escapeRegex(str: string): string {
|
||||
return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
|
||||
}
|
||||
|
||||
/** Auto-resize the textarea to fit its content up to 520px, then allow scrolling. */
|
||||
autoResizeTextarea(): void {
|
||||
const el = this.messageInputRef?.nativeElement;
|
||||
|
||||
@@ -0,0 +1,133 @@
|
||||
import { Injectable } from '@angular/core';
|
||||
|
||||
/** A caret/selection span inside the message textarea, as character offsets. */
export interface SelectionRange {
  // Inclusive start offset of the selection.
  start: number;
  // Exclusive end offset; equal to `start` for a collapsed cursor.
  end: number;
}

/** Result of a markdown compose operation: the new text plus the selection to restore. */
export interface ComposeResult {
  // The full message text after the edit.
  text: string;
  // Selection to apply after the edit (equal start/end = collapsed cursor).
  selectionStart: number;
  selectionEnd: number;
}
|
||||
|
||||
@Injectable({ providedIn: 'root' })
|
||||
export class ChatMarkdownService {
|
||||
applyInline(content: string, selection: SelectionRange, token: string): ComposeResult {
|
||||
const { start, end } = selection;
|
||||
const before = content.slice(0, start);
|
||||
const selected = content.slice(start, end) || 'text';
|
||||
const after = content.slice(end);
|
||||
const newText = `${before}${token}${selected}${token}${after}`;
|
||||
const cursor = before.length + token.length + selected.length + token.length;
|
||||
return { text: newText, selectionStart: cursor, selectionEnd: cursor };
|
||||
}
|
||||
|
||||
applyPrefix(content: string, selection: SelectionRange, prefix: string): ComposeResult {
|
||||
const { start, end } = selection;
|
||||
const before = content.slice(0, start);
|
||||
const selected = content.slice(start, end) || 'text';
|
||||
const after = content.slice(end);
|
||||
const lines = selected.split('\n').map(line => `${prefix}${line}`);
|
||||
const newSelected = lines.join('\n');
|
||||
const text = `${before}${newSelected}${after}`;
|
||||
const cursor = before.length + newSelected.length;
|
||||
return { text, selectionStart: cursor, selectionEnd: cursor };
|
||||
}
|
||||
|
||||
applyHeading(content: string, selection: SelectionRange, level: number): ComposeResult {
|
||||
const hashes = '#'.repeat(Math.max(1, Math.min(6, level)));
|
||||
const { start, end } = selection;
|
||||
const before = content.slice(0, start);
|
||||
const selected = content.slice(start, end) || 'Heading';
|
||||
const after = content.slice(end);
|
||||
const needsLeadingNewline = before.length > 0 && !before.endsWith('\n');
|
||||
const needsTrailingNewline = after.length > 0 && !after.startsWith('\n');
|
||||
const block = `${needsLeadingNewline ? '\n' : ''}${hashes} ${selected}${needsTrailingNewline ? '\n' : ''}`;
|
||||
const text = `${before}${block}${after}`;
|
||||
const cursor = before.length + block.length;
|
||||
return { text, selectionStart: cursor, selectionEnd: cursor };
|
||||
}
|
||||
|
||||
applyOrderedList(content: string, selection: SelectionRange): ComposeResult {
|
||||
const { start, end } = selection;
|
||||
const before = content.slice(0, start);
|
||||
const selected = content.slice(start, end) || 'item\nitem';
|
||||
const after = content.slice(end);
|
||||
const lines = selected.split('\n').map((line, index) => `${index + 1}. ${line}`);
|
||||
const newSelected = lines.join('\n');
|
||||
const text = `${before}${newSelected}${after}`;
|
||||
const cursor = before.length + newSelected.length;
|
||||
return { text, selectionStart: cursor, selectionEnd: cursor };
|
||||
}
|
||||
|
||||
applyCodeBlock(content: string, selection: SelectionRange): ComposeResult {
|
||||
const { start, end } = selection;
|
||||
const before = content.slice(0, start);
|
||||
const selected = content.slice(start, end) || 'code';
|
||||
const after = content.slice(end);
|
||||
const fenced = `\n\n\`\`\`\n${selected}\n\`\`\`\n\n`;
|
||||
const text = `${before}${fenced}${after}`;
|
||||
const cursor = before.length + fenced.length;
|
||||
return { text, selectionStart: cursor, selectionEnd: cursor };
|
||||
}
|
||||
|
||||
applyLink(content: string, selection: SelectionRange): ComposeResult {
|
||||
const { start, end } = selection;
|
||||
const before = content.slice(0, start);
|
||||
const selected = content.slice(start, end) || 'link';
|
||||
const after = content.slice(end);
|
||||
const link = `[${selected}](https://)`;
|
||||
const text = `${before}${link}${after}`;
|
||||
const cursorStart = before.length + link.length - 1;
|
||||
// Position inside the URL placeholder
|
||||
return { text, selectionStart: cursorStart - 8, selectionEnd: cursorStart - 1 };
|
||||
}
|
||||
|
||||
applyImage(content: string, selection: SelectionRange): ComposeResult {
|
||||
const { start, end } = selection;
|
||||
const before = content.slice(0, start);
|
||||
const selected = content.slice(start, end) || 'alt';
|
||||
const after = content.slice(end);
|
||||
const img = ``;
|
||||
const text = `${before}${img}${after}`;
|
||||
const cursorStart = before.length + img.length - 1;
|
||||
return { text, selectionStart: cursorStart - 8, selectionEnd: cursorStart - 1 };
|
||||
}
|
||||
|
||||
applyHorizontalRule(content: string, selection: SelectionRange): ComposeResult {
|
||||
const { start, end } = selection;
|
||||
const before = content.slice(0, start);
|
||||
const after = content.slice(end);
|
||||
const hr = `\n\n---\n\n`;
|
||||
const text = `${before}${hr}${after}`;
|
||||
const cursor = before.length + hr.length;
|
||||
return { text, selectionStart: cursor, selectionEnd: cursor };
|
||||
}
|
||||
|
||||
appendImageMarkdown(content: string): string {
|
||||
const imageUrlRegex = /(https?:\/\/[^\s)]+?\.(?:png|jpe?g|gif|webp|svg|bmp|tiff)(?:\?[^\s)]*)?)/ig;
|
||||
const urls = new Set<string>();
|
||||
let match: RegExpExecArray | null;
|
||||
const text = content;
|
||||
while ((match = imageUrlRegex.exec(text)) !== null) {
|
||||
urls.add(match[1]);
|
||||
}
|
||||
|
||||
if (urls.size === 0) return content;
|
||||
|
||||
let append = '';
|
||||
for (const url of urls) {
|
||||
const alreadyEmbedded = new RegExp(`!\\[[^\\]]*\\]\\(\\s*${this.escapeRegex(url)}\\s*\\)`, 'i').test(text);
|
||||
if (!alreadyEmbedded) {
|
||||
append += `\n`;
|
||||
}
|
||||
}
|
||||
|
||||
return append ? content + append : content;
|
||||
}
|
||||
|
||||
private escapeRegex(str: string): string {
|
||||
return str.replace(/[.*+?^${}()|[\\]\\]/g, '\\$&');
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,152 @@
|
||||
import { Injectable } from '@angular/core';
|
||||
import { WebRTCService } from '../../../../core/services/webrtc.service';
|
||||
import { VoiceLevelingService } from '../../../../core/services/voice-leveling.service';
|
||||
|
||||
/** Snapshot of the component's playback state, passed into each service call. */
export interface PlaybackOptions {
  // True once the local user has joined voice; playback is deferred otherwise.
  isConnected: boolean;
  // Output gain in the range 0–1 (component divides its percentage by 100).
  outputVolume: number;
  // When true, all remote <audio> elements are muted.
  isDeafened: boolean;
}
|
||||
|
||||
@Injectable({ providedIn: 'root' })
export class VoicePlaybackService {
  // Active <audio> elements keyed by peer ID.
  private remoteAudioElements = new Map<string, HTMLAudioElement>();
  // Streams received before voice connected; played later via playPendingStreams.
  private pendingRemoteStreams = new Map<string, MediaStream>();
  // Unprocessed streams, kept so playback can be re-wired when leveling toggles.
  private rawRemoteStreams = new Map<string, MediaStream>();

  constructor(
    private voiceLeveling: VoiceLevelingService,
    private webrtc: WebRTCService,
  ) {}

  /**
   * Start (or restart) playback of a remote peer's audio stream.
   * If not yet connected, the stream is queued instead of played.
   * Playback always starts on the raw stream; the leveled stream is swapped
   * in asynchronously only when voice leveling is enabled.
   */
  handleRemoteStream(peerId: string, stream: MediaStream, options: PlaybackOptions): void {
    if (!options.isConnected) {
      // Not in voice yet — stash for playPendingStreams().
      this.pendingRemoteStreams.set(peerId, stream);
      return;
    }

    if (!this.hasAudio(stream)) {
      return;
    }

    // Replace any existing element for this peer before rebuilding.
    this.removeAudioElement(peerId);

    // Always stash the raw stream so we can re-wire on toggle
    this.rawRemoteStreams.set(peerId, stream);

    // Start playback immediately with the raw stream
    const audio = new Audio();
    audio.srcObject = stream;
    audio.autoplay = true;
    audio.volume = options.outputVolume;
    audio.muted = options.isDeafened;
    // play() may reject (e.g. autoplay policy); best-effort by design.
    audio.play().catch(() => {});
    this.remoteAudioElements.set(peerId, audio);

    // Swap to leveled stream if enabled
    if (this.voiceLeveling.enabled()) {
      this.voiceLeveling.enable(peerId, stream).then((leveledStream) => {
        // Guard: the element may have been replaced/removed meanwhile.
        const currentAudio = this.remoteAudioElements.get(peerId);
        if (currentAudio && leveledStream !== stream) {
          currentAudio.srcObject = leveledStream;
        }
      }).catch(() => {});
    }
  }

  /** Tear down all per-peer state (pending, raw stash, leveling pipeline, element). */
  removeRemoteAudio(peerId: string): void {
    this.pendingRemoteStreams.delete(peerId);
    this.rawRemoteStreams.delete(peerId);
    this.voiceLeveling.disable(peerId);
    this.removeAudioElement(peerId);
  }

  /** Play every stream queued while disconnected, then clear the queue. */
  playPendingStreams(options: PlaybackOptions): void {
    if (!options.isConnected) return;
    this.pendingRemoteStreams.forEach((stream, peerId) => this.handleRemoteStream(peerId, stream, options));
    this.pendingRemoteStreams.clear();
  }

  /**
   * Re-check every connected peer and (re)start playback for any whose current
   * stream differs from the stashed raw stream — e.g. streams received while a
   * previous session was still "connected".
   */
  ensureAllRemoteStreamsPlaying(options: PlaybackOptions): void {
    if (!options.isConnected) return;
    const peers = this.webrtc.getConnectedPeers();
    for (const peerId of peers) {
      const stream = this.webrtc.getRemoteStream(peerId);
      if (stream && this.hasAudio(stream)) {
        // Compare against the stashed raw stream, not srcObject (which may be
        // the leveled stream when voice leveling is enabled).
        const trackedRaw = this.rawRemoteStreams.get(peerId);
        if (!trackedRaw || trackedRaw !== stream) {
          this.handleRemoteStream(peerId, stream, options);
        }
      }
    }
  }

  /**
   * Rebuild every remote audio element when voice leveling is toggled:
   * enable → build pipelines and swap srcObject to the leveled stream;
   * disable → tear down pipelines and swap back to the raw stream.
   * Volume/deafen state is re-applied afterwards.
   */
  async rebuildAllRemoteAudio(enabled: boolean, options: PlaybackOptions): Promise<void> {
    if (enabled) {
      for (const [peerId, rawStream] of this.rawRemoteStreams) {
        try {
          const leveledStream = await this.voiceLeveling.enable(peerId, rawStream);
          const audio = this.remoteAudioElements.get(peerId);
          if (audio && leveledStream !== rawStream) {
            audio.srcObject = leveledStream;
          }
        } catch {}
        // On failure, playback simply continues on the raw stream.
      }
    } else {
      this.voiceLeveling.disableAll();
      for (const [peerId, rawStream] of this.rawRemoteStreams) {
        const audio = this.remoteAudioElements.get(peerId);
        if (audio) {
          audio.srcObject = rawStream;
        }
      }
    }
    this.updateOutputVolume(options.outputVolume);
    this.updateDeafened(options.isDeafened);
  }

  /** Apply an output gain (0–1) to every remote audio element. */
  updateOutputVolume(volume: number): void {
    this.remoteAudioElements.forEach((audio) => {
      audio.volume = volume;
    });
  }

  /** Mute/unmute every remote audio element. */
  updateDeafened(isDeafened: boolean): void {
    this.remoteAudioElements.forEach((audio) => {
      audio.muted = isDeafened;
    });
  }

  /** Route all remote audio to the given output device where setSinkId is supported. */
  applyOutputDevice(deviceId: string): void {
    if (!deviceId) return;
    this.remoteAudioElements.forEach((audio) => {
      // setSinkId is not in all TS DOM lib versions / browsers — feature-detect.
      const anyAudio = audio as any;
      if (typeof anyAudio.setSinkId === 'function') {
        anyAudio.setSinkId(deviceId).catch(() => {});
      }
    });
  }

  /** Stop playback and drop all per-peer state (used on disconnect/destroy). */
  teardownAll(): void {
    this.remoteAudioElements.forEach((audio) => {
      audio.srcObject = null;
      audio.remove();
    });
    this.remoteAudioElements.clear();
    this.rawRemoteStreams.clear();
    this.pendingRemoteStreams.clear();
  }

  // True when the stream carries at least one audio track.
  private hasAudio(stream: MediaStream): boolean {
    return stream.getAudioTracks().length > 0;
  }

  // Detach and remove this peer's <audio> element, if any.
  private removeAudioElement(peerId: string): void {
    const audio = this.remoteAudioElements.get(peerId);
    if (audio) {
      audio.srcObject = null;
      audio.remove();
      this.remoteAudioElements.delete(peerId);
    }
  }
}
|
||||
@@ -34,6 +34,7 @@ import { selectCurrentRoom } from '../../../store/rooms/rooms.selectors';
|
||||
import { STORAGE_KEY_VOICE_SETTINGS } from '../../../core/constants';
|
||||
import { SettingsModalService } from '../../../core/services/settings-modal.service';
|
||||
import { UserAvatarComponent } from '../../../shared';
|
||||
import { PlaybackOptions, VoicePlaybackService } from './services/voice-playback.service';
|
||||
|
||||
interface AudioDevice {
|
||||
deviceId: string;
|
||||
@@ -64,15 +65,10 @@ export class VoiceControlsComponent implements OnInit, OnDestroy {
|
||||
private voiceSessionService = inject(VoiceSessionService);
|
||||
private voiceActivity = inject(VoiceActivityService);
|
||||
private voiceLeveling = inject(VoiceLevelingService);
|
||||
private voicePlayback = inject(VoicePlaybackService);
|
||||
private store = inject(Store);
|
||||
private settingsModal = inject(SettingsModalService);
|
||||
private remoteStreamSubscription: Subscription | null = null;
|
||||
private remoteAudioElements = new Map<string, HTMLAudioElement>();
|
||||
private pendingRemoteStreams = new Map<string, MediaStream>();
|
||||
/** Raw (unprocessed) remote streams keyed by peer ID — used to swap
|
||||
* between raw playback and leveled playback when the user toggles
|
||||
* the voice leveling setting. */
|
||||
private rawRemoteStreams = new Map<string, MediaStream>();
|
||||
/** Unsubscribe function for live voice-leveling toggle notifications. */
|
||||
private voiceLevelingUnsubscribe: (() => void) | null = null;
|
||||
|
||||
@@ -98,6 +94,14 @@ export class VoiceControlsComponent implements OnInit, OnDestroy {
|
||||
includeSystemAudio = signal(false);
|
||||
noiseReduction = signal(false);
|
||||
|
||||
private playbackOptions(): PlaybackOptions {
|
||||
return {
|
||||
isConnected: this.isConnected(),
|
||||
outputVolume: this.outputVolume() / 100,
|
||||
isDeafened: this.isDeafened(),
|
||||
};
|
||||
}
|
||||
|
||||
private voiceConnectedSubscription: Subscription | null = null;
|
||||
|
||||
async ngOnInit(): Promise<void> {
|
||||
@@ -110,28 +114,29 @@ export class VoiceControlsComponent implements OnInit, OnDestroy {
|
||||
// Subscribe to remote streams to play audio from peers
|
||||
this.remoteStreamSubscription = this.webrtcService.onRemoteStream.subscribe(
|
||||
({ peerId, stream }) => {
|
||||
this.playRemoteAudio(peerId, stream);
|
||||
this.voicePlayback.handleRemoteStream(peerId, stream, this.playbackOptions());
|
||||
},
|
||||
);
|
||||
|
||||
// Listen for live voice-leveling toggle changes so we can
|
||||
// rebuild all remote Audio elements immediately (no reconnect).
|
||||
this.voiceLevelingUnsubscribe = this.voiceLeveling.onEnabledChange(
|
||||
(enabled) => this.rebuildAllRemoteAudio(enabled),
|
||||
(enabled) => this.voicePlayback.rebuildAllRemoteAudio(enabled, this.playbackOptions()),
|
||||
);
|
||||
|
||||
// Subscribe to voice connected event to play pending streams and ensure all remote audio is set up
|
||||
this.voiceConnectedSubscription = this.webrtcService.onVoiceConnected.subscribe(() => {
|
||||
this.playPendingStreams();
|
||||
const options = this.playbackOptions();
|
||||
this.voicePlayback.playPendingStreams(options);
|
||||
// Also ensure all remote streams from connected peers are playing
|
||||
// This handles the case where streams were received while voice was "connected"
|
||||
// from a previous session but audio elements weren't set up
|
||||
this.ensureAllRemoteStreamsPlaying();
|
||||
this.voicePlayback.ensureAllRemoteStreamsPlaying(options);
|
||||
});
|
||||
|
||||
// Clean up audio when peer disconnects
|
||||
this.webrtcService.onPeerDisconnected.subscribe((peerId) => {
|
||||
this.removeRemoteAudio(peerId);
|
||||
this.voicePlayback.removeRemoteAudio(peerId);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -140,13 +145,7 @@ export class VoiceControlsComponent implements OnInit, OnDestroy {
|
||||
this.disconnect();
|
||||
}
|
||||
|
||||
// Clean up audio elements
|
||||
this.remoteAudioElements.forEach((audio) => {
|
||||
audio.srcObject = null;
|
||||
audio.remove();
|
||||
});
|
||||
this.remoteAudioElements.clear();
|
||||
this.rawRemoteStreams.clear();
|
||||
this.voicePlayback.teardownAll();
|
||||
this.voiceLeveling.disableAll();
|
||||
|
||||
this.remoteStreamSubscription?.unsubscribe();
|
||||
@@ -154,139 +153,6 @@ export class VoiceControlsComponent implements OnInit, OnDestroy {
|
||||
this.voiceLevelingUnsubscribe?.();
|
||||
}
|
||||
|
||||
/**
|
||||
* Play any pending remote streams that were received before we joined voice.
|
||||
* This is called when voice is connected to ensure audio works on first join.
|
||||
*/
|
||||
private playPendingStreams(): void {
|
||||
this.pendingRemoteStreams.forEach((stream, peerId) => {
|
||||
this.playRemoteAudio(peerId, stream);
|
||||
});
|
||||
this.pendingRemoteStreams.clear();
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure all remote streams from connected peers are playing.
|
||||
* This handles cases where voice was reconnected and streams were received
|
||||
* while the previous voice session was still "connected".
|
||||
*/
|
||||
private ensureAllRemoteStreamsPlaying(): void {
|
||||
const connectedPeers = this.webrtcService.getConnectedPeers();
|
||||
|
||||
for (const peerId of connectedPeers) {
|
||||
const stream = this.webrtcService.getRemoteStream(peerId);
|
||||
if (stream && stream.getAudioTracks().length > 0) {
|
||||
// Check if we already have an active audio element for this peer.
|
||||
// Compare against the stashed raw stream (not srcObject which may
|
||||
// be the leveled stream when voice leveling is enabled).
|
||||
const existingAudio = this.remoteAudioElements.get(peerId);
|
||||
const trackedRaw = this.rawRemoteStreams.get(peerId);
|
||||
if (!existingAudio || trackedRaw !== stream) {
|
||||
this.playRemoteAudio(peerId, stream);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private removeRemoteAudio(peerId: string): void {
|
||||
// Remove from pending streams
|
||||
this.pendingRemoteStreams.delete(peerId);
|
||||
this.rawRemoteStreams.delete(peerId);
|
||||
|
||||
// Remove voice leveling pipeline for this speaker
|
||||
this.voiceLeveling.disable(peerId);
|
||||
|
||||
// Remove audio element
|
||||
const audio = this.remoteAudioElements.get(peerId);
|
||||
if (audio) {
|
||||
audio.srcObject = null;
|
||||
audio.remove();
|
||||
this.remoteAudioElements.delete(peerId);
|
||||
}
|
||||
}
|
||||
|
||||
private playRemoteAudio(peerId: string, stream: MediaStream): void {
|
||||
// Only play remote audio if we have joined voice
|
||||
if (!this.isConnected()) {
|
||||
// Store the stream to play later when we connect
|
||||
this.pendingRemoteStreams.set(peerId, stream);
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if stream has audio tracks
|
||||
const audioTracks = stream.getAudioTracks();
|
||||
if (audioTracks.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Remove existing audio element for this peer if any
|
||||
const existingAudio = this.remoteAudioElements.get(peerId);
|
||||
if (existingAudio) {
|
||||
existingAudio.srcObject = null;
|
||||
existingAudio.remove();
|
||||
}
|
||||
|
||||
// Always stash the raw stream so we can re-wire on toggle
|
||||
this.rawRemoteStreams.set(peerId, stream);
|
||||
|
||||
// ── Step 1: Immediately start playback with the raw stream ──
|
||||
// This guarantees audio is never lost even if the pipeline
|
||||
// build takes time or fails.
|
||||
const audio = new Audio();
|
||||
audio.srcObject = stream;
|
||||
audio.autoplay = true;
|
||||
audio.volume = this.outputVolume() / 100;
|
||||
if (this.isDeafened()) {
|
||||
audio.muted = true;
|
||||
}
|
||||
audio.play().then(() => {}).catch(() => {});
|
||||
this.remoteAudioElements.set(peerId, audio);
|
||||
|
||||
// ── Step 2: Asynchronously swap in the leveled stream ──
|
||||
// Only when voice leveling is enabled. If it fails or is
|
||||
// disabled, playback continues on the raw stream.
|
||||
if (this.voiceLeveling.enabled()) {
|
||||
this.voiceLeveling.enable(peerId, stream).then((leveledStream) => {
|
||||
// Guard: audio element may have been replaced or removed
|
||||
const currentAudio = this.remoteAudioElements.get(peerId);
|
||||
if (currentAudio && leveledStream !== stream) {
|
||||
currentAudio.srcObject = leveledStream;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Rebuild all remote Audio elements when the user toggles voice
|
||||
* leveling on or off. This runs synchronously for each peer,
|
||||
* swapping `srcObject` between the raw stream and the leveled one.
|
||||
*
|
||||
* Mirrors the noise-reduction live-toggle pattern.
|
||||
*/
|
||||
private async rebuildAllRemoteAudio(enabled: boolean): Promise<void> {
|
||||
if (enabled) {
|
||||
// Enable: build pipelines and swap to leveled streams
|
||||
for (const [peerId, rawStream] of this.rawRemoteStreams) {
|
||||
try {
|
||||
const leveledStream = await this.voiceLeveling.enable(peerId, rawStream);
|
||||
const audio = this.remoteAudioElements.get(peerId);
|
||||
if (audio && leveledStream !== rawStream) {
|
||||
audio.srcObject = leveledStream;
|
||||
}
|
||||
} catch { /* already playing raw — fine */ }
|
||||
}
|
||||
} else {
|
||||
// Disable: tear down all pipelines, swap back to raw streams
|
||||
this.voiceLeveling.disableAll();
|
||||
for (const [peerId, rawStream] of this.rawRemoteStreams) {
|
||||
const audio = this.remoteAudioElements.get(peerId);
|
||||
if (audio) {
|
||||
audio.srcObject = rawStream;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async loadAudioDevices(): Promise<void> {
|
||||
try {
|
||||
if (!navigator.mediaDevices?.enumerateDevices) {
|
||||
@@ -355,10 +221,7 @@ export class VoiceControlsComponent implements OnInit, OnDestroy {
|
||||
});
|
||||
|
||||
// Play any pending remote streams now that we're connected
|
||||
this.pendingRemoteStreams.forEach((pendingStream, peerId) => {
|
||||
this.playRemoteAudio(peerId, pendingStream);
|
||||
});
|
||||
this.pendingRemoteStreams.clear();
|
||||
this.voicePlayback.playPendingStreams(this.playbackOptions());
|
||||
|
||||
// Persist settings after successful connection
|
||||
this.saveSettings();
|
||||
@@ -405,15 +268,7 @@ export class VoiceControlsComponent implements OnInit, OnDestroy {
|
||||
|
||||
// Tear down all voice leveling pipelines
|
||||
this.voiceLeveling.disableAll();
|
||||
|
||||
// Clear all remote audio elements
|
||||
this.remoteAudioElements.forEach((audio) => {
|
||||
audio.srcObject = null;
|
||||
audio.remove();
|
||||
});
|
||||
this.remoteAudioElements.clear();
|
||||
this.rawRemoteStreams.clear();
|
||||
this.pendingRemoteStreams.clear();
|
||||
this.voicePlayback.teardownAll();
|
||||
|
||||
const user = this.currentUser();
|
||||
if (user?.id) {
|
||||
@@ -460,10 +315,7 @@ export class VoiceControlsComponent implements OnInit, OnDestroy {
|
||||
this.isDeafened.update((current) => !current);
|
||||
this.webrtcService.toggleDeafen(this.isDeafened());
|
||||
|
||||
// Mute/unmute all remote audio elements
|
||||
this.remoteAudioElements.forEach((audio) => {
|
||||
audio.muted = this.isDeafened();
|
||||
});
|
||||
this.voicePlayback.updateDeafened(this.isDeafened());
|
||||
|
||||
// When deafening, also mute
|
||||
if (this.isDeafened() && !this.isMuted()) {
|
||||
@@ -532,11 +384,7 @@ export class VoiceControlsComponent implements OnInit, OnDestroy {
|
||||
const input = event.target as HTMLInputElement;
|
||||
this.outputVolume.set(parseInt(input.value, 10));
|
||||
this.webrtcService.setOutputVolume(this.outputVolume() / 100);
|
||||
|
||||
// Update volume on all remote audio elements
|
||||
this.remoteAudioElements.forEach((audio) => {
|
||||
audio.volume = this.outputVolume() / 100;
|
||||
});
|
||||
this.voicePlayback.updateOutputVolume(this.outputVolume() / 100);
|
||||
this.saveSettings();
|
||||
}
|
||||
|
||||
@@ -627,12 +475,7 @@ export class VoiceControlsComponent implements OnInit, OnDestroy {
|
||||
private async applyOutputDevice(): Promise<void> {
|
||||
const deviceId = this.selectedOutputDevice();
|
||||
if (!deviceId) return;
|
||||
this.remoteAudioElements.forEach((audio) => {
|
||||
const anyAudio = audio as any;
|
||||
if (typeof anyAudio.setSinkId === 'function') {
|
||||
anyAudio.setSinkId(deviceId).catch(() => {});
|
||||
}
|
||||
});
|
||||
this.voicePlayback.applyOutputDevice(deviceId);
|
||||
}
|
||||
|
||||
getMuteButtonClass(): string {
|
||||
|
||||
Reference in New Issue
Block a user