This commit is contained in:
sam
2026-05-03 18:02:37 +02:00
parent dbad2c34d7
commit 2679f0b063
6 changed files with 627 additions and 56 deletions

View File

@@ -1,5 +1,14 @@
/// Build script: force linking of additional abseil libraries that
/// webrtc-audio-processing-sys might miss depending on the system's
/// abseil version.
///
/// The bundled webrtc C++ code references symbols from abseil's string
/// formatting and number-to-string utilities. On newer Fedora (abseil
/// 2026+), these live in separate shared libraries that the crate's
/// build script does not automatically link.
fn main() {
    // One `rustc-link-lib` directive per library, in a fixed order.
    const EXTRA_ABSL_LIBS: &[&str] = &[
        "absl_strings_internal",
        "absl_str_format_internal",
        "absl_string_view",
        "absl_int128",
        "absl_throw_delegate",
    ];
    for lib in EXTRA_ABSL_LIBS {
        println!("cargo:rustc-link-lib={lib}");
    }
}

View File

@@ -2,7 +2,7 @@
//!
//! Pulls audio from the lock-free ringbuffer, applies WebRTC noise suppression
//! and echo cancellation, then checks for voice activity before signalling
//! the UI via a `tokio::sync::watch` channel.
//! the UI via `tokio::sync::watch` channels.
//!
//! This thread is a dedicated `std::thread` (not a Tokio task) because
//! real-time audio processing must never be at the mercy of a cooperative
@@ -13,8 +13,8 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::Duration;
use ringbuf::HeapCons;
use ringbuf::traits::{Consumer, Observer};
use ringbuf::{HeapCons, HeapProd};
use ringbuf::traits::{Consumer, Observer, Producer};
use tokio::sync::watch;
use tracing::info;
use webrtc_audio_processing::Processor;
@@ -29,16 +29,22 @@ use super::SAMPLE_RATE;
/// removed the standalone voice detection configuration.
const VAD_RMS_THRESHOLD: f32 = 0.01;
/// WebRTC strictly requires 10ms frames (480 samples at 48kHz).
const DSP_FRAME_SIZE: usize = 480;
/// Spawns the dedicated background DSP thread.
///
/// Reads 960-sample frames from the ringbuffer, applies WebRTC
/// noise suppression + echo cancellation, and updates the active
/// speaker state via the provided watch channel.
/// Reads 480-sample frames (10ms at 48kHz) from the ringbuffer, applies
/// WebRTC noise suppression + echo cancellation, and updates the active
/// speaker state and mic level via the provided watch channels.
pub fn spawn_dsp_thread(
mut consumer: HeapCons<f32>,
mut loopback_prod: HeapProd<f32>,
ptt_flag: Arc<AtomicBool>,
mute_flag: Arc<AtomicBool>,
audio_dumper_flag: Arc<AtomicBool>,
active_speaker_tx: watch::Sender<bool>,
mic_level_tx: watch::Sender<f32>,
) {
thread::spawn(move || {
info!("DSP thread started.");
@@ -61,8 +67,6 @@ pub fn spawn_dsp_thread(
};
ap.set_config(config);
// WebRTC strictly requires 10ms frames (480 samples at 48kHz).
const DSP_FRAME_SIZE: usize = 480;
let mut frame_buf = vec![vec![0.0f32; DSP_FRAME_SIZE]];
let wav_spec = hound::WavSpec {
@@ -79,9 +83,11 @@ pub fn spawn_dsp_thread(
if consumer.occupied_len() >= DSP_FRAME_SIZE {
let _ = consumer.pop_slice(&mut frame_buf[0]);
let is_transmitting = ptt_flag.load(Ordering::Relaxed);
let is_ptt_active = ptt_flag.load(Ordering::Relaxed);
let is_muted = mute_flag.load(Ordering::Relaxed);
let dumper_enabled = audio_dumper_flag.load(Ordering::Relaxed);
// Snapshot the raw frame before DSP (for the audio dumper).
let mut raw_frame = None;
if dumper_enabled {
raw_frame = Some(frame_buf.clone());
@@ -92,6 +98,12 @@ pub fn spawn_dsp_thread(
tracing::warn!("APM processing failed: {:?}", e);
}
// Push to loopback ringbuffer (non-blocking).
// We push even if loopback is disabled in the playback thread
// to keep the pipeline moving if needed.
let _ = loopback_prod.push_slice(&frame_buf[0]);
// ── Audio Dumper ──
if dumper_enabled {
if raw_writer.is_none() {
raw_writer = hound::WavWriter::create("raw_mic.wav", wav_spec).ok();
@@ -108,7 +120,7 @@ pub fn spawn_dsp_thread(
}
}
} else if raw_writer.is_some() {
// Close writers when disabled
// Close writers when disabled.
if let Some(writer) = raw_writer.take() {
let _ = writer.finalize();
}
@@ -117,11 +129,12 @@ pub fn spawn_dsp_thread(
}
}
// Simple RMS-based VAD since webrtc-audio-processing v2
// removed the dedicated VoiceDetection config field.
// ── VAD + mic level ──
let rms = compute_rms(&frame_buf[0]);
let _ = mic_level_tx.send(rms);
let has_voice = rms > VAD_RMS_THRESHOLD;
let should_transmit = is_transmitting && has_voice;
let should_transmit = is_ptt_active && has_voice && !is_muted;
// Only update the watch channel when the state actually changes
// to avoid unnecessary UI repaints.
@@ -144,7 +157,8 @@ fn compute_rms(samples: &[f32]) -> f32 {
return 0.0;
}
let sum_sq: f32 = samples.iter().map(|s| s * s).sum();
#[allow(clippy::cast_precision_loss)] // FRAME_SIZE (960) is well within f32's 23-bit mantissa.
#[allow(clippy::cast_precision_loss)]
// DSP_FRAME_SIZE (480) is well within f32's 23-bit mantissa.
let divisor = samples.len() as f32;
(sum_sq / divisor).sqrt()
}

View File

@@ -8,6 +8,7 @@
// Audio subsystem modules: capture feeds the ringbuffer, dsp processes
// it on a dedicated thread, playback drains the loopback buffer.

/// Microphone capture stream (fills the capture ringbuffer).
pub mod capture;
/// Dedicated DSP thread: noise suppression, echo cancellation, VAD.
pub mod dsp;
/// Playback/loopback output stream.
pub mod playback;

/// The strict sample rate required across the entire DSP pipeline.
pub const SAMPLE_RATE: u32 = 48_000;

View File

@@ -0,0 +1,63 @@
//! Audio playback/loopback stream.
//!
//! Uses `cpal` to output audio samples from a ringbuffer to the default
//! output device. This is primarily used for the "Loopback" feature
//! where a user can hear their own processed voice.
use anyhow::{Result, anyhow};
use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
use ringbuf::HeapCons;
use ringbuf::traits::Consumer;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use tracing::{info, warn};
use super::SAMPLE_RATE;
/// Starts the audio playback stream.
///
/// Pulls samples from the provided ringbuffer consumer and writes them
/// to the default output device. When `loopback_flag` is false the
/// ringbuffer is still drained (silence is emitted) so stale audio does
/// not burst out the moment loopback is toggled back on.
///
/// # Errors
///
/// Returns an error if no default output device exists, if the device
/// name cannot be queried, or if the stream cannot be built or started.
pub fn start_audio_playback(
    mut consumer: HeapCons<f32>,
    loopback_flag: Arc<AtomicBool>,
) -> Result<cpal::Stream> {
    let host = cpal::default_host();
    let device = host
        .default_output_device()
        .ok_or_else(|| anyhow!("No output device found"))?;
    info!("Using output device: {}", device.name()?);

    let config = cpal::StreamConfig {
        channels: 1, // Mono playback for loopback
        // `StreamConfig::sample_rate` is the `cpal::SampleRate` newtype,
        // not a bare `u32` — wrap the shared pipeline rate accordingly.
        sample_rate: cpal::SampleRate(SAMPLE_RATE),
        buffer_size: cpal::BufferSize::Default,
    };

    let stream = device.build_output_stream(
        &config,
        move |data: &mut [f32], _: &cpal::OutputCallbackInfo| {
            let enabled = loopback_flag.load(Ordering::Relaxed);
            for sample in data.iter_mut() {
                if enabled {
                    // Underruns fall back to silence rather than blocking
                    // inside the real-time audio callback.
                    *sample = consumer.try_pop().unwrap_or(0.0);
                } else {
                    // Drain the ringbuffer even if loopback is disabled
                    // to prevent old audio from playing when toggled on.
                    let _ = consumer.try_pop();
                    *sample = 0.0;
                }
            }
        },
        move |err| {
            warn!("Playback stream error: {}", err);
        },
        None,
    )?;
    stream.play()?;
    Ok(stream)
}

View File

@@ -25,31 +25,47 @@ fn main() -> Result<()> {
tracing_subscriber::fmt::init();
info!("Starting client node...");
// Setup communication channels
// ── Communication channels ──
let (active_speaker_tx, active_speaker_rx) = watch::channel(false);
let (mic_level_tx, mic_level_rx) = watch::channel(0.0_f32);
let ptt_flag = Arc::new(AtomicBool::new(false));
let mute_flag = Arc::new(AtomicBool::new(false));
let audio_dumper_flag = Arc::new(AtomicBool::new(false));
let loopback_flag = Arc::new(AtomicBool::new(false));
// Setup lock-free ringbuffer for audio capture (4096 capacity)
// ── Lock-free ringbuffer for audio capture (4096 capacity) ──
let audio_rb = HeapRb::<f32>::new(4096);
let (producer, consumer) = audio_rb.split();
// Spawn DSP and audio capture threads
// ── Lock-free ringbuffer for audio loopback (4096 capacity) ──
let loopback_rb = HeapRb::<f32>::new(4096);
let (loopback_prod, loopback_cons) = loopback_rb.split();
// ── Spawn DSP and audio capture threads ──
audio::dsp::spawn_dsp_thread(
consumer,
loopback_prod,
ptt_flag.clone(),
mute_flag.clone(),
audio_dumper_flag.clone(),
active_speaker_tx,
mic_level_tx,
);
let _stream = audio::capture::start_audio_capture(producer).map_err(|e| {
error!("Failed to start audio capture: {:?}", e);
e
});
// Spawn Global Hotkey listener
// ── Spawn playback/loopback stream ──
let _playback_stream = audio::playback::start_audio_playback(loopback_cons, loopback_flag.clone()).map_err(|e| {
error!("Failed to start audio playback: {:?}", e);
e
});
// ── Spawn Global Hotkey listener ──
hotkey::spawn_hotkey_listener(ptt_flag);
// Spawn custom tokio runtime for network background tasks
// ── Spawn custom tokio runtime for network background tasks ──
std::thread::spawn(move || {
let Ok(rt) = tokio::runtime::Builder::new_multi_thread()
.enable_all()
@@ -67,7 +83,9 @@ fn main() -> Result<()> {
});
let options = eframe::NativeOptions {
viewport: egui::ViewportBuilder::default().with_inner_size([800.0, 600.0]),
viewport: egui::ViewportBuilder::default()
.with_inner_size([960.0, 640.0])
.with_min_inner_size([640.0, 400.0]),
..Default::default()
};
@@ -77,7 +95,10 @@ fn main() -> Result<()> {
Box::new(|_cc| {
Ok(Box::new(ui::VoiceApp::new(
active_speaker_rx,
mic_level_rx,
audio_dumper_flag,
mute_flag,
loopback_flag,
)))
}),
)

View File

@@ -1,23 +1,103 @@
//! The core application state for the eframe UI.
//!
//! This module defines the `VoiceApp` struct which implements `eframe::App`.
//! It listens to background events via `tokio::sync::watch` and draws the UI at 60 FPS.
//!
//! We implement `ui()` (not the deprecated `update()`) because eframe 0.34
//! changed the required trait method to receive an `&mut egui::Ui` directly
//! instead of a raw `egui::Context`.
//! It listens to background events via `tokio::sync::watch` and draws the
//! classic TeamSpeak-style layout: channel tree on the left, text chat in the
//! centre, and a control bar at the bottom.
use eframe::egui;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use tokio::sync::watch;
// ── Hardcoded channel list (Milestone 2 spec: "tree view of hardcoded channels") ──
/// A channel in the server channel tree.
struct Channel {
    // Display name shown in the tree; also used as the selection key
    // (compared against `selected_channel`).
    name: &'static str,
    // Nested sub-channels; an empty slice marks a leaf channel.
    children: &'static [Channel],
}
/// Static channel tree for Milestone 2.
///
/// Two parent channels with leaf rooms each, plus a top-level AFK leaf.
const CHANNEL_TREE: &[Channel] = &[
    Channel {
        name: "General",
        children: &[
            Channel { name: "Lobby", children: &[] },
            Channel { name: "Meeting Room", children: &[] },
        ],
    },
    Channel {
        name: "Gaming",
        children: &[
            Channel { name: "Competitive", children: &[] },
            Channel { name: "Casual", children: &[] },
        ],
    },
    Channel { name: "AFK", children: &[] },
];
// ── Colour palette ──
// Catppuccin-inspired dark palette; all values are opaque RGB.

/// Base window background.
const BG_DARK: egui::Color32 = egui::Color32::from_rgb(30, 30, 46);
/// Fill for side/bottom panels.
const PANEL_BG: egui::Color32 = egui::Color32::from_rgb(36, 36, 54);
/// Primary accent (selection text, headers).
const ACCENT: egui::Color32 = egui::Color32::from_rgb(98, 114, 248);
/// Dimmed accent used for selection backgrounds.
const ACCENT_DIM: egui::Color32 = egui::Color32::from_rgb(68, 78, 160);
/// Default foreground text colour.
const TEXT_PRIMARY: egui::Color32 = egui::Color32::from_rgb(205, 214, 244);
/// De-emphasised/secondary text colour.
const TEXT_MUTED: egui::Color32 = egui::Color32::from_rgb(127, 132, 156);
/// "Speaking"/low-level indicator colour.
const GREEN: egui::Color32 = egui::Color32::from_rgb(100, 220, 130);
/// Muted/deafened/high-level indicator colour.
const RED: egui::Color32 = egui::Color32::from_rgb(235, 100, 100);
/// Mid-level meter warning colour.
const YELLOW: egui::Color32 = egui::Color32::from_rgb(250, 200, 80);
/// Panel border/stroke colour.
const SEPARATOR: egui::Color32 = egui::Color32::from_rgb(55, 55, 75);
// ── Application state ──
/// A single chat message.
struct ChatMessage {
    // Display name of the sender ("System" or "You" in this milestone).
    author: String,
    // The message body text.
    body: String,
}
/// The central state for the eframe UI.
pub struct VoiceApp {
// ── Cross-thread channels ──
/// Receiver for the active speaker state, updated by the DSP thread.
pub active_speaker_rx: watch::Receiver<bool>,
active_speaker_rx: watch::Receiver<bool>,
/// Receiver for the current microphone RMS level (0.01.0).
mic_level_rx: watch::Receiver<f32>,
/// Shared flag to enable/disable the audio dumper.
pub audio_dumper_flag: Arc<AtomicBool>,
audio_dumper_flag: Arc<AtomicBool>,
/// Shared mute flag (disables outgoing audio when true).
mute_flag: Arc<AtomicBool>,
/// Shared flag to enable/disable local audio loopback.
loopback_flag: Arc<AtomicBool>,
// ── Local UI state ──
/// The currently selected channel name.
selected_channel: String,
/// Whether the user has deafened themselves.
is_deafened: bool,
/// Chat messages in the current channel.
chat_messages: Vec<ChatMessage>,
/// The current text input in the chat compose box.
chat_input: String,
/// Whether the developer settings panel is visible.
show_dev_settings: bool,
}
impl VoiceApp {
@@ -25,53 +105,436 @@ impl VoiceApp {
/// Constructs the app from its cross-thread channels and shared flags,
/// seeding the chat log with two system welcome messages.
#[must_use]
pub fn new(
    active_speaker_rx: watch::Receiver<bool>,
    mic_level_rx: watch::Receiver<f32>,
    audio_dumper_flag: Arc<AtomicBool>,
    mute_flag: Arc<AtomicBool>,
    loopback_flag: Arc<AtomicBool>,
) -> Self {
    // Pre-built system messages so the user sees usage hints on launch.
    let greeting = ChatMessage {
        author: String::from("System"),
        body: String::from("Welcome to Voice App!"),
    };
    let hint = ChatMessage {
        author: String::from("System"),
        body: String::from("Press 'V' to talk. Use the controls below to mute/deafen."),
    };
    Self {
        active_speaker_rx,
        mic_level_rx,
        audio_dumper_flag,
        mute_flag,
        loopback_flag,
        selected_channel: String::from("Lobby"),
        is_deafened: false,
        chat_messages: vec![greeting, hint],
        chat_input: String::new(),
        show_dev_settings: false,
    }
}
/// Applies the dark catppuccin-inspired colour scheme to egui.
fn apply_theme(ctx: &egui::Context) {
    // Clone the current global style, mutate it, then write it back.
    let mut theme = (*ctx.global_style()).clone();
    {
        let vis = &mut theme.visuals;
        vis.dark_mode = true;
        vis.override_text_color = Some(TEXT_PRIMARY);
        vis.panel_fill = BG_DARK;
        vis.window_fill = PANEL_BG;
        vis.extreme_bg_color = egui::Color32::from_rgb(24, 24, 37);
        // Widget background fills, from idle through active.
        vis.widgets.noninteractive.bg_fill = PANEL_BG;
        vis.widgets.inactive.bg_fill = egui::Color32::from_rgb(45, 45, 65);
        vis.widgets.hovered.bg_fill = egui::Color32::from_rgb(55, 55, 80);
        vis.widgets.active.bg_fill = ACCENT;
        vis.selection.bg_fill = ACCENT_DIM;
        vis.selection.stroke = egui::Stroke::new(1.0, ACCENT);
        // Foreground (text/icon) strokes per widget state.
        vis.widgets.noninteractive.fg_stroke = egui::Stroke::new(1.0, TEXT_MUTED);
        vis.widgets.inactive.fg_stroke = egui::Stroke::new(1.0, TEXT_PRIMARY);
        vis.widgets.hovered.fg_stroke = egui::Stroke::new(1.0, TEXT_PRIMARY);
        vis.widgets.active.fg_stroke = egui::Stroke::new(1.0, egui::Color32::WHITE);
    }
    theme.spacing.item_spacing = egui::vec2(8.0, 6.0);
    ctx.set_global_style(theme);
}
impl eframe::App for VoiceApp {
fn ui(&mut self, ui: &mut egui::Ui, _frame: &mut eframe::Frame) {
let is_active_speaker = *self.active_speaker_rx.borrow();
// ── Sub-drawing functions ──
// Use columns to simulate a side-panel layout within the single Ui.
ui.columns(2, |columns| {
// Left column: Channel tree view
columns[0].heading("Channels");
columns[0].label("General");
columns[0].label("Gaming");
columns[0].label("AFK");
/// Draws the left-hand channel tree panel.
fn draw_channel_tree(&mut self, ui: &mut egui::Ui) {
ui.add_space(4.0);
ui.horizontal(|ui| {
ui.label(egui::RichText::new("").size(18.0).color(ACCENT));
ui.label(
egui::RichText::new("Voice App Server")
.size(15.0)
.strong()
.color(TEXT_PRIMARY),
);
});
ui.add_space(4.0);
ui.separator();
ui.add_space(4.0);
// Right column: Voice chat state + dev tools
columns[1].heading("Voice Chat");
egui::ScrollArea::vertical()
.auto_shrink([false, false])
.show(ui, |ui| {
for channel in CHANNEL_TREE {
self.draw_channel_entry(ui, channel, 0);
}
});
}
columns[1].horizontal(|ui| {
ui.label("You: ");
if is_active_speaker {
ui.label(egui::RichText::new("Speaking").color(egui::Color32::GREEN));
} else {
ui.label(egui::RichText::new("Silent").color(egui::Color32::GRAY));
/// Recursively draws a single channel entry (with indent for children).
fn draw_channel_entry(&mut self, ui: &mut egui::Ui, channel: &Channel, depth: usize) {
#[allow(clippy::cast_precision_loss)] // Channel depth is always tiny.
let indent = depth as f32 * 16.0;
let is_selected = self.selected_channel == channel.name;
let has_children = !channel.children.is_empty();
ui.horizontal(|ui| {
ui.add_space(indent);
let icon = if has_children { "📁" } else { "🔊" };
let text_color = if is_selected { ACCENT } else { TEXT_PRIMARY };
let label = egui::RichText::new(format!("{icon} {}", channel.name))
.size(13.0)
.color(text_color);
let response = ui.selectable_label(is_selected, label);
if response.clicked() {
self.selected_channel = channel.name.to_string();
}
});
columns[1].separator();
columns[1].heading("Developer Settings");
if has_children {
for child in channel.children {
self.draw_channel_entry(ui, child, depth + 1);
}
}
}
/// Draws the central chat panel: channel header, scrolling message list,
/// and the compose/input bar at the bottom.
fn draw_chat_panel(&mut self, ui: &mut egui::Ui) {
    // Header row: "# <channel>" title for the selected channel.
    ui.horizontal(|ui| {
        ui.label(
            egui::RichText::new(format!("# {}", self.selected_channel))
                .size(16.0)
                .strong()
                .color(TEXT_PRIMARY),
        );
    });
    ui.separator();
    // Chat message area (takes all remaining space minus the input box).
    // NOTE(review): 40.0 is a hard-coded reserve for the input row height —
    // confirm it matches the 28.0-high TextEdit plus spacing.
    let available = ui.available_height() - 40.0;
    egui::ScrollArea::vertical()
        .auto_shrink([false, false])
        .max_height(available)
        .stick_to_bottom(true) // keep the newest message in view
        .show(ui, |ui| {
            for msg in &self.chat_messages {
                // "Author  body" on one wrapped line.
                ui.horizontal_wrapped(|ui| {
                    ui.label(
                        egui::RichText::new(&msg.author)
                            .strong()
                            .color(ACCENT)
                            .size(13.0),
                    );
                    ui.label(
                        egui::RichText::new(&msg.body)
                            .color(TEXT_PRIMARY)
                            .size(13.0),
                    );
                });
                ui.add_space(2.0);
            }
        });
    ui.separator();
    // Chat input bar.
    ui.horizontal(|ui| {
        // Leave 60px of the row for the Send button.
        let response = ui.add_sized(
            [ui.available_width() - 60.0, 28.0],
            egui::TextEdit::singleline(&mut self.chat_input)
                .hint_text("Type a message…")
                .desired_width(f32::INFINITY),
        );
        // Send on button click, or on Enter while the box had focus.
        if ui.button("Send").clicked()
            || (response.lost_focus() && ui.input(|i| i.key_pressed(egui::Key::Enter)))
        {
            let text = self.chat_input.trim().to_string();
            // Ignore empty/whitespace-only submissions.
            if !text.is_empty() {
                self.chat_messages.push(ChatMessage {
                    author: "You".into(),
                    body: text,
                });
                self.chat_input.clear();
            }
            // Re-focus the input so the user can keep typing.
            response.request_focus();
        }
    });
}
/// Draws the bottom control bar (mute, deafen, PTT status, mic level).
fn draw_control_bar(&mut self, ui: &mut egui::Ui) {
    // Snapshot cross-thread state once per frame.
    let is_speaking = *self.active_speaker_rx.borrow();
    let mic_level = *self.mic_level_rx.borrow();
    let is_muted = self.mute_flag.load(Ordering::Relaxed);
    ui.horizontal(|ui| {
        ui.add_space(8.0);
        // ── User info + speaking indicator ──
        // Green while speaking, red when muted/deafened, grey when idle.
        let dot_color = if is_speaking {
            GREEN
        } else if is_muted || self.is_deafened {
            RED
        } else {
            TEXT_MUTED
        };
        // NOTE(review): label text is empty here — looks like a status-dot
        // glyph was lost in this view; confirm the intended character.
        ui.label(egui::RichText::new("").size(16.0).color(dot_color));
        // Speaking takes priority over muted, then deafened, then idle.
        let status = if is_speaking {
            "Speaking"
        } else if is_muted {
            "Muted"
        } else if self.is_deafened {
            "Deafened"
        } else {
            "Idle"
        };
        ui.label(
            egui::RichText::new(format!("TestUser • {status}"))
                .size(13.0)
                .color(TEXT_PRIMARY),
        );
        ui.add_space(16.0);
        // ── Mic level meter ──
        ui.label(egui::RichText::new("Mic").size(11.0).color(TEXT_MUTED));
        let meter_width = 80.0;
        // Reserve a fixed 80x12 rect and paint the meter manually.
        let (rect, _response) =
            ui.allocate_exact_size(egui::vec2(meter_width, 12.0), egui::Sense::hover());
        let painter = ui.painter();
        // Meter track (background).
        painter.rect_filled(rect, 3.0, egui::Color32::from_rgb(40, 40, 58));
        // Clamp and scale the level for visual feedback.
        let clamped = mic_level.clamp(0.0, 0.5) * 2.0; // normalize 0.0–0.5 → 0.0–1.0
        let fill_width = clamped * meter_width;
        // Green → yellow → red as the level rises.
        let meter_color = if clamped > 0.8 {
            RED
        } else if clamped > 0.4 {
            YELLOW
        } else {
            GREEN
        };
        // Skip sub-pixel fills to avoid painting a sliver at silence.
        if fill_width > 0.5 {
            let fill_rect = egui::Rect::from_min_size(rect.min, egui::vec2(fill_width, 12.0));
            painter.rect_filled(fill_rect, 3.0, meter_color);
        }
        ui.add_space(16.0);
        // ── Mute / Deafen / Settings buttons ──
        let mute_label = if is_muted { "🔇 Unmute" } else { "🎤 Mute" };
        let mute_color = if is_muted { RED } else { TEXT_PRIMARY };
        if ui
            .add(egui::Button::new(
                egui::RichText::new(mute_label).size(12.0).color(mute_color),
            ))
            .clicked()
        {
            // Toggle the shared mute flag read by the DSP thread.
            self.mute_flag.store(!is_muted, Ordering::Relaxed);
        }
        let deafen_label = if self.is_deafened {
            "🔇 Undeafen"
        } else {
            "🎧 Deafen"
        };
        let deafen_color = if self.is_deafened { RED } else { TEXT_PRIMARY };
        if ui
            .add(egui::Button::new(
                egui::RichText::new(deafen_label)
                    .size(12.0)
                    .color(deafen_color),
            ))
            .clicked()
        {
            self.is_deafened = !self.is_deafened;
            // Deafening also mutes outgoing audio.
            // (Undeafening does NOT auto-unmute; the user unmutes manually.)
            if self.is_deafened {
                self.mute_flag.store(true, Ordering::Relaxed);
            }
        }
        // ── Dev settings toggle ──
        // Right-aligned settings button toggling the dev side panel.
        ui.with_layout(egui::Layout::right_to_left(egui::Align::Center), |ui| {
            if ui
                .add(egui::Button::new(
                    // NOTE(review): empty label — likely a lost gear glyph
                    // in this view; confirm intended character.
                    egui::RichText::new("").size(16.0).color(TEXT_MUTED),
                ))
                .on_hover_text("Developer Settings")
                .clicked()
            {
                self.show_dev_settings = !self.show_dev_settings;
            }
        });
    });
}
/// Draws the developer settings side panel (audio dumper toggle, etc.).
fn draw_dev_settings(&mut self, ui: &mut egui::Ui) {
ui.add_space(4.0);
ui.label(
egui::RichText::new("Developer Settings")
.size(14.0)
.strong()
.color(ACCENT),
);
ui.add_space(4.0);
ui.separator();
ui.add_space(8.0);
let mut dumper_enabled = self.audio_dumper_flag.load(Ordering::Relaxed);
if columns[1]
if ui
.checkbox(&mut dumper_enabled, "Enable Audio Dumper (.wav)")
.changed()
{
self.audio_dumper_flag
.store(dumper_enabled, Ordering::Relaxed);
}
let mut loopback_enabled = self.loopback_flag.load(Ordering::Relaxed);
if ui
.checkbox(&mut loopback_enabled, "Enable Mic Loopback")
.changed()
{
self.loopback_flag.store(loopback_enabled, Ordering::Relaxed);
}
ui.label(
egui::RichText::new("Writes raw_mic.wav and post_dsp.wav to the working directory.")
.size(11.0)
.color(TEXT_MUTED)
.italics(),
);
ui.add_space(12.0);
ui.label(
egui::RichText::new("Audio Pipeline")
.size(13.0)
.strong()
.color(TEXT_PRIMARY),
);
ui.add_space(4.0);
let mic_level = *self.mic_level_rx.borrow();
ui.label(
egui::RichText::new(format!("RMS Level: {mic_level:.4}"))
.size(12.0)
.monospace()
.color(TEXT_MUTED),
);
ui.label(
egui::RichText::new(format!("VAD Threshold: {VAD_RMS_THRESHOLD:.4}"))
.size(12.0)
.monospace()
.color(TEXT_MUTED),
);
let is_speaking = *self.active_speaker_rx.borrow();
let vad_label = if is_speaking { "ACTIVE" } else { "SILENT" };
let vad_color = if is_speaking { GREEN } else { TEXT_MUTED };
ui.horizontal(|ui| {
ui.label(
egui::RichText::new("VAD: ")
.size(12.0)
.monospace()
.color(TEXT_MUTED),
);
ui.label(
egui::RichText::new(vad_label)
.size(12.0)
.monospace()
.strong()
.color(vad_color),
);
});
}
}
/// Reference to the DSP module's VAD threshold for display in the dev panel.
// NOTE(review): this duplicates `VAD_RMS_THRESHOLD` (0.01) in the dsp
// module — keep the two values in sync, or make the dsp constant `pub`
// and re-export it here instead.
const VAD_RMS_THRESHOLD: f32 = 0.01;
impl eframe::App for VoiceApp {
    /// Top-level frame: theme, then side/bottom panels, then central chat.
    // NOTE(review): stable egui exposes `SidePanel::left`/`TopBottomPanel`
    // with `default_width`/`exact_height`, not `Panel::left(..).default_size`
    // — confirm this matches the egui/eframe version in use.
    fn ui(&mut self, ui: &mut egui::Ui, _frame: &mut eframe::Frame) {
        // Apply theme once per frame (cheap — just pointer comparisons internally).
        Self::apply_theme(ui.ctx());
        // ── Left panel: Channel tree ──
        egui::Panel::left("channel_tree")
            .default_size(200.0)
            .resizable(true)
            .frame(
                egui::Frame::new()
                    .fill(PANEL_BG)
                    .inner_margin(egui::Margin::same(8))
                    .stroke(egui::Stroke::new(1.0, SEPARATOR)),
            )
            .show_inside(ui, |ui| {
                self.draw_channel_tree(ui);
            });
        // ── Right panel: Dev settings (toggled) ──
        // Only laid out at all when the settings toggle is on.
        if self.show_dev_settings {
            egui::Panel::right("dev_settings")
                .default_size(220.0)
                .resizable(true)
                .frame(
                    egui::Frame::new()
                        .fill(PANEL_BG)
                        .inner_margin(egui::Margin::same(8))
                        .stroke(egui::Stroke::new(1.0, SEPARATOR)),
                )
                .show_inside(ui, |ui| {
                    self.draw_dev_settings(ui);
                });
        }
        // ── Bottom panel: Control bar ──
        egui::Panel::bottom("controls")
            .exact_size(40.0)
            .frame(
                egui::Frame::new()
                    .fill(PANEL_BG)
                    .inner_margin(egui::Margin::same(6))
                    .stroke(egui::Stroke::new(1.0, SEPARATOR)),
            )
            .show_inside(ui, |ui| {
                self.draw_control_bar(ui);
            });
        // ── Central panel: Chat ── (must come after the side/bottom panels
        // so it fills whatever space remains).
        egui::CentralPanel::default()
            .frame(
                egui::Frame::new()
                    .fill(BG_DARK)
                    .inner_margin(egui::Margin::same(12)),
            )
            .show_inside(ui, |ui| {
                self.draw_chat_panel(ui);
            });
        // Force continuous repaint so the watch channels reflect immediately.
        ui.ctx().request_repaint();
    }
}