Rust library to generate static websites

fix: no idea what im doing

+579 -299
+1
crates/maudit-cli/Cargo.toml
··· 38 38 39 39 [dev-dependencies] 40 40 tempfile = "3.24.0" 41 + tokio = { version = "1", features = ["macros", "rt-multi-thread", "test-util"] }
+13 -11
crates/maudit-cli/src/dev.rs
··· 10 10 }; 11 11 use notify_debouncer_full::{DebounceEventResult, DebouncedEvent, new_debouncer}; 12 12 use quanta::Instant; 13 - use server::WebSocketMessage; 13 + use server::StatusManager; 14 14 use std::{ 15 15 fs, 16 16 path::{Path, PathBuf}, 17 17 }; 18 18 use tokio::{ 19 19 signal, 20 - sync::{broadcast, mpsc::channel}, 20 + sync::mpsc::channel, 21 21 task::JoinHandle, 22 22 }; 23 23 use tracing::{error, info}; ··· 28 28 cwd: &str, 29 29 host: bool, 30 30 port: Option<u16>, 31 - ) -> Result<(), Box<dyn std::error::Error>> { 31 + ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> { 32 32 let start_time = Instant::now(); 33 33 info!(name: "dev", "Preparing dev environment…"); 34 34 35 - let (sender_websocket, _) = broadcast::channel::<WebSocketMessage>(100); 35 + // Create status manager (handles WebSocket communication) 36 + let status_manager = StatusManager::new(); 36 37 37 - // Create build manager (it will create its own status state internally) 38 - let build_manager = BuildManager::new(sender_websocket.clone()); 38 + // Create build manager 39 + let build_manager = BuildManager::new(status_manager.clone()); 39 40 40 41 // Do initial build 41 42 info!(name: "build", "Doing initial build…"); ··· 81 82 info!(name: "dev", "Starting web server..."); 82 83 web_server_thread = Some(tokio::spawn(server::start_dev_web_server( 83 84 start_time, 84 - sender_websocket.clone(), 85 + status_manager.clone(), 85 86 host, 86 87 port, 87 88 None, 88 - build_manager.current_status(), 89 89 ))); 90 90 } 91 91 92 92 // Clone build manager for the file watcher task 93 93 let build_manager_watcher = build_manager.clone(); 94 - let sender_websocket_watcher = sender_websocket.clone(); 94 + let status_manager_watcher = status_manager.clone(); 95 95 96 96 let file_watcher_task = tokio::spawn(async move { 97 97 let mut dev_server_started = initial_build_success; ··· 107 107 108 108 match result { 109 109 Ok(events) => { 110 + info!(name: "watch", "Received {} events: 
{:?}", events.len(), events); 110 111 // TODO: Handle rescan events, I don't fully understand the implication of them yet 111 112 // some issues: 112 113 // - https://github.com/notify-rs/notify/issues/434 ··· 155 156 dev_server_handle = 156 157 Some(tokio::spawn(server::start_dev_web_server( 157 158 start_time, 158 - sender_websocket_watcher.clone(), 159 + status_manager_watcher.clone(), 159 160 host, 160 161 port, 161 162 None, 162 - build_manager_watcher.current_status(), 163 163 ))); 164 164 } 165 165 Ok(false) => { ··· 171 171 } 172 172 } else { 173 173 // Normal rebuild - check if we need full recompilation or just rerun 174 + // Only collect paths from events that actually trigger a rebuild 174 175 let mut changed_paths: Vec<PathBuf> = events.iter() 176 + .filter(|e| should_rebuild_for_event(e)) 175 177 .flat_map(|e| e.paths.iter().cloned()) 176 178 .collect(); 177 179
+343 -225
crates/maudit-cli/src/dev/build.rs
··· 1 1 use cargo_metadata::Message; 2 2 use quanta::Instant; 3 - use server::{StatusType, WebSocketMessage, update_status}; 4 3 use std::path::PathBuf; 5 4 use std::sync::Arc; 6 5 use tokio::process::Command; 7 - use tokio::sync::broadcast; 6 + use tokio::sync::RwLock; 8 7 use tokio_util::sync::CancellationToken; 9 8 use tracing::{debug, error, info, warn}; 10 9 11 10 use crate::{ 12 - dev::server, 11 + dev::server::{StatusManager, StatusType}, 13 12 logging::{FormatElapsedTimeOptions, format_elapsed_time}, 14 13 }; 15 14 16 15 use super::dep_tracker::{DependencyTracker, find_target_dir}; 17 16 17 + /// Internal state shared across all BuildManager handles. 18 + struct BuildManagerState { 19 + current_cancel: RwLock<Option<CancellationToken>>, 20 + build_semaphore: tokio::sync::Semaphore, 21 + status_manager: StatusManager, 22 + dep_tracker: RwLock<Option<DependencyTracker>>, 23 + binary_path: RwLock<Option<PathBuf>>, 24 + // Cached values computed once at startup 25 + target_dir: Option<PathBuf>, 26 + binary_name: Option<String>, 27 + } 28 + 29 + /// Manages cargo build processes with cancellation support. 30 + /// Cheap to clone - all clones share the same underlying state. 
18 31 #[derive(Clone)] 19 32 pub struct BuildManager { 20 - current_cancel: Arc<tokio::sync::RwLock<Option<CancellationToken>>>, 21 - build_semaphore: Arc<tokio::sync::Semaphore>, 22 - websocket_tx: broadcast::Sender<WebSocketMessage>, 23 - current_status: Arc<tokio::sync::RwLock<Option<server::PersistentStatus>>>, 24 - dep_tracker: Arc<tokio::sync::RwLock<Option<DependencyTracker>>>, 25 - binary_path: Arc<tokio::sync::RwLock<Option<PathBuf>>>, 26 - // Cached values computed once at startup 27 - target_dir: Arc<Option<PathBuf>>, 28 - binary_name: Arc<Option<String>>, 33 + state: Arc<BuildManagerState>, 29 34 } 30 35 31 36 impl BuildManager { 32 - pub fn new(websocket_tx: broadcast::Sender<WebSocketMessage>) -> Self { 37 + pub fn new(status_manager: StatusManager) -> Self { 33 38 // Try to determine target directory and binary name at startup 34 39 let target_dir = find_target_dir().ok(); 35 40 let binary_name = Self::get_binary_name_from_cargo_toml().ok(); ··· 42 47 } 43 48 44 49 Self { 45 - current_cancel: Arc::new(tokio::sync::RwLock::new(None)), 46 - build_semaphore: Arc::new(tokio::sync::Semaphore::new(1)), // Only one build at a time 47 - websocket_tx, 48 - current_status: Arc::new(tokio::sync::RwLock::new(None)), 49 - dep_tracker: Arc::new(tokio::sync::RwLock::new(None)), 50 - binary_path: Arc::new(tokio::sync::RwLock::new(None)), 51 - target_dir: Arc::new(target_dir), 52 - binary_name: Arc::new(binary_name), 50 + state: Arc::new(BuildManagerState { 51 + current_cancel: RwLock::new(None), 52 + build_semaphore: tokio::sync::Semaphore::new(1), 53 + status_manager, 54 + dep_tracker: RwLock::new(None), 55 + binary_path: RwLock::new(None), 56 + target_dir, 57 + binary_name, 58 + }), 53 59 } 54 60 } 55 61 56 - /// Get a reference to the current status for use with the web server 57 - pub fn current_status(&self) -> Arc<tokio::sync::RwLock<Option<server::PersistentStatus>>> { 58 - self.current_status.clone() 59 - } 60 - 61 - /// Check if the given paths require 
recompilation based on dependency tracking 62 - /// Returns true if recompilation is needed, false if we can just rerun the binary 62 + /// Check if the given paths require recompilation based on dependency tracking. 63 + /// Returns true if recompilation is needed, false if we can just rerun the binary. 63 64 pub async fn needs_recompile(&self, changed_paths: &[PathBuf]) -> bool { 64 - let dep_tracker = self.dep_tracker.read().await; 65 + let dep_tracker = self.state.dep_tracker.read().await; 65 66 66 67 if let Some(tracker) = dep_tracker.as_ref() 67 68 && tracker.has_dependencies() ··· 77 78 true 78 79 } 79 80 80 - /// Rerun the binary without recompiling 81 + /// Rerun the binary without recompiling. 81 82 pub async fn rerun_binary( 82 83 &self, 83 84 changed_paths: &[PathBuf], 84 - ) -> Result<bool, Box<dyn std::error::Error>> { 85 - let binary_path = self.binary_path.read().await; 86 - 87 - let Some(path) = binary_path.as_ref() else { 88 - warn!(name: "build", "No binary path available, falling back to full rebuild"); 89 - return self.start_build().await; 85 + ) -> Result<bool, Box<dyn std::error::Error + Send + Sync>> { 86 + // Get binary path with limited lock scope 87 + let path = { 88 + let guard = self.state.binary_path.read().await; 89 + match guard.as_ref() { 90 + Some(p) if p.exists() => p.clone(), 91 + Some(p) => { 92 + warn!(name: "build", "Binary at {:?} no longer exists, falling back to full rebuild", p); 93 + return self.start_build().await; 94 + } 95 + None => { 96 + warn!(name: "build", "No binary path available, falling back to full rebuild"); 97 + return self.start_build().await; 98 + } 99 + } 90 100 }; 91 101 92 - if !path.exists() { 93 - warn!(name: "build", "Binary at {:?} no longer exists, falling back to full rebuild", path); 94 - return self.start_build().await; 95 - } 96 - 97 102 // Log that we're doing an incremental build 98 103 info!(name: "build", "Incremental build: {} files changed", changed_paths.len()); 99 - debug!(name: 
"build", "Changed files: {:?}", changed_paths); 104 + info!(name: "build", "Changed files: {:?}", changed_paths); 100 105 info!(name: "build", "Rerunning binary without recompilation..."); 101 106 102 - // Notify that build is starting (even though we're just rerunning) 103 - update_status( 104 - &self.websocket_tx, 105 - self.current_status.clone(), 106 - StatusType::Info, 107 - "Rerunning...", 108 - ) 109 - .await; 107 + self.state 108 + .status_manager 109 + .update(StatusType::Info, "Rerunning...") 110 + .await; 110 111 111 112 let build_start_time = Instant::now(); 112 113 113 114 // Serialize changed paths to JSON for the binary 114 115 let changed_files_json = serde_json::to_string(changed_paths)?; 115 116 116 - let child = Command::new(path) 117 + let child = Command::new(&path) 117 118 .envs([ 118 119 ("MAUDIT_DEV", "true"), 119 120 ("MAUDIT_QUIET", "true"), ··· 123 124 .stderr(std::process::Stdio::piped()) 124 125 .spawn()?; 125 126 126 - // Wait for the process to complete 127 127 let output = child.wait_with_output().await?; 128 128 129 129 let duration = build_start_time.elapsed(); ··· 131 131 format_elapsed_time(duration, &FormatElapsedTimeOptions::default_dev()); 132 132 133 133 if output.status.success() { 134 - // Optionally log output from the binary (includes incremental build info) 135 - // Enable with MAUDIT_SHOW_BINARY_OUTPUT=1 for debugging 136 134 if std::env::var("MAUDIT_SHOW_BINARY_OUTPUT").is_ok() { 137 135 let stdout = String::from_utf8_lossy(&output.stdout); 138 136 let stderr = String::from_utf8_lossy(&output.stderr); ··· 143 141 } 144 142 } 145 143 info!(name: "build", "Binary rerun finished {}", formatted_elapsed_time); 146 - update_status( 147 - &self.websocket_tx, 148 - self.current_status.clone(), 149 - StatusType::Success, 150 - "Binary rerun finished successfully", 151 - ) 152 - .await; 144 + self.state 145 + .status_manager 146 + .update(StatusType::Success, "Binary rerun finished successfully") 147 + .await; 153 148 Ok(true) 
154 149 } else { 155 150 let stderr = String::from_utf8_lossy(&output.stderr).to_string(); 156 151 let stdout = String::from_utf8_lossy(&output.stdout).to_string(); 157 152 error!(name: "build", "Binary rerun failed {}\nstdout: {}\nstderr: {}", 158 153 formatted_elapsed_time, stdout, stderr); 159 - update_status( 160 - &self.websocket_tx, 161 - self.current_status.clone(), 162 - StatusType::Error, 163 - &format!("Binary rerun failed:\n{}\n{}", stdout, stderr), 164 - ) 165 - .await; 154 + self.state 155 + .status_manager 156 + .update( 157 + StatusType::Error, 158 + &format!("Binary rerun failed:\n{}\n{}", stdout, stderr), 159 + ) 160 + .await; 166 161 Ok(false) 167 162 } 168 163 } 169 164 170 - /// Do initial build that can be cancelled (but isn't stored as current build) 171 - pub async fn do_initial_build(&self) -> Result<bool, Box<dyn std::error::Error>> { 165 + /// Do initial build that can be cancelled. 166 + pub async fn do_initial_build(&self) -> Result<bool, Box<dyn std::error::Error + Send + Sync>> { 172 167 self.internal_build(true).await 173 168 } 174 169 175 - /// Start a new build, cancelling any previous one 176 - pub async fn start_build(&self) -> Result<bool, Box<dyn std::error::Error>> { 170 + /// Start a new build, cancelling any previous one. 
171 + pub async fn start_build(&self) -> Result<bool, Box<dyn std::error::Error + Send + Sync>> { 177 172 self.internal_build(false).await 178 173 } 179 174 180 - /// Internal build method that handles both initial and regular builds 181 - async fn internal_build(&self, is_initial: bool) -> Result<bool, Box<dyn std::error::Error>> { 175 + async fn internal_build(&self, is_initial: bool) -> Result<bool, Box<dyn std::error::Error + Send + Sync>> { 182 176 // Cancel any existing build immediately 183 177 let cancel = CancellationToken::new(); 184 178 { 185 - let mut current_cancel = self.current_cancel.write().await; 179 + let mut current_cancel = self.state.current_cancel.write().await; 186 180 if let Some(old_cancel) = current_cancel.replace(cancel.clone()) { 187 181 old_cancel.cancel(); 188 182 } 189 183 } 190 184 191 185 // Acquire semaphore to ensure only one build runs at a time 192 - // This prevents resource conflicts if cancellation fails 193 - let _ = self.build_semaphore.acquire().await?; 186 + let _permit = self.state.build_semaphore.acquire().await?; 194 187 195 - // Notify that build is starting 196 - update_status( 197 - &self.websocket_tx, 198 - self.current_status.clone(), 199 - StatusType::Info, 200 - "Building...", 201 - ) 202 - .await; 188 + self.state 189 + .status_manager 190 + .update(StatusType::Info, "Building...") 191 + .await; 203 192 204 193 let mut child = Command::new("cargo") 205 194 .args([ ··· 217 206 .stderr(std::process::Stdio::piped()) 218 207 .spawn()?; 219 208 220 - // Take the stderr stream for manual handling 221 - let mut stdout = child.stdout.take().unwrap(); 222 - let mut stderr = child.stderr.take().unwrap(); 209 + // Take stdout/stderr before select! 
so we can use them in the completion branch 210 + // while still being able to kill the child in the cancellation branch 211 + let stdout = child.stdout.take().unwrap(); 212 + let stderr = child.stderr.take().unwrap(); 223 213 224 214 let build_start_time = Instant::now(); 225 - let websocket_tx = self.websocket_tx.clone(); 226 - let current_status = self.current_status.clone(); 227 - let dep_tracker_clone = self.dep_tracker.clone(); 228 - let binary_path_clone = self.binary_path.clone(); 229 - let target_dir_clone = self.target_dir.clone(); 230 - let binary_name_clone = self.binary_name.clone(); 231 215 232 - // Create a channel to get the build result back 233 - let (result_tx, mut result_rx) = tokio::sync::mpsc::channel::<bool>(1); 216 + tokio::select! { 217 + _ = cancel.cancelled() => { 218 + debug!(name: "build", "Build cancelled"); 219 + let _ = child.kill().await; 220 + self.state.status_manager.update(StatusType::Info, "Build cancelled").await; 221 + Ok(false) 222 + } 223 + result = self.run_build_to_completion(&mut child, stdout, stderr, is_initial, build_start_time) => { 224 + result 225 + } 226 + } 227 + } 234 228 235 - // Spawn watcher task to monitor the child process 236 - tokio::spawn(async move { 237 - let output_future = async { 238 - // Read stdout concurrently with waiting for process to finish 239 - let stdout_task = tokio::spawn(async move { 240 - let mut out = Vec::new(); 241 - tokio::io::copy(&mut stdout, &mut out).await.unwrap_or(0); 229 + /// Run the cargo build process to completion and handle the output. 
230 + async fn run_build_to_completion( 231 + &self, 232 + child: &mut tokio::process::Child, 233 + mut stdout: tokio::process::ChildStdout, 234 + mut stderr: tokio::process::ChildStderr, 235 + is_initial: bool, 236 + build_start_time: Instant, 237 + ) -> Result<bool, Box<dyn std::error::Error + Send + Sync>> { 238 + // Read stdout and stderr concurrently 239 + let stdout_task = tokio::spawn(async move { 240 + let mut out = Vec::new(); 241 + tokio::io::copy(&mut stdout, &mut out).await.unwrap_or(0); 242 242 243 - let mut rendered_messages: Vec<String> = Vec::new(); 243 + let mut rendered_messages: Vec<String> = Vec::new(); 244 244 245 - // Ideally we'd stream things as they come, but I can't figure it out 246 - for message in cargo_metadata::Message::parse_stream( 247 - String::from_utf8_lossy(&out).to_string().as_bytes(), 248 - ) { 249 - match message { 250 - Err(e) => { 251 - error!(name: "build", "Failed to parse cargo message: {}", e); 252 - continue; 253 - } 254 - Ok(message) => { 255 - match message { 256 - // Compiler wants to tell us something 257 - Message::CompilerMessage(msg) => { 258 - // TODO: For now, just send through the rendered messages, but in the future let's send 259 - // structured messages to the frontend so we can do better formatting 260 - if let Some(rendered) = &msg.message.rendered { 261 - info!("{}", rendered); 262 - rendered_messages.push(rendered.to_string()); 263 - } 264 - } 265 - // Random text came in, just log it 266 - Message::TextLine(msg) => { 267 - info!("{}", msg); 268 - } 269 - _ => {} 270 - } 271 - } 245 + for message in cargo_metadata::Message::parse_stream( 246 + String::from_utf8_lossy(&out).to_string().as_bytes(), 247 + ) { 248 + match message { 249 + Err(e) => { 250 + error!(name: "build", "Failed to parse cargo message: {}", e); 251 + } 252 + Ok(Message::CompilerMessage(msg)) => { 253 + if let Some(rendered) = &msg.message.rendered { 254 + info!("{}", rendered); 255 + rendered_messages.push(rendered.to_string()); 272 
256 } 273 257 } 258 + Ok(Message::TextLine(msg)) => { 259 + info!("{}", msg); 260 + } 261 + _ => {} 262 + } 263 + } 274 264 275 - (out, rendered_messages) 276 - }); 265 + (out, rendered_messages) 266 + }); 277 267 278 - let stderr_task = tokio::spawn(async move { 279 - let mut err = Vec::new(); 280 - tokio::io::copy(&mut stderr, &mut err).await.unwrap_or(0); 268 + let stderr_task = tokio::spawn(async move { 269 + let mut err = Vec::new(); 270 + tokio::io::copy(&mut stderr, &mut err).await.unwrap_or(0); 271 + err 272 + }); 281 273 282 - err 283 - }); 274 + let status = child.wait().await?; 275 + let (_stdout_bytes, rendered_messages) = stdout_task.await.unwrap_or_default(); 276 + let stderr_bytes = stderr_task.await.unwrap_or_default(); 284 277 285 - let status = child.wait().await?; 286 - let stdout_data = stdout_task.await.unwrap_or_default(); 287 - let stderr_data = stderr_task.await.unwrap_or_default(); 278 + let duration = build_start_time.elapsed(); 279 + let formatted_elapsed_time = format_elapsed_time( 280 + duration, 281 + &FormatElapsedTimeOptions::default_dev(), 282 + ); 288 283 289 - Ok::<(std::process::Output, Vec<String>), Box<dyn std::error::Error + Send + Sync>>( 290 - ( 291 - std::process::Output { 292 - status, 293 - stdout: stdout_data.0, 294 - stderr: stderr_data, 295 - }, 296 - stdout_data.1, 297 - ), 298 - ) 299 - }; 284 + if status.success() { 285 + let build_type = if is_initial { "Initial build" } else { "Rebuild" }; 286 + info!(name: "build", "{} finished {}", build_type, formatted_elapsed_time); 287 + self.state.status_manager.update(StatusType::Success, "Build finished successfully").await; 300 288 301 - tokio::select! 
{ 302 - _ = cancel.cancelled() => { 303 - debug!(name: "build", "Build cancelled"); 304 - let _ = child.kill().await; 305 - update_status(&websocket_tx, current_status, StatusType::Info, "Build cancelled").await; 306 - let _ = result_tx.send(false).await; // Build failed due to cancellation 307 - } 308 - res = output_future => { 309 - let duration = build_start_time.elapsed(); 310 - let formatted_elapsed_time = format_elapsed_time( 311 - duration, 312 - &FormatElapsedTimeOptions::default_dev(), 313 - ); 289 + self.update_dependency_tracker().await; 314 290 315 - let success = match res { 316 - Ok(output) => { 317 - let (output, rendered_messages) = output; 318 - if output.status.success() { 319 - let build_type = if is_initial { "Initial build" } else { "Rebuild" }; 320 - info!(name: "build", "{} finished {}", build_type, formatted_elapsed_time); 321 - update_status(&websocket_tx, current_status.clone(), StatusType::Success, "Build finished successfully").await; 291 + Ok(true) 292 + } else { 293 + let stderr_str = String::from_utf8_lossy(&stderr_bytes).to_string(); 294 + // Raw stderr sometimes has something to say whenever cargo fails 295 + println!("{}", stderr_str); 322 296 323 - // Update dependency tracker after successful build 324 - Self::update_dependency_tracker_after_build( 325 - dep_tracker_clone.clone(), 326 - binary_path_clone.clone(), 327 - target_dir_clone.clone(), 328 - binary_name_clone.clone(), 329 - ).await; 297 + let build_type = if is_initial { "Initial build" } else { "Rebuild" }; 298 + error!(name: "build", "{} failed with errors {}", build_type, formatted_elapsed_time); 330 299 331 - true 332 - } else { 333 - let stderr = String::from_utf8_lossy(&output.stderr).to_string(); 334 - println!("{}", stderr); // Raw stderr sometimes has something to say whenever cargo fails, even if the errors messages are actually in stdout 335 - let build_type = if is_initial { "Initial build" } else { "Rebuild" }; 336 - error!(name: "build", "{} failed with 
errors {}", build_type, formatted_elapsed_time); 337 - if is_initial { 338 - error!(name: "build", "Initial build needs to succeed before we can start the dev server"); 339 - update_status(&websocket_tx, current_status, StatusType::Error, "Initial build failed - fix errors and save to retry").await; 340 - } else { 341 - update_status(&websocket_tx, current_status, StatusType::Error, &rendered_messages.join("\n")).await; 342 - } 343 - false 344 - } 345 - } 346 - Err(e) => { 347 - error!(name: "build", "Failed to wait for build: {}", e); 348 - update_status(&websocket_tx, current_status, StatusType::Error, &format!("Failed to wait for build: {}", e)).await; 349 - false 350 - } 351 - }; 352 - let _ = result_tx.send(success).await; 353 - } 300 + if is_initial { 301 + error!(name: "build", "Initial build needs to succeed before we can start the dev server"); 302 + self.state.status_manager.update(StatusType::Error, "Initial build failed - fix errors and save to retry").await; 303 + } else { 304 + self.state.status_manager.update(StatusType::Error, &rendered_messages.join("\n")).await; 354 305 } 355 - }); 356 306 357 - // Wait for the build result 358 - let success = result_rx.recv().await.unwrap_or(false); 359 - Ok(success) 307 + Ok(false) 308 + } 360 309 } 361 310 362 - /// Update the dependency tracker after a successful build 363 - async fn update_dependency_tracker_after_build( 364 - dep_tracker: Arc<tokio::sync::RwLock<Option<DependencyTracker>>>, 365 - binary_path: Arc<tokio::sync::RwLock<Option<PathBuf>>>, 366 - target_dir: Arc<Option<PathBuf>>, 367 - binary_name: Arc<Option<String>>, 368 - ) { 369 - // Use cached binary name (computed once at startup) 370 - let Some(name) = binary_name.as_ref() else { 311 + /// Update the dependency tracker after a successful build. 
312 + async fn update_dependency_tracker(&self) { 313 + let Some(ref name) = self.state.binary_name else { 371 314 debug!(name: "build", "No binary name available, skipping dependency tracker update"); 372 315 return; 373 316 }; 374 317 375 - // Use cached target directory (computed once at startup) 376 - let Some(target) = target_dir.as_ref() else { 318 + let Some(ref target) = self.state.target_dir else { 377 319 debug!(name: "build", "No target directory available, skipping dependency tracker update"); 378 320 return; 379 321 }; 380 322 381 - // Update binary path using cached values 323 + // Update binary path 382 324 let bin_path = target.join(name); 383 325 if bin_path.exists() { 384 - *binary_path.write().await = Some(bin_path.clone()); 326 + *self.state.binary_path.write().await = Some(bin_path.clone()); 385 327 debug!(name: "build", "Binary path set to: {:?}", bin_path); 386 328 } else { 387 329 debug!(name: "build", "Binary not found at expected path: {:?}", bin_path); 388 330 } 389 331 390 332 // Reload the dependency tracker from the .d file 391 - // This MUST be done after each build since dependencies can change 392 333 match DependencyTracker::load_from_binary_name(name) { 393 334 Ok(tracker) => { 394 335 debug!(name: "build", "Loaded {} dependencies for tracking", tracker.get_dependencies().len()); 395 - *dep_tracker.write().await = Some(tracker); 336 + *self.state.dep_tracker.write().await = Some(tracker); 396 337 } 397 338 Err(e) => { 398 339 debug!(name: "build", "Could not load dependency tracker: {}", e); ··· 400 341 } 401 342 } 402 343 403 - /// Get the binary name from Cargo.toml in the current directory 404 - fn get_binary_name_from_cargo_toml() -> Result<String, Box<dyn std::error::Error>> { 344 + fn get_binary_name_from_cargo_toml() -> Result<String, Box<dyn std::error::Error + Send + Sync>> { 405 345 let cargo_toml_path = PathBuf::from("Cargo.toml"); 406 346 if !cargo_toml_path.exists() { 407 347 return Err("Cargo.toml not found in 
current directory".into()); ··· 410 350 let cargo_toml_content = std::fs::read_to_string(&cargo_toml_path)?; 411 351 let cargo_toml: toml::Value = toml::from_str(&cargo_toml_content)?; 412 352 413 - // First, try to get the package name 414 353 if let Some(package_name) = cargo_toml 415 354 .get("package") 416 355 .and_then(|p| p.get("name")) ··· 424 363 return Ok(bin_name.to_string()); 425 364 } 426 365 427 - // No explicit bin name, use package name 428 366 return Ok(package_name.to_string()); 429 367 } 430 368 431 369 Err("Could not find package name in Cargo.toml".into()) 370 + } 371 + 372 + /// Set the dependency tracker directly (for testing). 373 + #[cfg(test)] 374 + pub(crate) async fn set_dep_tracker(&self, tracker: Option<DependencyTracker>) { 375 + *self.state.dep_tracker.write().await = tracker; 376 + } 377 + 378 + /// Set the binary path directly (for testing). 379 + #[cfg(test)] 380 + pub(crate) async fn set_binary_path(&self, path: Option<PathBuf>) { 381 + *self.state.binary_path.write().await = path; 382 + } 383 + 384 + /// Get the current binary path (for testing). 385 + #[cfg(test)] 386 + pub(crate) async fn get_binary_path(&self) -> Option<PathBuf> { 387 + self.state.binary_path.read().await.clone() 388 + } 389 + 390 + /// Create a BuildManager with custom target_dir and binary_name (for testing). 
391 + #[cfg(test)] 392 + pub(crate) fn new_with_config( 393 + status_manager: StatusManager, 394 + target_dir: Option<PathBuf>, 395 + binary_name: Option<String>, 396 + ) -> Self { 397 + Self { 398 + state: Arc::new(BuildManagerState { 399 + current_cancel: RwLock::new(None), 400 + build_semaphore: tokio::sync::Semaphore::new(1), 401 + status_manager, 402 + dep_tracker: RwLock::new(None), 403 + binary_path: RwLock::new(None), 404 + target_dir, 405 + binary_name, 406 + }), 407 + } 408 + } 409 + } 410 + 411 + #[cfg(test)] 412 + mod tests { 413 + use super::*; 414 + use std::collections::HashMap; 415 + use std::time::SystemTime; 416 + use tempfile::TempDir; 417 + 418 + fn create_test_manager() -> BuildManager { 419 + let status_manager = StatusManager::new(); 420 + BuildManager::new_with_config(status_manager, None, None) 421 + } 422 + 423 + fn create_test_manager_with_config( 424 + target_dir: Option<PathBuf>, 425 + binary_name: Option<String>, 426 + ) -> BuildManager { 427 + let status_manager = StatusManager::new(); 428 + BuildManager::new_with_config(status_manager, target_dir, binary_name) 429 + } 430 + 431 + #[tokio::test] 432 + async fn test_build_manager_clone_shares_state() { 433 + let manager1 = create_test_manager(); 434 + let manager2 = manager1.clone(); 435 + 436 + // Set binary path via one clone 437 + let test_path = PathBuf::from("/test/path"); 438 + manager1.set_binary_path(Some(test_path.clone())).await; 439 + 440 + // Should be visible via the other clone 441 + assert_eq!(manager2.get_binary_path().await, Some(test_path)); 442 + } 443 + 444 + #[tokio::test] 445 + async fn test_needs_recompile_without_tracker() { 446 + let manager = create_test_manager(); 447 + 448 + // Without a dependency tracker, should always return true 449 + let changed = vec![PathBuf::from("src/main.rs")]; 450 + assert!(manager.needs_recompile(&changed).await); 451 + } 452 + 453 + #[tokio::test] 454 + async fn test_needs_recompile_with_empty_tracker() { 455 + let manager = 
create_test_manager(); 456 + 457 + // Set an empty tracker (no dependencies) 458 + let tracker = DependencyTracker::new(); 459 + manager.set_dep_tracker(Some(tracker)).await; 460 + 461 + // Empty tracker has no dependencies, so has_dependencies() returns false 462 + // This means we should still return true (recompile needed) 463 + let changed = vec![PathBuf::from("src/main.rs")]; 464 + assert!(manager.needs_recompile(&changed).await); 465 + } 466 + 467 + #[tokio::test] 468 + async fn test_needs_recompile_with_matching_dependency() { 469 + let manager = create_test_manager(); 470 + 471 + // Create a tracker with some dependencies 472 + let temp_dir = TempDir::new().unwrap(); 473 + let dep_file = temp_dir.path().join("src/lib.rs"); 474 + std::fs::create_dir_all(dep_file.parent().unwrap()).unwrap(); 475 + std::fs::write(&dep_file, "// test").unwrap(); 476 + 477 + // Get canonical path and current mod time 478 + let canonical_path = dep_file.canonicalize().unwrap(); 479 + let old_time = SystemTime::UNIX_EPOCH; // Very old time 480 + 481 + let mut tracker = DependencyTracker::new(); 482 + tracker.dependencies = HashMap::from([(canonical_path, old_time)]); 483 + 484 + manager.set_dep_tracker(Some(tracker)).await; 485 + 486 + // Changed file IS a dependency and is newer - should need recompile 487 + let changed = vec![dep_file]; 488 + assert!(manager.needs_recompile(&changed).await); 489 + } 490 + 491 + #[tokio::test] 492 + async fn test_needs_recompile_with_non_matching_file() { 493 + let manager = create_test_manager(); 494 + 495 + // Create a tracker with some dependencies 496 + let temp_dir = TempDir::new().unwrap(); 497 + let dep_file = temp_dir.path().join("src/lib.rs"); 498 + std::fs::create_dir_all(dep_file.parent().unwrap()).unwrap(); 499 + std::fs::write(&dep_file, "// test").unwrap(); 500 + 501 + let canonical_path = dep_file.canonicalize().unwrap(); 502 + let mod_time = std::fs::metadata(&dep_file).unwrap().modified().unwrap(); 503 + 504 + let mut tracker = 
DependencyTracker::new(); 505 + tracker.dependencies = HashMap::from([(canonical_path, mod_time)]); 506 + 507 + manager.set_dep_tracker(Some(tracker)).await; 508 + 509 + // Changed file is NOT a dependency (different file) 510 + let other_file = temp_dir.path().join("assets/style.css"); 511 + std::fs::create_dir_all(other_file.parent().unwrap()).unwrap(); 512 + std::fs::write(&other_file, "/* css */").unwrap(); 513 + 514 + let changed = vec![other_file]; 515 + assert!(!manager.needs_recompile(&changed).await); 516 + } 517 + 518 + #[tokio::test] 519 + async fn test_update_dependency_tracker_with_config_missing_binary() { 520 + let temp_dir = TempDir::new().unwrap(); 521 + let manager = create_test_manager_with_config( 522 + Some(temp_dir.path().to_path_buf()), 523 + Some("nonexistent-binary".to_string()), 524 + ); 525 + 526 + // Binary doesn't exist, so binary_path should not be set 527 + manager.update_dependency_tracker().await; 528 + 529 + assert!(manager.get_binary_path().await.is_none()); 530 + } 531 + 532 + #[tokio::test] 533 + async fn test_update_dependency_tracker_with_existing_binary() { 534 + let temp_dir = TempDir::new().unwrap(); 535 + let binary_name = "test-binary"; 536 + let binary_path = temp_dir.path().join(binary_name); 537 + 538 + // Create a fake binary file 539 + std::fs::write(&binary_path, "fake binary").unwrap(); 540 + 541 + let manager = create_test_manager_with_config( 542 + Some(temp_dir.path().to_path_buf()), 543 + Some(binary_name.to_string()), 544 + ); 545 + 546 + manager.update_dependency_tracker().await; 547 + 548 + // Binary path should be set 549 + assert_eq!(manager.get_binary_path().await, Some(binary_path)); 432 550 } 433 551 }
+2 -2
crates/maudit-cli/src/dev/dep_tracker.rs
··· 8 8 #[derive(Debug, Clone)] 9 9 pub struct DependencyTracker { 10 10 /// Path to the .d file 11 - d_file_path: Option<PathBuf>, 11 + pub(crate) d_file_path: Option<PathBuf>, 12 12 /// Map of dependency paths to their last modification times 13 - dependencies: HashMap<PathBuf, SystemTime>, 13 + pub(crate) dependencies: HashMap<PathBuf, SystemTime>, 14 14 } 15 15 16 16 /// Find the target directory using multiple strategies
+219 -60
crates/maudit-cli/src/dev/server.rs
··· 64 64 pub message: String, 65 65 } 66 66 67 + /// Manages status updates and WebSocket broadcasting. 68 + /// Cheap to clone - all clones share the same underlying state. 67 69 #[derive(Clone)] 68 - struct AppState { 70 + pub struct StatusManager { 69 71 tx: broadcast::Sender<WebSocketMessage>, 70 72 current_status: Arc<RwLock<Option<PersistentStatus>>>, 71 73 } 72 74 75 + impl StatusManager { 76 + pub fn new() -> Self { 77 + let (tx, _) = broadcast::channel::<WebSocketMessage>(100); 78 + Self { 79 + tx, 80 + current_status: Arc::new(RwLock::new(None)), 81 + } 82 + } 83 + 84 + /// Update the status and broadcast to all connected WebSocket clients. 85 + pub async fn update(&self, status_type: StatusType, message: &str) { 86 + // Only store persistent states (Success clears errors, Error stores the error) 87 + let persistent_status = match status_type { 88 + StatusType::Success => None, // Clear any error state 89 + StatusType::Error => Some(PersistentStatus { 90 + status_type: StatusType::Error, 91 + message: message.to_string(), 92 + }), 93 + // Everything else just keeps the current state 94 + _ => { 95 + let status = self.current_status.read().await; 96 + status.clone() // Keep existing persistent state 97 + } 98 + }; 99 + 100 + // Update the stored status 101 + { 102 + let mut status = self.current_status.write().await; 103 + *status = persistent_status; 104 + } 105 + 106 + // Send the message to all connected clients 107 + let _ = self.tx.send(WebSocketMessage { 108 + data: json!({ 109 + "type": status_type.to_string(), 110 + "message": message 111 + }) 112 + .to_string(), 113 + }); 114 + } 115 + 116 + /// Subscribe to WebSocket messages (for new connections). 117 + pub fn subscribe(&self) -> broadcast::Receiver<WebSocketMessage> { 118 + self.tx.subscribe() 119 + } 120 + 121 + /// Get the current persistent status (for new connections). 
122 + pub async fn get_current(&self) -> Option<PersistentStatus> { 123 + self.current_status.read().await.clone() 124 + } 125 + 126 + /// Send a raw WebSocket message (for initial errors, etc.). 127 + pub fn send_raw(&self, message: WebSocketMessage) { 128 + let _ = self.tx.send(message); 129 + } 130 + } 131 + 132 + impl Default for StatusManager { 133 + fn default() -> Self { 134 + Self::new() 135 + } 136 + } 137 + 138 + #[derive(Clone)] 139 + struct AppState { 140 + status_manager: StatusManager, 141 + } 142 + 73 143 fn inject_live_reload_script(html_content: &str, socket_addr: SocketAddr, host: bool) -> String { 74 144 let mut content = html_content.to_string(); 75 145 ··· 93 163 94 164 pub async fn start_dev_web_server( 95 165 start_time: Instant, 96 - tx: broadcast::Sender<WebSocketMessage>, 166 + status_manager: StatusManager, 97 167 host: bool, 98 168 port: Option<u16>, 99 169 initial_error: Option<String>, 100 - current_status: Arc<RwLock<Option<PersistentStatus>>>, 101 170 ) { 102 171 // TODO: The dist dir should be configurable 103 172 let dist_dir = "dist"; 104 173 105 174 // Send initial error if present 106 175 if let Some(error) = initial_error { 107 - let _ = tx.send(WebSocketMessage { 176 + status_manager.send_raw(WebSocketMessage { 108 177 data: json!({ 109 178 "type": StatusType::Error.to_string(), 110 179 "message": error ··· 172 241 .on_response(CustomOnResponse), 173 242 ) 174 243 .with_state(AppState { 175 - tx: tx.clone(), 176 - current_status: current_status.clone(), 244 + status_manager: status_manager.clone(), 177 245 }); 178 246 179 247 log_server_start( ··· 192 260 .unwrap(); 193 261 } 194 262 195 - pub async fn update_status( 196 - tx: &broadcast::Sender<WebSocketMessage>, 197 - current_status: Arc<RwLock<Option<PersistentStatus>>>, 198 - status_type: StatusType, 199 - message: &str, 200 - ) { 201 - // Only store persistent states (Success clears errors, Error stores the error) 202 - let persistent_status = match status_type { 203 - 
StatusType::Success => None, // Clear any error state 204 - StatusType::Error => Some(PersistentStatus { 205 - status_type: StatusType::Error, 206 - message: message.to_string(), 207 - }), 208 - // Everything else just keeps the current state 209 - _ => { 210 - let status = current_status.read().await; 211 - status.clone() // Keep existing persistent state 212 - } 213 - }; 214 - 215 - // Update the stored status 216 - { 217 - let mut status = current_status.write().await; 218 - *status = persistent_status; 219 - } 220 - 221 - // Send the message to all connected clients 222 - let _ = tx.send(WebSocketMessage { 223 - data: json!({ 224 - "type": status_type.to_string(), 225 - "message": message 226 - }) 227 - .to_string(), 228 - }); 229 - } 230 - 231 263 async fn add_dev_client_script( 232 264 req: Request, 233 265 next: Next, ··· 311 343 debug!("`{addr} connected."); 312 344 // finalize the upgrade process by returning upgrade callback. 313 345 // we can customize the callback by sending additional info such as address. 
314 - ws.on_upgrade(move |socket| handle_socket(socket, addr, state.tx, state.current_status)) 346 + ws.on_upgrade(move |socket| handle_socket(socket, addr, state.status_manager)) 315 347 } 316 348 317 349 async fn handle_socket( 318 350 socket: WebSocket, 319 351 who: SocketAddr, 320 - tx: broadcast::Sender<WebSocketMessage>, 321 - current_status: Arc<RwLock<Option<PersistentStatus>>>, 352 + status_manager: StatusManager, 322 353 ) { 323 354 let (mut sender, mut receiver) = socket.split(); 324 355 325 356 // Send current persistent status to new connection if there is one 326 - { 327 - let status = current_status.read().await; 328 - if let Some(persistent_status) = status.as_ref() { 329 - let _ = sender 330 - .send(Message::Text( 331 - json!({ 332 - "type": persistent_status.status_type.to_string(), 333 - "message": persistent_status.message 334 - }) 335 - .to_string() 336 - .into(), 337 - )) 338 - .await; 339 - } 357 + if let Some(persistent_status) = status_manager.get_current().await { 358 + let _ = sender 359 + .send(Message::Text( 360 + json!({ 361 + "type": persistent_status.status_type.to_string(), 362 + "message": persistent_status.message 363 + }) 364 + .to_string() 365 + .into(), 366 + )) 367 + .await; 340 368 } 341 369 342 - let mut rx = tx.subscribe(); 370 + let mut rx = status_manager.subscribe(); 343 371 344 372 tokio::select! 
{ 345 373 _ = async { ··· 387 415 _ = terminate => {}, 388 416 } 389 417 } 418 + 419 + #[cfg(test)] 420 + mod tests { 421 + use super::*; 422 + 423 + #[tokio::test] 424 + async fn test_status_manager_update_error_persists() { 425 + let manager = StatusManager::new(); 426 + 427 + manager.update(StatusType::Error, "Something went wrong").await; 428 + 429 + let status = manager.get_current().await; 430 + assert!(status.is_some()); 431 + let status = status.unwrap(); 432 + assert!(matches!(status.status_type, StatusType::Error)); 433 + assert_eq!(status.message, "Something went wrong"); 434 + } 435 + 436 + #[tokio::test] 437 + async fn test_status_manager_update_success_clears_error() { 438 + let manager = StatusManager::new(); 439 + 440 + // First set an error 441 + manager.update(StatusType::Error, "Build failed").await; 442 + assert!(manager.get_current().await.is_some()); 443 + 444 + // Then send success - should clear the error 445 + manager.update(StatusType::Success, "Build succeeded").await; 446 + assert!(manager.get_current().await.is_none()); 447 + } 448 + 449 + #[tokio::test] 450 + async fn test_status_manager_update_info_preserves_state() { 451 + let manager = StatusManager::new(); 452 + 453 + // Set an error 454 + manager.update(StatusType::Error, "Build failed").await; 455 + let original_status = manager.get_current().await; 456 + assert!(original_status.is_some()); 457 + 458 + // Send info - should preserve the error state 459 + manager.update(StatusType::Info, "Building...").await; 460 + let status = manager.get_current().await; 461 + assert!(status.is_some()); 462 + assert_eq!(status.unwrap().message, "Build failed"); 463 + } 464 + 465 + #[tokio::test] 466 + async fn test_status_manager_update_info_when_no_error() { 467 + let manager = StatusManager::new(); 468 + 469 + // No prior state 470 + assert!(manager.get_current().await.is_none()); 471 + 472 + // Send info - should remain None 473 + manager.update(StatusType::Info, "Building...").await; 474 + 
assert!(manager.get_current().await.is_none()); 475 + } 476 + 477 + #[tokio::test] 478 + async fn test_status_manager_subscribe_receives_messages() { 479 + let manager = StatusManager::new(); 480 + let mut rx = manager.subscribe(); 481 + 482 + manager.update(StatusType::Info, "Hello").await; 483 + 484 + let msg = rx.try_recv(); 485 + assert!(msg.is_ok()); 486 + let msg = msg.unwrap(); 487 + assert!(msg.data.contains("Hello")); 488 + assert!(msg.data.contains("info")); 489 + } 490 + 491 + #[tokio::test] 492 + async fn test_status_manager_multiple_subscribers() { 493 + let manager = StatusManager::new(); 494 + let mut rx1 = manager.subscribe(); 495 + let mut rx2 = manager.subscribe(); 496 + 497 + manager.update(StatusType::Success, "Done").await; 498 + 499 + // Both subscribers should receive the message 500 + assert!(rx1.try_recv().is_ok()); 501 + assert!(rx2.try_recv().is_ok()); 502 + } 503 + 504 + #[tokio::test] 505 + async fn test_status_manager_send_raw() { 506 + let manager = StatusManager::new(); 507 + let mut rx = manager.subscribe(); 508 + 509 + manager.send_raw(WebSocketMessage { 510 + data: r#"{"custom": "message"}"#.to_string(), 511 + }); 512 + 513 + let msg = rx.try_recv(); 514 + assert!(msg.is_ok()); 515 + assert_eq!(msg.unwrap().data, r#"{"custom": "message"}"#); 516 + } 517 + 518 + #[tokio::test] 519 + async fn test_status_manager_clone_shares_state() { 520 + let manager1 = StatusManager::new(); 521 + let manager2 = manager1.clone(); 522 + 523 + // Update via one clone 524 + manager1.update(StatusType::Error, "Error from clone 1").await; 525 + 526 + // Should be visible via the other clone 527 + let status = manager2.get_current().await; 528 + assert!(status.is_some()); 529 + assert_eq!(status.unwrap().message, "Error from clone 1"); 530 + } 531 + 532 + #[tokio::test] 533 + async fn test_status_manager_clone_shares_broadcast() { 534 + let manager1 = StatusManager::new(); 535 + let manager2 = manager1.clone(); 536 + 537 + // Subscribe via one clone 538 
+ let mut rx = manager2.subscribe(); 539 + 540 + // Send via the other clone 541 + manager1.update(StatusType::Info, "From clone 1").await; 542 + 543 + // Should receive the message 544 + let msg = rx.try_recv(); 545 + assert!(msg.is_ok()); 546 + assert!(msg.unwrap().data.contains("From clone 1")); 547 + } 548 + }