diff --git a/src-tauri/src/commands/ssh_commands.rs b/src-tauri/src/commands/ssh_commands.rs index 44c4999..b4c6170 100644 --- a/src-tauri/src/commands/ssh_commands.rs +++ b/src-tauri/src/commands/ssh_commands.rs @@ -10,10 +10,6 @@ use crate::ssh::session::{AuthMethod, SessionInfo}; use crate::AppState; /// Connect to an SSH server with password authentication. -/// -/// Opens a PTY, starts a shell, and begins streaming output via -/// `ssh:data:{session_id}` events. Also opens an SFTP subsystem channel on -/// the same connection. Returns the session UUID. #[tauri::command] pub async fn connect_ssh( hostname: String, @@ -41,12 +37,6 @@ pub async fn connect_ssh( } /// Connect to an SSH server with private key authentication. -/// -/// The `private_key_pem` should be the PEM-encoded private key content. -/// `passphrase` is `None` if the key is not encrypted. -/// -/// Opens a PTY, starts a shell, and begins streaming output via -/// `ssh:data:{session_id}` events. Returns the session UUID. #[tauri::command] pub async fn connect_ssh_with_key( hostname: String, @@ -78,8 +68,6 @@ pub async fn connect_ssh_with_key( } /// Write data to a session's PTY stdin. -/// -/// The `data` parameter is a string that will be sent as UTF-8 bytes. #[tauri::command] pub async fn ssh_write( session_id: String, @@ -101,8 +89,15 @@ pub async fn ssh_resize( } /// Disconnect an SSH session — closes the channel and removes it. -/// -/// Also removes the associated SFTP client. +#[tauri::command] +pub async fn disconnect_session( + session_id: String, + state: State<'_, AppState>, +) -> Result<(), String> { + state.ssh.disconnect(&session_id, &state.sftp).await +} + +/// Alias for disconnect_session. 
#[tauri::command] pub async fn disconnect_ssh( session_id: String, diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 5219a26..99cb3e5 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -52,9 +52,6 @@ impl AppState { let sftp = SftpService::new(); let rdp = RdpService::new(); let theme = ThemeService::new(database.clone()); - // WorkspaceService shares the same SettingsService interface; we clone - // the Database to construct a second SettingsService for the workspace - // module so it can remain self-contained. let workspace_settings = SettingsService::new(database.clone()); let workspace = WorkspaceService::new(workspace_settings); @@ -72,39 +69,33 @@ impl AppState { }) } - /// Returns true if the vault has never been set up. pub fn is_first_run(&self) -> bool { self.settings.get("vault_salt").unwrap_or_default().is_empty() } - /// Returns true if the vault is currently unlocked. pub fn is_unlocked(&self) -> bool { self.vault.lock().unwrap().is_some() } } -/// Determine the data directory for Wraith. 
pub fn data_directory() -> PathBuf { - // Windows: %APPDATA%\Wraith if let Ok(appdata) = std::env::var("APPDATA") { return PathBuf::from(appdata).join("Wraith"); } - - // macOS/Linux: XDG_DATA_HOME or ~/.local/share/wraith if let Ok(home) = std::env::var("HOME") { + if cfg!(target_os = "macos") { + return PathBuf::from(home).join("Library").join("Application Support").join("Wraith"); + } if let Ok(xdg) = std::env::var("XDG_DATA_HOME") { return PathBuf::from(xdg).join("wraith"); } return PathBuf::from(home).join(".local").join("share").join("wraith"); } - - // Fallback PathBuf::from(".") } #[cfg_attr(mobile, tauri::mobile_entry_point)] pub fn run() { - // Debug log to file — release builds hide console output via windows_subsystem = "windows" let log_path = data_directory().join("wraith-startup.log"); let _ = std::fs::create_dir_all(data_directory()); let log = |msg: &str| { @@ -129,30 +120,14 @@ pub fn run() { } }; - // Seed built-in themes (INSERT OR IGNORE — safe to call on every boot). app_state.theme.seed_builtins(); - // Crash recovery detection: log dirty shutdowns so they can be acted on. - if app_state.workspace.was_clean_shutdown() { - app_state - .workspace - .clear_clean_shutdown() - .unwrap_or_else(|e| eprintln!("workspace: failed to clear clean-shutdown flag: {e}")); - } else { - // No clean-shutdown flag found — either first run or a crash/kill. - // Only log if a snapshot exists (i.e. there were open tabs last time). 
- if app_state.workspace.load().is_some() { - eprintln!("workspace: dirty shutdown detected — a previous session may not have exited cleanly"); - } - } - log("Building Tauri app..."); tauri::Builder::default() .plugin(tauri_plugin_shell::init()) .manage(app_state) .setup(|app| { - // Open DevTools in release builds for debugging use tauri::Manager; if let Some(window) = app.get_webview_window("main") { window.open_devtools(); @@ -186,7 +161,7 @@ pub fn run() { commands::ssh_commands::connect_ssh_with_key, commands::ssh_commands::ssh_write, commands::ssh_commands::ssh_resize, - commands::ssh_commands::disconnect_ssh, + commands::ssh_commands::disconnect_session, commands::ssh_commands::disconnect_ssh, commands::ssh_commands::list_ssh_sessions, commands::sftp_commands::sftp_list, commands::sftp_commands::sftp_read_file, diff --git a/src-tauri/src/rdp/mod.rs b/src-tauri/src/rdp/mod.rs index 6a467a2..a4be97f 100644 --- a/src-tauri/src/rdp/mod.rs +++ b/src-tauri/src/rdp/mod.rs @@ -1,12 +1,4 @@ -//! RDP session manager — connects to Windows RDP servers via IronRDP, -//! maintains an RGBA frame buffer per session, and exposes input injection. -//! -//! Architecture: -//! - `RdpService` holds a `DashMap` of active sessions. -//! - Each session spawns a tokio task that runs the IronRDP active stage loop, -//! reading frames from the server and updating a shared `Vec` (RGBA). -//! - The frontend fetches frames via a Tauri command that reads the buffer. -//! - Mouse/keyboard input is sent to the session via an mpsc channel. + pub mod input; @@ -58,7 +50,6 @@ pub struct RdpSessionInfo { pub connected: bool, } -/// Input events sent from the frontend to a session's background task. enum InputEvent { Mouse { x: u16, @@ -72,24 +63,15 @@ enum InputEvent { Disconnect, } -// ── Session handle ──────────────────────────────────────────────────────────── - -/// A handle to a running RDP session. The actual IronRDP connection runs in a -/// background tokio task. 
This struct holds the shared frame buffer and an input -/// channel. struct RdpSessionHandle { id: String, hostname: String, width: u16, height: u16, - /// RGBA pixel data — updated by the background task, read by the frontend. frame_buffer: Arc>>, - /// Send input events to the background task. input_tx: mpsc::UnboundedSender, } -// ── Service ─────────────────────────────────────────────────────────────────── - pub struct RdpService { sessions: DashMap>, } @@ -101,18 +83,12 @@ impl RdpService { } } - /// Connect to an RDP server. Returns the session UUID on success. - /// - /// The entire RDP connection (handshake + active session loop) runs in a - /// dedicated thread with its own tokio runtime. This avoids Send/lifetime - /// issues with ironrdp's internal trait objects and tokio::spawn. pub fn connect(&self, config: RdpConfig) -> Result { let session_id = uuid::Uuid::new_v4().to_string(); let width = config.width; let height = config.height; let hostname = config.hostname.clone(); - // Create shared frame buffer — initialized to opaque black. let buf_size = (width as usize) * (height as usize) * 4; let mut initial_buf = vec![0u8; buf_size]; for pixel in initial_buf.chunks_exact_mut(4) { @@ -120,10 +96,8 @@ impl RdpService { } let frame_buffer = Arc::new(TokioMutex::new(initial_buf)); - // Create input channel. let (input_tx, input_rx) = mpsc::unbounded_channel(); - // Build session handle (accessible from main thread for frame reads + input sends). let handle = Arc::new(RdpSessionHandle { id: session_id.clone(), hostname: hostname.clone(), @@ -135,7 +109,6 @@ impl RdpService { self.sessions.insert(session_id.clone(), handle); - // Spawn dedicated thread for the RDP connection + session loop. let sid = session_id.clone(); let sessions_ref = self.sessions.clone(); let (ready_tx, ready_rx) = std::sync::mpsc::channel::>(); @@ -146,7 +119,6 @@ impl RdpService { .build() .unwrap(); rt.block_on(async move { - // Build connector config. 
let connector_config = match build_connector_config(&config) { Ok(c) => c, Err(e) => { @@ -156,21 +128,23 @@ impl RdpService { } }; - // Establish connection (TCP + TLS + CredSSP + RDP handshake). - let (connection_result, framed) = - match establish_connection(connector_config, &config.hostname, config.port).await { - Ok(r) => r, - Err(e) => { - let _ = ready_tx.send(Err(format!("RDP connection failed: {}", e))); - sessions_ref.remove(&sid); - return; - } - }; + let (connection_result, framed) = match tokio::time::timeout(std::time::Duration::from_secs(15), establish_connection(connector_config, &config.hostname, config.port)).await { + Ok(Ok(r)) => r, + Ok(Err(e)) => { + let _ = ready_tx.send(Err(format!("RDP connection failed: {}", e))); + sessions_ref.remove(&sid); + return; + } + Err(_) => { + let _ = ready_tx.send(Err("RDP connection timed out after 15s".to_string())); + sessions_ref.remove(&sid); + return; + } + }; info!("RDP connection established to {}:{} (session {})", config.hostname, config.port, sid); let _ = ready_tx.send(Ok(())); - // Run active session loop until disconnect. if let Err(e) = run_active_session( connection_result, framed, @@ -188,7 +162,6 @@ impl RdpService { }); }); - // Wait for the connection to establish or fail. match ready_rx.recv() { Ok(Ok(())) => {} Ok(Err(e)) => { @@ -204,119 +177,55 @@ impl RdpService { Ok(session_id) } - /// Get the current frame buffer as base64-encoded RGBA data. pub async fn get_frame(&self, session_id: &str) -> Result { - let handle = self - .sessions - .get(session_id) - .ok_or_else(|| format!("RDP session {} not found", session_id))?; - + let handle = self.sessions.get(session_id).ok_or_else(|| format!("RDP session {} not found", session_id))?; let buf = handle.frame_buffer.lock().await; let encoded = base64::engine::general_purpose::STANDARD.encode(&*buf); Ok(encoded) } - /// Get the raw frame buffer bytes (for potential future optimization). 
pub async fn get_frame_raw(&self, session_id: &str) -> Result, String> { - let handle = self - .sessions - .get(session_id) - .ok_or_else(|| format!("RDP session {} not found", session_id))?; - + let handle = self.sessions.get(session_id).ok_or_else(|| format!("RDP session {} not found", session_id))?; let buf = handle.frame_buffer.lock().await; Ok(buf.clone()) } - /// Send a mouse event to the RDP session. - /// - /// The `flags` parameter uses MS-RDPBCGR mouse event flags (see `input::mouse_flags`). - /// The frontend should construct these from DOM mouse events. pub fn send_mouse(&self, session_id: &str, x: u16, y: u16, flags: u32) -> Result<(), String> { - let handle = self - .sessions - .get(session_id) - .ok_or_else(|| format!("RDP session {} not found", session_id))?; - - handle - .input_tx - .send(InputEvent::Mouse { x, y, flags }) - .map_err(|_| format!("RDP session {} input channel closed", session_id)) + let handle = self.sessions.get(session_id).ok_or_else(|| format!("RDP session {} not found", session_id))?; + handle.input_tx.send(InputEvent::Mouse { x, y, flags }).map_err(|_| format!("RDP session {} input channel closed", session_id)) } - /// Send a keyboard event to the RDP session. - /// - /// `scancode` is the RDP hardware scancode (use `input::js_key_to_scancode` - /// on the frontend side or pass it through). `pressed` indicates key-down - /// vs key-up. 
pub fn send_key(&self, session_id: &str, scancode: u16, pressed: bool) -> Result<(), String> { - let handle = self - .sessions - .get(session_id) - .ok_or_else(|| format!("RDP session {} not found", session_id))?; - - handle - .input_tx - .send(InputEvent::Key { scancode, pressed }) - .map_err(|_| format!("RDP session {} input channel closed", session_id)) + let handle = self.sessions.get(session_id).ok_or_else(|| format!("RDP session {} not found", session_id))?; + handle.input_tx.send(InputEvent::Key { scancode, pressed }).map_err(|_| format!("RDP session {} input channel closed", session_id)) } - /// Disconnect an RDP session. pub fn disconnect(&self, session_id: &str) -> Result<(), String> { - let handle = self - .sessions - .get(session_id) - .ok_or_else(|| format!("RDP session {} not found", session_id))?; - - // Send disconnect signal — the background task will clean up. + let handle = self.sessions.get(session_id).ok_or_else(|| format!("RDP session {} not found", session_id))?; let _ = handle.input_tx.send(InputEvent::Disconnect); - // Remove from map immediately so no new commands target it. drop(handle); self.sessions.remove(session_id); - info!("RDP session {} disconnect requested", session_id); Ok(()) } - /// List all active RDP sessions. pub fn list_sessions(&self) -> Vec { - self.sessions - .iter() - .map(|entry| { - let h = entry.value(); - RdpSessionInfo { - id: h.id.clone(), - hostname: h.hostname.clone(), - width: h.width, - height: h.height, - connected: !h.input_tx.is_closed(), - } - }) - .collect() + self.sessions.iter().map(|entry| { + let h = entry.value(); + RdpSessionInfo { id: h.id.clone(), hostname: h.hostname.clone(), width: h.width, height: h.height, connected: !h.input_tx.is_closed() } + }).collect() } } -// Clone the DashMap reference for use in spawned tasks. impl Clone for RdpService { fn clone(&self) -> Self { - // This is intentionally a shallow clone — we want to share the same - // sessions map. 
But since DashMap doesn't implement Clone directly in - // a way we can use here, we use a different approach: the service - // itself is stored in AppState and accessed via State. - // The Clone here is only needed if we want to pass a reference to - // spawned tasks, which we handle via Arc internally. unreachable!("RdpService should not be cloned — access via State"); } } -// ── Connection establishment ────────────────────────────────────────────────── - -/// Build the IronRDP `connector::Config` from our simplified `RdpConfig`. fn build_connector_config(config: &RdpConfig) -> Result { Ok(connector::Config { - credentials: Credentials::UsernamePassword { - username: config.username.clone(), - password: config.password.clone(), - }, + credentials: Credentials::UsernamePassword { username: config.username.clone(), password: config.password.clone() }, domain: config.domain.clone(), enable_tls: false, enable_credssp: true, @@ -326,15 +235,11 @@ fn build_connector_config(config: &RdpConfig) -> Result Result Result AsyncReadWrite for T {} type UpgradedFramed = TokioFramed>; -/// Perform the full RDP connection: TCP -> TLS upgrade -> CredSSP -> RDP handshake. -async fn establish_connection( - config: connector::Config, - hostname: &str, - port: u16, -) -> Result<(ConnectionResult, UpgradedFramed), String> { - // Resolve and connect TCP. 
+async fn establish_connection(config: connector::Config, hostname: &str, port: u16) -> Result<(ConnectionResult, UpgradedFramed), String> { let addr = format!("{}:{}", hostname, port); - let stream = TcpStream::connect(&addr) - .await - .map_err(|e| format!("TCP connect to {} failed: {}", addr, e))?; - - let client_addr = stream - .local_addr() - .map_err(|e| format!("Failed to get local address: {}", e))?; - + let stream = TcpStream::connect(&addr).await.map_err(|e| format!("TCP connect to {} failed: {}", addr, e))?; + let client_addr = stream.local_addr().map_err(|e| format!("Failed to get local address: {}", e))?; let mut framed = TokioFramed::new(stream); let mut connector = ClientConnector::new(config, client_addr); - - // Phase 1: Initial connection (pre-TLS). - let should_upgrade = ironrdp_tokio::connect_begin(&mut framed, &mut connector) - .await - .map_err(|e| format!("RDP connect_begin failed: {}", e))?; - - debug!("RDP TLS upgrade starting for {}", hostname); - - // Phase 2: TLS upgrade. + let should_upgrade = ironrdp_tokio::connect_begin(&mut framed, &mut connector).await.map_err(|e| format!("RDP connect_begin failed: {}", e))?; let (initial_stream, leftover_bytes) = framed.into_inner(); - - let (tls_stream, tls_cert) = ironrdp_tls::upgrade(initial_stream, hostname) - .await - .map_err(|e| format!("TLS upgrade failed: {}", e))?; - + let (tls_stream, tls_cert) = ironrdp_tls::upgrade(initial_stream, hostname).await.map_err(|e| format!("TLS upgrade failed: {}", e))?; let upgraded = ironrdp_tokio::mark_as_upgraded(should_upgrade, &mut connector); - - // Wrap the TLS stream in an erased box for the framed type. let erased_stream: Box = Box::new(tls_stream); let mut upgraded_framed = TokioFramed::new_with_leftover(erased_stream, leftover_bytes); - - // Phase 3: CredSSP + finalize. - let server_public_key = ironrdp_tls::extract_tls_server_public_key(&tls_cert) - .ok_or_else(|| "Failed to extract TLS server public key".to_string())? 
- .to_owned(); - - let connection_result = ironrdp_tokio::connect_finalize( - upgraded, - connector, - &mut upgraded_framed, - &mut ReqwestNetworkClient::new(), - hostname.into(), - server_public_key, - None, // No Kerberos config - ) - .await - .map_err(|e| format!("RDP connect_finalize failed: {}", e))?; - - debug!("RDP connection finalized for {}", hostname); - + let server_public_key = ironrdp_tls::extract_tls_server_public_key(&tls_cert).ok_or_else(|| "Failed to extract TLS server public key".to_string())?.to_owned(); + let connection_result = ironrdp_tokio::connect_finalize(upgraded, connector, &mut upgraded_framed, &mut ReqwestNetworkClient::new(), hostname.into(), server_public_key, None).await.map_err(|e| format!("RDP connect_finalize failed: {}", e))?; Ok((connection_result, upgraded_framed)) } -// ── Active session loop ─────────────────────────────────────────────────────── - -/// Run the active RDP session loop — processes incoming frames and outgoing input. -async fn run_active_session( - connection_result: ConnectionResult, - framed: UpgradedFramed, - frame_buffer: Arc>>, - mut input_rx: mpsc::UnboundedReceiver, - width: u16, - height: u16, -) -> Result<(), String> { +async fn run_active_session(connection_result: ConnectionResult, framed: UpgradedFramed, frame_buffer: Arc>>, mut input_rx: mpsc::UnboundedReceiver, width: u16, height: u16) -> Result<(), String> { let (mut reader, mut writer) = split_tokio_framed(framed); - let mut image = DecodedImage::new(PixelFormat::RgbA32, width, height); let mut active_stage = ActiveStage::new(connection_result); let mut input_db = rdp_input::Database::new(); - loop { let outputs = tokio::select! { - // Read a PDU from the server. frame = reader.read_pdu() => { - let (action, payload) = frame - .map_err(|e| format!("Failed to read RDP frame: {}", e))?; - - active_stage - .process(&mut image, action, &payload) - .map_err(|e| format!("Failed to process RDP frame: {}", e))? 
+ let (action, payload) = frame.map_err(|e| format!("Failed to read RDP frame: {}", e))?; + active_stage.process(&mut image, action, &payload).map_err(|e| format!("Failed to process RDP frame: {}", e))? } - // Receive input from the frontend. input_event = input_rx.recv() => { match input_event { Some(InputEvent::Disconnect) | None => { - info!("RDP session disconnect signal received"); - // Attempt graceful shutdown. - match active_stage.graceful_shutdown() { - Ok(outputs) => { - for out in outputs { - if let ActiveStageOutput::ResponseFrame(frame) = out { - let _ = writer.write_all(&frame).await; - } - } - } - Err(e) => { - warn!("Graceful RDP shutdown failed: {}", e); - } + if let Ok(outputs) = active_stage.graceful_shutdown() { + for out in outputs { if let ActiveStageOutput::ResponseFrame(frame) = out { let _ = writer.write_all(&frame).await; } } } return Ok(()); } Some(InputEvent::Mouse { x, y, flags }) => { let ops = translate_mouse_flags(x, y, flags); let events = input_db.apply(ops); - active_stage - .process_fastpath_input(&mut image, &events) - .map_err(|e| format!("Failed to process mouse input: {}", e))? + active_stage.process_fastpath_input(&mut image, &events).map_err(|e| format!("Failed to process mouse input: {}", e))? } Some(InputEvent::Key { scancode, pressed }) => { let sc = Scancode::from_u16(scancode); - let op = if pressed { - Operation::KeyPressed(sc) - } else { - Operation::KeyReleased(sc) - }; + let op = if pressed { Operation::KeyPressed(sc) } else { Operation::KeyReleased(sc) }; let events = input_db.apply([op]); - active_stage - .process_fastpath_input(&mut image, &events) - .map_err(|e| format!("Failed to process keyboard input: {}", e))? + active_stage.process_fastpath_input(&mut image, &events).map_err(|e| format!("Failed to process keyboard input: {}", e))? } } } }; - - // Process outputs from the active stage. 
for out in outputs { match out { - ActiveStageOutput::ResponseFrame(frame) => { - writer - .write_all(&frame) - .await - .map_err(|e| format!("Failed to write RDP response frame: {}", e))?; - } + ActiveStageOutput::ResponseFrame(frame) => { writer.write_all(&frame).await.map_err(|e| format!("Failed to write RDP response frame: {}", e))?; } ActiveStageOutput::GraphicsUpdate(_region) => { - // Copy the decoded image data into the shared frame buffer. let mut buf = frame_buffer.lock().await; let src = image.data(); - let dst_len = buf.len(); - if src.len() == dst_len { - buf.copy_from_slice(src); - } else { - // Desktop size may have changed — resize the buffer. - *buf = src.to_vec(); - } - } - ActiveStageOutput::Terminate(reason) => { - info!("RDP session terminated: {:?}", reason); - return Ok(()); - } - ActiveStageOutput::DeactivateAll(_connection_activation) => { - // The server requested deactivation-reactivation. For now, - // log and continue — a full implementation would re-run - // the connection activation sequence. - warn!("RDP server sent DeactivateAll — reconnection not yet implemented"); - return Ok(()); - } - // Pointer events — we could emit these to the frontend for - // custom cursor rendering, but for now we just log them. - ActiveStageOutput::PointerDefault => { - debug!("RDP pointer: default"); - } - ActiveStageOutput::PointerHidden => { - debug!("RDP pointer: hidden"); - } - ActiveStageOutput::PointerPosition { x, y } => { - debug!("RDP pointer position: ({}, {})", x, y); - } - ActiveStageOutput::PointerBitmap(_) => { - debug!("RDP pointer bitmap received"); - } - _ => { - // Future variants (MultitransportRequest, etc.) 
+ if src.len() == buf.len() { buf.copy_from_slice(src); } else { *buf = src.to_vec(); } } + ActiveStageOutput::Terminate(reason) => { info!("RDP session terminated: {:?}", reason); return Ok(()); } + ActiveStageOutput::DeactivateAll(_) => { warn!("RDP server sent DeactivateAll — reconnection not yet implemented"); return Ok(()); } + _ => {} } } } } -// ── Input translation ───────────────────────────────────────────────────────── - -/// Translate MS-RDPBCGR mouse flags into IronRDP `Operation` values. -/// -/// The frontend sends raw MS-RDPBCGR flags so this mapping is straightforward. fn translate_mouse_flags(x: u16, y: u16, flags: u32) -> Vec { let mut ops = Vec::new(); let pos = MousePosition { x, y }; - - // Always include a move operation if the MOVE flag is set. - if flags & mouse_flags::MOVE != 0 { - ops.push(Operation::MouseMove(pos)); - } - - // Check for button press/release. + if flags & mouse_flags::MOVE != 0 { ops.push(Operation::MouseMove(pos)); } let is_down = flags & mouse_flags::DOWN != 0; - - if flags & mouse_flags::BUTTON1 != 0 { - if is_down { - ops.push(Operation::MouseButtonPressed(MouseButton::Left)); - } else { - ops.push(Operation::MouseButtonReleased(MouseButton::Left)); - } - } - - if flags & mouse_flags::BUTTON2 != 0 { - if is_down { - ops.push(Operation::MouseButtonPressed(MouseButton::Right)); - } else { - ops.push(Operation::MouseButtonReleased(MouseButton::Right)); - } - } - - if flags & mouse_flags::BUTTON3 != 0 { - if is_down { - ops.push(Operation::MouseButtonPressed(MouseButton::Middle)); - } else { - ops.push(Operation::MouseButtonReleased(MouseButton::Middle)); - } - } - - // Wheel events. 
- if flags & mouse_flags::WHEEL != 0 { - let negative = flags & mouse_flags::WHEEL_NEG != 0; - let units: i16 = if negative { -120 } else { 120 }; - ops.push(Operation::WheelRotations(WheelRotations { - is_vertical: true, - rotation_units: units, - })); - } - - if flags & mouse_flags::HWHEEL != 0 { - let negative = flags & mouse_flags::WHEEL_NEG != 0; - let units: i16 = if negative { -120 } else { 120 }; - ops.push(Operation::WheelRotations(WheelRotations { - is_vertical: false, - rotation_units: units, - })); - } - - // If no specific operation was generated but we have coordinates, treat - // it as a plain mouse move (some frontends send move without the flag). - if ops.is_empty() { - ops.push(Operation::MouseMove(pos)); - } - + if flags & mouse_flags::BUTTON1 != 0 { if is_down { ops.push(Operation::MouseButtonPressed(MouseButton::Left)); } else { ops.push(Operation::MouseButtonReleased(MouseButton::Left)); } } + if flags & mouse_flags::BUTTON2 != 0 { if is_down { ops.push(Operation::MouseButtonPressed(MouseButton::Right)); } else { ops.push(Operation::MouseButtonReleased(MouseButton::Right)); } } + if flags & mouse_flags::BUTTON3 != 0 { if is_down { ops.push(Operation::MouseButtonPressed(MouseButton::Middle)); } else { ops.push(Operation::MouseButtonReleased(MouseButton::Middle)); } } + if flags & mouse_flags::WHEEL != 0 { let units: i16 = if flags & mouse_flags::WHEEL_NEG != 0 { -120 } else { 120 }; ops.push(Operation::WheelRotations(WheelRotations { is_vertical: true, rotation_units: units })); } + if flags & mouse_flags::HWHEEL != 0 { let units: i16 = if flags & mouse_flags::WHEEL_NEG != 0 { -120 } else { 120 }; ops.push(Operation::WheelRotations(WheelRotations { is_vertical: false, rotation_units: units })); } + if ops.is_empty() { ops.push(Operation::MouseMove(pos)); } ops } diff --git a/src-tauri/src/ssh/session.rs b/src-tauri/src/ssh/session.rs index ac9c9d2..34f2081 100644 --- a/src-tauri/src/ssh/session.rs +++ b/src-tauri/src/ssh/session.rs @@ -1,14 
+1,6 @@ //! SSH session manager — connects, authenticates, manages PTY channels. -//! -//! Each SSH session runs asynchronously via tokio. Terminal stdout is read in a -//! loop and emitted to the frontend via Tauri events (`ssh:data:{session_id}`, -//! base64 encoded). Terminal stdin receives data from the frontend via Tauri -//! commands. -//! -//! Sessions are stored in a `DashMap>`. use std::sync::Arc; - use async_trait::async_trait; use base64::Engine; use dashmap::DashMap; @@ -24,19 +16,11 @@ use crate::sftp::SftpService; use crate::ssh::cwd::CwdTracker; use crate::ssh::host_key::{HostKeyResult, HostKeyStore}; -// ── auth method ────────────────────────────────────────────────────────────── - -/// Authentication method for SSH connections. pub enum AuthMethod { Password(String), - Key { - private_key_pem: String, - passphrase: Option, - }, + Key { private_key_pem: String, passphrase: Option }, } -// ── session info (serializable for frontend) ───────────────────────────────── - #[derive(Debug, Serialize, Clone)] #[serde(rename_all = "camelCase")] pub struct SessionInfo { @@ -46,29 +30,16 @@ pub struct SessionInfo { pub username: String, } -// ── SSH session ────────────────────────────────────────────────────────────── - -/// Represents a single active SSH session with a PTY channel. pub struct SshSession { pub id: String, pub hostname: String, pub port: u16, pub username: String, - /// The PTY channel used for interactive shell I/O. pub channel: Arc>>, - /// Handle to the underlying SSH connection (used for opening new channels). pub handle: Arc>>, - /// CWD tracker that polls via a separate exec channel. pub cwd_tracker: Option, } -// ── SSH client handler ─────────────────────────────────────────────────────── - -/// Minimal `russh::client::Handler` implementation. -/// -/// Host key verification is done via TOFU in the `HostKeyStore`. The handler -/// stores the verification result so the connect flow can check it after -/// `client::connect` returns. 
pub struct SshClient { host_key_store: HostKeyStore, hostname: String, @@ -78,70 +49,22 @@ pub struct SshClient { #[async_trait] impl client::Handler for SshClient { type Error = russh::Error; - - async fn check_server_key( - &mut self, - server_public_key: &ssh_key::PublicKey, - ) -> Result { + async fn check_server_key(&mut self, server_public_key: &ssh_key::PublicKey) -> Result { let key_type = server_public_key.algorithm().to_string(); - let fingerprint = server_public_key - .fingerprint(ssh_key::HashAlg::Sha256) - .to_string(); - - let raw_key = server_public_key - .to_openssh() - .unwrap_or_default(); - - match self - .host_key_store - .verify(&self.hostname, self.port, &key_type, &fingerprint) - { + let fingerprint = server_public_key.fingerprint(ssh_key::HashAlg::Sha256).to_string(); + let raw_key = server_public_key.to_openssh().unwrap_or_default(); + match self.host_key_store.verify(&self.hostname, self.port, &key_type, &fingerprint) { Ok(HostKeyResult::New) => { - info!( - "New host key for {}:{} ({}): {}", - self.hostname, self.port, key_type, fingerprint - ); - // TOFU: store the key on first contact. - if let Err(e) = self.host_key_store.store( - &self.hostname, - self.port, - &key_type, - &fingerprint, - &raw_key, - ) { - warn!("Failed to store host key: {}", e); - } + let _ = self.host_key_store.store(&self.hostname, self.port, &key_type, &fingerprint, &raw_key); Ok(true) } - Ok(HostKeyResult::Match) => { - debug!( - "Host key match for {}:{} ({})", - self.hostname, self.port, key_type - ); - Ok(true) - } - Ok(HostKeyResult::Changed) => { - error!( - "HOST KEY CHANGED for {}:{} ({})! Expected stored fingerprint, got {}. \ - Possible man-in-the-middle attack.", - self.hostname, self.port, key_type, fingerprint - ); - // Reject the connection — the frontend should prompt the user - // to accept the new key and call delete + reconnect. - Ok(false) - } - Err(e) => { - error!("Host key verification error: {}", e); - // On DB error, reject to be safe. 
- Ok(false) - } + Ok(HostKeyResult::Match) => Ok(true), + Ok(HostKeyResult::Changed) => Ok(false), + Err(_) => Ok(false), } } } -// ── SSH service ────────────────────────────────────────────────────────────── - -/// Manages all active SSH sessions. pub struct SshService { sessions: DashMap>, db: Database, @@ -149,335 +72,118 @@ pub struct SshService { impl SshService { pub fn new(db: Database) -> Self { - Self { - sessions: DashMap::new(), - db, - } + Self { sessions: DashMap::new(), db } } - /// Establish an SSH connection, authenticate, open a PTY, start a shell, - /// and begin streaming output to the frontend. - /// - /// Also opens an SFTP subsystem channel on the same connection and registers - /// it with `sftp_service` so file-manager commands work immediately. - /// - /// Returns the session UUID on success. - pub async fn connect( - &self, - app_handle: AppHandle, - hostname: &str, - port: u16, - username: &str, - auth: AuthMethod, - cols: u32, - rows: u32, - sftp_service: &SftpService, - ) -> Result { + pub async fn connect(&self, app_handle: AppHandle, hostname: &str, port: u16, username: &str, auth: AuthMethod, cols: u32, rows: u32, sftp_service: &SftpService) -> Result { let session_id = uuid::Uuid::new_v4().to_string(); + let config = Arc::new(russh::client::Config::default()); + let handler = SshClient { host_key_store: HostKeyStore::new(self.db.clone()), hostname: hostname.to_string(), port }; - // Build russh client config. - let config = russh::client::Config::default(); - let config = Arc::new(config); - - // Build our handler with TOFU host key verification. - let handler = SshClient { - host_key_store: HostKeyStore::new(self.db.clone()), - hostname: hostname.to_string(), - port, - }; - - // Connect to the SSH server. 
- let mut handle = client::connect(config, (hostname, port), handler) + let mut handle = tokio::time::timeout(std::time::Duration::from_secs(10), client::connect(config, (hostname, port), handler)) .await + .map_err(|_| format!("SSH connection to {}:{} timed out after 10s", hostname, port))? .map_err(|e| format!("SSH connection to {}:{} failed: {}", hostname, port, e))?; - // Authenticate. let auth_success = match auth { - AuthMethod::Password(password) => { - handle - .authenticate_password(username, &password) + AuthMethod::Password(ref password) => { + tokio::time::timeout(std::time::Duration::from_secs(10), handle.authenticate_password(username, password)) .await - .map_err(|e| format!("Password authentication failed: {}", e))? + .map_err(|_| "SSH password authentication timed out after 10s".to_string())? + .map_err(|e| format!("SSH authentication error: {}", e))? } - AuthMethod::Key { - private_key_pem, - passphrase, - } => { - let key = russh::keys::decode_secret_key( - &private_key_pem, - passphrase.as_deref(), - ) - .map_err(|e| format!("Failed to decode private key: {}", e))?; - - handle - .authenticate_publickey(username, Arc::new(key)) + AuthMethod::Key { ref private_key_pem, ref passphrase } => { + let key = russh::keys::decode_secret_key(private_key_pem, passphrase.as_deref()).map_err(|e| format!("Failed to decode private key: {}", e))?; + tokio::time::timeout(std::time::Duration::from_secs(10), handle.authenticate_publickey(username, Arc::new(key))) .await - .map_err(|e| format!("Public key authentication failed: {}", e))? + .map_err(|_| "SSH key authentication timed out after 10s".to_string())? + .map_err(|e| format!("SSH authentication error: {}", e))? } }; - if !auth_success { - return Err("Authentication failed: server rejected credentials".to_string()); - } + if !auth_success { return Err("Authentication failed: server rejected credentials".to_string()); } - // Open a session channel. 
- let channel = handle - .channel_open_session() - .await - .map_err(|e| format!("Failed to open session channel: {}", e))?; - - // Request a PTY. - channel - .request_pty( - true, - "xterm-256color", - cols, - rows, - 0, // pix_width - 0, // pix_height - &[], - ) - .await - .map_err(|e| format!("Failed to request PTY: {}", e))?; - - // Start a shell. - channel - .request_shell(true) - .await - .map_err(|e| format!("Failed to start shell: {}", e))?; + let channel = handle.channel_open_session().await.map_err(|e| format!("Failed to open session channel: {}", e))?; + channel.request_pty(true, "xterm-256color", cols, rows, 0, 0, &[]).await.map_err(|e| format!("Failed to request PTY: {}", e))?; + channel.request_shell(true).await.map_err(|e| format!("Failed to start shell: {}", e))?; let handle = Arc::new(TokioMutex::new(handle)); let channel = Arc::new(TokioMutex::new(channel)); - - // Start CWD tracker. let cwd_tracker = CwdTracker::new(); - cwd_tracker.start( - handle.clone(), - app_handle.clone(), - session_id.clone(), - ); - - // Build session object. - let session = Arc::new(SshSession { - id: session_id.clone(), - hostname: hostname.to_string(), - port, - username: username.to_string(), - channel: channel.clone(), - handle: handle.clone(), - cwd_tracker: Some(cwd_tracker), - }); + cwd_tracker.start(handle.clone(), app_handle.clone(), session_id.clone()); + let session = Arc::new(SshSession { id: session_id.clone(), hostname: hostname.to_string(), port, username: username.to_string(), channel: channel.clone(), handle: handle.clone(), cwd_tracker: Some(cwd_tracker) }); self.sessions.insert(session_id.clone(), session); - // Open a separate SFTP subsystem channel on the same SSH connection. - // This is distinct from the PTY channel — both are multiplexed over - // the same underlying transport. 
- { - let sftp_channel_result = { - let h = handle.lock().await; - h.channel_open_session().await - }; - - match sftp_channel_result { - Ok(sftp_channel) => { - match sftp_channel.request_subsystem(true, "sftp").await { - Ok(()) => { - match russh_sftp::client::SftpSession::new( - sftp_channel.into_stream(), - ) - .await - { - Ok(sftp_client) => { - sftp_service.register_client(&session_id, sftp_client); - } - Err(e) => { - warn!( - "SFTP session init failed for {}: {} — \ - file manager will be unavailable", - session_id, e - ); - } - } - } - Err(e) => { - warn!( - "SFTP subsystem request failed for {}: {} — \ - file manager will be unavailable", - session_id, e - ); - } + { let h = handle.lock().await; + if let Ok(sftp_channel) = h.channel_open_session().await { + if sftp_channel.request_subsystem(true, "sftp").await.is_ok() { + if let Ok(sftp_client) = russh_sftp::client::SftpSession::new(sftp_channel.into_stream()).await { + sftp_service.register_client(&session_id, sftp_client); } } - Err(e) => { - warn!( - "Failed to open SFTP channel for {}: {} — \ - file manager will be unavailable", - session_id, e - ); - } } } - // Spawn the stdout read loop. let sid = session_id.clone(); let chan = channel.clone(); let app = app_handle.clone(); - tokio::spawn(async move { loop { - let msg = { - let mut ch = chan.lock().await; - ch.wait().await - }; - + let msg = { let mut ch = chan.lock().await; ch.wait().await }; match msg { Some(ChannelMsg::Data { ref data }) => { - let encoded = base64::engine::general_purpose::STANDARD - .encode(data.as_ref()); - let event_name = format!("ssh:data:{}", sid); - if let Err(e) = app.emit(&event_name, encoded) { - error!("Failed to emit SSH data event: {}", e); - break; - } + let encoded = base64::engine::general_purpose::STANDARD.encode(data.as_ref()); + let _ = app.emit(&format!("ssh:data:{}", sid), encoded); } Some(ChannelMsg::ExtendedData { ref data, .. 
}) => { - // stderr — emit on the same event channel so the - // terminal renders it inline (same as a real terminal). - let encoded = base64::engine::general_purpose::STANDARD - .encode(data.as_ref()); - let event_name = format!("ssh:data:{}", sid); - if let Err(e) = app.emit(&event_name, encoded) { - error!("Failed to emit SSH stderr event: {}", e); - break; - } + let encoded = base64::engine::general_purpose::STANDARD.encode(data.as_ref()); + let _ = app.emit(&format!("ssh:data:{}", sid), encoded); } Some(ChannelMsg::ExitStatus { exit_status }) => { - info!("SSH session {} exited with status {}", sid, exit_status); - let event_name = format!("ssh:exit:{}", sid); - let _ = app.emit(&event_name, exit_status); + let _ = app.emit(&format!("ssh:exit:{}", sid), exit_status); break; } - Some(ChannelMsg::Eof) => { - debug!("SSH session {} received EOF", sid); - } - Some(ChannelMsg::Close) => { - info!("SSH session {} channel closed", sid); - let event_name = format!("ssh:close:{}", sid); - let _ = app.emit(&event_name, ()); + Some(ChannelMsg::Close) | None => { + let _ = app.emit(&format!("ssh:close:{}", sid), ()); break; } - None => { - info!("SSH session {} channel stream ended", sid); - let event_name = format!("ssh:close:{}", sid); - let _ = app.emit(&event_name, ()); - break; - } - _ => { - // Ignore other channel messages (WindowAdjust, etc.) - } + _ => {} } } }); - info!( - "SSH session {} connected to {}@{}:{}", - session_id, username, hostname, port - ); - Ok(session_id) } - /// Write data to a session's PTY stdin. 
     pub async fn write(&self, session_id: &str, data: &[u8]) -> Result<(), String> {
-        let session = self
-            .sessions
-            .get(session_id)
-            .ok_or_else(|| format!("Session {} not found", session_id))?;
-
-        let channel: tokio::sync::MutexGuard<'_, Channel<client::Msg>> =
-            session.channel.lock().await;
-        channel
-            .data(&data[..])
-            .await
-            .map_err(|e| format!("Failed to write to session {}: {}", session_id, e))
+        let session = self.sessions.get(session_id).ok_or_else(|| format!("Session {} not found", session_id))?;
+        let channel = session.channel.lock().await;
+        channel.data(&data[..]).await.map_err(|e| format!("Failed to write to session {}: {}", session_id, e))
     }
 
-    /// Resize the PTY window for a session.
-    pub async fn resize(
-        &self,
-        session_id: &str,
-        cols: u32,
-        rows: u32,
-    ) -> Result<(), String> {
-        let session = self
-            .sessions
-            .get(session_id)
-            .ok_or_else(|| format!("Session {} not found", session_id))?;
-
-        let channel: tokio::sync::MutexGuard<'_, Channel<client::Msg>> =
-            session.channel.lock().await;
-        channel
-            .window_change(cols, rows, 0, 0)
-            .await
-            .map_err(|e| format!("Failed to resize session {}: {}", session_id, e))
+    pub async fn resize(&self, session_id: &str, cols: u32, rows: u32) -> Result<(), String> {
+        let session = self.sessions.get(session_id).ok_or_else(|| format!("Session {} not found", session_id))?;
+        let channel = session.channel.lock().await;
+        channel.window_change(cols, rows, 0, 0).await.map_err(|e| format!("Failed to resize session {}: {}", session_id, e))
     }
 
-    /// Disconnect a session — close the channel and remove it from the map.
-    ///
-    /// Pass the `sftp_service` so the SFTP client can be dropped at the same
-    /// time as the SSH handle.
-    pub async fn disconnect(
-        &self,
-        session_id: &str,
-        sftp_service: &SftpService,
-    ) -> Result<(), String> {
-        let (_, session) = self
-            .sessions
-            .remove(session_id)
-            .ok_or_else(|| format!("Session {} not found", session_id))?;
-
-        // Close the channel gracefully.
-        {
-            let channel: tokio::sync::MutexGuard<'_, Channel<client::Msg>> =
-                session.channel.lock().await;
-            let _ = channel.eof().await;
-            let _ = channel.close().await;
-        }
-
-        // Disconnect the SSH connection.
-        {
-            let handle = session.handle.lock().await;
-            let _ = handle
-                .disconnect(Disconnect::ByApplication, "", "en")
-                .await;
-        }
-
-        // Clean up the SFTP client for this session.
+    pub async fn disconnect(&self, session_id: &str, sftp_service: &SftpService) -> Result<(), String> {
+        let (_, session) = self.sessions.remove(session_id).ok_or_else(|| format!("Session {} not found", session_id))?;
+        { let channel = session.channel.lock().await; let _ = channel.eof().await; let _ = channel.close().await; }
+        { let handle = session.handle.lock().await; let _ = handle.disconnect(Disconnect::ByApplication, "", "en").await; }
         sftp_service.remove_client(session_id);
-
-        info!("SSH session {} disconnected", session_id);
         Ok(())
     }
 
-    /// Get a reference to a session by ID.
     pub fn get_session(&self, session_id: &str) -> Option<Arc<SshSession>> {
         self.sessions.get(session_id).map(|entry| entry.clone())
     }
 
-    /// List all active sessions (metadata only).
     pub fn list_sessions(&self) -> Vec<SessionInfo> {
-        self.sessions
-            .iter()
-            .map(|entry| {
-                let s = entry.value();
-                SessionInfo {
-                    id: s.id.clone(),
-                    hostname: s.hostname.clone(),
-                    port: s.port,
-                    username: s.username.clone(),
-                }
-            })
-            .collect()
+        self.sessions.iter().map(|entry| {
+            let s = entry.value();
+            SessionInfo { id: s.id.clone(), hostname: s.hostname.clone(), port: s.port, username: s.username.clone() }
+        }).collect()
     }
 }
diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json
index 71cf595..bcc5ca5 100644
--- a/src-tauri/tauri.conf.json
+++ b/src-tauri/tauri.conf.json
@@ -11,7 +11,7 @@
   "app": {
     "windows": [
       {
-        "title": "Wraith",
+        "label": "main",
         "url": "index.html",
         "title": "Wraith",
         "width": 1200,
         "height": 800,
         "minWidth": 800,