Fix background process lifecycle

commit 079c8828a6
parent d72867529a
@@ -21,7 +21,7 @@ def main():
     os.makedirs(os.environ["WEBUI_UPLOAD_DIR"], exist_ok=True)
     os.makedirs(os.environ["WEBUI_CACHE_DIR"], exist_ok=True)
 
-    # Run the FastAPI app using uvicorn
+    # Run the FastAPI app - this will handle the command line args including "serve"
     app()
 
 
@@ -1,3 +1,5 @@
+use std::fs;
+use std::path::PathBuf;
 use std::sync::{Arc, Mutex};
 use tauri::Manager;
 use tauri_plugin_shell::{process::CommandEvent, ShellExt};
@@ -7,6 +9,29 @@ use tauri_plugin_shell::{process::CommandEvent, ShellExt};
 struct AppState {
     backend_port: Arc<Mutex<Option<u16>>>,
     backend_process: Arc<Mutex<Option<tauri_plugin_shell::process::CommandChild>>>,
+    backend_pid: Arc<Mutex<Option<u32>>>,
+}
+
+// Generate or load a persistent secret key for Open WebUI
+fn get_or_create_secret_key(config_dir: &PathBuf) -> Result<String, Box<dyn std::error::Error>> {
+    let secret_file = config_dir.join("webui_secret.txt");
+
+    // Try to read existing secret
+    if let Ok(secret) = fs::read_to_string(&secret_file) {
+        let secret = secret.trim();
+        if !secret.is_empty() {
+            return Ok(secret.to_string());
+        }
+    }
+
+    // Generate a new secure secret key
+    let secret = generate_secret_key();
+
+    // Save the secret for future use
+    fs::create_dir_all(config_dir)?;
+    fs::write(&secret_file, &secret)?;
+
+    Ok(secret)
 }
 
 // Generate a secure secret key for Open WebUI
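
Note on `generate_secret_key()`: the helper above only persists whatever that generator returns; the generator itself sits outside this hunk (only its doc comment appears as trailing context). For orientation, a typical generator of this kind, assuming the `rand` crate, could look like the sketch below; it is an illustration, not the code shipped in this commit.

    // Sketch (assumption): a random alphanumeric secret, roughly what a
    // generate_secret_key() helper usually produces. Requires the `rand` crate.
    use rand::{distributions::Alphanumeric, Rng};

    fn generate_secret_key_sketch() -> String {
        rand::thread_rng()
            .sample_iter(&Alphanumeric)
            .take(64)
            .map(char::from)
            .collect()
    }
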
@@ -51,8 +76,21 @@ async fn start_backend(app: tauri::AppHandle) -> Result<u16, String> {
         }
     }
 
-    // Find an available port
-    let port = find_available_port().map_err(|e| e.to_string())?;
+    // Kill any orphaned backend processes first
+    let _ = kill_orphaned_backends(); // Ignore errors, just try to clean up
+
+    // Find a persistent port (reuse existing or create new)
+    let app_config_dir = app
+        .path()
+        .app_config_dir()
+        .map_err(|e| format!("Failed to get app config directory: {}", e))?;
+
+    let port = get_or_create_persistent_port(&app_config_dir)
+        .map_err(|e| format!("Failed to get persistent port: {}", e))?;
+
+    // Get or create a persistent secret key
+    let secret_key = get_or_create_secret_key(&app_config_dir)
+        .map_err(|e| format!("Failed to get persistent secret key: {}", e))?;
 
     // Get the app data directory for persistence
     let app_data_dir = app
@@ -69,28 +107,54 @@ async fn start_backend(app: tauri::AppHandle) -> Result<u16, String> {
     std::fs::create_dir_all(&webui_data_dir)
         .map_err(|e| format!("Failed to create Open WebUI data directory: {}", e))?;
 
+    println!("Using persistent port: {}", port);
+    println!(
+        "Using secret key (first 8 chars): {}...",
+        &secret_key[..8.min(secret_key.len())]
+    );
+    println!(
+        "Open WebUI data directory: {}",
+        webui_data_dir.to_string_lossy()
+    );
+    println!("App config directory: {}", app_config_dir.to_string_lossy());
+
     // Start the backend sidecar with proper environment variables
     let sidecar_command = app
         .shell()
         .sidecar("glowpath-backend")
         .map_err(|e| format!("Failed to create sidecar command: {}", e))?
         .args(&["serve", "--host", "127.0.0.1", "--port", &port.to_string()])
-        .env("WEBUI_SECRET_KEY", &generate_secret_key()) // Generate a proper secret
+        .env("WEBUI_SECRET_KEY", &secret_key) // Use persistent secret key
         .env("DATA_DIR", &webui_data_dir)
         .env(
             "WEBUI_DATABASE_URL",
             format!("sqlite:///{}/webui.db", webui_data_dir.to_string_lossy()),
-        );
+        )
+        .env("WEBUI_AUTH", "false") // Disable authentication completely
+        .env("WEBUI_ENABLE_SIGNUP", "false") // Disable user signup
+        .env("WEBUI_ENABLE_LOGIN_FORM", "false") // Disable login form
+        .env("WEBUI_ENABLE_OAUTH_SIGNUP", "false") // Disable OAuth signup
+        .env("WEBUI_ENABLE_LDAP", "false") // Disable LDAP
+        .env("WEBUI_DEFAULT_USER_ROLE", "admin") // Default role when auth is disabled
+        .env("WEBUI_SESSION_COOKIE_SECURE", "false") // Allow insecure cookies for localhost
+        .env("WEBUI_SESSION_COOKIE_SAME_SITE", "lax"); // More permissive cookie policy
 
     let (mut rx, child) = sidecar_command
         .spawn()
         .map_err(|e| format!("Failed to spawn backend process: {}", e))?;
 
-    // Store the process and port
+    // Get the PID for process group management
+    let pid = child.pid();
+
+    // Store the process, PID, and port
     if let Ok(mut process_guard) = state.backend_process.lock() {
         *process_guard = Some(child);
     }
 
+    if let Ok(mut pid_guard) = state.backend_pid.lock() {
+        *pid_guard = Some(pid);
+    }
+
     if let Ok(mut port_guard) = state.backend_port.lock() {
         *port_guard = Some(port);
     }
@@ -119,6 +183,9 @@ async fn start_backend(app: tauri::AppHandle) -> Result<u16, String> {
                 if let Ok(mut process_guard) = state.backend_process.lock() {
                     *process_guard = None;
                 }
+                if let Ok(mut pid_guard) = state.backend_pid.lock() {
+                    *pid_guard = None;
+                }
             }
             break;
         }
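
The guards above are cleared inside the loop that drains the sidecar's event receiver (the `rx` returned by `spawn()` in the previous hunk); the loop itself falls outside this hunk's context window. A minimal sketch of how such a loop is commonly written against the `tauri_plugin_shell` API, with illustrative names:

    use tauri_plugin_shell::process::CommandEvent;

    // Sketch (assumption): drain the CommandEvent receiver returned by spawn().
    // The real loop, and the state clean-up shown in the hunk above, live elsewhere.
    fn monitor_backend(mut rx: tauri::async_runtime::Receiver<CommandEvent>) {
        tauri::async_runtime::spawn(async move {
            while let Some(event) = rx.recv().await {
                match event {
                    CommandEvent::Stdout(line) => {
                        println!("backend: {}", String::from_utf8_lossy(&line));
                    }
                    CommandEvent::Stderr(line) => {
                        eprintln!("backend: {}", String::from_utf8_lossy(&line));
                    }
                    CommandEvent::Terminated(payload) => {
                        println!("backend exited: {:?}", payload.code);
                        // ...clear the process / pid / port guards here...
                        break;
                    }
                    _ => {}
                }
            }
        });
    }
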
@@ -148,18 +215,69 @@ async fn get_backend_port(app: tauri::AppHandle) -> Result<u16, String> {
 async fn stop_backend(app: tauri::AppHandle) -> Result<(), String> {
     let state = app.state::<AppState>();
 
-    if let Ok(mut process_guard) = state.backend_process.lock() {
-        if let Some(child) = process_guard.take() {
+    // Get the stored PID first
+    let stored_pid = if let Ok(pid_guard) = state.backend_pid.lock() {
+        *pid_guard
+    } else {
+        None
+    };
+
+    // Get the child process and PID, then release the lock
+    let (child_option, pid_option) = {
+        let mut process_guard = state
+            .backend_process
+            .lock()
+            .map_err(|_| "Failed to acquire process lock".to_string())?;
+
+        let child = process_guard.take();
+        let pid = if let Some(ref child) = child {
+            Some(stored_pid.unwrap_or_else(|| child.pid()))
+        } else {
+            stored_pid
+        };
+
+        (child, pid)
+    }; // Lock is released here
+
+    if let Some(child) = child_option {
+        if let Some(pid) = pid_option {
+            println!("Stopping backend process gracefully...");
+
+            // First try graceful termination (SIGTERM)
+            match graceful_terminate_process_group(pid).await {
+                Ok(_) => {
+                    println!("Backend process group terminated gracefully");
+                }
+                Err(e) => {
+                    eprintln!("Graceful termination failed: {}, trying forceful kill", e);
+                    // Fallback to forceful kill
+                    child
+                        .kill()
+                        .map_err(|e| format!("Failed to kill backend process: {}", e))?;
+
+                    // Also try to kill any remaining processes by name
+                    let _ = kill_orphaned_backends(); // Best effort cleanup
+                }
+            }
+
+            println!("Backend process stopped");
+        } else {
+            // No PID available, just kill the child process
             child
                 .kill()
                 .map_err(|e| format!("Failed to kill backend process: {}", e))?;
         }
     }
 
+    // Clear all stored state
     if let Ok(mut port_guard) = state.backend_port.lock() {
         *port_guard = None;
     }
 
+    if let Ok(mut pid_guard) = state.backend_pid.lock() {
+        *pid_guard = None;
+    }
+
     Ok(())
 }
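
The `{ ... }; // Lock is released here` block is the key detail of this rewrite: the `std::sync::MutexGuard` is dropped at the end of that block, before the function reaches the `.await` on `graceful_terminate_process_group`, so the lock is never held across an await point and the resulting future can stay `Send`. A standalone sketch of the same pattern:

    use std::sync::{Arc, Mutex};

    // Sketch: take a value out of a std Mutex inside a scoped block so the
    // guard is dropped before any .await point in the surrounding async fn.
    async fn take_then_await(shared: Arc<Mutex<Option<String>>>) -> Option<String> {
        let value = {
            let mut guard = shared.lock().ok()?;
            guard.take()
        }; // guard dropped here; safe to .await afterwards

        // some_async_cleanup().await; // hypothetical await point
        value
    }
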
@@ -173,6 +291,78 @@ fn get_data_directory(app: tauri::AppHandle) -> Result<String, String> {
     Ok(app_data_dir.to_string_lossy().to_string())
 }
 
+#[tauri::command]
+fn reset_backend_port(app: tauri::AppHandle) -> Result<(), String> {
+    let app_config_dir = app
+        .path()
+        .app_config_dir()
+        .map_err(|e| format!("Failed to get app config directory: {}", e))?;
+
+    let port_file = app_config_dir.join("backend_port.txt");
+
+    if port_file.exists() {
+        fs::remove_file(&port_file).map_err(|e| format!("Failed to remove port file: {}", e))?;
+    }
+
+    Ok(())
+}
+
+#[tauri::command]
+fn reset_session_data(app: tauri::AppHandle) -> Result<(), String> {
+    let app_config_dir = app
+        .path()
+        .app_config_dir()
+        .map_err(|e| format!("Failed to get app config directory: {}", e))?;
+
+    // Remove port and secret files to force regeneration
+    let port_file = app_config_dir.join("backend_port.txt");
+    let secret_file = app_config_dir.join("webui_secret.txt");
+
+    if port_file.exists() {
+        fs::remove_file(&port_file).map_err(|e| format!("Failed to remove port file: {}", e))?;
+    }
+
+    if secret_file.exists() {
+        fs::remove_file(&secret_file)
+            .map_err(|e| format!("Failed to remove secret file: {}", e))?;
+    }
+
+    Ok(())
+}
+
+#[tauri::command]
+fn kill_orphaned_backends() -> Result<String, String> {
+    use std::process::Command;
+
+    // First try graceful termination with SIGTERM
+    let sigterm_output = Command::new("pkill")
+        .arg("-TERM")
+        .arg("-f")
+        .arg("glowpath-backend")
+        .output();
+
+    // Give processes a moment to shut down gracefully
+    std::thread::sleep(std::time::Duration::from_millis(1000));
+
+    // Then force kill any remaining processes
+    let sigkill_output = Command::new("pkill")
+        .arg("-KILL")
+        .arg("-f")
+        .arg("glowpath-backend")
+        .output();
+
+    match (sigterm_output, sigkill_output) {
+        (Ok(term_result), Ok(kill_result)) => {
+            if term_result.status.success() || kill_result.status.success() {
+                Ok("Successfully cleaned up orphaned backend processes".to_string())
+            } else {
+                Ok("No orphaned backend processes found".to_string())
+            }
+        }
+        (Err(e), _) | (_, Err(e)) => Err(format!("Failed to check for orphaned processes: {}", e)),
+    }
+}
+
 fn find_available_port() -> Result<u16, Box<dyn std::error::Error>> {
     use std::net::TcpListener;
 
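
`kill_orphaned_backends` shells out to `pkill`, which is available on Linux and macOS but not on Windows. A platform-gated variant is sketched below as an assumption for illustration; it is not part of this commit, and the Windows image name is assumed.

    use std::process::Command;

    // Sketch (assumption): best-effort cleanup of stray backend processes by name.
    #[cfg(unix)]
    fn cleanup_backend_by_name() -> Result<String, String> {
        let status = Command::new("pkill")
            .args(["-TERM", "-f", "glowpath-backend"])
            .status()
            .map_err(|e| format!("Failed to run pkill: {}", e))?;
        Ok(if status.success() {
            "Cleaned up orphaned backend processes".to_string()
        } else {
            "No orphaned backend processes found".to_string()
        })
    }

    #[cfg(windows)]
    fn cleanup_backend_by_name() -> Result<String, String> {
        // Image name assumed; Tauri sidecar binaries may carry a target-triple suffix.
        let status = Command::new("taskkill")
            .args(["/F", "/IM", "glowpath-backend.exe"])
            .status()
            .map_err(|e| format!("Failed to run taskkill: {}", e))?;
        Ok(if status.success() {
            "Cleaned up orphaned backend processes".to_string()
        } else {
            "No orphaned backend processes found".to_string()
        })
    }
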
@@ -182,6 +372,140 @@ fn find_available_port() -> Result<u16, Box<dyn std::error::Error>> {
     Ok(addr.port())
 }
 
+// Load port from config file, or find an available port and save it
+fn get_or_create_persistent_port(config_dir: &PathBuf) -> Result<u16, Box<dyn std::error::Error>> {
+    let port_file = config_dir.join("backend_port.txt");
+
+    // Try to read existing port
+    if let Ok(port_str) = fs::read_to_string(&port_file) {
+        if let Ok(port) = port_str.trim().parse::<u16>() {
+            // Check if the port is still available
+            if is_port_available(port) {
+                return Ok(port);
+            }
+        }
+    }
+
+    // If no saved port or port is not available, find a new one
+    let port = find_available_port()?;
+
+    // Save the port for future use
+    fs::create_dir_all(config_dir)?;
+    fs::write(&port_file, port.to_string())?;
+
+    Ok(port)
+}
+
+fn is_port_available(port: u16) -> bool {
+    use std::net::TcpListener;
+    use std::time::Duration;
+
+    // Try to bind to the port
+    let result = TcpListener::bind(format!("127.0.0.1:{}", port));
+
+    match result {
+        Ok(listener) => {
+            // Successfully bound, port is available
+            drop(listener); // Close the listener
+
+            // Give a small delay to ensure the port is fully released
+            std::thread::sleep(Duration::from_millis(100));
+            true
+        }
+        Err(_) => {
+            // Failed to bind, port is not available
+            false
+        }
+    }
+}
+
+// Gracefully terminate a process group with timeout
+async fn graceful_terminate_process_group(pid: u32) -> Result<(), Box<dyn std::error::Error>> {
+    use std::process::Command;
+    use std::time::Duration;
+
+    // Send SIGTERM to the entire process group
+    // Using negative PID to target the process group
+    let sigterm_result = Command::new("kill")
+        .arg("-TERM")
+        .arg(format!("-{}", pid)) // Negative PID targets the process group
+        .output();
+
+    match sigterm_result {
+        Ok(output) => {
+            if !output.status.success() {
+                return Err(format!(
+                    "Failed to send SIGTERM to process group {}: {}",
+                    pid,
+                    String::from_utf8_lossy(&output.stderr)
+                )
+                .into());
+            }
+        }
+        Err(e) => {
+            return Err(format!("Failed to execute kill command: {}", e).into());
+        }
+    }
+
+    // Wait up to 5 seconds for graceful shutdown
+    for _ in 0..50 {
+        // Check every 100ms for 5 seconds
+        let check_result = Command::new("kill")
+            .arg("-0") // Signal 0 just checks if process exists
+            .arg(pid.to_string())
+            .output();
+
+        match check_result {
+            Ok(output) => {
+                if !output.status.success() {
+                    // Process no longer exists, graceful shutdown succeeded
+                    println!("Process group {} terminated gracefully", pid);
+                    return Ok(());
+                }
+            }
+            Err(_) => {
+                // Error usually means process doesn't exist
+                println!("Process group {} terminated gracefully", pid);
+                return Ok(());
+            }
+        }
+
+        // Use tauri's async runtime sleep
+        tauri::async_runtime::spawn(async {
+            std::thread::sleep(Duration::from_millis(100));
+        })
+        .await
+        .unwrap();
+    }
+
+    // Timeout occurred, force kill the process group
+    println!(
+        "Graceful termination timed out, force killing process group {}",
+        pid
+    );
+
+    let force_kill_result = Command::new("kill")
+        .arg("-KILL")
+        .arg(format!("-{}", pid)) // Negative PID targets the process group
+        .output();
+
+    match force_kill_result {
+        Ok(output) => {
+            if output.status.success() {
+                Ok(())
+            } else {
+                Err(format!(
+                    "Failed to force kill process group {}: {}",
+                    pid,
+                    String::from_utf8_lossy(&output.stderr)
+                )
+                .into())
+            }
+        }
+        Err(e) => Err(format!("Failed to execute force kill: {}", e).into()),
    }
+}
+
 #[cfg_attr(mobile, tauri::mobile_entry_point)]
 pub fn run() {
     tauri::Builder::default()
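
One caveat: `kill -TERM -<pid>` only reaches the backend's children if the sidecar is the leader of its own process group, and nothing in this diff shows that group being created. With `std::process::Command` on Unix the group can be set up explicitly (`process_group` is stable since Rust 1.64); the sketch below is under that assumption and is not code from this commit.

    use std::process::{Child, Command};

    #[cfg(unix)]
    use std::os::unix::process::CommandExt;

    // Sketch (assumption): spawn a child as its own process-group leader so that
    // signalling the negative PID (`kill -TERM -<pid>`) reaches the whole tree.
    fn spawn_in_own_group(program: &str) -> std::io::Result<Child> {
        let mut cmd = Command::new(program);
        #[cfg(unix)]
        cmd.process_group(0); // pgid 0 = use the child's own pid as the group id
        cmd.spawn()
    }
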
@@ -194,8 +518,22 @@ pub fn run() {
             start_backend,
             get_backend_port,
             stop_backend,
-            get_data_directory
+            get_data_directory,
+            reset_backend_port,
+            reset_session_data,
+            kill_orphaned_backends
         ])
+        .on_window_event(|window, event| {
+            if let tauri::WindowEvent::CloseRequested { .. } = event {
+                println!("App closing, stopping backend...");
+                let app_handle = window.app_handle().clone();
+                tauri::async_runtime::spawn(async move {
+                    if let Err(e) = stop_backend(app_handle).await {
+                        eprintln!("Failed to stop backend on app exit: {}", e);
+                    }
+                });
+            }
+        })
         .setup(|app| {
             // Optionally start backend on app startup
             let app_handle = app.handle().clone();
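
The handler registered above fires the shutdown task and lets the window close proceed. For reference, the `CloseRequested` event in Tauri 2 also exposes `api.prevent_close()`, which some applications use to hold the window open until the backend has actually stopped; the variant below is a sketch of that approach, an assumption rather than what this commit does.

    // Sketch (assumption): defer the exit until stop_backend has completed.
    .on_window_event(|window, event| {
        if let tauri::WindowEvent::CloseRequested { api, .. } = event {
            api.prevent_close();
            let app_handle = window.app_handle().clone();
            tauri::async_runtime::spawn(async move {
                if let Err(e) = stop_backend(app_handle.clone()).await {
                    eprintln!("Failed to stop backend on app exit: {}", e);
                }
                app_handle.exit(0); // exit once the backend has been stopped
            });
        }
    })
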
@@ -19,7 +19,8 @@
     ],
     "security": {
       "csp": null
-    }
+    },
+    "withGlobalTauri": false
   },
   "bundle": {
     "active": true,
@@ -79,6 +79,24 @@ class BackendService {
     return await invoke<string>("get_data_directory");
   }
 
+  async resetPort(): Promise<void> {
+    await invoke("reset_backend_port");
+    // Clear the current state
+    this.port = null;
+    this.baseUrl = null;
+  }
+
+  async resetSessionData(): Promise<void> {
+    await invoke("reset_session_data");
+    // Clear the current state
+    this.port = null;
+    this.baseUrl = null;
+  }
+
+  async killOrphanedBackends(): Promise<string> {
+    return await invoke<string>("kill_orphaned_backends");
+  }
+
   async fetch(path: string, options?: RequestInit): Promise<Response> {
     if (!this.baseUrl) {
       throw new Error("Backend service not initialized");