516 lines
18 KiB
Rust
516 lines
18 KiB
Rust
// file: kb_app/src/lib.rs
|
|
|
|
//! Tauri application library for `khadhroony-bobobot`.
|
|
//!
|
|
//! This crate is intentionally thin. It loads the shared configuration,
|
|
//! initializes shared tracing from `kb_lib`, and wires the desktop shell
|
|
//! to the reusable backend logic.
|
|
|
|
#![deny(unreachable_pub)]
|
|
#![warn(missing_docs)]
|
|
|
|
mod demo_http;
|
|
mod demo_ws;
|
|
mod splash;
|
|
|
|
pub use crate::splash::SplashOrder;
|
|
use tauri::Emitter;
|
|
use tauri::Manager;
|
|
|
|
/// Runtime state for started WebSocket clients.
|
|
struct KbWsRuntimeState {
|
|
clients: std::vec::Vec<kb_lib::WsClient>,
|
|
relay_tasks: std::vec::Vec<tauri::async_runtime::JoinHandle<()>>,
|
|
}
|
|
|
|
impl KbWsRuntimeState {
|
|
fn new() -> Self {
|
|
Self {
|
|
clients: std::vec::Vec::new(),
|
|
relay_tasks: std::vec::Vec::new(),
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Shared application state stored inside Tauri.
///
/// Created once in [`run`] and registered via `Builder::manage`, then read
/// by the `#[tauri::command]` handlers through `tauri::State`.
struct KbAppState {
    /// Configuration snapshot taken at startup (cloned in [`run`]).
    config: kb_lib::KbConfig,
    /// Mutable WebSocket runtime state; `tokio::sync::Mutex` so the guard
    /// may be held across `.await` points in the command handlers.
    ws_runtime: tokio::sync::Mutex<KbWsRuntimeState>,
    /// Runtime state for the demo WebSocket window, `Arc`-wrapped so it can
    /// also be shared outside the Tauri-managed state.
    demo_ws_runtime: std::sync::Arc<tokio::sync::Mutex<crate::demo_ws::KbDemoWsRuntimeState>>,
    /// Pool of HTTP endpoint clients built from the configuration.
    http_pool: kb_lib::HttpEndpointPool,
}
|
|
|
|
/// Runs the desktop application.
|
|
#[cfg_attr(mobile, tauri::mobile_entry_point)]
|
|
pub fn run() {
|
|
let config_path = kb_lib::KbConfig::default_path();
|
|
let config_result = kb_lib::KbConfig::load_from_path(&config_path);
|
|
let config = match config_result {
|
|
Ok(config) => config,
|
|
Err(error) => {
|
|
eprintln!(
|
|
"kb_app configuration load error from '{}': {}",
|
|
config_path.display(),
|
|
error
|
|
);
|
|
return;
|
|
}
|
|
};
|
|
let prepare_result = config.prepare_filesystem();
|
|
if let Err(error) = prepare_result {
|
|
eprintln!("kb_app filesystem preparation error: {error}");
|
|
return;
|
|
}
|
|
let tracing_guard_result = kb_lib::init_tracing(&config.logging);
|
|
let _tracing_guard = match tracing_guard_result {
|
|
Ok(guard) => guard,
|
|
Err(error) => {
|
|
eprintln!("kb_app tracing initialization error: {error}");
|
|
return;
|
|
}
|
|
};
|
|
tracing::info!(
|
|
app_name = %config.app.name,
|
|
environment = %config.app.environment,
|
|
"starting desktop application"
|
|
);
|
|
let http_pool_result = kb_lib::HttpEndpointPool::from_config(&config);
|
|
let http_pool = match http_pool_result {
|
|
Ok(http_pool) => http_pool,
|
|
Err(error) => {
|
|
tracing::error!("cannot create http endpoint pool: {}", error);
|
|
panic!("cannot create http endpoint pool: {}", error);
|
|
}
|
|
};
|
|
let app_state = KbAppState {
|
|
config: config.clone(),
|
|
ws_runtime: tokio::sync::Mutex::new(KbWsRuntimeState::new()),
|
|
demo_ws_runtime: std::sync::Arc::new(tokio::sync::Mutex::new(
|
|
crate::demo_ws::KbDemoWsRuntimeState::new(),
|
|
)),
|
|
http_pool,
|
|
};
|
|
let tracing_builder = tauri_plugin_tracing::Builder::new();
|
|
let mut tauri_builder = tauri::Builder::default();
|
|
tauri_builder = tauri_builder.manage(app_state);
|
|
tauri_builder = tauri_builder.invoke_handler(tauri::generate_handler![
|
|
start_ws_clients,
|
|
stop_ws_clients,
|
|
crate::demo_ws::open_demo_ws_window,
|
|
crate::demo_ws::demo_ws_list_endpoints,
|
|
crate::demo_ws::demo_ws_get_status,
|
|
crate::demo_ws::demo_ws_connect,
|
|
crate::demo_ws::demo_ws_disconnect,
|
|
crate::demo_ws::demo_ws_subscribe,
|
|
crate::demo_ws::demo_ws_unsubscribe_current,
|
|
crate::demo_http::open_demo_http_window,
|
|
crate::demo_http::demo_http_list_pool_clients,
|
|
crate::demo_http::demo_http_execute_request,
|
|
]);
|
|
tauri_builder = tauri_builder.plugin(tracing_builder.build::<tauri::Wry>());
|
|
tauri_builder = tauri_builder.setup(|app| {
|
|
let app_handle = app.handle().clone();
|
|
tauri::async_runtime::spawn(async move {
|
|
let splash_window_option = app_handle.get_webview_window("splash");
|
|
let splash_window = match splash_window_option {
|
|
Some(window) => window,
|
|
None => {
|
|
tracing::error!("splash window not found");
|
|
return;
|
|
}
|
|
};
|
|
let main_window_option = app_handle.get_webview_window("main");
|
|
let main_window = match main_window_option {
|
|
Some(window) => window,
|
|
None => {
|
|
tracing::error!("main window not found");
|
|
return;
|
|
}
|
|
};
|
|
let is_debug = cfg!(debug_assertions);
|
|
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
|
|
if is_debug {
|
|
emit_splash_order(&splash_window, "add_log", Some("Start Fade-In"), None);
|
|
}
|
|
emit_splash_order(&splash_window, "fadein", None, None);
|
|
emit_splash_order(
|
|
&splash_window,
|
|
"add_msg",
|
|
Some("Initialisation..."),
|
|
Some("info"),
|
|
);
|
|
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
|
|
emit_splash_order(
|
|
&splash_window,
|
|
"add_msg",
|
|
Some("Loading resources..."),
|
|
Some("info"),
|
|
);
|
|
tokio::time::sleep(std::time::Duration::from_millis(1000)).await;
|
|
emit_splash_order(
|
|
&splash_window,
|
|
"add_msg",
|
|
Some("Loading complete..."),
|
|
Some("success"),
|
|
);
|
|
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
|
|
tracing::debug!("start splash fadeout");
|
|
if is_debug {
|
|
emit_splash_order(&splash_window, "add_log", Some("Start Fade-out"), None);
|
|
}
|
|
emit_splash_order(&splash_window, "fadeout", None, None);
|
|
tracing::debug!("end splash fadeout");
|
|
tokio::time::sleep(std::time::Duration::from_millis(3100)).await;
|
|
let close_result = splash_window.destroy();
|
|
if let Err(error) = close_result {
|
|
tracing::error!("error closing splash window: {error:?}");
|
|
}
|
|
let show_result = main_window.show();
|
|
if let Err(error) = show_result {
|
|
tracing::error!("error showing main window: {error:?}");
|
|
} else {
|
|
let emit_result = main_window.emit("setupTray", ());
|
|
if let Err(error) = emit_result {
|
|
tracing::error!("error emitting setupTray event: {error:?}");
|
|
}
|
|
}
|
|
});
|
|
Ok(())
|
|
});
|
|
let run_result = tauri_builder.run(tauri::generate_context!());
|
|
if let Err(error) = run_result {
|
|
tracing::error!("error while running tauri application: {error:?}");
|
|
}
|
|
}
|
|
|
|
fn emit_splash_order(
|
|
splash_window: &tauri::WebviewWindow,
|
|
order: &str,
|
|
msg: std::option::Option<&str>,
|
|
status: std::option::Option<&str>,
|
|
) {
|
|
let payload = crate::SplashOrder {
|
|
order: order.to_string(),
|
|
msg: msg.map(std::string::ToString::to_string),
|
|
status: status.map(std::string::ToString::to_string),
|
|
};
|
|
let emit_result = splash_window.emit("splash", payload);
|
|
if let Err(error) = emit_result {
|
|
tracing::error!("error emitting splash event '{order}': {error:?}");
|
|
}
|
|
}
|
|
|
|
/// Starts one WebSocket client for every enabled endpoint in the config.
///
/// Returns the number of clients started. Fails with a human-readable
/// error string when clients are already running, when no endpoint is
/// enabled, or when any client cannot be created or connected — in the
/// failure cases every client started so far is shut down again.
#[tauri::command]
async fn start_ws_clients(
    app_handle: tauri::AppHandle,
    state: tauri::State<'_, KbAppState>,
) -> Result<usize, std::string::String> {
    // Fast-fail check in its own scope so the lock is released before the
    // (potentially slow) connection work below.
    {
        let runtime_guard = state.ws_runtime.lock().await;
        if !runtime_guard.clients.is_empty() {
            return Err("websocket clients are already running".to_string());
        }
    }
    // Snapshot the enabled endpoints from the immutable configuration.
    let enabled_endpoints: std::vec::Vec<kb_lib::KbWsEndpointConfig> = state
        .config
        .solana
        .ws_endpoints
        .iter()
        .filter(|endpoint| endpoint.enabled)
        .cloned()
        .collect();
    if enabled_endpoints.is_empty() {
        return Err("no enabled websocket endpoint found in config.json".to_string());
    }
    kb_emit_app_log(
        &app_handle,
        &format!(
            "[app] starting {} websocket client(s)",
            enabled_endpoints.len()
        ),
    );
    // Clients and relay tasks built so far; used for rollback on failure.
    let mut started_clients: std::vec::Vec<kb_lib::WsClient> = std::vec::Vec::new();
    let mut relay_tasks: std::vec::Vec<tauri::async_runtime::JoinHandle<()>> = std::vec::Vec::new();
    for endpoint in enabled_endpoints {
        kb_emit_app_log(
            &app_handle,
            &format!(
                "[app] preparing websocket endpoint '{}' ({})",
                endpoint.name, endpoint.url
            ),
        );
        let client_result = kb_lib::WsClient::new(endpoint.clone());
        let client = match client_result {
            Ok(client) => client,
            Err(error) => {
                // Roll back everything started so far before failing.
                kb_shutdown_started_clients(&started_clients, &mut relay_tasks).await;
                return Err(format!(
                    "cannot create websocket client for endpoint '{}': {}",
                    endpoint.name, error
                ));
            }
        };
        // Subscribe to the client's event broadcast before connecting and
        // spawn a task that relays every event to the frontend log.
        let mut event_receiver = client.subscribe_events();
        let relay_app_handle = app_handle.clone();
        let relay_task = tauri::async_runtime::spawn(async move {
            loop {
                let recv_result = event_receiver.recv().await;
                match recv_result {
                    Ok(event) => {
                        let line = kb_format_ws_event(&event);
                        kb_emit_app_log(&relay_app_handle, &line);
                    }
                    Err(tokio::sync::broadcast::error::RecvError::Lagged(skipped)) => {
                        // Broadcast buffer overflowed: report how many events
                        // were dropped, then keep receiving.
                        kb_emit_app_log(
                            &relay_app_handle,
                            &format!(
                                "[ws] event receiver lagged and skipped {} message(s)",
                                skipped
                            ),
                        );
                    }
                    Err(tokio::sync::broadcast::error::RecvError::Closed) => {
                        // Sender side is gone: end the relay task.
                        break;
                    }
                }
            }
        });
        let connect_result = client.connect().await;
        if let Err(error) = connect_result {
            // Abort this endpoint's relay first, then roll back the rest.
            relay_task.abort();
            kb_shutdown_started_clients(&started_clients, &mut relay_tasks).await;
            return Err(format!(
                "cannot connect websocket client for endpoint '{}': {}",
                endpoint.name, error
            ));
        }
        started_clients.push(client);
        relay_tasks.push(relay_task);
    }
    {
        // Re-check under the lock: another invocation may have stored its
        // own clients while this one was connecting (the early emptiness
        // check above ran in a separate critical section).
        let mut runtime_guard = state.ws_runtime.lock().await;
        if !runtime_guard.clients.is_empty() {
            kb_shutdown_started_clients(&started_clients, &mut relay_tasks).await;
            return Err("websocket clients were started concurrently".to_string());
        }
        runtime_guard.clients = started_clients;
        runtime_guard.relay_tasks = relay_tasks;
    }
    // Report the count as actually stored in the shared state.
    let started_count = {
        let runtime_guard = state.ws_runtime.lock().await;
        runtime_guard.clients.len()
    };
    kb_emit_app_log(
        &app_handle,
        &format!("[app] {} websocket client(s) started", started_count),
    );
    Ok(started_count)
}
|
|
|
|
#[tauri::command]
|
|
async fn stop_ws_clients(
|
|
app_handle: tauri::AppHandle,
|
|
state: tauri::State<'_, KbAppState>,
|
|
) -> Result<usize, std::string::String> {
|
|
let (clients, mut relay_tasks) = {
|
|
let mut runtime_guard = state.ws_runtime.lock().await;
|
|
(
|
|
std::mem::take(&mut runtime_guard.clients),
|
|
std::mem::take(&mut runtime_guard.relay_tasks),
|
|
)
|
|
};
|
|
if clients.is_empty() {
|
|
kb_emit_app_log(&app_handle, "[app] websocket clients are already stopped");
|
|
return Ok(0);
|
|
}
|
|
kb_emit_app_log(
|
|
&app_handle,
|
|
&format!("[app] stopping {} websocket client(s)", clients.len()),
|
|
);
|
|
let stopped_count = clients.len();
|
|
for client in &clients {
|
|
let disconnect_result = client.disconnect().await;
|
|
if let Err(error) = disconnect_result {
|
|
kb_emit_app_log(
|
|
&app_handle,
|
|
&format!(
|
|
"[app] disconnect error for endpoint '{}': {}",
|
|
client.endpoint_name(),
|
|
error
|
|
),
|
|
);
|
|
}
|
|
}
|
|
for relay_task in relay_tasks.drain(..) {
|
|
relay_task.abort();
|
|
}
|
|
kb_emit_app_log(
|
|
&app_handle,
|
|
&format!("[app] {} websocket client(s) stopped", stopped_count),
|
|
);
|
|
Ok(stopped_count)
|
|
}
|
|
|
|
fn kb_emit_app_log(app_handle: &tauri::AppHandle, message: &str) {
|
|
let emit_result = app_handle.emit("kb-log", message.to_string());
|
|
if let Err(error) = emit_result {
|
|
tracing::error!("error emitting app log event: {error:?}");
|
|
}
|
|
}
|
|
|
|
fn kb_format_ws_event(event: &kb_lib::WsEvent) -> std::string::String {
|
|
match event {
|
|
kb_lib::WsEvent::Connected {
|
|
endpoint_name,
|
|
endpoint_url,
|
|
} => {
|
|
format!("[ws:{endpoint_name}] connected to {endpoint_url}")
|
|
}
|
|
kb_lib::WsEvent::TextMessage {
|
|
endpoint_name,
|
|
text,
|
|
} => {
|
|
format!("[ws:{endpoint_name}] text: {text}")
|
|
}
|
|
kb_lib::WsEvent::JsonRpcMessage {
|
|
endpoint_name,
|
|
message,
|
|
} => match message {
|
|
kb_lib::KbJsonRpcWsIncomingMessage::SuccessResponse(response) => {
|
|
format!(
|
|
"[ws:{endpoint_name}] json-rpc success id={} result={}",
|
|
response.id, response.result
|
|
)
|
|
}
|
|
kb_lib::KbJsonRpcWsIncomingMessage::ErrorResponse(response) => {
|
|
format!(
|
|
"[ws:{endpoint_name}] json-rpc error id={} code={} message={}",
|
|
response.id, response.error.code, response.error.message
|
|
)
|
|
}
|
|
kb_lib::KbJsonRpcWsIncomingMessage::Notification(notification) => {
|
|
format!(
|
|
"[ws:{endpoint_name}] json-rpc notification method={} subscription={} result={}",
|
|
notification.method,
|
|
notification.params.subscription,
|
|
notification.params.result
|
|
)
|
|
}
|
|
},
|
|
kb_lib::WsEvent::JsonRpcParseError {
|
|
endpoint_name,
|
|
text,
|
|
error,
|
|
} => {
|
|
format!(
|
|
"[ws:{endpoint_name}] json-rpc parse error: {} | raw={}",
|
|
error, text
|
|
)
|
|
}
|
|
kb_lib::WsEvent::SubscriptionRegistered {
|
|
endpoint_name,
|
|
subscription,
|
|
} => {
|
|
format!(
|
|
"[ws:{endpoint_name}] subscription registered subscribe_method={} unsubscribe_method={} notification_method={} request_id={} subscription_id={}",
|
|
subscription.subscribe_method,
|
|
subscription.unsubscribe_method,
|
|
subscription.notification_method,
|
|
subscription.request_id,
|
|
subscription.subscription_id
|
|
)
|
|
}
|
|
kb_lib::WsEvent::SubscriptionNotification {
|
|
endpoint_name,
|
|
subscription,
|
|
notification,
|
|
method_matches_registry,
|
|
} => {
|
|
format!(
|
|
"[ws:{endpoint_name}] tracked notification method={} expected_method={} matches_registry={} subscription_id={} result={}",
|
|
notification.method,
|
|
subscription.notification_method,
|
|
method_matches_registry,
|
|
subscription.subscription_id,
|
|
notification.params.result
|
|
)
|
|
}
|
|
kb_lib::WsEvent::JsonRpcNotificationWithoutSubscription {
|
|
endpoint_name,
|
|
notification,
|
|
} => {
|
|
format!(
|
|
"[ws:{endpoint_name}] untracked notification method={} subscription={} result={}",
|
|
notification.method, notification.params.subscription, notification.params.result
|
|
)
|
|
}
|
|
kb_lib::WsEvent::SubscriptionUnregistered {
|
|
endpoint_name,
|
|
subscription_id,
|
|
unsubscribe_method,
|
|
was_active,
|
|
} => {
|
|
format!(
|
|
"[ws:{endpoint_name}] subscription unregistered subscription_id={} unsubscribe_method={} was_active={}",
|
|
subscription_id, unsubscribe_method, was_active
|
|
)
|
|
}
|
|
kb_lib::WsEvent::BinaryMessage {
|
|
endpoint_name,
|
|
data,
|
|
} => {
|
|
format!("[ws:{endpoint_name}] binary message ({} bytes)", data.len())
|
|
}
|
|
kb_lib::WsEvent::Ping {
|
|
endpoint_name,
|
|
data,
|
|
} => {
|
|
format!("[ws:{endpoint_name}] ping ({} bytes)", data.len())
|
|
}
|
|
kb_lib::WsEvent::Pong {
|
|
endpoint_name,
|
|
data,
|
|
} => {
|
|
format!("[ws:{endpoint_name}] pong ({} bytes)", data.len())
|
|
}
|
|
kb_lib::WsEvent::CloseReceived {
|
|
endpoint_name,
|
|
code,
|
|
reason,
|
|
} => {
|
|
format!(
|
|
"[ws:{endpoint_name}] close received code={:?} reason={:?}",
|
|
code, reason
|
|
)
|
|
}
|
|
kb_lib::WsEvent::Disconnected { endpoint_name } => {
|
|
format!("[ws:{endpoint_name}] disconnected")
|
|
}
|
|
kb_lib::WsEvent::Error {
|
|
endpoint_name,
|
|
error,
|
|
} => {
|
|
format!("[ws:{endpoint_name}] error: {error}")
|
|
}
|
|
}
|
|
}
|
|
|
|
async fn kb_shutdown_started_clients(
|
|
started_clients: &[kb_lib::WsClient],
|
|
relay_tasks: &mut std::vec::Vec<tauri::async_runtime::JoinHandle<()>>,
|
|
) {
|
|
for client in started_clients {
|
|
let disconnect_result = client.disconnect().await;
|
|
if let Err(error) = disconnect_result {
|
|
tracing::error!(
|
|
endpoint_name = %client.endpoint_name(),
|
|
"cleanup disconnect error: {}",
|
|
error
|
|
);
|
|
}
|
|
}
|
|
for relay_task in relay_tasks.drain(..) {
|
|
relay_task.abort();
|
|
}
|
|
}
|