This commit is contained in:
2026-04-20 20:14:40 +02:00
parent 4261291ac1
commit 176fe3db99
21 changed files with 1445 additions and 132 deletions

View File

@@ -1 +1,2 @@
0.0.1 - initial skel 0.0.1 - initial skel
0.0.2 - Socle conforme

View File

@@ -8,7 +8,7 @@ members = [
] ]
[workspace.package] [workspace.package]
version = "0.0.1" version = "0.0.2"
edition = "2024" edition = "2024"
license = "MIT" license = "MIT"
repository = "https://git.sasedev.com/Sasedev/khadhroony-bobobot" repository = "https://git.sasedev.com/Sasedev/khadhroony-bobobot"
@@ -46,7 +46,7 @@ spl-token-2022-interface = { version = "^2.1", features = [] }
sqlx = { version = "^0.8", features = ["chrono", "uuid", "bigdecimal", "json", "sqlite", "runtime-tokio-rustls"] } sqlx = { version = "^0.8", features = ["chrono", "uuid", "bigdecimal", "json", "sqlite", "runtime-tokio-rustls"] }
tauri = { version = "^2.10", features = ["default"] } tauri = { version = "^2.10", features = ["default"] }
tauri-build = { version = "2", features = [] } tauri-build = { version = "2", features = [] }
tauri-plugin-tracing = { version = "^0.3", features = [] } tauri-plugin-tracing = { version = "^0.3", default-features = false, features = [] }
tempfile = { version = "^3", features = [] } tempfile = { version = "^3", features = [] }
tokio = { version = "^1.52", features = ["full"] } tokio = { version = "^1.52", features = ["full"] }
tokio-stream = { version = "^0.1", features = ["full"] } tokio-stream = { version = "^0.1", features = ["full"] }

View File

@@ -410,4 +410,3 @@ La priorité immédiate est la suivante :
6. préparer `ws_client` et `http_client`, 6. préparer `ws_client` et `http_client`,
7. remettre `kb_app` en conformité, 7. remettre `kb_app` en conformité,
8. conserver une UI minimale, puis brancher progressivement les clients réseau. 8. conserver une UI minimale, puis brancher progressivement les clients réseau.

View File

@@ -28,7 +28,5 @@ tauri.workspace = true
tauri-plugin-tracing.workspace = true tauri-plugin-tracing.workspace = true
tokio.workspace = true tokio.workspace = true
tracing.workspace = true tracing.workspace = true
tracing-appender.workspace = true
tracing-subscriber.workspace = true
ts-rs.workspace = true ts-rs.workspace = true
uuid.workspace = true uuid.workspace = true

View File

@@ -1,3 +1,18 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type SplashOrder = { order: string, msg: string | null, status: string | null, }; /**
* Command payload sent from Rust to the splash frontend.
*/
export type SplashOrder = {
/**
* Splash command name such as `fadein`, `fadeout`, `add_msg`, or `add_log`.
*/
order: string,
/**
* Optional message payload attached to the command.
*/
msg: string | null,
/**
* Optional status payload attached to the command.
*/
status: string | null, };

View File

@@ -7,12 +7,13 @@ import ResizeObserver from "resize-observer-polyfill";
//import { listen, type UnlistenFn } from "@tauri-apps/api/event"; //import { listen, type UnlistenFn } from "@tauri-apps/api/event";
//import { error } from "@fltsci/tauri-plugin-tracing"; //import { error } from "@fltsci/tauri-plugin-tracing";
//import { info } from "@fltsci/tauri-plugin-tracing"; //import { info } from "@fltsci/tauri-plugin-tracing";
import { trace } from "@fltsci/tauri-plugin-tracing"; import { trace, takeoverConsole } from "@fltsci/tauri-plugin-tracing";
(window as Window & typeof globalThis & { bootstrap?: typeof bootstrap }).bootstrap = bootstrap; (window as Window & typeof globalThis & { bootstrap?: typeof bootstrap }).bootstrap = bootstrap;
(window as Window & typeof globalThis & { ResizeObserver?: typeof ResizeObserver }).ResizeObserver = ResizeObserver; (window as Window & typeof globalThis & { ResizeObserver?: typeof ResizeObserver }).ResizeObserver = ResizeObserver;
document.addEventListener("DOMContentLoaded", () => { document.addEventListener("DOMContentLoaded", () => {
void takeoverConsole();
const sidebarToggle = document.querySelector<HTMLButtonElement>('#sidebarToggle'); const sidebarToggle = document.querySelector<HTMLButtonElement>('#sidebarToggle');
if (sidebarToggle) { if (sidebarToggle) {
// restaurer létat depuis localStorage // restaurer létat depuis localStorage

View File

@@ -1,7 +1,6 @@
// file: kb_app/frontend/ts/splash.ts // file: kb_app/frontend/ts/splash.ts
import { error } from "@fltsci/tauri-plugin-tracing"; import { error, info, takeoverConsole } from "@fltsci/tauri-plugin-tracing";
import { info } from "@fltsci/tauri-plugin-tracing";
import { listen } from '@tauri-apps/api/event'; import { listen } from '@tauri-apps/api/event';
import { SplashOrder } from './bindings/SplashOrder.ts'; import { SplashOrder } from './bindings/SplashOrder.ts';
@@ -90,7 +89,7 @@ listen("splash", (event) => {
//window.addEventListener('DOMContentLoaded', initialize); //window.addEventListener('DOMContentLoaded', initialize);
document.addEventListener("DOMContentLoaded", () => { document.addEventListener("DOMContentLoaded", () => {
void takeoverConsole();
info("window loaded"); info("window loaded");
}); });

View File

@@ -1,7 +1,7 @@
{ {
"name": "kb-app", "name": "kb-app",
"private": true, "private": true,
"version": "0.0.1", "version": "0.0.2",
"type": "module", "type": "module",
"scripts": { "scripts": {
"dev": "vite", "dev": "vite",

View File

@@ -1,136 +1,145 @@
// file: kb_app/src/lib.rs // file: kb_app/src/lib.rs
//#![deny(unreachable_pub)] //! Tauri application library for `khadhroony-bobobot`.
//#![warn(missing_docs)] //!
//! This crate is intentionally thin. It loads the shared configuration,
//! initializes shared tracing from `kb_lib`, and wires the desktop shell
//! to the reusable backend logic.
#![deny(unreachable_pub)]
#![warn(missing_docs)]
mod splash; mod splash;
pub use crate::splash::SplashOrder; pub use crate::splash::SplashOrder;
use tauri::Emitter; use tauri::Emitter;
use tauri::{Manager}; use tauri::Manager;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
fn setup_logger() -> tauri_plugin_tracing::Builder {
let log_dir = std::env::temp_dir().join("kb_app");
match std::fs::create_dir_all(&log_dir) {
Ok(_) => {},
Err(err) => {
eprintln!("failed to create log directory: {:?}", err);
},
}
let file_appender = tracing_appender::rolling::daily(&log_dir, "app");
let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
std::mem::forget(guard);
let targets = tracing_subscriber::filter::Targets::new()
.with_default(tracing::Level::DEBUG)
.with_target("hyper", tracing::Level::WARN)
.with_target("reqwest", tracing::Level::WARN)
.with_target("tao", tracing::Level::WARN)
.with_target("wry", tracing::Level::WARN);
tracing_subscriber::registry()
.with(tracing_subscriber::fmt::layer().with_ansi(true))
.with(
tracing_subscriber::fmt::layer()
.with_writer(tauri_plugin_tracing::StripAnsiWriter::new(non_blocking))
.with_ansi(false),
)
.with(targets)
.init();
tauri_plugin_tracing::Builder::new()
}
/// Runs the desktop application.
#[cfg_attr(mobile, tauri::mobile_entry_point)] #[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() { pub fn run() {
let tracing_builder = setup_logger(); let config_path = kb_lib::KbConfig::default_path();
let config_result = kb_lib::KbConfig::load_from_path(&config_path);
let config = match config_result {
Ok(config) => config,
Err(error) => {
eprintln!(
"kb_app configuration load error from '{}': {}",
config_path.display(),
error
);
return;
}
};
let prepare_result = config.prepare_filesystem();
if let Err(error) = prepare_result {
eprintln!("kb_app filesystem preparation error: {error}");
return;
}
let tracing_guard_result = kb_lib::init_tracing(&config.logging);
let _tracing_guard = match tracing_guard_result {
Ok(guard) => guard,
Err(error) => {
eprintln!("kb_app tracing initialization error: {error}");
return;
}
};
tracing::info!(
app_name = %config.app.name,
environment = %config.app.environment,
"starting desktop application"
);
let tracing_builder = tauri_plugin_tracing::Builder::new();
let mut tauri_builder = tauri::Builder::default(); let mut tauri_builder = tauri::Builder::default();
tauri_builder = tauri_builder.plugin(tracing_builder.build::<tauri::Wry>()); tauri_builder = tauri_builder.plugin(tracing_builder.build::<tauri::Wry>());
tauri_builder = tauri_builder.setup(|app| { tauri_builder = tauri_builder.setup(|app| {
let app_handle = app.handle().clone(); let app_handle = app.handle().clone();
tauri::async_runtime::spawn(async move { tauri::async_runtime::spawn(async move {
let splash_window = app_handle.get_webview_window("splash").unwrap(); let splash_window_option = app_handle.get_webview_window("splash");
let main_window = app_handle.get_webview_window("main").unwrap(); let splash_window = match splash_window_option {
//main_window.set_title(&app_name).unwrap(); Some(window) => window,
None => {
tracing::error!("splash window not found");
return;
}
};
let main_window_option = app_handle.get_webview_window("main");
let main_window = match main_window_option {
Some(window) => window,
None => {
tracing::error!("main window not found");
return;
}
};
let is_debug = cfg!(debug_assertions); let is_debug = cfg!(debug_assertions);
tokio::time::sleep(std::time::Duration::from_millis(500)).await; tokio::time::sleep(std::time::Duration::from_millis(500)).await;
if is_debug { if is_debug {
let _ = splash_window.emit( emit_splash_order(&splash_window, "add_log", Some("Start Fade-In"), None);
"splash",
splash::SplashOrder {
order: "add_log".to_string(),
msg: Some("Start Fade-In".to_string()),
status: None,
},
);
} }
let _ = splash_window.emit( emit_splash_order(&splash_window, "fadein", None, None);
"splash", emit_splash_order(
splash::SplashOrder { &splash_window,
order: "fadein".to_string(), "add_msg",
msg: None, Some("Initialisation..."),
status: None, Some("info"),
},
);
let _ = splash_window.emit(
"splash",
splash::SplashOrder {
order: "add_msg".to_string(),
msg: Some("Initialisation...".to_string()),
status: Some("info".to_string()),
},
); );
tokio::time::sleep(std::time::Duration::from_millis(500)).await; tokio::time::sleep(std::time::Duration::from_millis(500)).await;
let _ = splash_window.emit( emit_splash_order(
"splash", &splash_window,
splash::SplashOrder { "add_msg",
order: "add_msg".to_string(), Some("Loading resources..."),
msg: Some("Loading resources...".to_string()), Some("info"),
status: Some("info".to_string()),
},
); );
tokio::time::sleep(std::time::Duration::from_millis(1000)).await; tokio::time::sleep(std::time::Duration::from_millis(1000)).await;
let _ = splash_window.emit( emit_splash_order(
"splash", &splash_window,
splash::SplashOrder { "add_msg",
order: "add_msg".to_string(), Some("Loading complete..."),
msg: Some("Loading complete...".to_string()), Some("success"),
status: Some("success".to_string()),
},
); );
tokio::time::sleep(std::time::Duration::from_millis(500)).await; tokio::time::sleep(std::time::Duration::from_millis(500)).await;
tracing::debug!("Start fadeout"); tracing::debug!("start splash fadeout");
if is_debug { if is_debug {
let _ = splash_window.emit( emit_splash_order(&splash_window, "add_log", Some("Start Fade-out"), None);
"splash",
splash::SplashOrder {
order: "add_log".to_string(),
msg: Some("Start Fade-out".to_string()),
status: None,
},
);
} }
let _ = splash_window.emit( emit_splash_order(&splash_window, "fadeout", None, None);
"splash", tracing::debug!("end splash fadeout");
splash::SplashOrder {
order: "fadeout".to_string(),
msg: None,
status: None,
},
);
tracing::debug!("End fadeout");
tokio::time::sleep(std::time::Duration::from_millis(3100)).await; tokio::time::sleep(std::time::Duration::from_millis(3100)).await;
if let Err(err) = splash_window.close() { let close_result = splash_window.close();
tracing::error!("Error closing splash window: {:?}", err); if let Err(error) = close_result {
tracing::error!("error closing splash window: {error:?}");
} }
if let Err(err) = main_window.show() { let show_result = main_window.show();
tracing::error!("Error showing main window: {:?}", err); if let Err(error) = show_result {
tracing::error!("error showing main window: {error:?}");
} else { } else {
let _ = main_window.emit("setupTray", ()); let emit_result = main_window.emit("setupTray", ());
if let Err(error) = emit_result {
tracing::error!("error emitting setupTray event: {error:?}");
}
} }
}); });
Ok(()) Ok(())
}); });
if let Err(err) = tauri_builder.run(tauri::generate_context!()) { let run_result = tauri_builder.run(tauri::generate_context!());
tracing::error!("error while running tauri application: {:?}", err); if let Err(error) = run_result {
tracing::error!("error while running tauri application: {error:?}");
}
}
fn emit_splash_order(
splash_window: &tauri::WebviewWindow,
order: &str,
msg: std::option::Option<&str>,
status: std::option::Option<&str>,
) {
let payload = crate::SplashOrder {
order: order.to_string(),
msg: msg.map(std::string::ToString::to_string),
status: status.map(std::string::ToString::to_string),
};
let emit_result = splash_window.emit("splash", payload);
if let Err(error) = emit_result {
tracing::error!("error emitting splash event '{order}': {error:?}");
} }
} }

View File

@@ -31,7 +31,6 @@ fn main() -> std::process::ExitCode
eprintln!("Another instance of the app is already running!"); eprintln!("Another instance of the app is already running!");
std::process::exit(1); std::process::exit(1);
} }
if rustls::crypto::CryptoProvider::get_default().is_none() { if rustls::crypto::CryptoProvider::get_default().is_none() {
let provider_result = rustls::crypto::aws_lc_rs::default_provider().install_default(); let provider_result = rustls::crypto::aws_lc_rs::default_provider().install_default();
match provider_result { match provider_result {
@@ -41,8 +40,7 @@ fn main() -> std::process::ExitCode
return std::process::ExitCode::FAILURE; return std::process::ExitCode::FAILURE;
}, },
} }
} }
kb_app_lib::run(); kb_app_lib::run();
std::process::ExitCode::SUCCESS std::process::ExitCode::SUCCESS
} }

View File

@@ -1,9 +1,18 @@
// file: kb_app/src/splash.rs // file: kb_app/src/splash.rs
//! Shared splash-screen payload types.
//!
//! These types are serialized by the Rust backend and exported to the
//! TypeScript frontend through `ts-rs`.
/// Command payload sent from Rust to the splash frontend.
#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, ts_rs::TS)] #[derive(serde::Serialize, serde::Deserialize, Clone, Debug, ts_rs::TS)]
#[ts(export, export_to = "../frontend/ts/bindings/SplashOrder.ts")] #[ts(export, export_to = "../frontend/ts/bindings/SplashOrder.ts")]
pub struct SplashOrder { pub struct SplashOrder {
pub order: String, /// Splash command name such as `fadein`, `fadeout`, `add_msg`, or `add_log`.
pub msg: Option<String>, pub order: std::string::String,
pub status: Option<String>, /// Optional message payload attached to the command.
} pub msg: std::option::Option<std::string::String>,
/// Optional status payload attached to the command.
pub status: std::option::Option<std::string::String>,
}

View File

@@ -1,7 +1,7 @@
{ {
"$schema": "https://schema.tauri.app/config/2", "$schema": "https://schema.tauri.app/config/2",
"productName": "kb-bapp", "productName": "kb-bapp",
"version": "0.0.1", "version": "0.0.2",
"identifier": "com.sasedev.kb-app", "identifier": "com.sasedev.kb-app",
"build": { "build": {
"beforeDevCommand": "npm run dev", "beforeDevCommand": "npm run dev",

View File

@@ -22,6 +22,7 @@ solana-address-lookup-table-interface.workspace = true
solana-client.workspace = true solana-client.workspace = true
solana-compute-budget-interface.workspace = true solana-compute-budget-interface.workspace = true
solana-rpc-client-api.workspace = true solana-rpc-client-api.workspace = true
solana-rpc-client-types.workspace = true
solana-sdk.workspace = true solana-sdk.workspace = true
solana-sdk-ids.workspace = true solana-sdk-ids.workspace = true
solana-system-interface.workspace = true solana-system-interface.workspace = true
@@ -35,6 +36,7 @@ tokio.workspace = true
tokio-stream.workspace = true tokio-stream.workspace = true
tokio-tungstenite.workspace = true tokio-tungstenite.workspace = true
tracing.workspace = true tracing.workspace = true
tracing-appender.workspace = true
tracing-subscriber.workspace = true tracing-subscriber.workspace = true
yellowstone-grpc-client.workspace = true yellowstone-grpc-client.workspace = true
yellowstone-grpc-proto.workspace = true yellowstone-grpc-proto.workspace = true

455
kb_lib/src/config.rs Normal file
View File

@@ -0,0 +1,455 @@
// file: kb_lib/src/config.rs
//! JSON configuration structures and loading helpers for `kb_lib`.
/// Root application configuration loaded from `config.json`.
///
/// Deserialized with `serde_json`; the JSON keys match the field names
/// below (no rename attributes are used).
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct KbConfig {
    /// Application-level metadata and global behavior.
    pub app: KbAppConfig,
    /// Tracing and log output configuration.
    pub logging: KbLoggingConfig,
    /// Data directory configuration.
    pub data: KbDataConfig,
    /// Solana endpoint configuration.
    pub solana: KbSolanaConfig,
}
/// Generic application settings.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct KbAppConfig {
    /// Human-readable application name. Must be non-empty
    /// (enforced by `KbConfig::validate`).
    pub name: std::string::String,
    /// Current environment name such as `development` or `production`.
    /// Must be non-empty (enforced by `KbConfig::validate`).
    pub environment: std::string::String,
    /// Default reconnection preference used by future UI settings.
    pub auto_reconnect_default: bool,
}
/// Logging and tracing configuration.
///
/// In version `0.0.2`, the project actively uses:
/// `level`, `console_enabled`, `console_ansi`, `file_enabled`,
/// `directory`, `file_prefix`, and `rotation`.
///
/// The fields `message_format` and `time_format` are already stored in the
/// configuration so that the format policy is stabilized early, even though
/// their handling will be refined in later versions.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct KbLoggingConfig {
    /// Global default log level. Must be non-empty.
    pub level: std::string::String,
    /// Enables console logging.
    pub console_enabled: bool,
    /// Enables ANSI colors on console output.
    pub console_ansi: bool,
    /// Enables file logging.
    pub file_enabled: bool,
    /// Directory where log files are stored. Must be non-empty; relative
    /// paths are resolved against the workspace root (see `directory_path`).
    pub directory: std::string::String,
    /// Prefix used for log file names. Must be non-empty.
    pub file_prefix: std::string::String,
    /// File rotation strategy. `KbConfig::validate` accepts exactly
    /// `daily`, `hourly`, or `never`.
    pub rotation: std::string::String,
    /// Preferred message formatting preset. `KbConfig::validate` accepts
    /// exactly `full`, `compact`, `pretty`, or `json`.
    pub message_format: std::string::String,
    /// Preferred time formatting preset. `KbConfig::validate` accepts
    /// exactly `rfc3339`, `rfc3339_millis`, or `none`.
    pub time_format: std::string::String,
    /// Per-target log level overrides.
    pub target_filters: std::collections::BTreeMap<std::string::String, std::string::String>,
}
/// Local data paths used by the application.
///
/// Relative paths are resolved against the workspace root by the
/// `*_path`/`*_path_buf` accessors.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct KbDataConfig {
    /// SQLite database path. Must be non-empty.
    pub sqlite_path: std::string::String,
    /// Directory storing Solana wallets and related material in future versions.
    /// Must be non-empty.
    pub wallets_directory: std::string::String,
}
/// Solana transport configuration.
///
/// Endpoint names must be unique across both lists combined
/// (enforced by `KbConfig::validate`).
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct KbSolanaConfig {
    /// Named HTTP endpoints.
    pub http_endpoints: std::vec::Vec<KbHttpEndpointConfig>,
    /// Named WebSocket endpoints.
    pub ws_endpoints: std::vec::Vec<KbWsEndpointConfig>,
}
/// HTTP endpoint configuration.
///
/// Validation rules (see `KbConfig::validate`): `name` is non-empty and
/// unique across all endpoints, `url` starts with `http://` or `https://`,
/// and every numeric field below is strictly positive.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct KbHttpEndpointConfig {
    /// Stable internal endpoint name used by the application.
    pub name: std::string::String,
    /// Enables or disables the endpoint.
    pub enabled: bool,
    /// Provider name such as `solana-public`, `helius`, or `custom`.
    pub provider: std::string::String,
    /// Base HTTP RPC URL. Must start with `http://` or `https://`.
    pub url: std::string::String,
    /// Optional environment variable name used to resolve an API key later.
    pub api_key_env_var: std::option::Option<std::string::String>,
    /// Logical roles assigned to this endpoint.
    pub roles: std::vec::Vec<std::string::String>,
    /// Allowed average request rate. Must be > 0.
    pub requests_per_second: u32,
    /// Burst capacity for future rate-limiting. Must be > 0.
    pub burst: u32,
    /// HTTP connect timeout in milliseconds. Must be > 0.
    pub connect_timeout_ms: u64,
    /// HTTP request timeout in milliseconds. Must be > 0.
    pub request_timeout_ms: u64,
}
/// WebSocket endpoint configuration.
///
/// Validation rules (see `KbConfig::validate`): `name` is non-empty and
/// unique across all endpoints, `url` starts with `ws://` or `wss://`,
/// and every numeric field below is strictly positive.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct KbWsEndpointConfig {
    /// Stable internal endpoint name used by the application.
    pub name: std::string::String,
    /// Enables or disables the endpoint.
    pub enabled: bool,
    /// Provider name such as `solana-public`, `helius`, or `custom`.
    pub provider: std::string::String,
    /// Base WebSocket RPC URL. Must start with `ws://` or `wss://`.
    pub url: std::string::String,
    /// Optional environment variable name used to resolve an API key later.
    pub api_key_env_var: std::option::Option<std::string::String>,
    /// Logical roles assigned to this endpoint.
    pub roles: std::vec::Vec<std::string::String>,
    /// Maximum number of subscriptions allowed on this endpoint. Must be > 0.
    pub max_subscriptions: u32,
    /// WebSocket connect timeout in milliseconds. Must be > 0.
    pub connect_timeout_ms: u64,
    /// Timeout for request/response round-trips in milliseconds. Must be > 0.
    pub request_timeout_ms: u64,
    /// Timeout used during unsubscribe on disconnect in milliseconds. Must be > 0.
    pub unsubscribe_timeout_ms: u64,
    /// Capacity of the future outgoing write channel. Must be > 0.
    pub write_channel_capacity: usize,
    /// Capacity of the future event channel. Must be > 0.
    pub event_channel_capacity: usize,
    /// Enables future automatic reconnection behavior.
    pub auto_reconnect: bool,
}
impl KbConfig {
/// Returns the default path of the JSON configuration file.
pub fn default_path() -> std::path::PathBuf {
kb_workspace_root_dir().join("config.json")
}
/// Loads a configuration from a JSON file and validates it.
pub fn load_from_path<P: AsRef<std::path::Path>>(path: P) -> Result<Self, crate::KbError> {
let path_ref = path.as_ref();
let content_result = std::fs::read_to_string(path_ref);
let content = match content_result {
Ok(content) => content,
Err(error) => {
return Err(crate::KbError::Io(format!(
"cannot read configuration file '{}': {error}",
path_ref.display()
)));
}
};
let config_result = serde_json::from_str::<Self>(&content);
let config = match config_result {
Ok(config) => config,
Err(error) => {
return Err(crate::KbError::Json(format!(
"cannot parse configuration file '{}': {error}",
path_ref.display()
)));
}
};
let validation_result = config.validate();
match validation_result {
Ok(()) => Ok(config),
Err(error) => Err(error),
}
}
/// Validates the current configuration.
pub fn validate(&self) -> Result<(), crate::KbError> {
if self.app.name.trim().is_empty() {
return Err(crate::KbError::Config(
"app.name must not be empty".to_string(),
));
}
if self.app.environment.trim().is_empty() {
return Err(crate::KbError::Config(
"app.environment must not be empty".to_string(),
));
}
if self.logging.level.trim().is_empty() {
return Err(crate::KbError::Config(
"logging.level must not be empty".to_string(),
));
}
if self.logging.directory.trim().is_empty() {
return Err(crate::KbError::Config(
"logging.directory must not be empty".to_string(),
));
}
if self.logging.file_prefix.trim().is_empty() {
return Err(crate::KbError::Config(
"logging.file_prefix must not be empty".to_string(),
));
}
if self.data.sqlite_path.trim().is_empty() {
return Err(crate::KbError::Config(
"data.sqlite_path must not be empty".to_string(),
));
}
if self.data.wallets_directory.trim().is_empty() {
return Err(crate::KbError::Config(
"data.wallets_directory must not be empty".to_string(),
));
}
if self.logging.rotation != "daily"
&& self.logging.rotation != "hourly"
&& self.logging.rotation != "never"
{
return Err(crate::KbError::Config(format!(
"unsupported logging.rotation '{}'",
self.logging.rotation
)));
}
if self.logging.message_format != "full"
&& self.logging.message_format != "compact"
&& self.logging.message_format != "pretty"
&& self.logging.message_format != "json"
{
return Err(crate::KbError::Config(format!(
"unsupported logging.message_format '{}'",
self.logging.message_format
)));
}
if self.logging.time_format != "rfc3339"
&& self.logging.time_format != "rfc3339_millis"
&& self.logging.time_format != "none"
{
return Err(crate::KbError::Config(format!(
"unsupported logging.time_format '{}'",
self.logging.time_format
)));
}
let mut endpoint_names: std::vec::Vec<std::string::String> = std::vec::Vec::new();
for endpoint in &self.solana.http_endpoints {
let validation_result = self.validate_http_endpoint(endpoint, &mut endpoint_names);
if let Err(error) = validation_result {
return Err(error);
}
}
for endpoint in &self.solana.ws_endpoints {
let validation_result = self.validate_ws_endpoint(endpoint, &mut endpoint_names);
if let Err(error) = validation_result {
return Err(error);
}
}
Ok(())
}
/// Creates the basic runtime directories required by the current configuration.
pub fn prepare_filesystem(&self) -> Result<(), crate::KbError> {
let logging_directory = self.logging.directory_path();
let create_logs_result = std::fs::create_dir_all(&logging_directory);
if let Err(error) = create_logs_result {
return Err(crate::KbError::Io(format!(
"cannot create logging directory '{}': {error}",
logging_directory.display()
)));
}
let wallets_directory = self.data.wallets_directory_path();
let create_wallets_result = std::fs::create_dir_all(&wallets_directory);
if let Err(error) = create_wallets_result {
return Err(crate::KbError::Io(format!(
"cannot create wallets directory '{}': {error}",
wallets_directory.display()
)));
}
let sqlite_path = self.data.sqlite_path_buf();
let sqlite_parent_option = sqlite_path.parent();
if let Some(sqlite_parent) = sqlite_parent_option {
if !sqlite_parent.as_os_str().is_empty() {
let create_db_parent_result = std::fs::create_dir_all(sqlite_parent);
if let Err(error) = create_db_parent_result {
return Err(crate::KbError::Io(format!(
"cannot create database parent directory '{}': {error}",
sqlite_parent.display()
)));
}
}
}
Ok(())
}
/// Returns a named HTTP endpoint by reference.
pub fn find_http_endpoint(
&self,
endpoint_name: &str,
) -> std::option::Option<&KbHttpEndpointConfig> {
self.solana
.http_endpoints
.iter()
.find(|endpoint| endpoint.name == endpoint_name)
}
/// Returns a named WebSocket endpoint by reference.
pub fn find_ws_endpoint(
&self,
endpoint_name: &str,
) -> std::option::Option<&KbWsEndpointConfig> {
self.solana
.ws_endpoints
.iter()
.find(|endpoint| endpoint.name == endpoint_name)
}
fn validate_http_endpoint(
&self,
endpoint: &KbHttpEndpointConfig,
endpoint_names: &mut std::vec::Vec<std::string::String>,
) -> Result<(), crate::KbError> {
if endpoint.name.trim().is_empty() {
return Err(crate::KbError::Config(
"http endpoint name must not be empty".to_string(),
));
}
if endpoint_names.iter().any(|name| name == &endpoint.name) {
return Err(crate::KbError::Config(format!(
"duplicated endpoint name '{}'",
endpoint.name
)));
}
if !endpoint.url.starts_with("http://") && !endpoint.url.starts_with("https://") {
return Err(crate::KbError::Config(format!(
"http endpoint '{}' must start with http:// or https://",
endpoint.name
)));
}
if endpoint.requests_per_second == 0 {
return Err(crate::KbError::Config(format!(
"http endpoint '{}' requests_per_second must be > 0",
endpoint.name
)));
}
if endpoint.burst == 0 {
return Err(crate::KbError::Config(format!(
"http endpoint '{}' burst must be > 0",
endpoint.name
)));
}
if endpoint.connect_timeout_ms == 0 {
return Err(crate::KbError::Config(format!(
"http endpoint '{}' connect_timeout_ms must be > 0",
endpoint.name
)));
}
if endpoint.request_timeout_ms == 0 {
return Err(crate::KbError::Config(format!(
"http endpoint '{}' request_timeout_ms must be > 0",
endpoint.name
)));
}
endpoint_names.push(endpoint.name.clone());
Ok(())
}
fn validate_ws_endpoint(
&self,
endpoint: &KbWsEndpointConfig,
endpoint_names: &mut std::vec::Vec<std::string::String>,
) -> Result<(), crate::KbError> {
if endpoint.name.trim().is_empty() {
return Err(crate::KbError::Config(
"ws endpoint name must not be empty".to_string(),
));
}
if endpoint_names.iter().any(|name| name == &endpoint.name) {
return Err(crate::KbError::Config(format!(
"duplicated endpoint name '{}'",
endpoint.name
)));
}
if !endpoint.url.starts_with("ws://") && !endpoint.url.starts_with("wss://") {
return Err(crate::KbError::Config(format!(
"ws endpoint '{}' must start with ws:// or wss://",
endpoint.name
)));
}
if endpoint.max_subscriptions == 0 {
return Err(crate::KbError::Config(format!(
"ws endpoint '{}' max_subscriptions must be > 0",
endpoint.name
)));
}
if endpoint.connect_timeout_ms == 0 {
return Err(crate::KbError::Config(format!(
"ws endpoint '{}' connect_timeout_ms must be > 0",
endpoint.name
)));
}
if endpoint.request_timeout_ms == 0 {
return Err(crate::KbError::Config(format!(
"ws endpoint '{}' request_timeout_ms must be > 0",
endpoint.name
)));
}
if endpoint.unsubscribe_timeout_ms == 0 {
return Err(crate::KbError::Config(format!(
"ws endpoint '{}' unsubscribe_timeout_ms must be > 0",
endpoint.name
)));
}
if endpoint.write_channel_capacity == 0 {
return Err(crate::KbError::Config(format!(
"ws endpoint '{}' write_channel_capacity must be > 0",
endpoint.name
)));
}
if endpoint.event_channel_capacity == 0 {
return Err(crate::KbError::Config(format!(
"ws endpoint '{}' event_channel_capacity must be > 0",
endpoint.name
)));
}
endpoint_names.push(endpoint.name.clone());
Ok(())
}
}
/// Resolves the workspace root directory: the parent of this crate's
/// manifest directory, falling back to the manifest directory itself when
/// it has no parent.
fn kb_workspace_root_dir() -> std::path::PathBuf {
    let crate_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    let workspace_dir_option = crate_dir.parent().map(std::path::Path::to_path_buf);
    workspace_dir_option.unwrap_or(crate_dir)
}
/// Resolves a configured path: absolute paths are returned unchanged,
/// relative paths are anchored at the workspace root.
fn kb_resolve_workspace_relative_path<P: AsRef<std::path::Path>>(path: P) -> std::path::PathBuf {
    let candidate = path.as_ref();
    if candidate.is_absolute() {
        candidate.to_path_buf()
    } else {
        kb_workspace_root_dir().join(candidate)
    }
}
impl KbLoggingConfig {
    /// Returns the resolved logging directory path.
    ///
    /// Absolute directories are returned unchanged; relative ones are
    /// anchored at the workspace root.
    pub fn directory_path(&self) -> std::path::PathBuf {
        kb_resolve_workspace_relative_path(self.directory.as_str())
    }
}
impl KbDataConfig {
    /// Returns the resolved SQLite database path.
    ///
    /// Absolute paths are returned unchanged; relative ones are anchored
    /// at the workspace root.
    pub fn sqlite_path_buf(&self) -> std::path::PathBuf {
        kb_resolve_workspace_relative_path(self.sqlite_path.as_str())
    }
    /// Returns the resolved wallets directory path.
    ///
    /// Absolute paths are returned unchanged; relative ones are anchored
    /// at the workspace root.
    pub fn wallets_directory_path(&self) -> std::path::PathBuf {
        kb_resolve_workspace_relative_path(self.wallets_directory.as_str())
    }
}

23
kb_lib/src/constants.rs Normal file
View File

@@ -0,0 +1,23 @@
// file: kb_lib/src/constants.rs
//! Solana program and mint constants reused by the project.
//!
//! Each constant re-exports the canonical `ID` declared by the matching
//! interface crate, so downstream code can name these programs through
//! `kb_lib` instead of depending on those crates directly.
/// SPL Token program identifier.
pub const SPL_TOKEN_PROGRAM_ID: solana_sdk::pubkey::Pubkey = spl_token_interface::ID;
/// SPL Token-2022 program identifier.
pub const SPL_TOKEN_2022_PROGRAM_ID: solana_sdk::pubkey::Pubkey = spl_token_2022_interface::ID;
/// Associated Token Account program identifier.
pub const ASSOCIATED_TOKEN_PROGRAM_ID: solana_sdk::pubkey::Pubkey =
    spl_associated_token_account_interface::program::ID;
/// Wrapped SOL mint identifier.
pub const WSOL_MINT_ID: solana_sdk::pubkey::Pubkey = spl_token_interface::native_mint::ID;
/// System program identifier.
pub const SYSTEM_PROGRAM_ID: solana_sdk::pubkey::Pubkey = solana_sdk_ids::system_program::ID;
/// Compute Budget program identifier.
pub const COMPUTE_BUDGET_PROGRAM_ID: solana_sdk::pubkey::Pubkey =
    solana_sdk_ids::compute_budget::ID;

68
kb_lib/src/error.rs Normal file
View File

@@ -0,0 +1,68 @@
// file: kb_lib/src/error.rs
//! Shared error type for `kb_lib`.
/// Global error type used by the `kb_lib` crate.
///
/// The project intentionally avoids `anyhow` and `thiserror`, so this
/// enum centralizes the main error families with explicit textual messages.
///
/// Each variant carries an already-formatted, human-readable message; the
/// `Display` implementation prefixes it with the error family name.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum KbError {
    /// Configuration error.
    Config(std::string::String),
    /// Filesystem or standard I/O error.
    Io(std::string::String),
    /// JSON serialization or deserialization error.
    Json(std::string::String),
    /// Tracing initialization or logging error.
    Tracing(std::string::String),
    /// HTTP transport error.
    Http(std::string::String),
    /// WebSocket transport error.
    Ws(std::string::String),
    /// Invalid internal state error.
    InvalidState(std::string::String),
    /// Operation requested while the client is not connected.
    NotConnected(std::string::String),
    /// Placeholder for a feature intentionally scheduled for a later version.
    NotImplemented(std::string::String),
}
impl std::fmt::Display for KbError {
    // Renders every variant as "<family label>: <message>".
    fn fmt(
        &self,
        formatter: &mut std::fmt::Formatter<'_>,
    ) -> std::fmt::Result {
        // Map each variant to its textual family label once, then format
        // through a single write path.
        let (label, message) = match self {
            Self::Config(message) => ("configuration error", message),
            Self::Io(message) => ("io error", message),
            Self::Json(message) => ("json error", message),
            Self::Tracing(message) => ("tracing error", message),
            Self::Http(message) => ("http error", message),
            Self::Ws(message) => ("websocket error", message),
            Self::InvalidState(message) => ("invalid state", message),
            Self::NotConnected(message) => ("not connected", message),
            Self::NotImplemented(message) => ("not implemented", message),
        };
        write!(formatter, "{label}: {message}")
    }
}
// Marker impl: `KbError` carries no source error, the message is everything.
impl std::error::Error for KbError {}

73
kb_lib/src/http_client.rs Normal file
View File

@@ -0,0 +1,73 @@
// file: kb_lib/src/http_client.rs
//! Generic asynchronous HTTP client skeleton.
//!
//! The transport is intentionally minimal in `0.0.2`. Endpoint binding and
//! client construction are stabilized now, while JSON-RPC request execution,
//! throttling, and batching are scheduled for `0.4.x`.
/// Generic asynchronous HTTP client placeholder.
///
/// Cloning is cheap: `reqwest::Client` is internally reference-counted and
/// is designed to be cloned and reused.
#[derive(Clone, Debug)]
pub struct HttpClient {
    // Endpoint settings (name, URL, timeouts) this client is bound to.
    endpoint: crate::KbHttpEndpointConfig,
    // Underlying reqwest client configured from the endpoint timeouts.
    client: reqwest::Client,
}
impl HttpClient {
/// Creates a new HTTP client bound to a named endpoint configuration.
pub fn new(endpoint: crate::KbHttpEndpointConfig) -> Result<Self, crate::KbError> {
if endpoint.name.trim().is_empty() {
return Err(crate::KbError::Config(
"http client endpoint name must not be empty".to_string(),
));
}
let builder = reqwest::Client::builder()
.connect_timeout(std::time::Duration::from_millis(
endpoint.connect_timeout_ms,
))
.timeout(std::time::Duration::from_millis(
endpoint.request_timeout_ms,
));
let client_result = builder.build();
let client = match client_result {
Ok(client) => client,
Err(error) => {
return Err(crate::KbError::Http(format!(
"cannot build reqwest client for endpoint '{}': {error}",
endpoint.name
)));
}
};
Ok(Self { endpoint, client })
}
/// Returns the endpoint name of this client.
pub fn endpoint_name(&self) -> &str {
&self.endpoint.name
}
/// Returns the endpoint URL of this client.
pub fn endpoint_url(&self) -> &str {
&self.endpoint.url
}
/// Returns the endpoint configuration of this client.
pub fn endpoint_config(&self) -> &crate::KbHttpEndpointConfig {
&self.endpoint
}
/// Returns the underlying reqwest client reference.
pub fn raw_client(&self) -> &reqwest::Client {
&self.client
}
/// Sends a JSON-RPC payload.
pub async fn send_json_rpc_request(
&self,
_payload: &serde_json::Value,
) -> Result<serde_json::Value, crate::KbError> {
Err(crate::KbError::NotImplemented(
"HttpClient::send_json_rpc_request is scheduled for version 0.4.x".to_string(),
))
}
}

View File

@@ -1,14 +1,34 @@
pub fn add(left: u64, right: u64) -> u64 { // file: kb_lib/src/lib.rs
left + right
}
#[cfg(test)] //! Core library of the `khadhroony-bobobot` workspace.
mod tests { //!
use super::*; //! This crate contains the reusable backend logic shared by the desktop
//! application and future clients. The first milestone focuses on a
//! conformant project skeleton with configuration loading, tracing setup,
//! shared constants, and transport client placeholders.
#[test] #![deny(unreachable_pub)]
fn it_works() { #![warn(missing_docs)]
let result = add(2, 2);
assert_eq!(result, 4); mod config;
} mod constants;
} mod error;
mod http_client;
mod tracing;
mod types;
mod ws_client;
pub use crate::config::KbAppConfig;
pub use crate::config::KbConfig;
pub use crate::config::KbDataConfig;
pub use crate::config::KbHttpEndpointConfig;
pub use crate::config::KbLoggingConfig;
pub use crate::config::KbSolanaConfig;
pub use crate::config::KbWsEndpointConfig;
pub use crate::constants::*;
pub use crate::error::KbError;
pub use crate::http_client::HttpClient;
pub use crate::tracing::KbTracingGuard;
pub use crate::tracing::init_tracing;
pub use crate::types::KbConnectionState;
pub use crate::ws_client::WsClient;

536
kb_lib/src/tracing.rs Normal file
View File

@@ -0,0 +1,536 @@
// file: kb_lib/src/tracing.rs
//! Tracing initialization helpers for `kb_lib`.
//!
//! This module initializes `tracing_subscriber` using the shared JSON
//! configuration loaded by `kb_lib::KbConfig`.
//!
//! The implementation intentionally avoids dynamic boxed layers because they
//! become awkward once additional layers are stacked on the registry.
//! Instead, it follows an explicit branch-per-output strategy similar to the
//! reference implementation you provided.
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
/// Guard keeping non-blocking tracing workers alive.
///
/// The guard must remain alive during the whole application lifetime so that
/// buffered log records are flushed correctly.
#[derive(Debug)]
pub struct KbTracingGuard {
    // One `WorkerGuard` per non-blocking writer (stdout and/or file).
    // Dropping a guard flushes and shuts down its background worker.
    guards: std::vec::Vec<tracing_appender::non_blocking::WorkerGuard>,
}
impl KbTracingGuard {
    /// Returns the number of active tracing worker guards.
    ///
    /// Zero when both console and file outputs are disabled.
    pub fn guard_count(&self) -> usize {
        self.guards.len()
    }
}
/// Initializes the global tracing subscriber.
///
/// This function is expected to be called once at application startup; the
/// returned guard must be kept alive for the whole process lifetime so that
/// buffered records are flushed.
///
/// # Errors
///
/// Returns [`crate::KbError::Tracing`] when the filter cannot be built, the
/// log directory or appender cannot be created, or a global subscriber is
/// already installed.
pub fn init_tracing(config: &crate::KbLoggingConfig) -> Result<KbTracingGuard, crate::KbError> {
    let filter_expression = build_filter_expression(config);
    let timer_format = resolve_time_format(&config.time_format);
    // One branch per output combination: each branch keeps fully-typed
    // layers, which avoids boxed layers on the registry.
    match (config.console_enabled, config.file_enabled) {
        (true, false) => {
            let (stdout_writer, stdout_guard) =
                tracing_appender::non_blocking(std::io::stdout());
            init_single_layer_subscriber(
                &filter_expression,
                &timer_format,
                &config.message_format,
                config.console_ansi,
                stdout_writer,
            )?;
            Ok(KbTracingGuard {
                guards: vec![stdout_guard],
            })
        }
        (false, true) => {
            let (file_writer, file_guard) = build_file_writer(config)?;
            // File output never uses ANSI colors.
            init_single_layer_subscriber(
                &filter_expression,
                &timer_format,
                &config.message_format,
                false,
                file_writer,
            )?;
            Ok(KbTracingGuard {
                guards: vec![file_guard],
            })
        }
        (true, true) => {
            let (stdout_writer, stdout_guard) =
                tracing_appender::non_blocking(std::io::stdout());
            let (file_writer, file_guard) = build_file_writer(config)?;
            init_dual_layer_subscriber(
                &filter_expression,
                &timer_format,
                &config.message_format,
                config.console_ansi,
                stdout_writer,
                file_writer,
            )?;
            Ok(KbTracingGuard {
                guards: vec![stdout_guard, file_guard],
            })
        }
        (false, false) => {
            // No outputs configured: install only the filter, no fmt layer,
            // so events are accepted but emitted nowhere.
            let env_filter = build_env_filter(&filter_expression)?;
            let init_result = tracing_subscriber::registry().with(env_filter).try_init();
            match init_result {
                Ok(()) => Ok(KbTracingGuard { guards: vec![] }),
                Err(error) => Err(crate::KbError::Tracing(format!(
                    "cannot initialize tracing subscriber: {error}"
                ))),
            }
        }
    }
}
// Builds the comma-separated `EnvFilter` expression: the base level first,
// then one `target=level` directive per configured override (BTreeMap
// iteration keeps the directives in deterministic order).
fn build_filter_expression(config: &crate::KbLoggingConfig) -> std::string::String {
    let target_directives = config
        .target_filters
        .iter()
        .map(|(target, level)| format!("{target}={level}"));
    std::iter::once(config.level.clone())
        .chain(target_directives)
        .collect::<std::vec::Vec<_>>()
        .join(",")
}
fn build_env_filter(
filter_expression: &str,
) -> Result<tracing_subscriber::EnvFilter, crate::KbError> {
let from_env_result = tracing_subscriber::EnvFilter::try_from_default_env();
match from_env_result {
Ok(env_filter) => Ok(env_filter),
Err(_) => {
let new_result = tracing_subscriber::EnvFilter::try_new(filter_expression);
match new_result {
Ok(env_filter) => Ok(env_filter),
Err(error) => Err(crate::KbError::Tracing(format!(
"cannot build env filter from '{filter_expression}': {error}"
))),
}
}
}
}
// Maps the configured `time_format` keyword to a chrono format string.
//
// The original chain had two branches ("rfc3339" and "none") that returned
// exactly the fallback value, so they are folded into the default arm.
// NOTE(review): "none" currently falls back to RFC 3339 like any unknown
// keyword; a true "no timestamp" mode would require a different fmt-layer
// setup and is left for a later version.
fn resolve_time_format(value: &str) -> std::string::String {
    match value {
        // RFC 3339 with millisecond precision and a numeric UTC offset.
        "rfc3339_millis" => "%Y-%m-%dT%H:%M:%S%.3f%:z".to_string(),
        // "%+" is chrono's built-in RFC 3339 specifier; it also serves as
        // the fallback for "rfc3339", "none", and unrecognized values.
        _ => "%+".to_string(),
    }
}
// Builds the non-blocking, ANSI-stripping file writer plus its worker guard.
//
// Creates the log directory if needed, configures the rolling appender from
// the logging config, and wraps the writer so log files stay color-free.
fn build_file_writer(
    config: &crate::KbLoggingConfig,
) -> Result<
    (
        KbStripAnsiMakeWriter<tracing_appender::non_blocking::NonBlocking>,
        tracing_appender::non_blocking::WorkerGuard,
    ),
    crate::KbError,
> {
    let directory_path = config.directory_path();
    // Fail fast: the rolling appender needs an existing directory.
    if let Err(error) = std::fs::create_dir_all(&directory_path) {
        return Err(crate::KbError::Tracing(format!(
            "cannot create log directory '{}': {error}",
            directory_path.display()
        )));
    }
    // Unknown rotation keywords intentionally degrade to daily rotation.
    let rotation = match config.rotation.as_str() {
        "hourly" => tracing_appender::rolling::Rotation::HOURLY,
        "never" => tracing_appender::rolling::Rotation::NEVER,
        _ => tracing_appender::rolling::Rotation::DAILY,
    };
    let file_appender = tracing_appender::rolling::Builder::default()
        .rotation(rotation)
        .filename_prefix(config.file_prefix.clone())
        .filename_suffix("log")
        .build(&directory_path)
        .map_err(|error| {
            crate::KbError::Tracing(format!(
                "cannot build rolling file appender in '{}': {error}",
                directory_path.display()
            ))
        })?;
    let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
    Ok((KbStripAnsiMakeWriter::new(non_blocking), guard))
}
// Installs the global subscriber with the env filter and a single fmt layer.
//
// Each message format keeps its own fully-typed layer (the builder methods
// `compact()` / `pretty()` / `json()` change the layer type), which is why
// the registry composition is repeated per match arm instead of boxed.
fn init_single_layer_subscriber<W>(
    filter_expression: &str,
    timer_format: &str,
    message_format: &str,
    ansi: bool,
    writer: W,
) -> Result<(), crate::KbError>
where
    W: for<'writer> tracing_subscriber::fmt::MakeWriter<'writer>
        + std::marker::Send
        + std::marker::Sync
        + 'static,
{
    let env_filter = build_env_filter(filter_expression)?;
    let timer = tracing_subscriber::fmt::time::ChronoLocal::new(timer_format.to_string());
    let registry = tracing_subscriber::registry().with(env_filter);
    let init_result = match message_format {
        "compact" => registry
            .with(
                tracing_subscriber::fmt::layer()
                    .compact()
                    .with_timer(timer)
                    .with_ansi(ansi)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(writer),
            )
            .try_init(),
        "pretty" => registry
            .with(
                tracing_subscriber::fmt::layer()
                    .pretty()
                    .with_timer(timer)
                    .with_ansi(ansi)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(writer),
            )
            .try_init(),
        // JSON output never carries ANSI colors, whatever the config says.
        "json" => registry
            .with(
                tracing_subscriber::fmt::layer()
                    .json()
                    .with_timer(timer)
                    .with_ansi(false)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(writer),
            )
            .try_init(),
        // Default: the full (non-compact) fmt layer.
        _ => registry
            .with(
                tracing_subscriber::fmt::layer()
                    .with_timer(timer)
                    .with_ansi(ansi)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(writer),
            )
            .try_init(),
    };
    init_result.map_err(|error| {
        crate::KbError::Tracing(format!(
            "cannot initialize tracing subscriber: {error}"
        ))
    })
}
// Installs the global subscriber with the env filter plus two fmt layers:
// one for stdout (optionally colored) and one for the file (never colored).
//
// As in the single-layer variant, the registry composition is repeated per
// message format because each builder method changes the layer type.
fn init_dual_layer_subscriber<W1, W2>(
    filter_expression: &str,
    timer_format: &str,
    message_format: &str,
    console_ansi: bool,
    stdout_writer: W1,
    file_writer: W2,
) -> Result<(), crate::KbError>
where
    W1: for<'writer> tracing_subscriber::fmt::MakeWriter<'writer>
        + std::marker::Send
        + std::marker::Sync
        + 'static,
    W2: for<'writer> tracing_subscriber::fmt::MakeWriter<'writer>
        + std::marker::Send
        + std::marker::Sync
        + 'static,
{
    let env_filter = build_env_filter(filter_expression)?;
    // Each layer receives its own timer instance built from the same format.
    let stdout_timer = tracing_subscriber::fmt::time::ChronoLocal::new(timer_format.to_string());
    let file_timer = tracing_subscriber::fmt::time::ChronoLocal::new(timer_format.to_string());
    let registry = tracing_subscriber::registry().with(env_filter);
    let init_result = match message_format {
        "compact" => registry
            .with(
                tracing_subscriber::fmt::layer()
                    .compact()
                    .with_timer(stdout_timer)
                    .with_ansi(console_ansi)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(stdout_writer),
            )
            .with(
                tracing_subscriber::fmt::layer()
                    .compact()
                    .with_timer(file_timer)
                    .with_ansi(false)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(file_writer),
            )
            .try_init(),
        "pretty" => registry
            .with(
                tracing_subscriber::fmt::layer()
                    .pretty()
                    .with_timer(stdout_timer)
                    .with_ansi(console_ansi)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(stdout_writer),
            )
            .with(
                tracing_subscriber::fmt::layer()
                    .pretty()
                    .with_timer(file_timer)
                    .with_ansi(false)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(file_writer),
            )
            .try_init(),
        // JSON output disables ANSI on both layers, including stdout.
        "json" => registry
            .with(
                tracing_subscriber::fmt::layer()
                    .json()
                    .with_timer(stdout_timer)
                    .with_ansi(false)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(stdout_writer),
            )
            .with(
                tracing_subscriber::fmt::layer()
                    .json()
                    .with_timer(file_timer)
                    .with_ansi(false)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(file_writer),
            )
            .try_init(),
        // Default: the full (non-compact) fmt layer on both outputs.
        _ => registry
            .with(
                tracing_subscriber::fmt::layer()
                    .with_timer(stdout_timer)
                    .with_ansi(console_ansi)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(stdout_writer),
            )
            .with(
                tracing_subscriber::fmt::layer()
                    .with_timer(file_timer)
                    .with_ansi(false)
                    .with_target(true)
                    .with_thread_ids(false)
                    .with_thread_names(false)
                    .with_writer(file_writer),
            )
            .try_init(),
    };
    init_result.map_err(|error| {
        crate::KbError::Tracing(format!(
            "cannot initialize tracing subscriber: {error}"
        ))
    })
}
/// `MakeWriter` adapter that strips ANSI CSI escape sequences from all bytes
/// written through the writers it produces (used for the file output so log
/// files stay free of color codes).
struct KbStripAnsiMakeWriter<W> {
    // Wrapped writer factory (a non-blocking appender in practice).
    inner: W,
}
impl<W> KbStripAnsiMakeWriter<W> {
    /// Wraps `inner` so the writers it hands out emit ANSI-free bytes.
    fn new(inner: W) -> Self {
        Self { inner }
    }
}
/// Writer wrapper that filters ANSI CSI sequences out of every `write` call.
struct KbStripAnsiWriter<W> {
    // Destination receiving the sanitized bytes.
    inner: W,
}
impl<'a, W> tracing_subscriber::fmt::MakeWriter<'a> for KbStripAnsiMakeWriter<W>
where
    W: tracing_subscriber::fmt::MakeWriter<'a>,
{
    type Writer = KbStripAnsiWriter<W::Writer>;
    // Wraps every writer produced by the inner factory so all bytes pass
    // through the ANSI-stripping filter.
    fn make_writer(&'a self) -> Self::Writer {
        KbStripAnsiWriter {
            inner: self.inner.make_writer(),
        }
    }
}
impl<W> std::io::Write for KbStripAnsiWriter<W>
where
    W: std::io::Write,
{
    // Strips ANSI CSI sequences before forwarding to the inner writer.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let sanitized = kb_strip_ansi_bytes(buf);
        self.inner.write_all(&sanitized)?;
        // Report the full input length as consumed: the stripped bytes are
        // intentionally discarded, not left pending for a retry.
        Ok(buf.len())
    }
    fn flush(&mut self) -> std::io::Result<()> {
        self.inner.flush()
    }
}
// Removes ANSI CSI escape sequences (ESC '[' ... final byte in 0x40..=0x7E)
// from `input`, returning the remaining bytes in order.
//
// A lone ESC, or an ESC introducing a non-CSI sequence (e.g. OSC), is passed
// through unchanged; an unterminated CSI sequence consumes the rest of the
// buffer.
fn kb_strip_ansi_bytes(input: &[u8]) -> std::vec::Vec<u8> {
    let mut output = std::vec::Vec::with_capacity(input.len());
    let mut cursor = 0usize;
    while cursor < input.len() {
        // CSI start: ESC immediately followed by '['.
        if input[cursor] == 0x1B && input.get(cursor + 1) == Some(&b'[') {
            cursor += 2;
            // Skip parameter/intermediate bytes up to and including the
            // final byte (0x40..=0x7E), which terminates the sequence.
            while let Some(&sequence_byte) = input.get(cursor) {
                cursor += 1;
                if (0x40..=0x7E).contains(&sequence_byte) {
                    break;
                }
            }
        } else {
            output.push(input[cursor]);
            cursor += 1;
        }
    }
    output
}
#[cfg(test)]
mod tests {
    // Unit tests for the pure helpers of this module. The subscriber
    // initialization itself installs process-global state and is therefore
    // not exercised here.
    #[test]
    fn resolve_time_format_maps_rfc3339() {
        let value = super::resolve_time_format("rfc3339");
        assert_eq!(value, "%+");
    }
    #[test]
    fn resolve_time_format_maps_rfc3339_millis() {
        let value = super::resolve_time_format("rfc3339_millis");
        assert_eq!(value, "%Y-%m-%dT%H:%M:%S%.3f%:z");
    }
    // Without target overrides, the expression is just the base level.
    #[test]
    fn build_filter_expression_includes_default_level() {
        let config = crate::KbLoggingConfig {
            level: "debug".to_string(),
            console_enabled: true,
            console_ansi: true,
            file_enabled: false,
            directory: "./logs".to_string(),
            file_prefix: "app".to_string(),
            rotation: "daily".to_string(),
            message_format: "compact".to_string(),
            time_format: "rfc3339_millis".to_string(),
            target_filters: std::collections::BTreeMap::new(),
        };
        let value = super::build_filter_expression(&config);
        assert_eq!(value, "debug");
    }
    // Target overrides are appended as `target=level` directives.
    #[test]
    fn build_filter_expression_includes_target_overrides() {
        let mut target_filters = std::collections::BTreeMap::new();
        target_filters.insert("hyper".to_string(), "warn".to_string());
        target_filters.insert("reqwest".to_string(), "error".to_string());
        let config = crate::KbLoggingConfig {
            level: "info".to_string(),
            console_enabled: true,
            console_ansi: true,
            file_enabled: true,
            directory: "./logs".to_string(),
            file_prefix: "app".to_string(),
            rotation: "daily".to_string(),
            message_format: "compact".to_string(),
            time_format: "rfc3339".to_string(),
            target_filters,
        };
        let value = super::build_filter_expression(&config);
        assert!(value.contains("info"));
        assert!(value.contains("hyper=warn"));
        assert!(value.contains("reqwest=error"));
    }
}

16
kb_lib/src/types.rs Normal file
View File

@@ -0,0 +1,16 @@
// file: kb_lib/src/types.rs
//! Shared generic types for `kb_lib`.
/// Generic connection state used by transport clients.
///
/// Derives the `serde` traits so the state can be serialized directly
/// (e.g. when reporting client status as JSON).
#[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum KbConnectionState {
    /// The client is fully disconnected.
    Disconnected,
    /// The client is establishing its connection.
    Connecting,
    /// The client is connected and ready.
    Connected,
    /// The client is shutting down its connection.
    Disconnecting,
}

91
kb_lib/src/ws_client.rs Normal file
View File

@@ -0,0 +1,91 @@
// file: kb_lib/src/ws_client.rs
//! Generic asynchronous WebSocket client skeleton.
//!
//! This module prepares the shape of the future Solana WebSocket transport.
//! The actual transport loop, split read/write tasks, request tracking,
//! subscribe registry, and notification routing are scheduled for `0.1.x`
//! and `0.2.x` / `0.3.x`.
/// Generic asynchronous WebSocket client placeholder.
///
/// Cloning is cheap: the request counter and the connection state are both
/// behind `Arc`s, so all clones observe the same shared client state.
#[derive(Clone, Debug)]
pub struct WsClient {
    // Endpoint settings this client is bound to.
    endpoint: crate::KbWsEndpointConfig,
    // Monotonic JSON-RPC request id counter, shared across clones.
    next_request_id: std::sync::Arc<std::sync::atomic::AtomicU64>,
    // Current connection state, shared across clones.
    state: std::sync::Arc<tokio::sync::RwLock<crate::KbConnectionState>>,
}
impl WsClient {
    /// Creates a new WebSocket client bound to a named endpoint configuration.
    ///
    /// # Errors
    ///
    /// Returns [`crate::KbError::Config`] when the endpoint name is blank.
    pub fn new(endpoint: crate::KbWsEndpointConfig) -> Result<Self, crate::KbError> {
        if endpoint.name.trim().is_empty() {
            return Err(crate::KbError::Config(
                "ws client endpoint name must not be empty".to_string(),
            ));
        }
        // Request ids start at 1; 0 is left unused.
        let request_counter = std::sync::atomic::AtomicU64::new(1);
        let initial_state =
            tokio::sync::RwLock::new(crate::KbConnectionState::Disconnected);
        Ok(Self {
            endpoint,
            next_request_id: std::sync::Arc::new(request_counter),
            state: std::sync::Arc::new(initial_state),
        })
    }
    /// Returns the endpoint name of this client.
    pub fn endpoint_name(&self) -> &str {
        self.endpoint.name.as_str()
    }
    /// Returns the endpoint URL of this client.
    pub fn endpoint_url(&self) -> &str {
        self.endpoint.url.as_str()
    }
    /// Returns the endpoint configuration of this client.
    pub fn endpoint_config(&self) -> &crate::KbWsEndpointConfig {
        &self.endpoint
    }
    /// Returns the next request identifier and increments the internal counter.
    ///
    /// Relaxed ordering is sufficient here: the counter only has to produce
    /// unique values, it does not order other memory operations.
    pub fn next_request_id(&self) -> u64 {
        let ordering = std::sync::atomic::Ordering::Relaxed;
        self.next_request_id.fetch_add(1, ordering)
    }
    /// Returns the current connection state.
    pub async fn connection_state(&self) -> crate::KbConnectionState {
        *self.state.read().await
    }
    /// Connects the client to its remote WebSocket endpoint.
    ///
    /// Placeholder: the transport loop arrives in `0.1.x`.
    pub async fn connect(&self) -> Result<(), crate::KbError> {
        Err(crate::KbError::NotImplemented(
            "WsClient::connect is scheduled for version 0.1.x".to_string(),
        ))
    }
    /// Sends a text frame through the WebSocket connection.
    pub async fn send_text(&self, _text: std::string::String) -> Result<(), crate::KbError> {
        Err(crate::KbError::NotImplemented(
            "WsClient::send_text is scheduled for version 0.1.x".to_string(),
        ))
    }
    /// Sends a JSON value through the WebSocket connection.
    pub async fn send_json_value(&self, _value: &serde_json::Value) -> Result<(), crate::KbError> {
        Err(crate::KbError::NotImplemented(
            "WsClient::send_json_value is scheduled for version 0.2.x".to_string(),
        ))
    }
    /// Disconnects the client from its remote endpoint.
    ///
    /// The final implementation will unsubscribe with timeout before close.
    pub async fn disconnect(&self) -> Result<(), crate::KbError> {
        Err(crate::KbError::NotImplemented(
            "WsClient::disconnect is scheduled for version 0.1.x / 0.3.x".to_string(),
        ))
    }
}