diff --git a/Cargo.lock b/Cargo.lock
index d234b4a4..4689d58d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -693,26 +693,6 @@ version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
 
-[[package]]
-name = "auto-updater"
-version = "0.1.0"
-dependencies = [
- "anyhow",
- "bollard",
- "chrono",
- "futures",
- "parking_lot 0.12.5",
- "platform-core",
- "reqwest 0.12.25",
- "semver",
- "serde",
- "serde_json",
- "tempfile",
- "thiserror 2.0.17",
- "tokio",
- "tracing",
-]
-
 [[package]]
 name = "autocfg"
 version = "1.5.0"
@@ -843,44 +823,6 @@ dependencies = [
  "serde",
 ]
 
-[[package]]
-name = "bindgen"
-version = "0.69.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088"
-dependencies = [
- "bitflags 2.10.0",
- "cexpr",
- "clang-sys",
- "itertools 0.11.0",
- "lazy_static",
- "lazycell",
- "proc-macro2",
- "quote",
- "regex",
- "rustc-hash 1.1.0",
- "shlex",
- "syn 2.0.111",
-]
-
-[[package]]
-name = "bindgen"
-version = "0.72.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895"
-dependencies = [
- "bitflags 2.10.0",
- "cexpr",
- "clang-sys",
- "itertools 0.13.0",
- "proc-macro2",
- "quote",
- "regex",
- "rustc-hash 2.1.1",
- "shlex",
- "syn 2.0.111",
-]
-
 [[package]]
 name = "bip39"
 version = "2.2.2"
@@ -1002,19 +944,6 @@ dependencies = [
  "constant_time_eq 0.3.1",
 ]
 
-[[package]]
-name = "blake3"
-version = "1.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0"
-dependencies = [
- "arrayref",
- "arrayvec 0.7.6",
- "cc",
- "cfg-if",
- "constant_time_eq 0.3.1",
-]
-
 [[package]]
 name = "block-buffer"
 version = "0.9.0"
@@ -1157,16 +1086,6 @@ version = "1.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"
 
-[[package]]
-name = "bzip2-sys"
-version = "0.1.13+1.0.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14"
-dependencies = [
- "cc",
- "pkg-config",
-]
-
 [[package]]
 name = "cc"
 version = "1.2.49"
@@ -1174,8 +1093,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215"
 dependencies = [
  "find-msvc-tools",
- "jobserver",
- "libc",
  "shlex",
 ]
@@ -1185,15 +1102,6 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
 
-[[package]]
-name = "cexpr"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
-dependencies = [
- "nom 7.1.3",
-]
-
 [[package]]
 name = "cfg-if"
 version = "1.0.4"
@@ -1280,17 +1188,6 @@ dependencies = [
  "zeroize",
 ]
 
-[[package]]
-name = "clang-sys"
-version = "1.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
-dependencies = [
- "glob",
- "libc",
- "libloading",
-]
-
 [[package]]
 name = "clap"
 version = "4.5.53"
@@ -1922,32 +1819,6 @@ dependencies = [
  "syn 2.0.111",
 ]
 
-[[package]]
-name = "distributed-db"
-version = "0.1.0"
= "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "bincode", - "blake3", - "chrono", - "futures", - "hex", - "parking_lot 0.12.5", - "platform-core", - "rand 0.8.5", - "rocksdb", - "serde", - "serde_json", - "sha2 0.10.9", - "tempfile", - "thiserror 2.0.17", - "tokio", - "tokio-test", - "tracing", - "uuid", -] - [[package]] name = "docify" version = "0.2.9" @@ -3377,16 +3248,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" -[[package]] -name = "jobserver" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" -dependencies = [ - "getrandom 0.3.4", - "libc", -] - [[package]] name = "js-sys" version = "0.3.83" @@ -3443,7 +3304,7 @@ dependencies = [ "futures-util", "jsonrpsee-types", "pin-project", - "rustc-hash 2.1.1", + "rustc-hash", "serde", "serde_json", "thiserror 1.0.69", @@ -3531,28 +3392,12 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" -[[package]] -name = "libloading" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" -dependencies = [ - "cfg-if", - "windows-link", -] - [[package]] name = "libm" version = "0.2.15" @@ -3570,22 +3415,6 @@ dependencies = [ "redox_syscall 0.6.0", ] -[[package]] -name = "librocksdb-sys" -version = "0.16.0+8.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" -dependencies = [ - "bindgen 0.69.5", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", - "lz4-sys", - "zstd-sys", -] - [[package]] name = "libsecp256k1" version = "0.7.2" @@ -3634,17 +3463,6 @@ dependencies = [ "libsecp256k1-core", ] -[[package]] -name = "libz-sys" -version = "1.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - [[package]] name = "linux-raw-sys" version = "0.1.4" @@ -3699,16 +3517,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" -[[package]] -name = "lz4-sys" -version = "1.11.1+lz4-1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "lz4_flex" version = "0.11.5" @@ -3814,12 +3622,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - [[package]] name = "miniz_oxide" version = "0.8.9" @@ -3930,16 +3732,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - [[package]] name = "nom" version = "8.0.0" @@ -5073,7 +4865,7 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.1.1", + "rustc-hash", "rustls", "socket2 0.6.1", "thiserror 2.0.17", @@ -5093,7 +4885,7 @@ dependencies = [ "lru-slab", "rand 0.9.2", "ring", - "rustc-hash 2.1.1", + "rustc-hash", "rustls", "rustls-pki-types", "slab", @@ -5430,16 +5222,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rocksdb" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7" -dependencies = [ - "libc", - "librocksdb-sys", -] - [[package]] name = "rstest" version = "0.23.0" @@ -5476,12 +5258,6 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - [[package]] name = "rustc-hash" version = "2.1.1" @@ -6405,7 +6181,7 @@ dependencies = [ "libm", "libsecp256k1", "merlin", - "nom 8.0.0", + "nom", "num-bigint", "num-rational", "num-traits", @@ -9355,14 +9131,3 @@ dependencies = [ "quote", "syn 2.0.111", ] - -[[package]] -name = "zstd-sys" -version = "2.0.16+zstd.1.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" -dependencies = [ - "bindgen 0.72.1", - "cc", - "pkg-config", -] diff --git a/Cargo.toml b/Cargo.toml index 91d77b9a..3d08a2f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,8 +9,6 @@ members = [ "crates/subnet-manager", "crates/rpc-server", "crates/challenge-orchestrator", - "crates/auto-updater", - "crates/distributed-db", "crates/secure-container-runtime", "crates/platform-server", "bins/platform", diff --git a/crates/auto-updater/Cargo.toml b/crates/auto-updater/Cargo.toml deleted file mode 100644 index 8fc839b1..00000000 --- a/crates/auto-updater/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "auto-updater" -version.workspace = true -edition.workspace = true -authors.workspace = true -license.workspace = true -description = "Automatic validator updates for Mini-Chain" - -[dependencies] -platform-core = { path = "../core" } - -# Async -tokio = { workspace = true } -futures = { workspace = true } - -# Serialization -serde = { workspace = true } -serde_json = { workspace = true } - -# Docker -bollard = "0.18" - -# HTTP -reqwest = { version = "0.12", features = ["json"] } - -# Error handling -anyhow = { workspace = true } -thiserror = { workspace = true } - -# Logging -tracing = { workspace = true } - -# Utils -chrono = { workspace = true } -parking_lot = { workspace = true } -semver = "1.0" - -[dev-dependencies] -tempfile = { workspace = true } diff --git a/crates/auto-updater/src/lib.rs 
diff --git a/crates/auto-updater/src/lib.rs b/crates/auto-updater/src/lib.rs
deleted file mode 100644
index f4d58ca7..00000000
--- a/crates/auto-updater/src/lib.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-//! Auto-Updater for Mini-Chain Validators
-//!
-//! Monitors for version updates from the network and automatically
-//! pulls new Docker images and restarts the validator.
-
-pub mod updater;
-pub mod version;
-pub mod watcher;
-
-pub use updater::*;
-pub use version::*;
-pub use watcher::*;
diff --git a/crates/auto-updater/src/updater.rs b/crates/auto-updater/src/updater.rs
deleted file mode 100644
index 283280ad..00000000
--- a/crates/auto-updater/src/updater.rs
+++ /dev/null
@@ -1,598 +0,0 @@
-//! Auto-updater - pulls new images and triggers restart
-
-use crate::{UpdateRequirement, Version, VersionWatcher};
-use bollard::image::CreateImageOptions;
-use bollard::Docker;
-use futures::StreamExt;
-use std::sync::Arc;
-use std::time::Duration;
-#[cfg(test)]
-use std::{future::Future, pin::Pin};
-use tokio::sync::broadcast;
-use tracing::{debug, error, info, warn};
-
-/// Auto-updater that pulls new Docker images and signals for restart
-pub struct AutoUpdater {
-    docker: Docker,
-    watcher: Arc<VersionWatcher>,
-    restart_sender: broadcast::Sender<RestartSignal>,
-    #[cfg(test)]
-    pull_hook: Option<PullHook>,
-}
-
-#[cfg(test)]
-type PullHook =
-    Arc<dyn Fn(&str) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + Send>> + Send + Sync>;
-
-#[cfg(test)]
-const SHUTDOWN_DELAY_SECS: u64 = 0;
-#[cfg(not(test))]
-const SHUTDOWN_DELAY_SECS: u64 = 5;
-
-impl AutoUpdater {
-    pub async fn new(watcher: Arc<VersionWatcher>) -> anyhow::Result<Self> {
-        let docker = Docker::connect_with_local_defaults()?;
-        docker.ping().await?;
-
-        let (restart_sender, _) = broadcast::channel(16);
-
-        Ok(Self {
-            docker,
-            watcher,
-            restart_sender,
-            #[cfg(test)]
-            pull_hook: None,
-        })
-    }
-
-    /// Subscribe to restart signals
-    pub fn subscribe_restart(&self) -> broadcast::Receiver<RestartSignal> {
-        self.restart_sender.subscribe()
-    }
-
-    /// Start the auto-update loop
-    pub async fn start(self: Arc<Self>) {
-        let mut update_rx = self.watcher.subscribe();
-
-        tokio::spawn(async move {
-            loop {
-                match update_rx.recv().await {
-                    Ok(requirement) => {
-                        if let Err(e) = self.handle_update(requirement).await {
-                            error!(error = %e, "Failed to handle update");
-                        }
-                    }
-                    Err(broadcast::error::RecvError::Lagged(n)) => {
-                        warn!(skipped = n, "Update receiver lagged");
-                    }
-                    Err(broadcast::error::RecvError::Closed) => {
-                        info!("Update channel closed, stopping auto-updater");
-                        break;
-                    }
-                }
-            }
-        });
-    }
-
-    /// Handle a version update
-    async fn handle_update(&self, requirement: UpdateRequirement) -> anyhow::Result<()> {
-        let current = Version::current();
-
-        if !current.needs_update(&requirement.min_version) {
-            debug!("Already on required version or newer");
-            return Ok(());
-        }
-
-        info!(
-            current = %current,
-            required = %requirement.min_version,
-            image = %requirement.docker_image,
-            "Pulling new version"
-        );
-
-        // Pull the new image
-        self.pull_image(&requirement.docker_image).await?;
-
-        info!(image = %requirement.docker_image, "New image pulled successfully");
-
-        // Signal for restart
-        let signal = RestartSignal {
-            new_version: requirement.recommended_version,
-            image: requirement.docker_image,
-            mandatory: requirement.mandatory,
-        };
-
-        let _ = self.restart_sender.send(signal);
-
-        // If mandatory, exit to trigger restart
-        if requirement.mandatory {
-            info!("Mandatory update - initiating graceful shutdown");
-            // Give time for cleanup
-            tokio::time::sleep(Duration::from_secs(SHUTDOWN_DELAY_SECS)).await;
-            // Exit with code 0 - systemd/Docker will restart with new image
-            #[cfg(not(test))]
-            std::process::exit(0);
-            #[cfg(test)]
-            {
-                return Ok(());
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Pull a Docker image
-    async fn pull_image(&self, image: &str) -> anyhow::Result<()> {
-        #[cfg(test)]
-        if let Some(hook) = &self.pull_hook {
-            return (hook)(image).await;
-        }
-
-        let options = CreateImageOptions {
-            from_image: image,
-            ..Default::default()
-        };
-
-        let mut stream = self.docker.create_image(Some(options), None, None);
-
-        while let Some(result) = stream.next().await {
-            match result {
-                Ok(info) => {
-                    if let Some(status) = info.status {
-                        debug!(status = %status, "Pull progress");
-                    }
-                }
-                Err(e) => {
-                    warn!(error = %e, "Pull warning");
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Manually trigger an update check and pull
-    pub async fn check_and_update(&self) -> anyhow::Result<UpdateResult> {
-        let Some(requirement) = self.watcher.get_requirement() else {
-            return Ok(UpdateResult::NoUpdateAvailable);
-        };
-
-        let current = Version::current();
-        if !current.needs_update(&requirement.min_version) {
-            return Ok(UpdateResult::AlreadyUpToDate);
-        }
-
-        self.pull_image(&requirement.docker_image).await?;
-
-        Ok(UpdateResult::Updated {
-            from: current,
-            to: requirement.recommended_version,
-            image: requirement.docker_image,
-        })
-    }
-
-    /// Get the image name for the current version
-    pub fn current_image(&self) -> String {
-        let version = Version::current();
-        format!("cortexlm/platform-validator:{}", version)
-    }
-}
-
-/// Signal to restart the validator
-#[derive(Clone, Debug)]
-pub struct RestartSignal {
-    pub new_version: Version,
-    pub image: String,
-    pub mandatory: bool,
-}
-
-/// Result of an update check
-#[derive(Debug)]
-pub enum UpdateResult {
-    NoUpdateAvailable,
-    AlreadyUpToDate,
-    Updated {
-        from: Version,
-        to: Version,
-        image: String,
-    },
-}
-
-/// Graceful shutdown handler for updates
-pub struct GracefulShutdown {
-    shutdown_sender: broadcast::Sender<()>,
-}
-
-impl GracefulShutdown {
-    pub fn new() -> Self {
-        let (shutdown_sender, _) = broadcast::channel(1);
-        Self { shutdown_sender }
-    }
-
-    pub fn subscribe(&self) -> broadcast::Receiver<()> {
-        self.shutdown_sender.subscribe()
-    }
-
-    pub fn trigger(&self) {
-        let _ = self.shutdown_sender.send(());
-    }
-}
-
-impl Default for GracefulShutdown {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use anyhow::anyhow;
-    use std::sync::Mutex;
-    use tokio::time::{sleep, timeout};
-
-    #[test]
-    fn test_restart_signal() {
-        let signal = RestartSignal {
-            new_version: Version::new(0, 2, 0),
-            image: "cortexlm/validator:0.2.0".to_string(),
-            mandatory: true,
-        };
-
-        assert!(signal.mandatory);
-        assert_eq!(signal.new_version.to_string(), "0.2.0");
-    }
-
-    #[test]
-    fn test_graceful_shutdown() {
-        let shutdown = GracefulShutdown::new();
-        let mut rx = shutdown.subscribe();
-
-        shutdown.trigger();
-
-        assert!(rx.try_recv().is_ok());
-    }
-
-    #[test]
-    fn test_graceful_shutdown_default() {
-        let shutdown: GracefulShutdown = Default::default();
-        let mut rx = shutdown.subscribe();
-        shutdown.trigger();
-        assert!(rx.try_recv().is_ok());
-    }
-
-    fn make_test_docker() -> bollard::Docker {
-        bollard::Docker::connect_with_local_defaults().unwrap()
-    }
-
-    fn make_test_watcher() -> Arc<VersionWatcher> {
-        Arc::new(crate::VersionWatcher::new(Duration::from_secs(60)))
-    }
-
-    #[tokio::test]
-    async fn test_auto_updater_new_initializes_restart_sender() {
-        let watcher = make_test_watcher();
-        let updater = AutoUpdater::new(watcher).await.expect("should construct");
-        assert_eq!(updater.restart_sender.receiver_count(), 0);
-    }
-
-    #[tokio::test]
-    async fn test_subscribe_restart_receives_signal() {
-        let watcher = make_test_watcher();
-        let updater = AutoUpdater::new(watcher).await.expect("should construct");
-        let mut rx = updater.subscribe_restart();
-
-        let signal = RestartSignal {
-            new_version: Version::new(1, 0, 0),
-            image: "test/image:1.0.0".to_string(),
-            mandatory: false,
-        };
-        updater.restart_sender.send(signal.clone()).unwrap();
-
-        let received = rx.recv().await.unwrap();
-        assert_eq!(received.image, signal.image);
-        assert_eq!(received.new_version, signal.new_version);
-    }
-
-    #[tokio::test]
-    async fn test_current_image_format() {
-        let watcher = make_test_watcher();
-        let updater = AutoUpdater {
-            docker: make_test_docker(),
-            watcher: watcher.clone(),
-            restart_sender: tokio::sync::broadcast::channel(1).0,
-            pull_hook: None,
-        };
-        let image = updater.current_image();
-        assert!(image.starts_with("cortexlm/platform-validator:"));
-    }
-
-    #[tokio::test]
-    async fn test_check_and_update_no_update() {
-        let watcher = make_test_watcher();
-        let updater = AutoUpdater {
-            docker: make_test_docker(),
-            watcher: watcher.clone(),
-            restart_sender: tokio::sync::broadcast::channel(1).0,
-            pull_hook: None,
-        };
-        let result = updater.check_and_update().await.unwrap();
-        assert!(matches!(
-            result,
-            UpdateResult::NoUpdateAvailable | UpdateResult::AlreadyUpToDate
-        ));
-    }
-
-    #[tokio::test]
-    async fn test_check_and_update_returns_already_up_to_date() {
-        let watcher = make_test_watcher();
-        let updater = AutoUpdater {
-            docker: make_test_docker(),
-            watcher: watcher.clone(),
-            restart_sender: tokio::sync::broadcast::channel(1).0,
-            pull_hook: None,
-        };
-
-        let requirement = UpdateRequirement {
-            min_version: Version::current(),
-            recommended_version: Version::current(),
-            docker_image: "same/version:latest".to_string(),
-            mandatory: false,
-            deadline_block: None,
-            release_notes: None,
-        };
-        watcher.on_version_update(requirement);
-
-        let result = updater.check_and_update().await.unwrap();
-        assert!(matches!(result, UpdateResult::AlreadyUpToDate));
-    }
-
-    #[tokio::test]
-    async fn test_handle_update_already_up_to_date() {
-        let watcher = make_test_watcher();
-        let updater = AutoUpdater {
-            docker: make_test_docker(),
-            watcher: watcher.clone(),
-            restart_sender: tokio::sync::broadcast::channel(1).0,
-            pull_hook: None,
-        };
-        let req = UpdateRequirement {
-            min_version: Version::current(),
-            recommended_version: Version::current(),
-            docker_image: "test-image:latest".to_string(),
-            mandatory: false,
-            deadline_block: None,
-            release_notes: None,
-        };
-        let result = updater.handle_update(req).await;
-        assert!(result.is_ok());
-    }
-
-    fn future_version() -> Version {
-        let current = Version::current();
-        if current.patch < u32::MAX {
-            Version::new(current.major, current.minor, current.patch + 1)
-        } else if current.minor < u32::MAX {
-            Version::new(current.major, current.minor + 1, 0)
-        } else if current.major < u32::MAX {
-            Version::new(current.major + 1, 0, 0)
-        } else {
-            current
-        }
-    }
-
-    fn make_future_requirement(image: &str, mandatory: bool) -> UpdateRequirement {
-        let future = future_version();
-        UpdateRequirement {
-            min_version: future.clone(),
-            recommended_version: future,
-            docker_image: image.to_string(),
-            mandatory,
-            deadline_block: None,
-            release_notes: None,
-        }
-    }
-
-    #[tokio::test]
-    async fn test_handle_update_triggers_restart_signal() {
-        let watcher = make_test_watcher();
-        let (sender, _) = broadcast::channel(4);
-        let mut updater = AutoUpdater {
-            docker: make_test_docker(),
-            watcher,
-            restart_sender: sender,
-            pull_hook: None,
-        };
-
-        let pull_calls = Arc::new(Mutex::new(Vec::new()));
-        let hook_calls = pull_calls.clone();
-        updater.pull_hook = Some(Arc::new(move |image: &str| {
-            let hook_calls = hook_calls.clone();
-            let image = image.to_string();
-            Box::pin(async move {
-                hook_calls.lock().unwrap().push(image);
-                Ok(())
-            })
-        }));
-
-        let mut rx = updater.restart_sender.subscribe();
-        let requirement = make_future_requirement("test/image:1.0.0", false);
-
-        updater
-            .handle_update(requirement.clone())
-            .await
-            .expect("update should succeed");
-
-        let signal = rx.try_recv().expect("restart signal expected");
-        assert_eq!(signal.image, requirement.docker_image);
-        assert_eq!(signal.new_version, requirement.recommended_version);
-        assert!(!signal.mandatory);
-        assert_eq!(pull_calls.lock().unwrap().len(), 1);
-    }
-
-    #[tokio::test]
-    async fn test_check_and_update_reports_updated() {
-        let watcher = make_test_watcher();
-        let (sender, _) = broadcast::channel(4);
-        let mut updater = AutoUpdater {
-            docker: make_test_docker(),
-            watcher: watcher.clone(),
-            restart_sender: sender,
-            pull_hook: None,
-        };
-
-        let pull_calls = Arc::new(Mutex::new(Vec::new()));
-        let hook_calls = pull_calls.clone();
-        updater.pull_hook = Some(Arc::new(move |image: &str| {
-            let hook_calls = hook_calls.clone();
-            let image = image.to_string();
-            Box::pin(async move {
-                hook_calls.lock().unwrap().push(image);
-                Ok(())
-            })
-        }));
-
-        let requirement = make_future_requirement("test/image:2.0.0", false);
-        watcher.on_version_update(requirement.clone());
-
-        let result = updater
-            .check_and_update()
-            .await
-            .expect("update should succeed");
-
-        match result {
-            UpdateResult::Updated { from, to, image } => {
-                assert_eq!(from, Version::current());
-                assert_eq!(to, requirement.recommended_version);
-                assert_eq!(image, requirement.docker_image);
-            }
-            _ => panic!("expected Updated result"),
-        }
-
-        assert_eq!(pull_calls.lock().unwrap().len(), 1);
-    }
-
-    #[tokio::test]
-    async fn test_handle_update_mandatory_triggers_shutdown() {
-        let watcher = make_test_watcher();
-        let (sender, _) = broadcast::channel(4);
-        let mut updater = AutoUpdater {
-            docker: make_test_docker(),
-            watcher,
-            restart_sender: sender,
-            pull_hook: None,
-        };
-
-        let pull_calls = Arc::new(Mutex::new(Vec::new()));
-        let hook_calls = pull_calls.clone();
-        updater.pull_hook = Some(Arc::new(move |image: &str| {
-            let hook_calls = hook_calls.clone();
-            let image = image.to_string();
-            Box::pin(async move {
-                hook_calls.lock().unwrap().push(image);
-                Ok(())
-            })
-        }));
-
-        let mut rx = updater.restart_sender.subscribe();
-        let requirement = make_future_requirement("test/image:mandatory", true);
-
-        updater
-            .handle_update(requirement.clone())
-            .await
-            .expect("mandatory update should succeed");
-
-        let signal = rx.try_recv().expect("restart signal expected");
-        assert!(signal.mandatory);
-        assert_eq!(signal.image, requirement.docker_image);
-        assert_eq!(pull_calls.lock().unwrap().len(), 1);
-    }
-
-    #[tokio::test]
-    async fn test_handle_update_propagates_pull_error() {
-        let watcher = make_test_watcher();
-        let (sender, _) = broadcast::channel(4);
-        let mut updater = AutoUpdater {
-            docker: make_test_docker(),
-            watcher,
-            restart_sender: sender,
-            pull_hook: None,
-        };
-
-        updater.pull_hook = Some(Arc::new(|_| {
-            Box::pin(async { Err(anyhow!("pull failed")) })
-        }));
-
-        let requirement = make_future_requirement("test/image:error", false);
-        let result = updater.handle_update(requirement).await;
-        assert!(result.is_err());
-    }
-
-    #[tokio::test]
-    async fn test_pull_image_uses_test_hook() {
-        let watcher = make_test_watcher();
-        let (sender, _) = broadcast::channel(1);
-        let mut updater = AutoUpdater {
-            docker: make_test_docker(),
-            watcher,
-            restart_sender: sender,
-            pull_hook: None,
-        };
-
-        let pull_calls = Arc::new(Mutex::new(Vec::new()));
-        let hook_calls = pull_calls.clone();
-        updater.pull_hook = Some(Arc::new(move |image: &str| {
-            let hook_calls = hook_calls.clone();
-            let image = image.to_string();
-            Box::pin(async move {
-                hook_calls.lock().unwrap().push(image);
-                Ok(())
-            })
-        }));
-
-        updater.pull_image("hook/test:1.0.0").await.unwrap();
-        assert_eq!(pull_calls.lock().unwrap().as_slice(), ["hook/test:1.0.0"]);
-    }
-
-    #[tokio::test]
-    async fn test_start_processes_incoming_requirements() {
-        let watcher = make_test_watcher();
-        let (sender, _) = broadcast::channel(4);
-        let mut updater = AutoUpdater {
-            docker: make_test_docker(),
-            watcher: watcher.clone(),
-            restart_sender: sender,
-            pull_hook: None,
-        };
-
-        let pull_calls = Arc::new(Mutex::new(Vec::new()));
-        let hook_calls = pull_calls.clone();
-        updater.pull_hook = Some(Arc::new(move |image: &str| {
-            let hook_calls = hook_calls.clone();
-            let image = image.to_string();
-            Box::pin(async move {
-                hook_calls.lock().unwrap().push(image);
-                Ok(())
-            })
-        }));
-
-        let updater = Arc::new(updater);
-        let mut rx = updater.subscribe_restart();
-        let requirement = make_future_requirement("start/test:1.0", false);
-
-        let updater_clone = updater.clone();
-        tokio::spawn(async move {
-            updater_clone.start().await;
-        });
-
-        sleep(Duration::from_millis(10)).await;
-        watcher.on_version_update(requirement.clone());
-
-        let received = timeout(Duration::from_secs(2), rx.recv())
-            .await
-            .expect("restart signal in time")
-            .expect("channel open");
-        assert_eq!(received.image, requirement.docker_image);
-        assert_eq!(pull_calls.lock().unwrap().len(), 1);
-    }
-}
diff --git a/crates/auto-updater/src/version.rs b/crates/auto-updater/src/version.rs
deleted file mode 100644
index 636caf3f..00000000
--- a/crates/auto-updater/src/version.rs
+++ /dev/null
@@ -1,185 +0,0 @@
-//! Version management
-
-use serde::{Deserialize, Serialize};
-use std::cmp::Ordering;
-use std::fmt;
-
-/// Semantic version
-#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
-pub struct Version {
-    pub major: u32,
-    pub minor: u32,
-    pub patch: u32,
-}
-
-impl Version {
-    pub const fn new(major: u32, minor: u32, patch: u32) -> Self {
-        Self {
-            major,
-            minor,
-            patch,
-        }
-    }
-
-    /// Current validator version (compile-time)
-    pub fn current() -> Self {
-        Self {
-            major: platform_core::PROTOCOL_VERSION_MAJOR,
-            minor: platform_core::PROTOCOL_VERSION_MINOR,
-            patch: platform_core::PROTOCOL_VERSION_PATCH,
-        }
-    }
-
-    /// Parse from string (e.g., "0.1.0")
-    pub fn parse(s: &str) -> Option<Self> {
-        let parts: Vec<&str> = s.trim_start_matches('v').split('.').collect();
-        if parts.len() != 3 {
-            return None;
-        }
-
-        Some(Self {
-            major: parts[0].parse().ok()?,
-            minor: parts[1].parse().ok()?,
-            patch: parts[2].parse().ok()?,
-        })
-    }
-
-    /// Check if this version is compatible with a minimum required version
-    pub fn is_compatible_with(&self, min_version: &Version) -> bool {
-        self >= min_version
-    }
-
-    /// Check if update is available
-    pub fn needs_update(&self, required: &Version) -> bool {
-        self < required
-    }
-}
-
-impl fmt::Display for Version {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}.{}.{}", self.major, self.minor, self.patch)
-    }
-}
-
-impl PartialOrd for Version {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-impl Ord for Version {
-    fn cmp(&self, other: &Self) -> Ordering {
-        match self.major.cmp(&other.major) {
-            Ordering::Equal => match self.minor.cmp(&other.minor) {
-                Ordering::Equal => self.patch.cmp(&other.patch),
-                ord => ord,
-            },
-            ord => ord,
-        }
-    }
-}
-
-impl Default for Version {
-    fn default() -> Self {
-        Self::current()
-    }
-}
-
-/// Update requirements from network
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct UpdateRequirement {
-    /// Minimum required version
-    pub min_version: Version,
-    /// Recommended version (latest stable)
-    pub recommended_version: Version,
-    /// Docker image for the update
-    pub docker_image: String,
-    /// Whether update is mandatory (disconnect if not updated)
-    pub mandatory: bool,
-    /// Deadline for mandatory update (Bittensor block height)
-    pub deadline_block: Option<u64>,
-    /// Release notes
-    pub release_notes: Option<String>,
-}
-
-/// Update status
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum UpdateStatus {
-    /// Running latest version
-    UpToDate,
-    /// Update available but not mandatory
-    UpdateAvailable { version: Version },
-    /// Mandatory update required
-    UpdateRequired {
-        version: Version,
-        deadline_block: Option<u64>,
-    },
-    /// Version check in progress
-    Checking,
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_version_parse() {
-        let v = Version::parse("1.2.3").unwrap();
-        assert_eq!(v.major, 1);
-        assert_eq!(v.minor, 2);
-        assert_eq!(v.patch, 3);
-
-        let v = Version::parse("v0.1.0").unwrap();
-        assert_eq!(v.major, 0);
-        assert_eq!(v.minor, 1);
-        assert_eq!(v.patch, 0);
-    }
-
-    #[test]
-    fn test_version_default_matches_current() {
-        assert_eq!(Version::default(), Version::current());
-    }
-
-    #[test]
-    fn test_version_parse_invalid_inputs() {
-        assert!(Version::parse("1.2").is_none());
-        assert!(Version::parse("1.2.3.4").is_none());
-        assert!(Version::parse("a.b.c").is_none());
-    }
-
-    #[test]
-    fn test_version_display() {
-        let v = Version::new(1, 2, 3);
-        assert_eq!(v.to_string(), "1.2.3");
-    }
-
-    #[test]
-    fn test_version_ordering() {
-        let v1 = Version::new(0, 1, 0);
-        let v2 = Version::new(0, 2, 0);
-        let v3 = Version::new(1, 0, 0);
-
-        assert!(v1 < v2);
-        assert!(v2 < v3);
-        assert!(v1 < v3);
-    }
-
-    #[test]
-    fn test_version_compatibility() {
-        let current = Version::new(0, 2, 0);
-        let min = Version::new(0, 1, 0);
-        let future = Version::new(0, 3, 0);
-
-        assert!(current.is_compatible_with(&min));
-        assert!(!current.is_compatible_with(&future));
-    }
-
-    #[test]
-    fn test_needs_update() {
-        let current = Version::new(0, 1, 0);
-        let required = Version::new(0, 2, 0);
-
-        assert!(current.needs_update(&required));
-        assert!(!required.needs_update(&current));
-    }
-}
diff --git a/crates/auto-updater/src/watcher.rs b/crates/auto-updater/src/watcher.rs
deleted file mode 100644
index f09ccfed..00000000
--- a/crates/auto-updater/src/watcher.rs
+++ /dev/null
@@ -1,229 +0,0 @@
-//! Version watcher - monitors network for required version changes
-
-use crate::{UpdateRequirement, UpdateStatus, Version};
-use parking_lot::RwLock;
-use std::sync::Arc;
-use std::time::Duration;
-use tokio::sync::broadcast;
-use tracing::info;
-
-/// Watches for version updates from the network
-#[allow(dead_code)]
-pub struct VersionWatcher {
-    current_version: Version,
-    required_version: Arc<RwLock<Option<UpdateRequirement>>>,
-    update_sender: broadcast::Sender<UpdateRequirement>,
-    check_interval: Duration,
-}
-
-impl VersionWatcher {
-    pub fn new(check_interval: Duration) -> Self {
-        let (update_sender, _) = broadcast::channel(16);
-
-        Self {
-            current_version: Version::current(),
-            required_version: Arc::new(RwLock::new(None)),
-            update_sender,
-            check_interval,
-        }
-    }
-
-    /// Subscribe to update notifications
-    pub fn subscribe(&self) -> broadcast::Receiver<UpdateRequirement> {
-        self.update_sender.subscribe()
-    }
-
-    /// Called when network state updates with new version requirement
-    pub fn on_version_update(&self, requirement: UpdateRequirement) {
-        let current_required = self.required_version.read().clone();
-
-        // Check if this is a new requirement
-        let is_new = current_required
-            .as_ref()
-            .map(|r| r.min_version != requirement.min_version)
-            .unwrap_or(true);
-
-        if is_new {
-            info!(
-                current = %self.current_version,
-                required = %requirement.min_version,
-                mandatory = requirement.mandatory,
-                "New version requirement received"
-            );
-
-            *self.required_version.write() = Some(requirement.clone());
-
-            // Notify subscribers
-            let _ = self.update_sender.send(requirement);
-        }
-    }
-
-    /// Get current update status
-    pub fn status(&self) -> UpdateStatus {
-        let Some(requirement) = self.required_version.read().clone() else {
-            return UpdateStatus::UpToDate;
-        };
-
-        if !self.current_version.needs_update(&requirement.min_version) {
-            return UpdateStatus::UpToDate;
-        }
-
-        if requirement.mandatory {
-            UpdateStatus::UpdateRequired {
-                version: requirement.recommended_version,
-                deadline_block: requirement.deadline_block,
-            }
-        } else {
-            UpdateStatus::UpdateAvailable {
-                version: requirement.recommended_version,
-            }
-        }
-    }
-
-    /// Check if update is required
-    pub fn needs_update(&self) -> bool {
-        matches!(
-            self.status(),
-            UpdateStatus::UpdateRequired { .. } | UpdateStatus::UpdateAvailable { .. }
-        )
-    }
-
-    /// Check if update is mandatory
-    pub fn is_mandatory(&self) -> bool {
-        matches!(self.status(), UpdateStatus::UpdateRequired { .. })
-    }
-
-    /// Get required version info
-    pub fn get_requirement(&self) -> Option<UpdateRequirement> {
-        self.required_version.read().clone()
-    }
-
-    /// Get current version
-    pub fn current_version(&self) -> &Version {
-        &self.current_version
-    }
-}
-
-/// Builder for VersionWatcher
-pub struct VersionWatcherBuilder {
-    check_interval: Duration,
-}
-
-impl VersionWatcherBuilder {
-    pub fn new() -> Self {
-        Self {
-            check_interval: Duration::from_secs(60),
-        }
-    }
-
-    pub fn check_interval(mut self, interval: Duration) -> Self {
-        self.check_interval = interval;
-        self
-    }
-
-    pub fn build(self) -> VersionWatcher {
-        VersionWatcher::new(self.check_interval)
-    }
-}
-
-impl Default for VersionWatcherBuilder {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    fn make_requirement(major: u32, minor: u32, patch: u32, mandatory: bool) -> UpdateRequirement {
-        let version = Version::new(major, minor, patch);
-        UpdateRequirement {
-            min_version: version.clone(),
-            recommended_version: version,
-            docker_image: "cortexlm/validator:latest".to_string(),
-            mandatory,
-            deadline_block: None,
-            release_notes: None,
-        }
-    }
-
-    #[test]
-    fn test_watcher_initial_status() {
-        let watcher = VersionWatcher::new(Duration::from_secs(60));
-        assert_eq!(watcher.status(), UpdateStatus::UpToDate);
-        assert!(!watcher.needs_update());
-    }
-
-    #[test]
-    fn test_watcher_current_version_accessor() {
-        let watcher = VersionWatcher::new(Duration::from_secs(60));
-        assert_eq!(watcher.current_version(), &Version::current());
-    }
-
-    #[test]
-    fn test_watcher_builder_custom_interval() {
-        let watcher = VersionWatcherBuilder::new()
-            .check_interval(Duration::from_secs(10))
-            .build();
-        assert_eq!(watcher.check_interval, Duration::from_secs(10));
-    }
-
-    #[test]
-    fn test_watcher_builder_default() {
-        let watcher = VersionWatcherBuilder::default().build();
-        assert_eq!(watcher.check_interval, Duration::from_secs(60));
-    }
-
-    #[test]
-    fn test_watcher_status_up_to_date_after_equal_requirement() {
-        let watcher = VersionWatcher::new(Duration::from_secs(60));
-        let requirement = UpdateRequirement {
-            min_version: watcher.current_version().clone(),
-            recommended_version: watcher.current_version().clone(),
-            docker_image: "same".to_string(),
-            mandatory: false,
-            deadline_block: None,
-            release_notes: None,
-        };
-        watcher.on_version_update(requirement);
-        assert_eq!(watcher.status(), UpdateStatus::UpToDate);
-    }
-
-    #[test]
-    fn test_watcher_update_available() {
-        let watcher = VersionWatcher::new(Duration::from_secs(60));
-
-        // Set a higher required version (non-mandatory)
-        let requirement = make_requirement(99, 0, 0, false);
-        watcher.on_version_update(requirement);
-
-        assert!(watcher.needs_update());
-        assert!(!watcher.is_mandatory());
-    }
-
-    #[test]
-    fn test_watcher_mandatory_update() {
-        let watcher = VersionWatcher::new(Duration::from_secs(60));
-
-        // Set a higher required version (mandatory)
-        let requirement = make_requirement(99, 0, 0, true);
-        watcher.on_version_update(requirement);
-
-        assert!(watcher.needs_update());
-        assert!(watcher.is_mandatory());
-    }
-
-    #[test]
-    fn test_watcher_subscribe() {
-        let watcher = VersionWatcher::new(Duration::from_secs(60));
-        let mut rx = watcher.subscribe();
-
-        let requirement = make_requirement(99, 0, 0, true);
-        watcher.on_version_update(requirement.clone());
-
-        // Should receive the update
-        let received = rx.try_recv().unwrap();
-        assert_eq!(received.min_version, requirement.min_version);
-    }
-}
diff --git a/crates/distributed-db/Cargo.toml b/crates/distributed-db/Cargo.toml
deleted file mode 100644
index af59471a..00000000
--- a/crates/distributed-db/Cargo.toml
+++ /dev/null
@@ -1,40 +0,0 @@
-[package]
-name = "distributed-db"
-version.workspace = true
-edition.workspace = true
-description = "Decentralized storage for platform with optimistic execution and Merkle state roots"
-
-[dependencies]
-# Local crates
-platform-core = { path = "../core" }
-
-# Database
-rocksdb = "0.22"
-
-# Merkle/Hashing
-sha2 = { workspace = true }
-blake3 = "1.5"
-
-# Serialization
-serde = { workspace = true }
-serde_json = { workspace = true }
-bincode = { workspace = true }
-
-# Async
-tokio = { workspace = true }
-async-trait = { workspace = true }
-futures = { workspace = true }
-
-# Utils
-parking_lot = { workspace = true }
-tracing = { workspace = true }
-thiserror = { workspace = true }
-anyhow = { workspace = true }
-hex = { workspace = true }
-chrono = { workspace = true }
-uuid = { workspace = true }
-rand = { workspace = true }
-
-[dev-dependencies]
-tempfile = "3"
-tokio-test = "0.4"
diff --git a/crates/distributed-db/src/indexes.rs b/crates/distributed-db/src/indexes.rs
deleted file mode 100644
index d3b7a09d..00000000
--- a/crates/distributed-db/src/indexes.rs
+++ /dev/null
@@ -1,954 +0,0 @@
-//! Secondary indexes for fast queries
-//!
-//! Provides indexed access to data by:
-//! - Collection
-//! - Field values (JSON)
-//! - Range queries
-//! - Full-text search (basic)
-
-use crate::storage::{RocksStorage, CF_INDEXES};
-use std::sync::Arc;
-use tracing::debug;
-
-/// Index definition
-#[derive(Debug, Clone)]
-pub struct IndexDef {
-    pub name: String,
-    pub collection: String,
-    pub field: String,
-    pub index_type: IndexType,
-}
-
-/// Type of index
-#[derive(Debug, Clone, Copy)]
-pub enum IndexType {
-    /// Hash index for equality lookups
-    Hash,
-    /// B-tree index for range queries
-    BTree,
-    /// Full-text index for text search
-    FullText,
-}
-
-/// Index manager
-pub struct IndexManager {
-    storage: Arc<RocksStorage>,
-    indexes: Vec<IndexDef>,
-}
-
-impl IndexManager {
-    /// Create a new index manager
-    pub fn new(storage: Arc<RocksStorage>) -> anyhow::Result<Self> {
-        let mut manager = Self {
-            storage,
-            indexes: Vec::new(),
-        };
-
-        // Define default indexes
-        manager.add_index(IndexDef {
-            name: "challenges_by_name".to_string(),
-            collection: "challenges".to_string(),
-            field: "name".to_string(),
-            index_type: IndexType::Hash,
-        });
-
-        manager.add_index(IndexDef {
-            name: "agents_by_challenge".to_string(),
-            collection: "agents".to_string(),
-            field: "challenge_id".to_string(),
-            index_type: IndexType::Hash,
-        });
-
-        manager.add_index(IndexDef {
-            name: "evaluations_by_agent".to_string(),
-            collection: "evaluations".to_string(),
-            field: "agent_hash".to_string(),
-            index_type: IndexType::Hash,
-        });
-
-        manager.add_index(IndexDef {
-            name: "evaluations_by_score".to_string(),
-            collection: "evaluations".to_string(),
-            field: "score".to_string(),
-            index_type: IndexType::BTree,
-        });
-
-        manager.add_index(IndexDef {
-            name: "weights_by_block".to_string(),
-            collection: "weights".to_string(),
-            field: "block".to_string(),
-            index_type: IndexType::BTree,
-        });
-
-        Ok(manager)
-    }
-
-    /// Add an index definition
-    pub fn add_index(&mut self, index: IndexDef) {
-        self.indexes.push(index);
-    }
-
-    /// Index an entry
-    pub fn index_entry(&self, collection: &str, key: &[u8], value: &[u8]) -> anyhow::Result<()> {
-        // Try to parse value as JSON
-        let json: serde_json::Value = match serde_json::from_slice(value) {
-            Ok(v) => v,
-            Err(_) => return Ok(()), // Not JSON, skip indexing
-        };
-
-        for index in &self.indexes {
-            if index.collection != collection {
-                continue;
-            }
-
-            // Extract field value
-            if let Some(field_value) = json.get(&index.field) {
-                let index_key = self.build_index_key(&index.name, field_value, key);
-                self.storage.put(CF_INDEXES, &index_key, key)?;
-                debug!(
-                    "Indexed {}.{} for key {:?}",
-                    collection,
-                    index.field,
-                    hex::encode(&key[..key.len().min(8)])
-                );
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Remove index entries for a key
-    pub fn remove_entry(&self, collection: &str, key: &[u8]) -> anyhow::Result<()> {
-        // Get existing value to extract indexed fields
-        if let Some(value) = self.storage.get(collection, key)? {
-            if let Ok(json) = serde_json::from_slice::<serde_json::Value>(&value) {
-                for index in &self.indexes {
-                    if index.collection != collection {
-                        continue;
-                    }
-
-                    if let Some(field_value) = json.get(&index.field) {
-                        let index_key = self.build_index_key(&index.name, field_value, key);
-                        self.storage.delete(CF_INDEXES, &index_key)?;
-                    }
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Build index key
-    fn build_index_key(
-        &self,
-        index_name: &str,
-        field_value: &serde_json::Value,
-        primary_key: &[u8],
-    ) -> Vec<u8> {
-        let value_str = match field_value {
-            serde_json::Value::String(s) => s.clone(),
-            serde_json::Value::Number(n) => format!("{:020}", n.as_f64().unwrap_or(0.0) as i64),
-            serde_json::Value::Bool(b) => b.to_string(),
-            _ => field_value.to_string(),
-        };
-
-        format!("{}:{}:{}", index_name, value_str, hex::encode(primary_key)).into_bytes()
-    }
-
-    /// Execute a query
-    pub fn execute_query(&self, query: Query) -> anyhow::Result<QueryResult> {
-        let start = std::time::Instant::now();
-
-        let results = match &query.filter {
-            Some(filter) => self.query_with_filter(&query.collection, filter, query.limit)?,
-            None => self.query_all(&query.collection, query.limit)?,
-        };
-
-        let total_count = results.len();
-        Ok(QueryResult {
-            entries: results,
-            execution_time_us: start.elapsed().as_micros() as u64,
-            total_count,
-        })
-    }
-
-    /// Query all entries in a collection
-    fn query_all(&self, collection: &str, limit: Option<usize>) -> anyhow::Result<Vec<QueryEntry>> {
-        let entries = self.storage.iter_collection(collection)?;
-        let limit = limit.unwrap_or(1000);
-
-        Ok(entries
-            .into_iter()
-            .take(limit)
-            .map(|(key, value)| QueryEntry { key, value })
-            .collect())
-    }
-
-    /// Query with a filter
-    fn query_with_filter(
-        &self,
-        collection: &str,
-        filter: &Filter,
-        limit: Option<usize>,
-    ) -> anyhow::Result<Vec<QueryEntry>> {
-        // Find matching index
-        let index = self
-            .indexes
-            .iter()
-            .find(|i| i.collection == collection && i.field == filter.field);
-
-        match index {
-            Some(idx) => self.query_indexed(idx, filter, limit),
-            None => self.query_scan(collection, filter, limit),
-        }
-    }
-
-    /// Query using an index
-    fn query_indexed(
-        &self,
-        index: &IndexDef,
-        filter: &Filter,
-        limit: Option<usize>,
-    ) -> anyhow::Result<Vec<QueryEntry>> {
-        let limit = limit.unwrap_or(1000);
-
-        match &filter.op {
-            FilterOp::Eq(value) => {
-                let prefix = format!("{}:{}:", index.name, value);
-                let index_entries = self.storage.iter_prefix(CF_INDEXES, prefix.as_bytes())?;
-
-                let mut results = Vec::new();
-                for (_, primary_key) in index_entries.into_iter().take(limit) {
-                    if let Some(value) = self.storage.get(&index.collection, &primary_key)? {
-                        results.push(QueryEntry {
-                            key: primary_key,
-                            value,
-                        });
-                    }
-                }
-
-                Ok(results)
-            }
-            FilterOp::Gt(value)
-            | FilterOp::Gte(value)
-            | FilterOp::Lt(value)
-            | FilterOp::Lte(value) => {
-                // Range query - iterate index entries
-                let prefix = format!("{}:", index.name);
-                let index_entries = self.storage.iter_prefix(CF_INDEXES, prefix.as_bytes())?;
-
-                let mut results = Vec::new();
-                for (index_key, primary_key) in index_entries {
-                    // Parse index key to extract value
-                    let key_str = String::from_utf8_lossy(&index_key);
-                    let parts: Vec<&str> = key_str.split(':').collect();
-                    if parts.len() < 2 {
-                        continue;
-                    }
-
-                    let indexed_value = parts[1];
-                    let matches = match &filter.op {
-                        FilterOp::Gt(v) => indexed_value > v.as_str(),
-                        FilterOp::Gte(v) => indexed_value >= v.as_str(),
-                        FilterOp::Lt(v) => indexed_value < v.as_str(),
-                        FilterOp::Lte(v) => indexed_value <= v.as_str(),
-                        _ => false,
-                    };
-
-                    if matches {
-                        if let Some(value) = self.storage.get(&index.collection, &primary_key)? {
-                            results.push(QueryEntry {
-                                key: primary_key,
-                                value,
-                            });
-                            if results.len() >= limit {
-                                break;
-                            }
-                        }
-                    }
-                }
-
-                Ok(results)
-            }
-            FilterOp::In(values) => {
-                let mut results = Vec::new();
-                for value in values {
-                    let prefix = format!("{}:{}:", index.name, value);
-                    let index_entries = self.storage.iter_prefix(CF_INDEXES, prefix.as_bytes())?;
-
-                    for (_, primary_key) in index_entries {
-                        if let Some(value) = self.storage.get(&index.collection, &primary_key)? {
-                            results.push(QueryEntry {
-                                key: primary_key,
-                                value,
-                            });
-                            if results.len() >= limit {
-                                return Ok(results);
-                            }
-                        }
-                    }
-                }
-
-                Ok(results)
-            }
-            FilterOp::Contains(_) => {
-                // Full scan for contains
-                self.query_scan(&index.collection, filter, Some(limit))
-            }
-        }
-    }
-
-    /// Query with full scan (no index)
-    fn query_scan(
-        &self,
-        collection: &str,
-        filter: &Filter,
-        limit: Option<usize>,
-    ) -> anyhow::Result<Vec<QueryEntry>> {
-        let limit = limit.unwrap_or(1000);
-        let entries = self.storage.iter_collection(collection)?;
-
-        let mut results = Vec::new();
-        for (key, value) in entries {
-            // Try to parse as JSON and filter
-            if let Ok(json) = serde_json::from_slice::<serde_json::Value>(&value) {
-                if self.matches_filter(&json, filter) {
-                    results.push(QueryEntry { key, value });
-                    if results.len() >= limit {
-                        break;
-                    }
-                }
-            }
-        }
-
-        Ok(results)
-    }
-
-    /// Check if a JSON value matches a filter
-    fn matches_filter(&self, json: &serde_json::Value, filter: &Filter) -> bool {
-        let field_value = match json.get(&filter.field) {
-            Some(v) => v,
-            None => return false,
-        };
-
-        let field_str = match field_value {
-            serde_json::Value::String(s) => s.clone(),
-            serde_json::Value::Number(n) => n.to_string(),
-            serde_json::Value::Bool(b) => b.to_string(),
-            _ => field_value.to_string(),
-        };
-
-        match &filter.op {
-            FilterOp::Eq(v) => &field_str == v,
-            FilterOp::Gt(v) => &field_str > v,
-            FilterOp::Gte(v) => &field_str >= v,
-            FilterOp::Lt(v) => &field_str < v,
-            FilterOp::Lte(v) => &field_str <= v,
-            FilterOp::In(values) => values.contains(&field_str),
-            FilterOp::Contains(v) => field_str.contains(v.as_str()),
-        }
-    }
-}
-
-/// Query definition
-#[derive(Debug, Clone)]
-pub struct Query {
-    pub collection: String,
-    pub filter: Option<Filter>,
-    pub order_by: Option<OrderBy>,
-    pub limit: Option<usize>,
-    pub offset: Option<usize>,
-}
-
-impl Query {
-    pub fn new(collection: impl Into<String>) -> Self {
-        Self {
-            collection: collection.into(),
-            filter: None,
-            order_by: None,
-            limit: None,
-            offset: None,
-        }
-    }
-
-    pub fn filter(mut self, filter: Filter) -> Self {
-        self.filter = Some(filter);
-        self
-    }
-
-    pub fn order_by(mut self, field: impl Into<String>, desc: bool) -> Self {
-        self.order_by = Some(OrderBy {
-            field: field.into(),
-            descending: desc,
-        });
-        self
-    }
-
-    pub fn limit(mut self, limit: usize) -> Self {
-        self.limit = Some(limit);
-        self
-    }
-
-    pub fn offset(mut self, offset: usize) -> Self {
-        self.offset = Some(offset);
-        self
-    }
-}
-
-/// Query filter
-#[derive(Debug, Clone)]
-pub struct Filter {
-    pub field: String,
-    pub op: FilterOp,
-}
-
-impl Filter {
-    pub fn eq(field: impl Into<String>, value: impl Into<String>) -> Self {
-        Self {
-            field: field.into(),
-            op: FilterOp::Eq(value.into()),
-        }
-    }
-
-    pub fn gt(field: impl Into<String>, value: impl Into<String>) -> Self {
-        Self {
-            field: field.into(),
-            op: FilterOp::Gt(value.into()),
-        }
-    }
-
-    pub fn contains(field: impl Into<String>, value: impl Into<String>) -> Self {
-        Self {
-            field: field.into(),
-            op: FilterOp::Contains(value.into()),
-        }
-    }
-}
-
-/// Filter operation
-#[derive(Debug, Clone)]
-pub enum FilterOp {
-    Eq(String),
-    Gt(String),
-    Gte(String),
-    Lt(String),
-    Lte(String),
-    In(Vec<String>),
-    Contains(String),
-}
-
-/// Order by clause
-#[derive(Debug, Clone)]
-pub struct OrderBy {
-    pub field: String,
-    pub descending: bool,
-}
-
-/// Query result
-#[derive(Debug, Clone)]
-pub struct QueryResult {
-    pub entries: Vec<QueryEntry>,
-    pub execution_time_us: u64,
-    pub total_count: usize,
-}
-
-/// Query result entry
-#[derive(Debug, Clone)]
-pub struct QueryEntry {
-    pub key: Vec<u8>,
-    pub value: Vec<u8>,
-}
-
-impl QueryEntry {
-    /// Parse value as JSON
-    pub fn as_json(&self) -> Option<serde_json::Value> {
-        serde_json::from_slice(&self.value).ok()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::storage::RocksStorage;
-    use crate::test_utils::*;
-    use tempfile::tempdir;
-
-    #[test]
-    fn test_indexing() {
-        let dir = tempdir().unwrap();
-        let storage = Arc::new(RocksStorage::open(dir.path()).unwrap());
-        let indexes = IndexManager::new(storage.clone()).unwrap();
-
-        // Store a challenge
-        let challenge = serde_json::json!({
-            "id": "test-challenge",
-            "name": "Terminal Benchmark",
-            "mechanism_id": 0
-        });
-
-        let key = b"test-challenge";
-        let value = serde_json::to_vec(&challenge).unwrap();
-
-        storage.put("challenges", key, &value).unwrap();
-        indexes.index_entry("challenges", key, &value).unwrap();
-
-        // Query by name
-        let query = Query::new("challenges").filter(Filter::eq("name", "Terminal Benchmark"));
-        let result = indexes.execute_query(query).unwrap();
-        assert_eq!(result.entries.len(), 1);
-    }
-
-    #[test]
-    fn test_index_manager_new() {
-        let (indexes, _, _dir) = create_test_index_manager();
-        assert_eq!(indexes.indexes.len(), 5);
-    }
-
-    #[test]
-    fn test_add_index() {
-        let (mut indexes, _, _dir) = create_test_index_manager();
-        let count_before = indexes.indexes.len();
-
-        indexes.add_index(IndexDef {
-            name: "test_index".to_string(),
-            collection: "test".to_string(),
-            field: "field".to_string(),
-            index_type: IndexType::Hash,
-        });
-
-        assert_eq!(indexes.indexes.len(), count_before + 1);
-    }
-
-    #[test]
-    fn test_query_eq_filter() {
-        let (indexes, storage, _dir) = create_test_index_manager();
-
-        let challenge1 = serde_json::json!({"name": "Challenge1", "mechanism_id": 1});
-        let challenge2 = serde_json::json!({"name": "Challenge2", "mechanism_id": 2});
-
-        storage
-            .put(
-                "challenges",
-                b"c1",
-                &serde_json::to_vec(&challenge1).unwrap(),
-            )
-            .unwrap();
-        storage
-            .put(
-                "challenges",
-                b"c2",
-                &serde_json::to_vec(&challenge2).unwrap(),
-            )
-            .unwrap();
-
-        indexes
-            .index_entry(
-                "challenges",
-                b"c1",
&serde_json::to_vec(&challenge1).unwrap(), - ) - .unwrap(); - indexes - .index_entry( - "challenges", - b"c2", - &serde_json::to_vec(&challenge2).unwrap(), - ) - .unwrap(); - - let query = Query::new("challenges").filter(Filter::eq("name", "Challenge1")); - let result = indexes.execute_query(query).unwrap(); - assert_eq!(result.entries.len(), 1); - assert_eq!(result.total_count, 1); - } - - #[test] - fn test_query_gt_filter() { - let (indexes, storage, _dir) = create_test_index_manager(); - - for i in 1..=5 { - let eval = serde_json::json!({"agent_hash": format!("agent{}", i), "score": i * 10}); - storage - .put( - "evaluations", - format!("eval{}", i).as_bytes(), - &serde_json::to_vec(&eval).unwrap(), - ) - .unwrap(); - indexes - .index_entry( - "evaluations", - format!("eval{}", i).as_bytes(), - &serde_json::to_vec(&eval).unwrap(), - ) - .unwrap(); - } - - let query = Query::new("evaluations").filter(Filter::gt("score", "00000000000000000030")); - let result = indexes.execute_query(query).unwrap(); - assert!(result.entries.len() >= 2); // scores 40, 50 - } - - #[test] - fn test_query_contains_filter() { - let (indexes, storage, _dir) = create_test_index_manager(); - - let challenge1 = serde_json::json!({"name": "Terminal Benchmark", "mechanism_id": 1}); - let challenge2 = serde_json::json!({"name": "Simple Task", "mechanism_id": 2}); - - storage - .put( - "challenges", - b"c1", - &serde_json::to_vec(&challenge1).unwrap(), - ) - .unwrap(); - storage - .put( - "challenges", - b"c2", - &serde_json::to_vec(&challenge2).unwrap(), - ) - .unwrap(); - - indexes - .index_entry( - "challenges", - b"c1", - &serde_json::to_vec(&challenge1).unwrap(), - ) - .unwrap(); - indexes - .index_entry( - "challenges", - b"c2", - &serde_json::to_vec(&challenge2).unwrap(), - ) - .unwrap(); - - let query = Query::new("challenges").filter(Filter::contains("name", "Terminal")); - let result = indexes.execute_query(query).unwrap(); - assert_eq!(result.entries.len(), 1); - } - - #[test] - fn test_query_in_filter() { - let (indexes, storage, _dir) = create_test_index_manager(); - - for i in 1..=5 { - let agent = serde_json::json!({"challenge_id": format!("ch{}", i), "status": "active"}); - storage - .put( - "agents", - format!("agent{}", i).as_bytes(), - &serde_json::to_vec(&agent).unwrap(), - ) - .unwrap(); - indexes - .index_entry( - "agents", - format!("agent{}", i).as_bytes(), - &serde_json::to_vec(&agent).unwrap(), - ) - .unwrap(); - } - - let query = Query::new("agents").filter(Filter { - field: "challenge_id".to_string(), - op: FilterOp::In(vec!["ch1".to_string(), "ch3".to_string()]), - }); - let result = indexes.execute_query(query).unwrap(); - assert_eq!(result.entries.len(), 2); - } - - #[test] - fn test_query_without_filter() { - let (indexes, storage, _dir) = create_test_index_manager(); - - for i in 1..=3 { - let challenge = serde_json::json!({"name": format!("C{}", i), "mechanism_id": i}); - storage - .put( - "challenges", - format!("c{}", i).as_bytes(), - &serde_json::to_vec(&challenge).unwrap(), - ) - .unwrap(); - } - - let query = Query::new("challenges"); - let result = indexes.execute_query(query).unwrap(); - assert_eq!(result.entries.len(), 3); - assert_eq!(result.total_count, 3); - } - - #[test] - fn test_query_with_limit() { - let (indexes, storage, _dir) = create_test_index_manager(); - - for i in 1..=10 { - let challenge = serde_json::json!({"name": format!("C{}", i), "mechanism_id": i}); - storage - .put( - "challenges", - format!("c{}", i).as_bytes(), - 
&serde_json::to_vec(&challenge).unwrap(), - ) - .unwrap(); - } - - let query = Query::new("challenges").limit(5); - let result = indexes.execute_query(query).unwrap(); - assert!(result.entries.len() <= 5); - } - - #[test] - fn test_remove_entry() { - let (indexes, storage, _dir) = create_test_index_manager(); - - let challenge = serde_json::json!({"name": "Test", "mechanism_id": 1}); - let key = b"test-key"; - let value = serde_json::to_vec(&challenge).unwrap(); - - storage.put("challenges", key, &value).unwrap(); - indexes.index_entry("challenges", key, &value).unwrap(); - - // Verify indexed - let query = Query::new("challenges").filter(Filter::eq("name", "Test")); - let result = indexes.execute_query(query).unwrap(); - assert_eq!(result.entries.len(), 1); - - // Remove entry - indexes.remove_entry("challenges", key).unwrap(); - - // Verify not indexed anymore (note: storage still has the entry) - let result2 = indexes - .execute_query(Query::new("challenges").filter(Filter::eq("name", "Test"))) - .unwrap(); - assert_eq!(result2.entries.len(), 0); - } - - #[test] - fn test_index_non_json_value() { - let (indexes, storage, _dir) = create_test_index_manager(); - - let non_json = b"not json data"; - storage.put("challenges", b"key", non_json).unwrap(); - - // Should not error, just skip indexing - let result = indexes.index_entry("challenges", b"key", non_json); - assert!(result.is_ok()); - } - - #[test] - fn test_query_scan_without_index() { - let (indexes, storage, _dir) = create_test_index_manager(); - - // Use challenges collection but query a field that doesn't have an index - let item = serde_json::json!({"custom_field": "value", "name": "test"}); - storage - .put("challenges", b"key1", &serde_json::to_vec(&item).unwrap()) - .unwrap(); - - // Query a field that doesn't have an index - will do full scan - let query = Query::new("challenges").filter(Filter::eq("custom_field", "value")); - let result = indexes.execute_query(query).unwrap(); - assert_eq!(result.entries.len(), 1); - } - - #[test] - fn test_query_entry_as_json() { - let entry = QueryEntry { - key: b"key".to_vec(), - value: serde_json::to_vec(&serde_json::json!({"field": "value"})).unwrap(), - }; - - let json = entry.as_json().unwrap(); - assert_eq!(json["field"], "value"); - } - - #[test] - fn test_query_entry_as_json_invalid() { - let entry = QueryEntry { - key: b"key".to_vec(), - value: b"not json".to_vec(), - }; - - assert!(entry.as_json().is_none()); - } - - #[test] - fn test_query_builder_methods() { - let query = Query::new("test") - .filter(Filter::eq("field", "value")) - .order_by("field", true) - .limit(10) - .offset(5); - - assert_eq!(query.collection, "test"); - assert!(query.filter.is_some()); - assert!(query.order_by.is_some()); - assert_eq!(query.limit, Some(10)); - assert_eq!(query.offset, Some(5)); - } - - #[test] - fn test_filter_constructors() { - let eq_filter = Filter::eq("field", "value"); - assert_eq!(eq_filter.field, "field"); - match eq_filter.op { - FilterOp::Eq(v) => assert_eq!(v, "value"), - _ => panic!("Expected Eq"), - } - - let gt_filter = Filter::gt("field", "100"); - match gt_filter.op { - FilterOp::Gt(_) => {} - _ => panic!("Expected Gt"), - } - - let contains_filter = Filter::contains("field", "substring"); - match contains_filter.op { - FilterOp::Contains(_) => {} - _ => panic!("Expected Contains"), - } - } - - #[test] - fn test_index_type_variants() { - let _ = IndexType::Hash; - let _ = IndexType::BTree; - let _ = IndexType::FullText; - } - - #[test] - fn test_filter_op_variants() { - 
let _eq = FilterOp::Eq("test".to_string()); - let _gt = FilterOp::Gt("test".to_string()); - let _gte = FilterOp::Gte("test".to_string()); - let _lt = FilterOp::Lt("test".to_string()); - let _lte = FilterOp::Lte("test".to_string()); - let _in = FilterOp::In(vec!["a".to_string(), "b".to_string()]); - let _contains = FilterOp::Contains("test".to_string()); - } - - #[test] - fn test_query_gte_filter() { - let (indexes, storage, _dir) = create_test_index_manager(); - - for i in 1..=3 { - let weight = serde_json::json!({"block": i * 100, "value": i}); - storage - .put( - "weights", - format!("w{}", i).as_bytes(), - &serde_json::to_vec(&weight).unwrap(), - ) - .unwrap(); - indexes - .index_entry( - "weights", - format!("w{}", i).as_bytes(), - &serde_json::to_vec(&weight).unwrap(), - ) - .unwrap(); - } - - let query = Query::new("weights").filter(Filter { - field: "block".to_string(), - op: FilterOp::Gte("00000000000000000200".to_string()), - }); - let result = indexes.execute_query(query).unwrap(); - assert!(result.entries.len() >= 2); // blocks 200, 300 - } - - #[test] - fn test_query_lt_filter() { - let (indexes, storage, _dir) = create_test_index_manager(); - - for i in 1..=3 { - let weight = serde_json::json!({"block": i * 100, "value": i}); - storage - .put( - "weights", - format!("w{}", i).as_bytes(), - &serde_json::to_vec(&weight).unwrap(), - ) - .unwrap(); - indexes - .index_entry( - "weights", - format!("w{}", i).as_bytes(), - &serde_json::to_vec(&weight).unwrap(), - ) - .unwrap(); - } - - let query = Query::new("weights").filter(Filter { - field: "block".to_string(), - op: FilterOp::Lt("00000000000000000200".to_string()), - }); - let result = indexes.execute_query(query).unwrap(); - assert!(result.entries.len() >= 1); // block 100 - } - - #[test] - fn test_query_lte_filter() { - let (indexes, storage, _dir) = create_test_index_manager(); - - for i in 1..=3 { - let weight = serde_json::json!({"block": i * 100, "value": i}); - storage - .put( - "weights", - format!("w{}", i).as_bytes(), - &serde_json::to_vec(&weight).unwrap(), - ) - .unwrap(); - indexes - .index_entry( - "weights", - format!("w{}", i).as_bytes(), - &serde_json::to_vec(&weight).unwrap(), - ) - .unwrap(); - } - - let query = Query::new("weights").filter(Filter { - field: "block".to_string(), - op: FilterOp::Lte("00000000000000000200".to_string()), - }); - let result = indexes.execute_query(query).unwrap(); - assert!(result.entries.len() >= 2); // blocks 100, 200 - } - - #[test] - fn test_matches_filter_with_number() { - let (indexes, _, _dir) = create_test_index_manager(); - - let json = serde_json::json!({"score": 100}); - let filter = Filter::eq("score", "100"); - - assert!(indexes.matches_filter(&json, &filter)); - } - - #[test] - fn test_matches_filter_with_bool() { - let (indexes, _, _dir) = create_test_index_manager(); - - let json = serde_json::json!({"active": true}); - let filter = Filter::eq("active", "true"); - - assert!(indexes.matches_filter(&json, &filter)); - } - - #[test] - fn test_matches_filter_missing_field() { - let (indexes, _, _dir) = create_test_index_manager(); - - let json = serde_json::json!({"other": "value"}); - let filter = Filter::eq("missing_field", "value"); - - assert!(!indexes.matches_filter(&json, &filter)); - } -} diff --git a/crates/distributed-db/src/lib.rs b/crates/distributed-db/src/lib.rs deleted file mode 100644 index 90d26916..00000000 --- a/crates/distributed-db/src/lib.rs +++ /dev/null @@ -1,619 +0,0 @@ -//! Distributed Database for Platform -//! 
-#![allow(dead_code)] -#![allow(unused_variables)] -//! A storage system with: -//! - **Optimistic Execution**: Apply transactions immediately, confirm at Bittensor block -//! - **Merkle State Root**: Verifiable state integrity -//! - **Indexed Queries**: Fast lookups with secondary indexes -//! -//! # Architecture -//! -//! ```text -//! ┌─────────────────────────────────────────────────────────┐ -//! │ DistributedDB │ -//! ├─────────────────────────────────────────────────────────┤ -//! │ ┌─────────────┐ ┌─────────────┐ │ -//! │ │ Storage │ │ Merkle │ │ -//! │ │ (RocksDB) │ │ Trie │ │ -//! │ └─────────────┘ └─────────────┘ │ -//! │ │ │ │ -//! │ └────────────────┘ │ -//! │ │ │ -//! │ ┌───────────▼───────────┐ │ -//! │ │ Transaction Pool │ │ -//! │ │ (Optimistic Exec) │ │ -//! │ └───────────────────────┘ │ -//! └─────────────────────────────────────────────────────────┘ -//! ``` - -pub mod indexes; -pub mod merkle; -pub mod merkle_verification; -pub mod queries; -pub mod state; -pub mod storage; -pub mod transactions; - -#[cfg(test)] -mod test_utils; - -pub use indexes::*; -pub use merkle::*; -pub use merkle_verification::*; -pub use queries::*; -pub use state::*; -pub use storage::*; -pub use transactions::*; - -use parking_lot::RwLock; -use platform_core::Hotkey; -use std::path::Path; -use std::sync::Arc; - -/// Main distributed database instance -pub struct DistributedDB { - /// Local RocksDB storage - storage: Arc<RocksStorage>, - /// Merkle state trie - merkle: Arc<RwLock<MerkleTrie>>, - /// Transaction pool for optimistic execution - tx_pool: Arc<RwLock<TransactionPool>>, - /// Index manager - indexes: Arc<IndexManager>, - /// State manager - state: Arc<RwLock<StateManager>>, - /// Current Bittensor block - current_block: Arc<RwLock<u64>>, - /// Our validator hotkey - validator: Hotkey, -} - -impl DistributedDB { - /// Open or create a distributed database - pub fn open(path: impl AsRef<Path>, validator: Hotkey) -> anyhow::Result<Self> { - let storage = Arc::new(RocksStorage::open(path.as_ref())?); - let merkle = Arc::new(RwLock::new(MerkleTrie::new())); - let tx_pool = Arc::new(RwLock::new(TransactionPool::new())); - let indexes = Arc::new(IndexManager::new(storage.clone())?); - - // Load existing state root from storage - let state_root = storage.get_state_root()?; - let state = Arc::new(RwLock::new(StateManager::new(state_root))); - - // Rebuild merkle trie from storage - let db = Self { - storage, - merkle, - tx_pool, - indexes, - state, - current_block: Arc::new(RwLock::new(0)), - validator, - }; - - db.rebuild_merkle_trie()?; - - Ok(db) - } - - /// Apply a transaction optimistically (immediate local execution) - pub fn apply_optimistic(&self, tx: Transaction) -> anyhow::Result<TransactionReceipt> { - // Validate transaction - tx.validate()?; - - // Execute immediately - let receipt = self.execute_transaction(&tx)?; - - // Add to pending pool (will be confirmed at next block) - self.tx_pool - .write() - .add_pending(tx.clone(), receipt.clone()); - - // Update local state - self.apply_to_state(&tx)?; - - // Update merkle root - self.update_merkle_root()?; - - Ok(receipt) - } - - /// Confirm transactions at a Bittensor block - pub fn confirm_block(&self, block_number: u64) -> anyhow::Result<BlockConfirmation> { - let mut pool = self.tx_pool.write(); - let pending = pool.get_pending_for_block(block_number); - - // Move from pending to confirmed - let confirmed_count = pending.len(); - for (tx, receipt) in pending { - pool.confirm(tx.id(), block_number); - self.storage - .store_confirmed_tx(&tx, &receipt, block_number)?; - } - - // Update current block - *self.current_block.write() = block_number; - - // Persist state root - let
state_root = self.merkle.read().root_hash(); - self.storage.set_state_root(&state_root)?; - - // Cleanup old pending - pool.cleanup_old(block_number.saturating_sub(100)); - - Ok(BlockConfirmation { - block_number, - confirmed_count, - state_root, - }) - } - - /// Get current state root (Merkle root) - pub fn state_root(&self) -> [u8; 32] { - self.merkle.read().root_hash() - } - - /// Get current block number - pub fn current_block(&self) -> u64 { - *self.current_block.read() - } - - /// Query data with filters - pub fn query(&self, query: Query) -> anyhow::Result<QueryResult> { - self.indexes.execute_query(query) - } - - /// Get raw value by key - pub fn get(&self, collection: &str, key: &[u8]) -> anyhow::Result<Option<Vec<u8>>> { - self.storage.get(collection, key) - } - - /// Put raw value - pub fn put(&self, collection: &str, key: &[u8], value: &[u8]) -> anyhow::Result<()> { - self.storage.put(collection, key, value)?; - self.indexes.index_entry(collection, key, value)?; - self.update_merkle_root()?; - Ok(()) - } - - /// Delete value - pub fn delete(&self, collection: &str, key: &[u8]) -> anyhow::Result<()> { - self.storage.delete(collection, key)?; - self.indexes.remove_entry(collection, key)?; - self.update_merkle_root()?; - Ok(()) - } - - /// Rebuild merkle trie from storage - fn rebuild_merkle_trie(&self) -> anyhow::Result<()> { - let mut merkle = self.merkle.write(); - merkle.clear(); - - // Iterate all collections and rebuild trie - for collection in self.storage.list_collections()? { - for (key, value) in self.storage.iter_collection(&collection)? { - let full_key = format!("{}:{}", collection, hex::encode(&key)); - merkle.insert(full_key.as_bytes(), &value); - } - } - - Ok(()) - } - - /// Update merkle root after changes - fn update_merkle_root(&self) -> anyhow::Result<()> { - // Merkle trie is updated incrementally, just recompute root - let root = self.merkle.read().root_hash(); - self.state.write().set_root(root); - Ok(()) - } - - /// Execute a transaction and return receipt - fn execute_transaction(&self, tx: &Transaction) -> anyhow::Result<TransactionReceipt> { - let start = std::time::Instant::now(); - - match &tx.operation { - Operation::Put { - collection, - key, - value, - } => { - self.storage.put(collection, key, value)?; - self.indexes.index_entry(collection, key, value)?; - } - Operation::Delete { collection, key } => { - self.storage.delete(collection, key)?; - self.indexes.remove_entry(collection, key)?; - } - Operation::BatchPut { operations } => { - for (collection, key, value) in operations { - self.storage.put(collection, key, value)?; - self.indexes.index_entry(collection, key, value)?; - } - } - } - - Ok(TransactionReceipt { - tx_id: tx.id(), - success: true, - execution_time_us: start.elapsed().as_micros() as u64, - state_root: self.merkle.read().root_hash(), - }) - } - - /// Apply transaction to local state - fn apply_to_state(&self, tx: &Transaction) -> anyhow::Result<()> { - let mut state = self.state.write(); - state.apply_tx(tx); - Ok(()) - } - - /// Get sync state for peer synchronization - pub fn get_sync_state(&self) -> SyncState { - SyncState { - state_root: self.state_root(), - block_number: self.current_block(), - pending_count: self.tx_pool.read().pending_count(), - } - } - - /// Apply sync data from peer - pub fn apply_sync_data(&self, data: SyncData) -> anyhow::Result<()> { - // Verify state root matches - if data.verify()? { - // Apply missing data - for (collection, key, value) in data.entries { - self.storage.put(&collection, &key, &value)?; - self.indexes.index_entry(&collection, &key, &value)?; - } - self.rebuild_merkle_trie()?; - } - Ok(()) - } -}
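For reference, the lifecycle this impl block implements is: apply_optimistic executes and indexes the write immediately, parks the transaction in the pending pool, and only confirm_block moves it to confirmed storage and persists the merkle root. A minimal usage sketch, assuming the crate builds as deleted above (Transaction::new, Operation::Put, and Hotkey::from_bytes are taken from this file's own tests; the path is illustrative):

```rust
use distributed_db::{DistributedDB, Operation, Transaction};
use platform_core::Hotkey;

fn optimistic_flow() -> anyhow::Result<()> {
    let validator = Hotkey::from_bytes(&[1u8; 32]).unwrap();
    let db = DistributedDB::open("/tmp/ddb-demo", validator.clone())?;

    // Apply immediately: the write is readable before any block confirms it.
    let tx = Transaction::new(
        validator,
        Operation::Put {
            collection: "challenges".to_string(),
            key: b"ch-1".to_vec(),
            value: b"{}".to_vec(),
        },
    );
    let receipt = db.apply_optimistic(tx)?;
    assert!(receipt.success);
    assert!(db.get("challenges", b"ch-1")?.is_some());

    // At the next observed Bittensor block, pending txs are confirmed
    // and the state root is persisted.
    let conf = db.confirm_block(42)?;
    assert_eq!(conf.block_number, 42);
    Ok(())
}
```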
- -/// Block confirmation result -#[derive(Debug, Clone)] -pub struct BlockConfirmation { - pub block_number: u64, - pub confirmed_count: usize, - pub state_root: [u8; 32], -} - -/// Sync state for peer comparison -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -pub struct SyncState { - pub state_root: [u8; 32], - pub block_number: u64, - pub pending_count: usize, -} - -/// Sync data from peer -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -pub struct SyncData { - pub state_root: [u8; 32], - pub entries: Vec<(String, Vec<u8>, Vec<u8>)>, -} - -impl SyncData { - pub fn verify(&self) -> anyhow::Result<bool> { - // Merkle proof verification (stub: currently accepts all sync data) - Ok(true) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::*; - use tempfile::tempdir; - - #[test] - fn test_basic_operations() { - let dir = tempdir().unwrap(); - let validator = Hotkey::from_bytes(&[1u8; 32]).unwrap(); - let db = DistributedDB::open(dir.path(), validator).unwrap(); - - // Put - db.put("challenges", b"test-id", b"test-data").unwrap(); - - // Get - let value = db.get("challenges", b"test-id").unwrap(); - assert_eq!(value, Some(b"test-data".to_vec())); - - // Delete - db.delete("challenges", b"test-id").unwrap(); - let value = db.get("challenges", b"test-id").unwrap(); - assert!(value.is_none()); - } - - #[test] - fn test_optimistic_execution() { - let dir = tempdir().unwrap(); - let validator = Hotkey::from_bytes(&[1u8; 32]).unwrap(); - let db = DistributedDB::open(dir.path(), validator.clone()).unwrap(); - - let tx = Transaction::new( - validator, - Operation::Put { - collection: "challenges".to_string(), - key: b"key1".to_vec(), - value: b"value1".to_vec(), - }, - ); - - // Apply optimistically - let receipt = db.apply_optimistic(tx).unwrap(); - assert!(receipt.success); - - // Data should be available immediately - let value = db.get("challenges", b"key1").unwrap(); - assert_eq!(value, Some(b"value1".to_vec())); - } - - #[test] - fn test_db_open() { - let dir = tempdir().unwrap(); - let validator = create_test_hotkey(1); - let db = DistributedDB::open(dir.path(), validator).unwrap(); - assert_eq!(db.current_block(), 0); - assert_eq!(db.state_root(), [0u8; 32]); - } - - #[test] - fn test_state_root() { - let dir = tempdir().unwrap(); - let validator = create_test_hotkey(1); - let db = DistributedDB::open(dir.path(), validator.clone()).unwrap(); - - let root1 = db.state_root(); - - // Use apply_optimistic which properly updates merkle trie through transactions - let tx = Transaction::new( - validator, - Operation::Put { - collection: "challenges".to_string(), - key: b"key1".to_vec(), - value: b"value1".to_vec(), - }, - ); - db.apply_optimistic(tx).unwrap(); - let root2 = db.state_root(); - - // State root is retrieved (both roots are valid 32-byte arrays) - assert_eq!(root1.len(), 32); - assert_eq!(root2.len(), 32); - } - - #[test] - fn test_current_block() { - let dir = tempdir().unwrap(); - let validator = create_test_hotkey(1); - let db = DistributedDB::open(dir.path(), validator.clone()).unwrap(); - - assert_eq!(db.current_block(), 0); - - // Confirm block should update current block - let tx = Transaction::new( - validator.clone(), - Operation::Put { - collection: "challenges".to_string(), - key: b"key1".to_vec(), - value: b"value1".to_vec(), - }, - ); -
db.apply_optimistic(tx).unwrap(); - let conf = db.confirm_block(100).unwrap(); - - assert_eq!(conf.block_number, 100); - assert_eq!(db.current_block(), 100); - } - - #[test] - fn test_confirm_block() { - let dir = tempdir().unwrap(); - let validator = create_test_hotkey(1); - let db = DistributedDB::open(dir.path(), validator.clone()).unwrap(); - - // Apply multiple transactions - for i in 1..=3 { - let tx = Transaction::new( - validator.clone(), - Operation::Put { - collection: "challenges".to_string(), - key: format!("key{}", i).into_bytes(), - value: format!("value{}", i).into_bytes(), - }, - ); - db.apply_optimistic(tx).unwrap(); - } - - let conf = db.confirm_block(10).unwrap(); - assert_eq!(conf.block_number, 10); - assert!(conf.confirmed_count >= 1); // At least one transaction confirmed - // State root may or may not be non-zero depending on merkle implementation - } - - #[test] - fn test_query_operations() { - let dir = tempdir().unwrap(); - let validator = create_test_hotkey(1); - let db = DistributedDB::open(dir.path(), validator).unwrap(); - - // Insert JSON data - let challenge = serde_json::json!({"name": "Test", "mechanism_id": 1}); - db.put( - "challenges", - b"ch1", - &serde_json::to_vec(&challenge).unwrap(), - ) - .unwrap(); - - let query = Query::new("challenges").filter(Filter::eq("name", "Test")); - let result = db.query(query).unwrap(); - assert_eq!(result.entries.len(), 1); - } - - #[test] - fn test_delete_operation() { - let dir = tempdir().unwrap(); - let validator = create_test_hotkey(1); - let db = DistributedDB::open(dir.path(), validator.clone()).unwrap(); - - let tx = Transaction::new( - validator.clone(), - Operation::Put { - collection: "challenges".to_string(), - key: b"key1".to_vec(), - value: b"value1".to_vec(), - }, - ); - db.apply_optimistic(tx).unwrap(); - - // Delete via transaction - let tx_delete = Transaction::new( - validator, - Operation::Delete { - collection: "challenges".to_string(), - key: b"key1".to_vec(), - }, - ); - db.apply_optimistic(tx_delete).unwrap(); - - assert!(db.get("challenges", b"key1").unwrap().is_none()); - } - - #[test] - fn test_batch_put_operation() { - let dir = tempdir().unwrap(); - let validator = create_test_hotkey(1); - let db = DistributedDB::open(dir.path(), validator.clone()).unwrap(); - - let tx = Transaction::new( - validator, - Operation::BatchPut { - operations: vec![ - ( - "challenges".to_string(), - b"key1".to_vec(), - b"value1".to_vec(), - ), - ( - "challenges".to_string(), - b"key2".to_vec(), - b"value2".to_vec(), - ), - ], - }, - ); - - let receipt = db.apply_optimistic(tx).unwrap(); - assert!(receipt.success); - - assert_eq!( - db.get("challenges", b"key1").unwrap(), - Some(b"value1".to_vec()) - ); - assert_eq!( - db.get("challenges", b"key2").unwrap(), - Some(b"value2".to_vec()) - ); - } - - #[test] - fn test_get_sync_state() { - let dir = tempdir().unwrap(); - let validator = create_test_hotkey(1); - let db = DistributedDB::open(dir.path(), validator.clone()).unwrap(); - - let tx = Transaction::new( - validator, - Operation::Put { - collection: "challenges".to_string(), - key: b"key1".to_vec(), - value: b"value1".to_vec(), - }, - ); - db.apply_optimistic(tx).unwrap(); - - let sync_state = db.get_sync_state(); - assert_eq!(sync_state.block_number, 0); - assert!(sync_state.pending_count >= 1); - // State root is available - } - - #[test] - fn test_apply_sync_data() { - let dir = tempdir().unwrap(); - let validator = create_test_hotkey(1); - let db = DistributedDB::open(dir.path(), validator).unwrap(); - - 
let sync_data = SyncData { - state_root: [1u8; 32], - entries: vec![( - "challenges".to_string(), - b"key1".to_vec(), - b"value1".to_vec(), - )], - }; - - db.apply_sync_data(sync_data).unwrap(); - assert_eq!( - db.get("challenges", b"key1").unwrap(), - Some(b"value1".to_vec()) - ); - } - - #[test] - fn test_block_confirmation_structure() { - let conf = BlockConfirmation { - block_number: 100, - confirmed_count: 5, - state_root: [42u8; 32], - }; - - assert_eq!(conf.block_number, 100); - assert_eq!(conf.confirmed_count, 5); - assert_eq!(conf.state_root, [42u8; 32]); - } - - #[test] - fn test_sync_state_serialization() { - let sync_state = SyncState { - state_root: [1u8; 32], - block_number: 100, - pending_count: 5, - }; - - let serialized = serde_json::to_string(&sync_state).unwrap(); - let deserialized: SyncState = serde_json::from_str(&serialized).unwrap(); - - assert_eq!(deserialized.state_root, sync_state.state_root); - assert_eq!(deserialized.block_number, sync_state.block_number); - assert_eq!(deserialized.pending_count, sync_state.pending_count); - } - - #[test] - fn test_sync_data_verify() { - let sync_data = SyncData { - state_root: [1u8; 32], - entries: vec![], - }; - - assert!(sync_data.verify().unwrap()); - } - - #[test] - fn test_rebuild_merkle_trie() { - let dir = tempdir().unwrap(); - let validator = create_test_hotkey(1); - let db = DistributedDB::open(dir.path(), validator).unwrap(); - - db.put("challenges", b"key1", b"value1").unwrap(); - db.put("agents", b"key2", b"value2").unwrap(); - - let root = db.state_root(); - // Root is computed from the merkle trie - } -} diff --git a/crates/distributed-db/src/merkle.rs b/crates/distributed-db/src/merkle.rs deleted file mode 100644 index 277922a1..00000000 --- a/crates/distributed-db/src/merkle.rs +++ /dev/null @@ -1,531 +0,0 @@ -//! Merkle Patricia Trie for state verification -//! -//! Provides: -//! - O(log n) insertions and lookups -//! - Cryptographic state root -//! - Merkle proofs for verification - -use sha2::{Digest, Sha256}; -use std::collections::HashMap; - -/// Merkle Patricia Trie node -#[derive(Debug, Clone)] -enum Node { - Empty, - Leaf { - key: Vec<u8>, - value: Vec<u8>, - hash: [u8; 32], - }, - Branch { - children: Box<[Option<Box<Node>>; 16]>, - value: Option<Vec<u8>>, - hash: [u8; 32], - }, - Extension { - prefix: Vec<u8>, - child: Box<Node>, - hash: [u8; 32], - }, -} - -impl Node { - fn hash(&self) -> [u8; 32] { - match self { - Node::Empty => [0u8; 32], - Node::Leaf { hash, .. } => *hash, - Node::Branch { hash, .. } => *hash, - Node::Extension { hash, .. } => *hash, - } - } -}
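One thing worth noting before the trie itself: the Patricia Node variants above are never linked together in this file. root_hash() below hashes the flat key/value cache as a plain binary Merkle tree instead: sort entries, SHA-256 each leaf, then fold pairs level by level, promoting a lone trailing hash unchanged. A standalone sketch of that fold under those assumptions (sha2 only; the function name is mine):

```rust
use sha2::{Digest, Sha256};

// Fold one level of leaf hashes toward the root, as compute_root_hash does.
fn fold_to_root(mut level: Vec<[u8; 32]>) -> [u8; 32] {
    while level.len() > 1 {
        level = level
            .chunks(2)
            .map(|pair| {
                if pair.len() == 2 {
                    let mut h = Sha256::new();
                    h.update(pair[0]);
                    h.update(pair[1]);
                    h.finalize().into()
                } else {
                    pair[0] // odd count: promote the last hash unchanged
                }
            })
            .collect();
    }
    level.first().copied().unwrap_or([0u8; 32])
}
```

(The sync-oriented MerkleTreeBuilder in merkle_verification.rs instead duplicates the last node on odd levels, so the two roots are not interchangeable.)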
- -/// Merkle Patricia Trie -pub struct MerkleTrie { - root: Node, - /// Cache of key -> value for fast lookups - cache: HashMap<Vec<u8>, Vec<u8>>, - /// Dirty flag for root recalculation - dirty: bool, - /// Cached root hash - root_hash: [u8; 32], -} - -impl MerkleTrie { - /// Create a new empty trie - pub fn new() -> Self { - Self { - root: Node::Empty, - cache: HashMap::new(), - dirty: false, - root_hash: [0u8; 32], - } - } - - /// Insert a key-value pair - pub fn insert(&mut self, key: &[u8], value: &[u8]) { - self.cache.insert(key.to_vec(), value.to_vec()); - self.dirty = true; - } - - /// Get a value by key - pub fn get(&self, key: &[u8]) -> Option<&Vec<u8>> { - self.cache.get(key) - } - - /// Remove a key - pub fn remove(&mut self, key: &[u8]) -> Option<Vec<u8>> { - self.dirty = true; - self.cache.remove(key) - } - - /// Clear all entries - pub fn clear(&mut self) { - self.root = Node::Empty; - self.cache.clear(); - self.dirty = true; - self.root_hash = [0u8; 32]; - } - - /// Get the root hash - pub fn root_hash(&self) -> [u8; 32] { - if self.dirty { - // Recompute root hash - self.compute_root_hash() - } else { - self.root_hash - } - } - - /// Compute root hash from cache - fn compute_root_hash(&self) -> [u8; 32] { - if self.cache.is_empty() { - return [0u8; 32]; - } - - // Sort keys for deterministic ordering - let mut entries: Vec<_> = self.cache.iter().collect(); - entries.sort_by(|a, b| a.0.cmp(b.0)); - - // Build merkle tree from sorted entries - let mut hashes: Vec<[u8; 32]> = entries - .iter() - .map(|(k, v)| Self::hash_entry(k, v)) - .collect(); - - // Merkle tree construction - while hashes.len() > 1 { - let mut next_level = Vec::new(); - for chunk in hashes.chunks(2) { - if chunk.len() == 2 { - next_level.push(Self::hash_pair(&chunk[0], &chunk[1])); - } else { - next_level.push(chunk[0]); - } - } - hashes = next_level; - } - - hashes.first().copied().unwrap_or([0u8; 32]) - } - - /// Hash a key-value entry - fn hash_entry(key: &[u8], value: &[u8]) -> [u8; 32] { - let mut hasher = Sha256::new(); - hasher.update(key); - hasher.update(value); - hasher.finalize().into() - } - - /// Hash two child hashes - fn hash_pair(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] { - let mut hasher = Sha256::new(); - hasher.update(left); - hasher.update(right); - hasher.finalize().into() - } - - /// Generate a merkle proof for a key - pub fn generate_proof(&self, key: &[u8]) -> Option<MerkleProof> { - if !self.cache.contains_key(key) { - return None; - } - - let mut entries: Vec<_> = self.cache.iter().collect(); - entries.sort_by(|a, b| a.0.cmp(b.0)); - - let key_index = entries.iter().position(|(k, _)| *k == key)?; - - // Build proof path - let mut hashes: Vec<[u8; 32]> = entries - .iter() - .map(|(k, v)| Self::hash_entry(k, v)) - .collect(); - - let mut proof_path = Vec::new(); - let mut current_index = key_index; - - while hashes.len() > 1 { - let sibling_index = if current_index % 2 == 0 { - current_index + 1 - } else { - current_index - 1 - }; - - if sibling_index < hashes.len() { - proof_path.push(ProofNode { - hash: hashes[sibling_index], - is_left: current_index % 2 == 1, - }); - } - - // Move to next level - let mut next_level = Vec::new(); - for chunk in hashes.chunks(2) { - if chunk.len() == 2 { - next_level.push(Self::hash_pair(&chunk[0], &chunk[1])); - } else { - next_level.push(chunk[0]); - } - } - hashes = next_level; - current_index /= 2; - } - - Some(MerkleProof { - key: key.to_vec(), - value: self.cache.get(key)?.clone(), - path: proof_path, - root: self.root_hash(), - }) - } - - /// Verify a merkle proof - pub fn verify_proof(proof: &MerkleProof) -> bool { - let mut current_hash = Self::hash_entry(&proof.key, &proof.value); - - for node in &proof.path { - current_hash = if node.is_left { - Self::hash_pair(&node.hash, &current_hash) - } else { - Self::hash_pair(&current_hash, &node.hash) - }; - } - - current_hash == proof.root - } - - /// Get number of entries - pub fn len(&self) -> usize { - self.cache.len() - } - - /// Check if empty - pub fn is_empty(&self) -> bool { - self.cache.is_empty() - } - - /// Iterate over all entries - pub fn iter(&self) -> impl Iterator<Item = (&Vec<u8>, &Vec<u8>)> { - self.cache.iter() - } -} - -impl Default for MerkleTrie { - fn default() -> Self { - Self::new() - } -} - -/// Merkle proof for a single key -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -pub struct MerkleProof { - pub key: Vec<u8>, - pub value: Vec<u8>, - pub path: Vec<ProofNode>, - pub root: [u8; 32], -} - -/// Node in proof path -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -pub struct ProofNode { - pub hash: [u8; 32], - pub is_left: bool, -}
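The is_left flag on ProofNode records which side the sibling hash occupies when verify_proof re-folds toward the root, which is the part most easily gotten backwards. A quick hand-check on a two-leaf trie, a sketch using only the public API above (crate-root re-export assumed):

```rust
use distributed_db::MerkleTrie; // re-exported from the merkle module

fn two_leaf_proof_check() {
    let mut trie = MerkleTrie::new();
    trie.insert(b"a", b"1");
    trie.insert(b"b", b"2");

    // Proof for "a": one path element, the leaf hash of ("b", "2"),
    // sitting to our right (is_left == false).
    let proof = trie.generate_proof(b"a").unwrap();
    assert_eq!(proof.path.len(), 1);
    assert!(!proof.path[0].is_left);
    assert!(MerkleTrie::verify_proof(&proof));

    // Flipping the recorded direction must break verification.
    let mut flipped = proof.clone();
    flipped.path[0].is_left = true;
    assert!(!MerkleTrie::verify_proof(&flipped));
}
```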
- -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_basic_operations() { - let mut trie = MerkleTrie::new(); - - trie.insert(b"key1", b"value1"); - trie.insert(b"key2", b"value2"); - trie.insert(b"key3", b"value3"); - - assert_eq!(trie.get(b"key1"), Some(&b"value1".to_vec())); - assert_eq!(trie.get(b"key2"), Some(&b"value2".to_vec())); - assert_eq!(trie.len(), 3); - - let removed = trie.remove(b"key2"); - assert_eq!(removed, Some(b"value2".to_vec())); - assert_eq!(trie.len(), 2); - } - - #[test] - fn test_root_hash() { - let mut trie1 = MerkleTrie::new(); - trie1.insert(b"key1", b"value1"); - trie1.insert(b"key2", b"value2"); - - let mut trie2 = MerkleTrie::new(); - trie2.insert(b"key2", b"value2"); - trie2.insert(b"key1", b"value1"); - - // Same entries, same root hash (order independent) - assert_eq!(trie1.root_hash(), trie2.root_hash()); - - // Different entries, different root hash - trie2.insert(b"key3", b"value3"); - assert_ne!(trie1.root_hash(), trie2.root_hash()); - } - - #[test] - fn test_merkle_proof() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - trie.insert(b"key2", b"value2"); - trie.insert(b"key3", b"value3"); - trie.insert(b"key4", b"value4"); - - let proof = trie.generate_proof(b"key2").unwrap(); - assert!(MerkleTrie::verify_proof(&proof)); - - // Tampered proof should fail - let mut tampered = proof.clone(); - tampered.value = b"tampered".to_vec(); - assert!(!MerkleTrie::verify_proof(&tampered)); - } - - #[test] - fn test_new_trie_is_empty() { - let trie = MerkleTrie::new(); - assert!(trie.is_empty()); - assert_eq!(trie.len(), 0); - assert_eq!(trie.root_hash(), [0u8; 32]); - } - - #[test] - fn test_default_trie() { - let trie = MerkleTrie::default(); - assert!(trie.is_empty()); - assert_eq!(trie.len(), 0); - } - - #[test] - fn test_clear() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - trie.insert(b"key2", b"value2"); - assert_eq!(trie.len(), 2); - assert!(!trie.is_empty()); - - trie.clear(); - assert!(trie.is_empty()); - assert_eq!(trie.len(), 0); - assert_eq!(trie.root_hash(), [0u8; 32]); - assert_eq!(trie.get(b"key1"), None); - } - - #[test] - fn test_iter() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - trie.insert(b"key2", b"value2"); - trie.insert(b"key3", b"value3"); - - let mut entries: Vec<_> = trie.iter().collect(); - entries.sort_by_key(|(k, _)| (*k).clone()); - - assert_eq!(entries.len(), 3); - assert_eq!(entries[0],
(&b"key1".to_vec(), &b"value1".to_vec())); - assert_eq!(entries[1], (&b"key2".to_vec(), &b"value2".to_vec())); - assert_eq!(entries[2], (&b"key3".to_vec(), &b"value3".to_vec())); - } - - #[test] - fn test_get_nonexistent_key() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - assert_eq!(trie.get(b"key2"), None); - } - - #[test] - fn test_remove_nonexistent_key() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - let removed = trie.remove(b"key2"); - assert_eq!(removed, None); - assert_eq!(trie.len(), 1); - } - - #[test] - fn test_insert_overwrites() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - assert_eq!(trie.get(b"key1"), Some(&b"value1".to_vec())); - - trie.insert(b"key1", b"value2"); - assert_eq!(trie.get(b"key1"), Some(&b"value2".to_vec())); - assert_eq!(trie.len(), 1); - } - - #[test] - fn test_root_hash_empty_trie() { - let trie = MerkleTrie::new(); - assert_eq!(trie.root_hash(), [0u8; 32]); - } - - #[test] - fn test_root_hash_single_entry() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - let hash = trie.root_hash(); - assert_ne!(hash, [0u8; 32]); - - // Same single entry should produce same hash - let mut trie2 = MerkleTrie::new(); - trie2.insert(b"key1", b"value1"); - assert_eq!(trie2.root_hash(), hash); - } - - #[test] - fn test_root_hash_changes_on_insert() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - let hash1 = trie.root_hash(); - - trie.insert(b"key2", b"value2"); - let hash2 = trie.root_hash(); - - assert_ne!(hash1, hash2); - } - - #[test] - fn test_root_hash_changes_on_remove() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - trie.insert(b"key2", b"value2"); - let hash1 = trie.root_hash(); - - trie.remove(b"key2"); - let hash2 = trie.root_hash(); - - assert_ne!(hash1, hash2); - } - - #[test] - fn test_generate_proof_nonexistent_key() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - trie.insert(b"key2", b"value2"); - - let proof = trie.generate_proof(b"key3"); - assert!(proof.is_none()); - } - - #[test] - fn test_generate_proof_empty_trie() { - let trie = MerkleTrie::new(); - let proof = trie.generate_proof(b"key1"); - assert!(proof.is_none()); - } - - #[test] - fn test_generate_proof_single_entry() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - - let proof = trie.generate_proof(b"key1").unwrap(); - assert_eq!(proof.key, b"key1"); - assert_eq!(proof.value, b"value1"); - assert!(MerkleTrie::verify_proof(&proof)); - } - - #[test] - fn test_verify_proof_wrong_root() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - trie.insert(b"key2", b"value2"); - - let mut proof = trie.generate_proof(b"key1").unwrap(); - proof.root = [99u8; 32]; - - assert!(!MerkleTrie::verify_proof(&proof)); - } - - #[test] - fn test_verify_proof_wrong_key() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - trie.insert(b"key2", b"value2"); - - let mut proof = trie.generate_proof(b"key1").unwrap(); - proof.key = b"key2".to_vec(); - - assert!(!MerkleTrie::verify_proof(&proof)); - } - - #[test] - fn test_merkle_proof_with_odd_number_of_entries() { - let mut trie = MerkleTrie::new(); - trie.insert(b"key1", b"value1"); - trie.insert(b"key2", b"value2"); - trie.insert(b"key3", b"value3"); - - let proof = trie.generate_proof(b"key3").unwrap(); - assert!(MerkleTrie::verify_proof(&proof)); - } - - #[test] - fn test_node_empty_hash() { - let node = 
Node::Empty; - assert_eq!(node.hash(), [0u8; 32]); - } - - #[test] - fn test_node_leaf_hash() { - let hash = [42u8; 32]; - let node = Node::Leaf { - key: b"key".to_vec(), - value: b"value".to_vec(), - hash, - }; - assert_eq!(node.hash(), hash); - } - - #[test] - fn test_node_branch_hash() { - let hash = [43u8; 32]; - let children: [Option<Box<Node>>; 16] = Default::default(); - let node = Node::Branch { - children: Box::new(children), - value: Some(b"value".to_vec()), - hash, - }; - assert_eq!(node.hash(), hash); - } - - #[test] - fn test_node_extension_hash() { - let hash = [44u8; 32]; - let node = Node::Extension { - prefix: b"prefix".to_vec(), - child: Box::new(Node::Empty), - hash, - }; - assert_eq!(node.hash(), hash); - } -} diff --git a/crates/distributed-db/src/merkle_verification.rs b/crates/distributed-db/src/merkle_verification.rs deleted file mode 100644 index 926f4a95..00000000 --- a/crates/distributed-db/src/merkle_verification.rs +++ /dev/null @@ -1,449 +0,0 @@ -//! Merkle Proof Verification for State Sync -//! -//! This module provides cryptographic verification of state sync data using -//! Merkle trees. Each entry received during sync must have a valid proof -//! that connects it to the announced state root. -//! -//! # Security -//! - Prevents malicious peers from injecting corrupted data -//! - Verifies each entry belongs to the claimed state root -//! - Detects tampering during transmission - -use serde::{Deserialize, Serialize}; -use sha2::{Digest, Sha256}; -use std::collections::BTreeMap; - -/// Merkle node hash -pub type MerkleHash = [u8; 32]; - -/// Direction in merkle path -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -pub enum ProofDirection { - Left, - Right, -} - -/// Single element in a merkle proof -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ProofElement { - pub hash: MerkleHash, - pub direction: ProofDirection, -} - -/// Merkle proof for a single entry (used in state sync verification) -/// Named differently from merkle::MerkleProof to avoid conflicts -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct SyncMerkleProof { - /// Leaf hash (hash of the entry) - pub leaf_hash: MerkleHash, - /// Path from leaf to root - pub path: Vec<ProofElement>, -} - -/// Entry with merkle proof -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct VerifiedEntry { - pub collection: String, - pub key: Vec<u8>, - pub value: Vec<u8>, - pub proof: SyncMerkleProof, -} - -/// Result of merkle verification -#[derive(Debug, Clone)] -pub enum VerificationResult { - /// Proof is valid - Valid, - /// Proof is invalid - Invalid { reason: String }, - /// Entry hash doesn't match leaf - LeafMismatch { - expected: MerkleHash, - got: MerkleHash, - }, - /// Computed root doesn't match expected - RootMismatch { - expected: MerkleHash, - computed: MerkleHash, - }, -} - -impl VerificationResult { - pub fn is_valid(&self) -> bool { - matches!(self, VerificationResult::Valid) - } -} - -/// Hash a single entry (collection + key + value) -pub fn hash_entry(collection: &str, key: &[u8], value: &[u8]) -> MerkleHash { - let mut hasher = Sha256::new(); - hasher.update(collection.as_bytes()); - hasher.update(b":"); - hasher.update(key); - hasher.update(b":"); - hasher.update(value); - hasher.finalize().into() -} - -/// Hash two nodes together -pub fn hash_nodes(left: &MerkleHash, right: &MerkleHash) -> MerkleHash { - let mut hasher = Sha256::new(); - hasher.update(left); - hasher.update(right); - hasher.finalize().into() -}
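A valid sync proof reduces to exactly this: re-hash the entry with hash_entry, then fold each sibling in path order, with direction choosing the argument order of hash_nodes. A hand-rolled sketch equivalent to the verify_proof function that follows, checked against a two-entry build (module path assumed from the crate's pub mod declarations):

```rust
use distributed_db::merkle_verification::{
    hash_entry, hash_nodes, MerkleTreeBuilder, ProofDirection,
};

fn manual_fold_matches_root() {
    let mut builder = MerkleTreeBuilder::new();
    builder.add_entry("challenges", b"k1", b"v1");
    builder.add_entry("challenges", b"k2", b"v2");
    let (root, entries) = builder.build();

    // Fold the first entry's proof by hand.
    let e = &entries[0];
    let mut acc = hash_entry(&e.collection, &e.key, &e.value);
    for el in &e.proof.path {
        acc = match el.direction {
            ProofDirection::Left => hash_nodes(&el.hash, &acc),  // sibling sits left
            ProofDirection::Right => hash_nodes(&acc, &el.hash), // sibling sits right
        };
    }
    assert_eq!(acc, root);
}
```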
- -/// Verify a merkle proof against a state root -pub fn verify_proof(entry: &VerifiedEntry, expected_root: &MerkleHash) -> VerificationResult { - // 1. Compute leaf hash - let computed_leaf = hash_entry(&entry.collection, &entry.key, &entry.value); - - // 2. Verify leaf hash matches - if computed_leaf != entry.proof.leaf_hash { - return VerificationResult::LeafMismatch { - expected: entry.proof.leaf_hash, - got: computed_leaf, - }; - } - - // 3. Traverse path to compute root - let mut current_hash = computed_leaf; - - for element in &entry.proof.path { - current_hash = match element.direction { - ProofDirection::Left => hash_nodes(&element.hash, &current_hash), - ProofDirection::Right => hash_nodes(&current_hash, &element.hash), - }; - } - - // 4. Verify computed root matches expected - if current_hash != *expected_root { - return VerificationResult::RootMismatch { - expected: *expected_root, - computed: current_hash, - }; - } - - VerificationResult::Valid -} - -/// Build a merkle tree from entries and return root + proofs -pub struct MerkleTreeBuilder { - /// Entries by full key (collection:key) - entries: BTreeMap<Vec<u8>, (String, Vec<u8>, Vec<u8>)>, -} - -impl MerkleTreeBuilder { - pub fn new() -> Self { - Self { - entries: BTreeMap::new(), - } - } - - /// Add an entry - pub fn add_entry(&mut self, collection: &str, key: &[u8], value: &[u8]) { - let full_key = format!("{}:{}", collection, hex::encode(key)).into_bytes(); - self.entries.insert( - full_key, - (collection.to_string(), key.to_vec(), value.to_vec()), - ); - } - - /// Build tree and get root - pub fn build(&self) -> (MerkleHash, Vec<VerifiedEntry>) { - if self.entries.is_empty() { - return ([0u8; 32], vec![]); - } - - // Hash all leaves - let leaves: Vec<(Vec<u8>, MerkleHash)> = self - .entries - .iter() - .map(|(full_key, (collection, key, value))| { - (full_key.clone(), hash_entry(collection, key, value)) - }) - .collect(); - - // Build tree layer by layer, tracking paths - let mut paths: Vec<Vec<ProofElement>> = vec![vec![]; leaves.len()]; - - let mut current_level: Vec<MerkleHash> = leaves.iter().map(|(_, h)| *h).collect(); - - while current_level.len() > 1 { - let mut next_level = Vec::new(); - let mut new_paths = paths.clone(); - - for i in (0..current_level.len()).step_by(2) { - let left = current_level[i]; - let right = if i + 1 < current_level.len() { - current_level[i + 1] - } else { - left // Odd number: duplicate last node - }; - - let parent = hash_nodes(&left, &right); - next_level.push(parent); - - // Update paths for affected leaves - let left_indices: Vec<usize> = (0..leaves.len()) - .filter(|&j| { - let level_index = j / (1 << paths[j].len()); - level_index / 2 == i / 2 && level_index.is_multiple_of(2) - }) - .collect(); - - let right_indices: Vec<usize> = (0..leaves.len()) - .filter(|&j| { - let level_index = j / (1 << paths[j].len()); - level_index / 2 == i / 2 && !level_index.is_multiple_of(2) - }) - .collect(); - - // Add sibling to path - for &idx in &left_indices { - if i + 1 < current_level.len() { - new_paths[idx].push(ProofElement { - hash: right, - direction: ProofDirection::Right, - }); - } - } - - for &idx in &right_indices { - new_paths[idx].push(ProofElement { - hash: left, - direction: ProofDirection::Left, - }); - } - } - - paths = new_paths; - current_level = next_level; - } - - let root = current_level[0]; - - // Build verified entries - let verified_entries: Vec<VerifiedEntry> = self - .entries - .iter() - .enumerate() - .map(|(i, (_, (collection, key, value)))| VerifiedEntry { - collection: collection.clone(), - key: key.clone(), - value: value.clone(), - proof: SyncMerkleProof { - leaf_hash: hash_entry(collection, key, value), - path:
paths[i].clone(), - }, - }) - .collect(); - - (root, verified_entries) - } -} - -impl Default for MerkleTreeBuilder { - fn default() -> Self { - Self::new() - } -} - -/// Verifier for incoming state sync data -pub struct StateSyncVerifier { - expected_root: MerkleHash, - verified_count: usize, - rejected_count: usize, -} - -impl StateSyncVerifier { - pub fn new(expected_root: MerkleHash) -> Self { - Self { - expected_root, - verified_count: 0, - rejected_count: 0, - } - } - - /// Verify a single entry - pub fn verify(&mut self, entry: &VerifiedEntry) -> VerificationResult { - let result = verify_proof(entry, &self.expected_root); - - match &result { - VerificationResult::Valid => self.verified_count += 1, - _ => self.rejected_count += 1, - } - - result - } - - /// Get stats - pub fn stats(&self) -> (usize, usize) { - (self.verified_count, self.rejected_count) - } - - /// Check if any entries were rejected - pub fn has_rejections(&self) -> bool { - self.rejected_count > 0 - } -} - -// ============================================================================ -// TESTS -// ============================================================================ - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_hash_entry() { - let hash1 = hash_entry("challenges", b"key1", b"value1"); - let hash2 = hash_entry("challenges", b"key1", b"value1"); - let hash3 = hash_entry("challenges", b"key1", b"value2"); - - assert_eq!(hash1, hash2); - assert_ne!(hash1, hash3); - } - - #[test] - fn test_hash_nodes() { - let left = [1u8; 32]; - let right = [2u8; 32]; - - let parent1 = hash_nodes(&left, &right); - let parent2 = hash_nodes(&left, &right); - let parent3 = hash_nodes(&right, &left); // Different order - - assert_eq!(parent1, parent2); - assert_ne!(parent1, parent3); - } - - #[test] - fn test_merkle_tree_single_entry() { - let mut builder = MerkleTreeBuilder::new(); - builder.add_entry("challenges", b"test", b"data"); - - let (root, entries) = builder.build(); - - assert_eq!(entries.len(), 1); - assert_ne!(root, [0u8; 32]); - - // Verify the entry - let result = verify_proof(&entries[0], &root); - assert!(result.is_valid()); - } - - #[test] - fn test_merkle_tree_multiple_entries() { - let mut builder = MerkleTreeBuilder::new(); - builder.add_entry("challenges", b"key1", b"value1"); - builder.add_entry("challenges", b"key2", b"value2"); - builder.add_entry("agents", b"agent1", b"data1"); - builder.add_entry("weights", b"w1", b"100"); - - let (root, entries) = builder.build(); - - assert_eq!(entries.len(), 4); - - // All entries should verify - for entry in &entries { - let result = verify_proof(entry, &root); - assert!( - result.is_valid(), - "Entry {:?} failed verification: {:?}", - entry.key, - result - ); - } - } - - #[test] - fn test_tampered_entry_rejected() { - let mut builder = MerkleTreeBuilder::new(); - builder.add_entry("challenges", b"key1", b"value1"); - builder.add_entry("challenges", b"key2", b"value2"); - - let (root, mut entries) = builder.build(); - - // Tamper with an entry - entries[0].value = b"TAMPERED".to_vec(); - - // Should fail verification - let result = verify_proof(&entries[0], &root); - assert!(!result.is_valid()); - assert!(matches!(result, VerificationResult::LeafMismatch { .. 
})); - } - - #[test] - fn test_wrong_root_rejected() { - let mut builder = MerkleTreeBuilder::new(); - builder.add_entry("challenges", b"key1", b"value1"); - - let (_, entries) = builder.build(); - - // Use wrong root - let wrong_root = [99u8; 32]; - - let result = verify_proof(&entries[0], &wrong_root); - assert!(!result.is_valid()); - assert!(matches!(result, VerificationResult::RootMismatch { .. })); - } - - #[test] - fn test_state_sync_verifier() { - let mut builder = MerkleTreeBuilder::new(); - builder.add_entry("challenges", b"k1", b"v1"); - builder.add_entry("challenges", b"k2", b"v2"); - - let (root, entries) = builder.build(); - - let mut verifier = StateSyncVerifier::new(root); - - // Verify valid entries - for entry in &entries { - let result = verifier.verify(entry); - assert!(result.is_valid()); - } - - assert_eq!(verifier.stats(), (2, 0)); - assert!(!verifier.has_rejections()); - - // Try tampered entry - let mut tampered = entries[0].clone(); - tampered.value = b"bad".to_vec(); - let result = verifier.verify(&tampered); - assert!(!result.is_valid()); - - assert_eq!(verifier.stats(), (2, 1)); - assert!(verifier.has_rejections()); - } - - #[test] - fn test_empty_tree() { - let builder = MerkleTreeBuilder::new(); - let (root, entries) = builder.build(); - - assert_eq!(root, [0u8; 32]); - assert!(entries.is_empty()); - } - - #[test] - fn test_verification_result_is_valid() { - assert!(VerificationResult::Valid.is_valid()); - assert!(!VerificationResult::Invalid { - reason: "test".to_string() - } - .is_valid()); - assert!(!VerificationResult::LeafMismatch { - expected: [1u8; 32], - got: [2u8; 32] - } - .is_valid()); - assert!(!VerificationResult::RootMismatch { - expected: [1u8; 32], - computed: [2u8; 32] - } - .is_valid()); - } -} diff --git a/crates/distributed-db/src/queries.rs b/crates/distributed-db/src/queries.rs deleted file mode 100644 index 60163f69..00000000 --- a/crates/distributed-db/src/queries.rs +++ /dev/null @@ -1,815 +0,0 @@ -//! High-level query API for the distributed database -//! -//! 
Provides convenient methods for common operations - -use crate::{DistributedDB, Filter, Query, QueryResult}; -use platform_core::{ChallengeContainerConfig, Hotkey}; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; - -/// Challenge data stored in DB -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct StoredChallenge { - pub id: String, - pub name: String, - pub docker_image: String, - pub mechanism_id: u8, - pub emission_weight: f64, - pub created_at: u64, - pub created_by: String, - pub status: ChallengeStatus, -} - -/// Challenge status -#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] -pub enum ChallengeStatus { - Active, - Paused, - Deprecated, -} - -/// Agent submission data -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct StoredAgent { - pub hash: String, - pub challenge_id: String, - pub submitter: String, - pub submitted_at: u64, - pub code_hash: String, - pub status: AgentStatus, -} - -/// Agent status -#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] -pub enum AgentStatus { - Pending, - Evaluating, - Evaluated, - Failed, -} - -/// Evaluation result data -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct StoredEvaluation { - pub id: String, - pub agent_hash: String, - pub challenge_id: String, - pub validator: String, - pub score: f64, - pub metrics: serde_json::Value, - pub evaluated_at: u64, - pub block_number: u64, -} - -/// Weight submission data -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct StoredWeight { - pub id: String, - pub challenge_id: String, - pub validator: String, - pub weights: Vec<(u16, u16)>, // (uid, weight) - pub block_number: u64, - pub submitted_at: u64, -} - -impl DistributedDB { - // ==================== Challenge Operations ==================== - - /// Store a challenge configuration - pub fn store_challenge(&self, config: &ChallengeContainerConfig) -> anyhow::Result<()> { - let challenge = StoredChallenge { - id: config.challenge_id.to_string(), - name: config.name.clone(), - docker_image: config.docker_image.clone(), - mechanism_id: config.mechanism_id, - emission_weight: config.emission_weight, - created_at: chrono::Utc::now().timestamp_millis() as u64, - created_by: "sudo".to_string(), - status: ChallengeStatus::Active, - }; - - let key = config.challenge_id.to_string(); - let value = serde_json::to_vec(&challenge)?; - self.put("challenges", key.as_bytes(), &value) - } - - /// Get a challenge by ID - pub fn get_challenge(&self, challenge_id: &str) -> anyhow::Result<Option<StoredChallenge>> { - self.get_typed("challenges", challenge_id.as_bytes()) - } - - /// List all active challenges - pub fn list_challenges(&self) -> anyhow::Result<Vec<StoredChallenge>> { - let query = Query::new("challenges"); - let result = self.query(query)?; - self.parse_results(result) - } - - /// List challenges by mechanism - pub fn list_challenges_by_mechanism( - &self, - mechanism_id: u8, - ) -> anyhow::Result<Vec<StoredChallenge>> { - let query = - Query::new("challenges").filter(Filter::eq("mechanism_id", mechanism_id.to_string())); - let result = self.query(query)?; - self.parse_results(result) - } - - // ==================== Agent Operations ==================== - - /// Store an agent submission - pub fn store_agent(&self, agent: &StoredAgent) -> anyhow::Result<()> { - let key = agent.hash.as_bytes(); - let value = serde_json::to_vec(agent)?; - self.put("agents", key, &value) - } - - /// Get an agent by hash - pub fn get_agent(&self, hash: &str) -> anyhow::Result<Option<StoredAgent>> { - self.get_typed("agents", hash.as_bytes()) - }
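The accessors above are thin typed wrappers over put/query: challenges are keyed by challenge_id, agents by their content hash, and the list methods filter on indexed fields. A short usage sketch under those assumptions (crate-root re-exports assumed; field values are illustrative):

```rust
use distributed_db::{AgentStatus, DistributedDB, StoredAgent};
use platform_core::ChallengeContainerConfig;

fn typed_accessors(db: &DistributedDB) -> anyhow::Result<()> {
    // Challenges: keyed by challenge_id, filterable by mechanism_id.
    let config = ChallengeContainerConfig::new("Demo", "demo:latest", 1, 1.0);
    db.store_challenge(&config)?;
    assert_eq!(db.list_challenges_by_mechanism(1)?.len(), 1);

    // Agents: keyed by their hash.
    db.store_agent(&StoredAgent {
        hash: "agent-1".into(),
        challenge_id: config.challenge_id.to_string(),
        submitter: "hex-encoded-hotkey".into(),
        submitted_at: 0,
        code_hash: "sha256-of-code".into(),
        status: AgentStatus::Pending,
    })?;
    assert!(db.get_agent("agent-1")?.is_some());
    Ok(())
}
```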
- - /// List agents for a challenge - pub fn list_agents_for_challenge( - &self, - challenge_id: &str, - ) -> anyhow::Result<Vec<StoredAgent>> { - let query = Query::new("agents").filter(Filter::eq("challenge_id", challenge_id)); - let result = self.query(query)?; - self.parse_results(result) - } - - /// List agents by submitter - pub fn list_agents_by_submitter(&self, submitter: &Hotkey) -> anyhow::Result<Vec<StoredAgent>> { - let query = - Query::new("agents").filter(Filter::eq("submitter", hex::encode(submitter.as_bytes()))); - let result = self.query(query)?; - self.parse_results(result) - } - - /// Update agent status - pub fn update_agent_status(&self, hash: &str, status: AgentStatus) -> anyhow::Result<()> { - if let Some(mut agent) = self.get_agent(hash)? { - agent.status = status; - self.store_agent(&agent)?; - } - Ok(()) - } - - // ==================== Evaluation Operations ==================== - - /// Store an evaluation result - pub fn store_evaluation(&self, eval: &StoredEvaluation) -> anyhow::Result<()> { - let key = eval.id.as_bytes(); - let value = serde_json::to_vec(eval)?; - self.put("evaluations", key, &value) - } - - /// Get evaluations for an agent - pub fn get_evaluations_for_agent( - &self, - agent_hash: &str, - ) -> anyhow::Result<Vec<StoredEvaluation>> { - let query = Query::new("evaluations").filter(Filter::eq("agent_hash", agent_hash)); - let result = self.query(query)?; - self.parse_results(result) - } - - /// Get evaluations by validator - pub fn get_evaluations_by_validator( - &self, - validator: &Hotkey, - ) -> anyhow::Result<Vec<StoredEvaluation>> { - let query = Query::new("evaluations") - .filter(Filter::eq("validator", hex::encode(validator.as_bytes()))); - let result = self.query(query)?; - self.parse_results(result) - } - - /// Get top scores for a challenge - pub fn get_top_scores( - &self, - challenge_id: &str, - limit: usize, - ) -> anyhow::Result<Vec<StoredEvaluation>> { - let query = Query::new("evaluations") - .filter(Filter::eq("challenge_id", challenge_id)) - .order_by("score", true) // descending - .limit(limit); - let result = self.query(query)?; - self.parse_results(result) - } - - // ==================== Weight Operations ==================== - - /// Store weight submission - pub fn store_weights(&self, weights: &StoredWeight) -> anyhow::Result<()> { - let key = weights.id.as_bytes(); - let value = serde_json::to_vec(weights)?; - self.put("weights", key, &value) - } - - /// Get weights for a block - pub fn get_weights_at_block(&self, block: u64) -> anyhow::Result<Vec<StoredWeight>> { - let query = Query::new("weights").filter(Filter::eq("block_number", block.to_string())); - let result = self.query(query)?; - self.parse_results(result) - } - - /// Get latest weights for challenge - pub fn get_latest_weights(&self, challenge_id: &str) -> anyhow::Result<Vec<StoredWeight>> { - let query = Query::new("weights") - .filter(Filter::eq("challenge_id", challenge_id)) - .order_by("block_number", true) // descending - .limit(100); - let result = self.query(query)?; - self.parse_results(result) - } - - // ==================== Helper Methods ==================== - - /// Get typed value from collection - fn get_typed<T: DeserializeOwned>( - &self, - collection: &str, - key: &[u8], - ) -> anyhow::Result<Option<T>> { - match self.get(collection, key)?
{ - Some(value) => { - let parsed = serde_json::from_slice(&value)?; - Ok(Some(parsed)) - } - None => Ok(None), - } - } - - /// Parse query results into typed vec - fn parse_results<T: DeserializeOwned>(&self, result: QueryResult) -> anyhow::Result<Vec<T>> { - let mut items = Vec::new(); - for entry in result.entries { - if let Ok(item) = serde_json::from_slice::<T>(&entry.value) { - items.push(item); - } - } - Ok(items) - } - - // ==================== Aggregation Operations ==================== - - /// Get challenge statistics - pub fn get_challenge_stats(&self, challenge_id: &str) -> anyhow::Result<ChallengeStats> { - let agents = self.list_agents_for_challenge(challenge_id)?; - let evaluations: Vec<StoredEvaluation> = { - let query = Query::new("evaluations").filter(Filter::eq("challenge_id", challenge_id)); - let result = self.query(query)?; - self.parse_results(result)? - }; - - let total_agents = agents.len(); - let evaluated_agents = agents - .iter() - .filter(|a| a.status == AgentStatus::Evaluated) - .count(); - let avg_score = if !evaluations.is_empty() { - evaluations.iter().map(|e| e.score).sum::<f64>() / evaluations.len() as f64 - } else { - 0.0 - }; - let max_score = evaluations.iter().map(|e| e.score).fold(0.0f64, f64::max); - - Ok(ChallengeStats { - challenge_id: challenge_id.to_string(), - total_agents, - evaluated_agents, - total_evaluations: evaluations.len(), - avg_score, - max_score, - }) - } - - /// Get global stats - pub fn get_global_stats(&self) -> anyhow::Result<GlobalStats> { - let challenges = self.list_challenges()?; - let agents: Vec<StoredAgent> = { - let query = Query::new("agents"); - let result = self.query(query)?; - self.parse_results(result)? - }; - let evaluations: Vec<StoredEvaluation> = { - let query = Query::new("evaluations"); - let result = self.query(query)?; - self.parse_results(result)? - }; - - Ok(GlobalStats { - total_challenges: challenges.len(), - active_challenges: challenges - .iter() - .filter(|c| c.status == ChallengeStatus::Active) - .count(), - total_agents: agents.len(), - total_evaluations: evaluations.len(), - state_root: hex::encode(self.state_root()), - }) - } -} - -/// Challenge statistics -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ChallengeStats { - pub challenge_id: String, - pub total_agents: usize, - pub evaluated_agents: usize, - pub total_evaluations: usize, - pub avg_score: f64, - pub max_score: f64, -} - -/// Global statistics -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct GlobalStats { - pub total_challenges: usize, - pub active_challenges: usize, - pub total_agents: usize, - pub total_evaluations: usize, - pub state_root: String, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::*; - - #[test] - fn test_challenge_operations() { - let (_dir, db) = create_test_db(); - - // Store challenge - let config = ChallengeContainerConfig::new("Test Challenge", "test:latest", 0, 1.0); - db.store_challenge(&config).unwrap(); - - // Get challenge - let challenge = db.get_challenge(&config.challenge_id.to_string()).unwrap(); - assert!(challenge.is_some()); - assert_eq!(challenge.unwrap().name, "Test Challenge"); - - // List challenges - let challenges = db.list_challenges().unwrap(); - assert_eq!(challenges.len(), 1); - } - - #[test] - fn test_agent_operations() { - let (_dir, db) = create_test_db(); - - let agent = StoredAgent { - hash: "abc123".to_string(), - challenge_id: "test-challenge".to_string(), - submitter: "submitter123".to_string(), - submitted_at: 1000, - code_hash: "code123".to_string(), - status: AgentStatus::Pending, - }; - - db.store_agent(&agent).unwrap(); - - let retrieved =
db.get_agent("abc123").unwrap(); - assert!(retrieved.is_some()); - assert_eq!(retrieved.unwrap().challenge_id, "test-challenge"); - } - - #[test] - fn test_list_challenges_by_mechanism() { - let (_dir, db) = create_test_db(); - - // Store challenges with different mechanisms - let config1 = ChallengeContainerConfig::new("Challenge 1", "test1:latest", 1, 1.0); - let config2 = ChallengeContainerConfig::new("Challenge 2", "test2:latest", 2, 1.0); - let config3 = ChallengeContainerConfig::new("Challenge 3", "test3:latest", 1, 1.0); - - db.store_challenge(&config1).unwrap(); - db.store_challenge(&config2).unwrap(); - db.store_challenge(&config3).unwrap(); - - // Query by mechanism 1 - let challenges = db.list_challenges_by_mechanism(1).unwrap(); - assert_eq!(challenges.len(), 2); - assert!(challenges.iter().all(|c| c.mechanism_id == 1)); - - // Query by mechanism 2 - let challenges = db.list_challenges_by_mechanism(2).unwrap(); - assert_eq!(challenges.len(), 1); - assert_eq!(challenges[0].mechanism_id, 2); - } - - #[test] - fn test_get_challenge_not_found() { - let (_dir, db) = create_test_db(); - - let result = db.get_challenge("nonexistent").unwrap(); - assert!(result.is_none()); - } - - #[test] - fn test_list_agents_for_challenge() { - let (_dir, db) = create_test_db(); - - // Store agents for different challenges - let agent1 = StoredAgent { - hash: "agent1".to_string(), - challenge_id: "challenge1".to_string(), - submitter: "submitter1".to_string(), - submitted_at: 1000, - code_hash: "code1".to_string(), - status: AgentStatus::Pending, - }; - - let agent2 = StoredAgent { - hash: "agent2".to_string(), - challenge_id: "challenge1".to_string(), - submitter: "submitter2".to_string(), - submitted_at: 2000, - code_hash: "code2".to_string(), - status: AgentStatus::Pending, - }; - - let agent3 = StoredAgent { - hash: "agent3".to_string(), - challenge_id: "challenge2".to_string(), - submitter: "submitter3".to_string(), - submitted_at: 3000, - code_hash: "code3".to_string(), - status: AgentStatus::Pending, - }; - - db.store_agent(&agent1).unwrap(); - db.store_agent(&agent2).unwrap(); - db.store_agent(&agent3).unwrap(); - - // List agents for challenge1 - let agents = db.list_agents_for_challenge("challenge1").unwrap(); - assert_eq!(agents.len(), 2); - assert!(agents.iter().all(|a| a.challenge_id == "challenge1")); - } - - #[test] - fn test_list_agents_by_submitter() { - let (_dir, db) = create_test_db(); - - let submitter = Hotkey::from_bytes(&[5u8; 32]).unwrap(); - let submitter_hex = hex::encode(submitter.as_bytes()); - - let agent1 = StoredAgent { - hash: "agent1".to_string(), - challenge_id: "challenge1".to_string(), - submitter: submitter_hex.clone(), - submitted_at: 1000, - code_hash: "code1".to_string(), - status: AgentStatus::Pending, - }; - - let agent2 = StoredAgent { - hash: "agent2".to_string(), - challenge_id: "challenge2".to_string(), - submitter: submitter_hex.clone(), - submitted_at: 2000, - code_hash: "code2".to_string(), - status: AgentStatus::Pending, - }; - - let agent3 = StoredAgent { - hash: "agent3".to_string(), - challenge_id: "challenge1".to_string(), - submitter: "different_submitter".to_string(), - submitted_at: 3000, - code_hash: "code3".to_string(), - status: AgentStatus::Pending, - }; - - db.store_agent(&agent1).unwrap(); - db.store_agent(&agent2).unwrap(); - db.store_agent(&agent3).unwrap(); - - // List agents by submitter - let agents = db.list_agents_by_submitter(&submitter).unwrap(); - assert_eq!(agents.len(), 2); - assert!(agents.iter().all(|a| a.submitter == 
submitter_hex)); - } - - #[test] - fn test_update_agent_status() { - let (_dir, db) = create_test_db(); - - let agent = StoredAgent { - hash: "agent1".to_string(), - challenge_id: "challenge1".to_string(), - submitter: "submitter1".to_string(), - submitted_at: 1000, - code_hash: "code1".to_string(), - status: AgentStatus::Pending, - }; - - db.store_agent(&agent).unwrap(); - - // Update status - db.update_agent_status("agent1", AgentStatus::Evaluating) - .unwrap(); - - let updated = db.get_agent("agent1").unwrap().unwrap(); - assert_eq!(updated.status, AgentStatus::Evaluating); - - // Update to evaluated - db.update_agent_status("agent1", AgentStatus::Evaluated) - .unwrap(); - - let updated = db.get_agent("agent1").unwrap().unwrap(); - assert_eq!(updated.status, AgentStatus::Evaluated); - } - - #[test] - fn test_update_agent_status_not_found() { - let (_dir, db) = create_test_db(); - - // Updating non-existent agent should not error - db.update_agent_status("nonexistent", AgentStatus::Failed) - .unwrap(); - } - - #[test] - fn test_evaluation_operations() { - let (_dir, db) = create_test_db(); - - let validator = Hotkey::from_bytes(&[7u8; 32]).unwrap(); - let validator_hex = hex::encode(validator.as_bytes()); - - let eval = StoredEvaluation { - id: "eval1".to_string(), - agent_hash: "agent1".to_string(), - challenge_id: "challenge1".to_string(), - validator: validator_hex.clone(), - score: 0.95, - metrics: serde_json::json!({"accuracy": 0.95}), - evaluated_at: 10000, - block_number: 100, - }; - - db.store_evaluation(&eval).unwrap(); - - // Get evaluations for agent - let evals = db.get_evaluations_for_agent("agent1").unwrap(); - assert_eq!(evals.len(), 1); - assert_eq!(evals[0].score, 0.95); - } - - #[test] - fn test_get_evaluations_by_validator() { - let (_dir, db) = create_test_db(); - - let validator = Hotkey::from_bytes(&[8u8; 32]).unwrap(); - let validator_hex = hex::encode(validator.as_bytes()); - - let eval1 = StoredEvaluation { - id: "eval1".to_string(), - agent_hash: "agent1".to_string(), - challenge_id: "challenge1".to_string(), - validator: validator_hex.clone(), - score: 0.95, - metrics: serde_json::json!({}), - evaluated_at: 10000, - block_number: 100, - }; - - let eval2 = StoredEvaluation { - id: "eval2".to_string(), - agent_hash: "agent2".to_string(), - challenge_id: "challenge1".to_string(), - validator: validator_hex.clone(), - score: 0.85, - metrics: serde_json::json!({}), - evaluated_at: 11000, - block_number: 101, - }; - - let eval3 = StoredEvaluation { - id: "eval3".to_string(), - agent_hash: "agent3".to_string(), - challenge_id: "challenge1".to_string(), - validator: "different_validator".to_string(), - score: 0.75, - metrics: serde_json::json!({}), - evaluated_at: 12000, - block_number: 102, - }; - - db.store_evaluation(&eval1).unwrap(); - db.store_evaluation(&eval2).unwrap(); - db.store_evaluation(&eval3).unwrap(); - - let evals = db.get_evaluations_by_validator(&validator).unwrap(); - assert_eq!(evals.len(), 2); - assert!(evals.iter().all(|e| e.validator == validator_hex)); - } - - #[test] - fn test_get_top_scores() { - let (_dir, db) = create_test_db(); - - // Store evaluations with different scores - for i in 0..5 { - let eval = StoredEvaluation { - id: format!("eval{}", i), - agent_hash: format!("agent{}", i), - challenge_id: "challenge1".to_string(), - validator: "validator1".to_string(), - score: (i as f64) / 10.0, - metrics: serde_json::json!({}), - evaluated_at: 10000 + i as u64, - block_number: 100 + i as u64, - }; - db.store_evaluation(&eval).unwrap(); - } - 
- // Get top 3 scores - let top = db.get_top_scores("challenge1", 3).unwrap(); - assert!(top.len() <= 3); - // Scores should be in descending order (but sorting may not be fully implemented) - } - - #[test] - fn test_weight_operations() { - let (_dir, db) = create_test_db(); - - let weights = StoredWeight { - id: "weight1".to_string(), - challenge_id: "challenge1".to_string(), - validator: "validator1".to_string(), - weights: vec![(0, 100), (1, 200), (2, 150)], - block_number: 100, - submitted_at: 10000, - }; - - db.store_weights(&weights).unwrap(); - - // Get weights at block - let weights_at_block = db.get_weights_at_block(100).unwrap(); - assert_eq!(weights_at_block.len(), 1); - assert_eq!(weights_at_block[0].weights.len(), 3); - } - - #[test] - fn test_get_latest_weights() { - let (_dir, db) = create_test_db(); - - // Store weights at different blocks - for block in 100..105 { - let weights = StoredWeight { - id: format!("weight_block_{}", block), - challenge_id: "challenge1".to_string(), - validator: "validator1".to_string(), - weights: vec![(0, block as u16)], - block_number: block, - submitted_at: block * 1000, - }; - db.store_weights(&weights).unwrap(); - } - - let latest = db.get_latest_weights("challenge1").unwrap(); - assert!(latest.len() >= 5); - } - - #[test] - fn test_challenge_stats() { - let (_dir, db) = create_test_db(); - - // Store challenge, agents, and evaluations - let config = ChallengeContainerConfig::new("Test Challenge", "test:latest", 0, 1.0); - db.store_challenge(&config).unwrap(); - let challenge_id = config.challenge_id.to_string(); - - // Store agents - for i in 0..3 { - let agent = StoredAgent { - hash: format!("agent{}", i), - challenge_id: challenge_id.clone(), - submitter: "submitter".to_string(), - submitted_at: 1000 + i, - code_hash: format!("code{}", i), - status: if i < 2 { - AgentStatus::Evaluated - } else { - AgentStatus::Pending - }, - }; - db.store_agent(&agent).unwrap(); - } - - // Store evaluations - for i in 0..2 { - let eval = StoredEvaluation { - id: format!("eval{}", i), - agent_hash: format!("agent{}", i), - challenge_id: challenge_id.clone(), - validator: "validator".to_string(), - score: 0.8 + (i as f64 * 0.1), - metrics: serde_json::json!({}), - evaluated_at: 10000, - block_number: 100, - }; - db.store_evaluation(&eval).unwrap(); - } - - let stats = db.get_challenge_stats(&challenge_id).unwrap(); - assert_eq!(stats.total_agents, 3); - assert_eq!(stats.evaluated_agents, 2); - assert_eq!(stats.total_evaluations, 2); - assert!(stats.avg_score > 0.8); - assert!(stats.max_score >= 0.9); - } - - #[test] - fn test_global_stats() { - let (_dir, db) = create_test_db(); - - // Store some challenges - for i in 0..3 { - let config = - ChallengeContainerConfig::new(&format!("Challenge {}", i), "test:latest", 0, 1.0); - db.store_challenge(&config).unwrap(); - } - - // Store some agents - for i in 0..5 { - let agent = StoredAgent { - hash: format!("agent{}", i), - challenge_id: "challenge1".to_string(), - submitter: "submitter".to_string(), - submitted_at: 1000 + i, - code_hash: format!("code{}", i), - status: AgentStatus::Pending, - }; - db.store_agent(&agent).unwrap(); - } - - // Store some evaluations - for i in 0..7 { - let eval = StoredEvaluation { - id: format!("eval{}", i), - agent_hash: format!("agent{}", i % 5), - challenge_id: "challenge1".to_string(), - validator: "validator".to_string(), - score: 0.8, - metrics: serde_json::json!({}), - evaluated_at: 10000, - block_number: 100, - }; - db.store_evaluation(&eval).unwrap(); - } - - let stats = 
db.get_global_stats().unwrap();
-        assert_eq!(stats.total_challenges, 3);
-        assert_eq!(stats.active_challenges, 3);
-        assert_eq!(stats.total_agents, 5);
-        assert_eq!(stats.total_evaluations, 7);
-        assert!(!stats.state_root.is_empty());
-    }
-
-    #[test]
-    fn test_challenge_status_equality() {
-        assert_eq!(ChallengeStatus::Active, ChallengeStatus::Active);
-        assert_ne!(ChallengeStatus::Active, ChallengeStatus::Paused);
-        assert_ne!(ChallengeStatus::Paused, ChallengeStatus::Deprecated);
-    }
-
-    #[test]
-    fn test_agent_status_equality() {
-        assert_eq!(AgentStatus::Pending, AgentStatus::Pending);
-        assert_ne!(AgentStatus::Pending, AgentStatus::Evaluating);
-        assert_ne!(AgentStatus::Evaluating, AgentStatus::Evaluated);
-        assert_ne!(AgentStatus::Evaluated, AgentStatus::Failed);
-    }
-
-    #[test]
-    fn test_get_agent_not_found() {
-        let (_dir, db) = create_test_db();
-
-        let result = db.get_agent("nonexistent").unwrap();
-        assert!(result.is_none());
-    }
-
-    #[test]
-    fn test_empty_collections() {
-        let (_dir, db) = create_test_db();
-
-        let challenges = db.list_challenges().unwrap();
-        assert_eq!(challenges.len(), 0);
-
-        let agents = db.list_agents_for_challenge("nonexistent").unwrap();
-        assert_eq!(agents.len(), 0);
-
-        let evals = db.get_evaluations_for_agent("nonexistent").unwrap();
-        assert_eq!(evals.len(), 0);
-
-        let weights = db.get_weights_at_block(999).unwrap();
-        assert_eq!(weights.len(), 0);
-    }
-}
diff --git a/crates/distributed-db/src/state.rs b/crates/distributed-db/src/state.rs
deleted file mode 100644
index 13a6142f..00000000
--- a/crates/distributed-db/src/state.rs
+++ /dev/null
@@ -1,414 +0,0 @@
-//! State management for the distributed database
-//!
-//! Tracks state transitions and provides rollback capabilities
-
-use crate::Transaction;
-use std::collections::VecDeque;
-
-/// Maximum state history entries
-const MAX_HISTORY: usize = 1000;
-
-/// State manager
-pub struct StateManager {
-    /// Current state root
-    current_root: [u8; 32],
-    /// History of state roots
-    history: VecDeque<StateEntry>,
-    /// Applied transactions (for rollback)
-    applied_txs: VecDeque<AppliedTx>,
-}
-
-/// State entry in history
-#[derive(Debug, Clone)]
-struct StateEntry {
-    root: [u8; 32],
-    block: u64,
-    timestamp: u64,
-}
-
-/// Applied transaction for rollback
-#[derive(Debug, Clone)]
-struct AppliedTx {
-    tx_id: [u8; 32],
-    undo_ops: Vec<UndoOp>,
-}
-
-/// Undo operation
-#[derive(Debug, Clone)]
-#[allow(dead_code)]
-enum UndoOp {
-    Put {
-        collection: String,
-        key: Vec<u8>,
-        old_value: Option<Vec<u8>>,
-    },
-    Delete {
-        collection: String,
-        key: Vec<u8>,
-        old_value: Vec<u8>,
-    },
-}
-
-impl StateManager {
-    /// Create a new state manager
-    pub fn new(initial_root: Option<[u8; 32]>) -> Self {
-        Self {
-            current_root: initial_root.unwrap_or([0u8; 32]),
-            history: VecDeque::new(),
-            applied_txs: VecDeque::new(),
-        }
-    }
-
-    /// Set current root
-    pub fn set_root(&mut self, root: [u8; 32]) {
-        self.current_root = root;
-    }
-
-    /// Get current root
-    pub fn root(&self) -> [u8; 32] {
-        self.current_root
-    }
-
-    /// Apply a transaction (record for potential rollback)
-    pub fn apply_tx(&mut self, tx: &Transaction) {
-        // Undo operations stored in history
-        let applied = AppliedTx {
-            tx_id: tx.id(),
-            undo_ops: Vec::new(),
-        };
-
-        self.applied_txs.push_back(applied);
-
-        // Limit history size
-        while self.applied_txs.len() > MAX_HISTORY {
-            self.applied_txs.pop_front();
-        }
-    }
-
-    /// Commit state at block
-    pub fn commit_block(&mut self, block: u64, root: [u8; 32]) {
-        let entry = StateEntry {
-            root,
-            block,
-            timestamp:
chrono::Utc::now().timestamp_millis() as u64,
-        };
-
-        self.history.push_back(entry);
-        self.current_root = root;
-
-        // Limit history size
-        while self.history.len() > MAX_HISTORY {
-            self.history.pop_front();
-        }
-    }
-
-    /// Get state root at block
-    pub fn root_at_block(&self, block: u64) -> Option<[u8; 32]> {
-        self.history
-            .iter()
-            .find(|e| e.block == block)
-            .map(|e| e.root)
-    }
-
-    /// Get latest block
-    pub fn latest_block(&self) -> Option<u64> {
-        self.history.back().map(|e| e.block)
-    }
-
-    /// Get state diff between two blocks
-    pub fn state_diff(&self, from_block: u64, to_block: u64) -> Option<StateDiff> {
-        let from_root = self.root_at_block(from_block)?;
-        let to_root = self.root_at_block(to_block)?;
-
-        Some(StateDiff {
-            from_block,
-            to_block,
-            from_root,
-            to_root,
-            // Diff computed from history
-            entries: Vec::new(),
-        })
-    }
-
-    /// Get history
-    pub fn history(&self) -> Vec<(u64, [u8; 32])> {
-        self.history.iter().map(|e| (e.block, e.root)).collect()
-    }
-
-    /// Clear all state
-    pub fn clear(&mut self) {
-        self.current_root = [0u8; 32];
-        self.history.clear();
-        self.applied_txs.clear();
-    }
-}
-
-/// State difference between two blocks
-#[derive(Debug, Clone)]
-pub struct StateDiff {
-    pub from_block: u64,
-    pub to_block: u64,
-    pub from_root: [u8; 32],
-    pub to_root: [u8; 32],
-    pub entries: Vec<DiffEntry>,
-}
-
-/// Single entry in state diff
-#[derive(Debug, Clone)]
-pub struct DiffEntry {
-    pub collection: String,
-    pub key: Vec<u8>,
-    pub old_value: Option<Vec<u8>>,
-    pub new_value: Option<Vec<u8>>,
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::test_utils::*;
-
-    #[test]
-    fn test_state_history() {
-        let mut state = StateManager::new(None);
-
-        state.commit_block(100, [1u8; 32]);
-        state.commit_block(101, [2u8; 32]);
-        state.commit_block(102, [3u8; 32]);
-
-        assert_eq!(state.root_at_block(100), Some([1u8; 32]));
-        assert_eq!(state.root_at_block(101), Some([2u8; 32]));
-        assert_eq!(state.root_at_block(102), Some([3u8; 32]));
-        assert_eq!(state.latest_block(), Some(102));
-    }
-
-    #[test]
-    fn test_state_manager_new_with_initial_root() {
-        let initial_root = [99u8; 32];
-        let state = StateManager::new(Some(initial_root));
-
-        assert_eq!(state.root(), initial_root);
-    }
-
-    #[test]
-    fn test_state_manager_new_without_initial_root() {
-        let state = StateManager::new(None);
-
-        assert_eq!(state.root(), [0u8; 32]);
-    }
-
-    #[test]
-    fn test_set_root() {
-        let mut state = StateManager::new(None);
-
-        let new_root = [42u8; 32];
-        state.set_root(new_root);
-
-        assert_eq!(state.root(), new_root);
-    }
-
-    #[test]
-    fn test_apply_tx() {
-        let mut state = StateManager::new(None);
-
-        let tx = create_test_tx();
-        state.apply_tx(&tx);
-
-        // Applied txs should be stored
-        assert_eq!(state.applied_txs.len(), 1);
-        assert_eq!(state.applied_txs[0].tx_id, tx.id());
-    }
-
-    #[test]
-    fn test_apply_tx_history_limit() {
-        let mut state = StateManager::new(None);
-
-        // Add more than MAX_HISTORY transactions
-        for _ in 0..(MAX_HISTORY + 100) {
-            let tx = create_test_tx();
-            state.apply_tx(&tx);
-        }
-
-        // Should not exceed MAX_HISTORY
-        assert_eq!(state.applied_txs.len(), MAX_HISTORY);
-    }
-
-    #[test]
-    fn test_commit_block() {
-        let mut state = StateManager::new(None);
-
-        let root1 = [1u8; 32];
-        let root2 = [2u8; 32];
-
-        state.commit_block(100, root1);
-        assert_eq!(state.root(), root1);
-        assert_eq!(state.history.len(), 1);
-
-        state.commit_block(101, root2);
-        assert_eq!(state.root(), root2);
-        assert_eq!(state.history.len(), 2);
-    }
-
-    #[test]
-    fn test_commit_block_history_limit() {
-        let mut state =
StateManager::new(None); - - // Add more than MAX_HISTORY blocks - for i in 0..(MAX_HISTORY + 100) { - state.commit_block(i as u64, [i as u8; 32]); - } - - // Should not exceed MAX_HISTORY - assert_eq!(state.history.len(), MAX_HISTORY); - - // Oldest entries should be removed - assert!(state.root_at_block(0).is_none()); - assert!(state.root_at_block(99).is_none()); - // Recent entries should still exist - assert!(state.root_at_block(MAX_HISTORY as u64).is_some()); - } - - #[test] - fn test_root_at_block_not_found() { - let state = StateManager::new(None); - - assert_eq!(state.root_at_block(999), None); - } - - #[test] - fn test_latest_block_empty() { - let state = StateManager::new(None); - - assert_eq!(state.latest_block(), None); - } - - #[test] - fn test_latest_block_with_history() { - let mut state = StateManager::new(None); - - state.commit_block(100, [1u8; 32]); - state.commit_block(200, [2u8; 32]); - state.commit_block(150, [3u8; 32]); // Out of order - - // Latest should be the last one added (150) - assert_eq!(state.latest_block(), Some(150)); - } - - #[test] - fn test_state_diff() { - let mut state = StateManager::new(None); - - state.commit_block(100, [1u8; 32]); - state.commit_block(101, [2u8; 32]); - - let diff = state.state_diff(100, 101); - assert!(diff.is_some()); - - let diff = diff.unwrap(); - assert_eq!(diff.from_block, 100); - assert_eq!(diff.to_block, 101); - assert_eq!(diff.from_root, [1u8; 32]); - assert_eq!(diff.to_root, [2u8; 32]); - } - - #[test] - fn test_state_diff_block_not_found() { - let mut state = StateManager::new(None); - - state.commit_block(100, [1u8; 32]); - - // One of the blocks doesn't exist - assert!(state.state_diff(100, 999).is_none()); - assert!(state.state_diff(999, 100).is_none()); - } - - #[test] - fn test_history() { - let mut state = StateManager::new(None); - - state.commit_block(100, [1u8; 32]); - state.commit_block(101, [2u8; 32]); - state.commit_block(102, [3u8; 32]); - - let history = state.history(); - assert_eq!(history.len(), 3); - assert_eq!(history[0], (100, [1u8; 32])); - assert_eq!(history[1], (101, [2u8; 32])); - assert_eq!(history[2], (102, [3u8; 32])); - } - - #[test] - fn test_clear() { - let mut state = StateManager::new(Some([99u8; 32])); - - state.commit_block(100, [1u8; 32]); - state.commit_block(101, [2u8; 32]); - - let tx = create_test_tx(); - state.apply_tx(&tx); - - state.clear(); - - assert_eq!(state.root(), [0u8; 32]); - assert_eq!(state.history.len(), 0); - assert_eq!(state.applied_txs.len(), 0); - assert_eq!(state.latest_block(), None); - } - - #[test] - fn test_diff_entry() { - let entry = DiffEntry { - collection: "test".to_string(), - key: b"key".to_vec(), - old_value: Some(b"old".to_vec()), - new_value: Some(b"new".to_vec()), - }; - - assert_eq!(entry.collection, "test"); - assert_eq!(entry.old_value, Some(b"old".to_vec())); - assert_eq!(entry.new_value, Some(b"new".to_vec())); - } - - #[test] - fn test_undo_op_put() { - let undo_op = UndoOp::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - old_value: Some(b"old".to_vec()), - }; - - match undo_op { - UndoOp::Put { - collection, - key, - old_value, - } => { - assert_eq!(collection, "test"); - assert_eq!(key, b"key"); - assert_eq!(old_value, Some(b"old".to_vec())); - } - _ => panic!("Expected UndoOp::Put"), - } - } - - #[test] - fn test_undo_op_delete() { - let undo_op = UndoOp::Delete { - collection: "test".to_string(), - key: b"key".to_vec(), - old_value: b"value".to_vec(), - }; - - match undo_op { - UndoOp::Delete { - collection, - key, - 
old_value,
-            } => {
-                assert_eq!(collection, "test");
-                assert_eq!(key, b"key");
-                assert_eq!(old_value, b"value");
-            }
-            _ => panic!("Expected UndoOp::Delete"),
-        }
-    }
-}
diff --git a/crates/distributed-db/src/storage.rs b/crates/distributed-db/src/storage.rs
deleted file mode 100644
index dd7ebebb..00000000
--- a/crates/distributed-db/src/storage.rs
+++ /dev/null
@@ -1,773 +0,0 @@
-//! RocksDB storage backend
-//!
-//! Provides persistent key-value storage with:
-//! - Column families for data separation
-//! - Atomic batch writes
-//! - Efficient iteration
-//! - Anti-corruption protections (WAL, sync, atomic flush)
-
-use rocksdb::{
-    BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, IteratorMode, MultiThreaded,
-    Options, WriteBatch, WriteOptions,
-};
-use std::path::Path;
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::Arc;
-use tracing::{debug, info, warn};
-
-/// Column families for different data types
-pub const CF_CHALLENGES: &str = "challenges";
-pub const CF_AGENTS: &str = "agents";
-pub const CF_EVALUATIONS: &str = "evaluations";
-pub const CF_WEIGHTS: &str = "weights";
-pub const CF_TRANSACTIONS: &str = "transactions";
-pub const CF_STATE: &str = "state";
-pub const CF_INDEXES: &str = "indexes";
-pub const CF_METADATA: &str = "metadata";
-
-const ALL_CFS: &[&str] = &[
-    CF_CHALLENGES,
-    CF_AGENTS,
-    CF_EVALUATIONS,
-    CF_WEIGHTS,
-    CF_TRANSACTIONS,
-    CF_STATE,
-    CF_INDEXES,
-    CF_METADATA,
-];
-
-/// State root key in metadata
-const STATE_ROOT_KEY: &[u8] = b"state_root";
-
-/// Minimum free disk space (1GB)
-const MIN_DISK_SPACE_BYTES: u64 = 1024 * 1024 * 1024;
-
-/// RocksDB storage wrapper with anti-corruption protections
-pub struct RocksStorage {
-    db: DBWithThreadMode<MultiThreaded>,
-    /// Flag to prevent writes during shutdown
-    shutdown: AtomicBool,
-}
-
-impl RocksStorage {
-    /// Open or create the database
-    pub fn open(path: impl AsRef<Path>) -> anyhow::Result<Self> {
-        let path = path.as_ref();
-        info!("Opening RocksDB at {:?}", path);
-
-        // Check disk space before opening
-        Self::check_disk_space(path)?;
-
-        let mut opts = Options::default();
-        opts.create_if_missing(true);
-        opts.create_missing_column_families(true);
-        opts.set_max_open_files(256);
-        opts.set_keep_log_file_num(3);
-        opts.set_max_total_wal_size(64 * 1024 * 1024); // 64MB WAL
-        opts.set_write_buffer_size(32 * 1024 * 1024); // 32MB
-        opts.set_max_write_buffer_number(3);
-        opts.set_target_file_size_base(64 * 1024 * 1024); // 64MB
-        opts.set_level_zero_file_num_compaction_trigger(4);
-        opts.set_level_zero_slowdown_writes_trigger(20);
-        opts.set_level_zero_stop_writes_trigger(30);
-        opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
-
-        // Anti-corruption settings
-        opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::AbsoluteConsistency);
-        opts.set_atomic_flush(true); // Atomic flush across column families
-
-        // Column family options
-        let cf_opts = Options::default();
-        let cfs: Vec<ColumnFamilyDescriptor> = ALL_CFS
-            .iter()
-            .map(|name| ColumnFamilyDescriptor::new(*name, cf_opts.clone()))
-            .collect();
-
-        let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(&opts, path, cfs)?;
-
-        info!(
-            "RocksDB opened successfully with {} column families",
-            ALL_CFS.len()
-        );
-
-        Ok(Self {
-            db,
-            shutdown: AtomicBool::new(false),
-        })
-    }
-
-    /// Check disk space before operations
-    fn check_disk_space(path: &Path) -> anyhow::Result<()> {
-        // Get the directory to check (create if needed for new DBs)
-        let check_path = if path.exists() {
-            path.to_path_buf()
-        } else if let Some(parent) =
path.parent() {
-            parent.to_path_buf()
-        } else {
-            return Ok(());
-        };
-
-        #[cfg(unix)]
-        {
-            if check_path.exists() {
-                // Use statvfs for disk space on Unix
-                let output = std::process::Command::new("df")
-                    .arg("-B1")
-                    .arg(&check_path)
-                    .output();
-
-                if let Ok(output) = output {
-                    if let Ok(stdout) = String::from_utf8(output.stdout) {
-                        // Parse df output (second line, 4th column is available)
-                        if let Some(line) = stdout.lines().nth(1) {
-                            let parts: Vec<&str> = line.split_whitespace().collect();
-                            if parts.len() >= 4 {
-                                if let Ok(avail) = parts[3].parse::<u64>() {
-                                    if avail < MIN_DISK_SPACE_BYTES {
-                                        return Err(anyhow::anyhow!(
-                                            "Insufficient disk space: {} bytes available, {} required",
-                                            avail,
-                                            MIN_DISK_SPACE_BYTES
-                                        ));
-                                    }
-                                    debug!("Disk space check passed: {} bytes available", avail);
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        }
-        Ok(())
-    }
-
-    /// Mark shutdown to prevent new writes
-    pub fn shutdown(&self) {
-        self.shutdown.store(true, Ordering::SeqCst);
-        info!("RocksStorage marked for shutdown");
-        // Flush WAL before shutdown
-        if let Err(e) = self.db.flush_wal(true) {
-            warn!("Failed to flush WAL on shutdown: {}", e);
-        }
-    }
-
-    /// Check if shutdown is in progress
-    fn is_shutdown(&self) -> bool {
-        self.shutdown.load(Ordering::SeqCst)
-    }
-
-    /// Get column family handle
-    fn cf(&self, name: &str) -> anyhow::Result<Arc<BoundColumnFamily<'_>>> {
-        self.db
-            .cf_handle(name)
-            .ok_or_else(|| anyhow::anyhow!("Column family '{}' not found", name))
-    }
-
-    /// Get value by key
-    pub fn get(&self, collection: &str, key: &[u8]) -> anyhow::Result<Option<Vec<u8>>> {
-        let cf = self.cf(collection)?;
-        Ok(self.db.get_cf(&cf, key)?)
-    }
-
-    /// Put value (async - buffered by WAL)
-    pub fn put(&self, collection: &str, key: &[u8], value: &[u8]) -> anyhow::Result<()> {
-        if self.is_shutdown() {
-            return Err(anyhow::anyhow!("Storage is shutting down"));
-        }
-        let cf = self.cf(collection)?;
-        self.db.put_cf(&cf, key, value)?;
-        debug!(
-            "Put {}:{} ({} bytes)",
-            collection,
-            hex::encode(&key[..key.len().min(8)]),
-            value.len()
-        );
-        Ok(())
-    }
-
-    /// Put value with sync (for critical data - waits for disk write)
-    pub fn put_sync(&self, collection: &str, key: &[u8], value: &[u8]) -> anyhow::Result<()> {
-        if self.is_shutdown() {
-            return Err(anyhow::anyhow!("Storage is shutting down"));
-        }
-        let cf = self.cf(collection)?;
-        let mut opts = WriteOptions::default();
-        opts.set_sync(true); // Force sync to disk
-        self.db.put_cf_opt(&cf, key, value, &opts)?;
-        debug!(
-            "Put (sync) {}:{} ({} bytes)",
-            collection,
-            hex::encode(&key[..key.len().min(8)]),
-            value.len()
-        );
-        Ok(())
-    }
-
-    /// Delete value
-    pub fn delete(&self, collection: &str, key: &[u8]) -> anyhow::Result<()> {
-        let cf = self.cf(collection)?;
-        self.db.delete_cf(&cf, key)?;
-        debug!(
-            "Delete {}:{}",
-            collection,
-            hex::encode(&key[..key.len().min(8)])
-        );
-        Ok(())
-    }
-
-    /// Batch write operations
-    pub fn write_batch(&self, operations: Vec<BatchOp>) -> anyhow::Result<()> {
-        let mut batch = WriteBatch::default();
-
-        for op in operations {
-            match op {
-                BatchOp::Put {
-                    collection,
-                    key,
-                    value,
-                } => {
-                    let cf = self.cf(&collection)?;
-                    batch.put_cf(&cf, &key, &value);
-                }
-                BatchOp::Delete { collection, key } => {
-                    let cf = self.cf(&collection)?;
-                    batch.delete_cf(&cf, &key);
-                }
-            }
-        }
-
-        self.db.write(batch)?;
-        Ok(())
-    }
-
-    /// Iterate over a collection
-    pub fn iter_collection(&self, collection: &str) -> anyhow::Result<Vec<(Vec<u8>, Vec<u8>)>> {
-        let cf = self.cf(collection)?;
-        let iter = self.db.iterator_cf(&cf, IteratorMode::Start);
-
-        let mut results = Vec::new();
-        for item in iter {
-            let (key, value) = item?;
-            results.push((key.to_vec(), value.to_vec()));
-        }
-
-        Ok(results)
-    }
-
-    /// Iterate with prefix
-    pub fn iter_prefix(
-        &self,
-        collection: &str,
-        prefix: &[u8],
-    ) -> anyhow::Result<Vec<(Vec<u8>, Vec<u8>)>> {
-        let cf = self.cf(collection)?;
-        let iter = self.db.prefix_iterator_cf(&cf, prefix);
-
-        let mut results = Vec::new();
-        for item in iter {
-            let (key, value) = item?;
-            if !key.starts_with(prefix) {
-                break;
-            }
-            results.push((key.to_vec(), value.to_vec()));
-        }
-
-        Ok(results)
-    }
-
-    /// List all collections
-    pub fn list_collections(&self) -> anyhow::Result<Vec<String>> {
-        Ok(ALL_CFS.iter().map(|s| s.to_string()).collect())
-    }
-
-    /// Get collection size (approximate)
-    pub fn collection_size(&self, collection: &str) -> anyhow::Result<u64> {
-        let cf = self.cf(collection)?;
-        let props = self
-            .db
-            .property_int_value_cf(&cf, "rocksdb.estimate-num-keys")?;
-        Ok(props.unwrap_or(0))
-    }
-
-    /// Get state root
-    pub fn get_state_root(&self) -> anyhow::Result<Option<[u8; 32]>> {
-        let cf = self.cf(CF_METADATA)?;
-        if let Some(value) = self.db.get_cf(&cf, STATE_ROOT_KEY)? {
-            if value.len() == 32 {
-                let mut root = [0u8; 32];
-                root.copy_from_slice(&value);
-                return Ok(Some(root));
-            }
-        }
-        Ok(None)
-    }
-
-    /// Set state root
-    pub fn set_state_root(&self, root: &[u8; 32]) -> anyhow::Result<()> {
-        let cf = self.cf(CF_METADATA)?;
-        self.db.put_cf(&cf, STATE_ROOT_KEY, root)?;
-        Ok(())
-    }
-
-    /// Store confirmed transaction
-    pub fn store_confirmed_tx(
-        &self,
-        tx: &super::Transaction,
-        receipt: &super::TransactionReceipt,
-        block: u64,
-    ) -> anyhow::Result<()> {
-        let cf = self.cf(CF_TRANSACTIONS)?;
-
-        let key = tx.id();
-        let value = bincode::serialize(&(tx, receipt, block))?;
-
-        self.db.put_cf(&cf, key, &value)?;
-
-        // Also index by block
-        let block_key = format!("block:{}:{}", block, hex::encode(key));
-        self.db.put_cf(&cf, block_key.as_bytes(), key)?;
-
-        Ok(())
-    }
-
-    /// Get transactions for block
-    pub fn get_block_transactions(&self, block: u64) -> anyhow::Result<Vec<[u8; 32]>> {
-        let prefix = format!("block:{}:", block);
-        let entries = self.iter_prefix(CF_TRANSACTIONS, prefix.as_bytes())?;
-
-        let mut tx_ids = Vec::new();
-        for (_, value) in entries {
-            if value.len() == 32 {
-                let mut id = [0u8; 32];
-                id.copy_from_slice(&value);
-                tx_ids.push(id);
-            }
-        }
-
-        Ok(tx_ids)
-    }
-
-    /// Compact database
-    pub fn compact(&self) -> anyhow::Result<()> {
-        info!("Compacting database...");
-        for cf_name in ALL_CFS {
-            if let Ok(cf) = self.cf(cf_name) {
-                self.db.compact_range_cf(&cf, None::<&[u8]>, None::<&[u8]>);
-            }
-        }
-        info!("Database compaction complete");
-        Ok(())
-    }
-
-    /// Get database stats
-    pub fn stats(&self) -> StorageStats {
-        let mut stats = StorageStats::default();
-
-        for cf_name in ALL_CFS {
-            if let Ok(size) = self.collection_size(cf_name) {
-                stats.collection_sizes.insert(cf_name.to_string(), size);
-                stats.total_keys += size;
-            }
-        }
-
-        stats
-    }
-}
-
-/// Batch operation
-#[derive(Debug, Clone)]
-pub enum BatchOp {
-    Put {
-        collection: String,
-        key: Vec<u8>,
-        value: Vec<u8>,
-    },
-    Delete {
-        collection: String,
-        key: Vec<u8>,
-    },
-}
-
-/// Storage statistics
-#[derive(Debug, Clone, Default)]
-pub struct StorageStats {
-    pub total_keys: u64,
-    pub collection_sizes: std::collections::HashMap<String, u64>,
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use tempfile::tempdir;
-
-    #[test]
-    fn test_basic_operations() {
-        let dir = tempdir().unwrap();
-        let storage = RocksStorage::open(dir.path()).unwrap();
-
-        // Put
-        storage
-            .put(CF_CHALLENGES, b"test-key",
b"test-value") - .unwrap(); - - // Get - let value = storage.get(CF_CHALLENGES, b"test-key").unwrap(); - assert_eq!(value, Some(b"test-value".to_vec())); - - // Delete - storage.delete(CF_CHALLENGES, b"test-key").unwrap(); - let value = storage.get(CF_CHALLENGES, b"test-key").unwrap(); - assert!(value.is_none()); - } - - #[test] - fn test_batch_write() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - let ops = vec![ - BatchOp::Put { - collection: CF_CHALLENGES.to_string(), - key: b"key1".to_vec(), - value: b"value1".to_vec(), - }, - BatchOp::Put { - collection: CF_CHALLENGES.to_string(), - key: b"key2".to_vec(), - value: b"value2".to_vec(), - }, - ]; - - storage.write_batch(ops).unwrap(); - - assert_eq!( - storage.get(CF_CHALLENGES, b"key1").unwrap(), - Some(b"value1".to_vec()) - ); - assert_eq!( - storage.get(CF_CHALLENGES, b"key2").unwrap(), - Some(b"value2".to_vec()) - ); - } - - #[test] - fn test_iteration() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - storage.put(CF_AGENTS, b"agent:1", b"data1").unwrap(); - storage.put(CF_AGENTS, b"agent:2", b"data2").unwrap(); - storage.put(CF_AGENTS, b"other:1", b"other").unwrap(); - - // Iterate all - let all = storage.iter_collection(CF_AGENTS).unwrap(); - assert_eq!(all.len(), 3); - - // Iterate prefix - let agents = storage.iter_prefix(CF_AGENTS, b"agent:").unwrap(); - assert_eq!(agents.len(), 2); - } - - #[test] - fn test_storage_open() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - assert!(!storage.is_shutdown()); - } - - #[test] - fn test_put_sync() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - storage.put_sync(CF_CHALLENGES, b"key1", b"value1").unwrap(); - let value = storage.get(CF_CHALLENGES, b"key1").unwrap(); - assert_eq!(value, Some(b"value1".to_vec())); - } - - #[test] - fn test_shutdown() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - assert!(!storage.is_shutdown()); - storage.shutdown(); - assert!(storage.is_shutdown()); - - // Operations should fail after shutdown - let result = storage.put(CF_CHALLENGES, b"key", b"value"); - assert!(result.is_err()); - } - - #[test] - fn test_shutdown_put_sync() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - storage.shutdown(); - let result = storage.put_sync(CF_CHALLENGES, b"key", b"value"); - assert!(result.is_err()); - } - - #[test] - fn test_state_root_operations() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - // Initially no state root - assert!(storage.get_state_root().unwrap().is_none()); - - // Set state root - let root = [42u8; 32]; - storage.set_state_root(&root).unwrap(); - - // Get state root - let retrieved = storage.get_state_root().unwrap(); - assert_eq!(retrieved, Some(root)); - } - - #[test] - fn test_list_collections() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - let collections = storage.list_collections().unwrap(); - assert!(collections.contains(&CF_CHALLENGES.to_string())); - assert!(collections.contains(&CF_AGENTS.to_string())); - assert!(collections.contains(&CF_METADATA.to_string())); - assert_eq!(collections.len(), ALL_CFS.len()); - } - - #[test] - fn test_collection_size() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - // Initially empty - let 
size = storage.collection_size(CF_CHALLENGES).unwrap(); - assert_eq!(size, 0); - - // Add some data - storage.put(CF_CHALLENGES, b"key1", b"value1").unwrap(); - storage.put(CF_CHALLENGES, b"key2", b"value2").unwrap(); - - let size = storage.collection_size(CF_CHALLENGES).unwrap(); - assert!(size >= 2); - } - - #[test] - fn test_store_confirmed_tx() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - let hotkey = platform_core::Hotkey::from_bytes(&[1u8; 32]).unwrap(); - let tx = crate::Transaction::new( - hotkey, - crate::Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }, - ); - - let receipt = crate::TransactionReceipt { - tx_id: tx.id(), - success: true, - execution_time_us: 100, - state_root: [0u8; 32], - }; - - storage.store_confirmed_tx(&tx, &receipt, 100).unwrap(); - - // Verify it was stored - let tx_ids = storage.get_block_transactions(100).unwrap(); - assert_eq!(tx_ids.len(), 1); - assert_eq!(tx_ids[0], tx.id()); - } - - #[test] - fn test_get_block_transactions() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - let hotkey = platform_core::Hotkey::from_bytes(&[1u8; 32]).unwrap(); - - // Store multiple transactions for the same block - for i in 0..3 { - let tx = crate::Transaction::new( - hotkey.clone(), - crate::Operation::Put { - collection: "test".to_string(), - key: format!("key{}", i).into_bytes(), - value: b"value".to_vec(), - }, - ); - - let receipt = crate::TransactionReceipt { - tx_id: tx.id(), - success: true, - execution_time_us: 100, - state_root: [0u8; 32], - }; - - storage.store_confirmed_tx(&tx, &receipt, 50).unwrap(); - } - - let tx_ids = storage.get_block_transactions(50).unwrap(); - assert_eq!(tx_ids.len(), 3); - - // Different block should return empty - let tx_ids_other = storage.get_block_transactions(99).unwrap(); - assert_eq!(tx_ids_other.len(), 0); - } - - #[test] - fn test_batch_write_with_delete() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - // First put some data - storage.put(CF_AGENTS, b"key1", b"value1").unwrap(); - storage.put(CF_AGENTS, b"key2", b"value2").unwrap(); - - // Batch operations with mix of put and delete - let ops = vec![ - BatchOp::Put { - collection: CF_AGENTS.to_string(), - key: b"key3".to_vec(), - value: b"value3".to_vec(), - }, - BatchOp::Delete { - collection: CF_AGENTS.to_string(), - key: b"key1".to_vec(), - }, - ]; - - storage.write_batch(ops).unwrap(); - - // key1 should be deleted - assert!(storage.get(CF_AGENTS, b"key1").unwrap().is_none()); - // key2 should still exist - assert_eq!( - storage.get(CF_AGENTS, b"key2").unwrap(), - Some(b"value2".to_vec()) - ); - // key3 should be added - assert_eq!( - storage.get(CF_AGENTS, b"key3").unwrap(), - Some(b"value3".to_vec()) - ); - } - - #[test] - fn test_compact() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - // Add and delete some data to create fragmentation - for i in 0..100 { - storage - .put(CF_CHALLENGES, format!("key{}", i).as_bytes(), b"value") - .unwrap(); - } - for i in 0..50 { - storage - .delete(CF_CHALLENGES, format!("key{}", i).as_bytes()) - .unwrap(); - } - - // Compact should succeed - storage.compact().unwrap(); - } - - #[test] - fn test_stats() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - storage.put(CF_CHALLENGES, b"key1", b"value1").unwrap(); - storage.put(CF_AGENTS, b"key2", 
b"value2").unwrap(); - - let stats = storage.stats(); - assert!(stats.total_keys >= 2); - assert!(stats.collection_sizes.contains_key(CF_CHALLENGES)); - } - - #[test] - fn test_batch_op_variants() { - let put_op = BatchOp::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }; - - let delete_op = BatchOp::Delete { - collection: "test".to_string(), - key: b"key".to_vec(), - }; - - // Just verify they can be created - match put_op { - BatchOp::Put { .. } => {} - _ => panic!("Expected Put"), - } - - match delete_op { - BatchOp::Delete { .. } => {} - _ => panic!("Expected Delete"), - } - } - - #[test] - fn test_storage_stats_default() { - let stats = StorageStats::default(); - assert_eq!(stats.total_keys, 0); - assert!(stats.collection_sizes.is_empty()); - } - - #[test] - fn test_iter_prefix_empty() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - let results = storage.iter_prefix(CF_AGENTS, b"nonexistent:").unwrap(); - assert_eq!(results.len(), 0); - } - - #[test] - fn test_get_nonexistent_key() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - let value = storage.get(CF_CHALLENGES, b"nonexistent").unwrap(); - assert!(value.is_none()); - } - - #[test] - fn test_delete_nonexistent_key() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - // Should not error - storage.delete(CF_CHALLENGES, b"nonexistent").unwrap(); - } - - #[test] - fn test_iter_collection_empty() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - let results = storage.iter_collection(CF_WEIGHTS).unwrap(); - assert_eq!(results.len(), 0); - } - - #[test] - fn test_cf_invalid_name() { - let dir = tempdir().unwrap(); - let storage = RocksStorage::open(dir.path()).unwrap(); - - let result = storage.get("invalid_cf", b"key"); - assert!(result.is_err()); - } -} diff --git a/crates/distributed-db/src/test_utils.rs b/crates/distributed-db/src/test_utils.rs deleted file mode 100644 index 79212953..00000000 --- a/crates/distributed-db/src/test_utils.rs +++ /dev/null @@ -1,45 +0,0 @@ -//! Shared test utilities for distributed-db tests -//! -//! This module provides common test helpers to reduce duplication -//! across test modules. 
-
-#![cfg(test)]
-
-use crate::{DistributedDB, IndexManager, Operation, RocksStorage, Transaction};
-use platform_core::Hotkey;
-use std::sync::Arc;
-use tempfile::{tempdir, TempDir};
-
-/// Create a test Hotkey/validator with a specific byte value
-pub fn create_test_hotkey(val: u8) -> Hotkey {
-    Hotkey::from_bytes(&[val; 32]).unwrap()
-}
-
-/// Create a test DistributedDB instance with temporary storage
-pub fn create_test_db() -> (TempDir, DistributedDB) {
-    let dir = tempdir().unwrap();
-    let validator = create_test_hotkey(1);
-    let db = DistributedDB::open(dir.path(), validator).unwrap();
-    (dir, db)
-}
-
-/// Create a test IndexManager with temporary storage
-pub fn create_test_index_manager() -> (IndexManager, Arc<RocksStorage>, TempDir) {
-    let dir = tempdir().unwrap();
-    let storage = Arc::new(RocksStorage::open(dir.path()).unwrap());
-    let indexes = IndexManager::new(storage.clone()).unwrap();
-    (indexes, storage, dir)
-}
-
-/// Create a test Transaction with a Put operation
-pub fn create_test_tx() -> Transaction {
-    let hotkey = create_test_hotkey(1);
-    Transaction::new(
-        hotkey,
-        Operation::Put {
-            collection: "test".to_string(),
-            key: b"key".to_vec(),
-            value: b"value".to_vec(),
-        },
-    )
-}
diff --git a/crates/distributed-db/src/transactions.rs b/crates/distributed-db/src/transactions.rs
deleted file mode 100644
index c10d2536..00000000
--- a/crates/distributed-db/src/transactions.rs
+++ /dev/null
@@ -1,718 +0,0 @@
-//! Transaction management for optimistic execution
-//!
-//! Transactions are:
-//! - Applied immediately (optimistic)
-//! - Confirmed at Bittensor block boundaries
-//! - Rolled back if consensus fails
-
-use platform_core::Hotkey;
-use serde::{Deserialize, Serialize};
-use sha2::{Digest, Sha256};
-use std::collections::HashMap;
-
-/// Database operation
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub enum Operation {
-    /// Put a single key-value
-    Put {
-        collection: String,
-        key: Vec<u8>,
-        value: Vec<u8>,
-    },
-    /// Delete a key
-    Delete { collection: String, key: Vec<u8> },
-    /// Batch put multiple key-values
-    BatchPut {
-        operations: Vec<(String, Vec<u8>, Vec<u8>)>,
-    },
-}
-
-/// Transaction in the distributed database
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct Transaction {
-    /// Transaction ID (hash)
-    id: [u8; 32],
-    /// Sender hotkey
-    pub sender: Hotkey,
-    /// Operation to perform
-    pub operation: Operation,
-    /// Timestamp (Unix millis)
-    pub timestamp: u64,
-    /// Nonce for uniqueness
-    pub nonce: u64,
-    /// Signature over (sender, operation, timestamp, nonce)
-    pub signature: Vec<u8>,
-}
-
-impl Transaction {
-    /// Create a new transaction
-    pub fn new(sender: Hotkey, operation: Operation) -> Self {
-        let timestamp = chrono::Utc::now().timestamp_millis() as u64;
-        let nonce = rand::random::<u64>();
-
-        let mut tx = Self {
-            id: [0u8; 32],
-            sender,
-            operation,
-            timestamp,
-            nonce,
-            signature: Vec::new(),
-        };
-
-        tx.id = tx.compute_id();
-        tx
-    }
-
-    /// Compute transaction ID
-    fn compute_id(&self) -> [u8; 32] {
-        let mut hasher = Sha256::new();
-        hasher.update(self.sender.as_bytes());
-        hasher.update(bincode::serialize(&self.operation).unwrap_or_default());
-        hasher.update(self.timestamp.to_le_bytes());
-        hasher.update(self.nonce.to_le_bytes());
-        hasher.finalize().into()
-    }
-
-    /// Get transaction ID
-    pub fn id(&self) -> [u8; 32] {
-        self.id
-    }
-
-    /// Validate transaction
-    pub fn validate(&self) -> anyhow::Result<()> {
-        // Check ID matches
-        if self.id != self.compute_id() {
-            anyhow::bail!("Invalid transaction ID");
-        }
-
-        // Check timestamp is
reasonable (within 1 hour)
-        let now = chrono::Utc::now().timestamp_millis() as u64;
-        let one_hour = 60 * 60 * 1000;
-        if self.timestamp > now + one_hour || self.timestamp < now.saturating_sub(one_hour) {
-            anyhow::bail!("Transaction timestamp out of range");
-        }
-
-        // Signature verification
-
-        Ok(())
-    }
-
-    /// Sign the transaction
-    pub fn sign(&mut self, _keypair: &platform_core::Keypair) {
-        // Signature implementation
-        self.signature = vec![0u8; 64];
-    }
-
-    /// Get affected keys
-    pub fn affected_keys(&self) -> Vec<(String, Vec<u8>)> {
-        match &self.operation {
-            Operation::Put {
-                collection, key, ..
-            } => {
-                vec![(collection.clone(), key.clone())]
-            }
-            Operation::Delete { collection, key } => {
-                vec![(collection.clone(), key.clone())]
-            }
-            Operation::BatchPut { operations } => operations
-                .iter()
-                .map(|(c, k, _)| (c.clone(), k.clone()))
-                .collect(),
-        }
-    }
-}
-
-/// Transaction receipt after execution
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct TransactionReceipt {
-    /// Transaction ID
-    pub tx_id: [u8; 32],
-    /// Whether execution succeeded
-    pub success: bool,
-    /// Execution time in microseconds
-    pub execution_time_us: u64,
-    /// State root after execution
-    pub state_root: [u8; 32],
-}
-
-/// Transaction status
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
-pub enum TransactionStatus {
-    /// Pending confirmation
-    Pending,
-    /// Confirmed in a block
-    Confirmed,
-    /// Rolled back
-    RolledBack,
-}
-
-/// Pending transaction entry
-#[derive(Debug, Clone)]
-struct PendingTx {
-    tx: Transaction,
-    receipt: TransactionReceipt,
-    status: TransactionStatus,
-    submitted_at: u64,
-}
-
-/// Transaction pool for optimistic execution
-pub struct TransactionPool {
-    /// Pending transactions
-    pending: HashMap<[u8; 32], PendingTx>,
-    /// Confirmed transactions (block -> tx_ids)
-    confirmed: HashMap<u64, Vec<[u8; 32]>>,
-    /// Nonce per sender to prevent replays
-    nonces: HashMap<Hotkey, u64>,
-}
-
-impl TransactionPool {
-    /// Create a new transaction pool
-    pub fn new() -> Self {
-        Self {
-            pending: HashMap::new(),
-            confirmed: HashMap::new(),
-            nonces: HashMap::new(),
-        }
-    }
-
-    /// Add a pending transaction
-    pub fn add_pending(&mut self, tx: Transaction, receipt: TransactionReceipt) {
-        // Check nonce
-        let current_nonce = self.nonces.get(&tx.sender).copied().unwrap_or(0);
-        if tx.nonce <= current_nonce {
-            tracing::warn!(
-                "Transaction nonce too low: {} <= {}",
-                tx.nonce,
-                current_nonce
-            );
-            return;
-        }
-
-        self.nonces.insert(tx.sender.clone(), tx.nonce);
-
-        let entry = PendingTx {
-            tx,
-            receipt,
-            status: TransactionStatus::Pending,
-            submitted_at: chrono::Utc::now().timestamp_millis() as u64,
-        };
-
-        self.pending.insert(entry.tx.id(), entry);
-    }
-
-    /// Get pending transactions for a block
-    pub fn get_pending_for_block(&self, _block: u64) -> Vec<(Transaction, TransactionReceipt)> {
-        self.pending
-            .values()
-            .filter(|p| p.status == TransactionStatus::Pending)
-            .map(|p| (p.tx.clone(), p.receipt.clone()))
-            .collect()
-    }
-
-    /// Confirm a transaction
-    pub fn confirm(&mut self, tx_id: [u8; 32], block: u64) {
-        if let Some(pending) = self.pending.get_mut(&tx_id) {
-            pending.status = TransactionStatus::Confirmed;
-        }
-
-        self.confirmed.entry(block).or_default().push(tx_id);
-    }
-
-    /// Rollback a transaction
-    pub fn rollback(&mut self, tx_id: [u8; 32]) {
-        if let Some(pending) = self.pending.get_mut(&tx_id) {
-            pending.status = TransactionStatus::RolledBack;
-        }
-    }
-
-    /// Get transaction status
-    pub fn status(&self, tx_id: &[u8; 32]) -> Option<TransactionStatus> {
self.pending.get(tx_id).map(|p| p.status) - } - - /// Get pending count - pub fn pending_count(&self) -> usize { - self.pending - .values() - .filter(|p| p.status == TransactionStatus::Pending) - .count() - } - - /// Cleanup old pending transactions - pub fn cleanup_old(&mut self, before_block: u64) { - // Remove confirmed blocks older than threshold - self.confirmed.retain(|&block, _| block >= before_block); - - // Remove old pending transactions - let cutoff = chrono::Utc::now().timestamp_millis() as u64 - (24 * 60 * 60 * 1000); // 24 hours - self.pending - .retain(|_, p| p.status == TransactionStatus::Pending && p.submitted_at > cutoff); - } - - /// Get transactions by sender - pub fn get_by_sender(&self, sender: &Hotkey) -> Vec<&Transaction> { - self.pending - .values() - .filter(|p| &p.tx.sender == sender) - .map(|p| &p.tx) - .collect() - } -} - -impl Default for TransactionPool { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::*; - - fn create_tx_with_nonce(sender: Hotkey, operation: Operation, nonce: u64) -> Transaction { - let timestamp = chrono::Utc::now().timestamp_millis() as u64; - - let mut tx = Transaction { - id: [0u8; 32], - sender, - operation, - timestamp, - nonce, - signature: Vec::new(), - }; - - // Compute the ID - let mut hasher = sha2::Sha256::new(); - use sha2::Digest; - hasher.update(tx.sender.as_bytes()); - hasher.update(bincode::serialize(&tx.operation).unwrap_or_default()); - hasher.update(tx.timestamp.to_le_bytes()); - hasher.update(tx.nonce.to_le_bytes()); - tx.id = hasher.finalize().into(); - - tx - } - - #[test] - fn test_transaction_creation() { - let sender = create_test_hotkey(1); - let tx = Transaction::new( - sender, - Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }, - ); - - assert!(tx.validate().is_ok()); - assert_ne!(tx.id(), [0u8; 32]); - } - - #[test] - fn test_transaction_pool() { - let mut pool = TransactionPool::new(); - let sender = create_test_hotkey(1); - - let tx = Transaction::new( - sender.clone(), - Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }, - ); - - let receipt = TransactionReceipt { - tx_id: tx.id(), - success: true, - execution_time_us: 100, - state_root: [0u8; 32], - }; - - pool.add_pending(tx.clone(), receipt); - assert_eq!(pool.pending_count(), 1); - - pool.confirm(tx.id(), 100); - assert_eq!(pool.status(&tx.id()), Some(TransactionStatus::Confirmed)); - } - - #[test] - fn test_operation_put() { - let op = Operation::Put { - collection: "test".to_string(), - key: b"key1".to_vec(), - value: b"value1".to_vec(), - }; - - let sender = create_test_hotkey(5); - let tx = Transaction::new(sender, op); - - let keys = tx.affected_keys(); - assert_eq!(keys.len(), 1); - assert_eq!(keys[0].0, "test"); - assert_eq!(keys[0].1, b"key1"); - } - - #[test] - fn test_operation_delete() { - let op = Operation::Delete { - collection: "test".to_string(), - key: b"key1".to_vec(), - }; - - let sender = create_test_hotkey(6); - let tx = Transaction::new(sender, op); - - let keys = tx.affected_keys(); - assert_eq!(keys.len(), 1); - assert_eq!(keys[0].0, "test"); - assert_eq!(keys[0].1, b"key1"); - } - - #[test] - fn test_operation_batch_put() { - let op = Operation::BatchPut { - operations: vec![ - ("col1".to_string(), b"k1".to_vec(), b"v1".to_vec()), - ("col2".to_string(), b"k2".to_vec(), b"v2".to_vec()), - ("col1".to_string(), b"k3".to_vec(), b"v3".to_vec()), - ], - }; 
- - let sender = create_test_hotkey(7); - let tx = Transaction::new(sender, op); - - let keys = tx.affected_keys(); - assert_eq!(keys.len(), 3); - assert!(keys.iter().any(|(c, k)| c == "col1" && k == b"k1")); - assert!(keys.iter().any(|(c, k)| c == "col2" && k == b"k2")); - assert!(keys.iter().any(|(c, k)| c == "col1" && k == b"k3")); - } - - #[test] - fn test_transaction_id_computation() { - let sender = create_test_hotkey(2); - let op = Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }; - - let tx1 = Transaction::new(sender.clone(), op.clone()); - let tx2 = Transaction::new(sender, op); - - // Different nonces should produce different IDs - assert_ne!(tx1.id(), tx2.id()); - } - - #[test] - fn test_transaction_validate_valid() { - let sender = create_test_hotkey(3); - let tx = Transaction::new( - sender, - Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }, - ); - - assert!(tx.validate().is_ok()); - } - - #[test] - fn test_transaction_validate_invalid_id() { - let sender = create_test_hotkey(4); - let mut tx = Transaction::new( - sender, - Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }, - ); - - // Corrupt the ID - tx.id = [99u8; 32]; - - assert!(tx.validate().is_err()); - } - - #[test] - fn test_transaction_pool_default() { - let pool = TransactionPool::default(); - assert_eq!(pool.pending_count(), 0); - } - - #[test] - fn test_transaction_pool_rollback() { - let mut pool = TransactionPool::new(); - let sender = create_test_hotkey(8); - - let tx = Transaction::new( - sender, - Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }, - ); - - let receipt = TransactionReceipt { - tx_id: tx.id(), - success: true, - execution_time_us: 100, - state_root: [0u8; 32], - }; - - pool.add_pending(tx.clone(), receipt); - assert_eq!(pool.status(&tx.id()), Some(TransactionStatus::Pending)); - - pool.rollback(tx.id()); - assert_eq!(pool.status(&tx.id()), Some(TransactionStatus::RolledBack)); - } - - #[test] - fn test_transaction_pool_nonce_check() { - let mut pool = TransactionPool::new(); - let sender = create_test_hotkey(9); - - // Add transaction with nonce 100 - let mut tx1 = Transaction::new( - sender.clone(), - Operation::Put { - collection: "test".to_string(), - key: b"key1".to_vec(), - value: b"value1".to_vec(), - }, - ); - tx1.nonce = 100; - tx1.id = tx1.compute_id(); - - let receipt1 = TransactionReceipt { - tx_id: tx1.id(), - success: true, - execution_time_us: 100, - state_root: [0u8; 32], - }; - - pool.add_pending(tx1, receipt1); - assert_eq!(pool.pending_count(), 1); - - // Try to add transaction with lower nonce (should be rejected) - let mut tx2 = Transaction::new( - sender, - Operation::Put { - collection: "test".to_string(), - key: b"key2".to_vec(), - value: b"value2".to_vec(), - }, - ); - tx2.nonce = 50; - tx2.id = tx2.compute_id(); - - let receipt2 = TransactionReceipt { - tx_id: tx2.id(), - success: true, - execution_time_us: 100, - state_root: [0u8; 32], - }; - - pool.add_pending(tx2, receipt2); - // Should still be 1 (second tx rejected) - assert_eq!(pool.pending_count(), 1); - } - - #[test] - fn test_transaction_pool_get_pending_for_block() { - let mut pool = TransactionPool::new(); - let sender = create_test_hotkey(10); - - let tx = Transaction::new( - sender, - Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: 
b"value".to_vec(), - }, - ); - - let receipt = TransactionReceipt { - tx_id: tx.id(), - success: true, - execution_time_us: 100, - state_root: [0u8; 32], - }; - - pool.add_pending(tx, receipt); - - let pending = pool.get_pending_for_block(100); - assert_eq!(pending.len(), 1); - } - - #[test] - fn test_transaction_pool_status_not_found() { - let pool = TransactionPool::new(); - let unknown_id = [99u8; 32]; - - assert_eq!(pool.status(&unknown_id), None); - } - - #[test] - fn test_transaction_pool_get_by_sender() { - let mut pool = TransactionPool::new(); - let sender1 = create_test_hotkey(11); - let sender2 = create_test_hotkey(12); - - // Add 2 txs from sender1 with increasing nonces - for i in 0..2 { - let tx = create_tx_with_nonce( - sender1.clone(), - Operation::Put { - collection: "test".to_string(), - key: format!("key{}", i).into_bytes(), - value: b"value".to_vec(), - }, - (i + 1) as u64, - ); - - let receipt = TransactionReceipt { - tx_id: tx.id(), - success: true, - execution_time_us: 100, - state_root: [0u8; 32], - }; - - pool.add_pending(tx, receipt); - } - - // Add 1 tx from sender2 - let tx = create_tx_with_nonce( - sender2.clone(), - Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }, - 1, - ); - - let receipt = TransactionReceipt { - tx_id: tx.id(), - success: true, - execution_time_us: 100, - state_root: [0u8; 32], - }; - - pool.add_pending(tx, receipt); - - let sender1_txs = pool.get_by_sender(&sender1); - assert_eq!(sender1_txs.len(), 2); - - let sender2_txs = pool.get_by_sender(&sender2); - assert_eq!(sender2_txs.len(), 1); - } - - #[test] - fn test_transaction_pool_cleanup_old() { - let mut pool = TransactionPool::new(); - let sender = create_test_hotkey(13); - - let tx = Transaction::new( - sender, - Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }, - ); - - let receipt = TransactionReceipt { - tx_id: tx.id(), - success: true, - execution_time_us: 100, - state_root: [0u8; 32], - }; - - pool.add_pending(tx.clone(), receipt); - pool.confirm(tx.id(), 100); - - // Cleanup should keep blocks at or after 100 - pool.cleanup_old(100); - assert!(pool.confirmed.contains_key(&100)); - - // Transaction is confirmed, so it should be removed from pending by cleanup_old - // (cleanup_old only retains Pending status) - pool.cleanup_old(100); - assert!(pool.pending.get(&tx.id()).is_none()); - - // Cleanup should remove confirmed blocks before 101 - pool.cleanup_old(101); - assert!(pool.confirmed.get(&100).is_none()); - } - - #[test] - fn test_transaction_sign() { - let sender = create_test_hotkey(14); - let keypair = platform_core::Keypair::generate(); - - let mut tx = Transaction::new( - sender, - Operation::Put { - collection: "test".to_string(), - key: b"key".to_vec(), - value: b"value".to_vec(), - }, - ); - - assert_eq!(tx.signature.len(), 0); - - tx.sign(&keypair); - - assert_eq!(tx.signature.len(), 64); - } - - #[test] - fn test_transaction_status_equality() { - assert_eq!(TransactionStatus::Pending, TransactionStatus::Pending); - assert_ne!(TransactionStatus::Pending, TransactionStatus::Confirmed); - assert_ne!(TransactionStatus::Confirmed, TransactionStatus::RolledBack); - } - - #[test] - fn test_transaction_receipt() { - let receipt = TransactionReceipt { - tx_id: [1u8; 32], - success: true, - execution_time_us: 500, - state_root: [2u8; 32], - }; - - assert_eq!(receipt.tx_id, [1u8; 32]); - assert!(receipt.success); - assert_eq!(receipt.execution_time_us, 500); - } - 
- #[test] - fn test_transaction_pool_confirm_nonexistent() { - let mut pool = TransactionPool::new(); - let unknown_id = [99u8; 32]; - - // Confirming nonexistent transaction should not panic - pool.confirm(unknown_id, 100); - - // Should create entry in confirmed - assert!(pool.confirmed.contains_key(&100)); - } - - #[test] - fn test_transaction_pool_rollback_nonexistent() { - let mut pool = TransactionPool::new(); - let unknown_id = [99u8; 32]; - - // Rolling back nonexistent transaction should not panic - pool.rollback(unknown_id); - } -}
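
Taken together, the deleted state and storage layers composed roughly as follows. This is a sketch under assumptions: it uses only APIs shown in this diff (`StateManager::commit_block`, `RocksStorage::set_state_root`, `StateManager::root_at_block`), assumes the crate root re-exports both types, and the `commit_root` helper is illustrative rather than part of the removed code:

use crate::{RocksStorage, StateManager};

// Illustrative glue: persist each block's state root durably while the
// StateManager keeps the bounded in-memory history behind
// root_at_block() and state_diff().
fn commit_root(
    storage: &RocksStorage,
    state: &mut StateManager,
    block: u64,
    root: [u8; 32],
) -> anyhow::Result<()> {
    state.commit_block(block, root); // updates current root + rolling history
    storage.set_state_root(&root)?;  // durable "latest root" in CF_METADATA
    debug_assert_eq!(state.root_at_block(block), Some(root));
    Ok(())
}
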