diff --git a/.github/workflows/build-release-tags.yml b/.github/workflows/build-release-tags.yml index 9db80aea3..1ff306c91 100644 --- a/.github/workflows/build-release-tags.yml +++ b/.github/workflows/build-release-tags.yml @@ -296,6 +296,39 @@ jobs: - name: Add Android toolchain if: ${{ matrix.target == 'Android' }} run: | + # Setup Android SDK + ANDROID_SDK_ROOT="$HOME/android-sdk" + + # Download command-line tools if not present + if [ ! -d "$ANDROID_SDK_ROOT/cmdline-tools/latest" ]; then + echo "Downloading Android command-line tools..." + mkdir -p "$ANDROID_SDK_ROOT/cmdline-tools" + wget https://dl.google.com/android/repository/commandlinetools-linux-11076708_latest.zip -O cmdline-tools.zip + unzip -q cmdline-tools.zip -d "$ANDROID_SDK_ROOT/cmdline-tools" + mv "$ANDROID_SDK_ROOT/cmdline-tools/cmdline-tools" "$ANDROID_SDK_ROOT/cmdline-tools/latest" + rm cmdline-tools.zip + else + echo "Android command-line tools already exist" + fi + + # Always accept licenses and install/update required SDK components + echo "Installing/updating Android SDK components..." 
+ yes | "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin/sdkmanager" --licenses || true + "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin/sdkmanager" "platform-tools" "platforms;android-34" "build-tools;34.0.0" + + # Set environment variables + echo "ANDROID_HOME=$ANDROID_SDK_ROOT" >> $GITHUB_ENV + echo "ANDROID_SDK_ROOT=$ANDROID_SDK_ROOT" >> $GITHUB_ENV + echo "$ANDROID_SDK_ROOT/platform-tools" >> $GITHUB_PATH + echo "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin" >> $GITHUB_PATH + + # Verify SDK installation + echo "=== Android SDK Setup ===" + echo "ANDROID_HOME: $ANDROID_SDK_ROOT" + echo "SDK contents:" + ls -la "$ANDROID_SDK_ROOT" + + # Download and setup Android NDK NDK_VERSION="r27b" NDK_DIR="$HOME/android-ndk-$NDK_VERSION" @@ -308,6 +341,8 @@ jobs: echo "Android NDK already exists at $NDK_DIR, skipping download" fi + echo "ANDROID_HOME=$ANDROID_SDK_ROOT" >> $GITHUB_ENV + echo "ANDROID_SDK_ROOT=$ANDROID_SDK_ROOT" >> $GITHUB_ENV echo "ANDROID_NDK_HOME=$NDK_DIR" >> $GITHUB_ENV rustup target add aarch64-linux-android diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml index 57a160fa9..3212643e0 100644 --- a/.github/workflows/cmake.yml +++ b/.github/workflows/cmake.yml @@ -449,6 +449,39 @@ jobs: - name: Add Android toolchain if: ${{ matrix.target == 'Android' }} run: | + # Setup Android SDK + ANDROID_SDK_ROOT="$HOME/android-sdk" + + # Download command-line tools if not present + if [ ! -d "$ANDROID_SDK_ROOT/cmdline-tools/latest" ]; then + echo "Downloading Android command-line tools..." 
+ mkdir -p "$ANDROID_SDK_ROOT/cmdline-tools" + wget https://dl.google.com/android/repository/commandlinetools-linux-11076708_latest.zip -O cmdline-tools.zip + unzip -q cmdline-tools.zip -d "$ANDROID_SDK_ROOT/cmdline-tools" + mv "$ANDROID_SDK_ROOT/cmdline-tools/cmdline-tools" "$ANDROID_SDK_ROOT/cmdline-tools/latest" + rm cmdline-tools.zip + else + echo "Android command-line tools already exist" + fi + + # Always accept licenses and install/update required SDK components + echo "Installing/updating Android SDK components..." + yes | "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin/sdkmanager" --licenses || true + "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin/sdkmanager" "platform-tools" "platforms;android-34" "build-tools;34.0.0" + + # Set environment variables + echo "ANDROID_HOME=$ANDROID_SDK_ROOT" >> $GITHUB_ENV + echo "ANDROID_SDK_ROOT=$ANDROID_SDK_ROOT" >> $GITHUB_ENV + echo "$ANDROID_SDK_ROOT/platform-tools" >> $GITHUB_PATH + echo "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin" >> $GITHUB_PATH + + # Verify SDK installation + echo "=== Android SDK Setup ===" + echo "ANDROID_HOME: $ANDROID_SDK_ROOT" + echo "SDK contents:" + ls -la "$ANDROID_SDK_ROOT" + + # Download and setup Android NDK NDK_VERSION="r27b" NDK_DIR="$HOME/android-ndk-$NDK_VERSION" @@ -459,6 +492,8 @@ jobs: rm ndk.zip fi + echo "ANDROID_HOME=$ANDROID_SDK_ROOT" >> $GITHUB_ENV + echo "ANDROID_SDK_ROOT=$ANDROID_SDK_ROOT" >> $GITHUB_ENV echo "ANDROID_NDK_HOME=$NDK_DIR" >> $GITHUB_ENV rustup target add aarch64-linux-android diff --git a/.gitignore b/.gitignore index 6f3e25696..1d9073e6b 100644 --- a/.gitignore +++ b/.gitignore @@ -73,4 +73,67 @@ CRDT.Datastore.TEST/ #Data files for deepseek test example/BpeTokenizer/data/**/*.mnn example/BpeTokenizer/data/**/*.mnn.weight -example/BpeTokenizer/data/**/*.json \ No newline at end of file +example/BpeTokenizer/data/**/*.json + +# Local environment / secrets — never commit +.env +.env.* +!.env.example +examples/.env +examples/.env.* +!examples/.env.example + +# Build 
outputs +build/ + +# macOS +.DS_Store + +# Test log files +/tmp/eth_watch_*.log +examples/*.log + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# github copilot +.idea/**/copilot* +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# CMake +cmake-build-*/ + +# File-based project format +*.iws + +# IntelliJ +out/ + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### CLion+iml Patch ### +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + diff --git a/.gitmodules b/.gitmodules index 885238514..9dfeeb470 100644 --- a/.gitmodules +++ b/.gitmodules @@ -14,3 +14,6 @@ [submodule "docs"] path = docs url = ../sg-docs +[submodule "evmrelay"] + path = evmrelay + url = ../evmrelay diff --git a/AgentDocs/CLAUDE.md b/AgentDocs/CLAUDE.md index c13739496..95715dc85 100644 --- a/AgentDocs/CLAUDE.md +++ b/AgentDocs/CLAUDE.md @@ -61,10 +61,15 @@ Your default mode is “tiny, surgical insertion into existing code”. - Always run the linter before committing. - Always run the formatter before committing. - Always run the build before committing. -- Always run in interactive mode with the user on a step by step basis +- Always run in interactive mode with the user on a step-by-step basis - Always look in AgentDocs for other instructions. - The files can include SPRINT_PLAN.md, Architecture.md, CHECKPOINT.md, AGENT_MISTAKES.md - +- Always look in AgentDocs for other instructions. + - The files can include SPRINT_PLAN.md, Architecture.md, CHECKPOINT.md, AGENT_MISTAKES.md +- Always make sure to only use C++17 features and below. 
+ - For instance boost::coroutines only work in C++20, do NOT use it. + - Make sure not to use other C++ versions' features above C++17 + ## Build Commands > **See `README.md` → "Building the Project" for the full authoritative build instructions.** diff --git a/GeniusKDF b/GeniusKDF index 529077413..f93495c27 160000 --- a/GeniusKDF +++ b/GeniusKDF @@ -1 +1 @@ -Subproject commit 529077413dfaddceb7df5f46ae3c36e9256c9b0d +Subproject commit f93495c27d8dadf689ec73f00ae0a212c71323d2 diff --git a/ProofSystem b/ProofSystem index 50f86f4cc..99593ca66 160000 --- a/ProofSystem +++ b/ProofSystem @@ -1 +1 @@ -Subproject commit 50f86f4cc507aa79281d21aca1ad1683623ce9ab +Subproject commit 99593ca662d996869273f5e7414157e1d502ccf2 diff --git a/SGProcessingManager b/SGProcessingManager index b08326de8..2f0c7a307 160000 --- a/SGProcessingManager +++ b/SGProcessingManager @@ -1 +1 @@ -Subproject commit b08326de8e0ac7ef8db99b1ca0348db5f4d15b1e +Subproject commit 2f0c7a307874fb928844d992c886eeaa48c2ee42 diff --git a/build/Android/CMakeLists.txt b/build/Android/CMakeLists.txt index 074c1e4f9..c798e83ab 100644 --- a/build/Android/CMakeLists.txt +++ b/build/Android/CMakeLists.txt @@ -49,8 +49,14 @@ set(PROTOC_EXECUTABLE "${_THIRDPARTY_BUILD_DIR}/protobuf_host/bin/protoc${CMAKE_ set(TESTING OFF) -find_library(log-lib log) -find_library(android-lib android) +add_library(android::log INTERFACE IMPORTED GLOBAL) +set_target_properties(android::log PROPERTIES IMPORTED_LIBNAME "log") + +add_library(android::android INTERFACE IMPORTED GLOBAL) +set_target_properties(android::android PROPERTIES IMPORTED_LIBNAME "android") # Include common build parameters include(../CommonBuildParameters.cmake) + +# Add Android AAR build for secure storage +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/securestorage ${CMAKE_BINARY_DIR}/android_aar) diff --git a/build/Android/securestorage/.gitignore b/build/Android/securestorage/.gitignore new file mode 100644 index 000000000..25d94c1b2 --- /dev/null +++ 
b/build/Android/securestorage/.gitignore @@ -0,0 +1,18 @@ +# Gradle build outputs +.gradle/ +build/ +library/build/ + +# Gradle wrapper (download on demand) +.gradle-wrapper.jar +gradlew +gradlew.bat +gradle/ + +# Local configuration +local.properties + +# IDE files +.idea/ +*.iml +.DS_Store diff --git a/build/Android/securestorage/CMakeLists.txt b/build/Android/securestorage/CMakeLists.txt new file mode 100644 index 000000000..c5243f452 --- /dev/null +++ b/build/Android/securestorage/CMakeLists.txt @@ -0,0 +1,33 @@ +# Android AAR build for KeyStoreHelper +cmake_minimum_required(VERSION 3.22) + +# Check if Gradle wrapper exists, if not, set it up +set(GRADLEW_PATH "${CMAKE_CURRENT_SOURCE_DIR}/gradlew") +if(NOT EXISTS "${GRADLEW_PATH}") + message(STATUS "Gradle wrapper not found, running setup script...") + execute_process( + COMMAND bash ${CMAKE_CURRENT_SOURCE_DIR}/setup-gradle-wrapper.sh + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + RESULT_VARIABLE SETUP_RESULT + ) + if(NOT SETUP_RESULT EQUAL 0) + message(FATAL_ERROR "Failed to set up Gradle wrapper. 
Please check your internet connection.") + endif() + message(STATUS "Gradle wrapper setup complete") +endif() + +# Custom target to build the AAR using Gradle +add_custom_target(build_android_aar ALL + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/gradlew :library:assembleRelease + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMENT "Building securestorage AAR with Gradle" + BYPRODUCTS ${CMAKE_CURRENT_SOURCE_DIR}/library/build/outputs/aar/library-release.aar +) + +# Install the AAR to the installation directory +install(FILES + ${CMAKE_CURRENT_SOURCE_DIR}/library/build/outputs/aar/library-release.aar + DESTINATION lib/android + RENAME securestorage-release.aar + OPTIONAL +) diff --git a/build/Android/securestorage/README.md b/build/Android/securestorage/README.md new file mode 100644 index 000000000..34a4093be --- /dev/null +++ b/build/Android/securestorage/README.md @@ -0,0 +1,78 @@ +# Android Secure Storage AAR + +This directory contains the Android Gradle project for building the SuperGenius secure storage AAR. + +## Overview + +The AAR contains: +- `ai.gnus.sdk.KeyStoreHelper` - Java class providing secure key storage using Android KeyStore + +**Source Location**: The Java source is maintained in `src/local_secure_storage/impl/KeyStoreHelper.java` and referenced by the Gradle build. This ensures a single source of truth alongside the C++ implementation. + +## Building + +### Via CMake (Recommended) +When building the Android version of SuperGenius, the AAR will be built automatically during `make`: + +```bash +cd build/OSX +mkdir -p Android && cd Android +cmake ../.. \ + -DCMAKE_ANDROID_NDK=/path/to/ndk \ + -DANDROID_ABI=arm64-v8a \ + -DCMAKE_BUILD_TYPE=Release +make +make install +``` + +The Gradle wrapper will be set up automatically if needed during the CMake configure step. +The AAR will be built during `make` and installed to `${CMAKE_INSTALL_PREFIX}/lib/android/securestorage-release.aar` during `make install`. 
+ +### Via Gradle Directly +You can also build the AAR directly using Gradle: + +```bash +cd build/Android/securestorage +./gradlew :library:assembleRelease +``` + +Output: `build/Android/securestorage/library/build/outputs/aar/library-release.aar` + +## Usage in Unity/GeniusSDK + +1. Include the AAR in your Unity project's `Assets/Plugins/Android/` directory +2. Include your SDK `.so` file in `Assets/Plugins/Android/libs/[ABI]/` +3. Initialize KeyStoreHelper before using native secure storage: + +```java +import ai.gnus.sdk.KeyStoreHelper; + +public class YourUnityActivity extends UnityPlayerActivity { + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + // This must be called AFTER the native SDK .so is loaded + // It caches the class reference using the app's ClassLoader + KeyStoreHelper.initialize(this); + } +} +``` + +**Important**: The AAR's `nativeInit` method is implemented in your SDK's `.so` file. Make sure Unity loads the native library before calling `KeyStoreHelper.initialize()`. + +## JNI Contract + +The C++ code in `src/local_secure_storage/impl/Android.cpp`: +- Implements: `Java_ai_gnus_sdk_KeyStoreHelper_nativeInit` - Caches class reference using app ClassLoader +- Calls these static Java methods: + - `ai.gnus.sdk.KeyStoreHelper.load()Ljava/lang/String;` + - `ai.gnus.sdk.KeyStoreHelper.save(Ljava/lang/String;)Z` + - `ai.gnus.sdk.KeyStoreHelper.delete(Ljava/lang/String;)Z` + +**ClassLoader Fix**: The native code now uses the app's ClassLoader (via the Context passed to `initialize()`) instead of the system ClassLoader, which fixes crashes when secure storage is accessed from worker threads. 
+ +## Requirements + +- Android SDK 33 +- Min SDK 28 (Android 9.0) +- Gradle 8.1+ diff --git a/build/Android/securestorage/build.gradle b/build/Android/securestorage/build.gradle new file mode 100644 index 000000000..6d2af6ec1 --- /dev/null +++ b/build/Android/securestorage/build.gradle @@ -0,0 +1,21 @@ +// Top-level build file +buildscript { + repositories { + google() + mavenCentral() + } + dependencies { + classpath 'com.android.tools.build:gradle:8.1.0' + } +} + +allprojects { + repositories { + google() + mavenCentral() + } +} + +task clean(type: Delete) { + delete rootProject.buildDir +} diff --git a/build/Android/securestorage/gradle.properties b/build/Android/securestorage/gradle.properties new file mode 100644 index 000000000..dbe66c699 --- /dev/null +++ b/build/Android/securestorage/gradle.properties @@ -0,0 +1,4 @@ +# Project-wide Gradle settings. +org.gradle.jvmargs=-Xmx2048m -Dfile.encoding=UTF-8 +android.useAndroidX=true +android.enableJetifier=false diff --git a/build/Android/securestorage/library/build.gradle b/build/Android/securestorage/library/build.gradle new file mode 100644 index 000000000..5f2d318b8 --- /dev/null +++ b/build/Android/securestorage/library/build.gradle @@ -0,0 +1,40 @@ +plugins { + id 'com.android.library' +} + +android { + namespace 'ai.gnus.sdk' + compileSdk 33 + + defaultConfig { + minSdk 28 + targetSdk 33 + + consumerProguardFiles "consumer-rules.pro" + } + + buildTypes { + release { + minifyEnabled false + proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro' + } + } + + compileOptions { + sourceCompatibility JavaVersion.VERSION_1_8 + targetCompatibility JavaVersion.VERSION_1_8 + } + + sourceSets { + main { + java { + srcDirs = ['../../../../src/local_secure_storage/impl'] + } + manifest.srcFile 'src/main/AndroidManifest.xml' + } + } +} + +dependencies { + implementation 'androidx.appcompat:appcompat:1.6.1' +} diff --git a/build/Android/securestorage/library/consumer-rules.pro 
b/build/Android/securestorage/library/consumer-rules.pro new file mode 100644 index 000000000..b1f65d7a6 --- /dev/null +++ b/build/Android/securestorage/library/consumer-rules.pro @@ -0,0 +1,4 @@ +# Consumer ProGuard rules for securestorage library +-keep class ai.gnus.sdk.KeyStoreHelper { + public static *; +} diff --git a/build/Android/securestorage/library/proguard-rules.pro b/build/Android/securestorage/library/proguard-rules.pro new file mode 100644 index 000000000..f33fe4cd9 --- /dev/null +++ b/build/Android/securestorage/library/proguard-rules.pro @@ -0,0 +1,4 @@ +# Keep KeyStoreHelper class and all its methods for JNI access +-keep class ai.gnus.sdk.KeyStoreHelper { + public static *; +} diff --git a/build/Android/securestorage/library/src/main/AndroidManifest.xml b/build/Android/securestorage/library/src/main/AndroidManifest.xml new file mode 100644 index 000000000..7d00b808e --- /dev/null +++ b/build/Android/securestorage/library/src/main/AndroidManifest.xml @@ -0,0 +1,4 @@ + + + + diff --git a/build/Android/securestorage/settings.gradle b/build/Android/securestorage/settings.gradle new file mode 100644 index 000000000..a10833bc3 --- /dev/null +++ b/build/Android/securestorage/settings.gradle @@ -0,0 +1,2 @@ +rootProject.name = 'SuperGenius' +include ':library' diff --git a/build/Android/securestorage/setup-gradle-wrapper.sh b/build/Android/securestorage/setup-gradle-wrapper.sh new file mode 100755 index 000000000..501a930ec --- /dev/null +++ b/build/Android/securestorage/setup-gradle-wrapper.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# Script to set up Gradle wrapper for the Android AAR build + +set -e + +cd "$(dirname "$0")" + +# Check if gradlew already exists +if [ -f gradlew ]; then + echo "Gradle wrapper already exists" + exit 0 +fi + +GRADLE_VERSION="8.2" + +# Check if gradle is installed and what version +if command -v gradle &> /dev/null; then + GRADLE_VER=$(gradle --version | grep "^Gradle" | awk '{print $2}') + echo "Found Gradle $GRADLE_VER" + + # 
Check if version is at least 6.0 + if [ "$(printf '%s\n' "6.0" "$GRADLE_VER" | sort -V | head -n1)" = "6.0" ]; then + echo "Using installed Gradle to create wrapper..." + gradle wrapper --gradle-version $GRADLE_VERSION + exit 0 + else + echo "Warning: Gradle $GRADLE_VER is too old (need 6.0+)" + fi +fi + +echo "Downloading Gradle wrapper files directly..." + +# Create gradle/wrapper directory +mkdir -p gradle/wrapper + +# Download gradle-wrapper.jar +curl -L "https://raw.githubusercontent.com/gradle/gradle/v${GRADLE_VERSION}.0/gradle/wrapper/gradle-wrapper.jar" \ + -o gradle/wrapper/gradle-wrapper.jar + +# Download gradlew +curl -L "https://raw.githubusercontent.com/gradle/gradle/v${GRADLE_VERSION}.0/gradlew" \ + -o gradlew + +# Download gradlew.bat +curl -L "https://raw.githubusercontent.com/gradle/gradle/v${GRADLE_VERSION}.0/gradlew.bat" \ + -o gradlew.bat + +# Create gradle-wrapper.properties +cat > gradle/wrapper/gradle-wrapper.properties << EOF +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-${GRADLE_VERSION}-bin.zip +networkTimeout=10000 +validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +EOF + +chmod +x gradlew + +echo "Gradle wrapper setup complete" diff --git a/build/CommonBuildParameters.cmake b/build/CommonBuildParameters.cmake index a912a41a4..400c6c416 100644 --- a/build/CommonBuildParameters.cmake +++ b/build/CommonBuildParameters.cmake @@ -49,6 +49,14 @@ if(NOT DEFINED Protobuf_DIR) set(Protobuf_DIR "${_THIRDPARTY_BUILD_DIR}/protobuf/lib/cmake/protobuf") endif() +if(NOT DEFINED grpc_INCLUDE_DIR) + set(grpc_INCLUDE_DIR "${_THIRDPARTY_BUILD_DIR}/grpc/include") +endif() + +if(NOT DEFINED Protobuf_INCLUDE_DIR) + set(Protobuf_INCLUDE_DIR "${grpc_INCLUDE_DIR}/google/protobuf") +endif() + find_package(Protobuf CONFIG REQUIRED) if(NOT DEFINED PROTOC_EXECUTABLE) @@ -149,6 +157,7 @@ set(Boost_DIR 
"${Boost_LIB_DIR}/cmake/Boost-${BOOST_VERSION}") set(boost_atomic_DIR "${Boost_LIB_DIR}/cmake/boost_atomic-${BOOST_VERSION}") set(boost_chrono_DIR "${Boost_LIB_DIR}/cmake/boost_chrono-${BOOST_VERSION}") set(boost_container_DIR "${Boost_LIB_DIR}/cmake/boost_container-${BOOST_VERSION}") +set(boost_context_DIR "${Boost_LIB_DIR}/cmake/boost_context-${BOOST_VERSION}") set(boost_date_time_DIR "${Boost_LIB_DIR}/cmake/boost_date_time-${BOOST_VERSION}") set(boost_filesystem_DIR "${Boost_LIB_DIR}/cmake/boost_filesystem-${BOOST_VERSION}") set(boost_headers_DIR "${Boost_LIB_DIR}/cmake/boost_headers-${BOOST_VERSION}") @@ -160,11 +169,37 @@ set(boost_random_DIR "${Boost_LIB_DIR}/cmake/boost_random-${BOOST_VERSION}") set(boost_regex_DIR "${Boost_LIB_DIR}/cmake/boost_regex-${BOOST_VERSION}") set(boost_system_DIR "${Boost_LIB_DIR}/cmake/boost_system-${BOOST_VERSION}") set(boost_thread_DIR "${Boost_LIB_DIR}/cmake/boost_thread-${BOOST_VERSION}") +set(boost_context_DIR "${Boost_LIB_DIR}/cmake/boost_context-${BOOST_VERSION}") +set(boost_coroutine_DIR "${Boost_LIB_DIR}/cmake/boost_coroutine-${BOOST_VERSION}") set(boost_unit_test_framework_DIR "${Boost_LIB_DIR}/cmake/boost_unit_test_framework-${BOOST_VERSION}") set(Boost_USE_MULTITHREADED ON) set(Boost_USE_STATIC_LIBS ON) set(Boost_NO_SYSTEM_PATHS ON) option(Boost_USE_STATIC_RUNTIME "Use static runtimes" ON) +set(_BOOST_CACHE_ARGS + -DBOOST_ROOT:PATH=${_BOOST_ROOT} + -DBoost_DIR:PATH=${Boost_DIR}/Boost-${BOOST_VERSION} + -DBoost_INCLUDE_DIR:PATH=${Boost_INCLUDE_DIR} + -Dboost_headers_DIR:PATH=${Boost_DIR}/boost_headers-${BOOST_VERSION} + -Dboost_date_time_DIR:PATH=${Boost_DIR}/boost_date_time-${BOOST_VERSION} + -Dboost_filesystem_DIR:PATH=${Boost_DIR}/boost_filesystem-${BOOST_VERSION} + -Dboost_program_options_DIR:PATH=${Boost_DIR}/boost_program_options-${BOOST_VERSION} + -Dboost_random_DIR:PATH=${Boost_DIR}/boost_random-${BOOST_VERSION} + -Dboost_regex_DIR:PATH=${Boost_DIR}/boost_regex-${BOOST_VERSION} + 
-Dboost_system_DIR:PATH=${Boost_DIR}/boost_system-${BOOST_VERSION} + -Dboost_context_DIR:PATH=${Boost_DIR}/boost_context-${BOOST_VERSION} + -Dboost_coroutine_DIR:PATH=${Boost_DIR}/boost_coroutine-${BOOST_VERSION} + -Dboost_thread_DIR:PATH=${Boost_DIR}/boost_thread-${BOOST_VERSION} + -Dboost_log_DIR:PATH=${Boost_DIR}/boost_log-${BOOST_VERSION} + -Dboost_log_setup_DIR:PATH=${Boost_DIR}/boost_log_setup-${BOOST_VERSION} + -Dboost_unit_test_framework_DIR:PATH=${Boost_DIR}/boost_unit_test_framework-${BOOST_VERSION} + -Dboost_json_DIR:PATH=${Boost_DIR}/boost_json-${BOOST_VERSION} + -DBoost_USE_STATIC_RUNTIME:BOOL=ON + -DBoost_NO_SYSTEM_PATHS:BOOL=ON + -DBoost_USE_MULTITHREADED:BOOL=ON + -DBoost_USE_STATIC_LIBS:BOOL=ON + -DBoost_USE_STATIC_RUNTIME:BOOL=ON +) option(SGNS_STACKTRACE_BACKTRACE "Use BOOST_STACKTRACE_USE_BACKTRACE in stacktraces, for POSIX" OFF) @@ -176,8 +211,10 @@ if(SGNS_STACKTRACE_BACKTRACE) endif() endif() -# header only libraries must not be added here -find_package(Boost REQUIRED COMPONENTS container date_time filesystem random regex system thread log log_setup program_options unit_test_framework json) +if(POLICY CMP0167) + cmake_policy(SET CMP0167 OLD) +endif() +find_package(Boost REQUIRED COMPONENTS container date_time filesystem random regex system thread log log_setup program_options unit_test_framework json context coroutine) include_directories(${Boost_INCLUDE_DIRS}) # SQLiteModernCpp project @@ -234,6 +271,9 @@ find_package(ipfs-bitswap-cpp CONFIG REQUIRED) set(ed25519_DIR "${_THIRDPARTY_BUILD_DIR}/ed25519/lib/cmake/ed25519") find_package(ed25519 CONFIG REQUIRED) +set(CMAKE_SUPPRESS_DEVELOPER_WARNINGS ON CACHE BOOL "Suppress developer warnings" FORCE) +# Globally suppress ALL CMake deprecation warnings (including from third-party Config.cmake files) +set(CMAKE_WARN_DEPRECATED OFF CACHE BOOL "Disable deprecation warnings" FORCE) # RapidJSON set(RapidJSON_DIR "${_THIRDPARTY_BUILD_DIR}/rapidjson/lib/cmake/RapidJSON") find_package(RapidJSON CONFIG 
REQUIRED) @@ -366,15 +406,19 @@ endif() include_directories( ${PROJECT_ROOT}/src ) + include_directories( - ${PROJECT_ROOT}/GeniusKDF + ${PROJECT_ROOT}/ProofSystem/include ) + include_directories( - ${PROJECT_ROOT}/ProofSystem + ${PROJECT_ROOT}/SGProcessingManager/include ) + include_directories( - ${PROJECT_ROOT}/SGProcessingManager + ${PROJECT_ROOT}/evmrelay/include ) + include_directories( ${PROJECT_ROOT}/app ) @@ -397,14 +441,13 @@ link_directories( ${ipfs-lite-cpp_LIB_DIR} ) +add_subdirectory(${PROJECT_ROOT}/ProofSystem ${CMAKE_BINARY_DIR}/ProofSystem) +add_subdirectory(${PROJECT_ROOT}/SGProcessingManager ${CMAKE_BINARY_DIR}/SGProcessingManager) +add_subdirectory(${PROJECT_ROOT}/evmrelay ${CMAKE_BINARY_DIR}/evmrelay) add_subdirectory(${PROJECT_ROOT}/src ${CMAKE_BINARY_DIR}/src) #add_subdirectory(${PROJECT_ROOT}/GeniusKDF ${CMAKE_BINARY_DIR}/GeniusKDF) -add_subdirectory(${PROJECT_ROOT}/ProofSystem ${CMAKE_BINARY_DIR}/ProofSystem) -add_subdirectory(${PROJECT_ROOT}/SGProcessingManager ${CMAKE_BINARY_DIR}/SGProcessingManager) - -# add_subdirectory(${PROJECT_ROOT}/app ${CMAKE_BINARY_DIR}/app) if(BUILD_TESTING) enable_testing() add_subdirectory(${PROJECT_ROOT}/test ${CMAKE_BINARY_DIR}/test) @@ -436,40 +479,40 @@ write_basic_package_version_file( ) # install header files -install_hfile(${PROJECT_ROOT}/src/api) -install_hfile(${PROJECT_ROOT}/src/authorship) -install_hfile(${PROJECT_ROOT}/src/application) -install_hfile(${PROJECT_ROOT}/src/base) -install_hfile(${PROJECT_ROOT}/src/blockchain) -install_hfile(${PROJECT_ROOT}/src/clock) -install_hfile(${PROJECT_ROOT}/src/crdt) -install_hfile(${PROJECT_ROOT}/src/crypto) -install_hfile(${PROJECT_ROOT}/src/extensions) -install_hfile(${PROJECT_ROOT}/src/injector) -install_hfile(${PROJECT_ROOT}/src/macro) -install_hfile(${PROJECT_ROOT}/src/network) -install_hfile(${PROJECT_ROOT}/src/outcome) -install_hfile(${PROJECT_ROOT}/src/processing) -install_hfile(${PROJECT_ROOT}/src/primitives) -install_hfile(${PROJECT_ROOT}/src/runtime) 
-install_hfile(${PROJECT_ROOT}/src/scale) -install_hfile(${PROJECT_ROOT}/src/storage) -install_hfile(${PROJECT_ROOT}/src/subscription) -install_hfile(${PROJECT_ROOT}/src/transaction_pool) -install_hfile(${PROJECT_ROOT}/src/verification) -install_hfile(${PROJECT_ROOT}/src/account) -install_hfile(${PROJECT_ROOT}/app/integration) -install_hfile(${PROJECT_ROOT}/src/local_secure_storage) -install_hfile(${PROJECT_ROOT}/src/singleton) -install_hfile(${PROJECT_ROOT}/src/coinprices) -install_hfile(${PROJECT_ROOT}/ProcessingSchema/generated) - -# install proto header files -install_hfile(${CMAKE_CURRENT_BINARY_DIR}/generated/crdt) -install_hfile(${CMAKE_CURRENT_BINARY_DIR}/generated/processing) -install_hfile(${CMAKE_CURRENT_BINARY_DIR}/generated/account) -install_hfile(${CMAKE_CURRENT_BINARY_DIR}/generated/blockchain) -install_hfile(${CMAKE_CURRENT_BINARY_DIR}/generated/proof) +#install_hfile(${PROJECT_ROOT}/src/api) +#install_hfile(${PROJECT_ROOT}/src/authorship) +#install_hfile(${PROJECT_ROOT}/src/application) +#install_hfile(${PROJECT_ROOT}/src/base) +#install_hfile(${PROJECT_ROOT}/src/blockchain) +#install_hfile(${PROJECT_ROOT}/src/clock) +#install_hfile(${PROJECT_ROOT}/src/crdt) +#install_hfile(${PROJECT_ROOT}/src/crypto) +#install_hfile(${PROJECT_ROOT}/src/extensions) +#install_hfile(${PROJECT_ROOT}/src/injector) +#install_hfile(${PROJECT_ROOT}/src/macro) +#install_hfile(${PROJECT_ROOT}/src/network) +#install_hfile(${PROJECT_ROOT}/src/outcome) +#install_hfile(${PROJECT_ROOT}/src/processing) +#install_hfile(${PROJECT_ROOT}/src/primitives) +#install_hfile(${PROJECT_ROOT}/src/runtime) +#install_hfile(${PROJECT_ROOT}/src/scale) +#install_hfile(${PROJECT_ROOT}/src/storage) +#install_hfile(${PROJECT_ROOT}/src/subscription) +#install_hfile(${PROJECT_ROOT}/src/transaction_pool) +#install_hfile(${PROJECT_ROOT}/src/verification) +#install_hfile(${PROJECT_ROOT}/src/account) +#install_hfile(${PROJECT_ROOT}/app/integration) +#install_hfile(${PROJECT_ROOT}/src/local_secure_storage) 
+#install_hfile(${PROJECT_ROOT}/src/singleton) +#install_hfile(${PROJECT_ROOT}/src/coinprices) +#install_hfile(${PROJECT_ROOT}/ProcessingSchema/generated) +# +## install proto header files +#install_hfile(${CMAKE_CURRENT_BINARY_DIR}/generated/crdt) +#install_hfile(${CMAKE_CURRENT_BINARY_DIR}/generated/processing) +#install_hfile(${CMAKE_CURRENT_BINARY_DIR}/generated/account) +#install_hfile(${CMAKE_CURRENT_BINARY_DIR}/generated/blockchain) +#install_hfile(${CMAKE_CURRENT_BINARY_DIR}/generated/proof) # install the configuration file install(FILES diff --git a/build/CommonCompilerOptions.cmake b/build/CommonCompilerOptions.cmake index 074513ba0..2112a7211 100644 --- a/build/CommonCompilerOptions.cmake +++ b/build/CommonCompilerOptions.cmake @@ -19,19 +19,20 @@ set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH) set(CMAKE_POSITION_INDEPENDENT_CODE ON) -if(DEFINED SANITIZE_CODE AND "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=${SANITIZE_CODE}") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=${SANITIZE_CODE}") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=${SANITIZE_CODE}") - set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=${SANITIZE_CODE}") - add_compile_options("-fsanitize=${SANITIZE_CODE}") - add_link_options("-fsanitize=${SANITIZE_CODE}") -endif() - -#TODO Remove this once we update gRPC, its dependencies, fix libp2p and change some of our internal projects - -if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-template-arg-list-after-template-kw") +if (DEFINED SANITIZE_CODE) + message(STATUS "Building with sanitizer: ${SANITIZE_CODE}") + if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=${SANITIZE_CODE}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=${SANITIZE_CODE}") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} 
-fsanitize=${SANITIZE_CODE}") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=${SANITIZE_CODE}") + add_compile_options("-fsanitize=${SANITIZE_CODE}") + add_link_options("-fsanitize=${SANITIZE_CODE}") + elseif (MSVC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /fsanitize=${SANITIZE_CODE}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /fsanitize=${SANITIZE_CODE}") + add_compile_options("/fsanitize=${SANITIZE_CODE}") + endif() endif() include(GNUInstallDirs) @@ -47,6 +48,11 @@ if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE "Release") endif() +if(WIN32) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_WIN32_WINNT=0x0A00 -DNOMINMAX") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_WIN32_WINNT=0x0A00 -DNOMINMAX") +endif() + # Define zkllvm directory if(NOT DEFINED ZKLLVM_BUILD_DIR) get_filename_component(BUILD_PLATFORM_NAME ${CMAKE_CURRENT_SOURCE_DIR} NAME) diff --git a/build/CompilationFlags.cmake b/build/CompilationFlags.cmake index 78685fef6..6ce404a35 100644 --- a/build/CompilationFlags.cmake +++ b/build/CompilationFlags.cmake @@ -25,6 +25,8 @@ if("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(AppleClang|Clang|GNU)$") add_flag(-Wno-unused-but-set-variable) add_flag(-Wno-macro-redefined) add_flag(-Wno-deprecated-copy-with-user-provided-copy) + # dumb crypto3 warnings + add_flag(-Wno-reorder-ctor) if(APPLE) add_link_options(-Wl,-no_warn_duplicate_libraries) endif() diff --git a/cmake/functions.cmake b/cmake/functions.cmake index 6997e0b9b..2df24a30a 100644 --- a/cmake/functions.cmake +++ b/cmake/functions.cmake @@ -87,7 +87,6 @@ endif() function(add_proto_library NAME) set(SOURCES "") set(HEADERS "") - set(PB_REL_PATH "") foreach(PROTO IN ITEMS ${ARGN}) compile_proto_to_cpp(H C PB_REL_PATH ${PROTO}) @@ -110,14 +109,14 @@ function(add_proto_library NAME) # target_include_directories(${NAME} PUBLIC # ${CMAKE_BINARY_DIR}/generated/ # ) + install(TARGETS ${NAME} EXPORT supergeniusTargets) + foreach(H IN ITEMS ${HEADERS}) - set_target_properties(${NAME} PROPERTIES PUBLIC_HEADER 
"${H}") + file(RELATIVE_PATH H_REL_PATH "${CMAKE_BINARY_DIR}/generated" "${H}") + get_filename_component(H_REL_DIR "${H_REL_PATH}" DIRECTORY) + install(FILES "${H}" DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${H_REL_DIR}") endforeach() - install(TARGETS ${NAME} EXPORT supergeniusTargets - PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PB_REL_PATH} - ) - disable_clang_tidy(${NAME}) add_dependencies(generated ${NAME}) diff --git a/evmrelay b/evmrelay new file mode 160000 index 000000000..8acae379b --- /dev/null +++ b/evmrelay @@ -0,0 +1 @@ +Subproject commit 8acae379b6cd37e5903de71e8dadb55ae72cffb4 diff --git a/example/account_handling/AccountHandling.cpp b/example/account_handling/AccountHandling.cpp index 908eb9861..5925ab419 100644 --- a/example/account_handling/AccountHandling.cpp +++ b/example/account_handling/AccountHandling.cpp @@ -71,7 +71,7 @@ void MintTokens( const std::vector &args, sgns::TransactionManager std::cerr << "Invalid process command format.\n"; return; } - transaction_manager.MintFunds( std::stoull( args[1] ), "", "", sgns::TokenID::FromBytes( { 0x00 } ) ); + transaction_manager.MintFunds( std::stoull( args[1] ), "", "", sgns::TokenID::FromBytes( { 0x00 } ) ); } void PrintAccountInfo( const std::vector &args, sgns::TransactionManager &transaction_manager ) diff --git a/example/account_handling/AccountHelper.cpp b/example/account_handling/AccountHelper.cpp index fdfed98e1..636469662 100644 --- a/example/account_handling/AccountHelper.cpp +++ b/example/account_handling/AccountHelper.cpp @@ -1,6 +1,6 @@ /** * @file AccountHelper.cpp - * @brief + * @brief * @date 2024-05-15 * @author Henrique A. 
Klein (hklein@gnus.ai) */ @@ -83,7 +83,7 @@ namespace sgns crdt::KeyPairFileStorage( pubsubKeyPath ).GetKeyPair().value() ); pubsub_->Start( 40001, {} ); - auto scheduler = std::make_shared( io_, libp2p::protocol::SchedulerConfig{} ); + auto scheduler = std::make_shared( std::make_shared( io_ ), libp2p::basic::Scheduler::Config{ std::chrono::milliseconds( 100 ) } ); auto graphsyncnetwork = std::make_shared( pubsub_->GetHost(), scheduler ); auto generator = std::make_shared(); @@ -141,14 +141,14 @@ namespace sgns std::vector hash( SHA256_DIGEST_LENGTH ); SHA256( inputBytes.data(), inputBytes.size(), hash.data() ); //Provide CID - libp2p::protocol::kademlia::ContentId key( hash ); + auto key = libp2p::multi::ContentIdentifierCodec::encodeCIDV0( hash.data(), hash.size() ); pubsub_->GetDHT()->Start(); pubsub_->GetDHT()->ProvideCID( key, true ); - auto cidtest = libp2p::multi::ContentIdentifierCodec::decode( key.data ); + auto cidtest = libp2p::multi::ContentIdentifierCodec::decode( key ); auto cidstring = libp2p::multi::ContentIdentifierCodec::toString( cidtest.value() ); - std::cout << "CID Test::" << cidstring.value() << std::endl; + std::cout << "CID Test::" << cidstring.value() << '\n'; //Also Find providers pubsub_->StartFindingPeers( key ); diff --git a/example/account_handling/CMakeLists.txt b/example/account_handling/CMakeLists.txt index 195f161a2..7c402d5f4 100644 --- a/example/account_handling/CMakeLists.txt +++ b/example/account_handling/CMakeLists.txt @@ -10,11 +10,6 @@ include_directories( target_include_directories(account_handling PRIVATE ${GSL_INCLUDE_DIR} ${TrustWalletCore_INCLUDE_DIR}) target_link_libraries(account_handling PRIVATE -# ipfs-lite-cpp::ipfs_datastore_rocksdb -# ipfs-lite-cpp::buffer -# ipfs-lite-cpp::ipld_node -# ipfs-lite-cpp::ipfs_merkledag_service -# ipfs-lite-cpp::graphsync genius_node blockchain_common block_header_repository @@ -34,7 +29,6 @@ target_link_libraries(account_handling PRIVATE Boost::Boost.DI Boost::program_options 
ipfs-bitswap-cpp - ipfs-pubsub rapidjson ${WIN_CRYPT_LIBRARY} ) diff --git a/example/crdt_globaldb/globaldb_app.cpp b/example/crdt_globaldb/globaldb_app.cpp index 8afa1442e..a42d7e2f4 100644 --- a/example/crdt_globaldb/globaldb_app.cpp +++ b/example/crdt_globaldb/globaldb_app.cpp @@ -13,9 +13,10 @@ #include #include +#include +#include #include #include -#include using Buffer = sgns::base::Buffer; using HierarchicalKey = sgns::crdt::HierarchicalKey; @@ -119,13 +120,14 @@ int main( int argc, char **argv ) sgns::crdt::KeyPairFileStorage( strDatabasePath + "/pubsub" ).GetKeyPair().value() ); pubsub->Start( pubsubListeningPort, pubsubBootstrapPeers ); - auto scheduler = std::make_shared( io, libp2p::protocol::SchedulerConfig{} ); + auto scheduler = std::make_shared( + std::make_shared( io ), + libp2p::basic::Scheduler::Config{ std::chrono::milliseconds( 100 ) } ); auto graphsyncnetwork = std::make_shared( pubsub->GetHost(), scheduler ); auto generator = std::make_shared(); auto crdtOptions = sgns::crdt::CrdtOptions::DefaultOptions(); crdtOptions->logger = logger; - auto globaldb_ret = sgns::crdt::GlobalDB::New( io, strDatabasePath, pubsub, crdtOptions, graphsyncnetwork, scheduler, generator ); diff --git a/example/graphsync_app/CMakeLists.txt b/example/graphsync_app/CMakeLists.txt index f0cbe7fa1..9d825c334 100644 --- a/example/graphsync_app/CMakeLists.txt +++ b/example/graphsync_app/CMakeLists.txt @@ -6,7 +6,6 @@ add_executable(graphsync_app target_link_libraries(graphsync_app ipfs-lite-cpp::graphsync ipfs-lite-cpp::ipfs_merkledag_service - p2p::asio_scheduler Boost::program_options ${WIN_CRYPT_LIBRARY} ) diff --git a/example/graphsync_app/graphsync_acceptance_common.cpp b/example/graphsync_app/graphsync_acceptance_common.cpp index 0f71764f4..ab3bd3d70 100644 --- a/example/graphsync_app/graphsync_acceptance_common.cpp +++ b/example/graphsync_app/graphsync_acceptance_common.cpp @@ -3,7 +3,7 @@ #include #include -#include +#include #include #include @@ -32,7 +32,7 @@ 
createNodeObjects(std::shared_ptr io) { // [boost::di::override] allows for creating multiple hosts for testing // purposes - auto injector = + auto injector = libp2p::injector::makeHostInjector( boost::di::bind.to(io)[boost::di::override]); @@ -40,8 +40,8 @@ createNodeObjects(std::shared_ptr io) std::pair, std::shared_ptr> objects; objects.second = injector.template create>(); - auto scheduler = std::make_shared( - io, libp2p::protocol::SchedulerConfig{}); + auto scheduler = std::make_shared( + std::make_shared(io), libp2p::basic::Scheduler::Config{ std::chrono::milliseconds(100) }); auto graphsyncnetwork = std::make_shared( objects.second, scheduler ); auto generator = std::make_shared(); objects.first = @@ -54,7 +54,7 @@ createNodeObjects(std::shared_ptr io, libp2p::crypto::K { // [boost::di::override] allows for creating multiple hosts for testing // purposes - auto injector = + auto injector = libp2p::injector::makeHostInjector( boost::di::bind.to(io)[boost::di::override], boost::di::bind.to(std::move(keyPair))[boost::di::override]); @@ -63,8 +63,8 @@ createNodeObjects(std::shared_ptr io, libp2p::crypto::K std::pair, std::shared_ptr> objects; objects.second = injector.template create>(); - auto scheduler = std::make_shared( - io, libp2p::protocol::SchedulerConfig{}); + auto scheduler = std::make_shared( + std::make_shared(io), libp2p::basic::Scheduler::Config{ std::chrono::milliseconds(100) }); auto graphsyncnetwork = std::make_shared( objects.second, scheduler ); auto generator = std::make_shared(); objects.first = std::make_shared( objects.second, diff --git a/example/ipfs_client/ipfs.cpp b/example/ipfs_client/ipfs.cpp index 1e805f5ad..2b505a3b0 100644 --- a/example/ipfs_client/ipfs.cpp +++ b/example/ipfs_client/ipfs.cpp @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include @@ -207,7 +207,7 @@ int main(int argc, char* argv[]) //auto peer_id = 
libp2p::peer::PeerId::fromBase58("QmRXP6S7qwSH4vjSrZeJUGT68ww8rQVhoFWU5Kp7UkVkPN").value(); //auto peer_id = libp2p::peer::PeerId::fromBase58("QmTigmvYEhvcwEpZMuXHcC5HGQG4iKDCKaNeZuoy69QsJw").value(); - // Peers addresses: + // Peers addresses: // /ip4/127.0.0.1/udp/4001/quic; // /ip4/54.89.142.24/udp/4001/quic; // /ip4/54.89.142.24/tcp/1031; @@ -219,7 +219,7 @@ int main(int argc, char* argv[]) // /ip4/10.0.65.121/tcp/4001; // /ip4/127.0.0.1/tcp/4001; // /ip4/54.89.142.24/tcp/1024; - auto peer_address = + auto peer_address = libp2p::multi::Multiaddress::create( //"/ip4/10.0.65.121/tcp/4001/p2p/QmRXP6S7qwSH4vjSrZeJUGT68ww8rQVhoFWU5Kp7UkVkPN" //"/ip4/54.89.142.24/tcp/4001/p2p/QmRXP6S7qwSH4vjSrZeJUGT68ww8rQVhoFWU5Kp7UkVkPN" diff --git a/example/ipfs_client/ipfs_dht.cpp b/example/ipfs_client/ipfs_dht.cpp index 88df91e06..963c7f6fa 100644 --- a/example/ipfs_client/ipfs_dht.cpp +++ b/example/ipfs_client/ipfs_dht.cpp @@ -28,20 +28,10 @@ void IpfsDHT::FindProviders( const libp2p::multi::ContentIdentifier& cid, std::function> onProvidersFound)> onProvidersFound) { - auto kadCID = libp2p::protocol::kademlia::ContentId::fromWire( + auto kadCID = libp2p::protocol::kademlia::ContentId( libp2p::multi::ContentIdentifierCodec::encode(cid).value()); - if (!kadCID) - { - logger_->error("Wrong CID {}", - libp2p::peer::PeerId::fromHash(cid.content_address).value().toBase58()); - // TODO: pass an error to callback - //onProvidersFound(ERROR); - } - else - { - [[maybe_unused]] auto res = kademlia_->findProviders( - kadCID.value(), 0, onProvidersFound); - } + + [[maybe_unused]] auto res = kademlia_->findProviders(kadCID, 0, std::move(onProvidersFound)); } std::vector IpfsDHT::GetBootstrapNodes() const diff --git a/example/ipfs_client/ping_session.cpp b/example/ipfs_client/ping_session.cpp index 7bf347777..b73d987c7 100644 --- a/example/ipfs_client/ping_session.cpp +++ b/example/ipfs_client/ping_session.cpp @@ -1,4 +1,5 @@ #include "ping_session.hpp" +#include #include 
PingSession::PingSession(std::shared_ptr io, std::shared_ptr host) @@ -19,8 +20,8 @@ void PingSession::Init() }); host_->setProtocolHandler( - ping_->getProtocolId(), - [ctx = shared_from_this()](libp2p::protocol::BaseProtocol::StreamResult rstream) { + {ping_->getProtocolId()}, + [ctx = shared_from_this()](libp2p::StreamAndProtocol rstream) { ctx->ping_->handle(std::move(rstream)); }); } diff --git a/example/ipfs_client2/testipfs.cpp b/example/ipfs_client2/testipfs.cpp index 7578b0583..86c13b126 100644 --- a/example/ipfs_client2/testipfs.cpp +++ b/example/ipfs_client2/testipfs.cpp @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -336,7 +336,7 @@ int main(int argc, char *argv[]) { auto cid = libp2p::multi::ContentIdentifierCodec::fromString("QmSnuWmxptJZdLJpKRarxBMS2Ju2oANVrgbr2xWbie9b2D").value(); auto content_id = libp2p::protocol::kademlia::ContentId::fromWire( libp2p::multi::ContentIdentifierCodec::encode(cid).value()); - auto &scheduler = injector.create(); + auto &scheduler = injector.create(); std::function find_providers = [&] { [[maybe_unused]] auto res1 = kademlia->findProviders( @@ -344,7 +344,7 @@ int main(int argc, char *argv[]) { [&](libp2p::outcome::result> res) { scheduler - .schedule(libp2p::protocol::scheduler::toTicks( + .schedule(libp2p::basic::Scheduler::toTicks( kademlia_config.randomWalk.interval), find_providers) .detach(); @@ -368,7 +368,7 @@ int main(int argc, char *argv[]) { kademlia->provide(content_id.value(), !kademlia_config.passiveMode); scheduler - .schedule(libp2p::protocol::scheduler::toTicks( + .schedule(libp2p::basic::Scheduler::toTicks( kademlia_config.randomWalk.interval), provide) .detach(); diff --git a/example/ipfs_pubsub/CMakeLists.txt b/example/ipfs_pubsub/CMakeLists.txt index 39bddbb58..379db2620 100644 --- a/example/ipfs_pubsub/CMakeLists.txt +++ b/example/ipfs_pubsub/CMakeLists.txt @@ -1,25 +1,20 @@ add_executable(ipfs_pubsub ipfs_pubsub.cpp - ) +) # Copy the required 
headers to install directory include_directories( ${PROJECT_SOURCE_DIR}/src - ) +) target_include_directories(ipfs_pubsub PRIVATE ${GSL_INCLUDE_DIR}) target_link_libraries(ipfs_pubsub -# ipfs-lite-cpp::ipfs_datastore_rocksdb -# ipfs-lite-cpp::buffer -# ipfs-lite-cpp::ipld_node -# ipfs-lite-cpp::ipfs_merkledag_service -# ipfs-lite-cpp::graphsync logger - crdt_globaldb - processing_service + crdt_globaldb + processing_service p2p::p2p_basic_host p2p::p2p_default_network p2p::p2p_peer_repository @@ -30,10 +25,10 @@ target_link_libraries(ipfs_pubsub p2p::p2p_kademlia p2p::p2p_identify p2p::p2p_ping - p2p::p2p_logger + p2p::p2p_logger Boost::Boost.DI Boost::program_options ipfs-bitswap-cpp - ipfs-pubsub + ipfs-pubsub ${WIN_CRYPT_LIBRARY} - ) +) diff --git a/example/node_test/NodeExample.cpp b/example/node_test/NodeExample.cpp index 04ba65034..3c280f398 100644 --- a/example/node_test/NodeExample.cpp +++ b/example/node_test/NodeExample.cpp @@ -153,7 +153,10 @@ void MintTokens( const std::vector &args, std::shared_ptrMintTokens( std::stoull( args[1] ), "", "", sgns::TokenID::FromBytes( { 0x00 } ) ); + genius_node->MintTokens( std::stoull( args[1] ), + "", + "", + sgns::TokenID::FromBytes( { 0x00 } ) ); } void TransferTokens( const std::vector &args, std::shared_ptr genius_node ) @@ -352,9 +355,10 @@ void status_polling_thread( std::shared_ptr genius_node ) } auto status = genius_node->GetProcessingStatus(); - + std::string status_str; - switch (status.status) { + switch ( status.status ) + { case sgns::processing::ProcessingServiceImpl::Status::DISABLED: status_str = "DISABLED"; break; @@ -365,10 +369,10 @@ void status_polling_thread( std::shared_ptr genius_node ) status_str = "PROCESSING"; break; } - + // Simple output without terminal manipulation - std::cout << "[Status: " << status_str << " | Progress: " - << std::fixed << std::setprecision(2) << status.percentage << "%]" << std::endl; + std::cout << "[Status: " << status_str << " | Progress: " << std::fixed << 
std::setprecision( 2 ) + << status.percentage << "%]" << std::endl; } } @@ -738,7 +742,7 @@ int main( int argc, char *argv[] ) { input_thread.join(); } - + if ( status_thread.joinable() ) { status_thread.join(); diff --git a/example/processing_dapp/processing_dapp.cpp b/example/processing_dapp/processing_dapp.cpp index 547c8be02..9eca8db70 100644 --- a/example/processing_dapp/processing_dapp.cpp +++ b/example/processing_dapp/processing_dapp.cpp @@ -3,6 +3,7 @@ #include "crdt/globaldb/globaldb.hpp" #include "crdt/globaldb/keypair_file_storage.hpp" +#include #include #include #include @@ -13,7 +14,7 @@ #include #include #include -#include +#include using namespace sgns::processing; @@ -235,7 +236,7 @@ int main( int argc, char *argv[] ) auto io = std::make_shared(); auto crdtOptions = sgns::crdt::CrdtOptions::DefaultOptions(); - auto scheduler = std::make_shared( io, libp2p::protocol::SchedulerConfig{} ); + auto scheduler = std::make_shared( std::make_shared( io ), libp2p::basic::Scheduler::Config{ std::chrono::milliseconds( 100 ) } ); auto graphsyncnetwork = std::make_shared( pubs->GetHost(), scheduler ); auto generator = std::make_shared(); diff --git a/example/processing_dapp/processing_dapp_processor.cpp b/example/processing_dapp/processing_dapp_processor.cpp index 5f0e453ef..0e9e08377 100644 --- a/example/processing_dapp/processing_dapp_processor.cpp +++ b/example/processing_dapp/processing_dapp_processor.cpp @@ -12,7 +12,8 @@ #include "crdt/globaldb/globaldb.hpp" #include #include -#include +#include +#include using namespace sgns::processing; @@ -286,7 +287,7 @@ int main( int argc, char *argv[] ) auto io = std::make_shared(); auto crdtOptions = sgns::crdt::CrdtOptions::DefaultOptions(); - auto scheduler = std::make_shared( io, libp2p::protocol::SchedulerConfig{} ); + auto scheduler = std::make_shared( std::make_shared( io ), libp2p::basic::Scheduler::Config{ std::chrono::milliseconds( 100 ) } ); auto graphsyncnetwork = std::make_shared( pubs->GetHost(), 
scheduler ); auto generator = std::make_shared(); auto globaldb_ret = sgns::crdt::GlobalDB::New( diff --git a/example/processing_room/CMakeLists.txt b/example/processing_room/CMakeLists.txt index 9184d56da..ff20bbe51 100644 --- a/example/processing_room/CMakeLists.txt +++ b/example/processing_room/CMakeLists.txt @@ -1,20 +1,20 @@ add_executable(processing_app - processing_app.cpp - ) + processing_app.cpp +) # Copy the required headers to install directory include_directories( - ${PROJECT_SOURCE_DIR}/src - ) + ${PROJECT_SOURCE_DIR}/src +) target_include_directories(processing_app PRIVATE ${GSL_INCLUDE_DIR}) target_link_libraries(processing_app - SGProcessingProto - processing_service - logger - Boost::program_options - ipfs-pubsub + SGProcessingProto + processing_service + logger + Boost::program_options + ipfs-pubsub ) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 034c5dd47..cc2280d4f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -15,3 +15,10 @@ add_subdirectory(singleton) add_subdirectory(watcher) add_subdirectory(coinprices) add_subdirectory(proof) + +install( + DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/" + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" + FILES_MATCHING PATTERN "*.h*" +) + diff --git a/src/account/AccountMessenger.cpp b/src/account/AccountMessenger.cpp index a8fceffd4..4a3b53a5d 100644 --- a/src/account/AccountMessenger.cpp +++ b/src/account/AccountMessenger.cpp @@ -1,6 +1,6 @@ /** * @file AccountMessenger.cpp - * @brief + * @brief * @date 2025-07-22 * @author Henrique A. 
Klein (hklein@gnus.ai) */ @@ -289,7 +289,7 @@ namespace sgns } std::vector serialized_vec( encoded.begin(), encoded.end() ); - OUTCOME_TRY( auto &&signature, methods_.sign_( serialized_vec ) ); + BOOST_OUTCOME_TRY( auto signature, methods_.sign_( serialized_vec ) ); accountComm::SignedNonceRequest signed_req; *signed_req.mutable_data() = req; signed_req.set_signature( signature.data(), signature.size() ); @@ -407,7 +407,7 @@ namespace sgns } std::vector serialized_vec( encoded.begin(), encoded.end() ); - OUTCOME_TRY( auto &&signature, methods_.sign_( serialized_vec ) ); + BOOST_OUTCOME_TRY( auto signature, methods_.sign_( serialized_vec ) ); accountComm::SignedBlockRequest signed_req; *signed_req.mutable_data() = req; @@ -436,7 +436,7 @@ namespace sgns } std::vector serialized_vec( encoded.begin(), encoded.end() ); - OUTCOME_TRY( auto &&signature, methods_.sign_( serialized_vec ) ); + BOOST_OUTCOME_TRY( auto signature, methods_.sign_( serialized_vec ) ); accountComm::SignedBlockCidRequest signed_req; *signed_req.mutable_data() = req; @@ -466,7 +466,7 @@ namespace sgns } std::vector serialized_vec( encoded.begin(), encoded.end() ); - OUTCOME_TRY( auto &&signature, methods_.sign_( serialized_vec ) ); + BOOST_OUTCOME_TRY( auto signature, methods_.sign_( serialized_vec ) ); accountComm::SignedTransactionRequest signed_req; *signed_req.mutable_data() = req; @@ -495,7 +495,7 @@ namespace sgns } std::vector serialized_vec( encoded.begin(), encoded.end() ); - OUTCOME_TRY( auto &&signature, methods_.sign_( serialized_vec ) ); + BOOST_OUTCOME_TRY( auto signature, methods_.sign_( serialized_vec ) ); accountComm::SignedUTXORequest signed_req; *signed_req.mutable_data() = req; @@ -1025,7 +1025,7 @@ namespace sgns first_response_time_.erase( req_id ); } - OUTCOME_TRY( RequestNonce( req_id ) ); + BOOST_OUTCOME_TRY( RequestNonce( req_id ) ); const auto start_time = std::chrono::steady_clock::now(); const auto full_timeout = std::chrono::milliseconds( timeout_ms ); diff --git 
a/src/account/CMakeLists.txt b/src/account/CMakeLists.txt index f30b3f1df..3682843f6 100644 --- a/src/account/CMakeLists.txt +++ b/src/account/CMakeLists.txt @@ -15,11 +15,12 @@ add_library(sgns_genius_account target_link_libraries(sgns_genius_account PUBLIC Boost::headers - KeyGeneration + ProofSystem outcome logger crdt_globaldb ipfs-pubsub + TrustWalletCore PRIVATE hasher json_secure_storage @@ -29,7 +30,6 @@ target_link_libraries(sgns_genius_account SGTransactionProto ${WIN_API_LIBRARY} ${USER_ENV_LIBRARY} - TrustWalletCore wallet_core_rs TrezorCrypto protobuf::libprotobuf @@ -56,6 +56,7 @@ add_library(genius_node IGeniusTransactions.cpp TransferTransaction.cpp MintTransaction.cpp + MintTransactionV2.cpp ProcessingTransaction.cpp EscrowTransaction.cpp EscrowReleaseTransaction.cpp @@ -74,7 +75,7 @@ set_target_properties(genius_node PROPERTIES UNITY_BUILD ON) target_link_libraries(genius_node PUBLIC Boost::headers - KeyGeneration + ProofSystem outcome logger crdt_globaldb @@ -83,6 +84,7 @@ target_link_libraries(genius_node sgns_genius_account # Link to the GeniusAccount library blockchain_genesis ProcessingBase + evmrelay PRIVATE secure_storage component_factory @@ -117,7 +119,9 @@ target_link_libraries(genius_node processing_proof ) -set_target_properties(genius_node PROPERTIES PUBLIC_HEADER "GeniusNode.hpp") +add_dependencies(genius_node rlp) + +#set_target_properties(genius_node PROPERTIES PU@) if(MSVC) target_compile_options(genius_node PUBLIC /constexpr:steps1500000) elseif(ANDROID_ABI STREQUAL "armeabi-v7a") @@ -127,4 +131,3 @@ endif() # Install both libraries supergenius_install(sgns_genius_account) supergenius_install(genius_node) -install(DIRECTORY "${CMAKE_SOURCE_DIR}/../../ProofSystem/include/" DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" FILES_MATCHING PATTERN "*.h*") diff --git a/src/account/EscrowReleaseTransaction.cpp b/src/account/EscrowReleaseTransaction.cpp index 704ac4093..28884feba 100644 --- a/src/account/EscrowReleaseTransaction.cpp +++ 
b/src/account/EscrowReleaseTransaction.cpp @@ -69,7 +69,10 @@ namespace sgns tx_struct.set_original_escrow_hash( original_escrow_hash_ ); size_t size = tx_struct.ByteSizeLong(); std::vector serialized_proto( size ); - tx_struct.SerializeToArray( serialized_proto.data(), static_cast( size ) ); + if ( !tx_struct.SerializeToArray( serialized_proto.data(), static_cast( size ) ) ) + { + std::cerr << "Failed to serialize transaction\n"; + } return serialized_proto; } @@ -79,7 +82,7 @@ namespace sgns SGTransaction::EscrowReleaseTx tx_struct; if ( !tx_struct.ParseFromArray( data.data(), static_cast( data.size() ) ) ) { - std::cerr << "Failed to parse EscrowReleaseTx from array." << std::endl; + std::cerr << "Failed to parse EscrowReleaseTx from array.\n"; return nullptr; } std::vector inputs; @@ -91,7 +94,7 @@ namespace sgns auto maybe_hash = base::Hash256::fromReadableString( input_proto.tx_id_hash() ); if ( !maybe_hash ) { - std::cerr << "Invalid hash in input" << std::endl; + std::cerr << "Invalid hash in input\n"; return nullptr; } curr.txid_hash_ = maybe_hash.value(); @@ -123,6 +126,16 @@ namespace sgns return utxo_params_; } + bool EscrowReleaseTransaction::HasUTXOParameters() const + { + return true; + } + + std::optional EscrowReleaseTransaction::GetUTXOParametersOpt() const + { + return utxo_params_; + } + uint64_t EscrowReleaseTransaction::GetReleaseAmount() const { return release_amount_; diff --git a/src/account/EscrowReleaseTransaction.hpp b/src/account/EscrowReleaseTransaction.hpp index ed9aef592..a8b9fe28a 100644 --- a/src/account/EscrowReleaseTransaction.hpp +++ b/src/account/EscrowReleaseTransaction.hpp @@ -65,6 +65,18 @@ namespace sgns */ UTXOTxParameters GetUTXOParameters() const; + /** + * @brief Returns if transaction supports UTXOs + * @return True if supported, false otherwise + */ + bool HasUTXOParameters() const override; + + /** + * @brief Returns the UTXOs + * @return If exists, returns the UTXOs of the transaction + */ + std::optional 
GetUTXOParametersOpt() const override; + /** * @brief Gets the release amount. * diff --git a/src/account/EscrowTransaction.cpp b/src/account/EscrowTransaction.cpp index bd99cbfa1..d51e0366f 100644 --- a/src/account/EscrowTransaction.cpp +++ b/src/account/EscrowTransaction.cpp @@ -20,7 +20,7 @@ namespace sgns SGTransaction::DAGStruct dag ) : IGeniusTransactions( "escrow-hold", SetDAGWithType( std::move( dag ), "escrow-hold" ) ), utxo_params_( std::move( params ) ), - amount_( std::move( amount ) ), + amount_( amount ), dev_addr_( std::move( dev_addr ) ), peers_cut_( peers_cut ) { @@ -63,7 +63,11 @@ namespace sgns size_t size = tx_struct.ByteSizeLong(); std::vector serialized_proto( size ); - tx_struct.SerializeToArray( serialized_proto.data(), serialized_proto.size() ); + if ( !tx_struct.SerializeToArray( serialized_proto.data(), serialized_proto.size() ) ) + { + std::cerr << "Failed to serialize transaction\n"; + } + return serialized_proto; } diff --git a/src/account/EscrowTransaction.hpp b/src/account/EscrowTransaction.hpp index bdc46797d..38652f79e 100644 --- a/src/account/EscrowTransaction.hpp +++ b/src/account/EscrowTransaction.hpp @@ -39,6 +39,24 @@ namespace sgns return utxo_params_; } + /** + * @brief Returns if transaction supports UTXOs + * @return True if supported, false otherwise + */ + bool HasUTXOParameters() const override + { + return true; + } + + /** + * @brief Returns the UTXOs + * @return If exists, returns the UTXOs of the transaction + */ + std::optional GetUTXOParametersOpt() const override + { + return utxo_params_; + } + std::string GetDevAddress() const { return dev_addr_; diff --git a/src/account/GeniusAccount.cpp b/src/account/GeniusAccount.cpp index da90b6ff3..13bd42dcf 100644 --- a/src/account/GeniusAccount.cpp +++ b/src/account/GeniusAccount.cpp @@ -1,5 +1,9 @@ #include "GeniusAccount.hpp" +#include +#include +#include +#include #include #include @@ -85,7 +89,7 @@ namespace nil::crypto3::multiprecision::uint256_t key_seed( 
maybe_key->value.GetString() ); ethereum::EthereumKeyGenerator eth_key( key_seed ); auto pub_key = eth_key.GetEntirePubValue(); - OUTCOME_TRY( std::vector pub_key_vec, base::unhex( pub_key ) ); + BOOST_OUTCOME_TRY( std::vector pub_key_vec, base::unhex( pub_key ) ); auto secure_storage = std::make_shared( std::string( SECURE_STORAGE_PREFIX ) + libp2p::multi::detail::encodeBase58( pub_key_vec ) ); @@ -97,7 +101,7 @@ namespace rj::Document new_doc; new_doc.CopyFrom( maybe_field->value, new_doc.GetAllocator() ); - OUTCOME_TRY( secure_storage->SaveJSON( std::move( new_doc ) ) ); + BOOST_OUTCOME_TRY( secure_storage->SaveJSON( std::move( new_doc ) ) ); genius_account_logger()->debug( "Successfully migrated JSON secure storage" ); return secure_storage; @@ -137,6 +141,7 @@ namespace sgns genius_account_logger()->info( "Could not load Genius address from storage, attempting to generate from ethereum private key" ); + auto response = GenerateGeniusAddress( eth_private_key, base_path ); if ( response.has_error() ) { @@ -148,10 +153,10 @@ namespace sgns return CreateInstanceFromResponse( token_id, std::move( response.value() ), full_node ); } - std::shared_ptr GeniusAccount::New( TokenID token_id, - const Credentials &credentials, - const boost::filesystem::path &base_path, - bool full_node ) + std::shared_ptr GeniusAccount::NewFromMnemonic( TokenID token_id, + const std::string &mnemonic, + const boost::filesystem::path &base_path, + bool full_node ) { if ( auto response = LoadGeniusAccount( base_path ); response.has_value() ) { @@ -160,50 +165,52 @@ namespace sgns } genius_account_logger()->info( - "Could not load Genius address from storage, attempting to generate from credentials" ); - auto response = GenerateGeniusAddress( credentials, base_path ); - if ( response.has_error() ) + "Could not load Genius address from storage, attempting to generate from mnemonic" ); + + try { - genius_account_logger()->error( "Failed to generate Genius address from credentials" ); - return 
nullptr; - } + TW::HDWallet wallet( mnemonic, "", true ); + auto derivation_path = TW::derivationPath( TWCoinTypeEthereum ); + TW::PrivateKey private_key = wallet.getKey( TWCoinTypeEthereum, derivation_path ); - genius_account_logger()->debug( "Generated a Genius address from credentials" ); - // Save credentials to storage - auto &[storage, addresses] = response.value(); - storage->Save( "email", credentials.email ); - storage->Save( "password", credentials.password ); + auto response = GenerateGeniusAddress( private_key, base_path ); + if ( response.has_error() ) + { + genius_account_logger()->error( "Failed to generate Genius address from private key" ); + return nullptr; + } - return CreateInstanceFromResponse( token_id, std::move( response.value() ), full_node ); + genius_account_logger()->debug( "Generated a Genius address from private key" ); + auto account = CreateInstanceFromResponse( token_id, std::move( response.value() ), full_node ); + + account->storage_->Save( "mnemonic", wallet.getMnemonic() ); + + return account; + } + catch ( const std::invalid_argument & ) + { + genius_account_logger()->error( "Tried to create private key from invalid mnemonic" ); + } + + return nullptr; } std::shared_ptr GeniusAccount::New( TokenID token_id, const boost::filesystem::path &base_path, bool full_node ) { - static std::string_view SUFFIX = "@gnus.ai"; - static std::mt19937_64 eng( ( std::random_device() )() ); - if ( auto response = LoadGeniusAccount( base_path ); response.has_value() ) { genius_account_logger()->debug( "Loaded existing Genius address" ); return CreateInstanceFromResponse( token_id, std::move( response.value() ), full_node ); } - genius_account_logger()->info( - "Could not find existing Genius address, generating one with random credentials" ); - - std::uniform_int_distribution dist( 0, std::numeric_limits::max() ); - uint64_t num = dist( eng ); - std::string email = base::hex_lower( - gsl::span( reinterpret_cast( &num ), sizeof( num ) ) ); - 
email.append( SUFFIX ); + genius_account_logger()->error( + "Could not find existing Genius address, generating one from a random mnemonic" ); - num = dist( eng ); - std::string password = base::hex_lower( - gsl::span( reinterpret_cast( &num ), sizeof( num ) ) ); + TW::HDWallet wallet( 128, "" ); - return New( token_id, { std::move( email ), std::move( password ) }, base_path, full_node ); + return GeniusAccount::NewFromMnemonic( token_id, wallet.getMnemonic(), base_path, full_node ); } outcome::result GeniusAccount::LoadGeniusAccount( @@ -224,7 +231,7 @@ namespace sgns public_key.substr( 0, 16 ) + "...", public_key.length() ); - OUTCOME_TRY( std::vector vec, base::unhex( public_key ) ); + BOOST_OUTCOME_TRY( std::vector vec, base::unhex( public_key ) ); storage = std::make_shared( std::string( SECURE_STORAGE_PREFIX ) + libp2p::multi::detail::encodeBase58( vec ) ); @@ -235,7 +242,7 @@ namespace sgns else { genius_account_logger()->debug( "Secure storage ID file does not exist, will try migration" ); - OUTCOME_TRY( storage, MigrateSecureStorage( base_path ) ); + BOOST_OUTCOME_TRY( storage, MigrateSecureStorage( base_path ) ); } auto load_res = storage->Load( "sgns_key" ); @@ -266,39 +273,38 @@ namespace sgns } outcome::result GeniusAccount::GenerateGeniusAddress( - const Credentials &credentials, + const char *eth_private_key, const boost::filesystem::path &base_path ) { - genius_account_logger()->trace( "Key seed from credentials" ); + genius_account_logger()->trace( "Key seed from ethereum private key" ); - if ( credentials.email.empty() || credentials.password.empty() ) + if ( eth_private_key == nullptr ) { + genius_account_logger()->error( "No ethereum address to generate from" ); return std::errc::invalid_argument; } - std::string s = credentials.email + credentials.password; - auto hashed = TW::Hash::sha256( s ); - auto hexed = base::hex_lower( hashed ); + auto private_key_vec = base::unhex( eth_private_key ); + if ( private_key_vec.has_error() ) + { + 
genius_account_logger()->error( "Could not extract private key from hexadecimal" ); + return std::errc::invalid_argument; + } - return GenerateGeniusAddress( hexed.data(), base_path ); + TW::PrivateKey tw_private_key( private_key_vec.value() ); + + return GenerateGeniusAddress( tw_private_key, base_path ); } outcome::result GeniusAccount::GenerateGeniusAddress( - const char *eth_private_key, + const TW::PrivateKey &private_key, const boost::filesystem::path &base_path ) { - genius_account_logger()->trace( "Key seed from ethereum private key" ); - - if ( eth_private_key == nullptr ) - { - return outcome::failure( std::errc::invalid_argument ); - } + genius_account_logger()->trace( "Key seed from TW private key" ); - OUTCOME_TRY( auto pri_key_vec, base::unhex( eth_private_key ) ); - auto signed_secret = TW::PrivateKey( pri_key_vec ) - .sign( - TW::Data( ELGAMAL_PUBKEY_PREDEFINED.cbegin(), ELGAMAL_PUBKEY_PREDEFINED.cend() ), - TWCurveSECP256k1 ); + auto signed_secret = private_key.sign( + TW::Data( ELGAMAL_PUBKEY_PREDEFINED.cbegin(), ELGAMAL_PUBKEY_PREDEFINED.cend() ), + TWCurveSECP256k1 ); if ( signed_secret.empty() ) { @@ -311,7 +317,7 @@ namespace sgns // Create storage and keys ethereum::EthereumKeyGenerator eth_key( key_seed ); auto pub_key = eth_key.GetEntirePubValue(); - OUTCOME_TRY( std::vector pub_key_vec, base::unhex( pub_key ) ); + BOOST_OUTCOME_TRY( std::vector pub_key_vec, base::unhex( pub_key ) ); auto storage = std::make_shared( std::string( SECURE_STORAGE_PREFIX ) + libp2p::multi::detail::encodeBase58( pub_key_vec ) ); diff --git a/src/account/GeniusAccount.hpp b/src/account/GeniusAccount.hpp index 0f7143c40..0d8e9ddb8 100644 --- a/src/account/GeniusAccount.hpp +++ b/src/account/GeniusAccount.hpp @@ -21,6 +21,8 @@ #include #include +#include +#include #include #include @@ -30,8 +32,6 @@ #include "outcome/outcome.hpp" #include -#include -#include namespace sgns { @@ -62,7 +62,7 @@ namespace sgns }; static const std::array ELGAMAL_PUBKEY_PREDEFINED; ///< 
Predefined ElGamal public key - static constexpr uint64_t NONCE_CACHE_DURATION_MS = 5000; ///< Cache nonce results for 5 seconds + static constexpr int64_t NONCE_CACHE_DURATION_MS = 5000; ///< Cache nonce results for 5 seconds /** * @brief Factory constructor of new GeniusAccount. @@ -77,14 +77,10 @@ namespace sgns const boost::filesystem::path &base_path, bool full_node = false ); - /** - * @brief Factory constructor of new GeniusAccount - * @param[in] token_id Token ID of the account - */ - static std::shared_ptr New( TokenID token_id, - const Credentials &credentials, - const boost::filesystem::path &base_path, - bool full_node = false ); + static std::shared_ptr NewFromMnemonic( TokenID token_id, + const std::string &mnemonic, + const boost::filesystem::path &base_path, + bool full_node = false ); /** * @brief Factory constructor of new GeniusAccount @@ -256,7 +252,7 @@ namespace sgns static outcome::result GenerateGeniusAddress( const char *eth_private_key, const boost::filesystem::path &base_path ); - static outcome::result GenerateGeniusAddress( const Credentials &credentials, + static outcome::result GenerateGeniusAddress( const TW::PrivateKey &private_key, const boost::filesystem::path &base_path ); protected: @@ -286,12 +282,12 @@ namespace sgns StorageWithAddress response_value, bool full_node ); - TokenID token; ///< Token ID of the account - bool is_full_node_; ///< Whether this account is a full node + TokenID token; ///< Token ID of the account + std::shared_ptr storage_; ///< Secure storage instance + bool is_full_node_; ///< Whether this account is a full node std::shared_ptr eth_keypair_; ///< Ethereum keypair std::shared_ptr elgamal_address_; ///< ElGamal keypair - std::shared_ptr storage_; ///< Secure storage instance std::unordered_map confirmed_nonces_; ///< Map of the confirmed nonces from peers mutable std::shared_mutex nonce_mutex_; ///< Mutex for the nonce map std::set pending_nonces_; ///< Reserved but not confirmed nonces diff --git 
a/src/account/GeniusNode.cpp b/src/account/GeniusNode.cpp index 26ba64c38..44e31f4c1 100644 --- a/src/account/GeniusNode.cpp +++ b/src/account/GeniusNode.cpp @@ -5,7 +5,6 @@ * @author Henrique A. Klein (hklein@gnus.ai) */ -#include #include #include #include @@ -24,7 +23,9 @@ #include #include #include -#include +#include +#include +#include #include "base/sgns_version.hpp" #include "account/TokenAmount.hpp" @@ -171,29 +172,46 @@ namespace sgns return instance; } - std::shared_ptr GeniusNode::New( const DevConfig_st &dev_config, - const GeniusAccount::Credentials &credentials, - bool autodht, - bool isprocessor, - uint16_t base_port, - bool is_full_node, - bool use_upnp ) + std::shared_ptr GeniusNode::NewFromMnemonic( const DevConfig_st &dev_config, + const std::string &mnemonic, + bool autodht, + bool isprocessor, + uint16_t base_port, + bool is_full_node, + bool use_upnp ) { - auto instance = std::shared_ptr( new GeniusNode( - dev_config, - GeniusAccount::New( dev_config.TokenID, credentials, dev_config.BaseWritePath, is_full_node ), - autodht, - isprocessor, - base_port, - is_full_node, - use_upnp ) ); + try + { + auto account = GeniusAccount::NewFromMnemonic( dev_config.TokenID, + mnemonic, + dev_config.BaseWritePath, + is_full_node ); - if ( instance ) + if ( account == nullptr ) + { + return nullptr; + } + + auto instance = std::shared_ptr( new GeniusNode( dev_config, + std::move( account ), + autodht, + isprocessor, + base_port, + is_full_node, + use_upnp ) ); + + if ( instance ) + { + instance->BeginDBInitialization(); + } + + return instance; + } + catch ( const std::invalid_argument &err ) { - instance->BeginDBInitialization(); + std::cerr << "Failed to generate address from mnemonic: " << err.what() << '\n'; } - - return instance; + return nullptr; } GeniusNode::GeniusNode( const DevConfig_st &dev_config, @@ -223,7 +241,9 @@ namespace sgns processing_channel_topic_( std::string( PROCESSING_CHANNEL ) ), processing_grid_chanel_topic_( std::string( 
PROCESSING_GRID_CHANNEL ) ), m_lastApiCall( std::chrono::system_clock::now() - m_minApiCallInterval ), - scheduler_( std::make_shared( io_, libp2p::protocol::SchedulerConfig{} ) ), + scheduler_( std::make_shared( + std::make_shared( io_ ), + libp2p::basic::Scheduler::Config{ std::chrono::milliseconds( 100 ) } ) ), generator_( std::make_shared() ), processing_callback_pool_( std::make_unique( 1 ) ), use_upnp_( use_upnp ) @@ -365,7 +385,8 @@ namespace sgns strong->node_logger_->error( "Error starting blockchain: {}", result.error().message() ); strong->node_logger_->info( "Scheduling blockchain retry after failure" ); - strong->account_->RequestHeads({std::string(blockchain::ValidatorRegistry::ValidatorTopic())}); + strong->account_->RequestHeads( + { std::string( blockchain::ValidatorRegistry::ValidatorTopic() ) } ); strong->ScheduleBlockchainRetry(); return; } @@ -387,21 +408,23 @@ namespace sgns } // Move transaction initialization off the AccountMessenger worker thread. - boost::asio::post( *strong->io_, [weak_self]() - { - if ( auto strong = weak_self.lock() ) + boost::asio::post( + *strong->io_, + [weak_self]() { - auto current_state = strong->state_.load(); - if ( current_state != NodeState::INITIALIZING_BLOCKCHAIN ) + if ( auto strong = weak_self.lock() ) { - strong->node_logger_->debug( - "Skipping transaction initialization, unexpected state: {}", - NodeStateToString( current_state ) ); - return; + auto current_state = strong->state_.load(); + if ( current_state != NodeState::INITIALIZING_BLOCKCHAIN ) + { + strong->node_logger_->debug( + "Skipping transaction initialization, unexpected state: {}", + NodeStateToString( current_state ) ); + return; + } + strong->StateTransition( NodeState::INITIALIZING_TRANSACTIONS ); } - strong->StateTransition( NodeState::INITIALIZING_TRANSACTIONS ); - } - } ); + } ); } } ); } @@ -598,8 +621,8 @@ namespace sgns std::vector hash( SHA256_DIGEST_LENGTH ); SHA256( inputBytes.data(), inputBytes.size(), hash.data() ); - 
libp2p::protocol::kademlia::ContentId key( hash ); - auto acc_cid = libp2p::multi::ContentIdentifierCodec::decode( key.data ); + auto key = libp2p::multi::ContentIdentifierCodec::encodeCIDV0( hash.data(), hash.size() ); + auto acc_cid = libp2p::multi::ContentIdentifierCodec::decode( key ); auto maybe_base58 = libp2p::multi::ContentIdentifierCodec::toString( acc_cid.value() ); if ( !maybe_base58 ) { @@ -937,11 +960,11 @@ namespace sgns std::vector hash( SHA256_DIGEST_LENGTH ); SHA256( inputBytes.data(), inputBytes.size(), hash.data() ); // Provide CID - libp2p::protocol::kademlia::ContentId key( hash ); + auto key = libp2p::multi::ContentIdentifierCodec::encodeCIDV0( hash.data(), hash.size() ); pubsub_->GetDHT()->Start(); pubsub_->ProvideCID( key ); - auto cidtest = libp2p::multi::ContentIdentifierCodec::decode( key.data ); + auto cidtest = libp2p::multi::ContentIdentifierCodec::decode( key ); auto cidstring = libp2p::multi::ContentIdentifierCodec::toString( cidtest.value() ); node_logger_->info( "CID Test:: {}", cidstring.value() ); @@ -978,7 +1001,7 @@ namespace sgns { return outcome::failure( boost::system::error_code{} ); } - OUTCOME_TRY( auto procmgr, sgns::sgprocessing::ProcessingManager::Create( jsondata ) ); + BOOST_OUTCOME_TRY( auto procmgr, sgns::sgprocessing::ProcessingManager::Create( jsondata ) ); auto funds = GetProcessCost( procmgr ); if ( funds <= 0 ) @@ -1040,9 +1063,9 @@ namespace sgns return outcome::failure( cut.error() ); } - OUTCOME_TRY( auto &&manager, GetTransactionManager() ); - OUTCOME_TRY( ( auto &&, result_pair ), - manager->HoldEscrow( funds, std::string( dev_config_.Addr ), cut.value(), uuidstring ) ); + BOOST_OUTCOME_TRY( auto manager, GetTransactionManager() ); + BOOST_OUTCOME_TRY( auto result_pair, + manager->HoldEscrow( funds, std::string( dev_config_.Addr ), cut.value(), uuidstring ) ); auto [tx_id, escrow_data_pair] = result_pair; @@ -1115,6 +1138,7 @@ namespace sgns const std::string &transaction_hash, const std::string &chainid, 
TokenID tokenid, + std::string destination, std::chrono::milliseconds timeout ) { if ( GetTransactionManagerState() != TransactionManager::State::READY ) @@ -1123,9 +1147,13 @@ namespace sgns return outcome::failure( boost::system::error_code{} ); } auto start_time = std::chrono::steady_clock::now(); + if ( destination.empty() ) + { + destination = account_->GetAddress(); + } - OUTCOME_TRY( auto &&manager, GetTransactionManager() ); - OUTCOME_TRY( auto &&tx_id, manager->MintFunds( amount, transaction_hash, chainid, tokenid ) ); + BOOST_OUTCOME_TRY( auto manager, GetTransactionManager() ); + BOOST_OUTCOME_TRY( auto tx_id, manager->MintFunds( amount, transaction_hash, chainid, tokenid, destination ) ); auto mint_result = manager->WaitForTransactionOutgoing( tx_id, timeout ); @@ -1153,8 +1181,8 @@ namespace sgns } auto start_time = std::chrono::steady_clock::now(); - OUTCOME_TRY( auto &&manager, GetTransactionManager() ); - OUTCOME_TRY( auto &&tx_id, manager->TransferFunds( amount, destination, token_id ) ); + BOOST_OUTCOME_TRY( auto manager, GetTransactionManager() ); + BOOST_OUTCOME_TRY( auto tx_id, manager->TransferFunds( amount, destination, token_id ) ); auto transfer_result = manager->WaitForTransactionOutgoing( tx_id, timeout ); @@ -1180,8 +1208,8 @@ namespace sgns return outcome::failure( boost::system::error_code{} ); } - OUTCOME_TRY( auto &&manager, GetTransactionManager() ); - OUTCOME_TRY( auto &&tx_id, manager->TransferFunds( amount, destination, token_id ) ); + BOOST_OUTCOME_TRY( auto manager, GetTransactionManager() ); + BOOST_OUTCOME_TRY( auto tx_id, manager->TransferFunds( amount, destination, token_id ) ); node_logger_->debug( "TransferFunds transaction {} sent", tx_id ); return tx_id; @@ -1196,8 +1224,8 @@ namespace sgns return outcome::failure( boost::system::error_code{} ); } auto start_time = std::chrono::steady_clock::now(); - OUTCOME_TRY( auto &&manager, GetTransactionManager() ); - OUTCOME_TRY( auto &&tx_id, manager->TransferFunds( amount, 
dev_config_.Addr, token_id ) ); + BOOST_OUTCOME_TRY( auto manager, GetTransactionManager() ); + BOOST_OUTCOME_TRY( auto tx_id, manager->TransferFunds( amount, dev_config_.Addr, token_id ) ); auto paydev_result = manager->WaitForTransactionOutgoing( tx_id, timeout ); @@ -1226,8 +1254,8 @@ namespace sgns } auto start_time = std::chrono::steady_clock::now(); - OUTCOME_TRY( auto &&manager, GetTransactionManager() ); - OUTCOME_TRY( auto &&tx_id, manager->PayEscrow( escrow_path, taskresult, std::move( crdt_transaction ) ) ); + BOOST_OUTCOME_TRY( auto manager, GetTransactionManager() ); + BOOST_OUTCOME_TRY( auto tx_id, manager->PayEscrow( escrow_path, taskresult, std::move( crdt_transaction ) ) ); auto payescrow_result = manager->WaitForTransactionOutgoing( tx_id, timeout ); diff --git a/src/account/GeniusNode.hpp b/src/account/GeniusNode.hpp index 37edc6846..5190e8194 100644 --- a/src/account/GeniusNode.hpp +++ b/src/account/GeniusNode.hpp @@ -66,17 +66,17 @@ namespace sgns bool is_full_node = false, bool use_upnp = true ); - static std::shared_ptr New( const DevConfig_st &dev_config, - const GeniusAccount::Credentials &credentials, - bool autodht = true, - bool isprocessor = true, - uint16_t base_port = 40001, - bool is_full_node = false, - bool use_upnp = true ); + static std::shared_ptr NewFromMnemonic( const DevConfig_st &dev_config, + const std::string &mnemonic, + bool autodht = true, + bool isprocessor = true, + uint16_t base_port = 40001, + bool is_full_node = false, + bool use_upnp = true ); ~GeniusNode() override; - enum class NodeState: uint8_t + enum class NodeState : uint8_t { CREATING = 0, MIGRATING_DATABASE, @@ -91,7 +91,7 @@ namespace sgns /** * @brief GeniusNode Error class */ - enum class Error: uint8_t + enum class Error : uint8_t { INSUFFICIENT_FUNDS = 1, ///< Insufficient funds for a transaction DATABASE_WRITE_ERROR = 2, ///< Error writing data into the database @@ -145,7 +145,8 @@ namespace sgns const std::string &transaction_hash, const std::string 
&chainid, TokenID tokenid, - std::chrono::milliseconds timeout = std::chrono::milliseconds( TIMEOUT_MINT ) ); + std::string destination = "", + std::chrono::milliseconds timeout = std::chrono::milliseconds( TIMEOUT_MINT ) ); void AddPeer( const std::string &peer ); void RefreshUPNP( uint16_t pubsubport ); @@ -353,7 +354,7 @@ namespace sgns std::thread upnp_thread; std::atomic stop_upnp{ false }; std::string base58key_; - std::shared_ptr scheduler_; + std::shared_ptr scheduler_; std::shared_ptr generator_; std::shared_ptr graphsyncnetwork_; diff --git a/src/account/IGeniusTransactions.cpp b/src/account/IGeniusTransactions.cpp index 9eda254d7..e27050f27 100644 --- a/src/account/IGeniusTransactions.cpp +++ b/src/account/IGeniusTransactions.cpp @@ -88,7 +88,10 @@ namespace sgns dag_st.clear_signature(); auto size = dag_st.ByteSizeLong(); std::vector serialized( size ); - dag_st.SerializeToArray( serialized.data(), size ); + if ( !dag_st.SerializeToArray( serialized.data(), size ) ) + { + std::cerr << "Failed to serialize DAG struct\n"; + } dag_st.set_signature( str_signature ); return GeniusAccount::VerifySignature( dag_st.source_addr(), str_signature, serialized ) && CheckHash(); diff --git a/src/account/IGeniusTransactions.hpp b/src/account/IGeniusTransactions.hpp index 890f96bb9..d97f70cd8 100644 --- a/src/account/IGeniusTransactions.hpp +++ b/src/account/IGeniusTransactions.hpp @@ -10,11 +10,13 @@ #include #include #include +#include #include #include "outcome/outcome.hpp" #include "account/proto/SGTransaction.pb.h" +#include "account/UTXOStructs.hpp" #include "GeniusAccount.hpp" #include @@ -57,6 +59,24 @@ namespace sgns virtual std::vector SerializeByteVector() = 0; + /** + * @brief Returns if transaction supports UTXOs + * @return True if supported, false otherwise + */ + virtual bool HasUTXOParameters() const + { + return false; + } + + /** + * @brief Returns the UTXOs + * @return If exists, returns the UTXOs of the transaction + */ + virtual std::optional 
GetUTXOParametersOpt() const + { + return std::nullopt; + } + virtual std::string GetTransactionSpecificPath() const = 0; static std::string GetTransactionFullPath( const std::string &tx_hash ) diff --git a/src/account/Migration0_2_0To1_0_0.cpp b/src/account/Migration0_2_0To1_0_0.cpp index ecc3267dc..f68d331ca 100644 --- a/src/account/Migration0_2_0To1_0_0.cpp +++ b/src/account/Migration0_2_0To1_0_0.cpp @@ -52,7 +52,7 @@ namespace sgns std::shared_ptr ioContext, std::shared_ptr pubSub, std::shared_ptr graphsync, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::string writeBasePath, std::string base58key ) : @@ -99,7 +99,7 @@ namespace sgns if ( db_0_0_2_out_ && db_0_0_2_in_ ) { - OUTCOME_TRY( auto &&target_db, InitTargetDb() ); + BOOST_OUTCOME_TRY( auto target_db, InitTargetDb() ); db_1_0_0_ = std::move( target_db ); } @@ -178,7 +178,7 @@ namespace sgns m_logger->debug( "Initializing legacy DB at path {}", fullPath ); - OUTCOME_TRY( auto &&db, + BOOST_OUTCOME_TRY( auto db, crdt::GlobalDB::New( ioContext_, fullPath, pubSub_, @@ -283,13 +283,13 @@ namespace sgns m_logger->debug( "Need to remove previous transaction, since new one is older {}", transaction_path ); - BOOST_OUTCOME_TRYV2( auto &&, crdt_transaction_->Erase( tx_key ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Erase( tx_key ) ); sgns::crdt::HierarchicalKey replicated_proof_key( BASE + BuildLegacyProofPath( *tx ) ); m_logger->debug( "Need to remove previous proof as well {}", replicated_proof_key.GetKey() ); - BOOST_OUTCOME_TRYV2( auto &&, crdt_transaction_->Erase( replicated_proof_key ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Erase( replicated_proof_key ) ); } else { @@ -302,12 +302,12 @@ namespace sgns migrate_tx = true; m_logger->debug( "Invalid transaction, deleting from migration {}", transaction_path ); - BOOST_OUTCOME_TRYV2( auto &&, crdt_transaction_->Erase( tx_key ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Erase( tx_key ) ); sgns::crdt::HierarchicalKey 
replicated_proof_key( BASE + BuildLegacyProofPath( *tx ) ); m_logger->debug( "Need to remove previous proof as well {}", replicated_proof_key.GetKey() ); - BOOST_OUTCOME_TRYV2( auto &&, crdt_transaction_->Erase( replicated_proof_key ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Erase( replicated_proof_key ) ); } } } @@ -329,7 +329,7 @@ namespace sgns sgns::crdt::GlobalDB::Buffer data_transaction; data_transaction.put( tx->SerializeByteVector() ); - BOOST_OUTCOME_TRYV2( auto &&, + BOOST_OUTCOME_TRY( crdt_transaction_->Put( std::move( tx_key ), std::move( data_transaction ) ) ); sgns::crdt::HierarchicalKey proof_crdt_key( BASE + BuildLegacyProofPath( *tx ) ); @@ -347,7 +347,7 @@ namespace sgns ++migrated_count; if ( migrated_count >= BATCH_SIZE ) { - OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); crdt_transaction_ = db_1_0_0_->BeginTransaction(); // start fresh topics_.clear(); @@ -377,7 +377,7 @@ namespace sgns topics_.emplace( std::string( TransactionManager::GNUS_FULL_NODES_TOPIC ) ); m_logger->debug( "Migrating output DB into new DB" ); - OUTCOME_TRY( auto &&remainder_outdb, MigrateDb( db_0_0_2_out_, db_1_0_0_ ) ); + BOOST_OUTCOME_TRY( auto remainder_outdb, MigrateDb( db_0_0_2_out_, db_1_0_0_ ) ); if ( remainder_outdb > 0 ) { @@ -385,7 +385,7 @@ namespace sgns { m_logger->debug( "Commiting migrating to topics {}", topic ); } - OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); crdt_transaction_ = db_1_0_0_->BeginTransaction(); topics_.clear(); topics_.emplace( std::string( TransactionManager::GNUS_FULL_NODES_TOPIC ) ); @@ -393,7 +393,7 @@ namespace sgns } m_logger->debug( "Migrating input DB into new DB" ); - OUTCOME_TRY( auto &&remainder_indb, MigrateDb( db_0_0_2_in_, db_1_0_0_ ) ); + BOOST_OUTCOME_TRY( auto remainder_indb, MigrateDb( db_0_0_2_in_, db_1_0_0_ ) ); if ( remainder_indb > 0 ) { @@ -401,14 +401,14 @@ namespace sgns { 
m_logger->debug( "Commiting migrating to topics {}", topic ); } - OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); } sgns::crdt::GlobalDB::Buffer version_buffer; sgns::crdt::GlobalDB::Buffer version_key; version_key.put( std::string( MigrationManager::VERSION_INFO_KEY ) ); version_buffer.put( ToVersion() ); - OUTCOME_TRY( db_1_0_0_->GetDataStore()->put( version_key, version_buffer ) ); + BOOST_OUTCOME_TRY( db_1_0_0_->GetDataStore()->put( version_key, version_buffer ) ); m_logger->debug( "Apply step of Migration0_2_0To1_0_0 finished successfully" ); diff --git a/src/account/Migration0_2_0To1_0_0.hpp b/src/account/Migration0_2_0To1_0_0.hpp index 1ad752b6f..bd2ca6704 100644 --- a/src/account/Migration0_2_0To1_0_0.hpp +++ b/src/account/Migration0_2_0To1_0_0.hpp @@ -21,7 +21,7 @@ #include "outcome/outcome.hpp" #include #include -#include +#include #include "IMigrationStep.hpp" @@ -48,7 +48,7 @@ namespace sgns Migration0_2_0To1_0_0( std::shared_ptr ioContext, std::shared_ptr pubSub, std::shared_ptr graphsync, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::string writeBasePath, std::string base58key ); @@ -110,7 +110,7 @@ namespace sgns std::shared_ptr ioContext_; ///< IO context for DB I/O. std::shared_ptr pubSub_; ///< PubSub instance for legacy DB. std::shared_ptr graphsync_; ///< GraphSync network. - std::shared_ptr scheduler_; ///< libp2p scheduler. + std::shared_ptr scheduler_; ///< libp2p scheduler. std::shared_ptr generator_; ///< Request ID generator. std::shared_ptr crdt_transaction_; ///< CRDT transaction to make it all atomic std::string writeBasePath_; ///< Base path for writing DB files. 
diff --git a/src/account/Migration1_0_0To3_4_0.cpp b/src/account/Migration1_0_0To3_4_0.cpp index 97ac60e0b..00e196172 100644 --- a/src/account/Migration1_0_0To3_4_0.cpp +++ b/src/account/Migration1_0_0To3_4_0.cpp @@ -1,6 +1,6 @@ /** * @file Migration1_0_0To3_4_0.cpp - * @brief + * @brief * @date 2025-10-03 * @author Henrique A. Klein (hklein@gnus.ai) */ @@ -23,7 +23,7 @@ namespace sgns std::shared_ptr ioContext, std::shared_ptr pubSub, std::shared_ptr graphsync, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::string writeBasePath, std::string base58key ) : @@ -84,11 +84,11 @@ namespace sgns outcome::result Migration1_0_0To3_4_0::Init() { - OUTCOME_TRY( auto &&legacy_db, InitLegacyDb() ); + BOOST_OUTCOME_TRY( auto legacy_db, InitLegacyDb() ); db_1_0_0_ = std::move( legacy_db ); if ( db_1_0_0_ ) { - OUTCOME_TRY( auto &&new_db, InitTargetDb() ); + BOOST_OUTCOME_TRY( auto new_db, InitTargetDb() ); db_3_4_0_ = std::move( new_db ); } return outcome::success(); @@ -109,7 +109,7 @@ namespace sgns topics_.emplace( std::string( TransactionManager::GNUS_FULL_NODES_TOPIC ) ); const std::string BASE = "/bc-963/"; - OUTCOME_TRY( auto &&entries, db_1_0_0_->QueryKeyValues( BASE, "*", "/tx" ) ); + BOOST_OUTCOME_TRY( auto entries, db_1_0_0_->QueryKeyValues( BASE, "*", "/tx" ) ); logger_->debug( "Found {} transaction keys to migrate", entries.size() ); size_t migrated_count = 0; size_t BATCH_SIZE = 50; @@ -163,7 +163,7 @@ namespace sgns sgns::crdt::GlobalDB::Buffer data_transaction; data_transaction.put( tx->SerializeByteVector() ); - BOOST_OUTCOME_TRYV2( auto &&, crdt_transaction_->Put( transaction_key, std::move( data_transaction ) ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Put( transaction_key, std::move( data_transaction ) ) ); sgns::crdt::HierarchicalKey proof_crdt_key( BASE + BuildLegacyProofPath_1_0_0( *tx ) ); sgns::crdt::GlobalDB::Buffer proof_transaction; @@ -176,7 +176,7 @@ namespace sgns ++migrated_count; if ( migrated_count >= 
BATCH_SIZE ) { - OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); crdt_transaction_ = db_3_4_0_->BeginTransaction(); // start fresh topics_.clear(); @@ -187,7 +187,7 @@ namespace sgns } if ( migrated_count ) { - OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); logger_->debug( "Committed remaining {} transactions", migrated_count ); } @@ -196,7 +196,7 @@ namespace sgns version_key.put( std::string( MigrationManager::VERSION_INFO_KEY ) ); version_buffer.put( ToVersion() ); - OUTCOME_TRY( db_3_4_0_->GetDataStore()->put( version_key, version_buffer ) ); + BOOST_OUTCOME_TRY( db_3_4_0_->GetDataStore()->put( version_key, version_buffer ) ); logger_->debug( "Migration from {} to {} completed successfully", FromVersion(), ToVersion() ); return outcome::success(); diff --git a/src/account/Migration1_0_0To3_4_0.hpp b/src/account/Migration1_0_0To3_4_0.hpp index 79d63e989..ed5b83932 100644 --- a/src/account/Migration1_0_0To3_4_0.hpp +++ b/src/account/Migration1_0_0To3_4_0.hpp @@ -18,7 +18,7 @@ namespace sgns { /** * @brief Migration step for version 1.0.0 to 3.4.0. - * Changes the full node topic from CRDT heads + * Changes the full node topic from CRDT heads */ class Migration1_0_0To3_4_0 : public IMigrationStep { @@ -26,7 +26,7 @@ namespace sgns Migration1_0_0To3_4_0( std::shared_ptr ioContext, std::shared_ptr pubSub, std::shared_ptr graphsync, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::string writeBasePath, std::string base58key ); @@ -70,7 +70,7 @@ namespace sgns std::shared_ptr ioContext_; ///< IO context for DB I/O. std::shared_ptr pubSub_; ///< PubSub instance for legacy DB. std::shared_ptr graphsync_; ///< GraphSync network. - std::shared_ptr scheduler_; ///< libp2p scheduler. + std::shared_ptr scheduler_; ///< libp2p scheduler. std::shared_ptr generator_; ///< Request ID generator. 
std::string writeBasePath_; ///< Base path for writing DB files. std::string base58key_; ///< Key to build legacy paths. diff --git a/src/account/Migration3_4_0To3_5_0.cpp b/src/account/Migration3_4_0To3_5_0.cpp index 8f3835766..d7ad9f863 100644 --- a/src/account/Migration3_4_0To3_5_0.cpp +++ b/src/account/Migration3_4_0To3_5_0.cpp @@ -1,6 +1,6 @@ /** * @file Migration3_4_0To3_5_0.cpp - * @brief + * @brief * @date 2025-11-14 * @author Henrique A. Klein (hklein@gnus.ai) */ @@ -31,7 +31,7 @@ namespace sgns std::shared_ptr ioContext, std::shared_ptr pubSub, std::shared_ptr graphsync, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::string writeBasePath, std::string base58key, @@ -101,11 +101,11 @@ namespace sgns outcome::result Migration3_4_0To3_5_0::Init() { - OUTCOME_TRY( auto &&legacy_db, InitLegacyDb() ); + BOOST_OUTCOME_TRY( auto legacy_db, InitLegacyDb() ); db_3_4_0_ = std::move( legacy_db ); if ( db_3_4_0_ ) { - OUTCOME_TRY( auto &&new_db, InitTargetDb() ); + BOOST_OUTCOME_TRY( auto new_db, InitTargetDb() ); db_3_5_0_ = std::move( new_db ); } return outcome::success(); @@ -124,6 +124,9 @@ namespace sgns account_->ConfigureDatabaseDependencies( db_3_5_0_ ); db_3_5_0_->Start(); + db_3_5_0_->SetBroadcastEnabled( false ); + logger_->info( "Broadcast suppression enabled for migration target DB" ); + //init blockchain if ( !blockchain_ ) { @@ -252,7 +255,7 @@ namespace sgns for ( const auto network_id : monitored_networks ) { auto blockchain_base = TransactionManager::GetBlockChainBase( network_id ); - OUTCOME_TRY( auto &&entries, db_3_4_0_->QueryKeyValues( blockchain_base, "*", "/tx" ) ); + BOOST_OUTCOME_TRY( auto entries, db_3_4_0_->QueryKeyValues( blockchain_base, "*", "/tx" ) ); logger_->debug( "Found {} transaction keys to migrate on network {}", entries.size(), network_id ); std::vector owned_transactions; @@ -264,7 +267,7 @@ namespace sgns { sgns::crdt::GlobalDB::Buffer data_transaction; data_transaction.put( 
record.tx->SerializeByteVector() ); - BOOST_OUTCOME_TRYV2( auto &&, crdt_transaction_->Put( record.key, std::move( data_transaction ) ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Put( record.key, std::move( data_transaction ) ) ); topics_.emplace( record.tx->GetSrcAddress() ); if ( auto transfer_tx = std::dynamic_pointer_cast( record.tx ) ) @@ -283,7 +286,7 @@ namespace sgns ++migrated_count; if ( migrated_count >= BATCH_SIZE ) { - OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); crdt_transaction_ = db_3_5_0_->BeginTransaction(); topics_.clear(); topics_.emplace( std::string( TransactionManager::GNUS_FULL_NODES_TOPIC ) ); @@ -387,12 +390,12 @@ namespace sgns logger_->info( "Synthesized zero-value mint for missing nonce {} on network {}", expected_nonce, network_id ); - OUTCOME_TRY( persist_record( filler_record ) ); + BOOST_OUTCOME_TRY( persist_record( filler_record ) ); last_timestamp = filler_record.tx->GetTimestamp(); ++expected_nonce; } - OUTCOME_TRY( persist_record( record ) ); + BOOST_OUTCOME_TRY( persist_record( record ) ); last_timestamp = record.tx->GetTimestamp(); expected_nonce = record.tx->dag_st.nonce() + 1; } @@ -400,12 +403,12 @@ namespace sgns for ( const auto &record : other_transactions ) { - OUTCOME_TRY( persist_record( record ) ); + BOOST_OUTCOME_TRY( persist_record( record ) ); } } if ( migrated_count != 0 ) { - OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); logger_->debug( "Committed remaining {} transactions", migrated_count ); } @@ -414,7 +417,7 @@ namespace sgns version_key.put( std::string( MigrationManager::VERSION_INFO_KEY ) ); version_buffer.put( ToVersion() ); - OUTCOME_TRY( db_3_5_0_->GetDataStore()->put( version_key, version_buffer ) ); + BOOST_OUTCOME_TRY( db_3_5_0_->GetDataStore()->put( version_key, version_buffer ) ); logger_->debug( "Migration from {} to {} completed successfully", FromVersion(), 
ToVersion() ); return outcome::success(); diff --git a/src/account/Migration3_4_0To3_5_0.hpp b/src/account/Migration3_4_0To3_5_0.hpp index 7234205ec..f5b00caa0 100644 --- a/src/account/Migration3_4_0To3_5_0.hpp +++ b/src/account/Migration3_4_0To3_5_0.hpp @@ -1,6 +1,6 @@ /** * @file Migration3_4_0To3_5_0.hpp - * @brief + * @brief * @date 2025-11-11 * @author Henrique A. Klein (hklein@gnus.ai) */ @@ -22,7 +22,7 @@ namespace sgns /** * @brief Migration step for version 1.0.0 to 3.4.0. - * Changes the full node topic from CRDT heads + * Changes the full node topic from CRDT heads */ class Migration3_4_0To3_5_0 : public IMigrationStep, public std::enable_shared_from_this { @@ -30,7 +30,7 @@ namespace sgns Migration3_4_0To3_5_0( std::shared_ptr ioContext, std::shared_ptr pubSub, std::shared_ptr graphsync, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::string writeBasePath, std::string base58key, @@ -81,7 +81,7 @@ namespace sgns std::shared_ptr ioContext_; ///< IO context for DB I/O. std::shared_ptr pubSub_; ///< PubSub instance for legacy DB. std::shared_ptr graphsync_; ///< GraphSync network. - std::shared_ptr scheduler_; ///< libp2p scheduler. + std::shared_ptr scheduler_; ///< libp2p scheduler. std::shared_ptr generator_; ///< Request ID generator. std::string writeBasePath_; ///< Base path for writing DB files. std::string base58key_; ///< Key to build legacy paths. 
diff --git a/src/account/Migration3_5_0To3_6_0.cpp b/src/account/Migration3_5_0To3_6_0.cpp index 3e818d5f0..174c414c9 100644 --- a/src/account/Migration3_5_0To3_6_0.cpp +++ b/src/account/Migration3_5_0To3_6_0.cpp @@ -17,7 +17,7 @@ namespace sgns std::shared_ptr ioContext, std::shared_ptr pubSub, std::shared_ptr graphsync, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::string writeBasePath, std::string base58key ) : @@ -75,11 +75,11 @@ namespace sgns outcome::result Migration3_5_0To3_6_0::Init() { - OUTCOME_TRY( auto &&legacy_db, InitLegacyDb() ); + BOOST_OUTCOME_TRY( auto legacy_db, InitLegacyDb() ); db_3_5_1_ = std::move( legacy_db ); if ( db_3_5_1_ ) { - OUTCOME_TRY( auto &&new_db, InitTargetDb() ); + BOOST_OUTCOME_TRY( auto new_db, InitTargetDb() ); db_3_6_0_ = std::move( new_db ); } return outcome::success(); @@ -95,8 +95,8 @@ namespace sgns logger_->info( "Starting migration from {} to {}", FromVersion(), ToVersion() ); - OUTCOME_TRY( blockchain::ValidatorRegistry::MigrateCids( db_3_5_1_, db_3_6_0_ ) ); - OUTCOME_TRY( Blockchain::MigrateCids( db_3_5_1_, db_3_6_0_ ) ); + BOOST_OUTCOME_TRY( blockchain::ValidatorRegistry::MigrateCids( db_3_5_1_, db_3_6_0_ ) ); + BOOST_OUTCOME_TRY( Blockchain::MigrateCids( db_3_5_1_, db_3_6_0_ ) ); auto crdt_transaction_ = db_3_6_0_->BeginTransaction(); std::unordered_set topics_; @@ -114,7 +114,7 @@ namespace sgns std::unordered_set unique_keys; std::vector transaction_keys; - OUTCOME_TRY( auto &&entries, db_3_5_1_->QueryKeyValues( blockchain_base, "*", "/tx" ) ); + BOOST_OUTCOME_TRY( auto entries, db_3_5_1_->QueryKeyValues( blockchain_base, "*", "/tx" ) ); for ( const auto &entry : entries ) { auto keyOpt = db_3_5_1_->KeyToString( entry.first ); @@ -153,7 +153,7 @@ namespace sgns sgns::crdt::GlobalDB::Buffer data_transaction; data_transaction.put( tx->SerializeByteVector() ); - BOOST_OUTCOME_TRYV2( auto &&, crdt_transaction_->Put( new_tx_key, std::move( data_transaction ) ) ); + 
BOOST_OUTCOME_TRY( crdt_transaction_->Put( new_tx_key, std::move( data_transaction ) ) ); topics_.emplace( tx->GetSrcAddress() ); if ( auto transfer_tx = std::dynamic_pointer_cast( tx ) ) @@ -172,7 +172,7 @@ namespace sgns ++migrated_count; if ( migrated_count >= BATCH_SIZE ) { - OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); crdt_transaction_ = db_3_6_0_->BeginTransaction(); topics_.clear(); topics_.emplace( std::string( TransactionManager::GNUS_FULL_NODES_TOPIC ) ); @@ -184,7 +184,7 @@ namespace sgns if ( migrated_count ) { - OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); + BOOST_OUTCOME_TRY( crdt_transaction_->Commit( topics_ ) ); logger_->debug( "Committed remaining {} transactions", migrated_count ); } @@ -193,7 +193,7 @@ namespace sgns version_key.put( std::string( MigrationManager::VERSION_INFO_KEY ) ); version_buffer.put( ToVersion() ); - OUTCOME_TRY( db_3_6_0_->GetDataStore()->put( version_key, version_buffer ) ); + BOOST_OUTCOME_TRY( db_3_6_0_->GetDataStore()->put( version_key, version_buffer ) ); logger_->debug( "Migration from {} to {} completed successfully", FromVersion(), ToVersion() ); return outcome::success(); diff --git a/src/account/Migration3_5_0To3_6_0.hpp b/src/account/Migration3_5_0To3_6_0.hpp index 11f4a5d1a..63ce9c715 100644 --- a/src/account/Migration3_5_0To3_6_0.hpp +++ b/src/account/Migration3_5_0To3_6_0.hpp @@ -13,7 +13,7 @@ namespace sgns Migration3_5_0To3_6_0( std::shared_ptr ioContext, std::shared_ptr pubSub, std::shared_ptr graphsync, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::string writeBasePath, std::string base58key ); @@ -34,7 +34,7 @@ namespace sgns std::shared_ptr ioContext_; std::shared_ptr pubSub_; std::shared_ptr graphsync_; - std::shared_ptr scheduler_; + std::shared_ptr scheduler_; std::shared_ptr generator_; std::string writeBasePath_; std::string base58key_; diff --git 
a/src/account/MigrationManager.cpp b/src/account/MigrationManager.cpp index f26088b58..a13234edf 100644 --- a/src/account/MigrationManager.cpp +++ b/src/account/MigrationManager.cpp @@ -34,7 +34,7 @@ namespace sgns std::shared_ptr ioContext, std::shared_ptr pubSub, std::shared_ptr graphsync, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::string writeBasePath, std::string base58key, @@ -89,20 +89,20 @@ namespace sgns { m_logger->debug( "Starting migration step from {} to {}", step->FromVersion(), step->ToVersion() ); - OUTCOME_TRY( step->Init() ); + BOOST_OUTCOME_TRY( step->Init() ); - OUTCOME_TRY( bool is_req, step->IsRequired() ); + BOOST_OUTCOME_TRY( bool is_req, step->IsRequired() ); if ( is_req ) { - OUTCOME_TRY( step->Apply() ); + BOOST_OUTCOME_TRY( step->Apply() ); m_logger->debug( "Completed migration step to {}", step->ToVersion() ); } else { m_logger->debug( "Skipping migration step from {} to {}", step->FromVersion(), step->ToVersion() ); } - OUTCOME_TRY( step->ShutDown() ); + BOOST_OUTCOME_TRY( step->ShutDown() ); std::this_thread::sleep_for( std::chrono::milliseconds( 200 ) ); } diff --git a/src/account/MigrationManager.hpp b/src/account/MigrationManager.hpp index b30fd80a7..8c7f0d4a6 100644 --- a/src/account/MigrationManager.hpp +++ b/src/account/MigrationManager.hpp @@ -23,7 +23,7 @@ #include "outcome/outcome.hpp" #include #include -#include +#include #include "IMigrationStep.hpp" @@ -57,7 +57,7 @@ namespace sgns std::shared_ptr ioContext, std::shared_ptr pubSub, std::shared_ptr graphsync, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::string writeBasePath, std::string base58key, diff --git a/src/account/MintTransaction.cpp b/src/account/MintTransaction.cpp index bebd2dcf7..bbca4034c 100644 --- a/src/account/MintTransaction.cpp +++ b/src/account/MintTransaction.cpp @@ -6,8 +6,6 @@ */ #include "account/MintTransaction.hpp" -#include "crypto/hasher/hasher_impl.hpp" - 
namespace sgns { MintTransaction::MintTransaction( uint64_t new_amount, @@ -31,7 +29,11 @@ namespace sgns size_t size = tx_struct.ByteSizeLong(); std::vector serialized_proto( size ); - tx_struct.SerializeToArray( serialized_proto.data(), serialized_proto.size() ); + + if ( !tx_struct.SerializeToArray( serialized_proto.data(), serialized_proto.size() ) ) + { + std::cerr << "Failed to serialize transaction\n"; + } return serialized_proto; } @@ -49,7 +51,7 @@ namespace sgns TokenID tokenid = TokenID::FromBytes( tx_struct.token_id().data(), tx_struct.token_id().size() ); return std::make_shared( - MintTransaction( amount, chainid, tokenid, tx_struct.dag_struct() ) ); // Return new instance + MintTransaction( amount, chainid, tokenid, tx_struct.dag_struct() ) ); } uint64_t MintTransaction::GetAmount() const diff --git a/src/account/MintTransactionV2.cpp b/src/account/MintTransactionV2.cpp new file mode 100644 index 000000000..9acdd55d4 --- /dev/null +++ b/src/account/MintTransactionV2.cpp @@ -0,0 +1,192 @@ +/** + * @file MintTransactionV2.cpp + * @brief UTXO-aware mint transaction implementation. 
+ * @date 2026-03-18 + */ +#include "account/MintTransactionV2.hpp" + +#include "base/blob.hpp" +#include "crypto/hasher/hasher_impl.hpp" + +namespace sgns +{ + MintTransactionV2::MintTransactionV2( UTXOTxParameters utxo_params, + std::string chain_id, + TokenID token_id, + SGTransaction::DAGStruct dag ) : + IGeniusTransactions( "mint-v2", SetDAGWithType( std::move( dag ), "mint-v2" ) ), + utxo_params_( std::move( utxo_params ) ), + chain_id_( std::move( chain_id ) ), + token_id_( std::move( token_id ) ) + { + } + + std::vector MintTransactionV2::SerializeByteVector() + { + SGTransaction::MintTxV2 tx_struct; + tx_struct.mutable_dag_struct()->CopyFrom( this->dag_st ); + tx_struct.set_chain_id( chain_id_ ); + + auto *utxo_proto_params = tx_struct.mutable_utxo_params(); + for ( const auto &[txid_hash_, output_idx_, signature_] : utxo_params_.first ) + { + auto *input_proto = utxo_proto_params->add_inputs(); + input_proto->set_tx_id_hash( txid_hash_.toReadableString() ); + input_proto->set_output_index( output_idx_ ); + input_proto->set_signature( signature_.data(), signature_.size() ); + } + + for ( const auto &[encrypted_amount, dest_address, token_id] : utxo_params_.second ) + { + auto *output_proto = utxo_proto_params->add_outputs(); + output_proto->set_encrypted_amount( encrypted_amount ); + output_proto->set_dest_addr( dest_address ); + output_proto->set_token_id( token_id.bytes().data(), token_id.size() ); + } + + const auto amount = GetAmount(); + const auto token = GetTokenID(); + tx_struct.set_amount( amount ); + tx_struct.set_token_id( token.bytes().data(), token.size() ); + + size_t size = tx_struct.ByteSizeLong(); + std::vector serialized_proto( size ); + if ( !tx_struct.SerializeToArray( serialized_proto.data(), serialized_proto.size() ) ) + { + std::cerr << "Failed to Serialize MintTxV2 to array" << std::endl; + } + return serialized_proto; + } + + std::shared_ptr MintTransactionV2::DeSerializeByteVector( const std::vector &data ) + { + 
SGTransaction::MintTxV2 tx_struct; + if ( !tx_struct.ParseFromArray( data.data(), data.size() ) ) + { + std::cerr << "Failed to parse MintTxV2 from array\n"; + return nullptr; + } + + uint64_t amount = tx_struct.amount(); + std::string chainid = tx_struct.chain_id(); + TokenID tokenid = TokenID::FromBytes( tx_struct.token_id().data(), tx_struct.token_id().size() ); + + std::vector inputs; + auto *utxo_proto_params = tx_struct.mutable_utxo_params(); + for ( int i = 0; i < utxo_proto_params->inputs_size(); ++i ) + { + const auto &input_proto = utxo_proto_params->inputs( i ); + auto maybe_hash = base::Hash256::fromReadableString( input_proto.tx_id_hash() ); + if ( !maybe_hash ) + { + std::cerr << "Invalid hash in mint-v2 input." << std::endl; + return nullptr; + } + + inputs.push_back( { maybe_hash.value(), + input_proto.output_index(), + std::vector( input_proto.signature().cbegin(), input_proto.signature().cend() ) } ); + } + + std::vector outputs; + for ( int i = 0; i < utxo_proto_params->outputs_size(); ++i ) + { + const auto &output_proto = utxo_proto_params->outputs( i ); + outputs.push_back( { output_proto.encrypted_amount(), + output_proto.dest_addr(), + TokenID::FromBytes( output_proto.token_id().data(), output_proto.token_id().size() ) } ); + } + + if ( outputs.empty() ) + { + outputs.push_back( { amount, tx_struct.dag_struct().source_addr(), tokenid } ); + } + + if ( inputs.empty() && !tx_struct.dag_struct().previous_hash().empty() ) + { + auto maybe_prev_hash = base::Hash256::fromReadableString( tx_struct.dag_struct().previous_hash() ); + if ( maybe_prev_hash ) + { + inputs.push_back( { maybe_prev_hash.value(), 0, {} } ); + } + } + + return std::make_shared( MintTransactionV2( { std::move( inputs ), std::move( outputs ) }, + chainid, + tokenid, + tx_struct.dag_struct() ) ); + } + + uint64_t MintTransactionV2::GetAmount() const + { + if ( utxo_params_.second.empty() ) + { + return 0; + } + return utxo_params_.second.front().encrypted_amount; + } + + TokenID 
MintTransactionV2::GetTokenID() const + { + if ( utxo_params_.second.empty() ) + { + return token_id_; + } + return utxo_params_.second.front().token_id; + } + + UTXOTxParameters MintTransactionV2::GetUTXOParameters() const + { + return utxo_params_; + } + + bool MintTransactionV2::HasUTXOParameters() const + { + return true; + } + + std::optional MintTransactionV2::GetUTXOParametersOpt() const + { + return utxo_params_; + } + + std::unordered_set MintTransactionV2::GetTopics() const + { + auto topics = IGeniusTransactions::GetTopics(); + for ( const auto &output : utxo_params_.second ) + { + topics.emplace( output.dest_address ); + } + return topics; + } + + MintTransactionV2 MintTransactionV2::New( uint64_t new_amount, + std::string chain_id, + TokenID token_id, + SGTransaction::DAGStruct dag, + std::string mint_destination ) + { + std::vector mint_inputs; + if ( !dag.previous_hash().empty() ) + { + auto maybe_hash = base::Hash256::fromReadableString( dag.previous_hash() ); + if ( maybe_hash ) + { + mint_inputs.push_back( { maybe_hash.value(), 0, {} } ); + } + } + + if ( mint_destination.empty() ) + { + mint_destination = dag.source_addr(); + } + + std::vector mint_outputs{ { new_amount, mint_destination, token_id } }; + MintTransactionV2 instance( { std::move( mint_inputs ), std::move( mint_outputs ) }, + std::move( chain_id ), + std::move( token_id ), + std::move( dag ) ); + instance.FillHash(); + return instance; + } +} diff --git a/src/account/MintTransactionV2.hpp b/src/account/MintTransactionV2.hpp new file mode 100644 index 000000000..6aa9de0e9 --- /dev/null +++ b/src/account/MintTransactionV2.hpp @@ -0,0 +1,134 @@ +/** + * @file MintTransactionV2.hpp + * @brief Header file of the Version 2 of the Mint transaction class + * @date 2026-03-19 + * @author Henrique A. 
Klein (hklein@gnus.ai) + */ +#pragma once + +#include +#include + +#include "account/IGeniusTransactions.hpp" +#include "account/TokenID.hpp" +#include "account/UTXOStructs.hpp" + +namespace sgns +{ + /** + * @brief Implements a Mint Version 2 transaction + */ + class MintTransactionV2 final : public IGeniusTransactions + { + public: + /** + * @brief Destroy the Mint Transaction V 2 object + */ + ~MintTransactionV2() override = default; + + /** + * @brief Deserializes a MintV2 serialized byte vector into an object + * @param[in] data The serialized MintV2 data + * @return A shared pointer to a MintV2 object + */ + static std::shared_ptr DeSerializeByteVector( const std::vector &data ); + + /** + * @brief Creates a new MintV2 transaction + * @param[in] new_amount The amount to be minted + * @param[in] chain_id The chain ID from where the mint came from + * @param[in] token_id The token ID + * @param[in] dag The DAG structure with the common transaction data + * @param[in] mint_destination The destination of the Mint + * @return A @ref MintTransactionV2 + */ + static MintTransactionV2 New( uint64_t new_amount, + std::string chain_id, + TokenID token_id, + SGTransaction::DAGStruct dag, + std::string mint_destination ); + + /** + * @brief Serializes the transaction + * @return The serialized byte vector + */ + std::vector SerializeByteVector() override; + + /** + * @brief Get the amount of the mint + * @return The amount of tokens minted + */ + uint64_t GetAmount() const; + + /** + * @brief Get the Token ID + * @return The ID which identifies what token was minted + */ + TokenID GetTokenID() const; + + /** + * @brief Returns the UTXOs + * @return The UTXOs of the MintV2 transaction + */ + UTXOTxParameters GetUTXOParameters() const; + + /** + * @brief Returns if transaction supports UTXOs + * @return True if supported, false otherwise + */ + bool HasUTXOParameters() const override; + + /** + * @brief Returns the UTXOs + * @return If exists, returns the UTXOs of the 
transaction + */ + std::optional GetUTXOParametersOpt() const override; + + /** + * @brief Gets the transaction specific path + * @return Returns the transaction specific path + */ + std::string GetTransactionSpecificPath() const override + { + return GetType(); + } + + /** + * @brief Returns the topics of interest of this transaction + * @return A set of topics + */ + std::unordered_set GetTopics() const override; + + private: + /** + * @brief Construct a new Mint Transaction V2 + * @param[in] utxo_params The UTXO set (inputs and outputs) + * @param[in] chain_id The chain ID form which the inputs came + * @param[in] token_id The Token ID + * @param[in] dag The basic DAG structure of every transaction + */ + MintTransactionV2( UTXOTxParameters utxo_params, + std::string chain_id, + TokenID token_id, + SGTransaction::DAGStruct dag ); + + UTXOTxParameters utxo_params_; ///< The UTXOs (inputs and outputs) + std::string chain_id_; ///< The chain ID from the bridge + TokenID token_id_; ///< The ID of the token minted + + /** + * @brief Registers a deserializer for MintV2 transactions + * @return Returns true when registered + */ + static bool Register() + { + RegisterDeserializer( "mint-v2", &MintTransactionV2::DeSerializeByteVector ); + return true; + } + + /** + * @brief Forces the static initialization of the Deserializer. 
+ */ + static inline bool registered = Register(); + }; +} diff --git a/src/account/ProcessingTransaction.cpp b/src/account/ProcessingTransaction.cpp index 5ede92fae..47f623078 100644 --- a/src/account/ProcessingTransaction.cpp +++ b/src/account/ProcessingTransaction.cpp @@ -59,7 +59,10 @@ namespace sgns } size_t size = tx_struct.ByteSizeLong(); std::vector serialized_proto( size ); - tx_struct.SerializeToArray( serialized_proto.data(), serialized_proto.size() ); + if ( !tx_struct.SerializeToArray( serialized_proto.data(), serialized_proto.size() ) ) + { + std::cerr << "Failed to serialize transaction\n"; + } return serialized_proto; } diff --git a/src/account/TokenAmount.cpp b/src/account/TokenAmount.cpp index 01fd62103..bf56e653f 100644 --- a/src/account/TokenAmount.cpp +++ b/src/account/TokenAmount.cpp @@ -21,14 +21,14 @@ namespace sgns outcome::result> TokenAmount::New( double value ) { - OUTCOME_TRY( auto &&from_dbl_value, ScaledInteger::FromDouble( value, PRECISION ) ); + BOOST_OUTCOME_TRY( auto from_dbl_value, ScaledInteger::FromDouble( value, PRECISION ) ); auto ptr = std::shared_ptr( new TokenAmount( from_dbl_value ) ); return outcome::success( ptr ); } outcome::result> TokenAmount::New( const std::string &str ) { - OUTCOME_TRY( auto &&from_str_value, ScaledInteger::FromString( str, PRECISION ) ); + BOOST_OUTCOME_TRY( auto from_str_value, ScaledInteger::FromString( str, PRECISION ) ); auto ptr = std::shared_ptr( new TokenAmount( from_str_value ) ); return outcome::success( ptr ); } @@ -37,13 +37,13 @@ namespace sgns outcome::result TokenAmount::Multiply( const TokenAmount &other ) const { - OUTCOME_TRY( auto &&multiply_res, ScaledInteger::Multiply( minions_, other.minions_, PRECISION ) ); + BOOST_OUTCOME_TRY( auto multiply_res, ScaledInteger::Multiply( minions_, other.minions_, PRECISION ) ); return outcome::success( TokenAmount( multiply_res ) ); } outcome::result TokenAmount::Divide( const TokenAmount &other ) const { - OUTCOME_TRY( auto &÷_res, 
ScaledInteger::Divide( minions_, other.minions_, PRECISION ) ); + BOOST_OUTCOME_TRY( auto divide_res, ScaledInteger::Divide( minions_, other.minions_, PRECISION ) ); return outcome::success( TokenAmount( divide_res ) ); } @@ -105,7 +105,7 @@ namespace sgns return outcome::failure( std::errc::value_too_large ); } - OUTCOME_TRY( auto &&minions_res, cost_fp->ConvertPrecision( PRECISION ) ); + BOOST_OUTCOME_TRY( auto minions_res, cost_fp->ConvertPrecision( PRECISION ) ); uint64_t raw_minions = minions_res.Value(); raw_minions = std::max( raw_minions, MIN_MINION_UNITS ); @@ -114,22 +114,22 @@ namespace sgns outcome::result TokenAmount::ConvertToChildToken( uint64_t in, std::string ratio ) { - OUTCOME_TRY( auto ratio_fp, ScaledInteger::New( ratio, PRECISION ) ); + BOOST_OUTCOME_TRY( auto ratio_fp, ScaledInteger::New( ratio, PRECISION ) ); - OUTCOME_TRY( auto minion_fp, ScaledInteger::New( in, PRECISION ) ); + BOOST_OUTCOME_TRY( auto minion_fp, ScaledInteger::New( in, PRECISION ) ); - OUTCOME_TRY( auto child_fp, minion_fp->Divide( *ratio_fp ) ); + BOOST_OUTCOME_TRY( auto child_fp, minion_fp->Divide( *ratio_fp ) ); return outcome::success( child_fp.ToString() ); } outcome::result TokenAmount::ConvertFromChildToken( std::string in, std::string ratio ) { - OUTCOME_TRY( auto ratio_fp, ScaledInteger::New( ratio, PRECISION ) ); + BOOST_OUTCOME_TRY( auto ratio_fp, ScaledInteger::New( ratio, PRECISION ) ); - OUTCOME_TRY( auto child_fp, ScaledInteger::New( in, PRECISION, ScaledInteger::ParseMode::Truncate ) ); + BOOST_OUTCOME_TRY( auto child_fp, ScaledInteger::New( in, PRECISION, ScaledInteger::ParseMode::Truncate ) ); - OUTCOME_TRY( auto minion_fp, child_fp->Multiply( *ratio_fp ) ); + BOOST_OUTCOME_TRY( auto minion_fp, child_fp->Multiply( *ratio_fp ) ); return outcome::success( minion_fp.Value() ); } diff --git a/src/account/TokenID.hpp b/src/account/TokenID.hpp index 0be641cd2..16a458d19 100644 --- a/src/account/TokenID.hpp +++ b/src/account/TokenID.hpp @@ -25,6 +25,9 @@ 
namespace sgns TokenID() : data_{}, valid_( false ) {} TokenID( const TokenID &other ) = default; + TokenID( TokenID &&other ) = default; + TokenID &operator=( const TokenID &other ) = default; + TokenID &operator=( TokenID &&other ) = default; static TokenID FromBytes( std::initializer_list list ) { diff --git a/src/account/TransactionManager.cpp b/src/account/TransactionManager.cpp index 7bd57f217..a8ed6ef01 100644 --- a/src/account/TransactionManager.cpp +++ b/src/account/TransactionManager.cpp @@ -18,6 +18,7 @@ #include #include "TransferTransaction.hpp" #include "MintTransaction.hpp" +#include "MintTransactionV2.hpp" #include "EscrowTransaction.hpp" #include "EscrowReleaseTransaction.hpp" #include "account/TokenAmount.hpp" @@ -26,6 +27,7 @@ #include "crdt/proto/delta.pb.h" #include "base/sgns_version.hpp" +#include "outcome/outcome.hpp" #include "proof/ProcessingProof.hpp" namespace sgns @@ -458,7 +460,7 @@ namespace sgns { return outcome::failure( boost::system::error_code{} ); } - OUTCOME_TRY( auto &¶ms, utxo_manager_.CreateTxParameter( amount, destination, token_id ) ); + BOOST_OUTCOME_TRY( auto params, utxo_manager_.CreateTxParameter( amount, destination, token_id ) ); auto [inputs, outputs] = params; auto transfer_transaction = std::make_shared( @@ -476,17 +478,23 @@ namespace sgns outcome::result TransactionManager::MintFunds( uint64_t amount, std::string transaction_hash, std::string chainid, - TokenID tokenid ) + TokenID tokenid, + std::string destination ) { if ( GetState() != State::READY ) { return outcome::failure( boost::system::error_code{} ); } - auto mint_transaction = std::make_shared( - MintTransaction::New( amount, - std::move( chainid ), - std::move( tokenid ), - FillDAGStruct( std::move( transaction_hash ) ) ) ); + if ( destination.empty() ) + { + destination = account_m->GetAddress(); + } + auto mint_transaction = std::make_shared( + MintTransactionV2::New( amount, + std::move( chainid ), + std::move( tokenid ), + FillDAGStruct( 
std::move( transaction_hash ) ), + destination ) ); mint_transaction->MakeSignature( *account_m ); @@ -509,7 +517,7 @@ namespace sgns } auto hash_data = hasher_m->blake2b_256( std::vector{ job_id.begin(), job_id.end() } ); - OUTCOME_TRY( ( auto &&, params ), + BOOST_OUTCOME_TRY( auto params, utxo_manager_.CreateTxParameter( amount, "0x" + hash_data.toReadableString(), TokenID::FromBytes( { 0x00 } ) ) ); @@ -556,17 +564,17 @@ namespace sgns account_m->GetAddress().substr( 0, 8 ), full_node_m, escrow_path ); - OUTCOME_TRY( ( auto &&, transaction ), FetchTransaction( globaldb_m, escrow_path ) ); + BOOST_OUTCOME_TRY( auto transaction, FetchTransaction( globaldb_m, escrow_path ) ); std::shared_ptr escrow_tx = std::dynamic_pointer_cast( transaction ); std::vector subtask_ids; std::vector payout_peers; - OUTCOME_TRY( ( auto &&, escrow_amount_ptr ), TokenAmount::New( escrow_tx->GetAmount() ) ); + BOOST_OUTCOME_TRY( auto escrow_amount_ptr, TokenAmount::New( escrow_tx->GetAmount() ) ); - OUTCOME_TRY( ( auto &&, peers_cut_ptr ), TokenAmount::New( escrow_tx->GetPeersCut() ) ); + BOOST_OUTCOME_TRY( auto peers_cut_ptr, TokenAmount::New( escrow_tx->GetPeersCut() ) ); - OUTCOME_TRY( ( auto &&, peer_total ), escrow_amount_ptr->Multiply( *peers_cut_ptr ) ); + BOOST_OUTCOME_TRY( auto peer_total, escrow_amount_ptr->Multiply( *peers_cut_ptr ) ); const auto escrowTokenId = escrow_tx->GetUTXOParameters().second[0].token_id; @@ -744,7 +752,7 @@ namespace sgns tx_key.GetKey() ); data_transaction.put( transaction->SerializeByteVector() ); - BOOST_OUTCOME_TRYV2( auto &&, crdt_transaction->Put( std::move( tx_key ), std::move( data_transaction ) ) ); + BOOST_OUTCOME_TRY( crdt_transaction->Put( std::move( tx_key ), std::move( data_transaction ) ) ); if ( maybe_proof ) { @@ -758,7 +766,7 @@ namespace sgns proof_key.GetKey() ); proof_transaction.put( proof ); - BOOST_OUTCOME_TRYV2( auto &&, + BOOST_OUTCOME_TRY( crdt_transaction->Put( std::move( proof_key ), std::move( proof_transaction ) ) ); } 
nonces_set.insert( transaction->dag_st.nonce() ); @@ -773,7 +781,7 @@ namespace sgns } for ( auto &[tx, _] : transaction_batch ) { - OUTCOME_TRY( ParseTransaction( tx ) ); + BOOST_OUTCOME_TRY( ParseTransaction( tx ) ); topicSet.merge( tx->GetTopics() ); std::unique_lock tx_lock( tx_mutex_m ); const auto key = GetTransactionPath( *tx ); @@ -802,7 +810,7 @@ namespace sgns } } - BOOST_OUTCOME_TRYV2( auto &&, crdt_transaction->Commit( topicSet ) ); + BOOST_OUTCOME_TRY( crdt_transaction->Commit( topicSet ) ); return nonces_set; } @@ -983,7 +991,7 @@ namespace sgns outcome::result> TransactionManager::DeSerializeTransaction( std::string tx_data ) { - OUTCOME_TRY( ( auto &&, dag ), IGeniusTransactions::DeSerializeDAGStruct( tx_data ) ); + BOOST_OUTCOME_TRY( auto dag, IGeniusTransactions::DeSerializeDAGStruct( tx_data ) ); auto it = IGeniusTransactions::GetDeSerializers().find( dag.type() ); if ( it == IGeniusTransactions::GetDeSerializers().end() ) @@ -1025,7 +1033,7 @@ namespace sgns const std::shared_ptr &db, std::string_view transaction_key ) { - OUTCOME_TRY( auto transaction_data, db->Get( { std::string( transaction_key ) } ) ); + BOOST_OUTCOME_TRY( auto transaction_data, db->Get( { std::string( transaction_key ) } ) ); return DeSerializeTransaction( transaction_data ); } @@ -1035,7 +1043,7 @@ namespace sgns { const auto &transaction_data_vector = tx_data.toVector(); - OUTCOME_TRY( ( auto &&, dag ), IGeniusTransactions::DeSerializeDAGStruct( transaction_data_vector ) ); + BOOST_OUTCOME_TRY( auto dag, IGeniusTransactions::DeSerializeDAGStruct( transaction_data_vector ) ); auto it = IGeniusTransactions::GetDeSerializers().find( dag.type() ); if ( it == IGeniusTransactions::GetDeSerializers().end() ) @@ -1052,7 +1060,7 @@ namespace sgns account_m->GetAddress().substr( 0, 8 ), full_node_m, proof_path ); - OUTCOME_TRY( ( auto &&, proof_data ), globaldb_m->Get( { proof_path } ) ); + BOOST_OUTCOME_TRY( auto proof_data, globaldb_m->Get( { proof_path } ) ); auto 
proof_data_vector = proof_data.toVector(); @@ -1074,7 +1082,7 @@ namespace sgns account_m->GetAddress().substr( 0, 8 ), full_node_m, query_path ); - OUTCOME_TRY( auto transaction_list, globaldb_m->QueryKeyValues( query_path ) ); + BOOST_OUTCOME_TRY( auto transaction_list, globaldb_m->QueryKeyValues( query_path ) ); m_logger->trace( "[{} - full: {}] Transaction list grabbed from CRDT with Size {}", account_m->GetAddress().substr( 0, 8 ), @@ -1198,7 +1206,7 @@ namespace sgns { auto hash = ( base::Hash256::fromReadableString( transfer_tx->GetHash() ) ).value(); GeniusUTXO new_utxo( hash, i, dest_infos[i].encrypted_amount, dest_infos[i].token_id ); - utxo_manager_.PutUTXO( new_utxo, dest_infos[i].dest_address ); + BOOST_OUTCOME_TRY( utxo_manager_.PutUTXO( new_utxo, dest_infos[i].dest_address ) ); m_logger->debug( "[{} - full: {}] Notify {} of transfer of {} to it", account_m->GetAddress().substr( 0, 8 ), @@ -1218,17 +1226,44 @@ namespace sgns full_node_m, input.output_idx_ ); } - utxo_manager_.ConsumeUTXOs( transfer_tx->GetInputInfos(), transfer_tx->GetSrcAddress() ); + BOOST_OUTCOME_TRY( utxo_manager_.ConsumeUTXOs( transfer_tx->GetInputInfos(), transfer_tx->GetSrcAddress() ) ); return outcome::success(); } outcome::result TransactionManager::ParseMintTransaction( const std::shared_ptr &tx ) { + if ( auto mint_tx_v2 = std::dynamic_pointer_cast( tx ) ) + { + auto [inputs, outputs] = mint_tx_v2->GetUTXOParameters(); + auto hash = ( base::Hash256::fromReadableString( mint_tx_v2->GetHash() ) ).value(); + for ( std::uint32_t i = 0; i < outputs.size(); ++i ) + { + GeniusUTXO new_utxo( hash, i, outputs[i].encrypted_amount, outputs[i].token_id ); + utxo_manager_.PutUTXO( new_utxo, outputs[i].dest_address ); + } + + if ( !inputs.empty() ) + { + utxo_manager_.ConsumeUTXOs( inputs, mint_tx_v2->GetSrcAddress() ); + } + + m_logger->info( "[{} - full: {}] Created tokens (mint-v2), amount {} balance {}", + account_m->GetAddress().substr( 0, 8 ), + full_node_m, + std::to_string( 
mint_tx_v2->GetAmount() ), + std::to_string( utxo_manager_.GetBalance() ) ); + return outcome::success(); + } + auto mint_tx = std::dynamic_pointer_cast( tx ); + if ( !mint_tx ) + { + return std::errc::invalid_argument; + } - auto hash = ( base::Hash256::fromReadableString( mint_tx->GetHash() ) ).value(); - GeniusUTXO new_utxo( hash, 0, mint_tx->GetAmount(), mint_tx->GetTokenID() ); - utxo_manager_.PutUTXO( new_utxo, mint_tx->GetSrcAddress() ); + auto hash = ( base::Hash256::fromReadableString( mint_tx->GetHash() ) ).value(); + BOOST_OUTCOME_TRY( utxo_manager_.PutUTXO( GeniusUTXO( hash, 0, mint_tx->GetAmount(), mint_tx->GetTokenID() ), + mint_tx->GetSrcAddress() ) ); m_logger->info( "[{} - full: {}] Created tokens, amount {} balance {}", account_m->GetAddress().substr( 0, 8 ), full_node_m, @@ -1253,9 +1288,10 @@ namespace sgns if ( outputs.size() > 1 ) { GeniusUTXO new_utxo( hash, 1, outputs[1].encrypted_amount, outputs[1].token_id ); - utxo_manager_.PutUTXO( new_utxo, outputs[1].dest_address ); + BOOST_OUTCOME_TRY( utxo_manager_.PutUTXO( new_utxo, outputs[1].dest_address ) ); } - utxo_manager_.ConsumeUTXOs( escrow_tx->GetUTXOParameters().first, escrow_tx->GetSrcAddress() ); + BOOST_OUTCOME_TRY( + utxo_manager_.ConsumeUTXOs( escrow_tx->GetUTXOParameters().first, escrow_tx->GetSrcAddress() ) ); } } @@ -1293,7 +1329,7 @@ namespace sgns for ( const auto &dest_info : dest_infos ) { auto hash = ( base::Hash256::fromReadableString( transfer_tx->GetHash() ) ).value(); - utxo_manager_.DeleteUTXO( hash, dest_info.dest_address ); + BOOST_OUTCOME_TRY( utxo_manager_.DeleteUTXO( hash, dest_info.dest_address ) ); m_logger->debug( "[{} - full: {}] Notify {} of deletion of {} to it", account_m->GetAddress().substr( 0, 8 ), @@ -1323,7 +1359,7 @@ namespace sgns account_m->GetAddress().substr( 0, 8 ), full_node_m, tx->GetType() ); - OUTCOME_TRY( ParseTransaction( tx ) ); + BOOST_OUTCOME_TRY( ParseTransaction( tx ) ); } } utxo_manager_.RollbackUTXOs( transfer_tx->GetInputInfos() ); @@ 
-1333,10 +1369,37 @@ namespace sgns outcome::result TransactionManager::RevertMintTransaction( const std::shared_ptr &tx ) { + if ( auto mint_tx_v2 = std::dynamic_pointer_cast( tx ) ) + { + auto [inputs, outputs] = mint_tx_v2->GetUTXOParameters(); + auto hash = ( base::Hash256::fromReadableString( mint_tx_v2->GetHash() ) ).value(); + + for ( const auto &dest_info : outputs ) + { + utxo_manager_.DeleteUTXO( hash, dest_info.dest_address ); + } + if ( !inputs.empty() ) + { + utxo_manager_.RollbackUTXOs( inputs ); + } + + m_logger->info( "[{} - full: {}] Deleted {} tokens (mint-v2), from tx {}, final balance {}", + account_m->GetAddress().substr( 0, 8 ), + full_node_m, + mint_tx_v2->GetAmount(), + mint_tx_v2->GetHash(), + std::to_string( utxo_manager_.GetBalance() ) ); + return outcome::success(); + } + auto mint_tx = std::dynamic_pointer_cast( tx ); + if ( !mint_tx ) + { + return std::errc::invalid_argument; + } auto hash = ( base::Hash256::fromReadableString( mint_tx->GetHash() ) ).value(); - utxo_manager_.DeleteUTXO( hash, mint_tx->GetSrcAddress() ); + BOOST_OUTCOME_TRY( utxo_manager_.DeleteUTXO( hash, mint_tx->GetSrcAddress() ) ); m_logger->info( "[{} - full: {}] Deleted {} tokens, from tx {}, final balance {}", account_m->GetAddress().substr( 0, 8 ), full_node_m, @@ -1359,7 +1422,7 @@ namespace sgns auto hash = ( base::Hash256::fromReadableString( escrow_tx->GetHash() ) ).value(); if ( outputs.size() > 1 ) { - utxo_manager_.DeleteUTXO( hash, outputs[1].dest_address ); + BOOST_OUTCOME_TRY( utxo_manager_.DeleteUTXO( hash, outputs[1].dest_address ) ); } for ( auto &input : inputs ) { @@ -1370,7 +1433,7 @@ namespace sgns account_m->GetAddress().substr( 0, 8 ), full_node_m, tx->GetType() ); - OUTCOME_TRY( ParseTransaction( tx ) ); + BOOST_OUTCOME_TRY( ParseTransaction( tx ) ); } } utxo_manager_.RollbackUTXOs( inputs ); @@ -2032,7 +2095,7 @@ namespace sgns for ( auto it = invalid_transaction_keys.rbegin(); it != invalid_transaction_keys.rend(); ++it ) { - 
RemoveTransactionFromProcessedMaps( *it, true ); + BOOST_OUTCOME_TRY( RemoveTransactionFromProcessedMaps( *it, true ) ); } return changed; } @@ -2047,14 +2110,14 @@ namespace sgns full_node_m, tx_key ); - OUTCOME_TRY( crdt_transaction->Remove( { std::move( tx_key ) } ) ); + BOOST_OUTCOME_TRY( crdt_transaction->Remove( { std::move( tx_key ) } ) ); m_logger->debug( "[{} - full: {}] Removed key transaction on {}", account_m->GetAddress().substr( 0, 8 ), full_node_m, tx_key ); - OUTCOME_TRY( crdt_transaction->Commit( topics ) ); + BOOST_OUTCOME_TRY( crdt_transaction->Commit( topics ) ); m_logger->debug( "[{} - full: {}] Commited tx on {}", account_m->GetAddress().substr( 0, 8 ), @@ -2614,11 +2677,11 @@ namespace sgns if ( it->second.tx ) { - OUTCOME_TRY( RevertTransaction( it->second.tx ) ); + BOOST_OUTCOME_TRY( RevertTransaction( it->second.tx ) ); if ( delete_from_crdt ) { auto topics = it->second.tx->GetTopics(); - OUTCOME_TRY( DeleteTransaction( transaction_key, topics ) ); + BOOST_OUTCOME_TRY( DeleteTransaction( transaction_key, topics ) ); } account_m->RollBackPeerConfirmedNonce( it->second.tx->dag_st.nonce(), it->second.tx->dag_st.source_addr() ); @@ -2652,7 +2715,7 @@ namespace sgns full_node_m, key ); - OUTCOME_TRY( auto &&new_tx, DeSerializeTransaction( value ) ); + BOOST_OUTCOME_TRY( auto new_tx, DeSerializeTransaction( value ) ); m_logger->debug( "[{} - full: {}] Deserialized transaction {}", account_m->GetAddress().substr( 0, 8 ), @@ -2702,7 +2765,7 @@ namespace sgns const auto conflict_hash = conflicting_tx.value()->GetHash(); tx_lock.unlock(); - OUTCOME_TRY( RemoveTransactionFromProcessedMaps( GetTransactionPath( conflict_hash ), true ) ); + BOOST_OUTCOME_TRY( RemoveTransactionFromProcessedMaps( GetTransactionPath( conflict_hash ), true ) ); tx_lock.lock(); } @@ -2710,7 +2773,7 @@ namespace sgns account_m->GetAddress().substr( 0, 8 ), full_node_m, key ); - OUTCOME_TRY( ParseTransaction( new_tx ) ); + BOOST_OUTCOME_TRY( ParseTransaction( new_tx ) ); const 
auto nonce = new_tx->dag_st.nonce(); diff --git a/src/account/TransactionManager.hpp b/src/account/TransactionManager.hpp index ce949db79..d223e9757 100644 --- a/src/account/TransactionManager.hpp +++ b/src/account/TransactionManager.hpp @@ -114,7 +114,8 @@ namespace sgns outcome::result MintFunds( uint64_t amount, std::string transaction_hash, std::string chainid, - TokenID tokenid ); + TokenID tokenid, + std::string destination = "" ); outcome::result> HoldEscrow( uint64_t amount, const std::string &dev_addr, uint64_t peers_cut, @@ -321,6 +322,8 @@ namespace sgns { "transfer", { &TransactionManager::ParseTransferTransaction, &TransactionManager::RevertTransferTransaction } }, { "mint", { &TransactionManager::ParseMintTransaction, &TransactionManager::RevertMintTransaction } }, + { "mint-v2", + { &TransactionManager::ParseMintTransaction, &TransactionManager::RevertMintTransaction } }, { "escrow-hold", { &TransactionManager::ParseEscrowTransaction, &TransactionManager::RevertEscrowTransaction } }, { "escrow-release", diff --git a/src/account/TransferTransaction.cpp b/src/account/TransferTransaction.cpp index c50a5d1c9..e912461b6 100644 --- a/src/account/TransferTransaction.cpp +++ b/src/account/TransferTransaction.cpp @@ -6,7 +6,6 @@ */ #include "TransferTransaction.hpp" -#include "crypto/hasher/hasher_impl.hpp" #include "base/blob.hpp" namespace sgns @@ -53,7 +52,10 @@ namespace sgns size_t size = tx_struct.ByteSizeLong(); std::vector serialized_proto( size ); - tx_struct.SerializeToArray( serialized_proto.data(), serialized_proto.size() ); + if ( !tx_struct.SerializeToArray( serialized_proto.data(), serialized_proto.size() ) ) + { + std::cerr << "Failed to serialize transaction\n"; + } return serialized_proto; } @@ -62,7 +64,7 @@ namespace sgns SGTransaction::TransferTx tx_struct; if ( !tx_struct.ParseFromArray( data.data(), data.size() ) ) { - std::cerr << "Failed to parse TransferTx from array." 
<< std::endl; + std::cerr << "Failed to parse TransferTx from array.\n"; } std::vector inputs; SGTransaction::UTXOTxParams *utxo_proto_params = tx_struct.mutable_utxo_params(); @@ -100,6 +102,16 @@ namespace sgns return input_tx_; } + bool TransferTransaction::HasUTXOParameters() const + { + return true; + } + + std::optional TransferTransaction::GetUTXOParametersOpt() const + { + return UTXOTxParameters{ input_tx_, outputs_ }; + } + std::unordered_set TransferTransaction::GetTopics() const { auto topics = IGeniusTransactions::GetTopics(); diff --git a/src/account/TransferTransaction.hpp b/src/account/TransferTransaction.hpp index 273f3d577..ab10f5d57 100644 --- a/src/account/TransferTransaction.hpp +++ b/src/account/TransferTransaction.hpp @@ -43,6 +43,18 @@ namespace sgns std::vector GetDstInfos() const; std::vector GetInputInfos() const; + /** + * @brief Returns if transaction supports UTXOs + * @return True if supported, false otherwise + */ + bool HasUTXOParameters() const override; + + /** + * @brief Returns the UTXOs + * @return If exists, returns the UTXOs of the transaction + */ + std::optional GetUTXOParametersOpt() const override; + std::string GetTransactionSpecificPath() const override { return GetType(); diff --git a/src/account/UTXOManager.cpp b/src/account/UTXOManager.cpp index 41fdd8725..04fff7115 100644 --- a/src/account/UTXOManager.cpp +++ b/src/account/UTXOManager.cpp @@ -70,7 +70,7 @@ namespace sgns return balance; } - bool UTXOManager::PutUTXO( GeniusUTXO new_utxo, const std::string &address ) + outcome::result UTXOManager::PutUTXO( GeniusUTXO new_utxo, const std::string &address ) { // If not a full node and trying to store UTXOs for other addresses, reject if ( !is_full_node_ && address != address_ ) @@ -109,12 +109,12 @@ namespace sgns if ( is_new ) { utxo_list.emplace_back( UTXOState::UTXO_READY, std::move( new_utxo ) ); - StoreUTXOs( address ); + BOOST_OUTCOME_TRY( StoreUTXOs( address ) ); } return is_new; } - void 
UTXOManager::DeleteUTXO( const base::Hash256 &utxo_id, const std::string &address ) + outcome::result UTXOManager::DeleteUTXO( const base::Hash256 &utxo_id, const std::string &address ) { // If not a full node and trying to delete UTXOs for other addresses, reject if ( !is_full_node_ && address != address_ ) @@ -140,12 +140,15 @@ namespace sgns } if ( deleted ) { - StoreUTXOs( address ); + BOOST_OUTCOME_TRY( StoreUTXOs( address ) ); } } + + return outcome::success(); } - bool UTXOManager::ConsumeUTXOs( const std::vector &infos, const std::string &address ) + outcome::result UTXOManager::ConsumeUTXOs( const std::vector &infos, + const std::string &address ) { bool consumed = true; std::unique_lock lock( utxos_mutex_ ); @@ -181,7 +184,7 @@ namespace sgns consumed = consumed && utxo_found; } - StoreUTXOs( address ); + BOOST_OUTCOME_TRY( StoreUTXOs( address ) ); return consumed; } @@ -243,7 +246,7 @@ namespace sgns const std::string &dest_address, const TokenID &token_id ) { - OUTCOME_TRY( auto selection_result, SelectUTXOs( amount, token_id ) ); + BOOST_OUTCOME_TRY( auto selection_result, SelectUTXOs( amount, token_id ) ); auto [inputs, selected_amount] = selection_result; std::vector outputs; @@ -274,7 +277,7 @@ namespace sgns total_amount += d.encrypted_amount; } - OUTCOME_TRY( auto selection_result, SelectUTXOs( total_amount, token_id ) ); + BOOST_OUTCOME_TRY( auto selection_result, SelectUTXOs( total_amount, token_id ) ); auto [inputs, selected_amount] = selection_result; std::vector outputs = destinations; @@ -424,7 +427,7 @@ namespace sgns for ( int i = 0; i < utxos.utxos_size(); ++i ) { const auto &utxo = utxos.utxos( i ); - OUTCOME_TRY( auto hash, + BOOST_OUTCOME_TRY( auto hash, base::Hash256::fromSpan( gsl::span( reinterpret_cast( const_cast( utxo.hash().data() ) ), utxo.hash().size() ) ) ); diff --git a/src/account/UTXOManager.hpp b/src/account/UTXOManager.hpp index 39529d697..b4bf979fd 100644 --- a/src/account/UTXOManager.hpp +++ 
b/src/account/UTXOManager.hpp @@ -61,9 +61,9 @@ namespace sgns * @param address Address to add the UTXO to * @return true if the UTXO was added, false otherwise */ - bool PutUTXO( GeniusUTXO new_utxo, const std::string &address ); + outcome::result PutUTXO( GeniusUTXO new_utxo, const std::string &address ); - bool PutUTXO( const GeniusUTXO &new_utxo ) + outcome::result PutUTXO( const GeniusUTXO &new_utxo ) { return PutUTXO( new_utxo, address_ ); } @@ -73,7 +73,7 @@ namespace sgns * @param[in] utxo_id The ID of the UTXO to be deleted * @param address Address to remove the UTXO from */ - void DeleteUTXO( const base::Hash256 &utxo_id, const std::string &address ); + outcome::result DeleteUTXO( const base::Hash256 &utxo_id, const std::string &address ); /** * @brief Consume UTXOs from the account @@ -81,9 +81,9 @@ namespace sgns * @param address Address to consume UTXOs from * @return true if all UTXOs were consumed, false otherwise */ - bool ConsumeUTXOs( const std::vector &infos, const std::string &address ); + outcome::result ConsumeUTXOs( const std::vector &infos, const std::string &address ); - bool ConsumeUTXOs( const std::vector &infos ) + outcome::result ConsumeUTXOs( const std::vector &infos ) { return ConsumeUTXOs( infos, address_ ); } diff --git a/src/account/proto/SGTransaction.proto b/src/account/proto/SGTransaction.proto index 0a8b30c14..bb4a5ff32 100644 --- a/src/account/proto/SGTransaction.proto +++ b/src/account/proto/SGTransaction.proto @@ -70,6 +70,14 @@ message MintTx bytes token_id = 3; uint64 amount = 4; // } +message MintTxV2 +{ + DAGStruct dag_struct = 1;// + bytes chain_id = 2; + bytes token_id = 3; + uint64 amount = 4; // + UTXOTxParams utxo_params = 5; +} message EscrowTx { DAGStruct dag_struct = 1;// @@ -87,5 +95,3 @@ message EscrowReleaseTx string escrow_source = 5; string original_escrow_hash = 6; } - - diff --git a/src/base/CMakeLists.txt b/src/base/CMakeLists.txt index 58a9cba1f..6796f6c48 100644 --- a/src/base/CMakeLists.txt +++ 
b/src/base/CMakeLists.txt @@ -42,16 +42,6 @@ target_link_libraries(logger ) supergenius_install(logger) -add_library(mp_utils - mp_utils.cpp - mp_utils.hpp -) -target_link_libraries(mp_utils - PUBLIC - Boost::headers -) -supergenius_install(mp_utils) - add_library(ScaledInteger ScaledInteger.cpp ScaledInteger.hpp diff --git a/src/base/ScaledInteger.cpp b/src/base/ScaledInteger.cpp index de252da95..bde3c2d50 100644 --- a/src/base/ScaledInteger.cpp +++ b/src/base/ScaledInteger.cpp @@ -26,7 +26,7 @@ namespace sgns outcome::result> ScaledInteger::New( double raw_value, uint64_t precision ) { - OUTCOME_TRY( auto &&from_dbl_value, FromDouble( raw_value, precision ) ); + BOOST_OUTCOME_TRY( auto from_dbl_value, FromDouble( raw_value, precision ) ); auto ptr = std::shared_ptr( new ScaledInteger( from_dbl_value, precision ) ); return outcome::success( ptr ); } @@ -35,7 +35,7 @@ namespace sgns uint64_t precision, ParseMode mode ) { - OUTCOME_TRY( auto &&from_str_value, FromString( str_value, precision, mode ) ); + BOOST_OUTCOME_TRY( auto from_str_value, FromString( str_value, precision, mode ) ); auto ptr = std::shared_ptr( new ScaledInteger( from_str_value, precision ) ); return outcome::success( ptr ); } @@ -52,7 +52,7 @@ namespace sgns { precision_calc = str_value.size() - dot_pos - 1; } - OUTCOME_TRY( auto &&raw_value, FromString( str_value, precision_calc ) ); + BOOST_OUTCOME_TRY( auto raw_value, FromString( str_value, precision_calc ) ); auto ptr = std::shared_ptr( new ScaledInteger( raw_value, precision_calc ) ); return outcome::success( ptr ); } @@ -284,7 +284,7 @@ namespace sgns { return outcome::failure( std::make_error_code( std::errc::invalid_argument ) ); } - OUTCOME_TRY( auto &&multiply_res, ScaledInteger::Multiply( value_, other.value_, precision_ ) ); + BOOST_OUTCOME_TRY( auto multiply_res, ScaledInteger::Multiply( value_, other.value_, precision_ ) ); return outcome::success( ScaledInteger( multiply_res, precision_ ) ); } @@ -294,13 +294,13 @@ namespace sgns 
{ return outcome::failure( std::make_error_code( std::errc::invalid_argument ) ); } - OUTCOME_TRY( auto &÷_res, ScaledInteger::Divide( value_, other.value_, precision_ ) ); + BOOST_OUTCOME_TRY( auto divide_res, ScaledInteger::Divide( value_, other.value_, precision_ ) ); return outcome::success( ScaledInteger( divide_res, precision_ ) ); } outcome::result ScaledInteger::ConvertPrecision( uint64_t to ) const { - OUTCOME_TRY( auto &&convert_res, ScaledInteger::ConvertPrecision( value_, precision_, to ) ); + BOOST_OUTCOME_TRY( auto convert_res, ScaledInteger::ConvertPrecision( value_, precision_, to ) ); return outcome::success( ScaledInteger( convert_res, to ) ); } } // namespace sgns diff --git a/src/base/blob.hpp b/src/base/blob.hpp index a9b172939..14adea69c 100644 --- a/src/base/blob.hpp +++ b/src/base/blob.hpp @@ -124,7 +124,7 @@ namespace sgns::base { * is in hex format */ static outcome::result> fromHex(std::string_view hex) { - OUTCOME_TRY((auto &&, res), unhex(hex)); + BOOST_OUTCOME_TRY( auto res, unhex(hex)); return fromSpan(res); } @@ -136,7 +136,7 @@ namespace sgns::base { */ static outcome::result> fromHexWithPrefix( std::string_view hex) { - OUTCOME_TRY((auto &&, res), unhexWith0x(hex)); + BOOST_OUTCOME_TRY( auto res, unhexWith0x(hex)); return fromSpan(res); } diff --git a/src/base/buffer.cpp b/src/base/buffer.cpp index b7226aaba..5b1fbbe06 100644 --- a/src/base/buffer.cpp +++ b/src/base/buffer.cpp @@ -74,7 +74,7 @@ namespace sgns::base { } outcome::result Buffer::fromHex(std::string_view hex) { - OUTCOME_TRY((auto &&, bytes), unhex(hex)); + BOOST_OUTCOME_TRY( auto bytes, unhex(hex)); return Buffer{std::move(bytes)}; } diff --git a/src/base/mp_utils.cpp b/src/base/mp_utils.cpp deleted file mode 100644 index 4f0d990fa..000000000 --- a/src/base/mp_utils.cpp +++ /dev/null @@ -1,63 +0,0 @@ -#include "base/mp_utils.hpp" - -#include - -namespace sgns::base { - - namespace detail { - template - std::array uint_to_bytes(uint &&i) { - std::array res {}; - 
res.fill(0); - export_bits(i, res.begin(), 8, false); - return res; - } - - template - uint bytes_to_uint(gsl::span bytes) { - if (bytes.empty()) { - return uint(0); - } - uint result; - import_bits(result, bytes.begin(), bytes.end(), 8, false); - return result; - } - } // namespace detail - - std::array uint64_t_to_bytes(uint64_t number) { - std::array result{}; - for (auto i = 0; i < 8; ++i) { - gsl::at(result, i) = static_cast((number >> 8 * (7 - i)) & 0xFF); - } - return result; - } - - uint64_t bytes_to_uint64_t(gsl::span bytes) { - uint64_t result{0}; - for (auto i = 0; i < 8; ++i) { - result |= static_cast(bytes[i]) << (i * 8); - } - return result; - } - - std::array uint128_t_to_bytes( - const boost::multiprecision::uint128_t &i) { - return detail::uint_to_bytes<16>(i); - } - - boost::multiprecision::uint128_t bytes_to_uint128_t( - gsl::span bytes) { - return detail::bytes_to_uint<16, boost::multiprecision::uint128_t>(bytes); - } - - std::array uint256_t_to_bytes( - const boost::multiprecision::uint256_t &i) { - return detail::uint_to_bytes<32>(i); - } - - boost::multiprecision::uint256_t bytes_to_uint256_t( - gsl::span bytes) { - return detail::bytes_to_uint<32, boost::multiprecision::uint256_t>(bytes); - } - -} // namespace sgns::base diff --git a/src/base/mp_utils.hpp b/src/base/mp_utils.hpp deleted file mode 100644 index 4c2219400..000000000 --- a/src/base/mp_utils.hpp +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef SUPERGENIUS_SRC_CRYPTO_MP_UTILS_HPP -#define SUPERGENIUS_SRC_CRYPTO_MP_UTILS_HPP - -#include -#include - -namespace sgns::base { - - namespace detail { - template - std::array uint_to_bytes(uint &&i); - - template - uint bytes_to_uint(gsl::span bytes); - } // namespace detail - - std::array uint64_t_to_bytes(uint64_t number); - - uint64_t bytes_to_uint64_t(gsl::span bytes); - - std::array uint128_t_to_bytes( - const boost::multiprecision::uint128_t &i); - - boost::multiprecision::uint128_t bytes_to_uint128_t( - gsl::span bytes); - - std::array 
uint256_t_to_bytes( - const boost::multiprecision::uint256_t &i); - - boost::multiprecision::uint256_t bytes_to_uint256_t( - gsl::span bytes); - -} // namespace sgns::base - -#endif // SUPERGENIUS_SRC_CRYPTO_MP_UTILS_HPP diff --git a/src/blockchain/Blockchain.hpp b/src/blockchain/Blockchain.hpp index dcfe47d8d..b5f081cf5 100644 --- a/src/blockchain/Blockchain.hpp +++ b/src/blockchain/Blockchain.hpp @@ -142,11 +142,13 @@ namespace sgns static bool ShouldReplaceAccountCreation( const blockchain::AccountCreationBlock &existing, const blockchain::AccountCreationBlock &candidate ); - void GenesisReceivedCallback( const crdt::CRDTCallbackManager::NewDataPair &new_data, const std::string &cid ); - void AccountCreationReceivedCallback( const crdt::CRDTCallbackManager::NewDataPair& new_data, const std::string &cid ); + outcome::result GenesisReceivedCallback( const crdt::CRDTCallbackManager::NewDataPair &new_data, + const std::string &cid ); + outcome::result AccountCreationReceivedCallback( const crdt::CRDTCallbackManager::NewDataPair &new_data, + const std::string &cid ); outcome::result InformBlockchainResult( outcome::result result ) const; - void InformGenesisResult( outcome::result result ); - void InformAccountCreationResponse( outcome::result creation_result ); + outcome::result InformGenesisResult( outcome::result result ); + outcome::result InformAccountCreationResponse( outcome::result creation_result ); void WatchCIDDownload( const std::string &cid, Error error_on_failure, uint64_t timeout_ms ); outcome::result EnsureValidatorRegistry() const; diff --git a/src/blockchain/ValidatorRegistry.cpp b/src/blockchain/ValidatorRegistry.cpp index 24617b824..2d26f8121 100644 --- a/src/blockchain/ValidatorRegistry.cpp +++ b/src/blockchain/ValidatorRegistry.cpp @@ -162,8 +162,8 @@ namespace sgns::blockchain while ( !current_cid.empty() ) { registry_chain.push_back( current_cid ); - OUTCOME_TRY( auto &&cid, CID::fromString( current_cid ) ); - OUTCOME_TRY( auto &&node, 
old_syncer->GetNodeFromMerkleDAG( cid ) ); + BOOST_OUTCOME_TRY( auto cid, CID::fromString( current_cid ) ); + BOOST_OUTCOME_TRY( auto node, old_syncer->GetNodeFromMerkleDAG( cid ) ); auto prev_result = ExtractPrevRegistryCid( *node ); nodes.push_back( std::move( node ) ); if ( prev_result.has_error() ) @@ -191,7 +191,7 @@ namespace sgns::blockchain registry_cid_value.put( cid_string ); (void)new_store->put( registry_cid_key, std::move( registry_cid_value ) ); - OUTCOME_TRY( new_crdt->AddDAGNode( node ) ); + BOOST_OUTCOME_TRY( new_crdt->AddDAGNode( node ) ); } } validator_registry_logger()->debug( "{}: Finished migrating validator registry: ", __func__ ); diff --git a/src/blockchain/impl/Blockchain.cpp b/src/blockchain/impl/Blockchain.cpp index e3ceee06d..6d532c941 100644 --- a/src/blockchain/impl/Blockchain.cpp +++ b/src/blockchain/impl/Blockchain.cpp @@ -11,6 +11,7 @@ #include "blockchain/ValidatorRegistry.hpp" #include #include "crdt/graphsync_dagsyncer.hpp" +#include "outcome/outcome.hpp" OUTCOME_CPP_DEFINE_CATEGORY_3( sgns, Blockchain::Error, err ) { @@ -208,8 +209,8 @@ namespace sgns } ); instance->account_->SetGetValidatorWeightMethod( - [weak_ptr( std::weak_ptr( - instance ) )]( const std::string &address ) -> outcome::result> + [weak_ptr( std::weak_ptr( instance ) )]( + const std::string &address ) -> outcome::result> { if ( auto strong = weak_ptr.lock() ) { @@ -254,9 +255,9 @@ namespace sgns } blockchain_logger()->debug( " Migrating CID: {}", cid_string ); - OUTCOME_TRY( auto &&cid, CID::fromString( cid_string ) ); - OUTCOME_TRY( auto &&node, old_syncer->GetNodeFromMerkleDAG( cid ) ); - OUTCOME_TRY( new_crdt->AddDAGNode( node ) ); + BOOST_OUTCOME_TRY( auto cid, CID::fromString( cid_string ) ); + BOOST_OUTCOME_TRY( auto node, old_syncer->GetNodeFromMerkleDAG( cid ) ); + BOOST_OUTCOME_TRY( new_crdt->AddDAGNode( node ) ); return outcome::success(); }; @@ -274,7 +275,7 @@ namespace sgns crdt::GlobalDB::Buffer genesis_cid_value; genesis_cid_value.putBuffer( 
genesis_cid.value() ); (void)new_store->put( genesis_cid_key, std::move( genesis_cid_value ) ); - OUTCOME_TRY( MigrateCIDToNewDB( std::string( genesis_cid.value().toString() ) ) ); + BOOST_OUTCOME_TRY( MigrateCIDToNewDB( std::string( genesis_cid.value().toString() ) ) ); } blockchain_logger()->debug( "{}: Getting the account creation CIDs from old database", __func__ ); @@ -288,7 +289,7 @@ namespace sgns blockchain_logger()->debug( "{}: Account creation CID: {}", __func__, entry.second.toString() ); (void)new_store->put( entry.first, entry.second ); - OUTCOME_TRY( MigrateCIDToNewDB( std::string( entry.second.toString() ) ) ); + BOOST_OUTCOME_TRY( MigrateCIDToNewDB( std::string( entry.second.toString() ) ) ); } } blockchain_logger()->debug( "{}: Finalized migrating the blockchain", __func__ ); @@ -367,11 +368,11 @@ namespace sgns } logger_->info( "[{}] Genesis block also found locally, verifying account creation block", account_->GetAddress().substr( 0, 8 ) ); - OUTCOME_TRY( OnGenesisBlockReceived( get_genesis_result.value() ) ); - OUTCOME_TRY( InitGenesisCID() ); - OUTCOME_TRY( EnsureValidatorRegistry() ); - OUTCOME_TRY( OnAccountCreationBlockReceived( get_account_creation_result.value() ) ); - OUTCOME_TRY( InitAccountCreationCID( account_->GetAddress() ) ); + BOOST_OUTCOME_TRY( OnGenesisBlockReceived( get_genesis_result.value() ) ); + BOOST_OUTCOME_TRY( InitGenesisCID() ); + BOOST_OUTCOME_TRY( EnsureValidatorRegistry() ); + BOOST_OUTCOME_TRY( OnAccountCreationBlockReceived( get_account_creation_result.value() ) ); + BOOST_OUTCOME_TRY( InitAccountCreationCID( account_->GetAddress() ) ); auto query_result = db_->QueryKeyValues( std::string( ACCOUNT_CREATION_KEY_PREFIX ) ); if ( query_result.has_error() ) @@ -422,8 +423,7 @@ namespace sgns logger_->info( "[{}] Account creation block verification completed successfully", account_->GetAddress().substr( 0, 8 ) ); - InformBlockchainResult( outcome::success() ); - return outcome::success(); + return 
InformBlockchainResult( outcome::success() ); } logger_->info( "[{}] Account creation block not found locally, proceeding to check genesis block", account_->GetAddress().substr( 0, 8 ) ); @@ -432,15 +432,15 @@ namespace sgns if ( !get_genesis_result.has_error() ) { logger_->info( "[{}] Genesis block found locally, verifying", account_->GetAddress().substr( 0, 8 ) ); - OUTCOME_TRY( OnGenesisBlockReceived( get_genesis_result.value() ) ); - OUTCOME_TRY( InitGenesisCID() ); - OUTCOME_TRY( EnsureValidatorRegistry() ); + BOOST_OUTCOME_TRY( OnGenesisBlockReceived( get_genesis_result.value() ) ); + BOOST_OUTCOME_TRY( InitGenesisCID() ); + BOOST_OUTCOME_TRY( EnsureValidatorRegistry() ); logger_->info( "[{}] Genesis block verification completed successfully", account_->GetAddress().substr( 0, 8 ) ); logger_->info( "[{}] Requesting account creation block via pubsub", account_->GetAddress().substr( 0, 8 ) ); - account_->RequestAccountCreation( + return account_->RequestAccountCreation( TIMEOUT_ACC_CREATION_BLOCK_MS, [weakptr( weak_from_this() )]( outcome::result creation_cid_res ) { @@ -453,39 +453,36 @@ namespace sgns } } ); } - else + + logger_->info( "[{}] Genesis block not found locally, proceeding to creation/request", + account_->GetAddress().substr( 0, 8 ) ); + // Genesis block not found locally + if ( account_->GetAddress() == GetAuthorizedFullNodeAddress() ) { - logger_->info( "[{}] Genesis block not found locally, proceeding to creation/request", - account_->GetAddress().substr( 0, 8 ) ); - // Genesis block not found locally - if ( account_->GetAddress() == GetAuthorizedFullNodeAddress() ) + logger_->info( "[{}] Full node detected, creating genesis block", account_->GetAddress().substr( 0, 8 ) ); + auto create_result = CreateGenesisBlock(); + return create_result; + } + logger_->info( "[{}] Regular node detected, requesting genesis block via pubsub", + account_->GetAddress().substr( 0, 8 ) ); + auto genesis_request_result = account_->RequestGenesis( + 
TIMEOUT_GENESIS_BLOCK_MS, + [weakptr( weak_from_this() )]( outcome::result genesis_cid_res ) { - logger_->info( "[{}] Full node detected, creating genesis block", - account_->GetAddress().substr( 0, 8 ) ); - auto create_result = CreateGenesisBlock(); - return create_result; - } - logger_->info( "[{}] Regular node detected, requesting genesis block via pubsub", - account_->GetAddress().substr( 0, 8 ) ); - auto genesis_request_result = account_->RequestGenesis( - TIMEOUT_GENESIS_BLOCK_MS, - [weakptr( weak_from_this() )]( outcome::result genesis_cid_res ) + if ( auto self = weakptr.lock() ) { - if ( auto self = weakptr.lock() ) - { - self->logger_->debug( "[{}] Genesis request finished", - self->account_->GetAddress().substr( 0, 8 ) ); - self->InformGenesisResult( std::move( genesis_cid_res ) ); - } - } ); - if ( genesis_request_result.has_error() ) - { - logger_->error( "[{}] Genesis request failed: no response received", - account_->GetAddress().substr( 0, 8 ) ); - return outcome::failure( Error::GENESIS_BLOCK_MISSING ); - } - logger_->info( "[{}] Request succeeded for Genesis", account_->GetAddress().substr( 0, 8 ) ); + self->logger_->debug( "[{}] Genesis request finished", + self->account_->GetAddress().substr( 0, 8 ) ); + self->InformGenesisResult( std::move( genesis_cid_res ) ); + } + } ); + if ( genesis_request_result.has_error() ) + { + logger_->error( "[{}] Genesis request failed: no response received", + account_->GetAddress().substr( 0, 8 ) ); + return outcome::failure( Error::GENESIS_BLOCK_MISSING ); } + logger_->info( "[{}] Request succeeded for Genesis", account_->GetAddress().substr( 0, 8 ) ); return outcome::success(); } @@ -621,41 +618,39 @@ namespace sgns return result; } - void Blockchain::InformGenesisResult( outcome::result genesis_result ) + outcome::result Blockchain::InformGenesisResult( outcome::result genesis_result ) { if ( genesis_result.has_error() ) { logger_->debug( "[{}] Genesis block not found", account_->GetAddress().substr( 0, 8 ) ); 
- InformBlockchainResult( outcome::failure( Error::GENESIS_BLOCK_MISSING ) ); - } - else - { - logger_->debug( "[{}] Informing genesis result response with CID: {}", - account_->GetAddress().substr( 0, 8 ), - genesis_result.value() ); - WatchCIDDownload( genesis_result.value(), Error::GENESIS_BLOCK_MISSING, TIMEOUT_GENESIS_BLOCK_MS ); + return InformBlockchainResult( outcome::failure( Error::GENESIS_BLOCK_MISSING ) ); } + logger_->debug( "[{}] Informing genesis result response with CID: {}", + account_->GetAddress().substr( 0, 8 ), + genesis_result.value() ); + WatchCIDDownload( genesis_result.value(), Error::GENESIS_BLOCK_MISSING, TIMEOUT_GENESIS_BLOCK_MS ); + return outcome::success(); } - void Blockchain::InformAccountCreationResponse( outcome::result creation_result ) + outcome::result Blockchain::InformAccountCreationResponse( outcome::result creation_result ) { if ( creation_result.has_error() ) { logger_->debug( "[{}] Received empty account creation CID, no account created yet", account_->GetAddress().substr( 0, 8 ) ); - CreateAccountCreationBlock(); - } - else - { - logger_->debug( "[{}] Informing account creation response with CID: {}", - account_->GetAddress().substr( 0, 8 ), - creation_result.value() ); - WatchCIDDownload( creation_result.value(), - Error::ACCOUNT_CREATION_BLOCK_MISSING, - TIMEOUT_ACC_CREATION_BLOCK_MS ); + return CreateAccountCreationBlock(); } + + logger_->debug( "[{}] Informing account creation response with CID: {}", + account_->GetAddress().substr( 0, 8 ), + creation_result.value() ); + WatchCIDDownload( creation_result.value(), + Error::ACCOUNT_CREATION_BLOCK_MISSING, + TIMEOUT_ACC_CREATION_BLOCK_MS ); + + return outcome::success(); } void Blockchain::WatchCIDDownload( const std::string &cid, Error error_on_failure, uint64_t timeout_ms ) @@ -730,8 +725,8 @@ namespace sgns .detach(); } - void Blockchain::GenesisReceivedCallback( const crdt::CRDTCallbackManager::NewDataPair &new_data, - const std::string &cid ) + outcome::result 
Blockchain::GenesisReceivedCallback( const crdt::CRDTCallbackManager::NewDataPair &new_data, + const std::string &cid ) { logger_->debug( "[{}] Genesis received callback triggered with CID: {}", account_->GetAddress().substr( 0, 8 ), @@ -767,8 +762,7 @@ namespace sgns if ( new_genesis_return.has_error() ) { - InformBlockchainResult( new_genesis_return ); - return; + return InformBlockchainResult( new_genesis_return ); } logger_->info( "[{}] Requesting account creation block via pubsub (async)", @@ -788,13 +782,10 @@ namespace sgns logger_->error( "[{}] Account creation request failed {}. Creating account...", account_->GetAddress().substr( 0, 8 ), result.error().message() ); - InformAccountCreationResponse( outcome::failure( Error::ACCOUNT_CREATION_BLOCK_CREATION_FAILED ) ); - } - else - { - logger_->info( "[{}] Triggered Request account creation successfully", - account_->GetAddress().substr( 0, 8 ) ); + return InformAccountCreationResponse( outcome::failure( Error::ACCOUNT_CREATION_BLOCK_CREATION_FAILED ) ); } + logger_->info( "[{}] Triggered Request account creation successfully", account_->GetAddress().substr( 0, 8 ) ); + return outcome::success(); } outcome::result Blockchain::CreateGenesisBlock() @@ -947,7 +938,10 @@ namespace sgns size_t size = g_copy.ByteSizeLong(); std::vector signature_data( size ); - g_copy.SerializeToArray( signature_data.data(), signature_data.size() ); + if ( !g_copy.SerializeToArray( signature_data.data(), signature_data.size() ) ) + { + logger_->error( "Failed to serialize signature into array" ); + } logger_->trace( "[{}] Signature data computed (size: {} bytes)", account_->GetAddress().substr( 0, 8 ), size ); @@ -962,7 +956,10 @@ namespace sgns size_t size = ac_copy.ByteSizeLong(); std::vector signature_data( size ); - ac_copy.SerializeToArray( signature_data.data(), signature_data.size() ); + if ( !ac_copy.SerializeToArray( signature_data.data(), signature_data.size() ) ) + { + logger_->error( "Failed to serialize signature into 
array" ); + } return signature_data; } @@ -1384,7 +1381,7 @@ namespace sgns return outcome::success(); } - void Blockchain::AccountCreationReceivedCallback( const crdt::CRDTCallbackManager::NewDataPair &new_data, + outcome::result Blockchain::AccountCreationReceivedCallback( const crdt::CRDTCallbackManager::NewDataPair &new_data, const std::string &cid ) { logger_->debug( "[{}] Account creation received callback triggered with CID: {}", @@ -1465,8 +1462,10 @@ namespace sgns if ( notify_blockchain ) { - InformBlockchainResult( new_account_return ); + return InformBlockchainResult( new_account_return ); } + + return outcome::success(); } outcome::result Blockchain::GetGenesisCID() const diff --git a/src/blockchain/impl/block_tree_impl.cpp b/src/blockchain/impl/block_tree_impl.cpp index 1193a8b17..7a65f72a4 100644 --- a/src/blockchain/impl/block_tree_impl.cpp +++ b/src/blockchain/impl/block_tree_impl.cpp @@ -109,9 +109,9 @@ namespace sgns::blockchain { std::shared_ptr extrinsic_observer, std::shared_ptr hasher) { // retrieve the block's header: we need data from it - OUTCOME_TRY((auto &&, header), storage->getBlockHeader(last_finalized_block)); + BOOST_OUTCOME_TRY( auto header, storage->getBlockHeader(last_finalized_block)); // create meta structures from the retrieved header - OUTCOME_TRY((auto &&, hash_res), header_repo->getHashById(last_finalized_block)); + BOOST_OUTCOME_TRY( auto hash_res, header_repo->getHashById(last_finalized_block)); auto tree = std::make_shared(hash_res, header.number, nullptr, true); @@ -146,7 +146,7 @@ namespace sgns::blockchain { if (!parent) { return BlockTreeError::NO_PARENT; } - OUTCOME_TRY((auto &&, block_hash), storage_->putBlockHeader(header)); + BOOST_OUTCOME_TRY( auto block_hash, storage_->putBlockHeader(header)); // update local meta with the new block auto new_node = std::make_shared(block_hash, header.number, parent); @@ -169,7 +169,7 @@ namespace sgns::blockchain { if (!parent) { return BlockTreeError::NO_PARENT; } - 
OUTCOME_TRY((auto &&, block_hash), storage_->putBlock(block)); + BOOST_OUTCOME_TRY( auto block_hash, storage_->putBlock(block)); // update local meta with the new block auto new_node = std::make_shared(block_hash, block.header.number, parent); @@ -292,7 +292,7 @@ namespace sgns::blockchain { // the function returns the blocks in the chronological order, but we want a // reverted one in this case - OUTCOME_TRY((auto &&, chain), getChainByBlocks(finish_block_hash.value(), block)); + BOOST_OUTCOME_TRY( auto chain, getChainByBlocks(finish_block_hash.value(), block)); std::reverse(chain.begin(), chain.end()); return chain; } @@ -384,11 +384,11 @@ namespace sgns::blockchain { outcome::result BlockTreeImpl::getBestContaining( const primitives::BlockHash &target_hash, const boost::optional &max_number) const { - OUTCOME_TRY((auto &&, target_header), header_repo_->getBlockHeader(target_hash)); + BOOST_OUTCOME_TRY( auto target_header, header_repo_->getBlockHeader(target_hash)); if (max_number.has_value() && target_header.number > max_number.value()) { return Error::TARGET_IS_PAST_MAX; } - OUTCOME_TRY((auto &&, canon_hash), + BOOST_OUTCOME_TRY( auto canon_hash, header_repo_->getHashByNumber(target_header.number)); // if a max number is given we try to fetch the block at the // given depth, if it doesn't exist or `max_number` is not @@ -397,13 +397,13 @@ namespace sgns::blockchain { if (max_number.has_value()) { auto header = header_repo_->getBlockHeader(max_number.value()); if (header) { - OUTCOME_TRY((auto &&, hash), + BOOST_OUTCOME_TRY( auto hash, header_repo_->getHashByNumber(header.value().number)); return primitives::BlockInfo{header.value().number, hash}; } } } else { - OUTCOME_TRY((auto &&, last_finalized), + BOOST_OUTCOME_TRY( auto last_finalized, header_repo_->getNumberByHash(getLastFinalized().block_hash)); if (last_finalized >= target_header.number) { return Error::BLOCK_ON_DEAD_END; @@ -413,13 +413,13 @@ namespace sgns::blockchain { auto current_hash = leaf_hash; 
auto best_hash = current_hash; if (max_number.has_value()) { - OUTCOME_TRY((auto &&, hash), walkBackUntilLess(current_hash, max_number.value())); + BOOST_OUTCOME_TRY( auto hash, walkBackUntilLess(current_hash, max_number.value())); best_hash = hash; current_hash = hash; } - OUTCOME_TRY((auto &&, best_header), header_repo_->getBlockHeader(best_hash)); + BOOST_OUTCOME_TRY( auto best_header, header_repo_->getBlockHeader(best_hash)); while (true) { - OUTCOME_TRY((auto &&, current_header), header_repo_->getBlockHeader(current_hash)); + BOOST_OUTCOME_TRY( auto current_header, header_repo_->getBlockHeader(current_hash)); if (current_hash == target_hash) { return primitives::BlockInfo{best_header.number, best_hash}; } @@ -496,7 +496,7 @@ namespace sgns::blockchain { const primitives::BlockNumber &limit) const { auto current_hash = start; while (true) { - OUTCOME_TRY((auto &&, current_header), header_repo_->getBlockHeader(current_hash)); + BOOST_OUTCOME_TRY( auto current_header, header_repo_->getBlockHeader(current_hash)); if (current_header.number <= limit) { return current_hash; } @@ -550,7 +550,7 @@ namespace sgns::blockchain { } } - BOOST_OUTCOME_TRYV2( auto &&, storage_->removeBlock( hash, number ) ); + BOOST_OUTCOME_TRY( storage_->removeBlock( hash, number ) ); } // trying to return back extrinsics to transaction pool diff --git a/src/blockchain/impl/key_value_block_header_repository.cpp b/src/blockchain/impl/key_value_block_header_repository.cpp index d09fb5d8f..4ad364eb2 100644 --- a/src/blockchain/impl/key_value_block_header_repository.cpp +++ b/src/blockchain/impl/key_value_block_header_repository.cpp @@ -24,7 +24,7 @@ namespace sgns::blockchain outcome::result KeyValueBlockHeaderRepository::getNumberByHash( const Hash256 &hash ) const { - OUTCOME_TRY( ( auto &&, key ), idToBufferKey( *db_, hash ) ); + BOOST_OUTCOME_TRY( auto key, idToBufferKey( *db_, hash ) ); return BufferToNumber( key ); } @@ -32,7 +32,7 @@ namespace sgns::blockchain outcome::result 
KeyValueBlockHeaderRepository::getHashByNumber( const primitives::BlockNumber &number ) const { - OUTCOME_TRY( ( auto &&, header ), getBlockHeader( number ) ); + BOOST_OUTCOME_TRY( auto header, getBlockHeader( number ) ); auto serializedHeader = GetHeaderSerializedData( header ); return hasher_->blake2b_256( serializedHeader ); @@ -40,7 +40,7 @@ namespace sgns::blockchain outcome::result KeyValueBlockHeaderRepository::getBlockHeader( const BlockId &id ) const { - OUTCOME_TRY( ( auto &&, header_string_val ), idToStringKey( *db_, id ) ); + BOOST_OUTCOME_TRY( auto header_string_val, idToStringKey( *db_, id ) ); auto header_res = db_->Get( { block_header_key_prefix + header_string_val } ); if ( !header_res ) @@ -57,11 +57,11 @@ namespace sgns::blockchain auto encoded_header = GetHeaderSerializedData( header ); auto header_hash = hasher_->blake2b_256( encoded_header ); - OUTCOME_TRY( ( auto &&, id_string ), idToStringKey( *db_, header.number ) ); + BOOST_OUTCOME_TRY( auto id_string, idToStringKey( *db_, header.number ) ); BOOST_OUTCOME_TRYV2( auto &&, db_->Put( { header_hash.toReadableString() }, NumberToBuffer( header.number ), { "topic" } ) ); - BOOST_OUTCOME_TRYV2( auto &&, + BOOST_OUTCOME_TRY( db_->Put( { block_header_key_prefix + id_string }, base::Buffer{ std::move( encoded_header ) }, { "topic" } ) ); @@ -71,8 +71,8 @@ namespace sgns::blockchain outcome::result KeyValueBlockHeaderRepository::removeBlockHeader( const BlockId &id ) { - OUTCOME_TRY( ( auto &&, header_string_val ), idToStringKey( *db_, id ) ); - OUTCOME_TRY( db_->Remove( { block_header_key_prefix + header_string_val }, { "topic" } ) ); + BOOST_OUTCOME_TRY( auto header_string_val, idToStringKey( *db_, id ) ); + BOOST_OUTCOME_TRY( db_->Remove( { block_header_key_prefix + header_string_val }, { "topic" } ) ); return outcome::success(); } @@ -97,7 +97,10 @@ namespace sgns::blockchain size_t size = header_proto.ByteSizeLong(); std::vector serialized_proto( size ); - header_proto.SerializeToArray( 
serialized_proto.data(), static_cast( serialized_proto.size() ) ); + if ( !header_proto.SerializeToArray( serialized_proto.data(), static_cast( serialized_proto.size() ) ) ) + { + std::cerr << "Failed to serialize header into array.\n"; + } return serialized_proto; } @@ -109,7 +112,7 @@ namespace sgns::blockchain if ( !header_proto.ParseFromArray( serialized_data.data(), static_cast( serialized_data.size() ) ) ) { - std::cerr << "Failed to parse BlockHeaderData from array." << std::endl; + std::cerr << "Failed to parse BlockHeaderData from array.\n"; } primitives::BlockHeader block_header; diff --git a/src/blockchain/impl/key_value_block_storage.cpp b/src/blockchain/impl/key_value_block_storage.cpp index 40e0dcb6a..c88033d23 100644 --- a/src/blockchain/impl/key_value_block_storage.cpp +++ b/src/blockchain/impl/key_value_block_storage.cpp @@ -4,6 +4,7 @@ #include "blockchain/impl/common.hpp" #include "blockchain/impl/storage_util.hpp" +#include "outcome/outcome.hpp" #include "storage/database_error.hpp" #include "blockchain/impl/proto/SGBlocks.pb.h" #include "storage/predefined_keys.hpp" @@ -78,9 +79,9 @@ namespace sgns::blockchain auto block_storage = std::make_shared( KeyValueBlockStorage( std::move( db ), std::move( hasher ), std::move( header_repo ) ) ); - OUTCOME_TRY( ( auto &&, last_finalized_block_hash ), block_storage->getLastFinalizedBlockHash() ); + BOOST_OUTCOME_TRY( auto last_finalized_block_hash, block_storage->getLastFinalizedBlockHash() ); - OUTCOME_TRY( ( auto &&, block_header ), block_storage->getBlockHeader( last_finalized_block_hash ) ); + BOOST_OUTCOME_TRY( auto block_header, block_storage->getBlockHeader( last_finalized_block_hash ) ); primitives::Block finalized_block; finalized_block.header = block_header; @@ -100,15 +101,15 @@ namespace sgns::blockchain auto block_storage = std::make_shared( KeyValueBlockStorage( db, std::move( hasher ), std::move( header_repo ) ) ); - BOOST_OUTCOME_TRYV2( auto &&, block_storage->ensureGenesisNotExists() ); + 
BOOST_OUTCOME_TRY( block_storage->ensureGenesisNotExists() ); // state root type is Hash256, however for consistency with spec root hash // returns buffer. So we need this conversion - OUTCOME_TRY( ( auto &&, state_root_blob ), base::Hash256::fromSpan( state_root.toVector() ) ); + BOOST_OUTCOME_TRY( auto state_root_blob, base::Hash256::fromSpan( state_root.toVector() ) ); auto extrinsics_root_buf = trieRoot( {} ); // same reason for conversion as few lines above - OUTCOME_TRY( ( auto &&, extrinsics_root ), base::Hash256::fromSpan( extrinsics_root_buf.toVector() ) ); + BOOST_OUTCOME_TRY( auto extrinsics_root, base::Hash256::fromSpan( extrinsics_root_buf.toVector() ) ); // genesis block initialization primitives::Block genesis_block; @@ -117,10 +118,11 @@ namespace sgns::blockchain genesis_block.header.state_root = state_root_blob; // the rest of the fields have default value - OUTCOME_TRY( ( auto &&, genesis_block_hash ), block_storage->putBlock( genesis_block ) ); - BOOST_OUTCOME_TRYV2( auto &&, - db->Put( { storage::GetGenesisBlockHashLookupKey() }, Buffer{ genesis_block_hash }, { "topic" } ) ); - BOOST_OUTCOME_TRYV2( auto &&, block_storage->setLastFinalizedBlockHash( genesis_block_hash ) ); + BOOST_OUTCOME_TRY( auto genesis_block_hash, block_storage->putBlock( genesis_block ) ); + BOOST_OUTCOME_TRYV2( + auto &&, + db->Put( { storage::GetGenesisBlockHashLookupKey() }, Buffer{ genesis_block_hash }, { "topic" } ) ); + BOOST_OUTCOME_TRY( block_storage->setLastFinalizedBlockHash( genesis_block_hash ) ); on_genesis_created( genesis_block ); return block_storage; @@ -133,7 +135,7 @@ namespace sgns::blockchain outcome::result KeyValueBlockStorage::getBlockBody( const primitives::BlockId &id ) const { - OUTCOME_TRY( ( auto &&, block_data ), getBlockData( id ) ); + BOOST_OUTCOME_TRY( auto block_data, getBlockData( id ) ); if ( block_data.body ) { return block_data.body.value(); @@ -143,7 +145,7 @@ namespace sgns::blockchain outcome::result 
KeyValueBlockStorage::getBlockData( const primitives::BlockId &id ) const { - OUTCOME_TRY( ( auto &&, encoded_block_data ), GetRawBlock( id ) ); + BOOST_OUTCOME_TRY( auto encoded_block_data, GetRawBlock( id ) ); auto block_data = GetBlockDataFromSerialized( encoded_block_data.toVector() ); return block_data; @@ -152,7 +154,7 @@ namespace sgns::blockchain outcome::result KeyValueBlockStorage::getJustification( const primitives::BlockId &block ) const { - OUTCOME_TRY( ( auto &&, block_data ), getBlockData( block ) ); + BOOST_OUTCOME_TRY( auto block_data, getBlockData( block ) ); if ( block_data.justification ) { return block_data.justification.value(); @@ -190,21 +192,14 @@ namespace sgns::blockchain to_insert.receipt = block_data.receipt ? block_data.receipt : existing_data.receipt; } - //OUTCOME_TRY((auto &&, encoded_block_data), scale::encode(to_insert)); auto encoded_block_data = GetSerializedBlockData( to_insert ); - OUTCOME_TRY( ( auto &&, id_string ), idToStringKey( *db_, block_number ) ); + BOOST_OUTCOME_TRY( auto id_string, idToStringKey( *db_, block_number ) ); //TODO - For now one block data per block header. 
Revisit this - BOOST_OUTCOME_TRYV2( - auto &&, - db_->Put( { header_repo_->GetHeaderPath() + id_string + "/tx/0" }, + BOOST_OUTCOME_TRY( + db_->Put( { header_repo_->GetHeaderPath() + id_string + "/tx/0" }, Buffer{ encoded_block_data }, { "topic" } ) ); - //BOOST_OUTCOME_TRYV2(auto &&, putWithPrefix(*db_, - // Prefix::BLOCK_DATA, - // block_number, - // block_data.hash, - // Buffer{encoded_block_data})); return outcome::success(); } @@ -213,9 +208,7 @@ namespace sgns::blockchain // TODO(xDimon): Need to implement mechanism for wipe out orphan blocks // (in side-chains whom rejected by finalization) // for avoid leaks of storage space - auto block_hash = hasher_->blake2b_256( header_repo_->GetHeaderSerializedData( block.header ) ); - //auto block_in_storage_res = - // getWithPrefix(*db_, Prefix::HEADER, block_hash); + auto block_hash = hasher_->blake2b_256( header_repo_->GetHeaderSerializedData( block.header ) ); auto block_in_storage_res = header_repo_->getBlockHeader( block_hash ); if ( block_in_storage_res.has_value() ) { @@ -227,14 +220,14 @@ namespace sgns::blockchain } // insert our block's parts into the database- - BOOST_OUTCOME_TRYV2( auto &&, putBlockHeader( block.header ) ); + BOOST_OUTCOME_TRY( putBlockHeader( block.header ) ); primitives::BlockData block_data; block_data.hash = block_hash; block_data.header = block.header; block_data.body = block.body; - BOOST_OUTCOME_TRYV2( auto &&, putBlockData( block.header.number, block_data ) ); + BOOST_OUTCOME_TRY( putBlockData( block.header.number, block_data ) ); logger_->info( "Added block. Number: {}. Hash: {}. 
State root: {}", block.header.number, block_hash.toHex(), @@ -252,7 +245,7 @@ namespace sgns::blockchain block_data.hash = hash; block_data.justification = j; - BOOST_OUTCOME_TRYV2( auto &&, putBlockData( block_number, block_data ) ); + BOOST_OUTCOME_TRY( putBlockData( block_number, block_data ) ); return outcome::success(); } @@ -265,10 +258,11 @@ namespace sgns::blockchain return header_rm_res; } - OUTCOME_TRY( ( auto &&, key ), idToBufferKey( *db_, number ) ); + BOOST_OUTCOME_TRY( auto key, idToBufferKey( *db_, number ) ); //TODO - For now one block data per block header. Revisit this - OUTCOME_TRY(db_->Remove( { header_repo_->GetHeaderPath() + std::string( key.toString() ) + "/tx/0" }, { "topic" } ) ); + BOOST_OUTCOME_TRY( + db_->Remove( { header_repo_->GetHeaderPath() + std::string( key.toString() ) + "/tx/0" }, { "topic" } ) ); return outcome::success(); } @@ -322,9 +316,13 @@ namespace sgns::blockchain size_t size = hash_proto.ByteSizeLong(); std::vector serialized_proto( size ); - hash_proto.SerializeToArray( serialized_proto.data(), serialized_proto.size() ); - BOOST_OUTCOME_TRYV2( - auto &&, + if ( !hash_proto.SerializeToArray( serialized_proto.data(), serialized_proto.size() ) ) + { + logger_->error( "Failed to serialize hash into array" ); + return std::errc::bad_message; + } + + BOOST_OUTCOME_TRY( db_->Put( { storage::GetLastFinalizedBlockHashLookupKey() }, Buffer{ serialized_proto }, { "topic" } ) ); return outcome::success(); @@ -363,7 +361,10 @@ namespace sgns::blockchain size_t size = data_proto.ByteSizeLong(); std::vector serialized_proto( size ); - data_proto.SerializeToArray( serialized_proto.data(), serialized_proto.size() ); + if ( !data_proto.SerializeToArray( serialized_proto.data(), serialized_proto.size() ) ) + { + logger_->error( "Failed to serialize block data" ); + } return serialized_proto; } @@ -375,8 +376,9 @@ namespace sgns::blockchain SGBlocks::BlockPayloadData data_proto; if ( !data_proto.ParseFromArray( serialized_data.data(), 
serialized_data.size() ) ) { - std::cerr << "Failed to parse BlockPayloadData from array." << std::endl; + logger_->error( "Failed to parse BlockPayloadData from array" ); } + primitives::BlockHeader header; header.parent_hash = ( base::Hash256::fromReadableString( data_proto.header().parent_hash() ) ).value(); header.number = data_proto.header().block_number(); @@ -399,10 +401,10 @@ namespace sgns::blockchain outcome::result KeyValueBlockStorage::GetRawBlock( const primitives::BlockId &id ) const { - OUTCOME_TRY( ( auto &&, key ), idToBufferKey( *db_, id ) ); + BOOST_OUTCOME_TRY( auto key, idToBufferKey( *db_, id ) ); //TODO - For now one block data per block header. Revisit this - OUTCOME_TRY( ( auto &&, encoded_block_data ), + BOOST_OUTCOME_TRY( auto encoded_block_data, db_->Get( { header_repo_->GetHeaderPath() + std::string( key.toString() ) + "/tx/0" } ) ); return encoded_block_data; diff --git a/src/blockchain/impl/storage_util.cpp b/src/blockchain/impl/storage_util.cpp index 03c367479..f6a088059 100644 --- a/src/blockchain/impl/storage_util.cpp +++ b/src/blockchain/impl/storage_util.cpp @@ -34,7 +34,7 @@ namespace sgns::blockchain { prependPrefix(Buffer{block_hash}, Prefix::ID_TO_LOOKUP_KEY); BOOST_OUTCOME_TRYV2(auto &&, db.Put( { "num_to_idx_key" }, block_lookup_key, { "topic" } ) ); BOOST_OUTCOME_TRYV2(auto &&, db.Put( { "hash_to_idx_key" }, block_lookup_key, { "topic" } ) ); - OUTCOME_TRY(db.Put( { "value_lookup_key" }, value, { "topic" } ) ); + BOOST_OUTCOME_TRY(db.Put( { "value_lookup_key" }, value, { "topic" } ) ); return outcome::success(); } @@ -43,34 +43,22 @@ namespace sgns::blockchain { crdt::GlobalDB &db, prefix::Prefix prefix, const primitives::BlockId &block_id) { - OUTCOME_TRY((auto &&, key), idToBufferKey(db, block_id)); + BOOST_OUTCOME_TRY( auto key, idToBufferKey(db, block_id)); return db.Get({"prependPrefix(key, prefix)"}); } base::Buffer NumberToBuffer(primitives::BlockNumber n) { - // TODO(Harrm) Figure out why exactly it is this way in 
substrate - //BOOST_ASSERT((n & 0xffffffff00000000) == 0); SGBlocks::BlockID blockID; blockID.set_block_number(n); size_t size = blockID.ByteSizeLong(); std::vector serialized_proto( size ); - blockID.SerializeToArray( serialized_proto.data(), static_cast(serialized_proto.size()) ); - return base::Buffer{serialized_proto}; - /**base::Buffer retval; - - //Little endian - for ( std::size_t i = 0; i < sizeof(primitives::BlockNumber); ++i ) - { - retval.putUint8(static_cast((n >> (i * 8)) & 0xffu)) ; + if (!blockID.SerializeToArray( serialized_proto.data(), static_cast(serialized_proto.size()))) { + std::cerr << "Failed to serialize blockID into array.\n"; } - return retval;*/ - //return {uint8_t(n >> 24u), - // uint8_t((n >> 16u) & 0xffu), - // uint8_t((n >> 8u) & 0xffu), - // uint8_t(n & 0xffu)}; + return base::Buffer{serialized_proto}; } base::Buffer numberAndHashToLookupKey(primitives::BlockNumber number, @@ -90,20 +78,10 @@ namespace sgns::blockchain { if ( !blockID.ParseFromArray( key.toVector().data(), static_cast(key.toVector().size()) ) ) { - std::cerr << "Failed to parse BlockID from array." << std::endl; + std::cerr << "Failed to parse BlockID from array.\n"; } return blockID.block_number(); - /*primitives::BlockNumber retval = 0; - - //Little endian - for ( std::size_t i = 0; i < key.size(); ++i ) - { - retval += (key[i] << (i * 8)); - } - return retval;*/ - //return (uint64_t(key[0]) << 24u) | (uint64_t(key[1]) << 16u) - // | (uint64_t(key[2]) << 8u) | uint64_t(key[3]); } base::Buffer prependPrefix(const base::Buffer &key, diff --git a/src/crdt/crdt_datastore.hpp b/src/crdt/crdt_datastore.hpp index e5e5b25dd..692867a23 100644 --- a/src/crdt/crdt_datastore.hpp +++ b/src/crdt/crdt_datastore.hpp @@ -241,6 +241,18 @@ namespace sgns::crdt */ outcome::result BroadcastHeadsForTopics( const std::set &topics ); + /** + * @brief Enable or disable outgoing head broadcasts. + * @param[in] enabled True to allow broadcasts, false to suppress them. 
+ */ + void SetBroadcastEnabled( bool enabled ); + + /** + * @brief Query whether outgoing head broadcasts are enabled. + * @return true when broadcasts are enabled. + */ + bool IsBroadcastEnabled() const; + std::unordered_set GetTopicNames() const; protected: @@ -458,6 +470,7 @@ namespace sgns::crdt std::map pending_jobs_; bool has_full_node_topic_; + std::atomic_bool broadcast_enabled_{ true }; void MarkJobPending( const CID &cid ); void MarkJobFailed( const CID &cid ); diff --git a/src/crdt/globaldb/globaldb.cpp b/src/crdt/globaldb/globaldb.cpp index b86369e8c..9700131c4 100644 --- a/src/crdt/globaldb/globaldb.cpp +++ b/src/crdt/globaldb/globaldb.cpp @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include @@ -66,7 +66,7 @@ namespace sgns::crdt std::shared_ptr pubsub, std::shared_ptr crdtOptions, std::shared_ptr graphsyncnetwork, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::shared_ptr datastore ) { @@ -77,12 +77,11 @@ namespace sgns::crdt auto new_instance = std::shared_ptr( new GlobalDB( std::move( context ), std::move( databasePath ), std::move( pubsub ) ) ); - BOOST_OUTCOME_TRYV2( auto &&, - new_instance->Init( std::move( crdtOptions ), - std::move( graphsyncnetwork ), - std::move( scheduler ), - std::move( generator ), - std::move( datastore ) ) ); + BOOST_OUTCOME_TRY( new_instance->Init( std::move( crdtOptions ), + std::move( graphsyncnetwork ), + std::move( scheduler ), + std::move( generator ), + std::move( datastore ) ) ); return new_instance; } @@ -112,7 +111,7 @@ namespace sgns::crdt outcome::result GlobalDB::Init( std::shared_ptr crdtOptions, std::shared_ptr graphsyncnetwork, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::shared_ptr datastore ) { @@ -262,7 +261,7 @@ namespace sgns::crdt for ( auto &data : data_vector ) { - BOOST_OUTCOME_TRYV2( auto &&, batch.Put( std::get<0>( data ), std::get<1>( data ) ) ); + BOOST_OUTCOME_TRY( batch.Put( 
std::get<0>( data ), std::get<1>( data ) ) ); } return batch.Commit( topics ); @@ -413,6 +412,18 @@ namespace sgns::crdt return m_crdtDatastore->BroadcastHeadsForTopics( topics ); } + void GlobalDB::SetBroadcastEnabled( bool enabled ) + { + if ( !m_crdtDatastore ) + { + m_logger->warn( "SetBroadcastEnabled: CRDT datastore not initialized" ); + return; + } + + m_crdtDatastore->SetBroadcastEnabled( enabled ); + m_logger->info( "SetBroadcastEnabled: {}", enabled ? "enabled" : "disabled" ); + } + outcome::result> GlobalDB::GetMonitoredTopics() const { if ( !m_crdtDatastore ) diff --git a/src/crdt/globaldb/globaldb.hpp b/src/crdt/globaldb/globaldb.hpp index b00f91958..c82d316ac 100644 --- a/src/crdt/globaldb/globaldb.hpp +++ b/src/crdt/globaldb/globaldb.hpp @@ -47,7 +47,7 @@ namespace sgns::crdt std::shared_ptr pubsub, std::shared_ptr crdtOptions, std::shared_ptr graphsyncnetwork, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::shared_ptr datastore = nullptr ); @@ -167,9 +167,15 @@ namespace sgns::crdt */ outcome::result RequestHeadBroadcast( const std::set &topics ); + /** + * @brief Enable or disable outgoing CRDT head broadcasts. + * @param[in] enabled True to allow broadcasts, false to suppress them. 
+ */ + void SetBroadcastEnabled( bool enabled ); + /** * @brief Get the topics that are being listened to - * @return A set of the monitored topic names + * @return A set of the monitored topic names */ outcome::result> GetMonitoredTopics() const; @@ -188,7 +194,7 @@ namespace sgns::crdt outcome::result Init( std::shared_ptr crdtOptions, std::shared_ptr graphsyncnetwork, - std::shared_ptr scheduler, + std::shared_ptr scheduler, std::shared_ptr generator, std::shared_ptr datastore = nullptr ); diff --git a/src/crdt/globaldb/pubsub_broadcaster_ext.cpp b/src/crdt/globaldb/pubsub_broadcaster_ext.cpp index 45263de91..aad54057b 100644 --- a/src/crdt/globaldb/pubsub_broadcaster_ext.cpp +++ b/src/crdt/globaldb/pubsub_broadcaster_ext.cpp @@ -267,7 +267,11 @@ namespace sgns::crdt size_t size = bmsg.ByteSizeLong(); std::vector serialized_proto( size ); - bmsg.SerializeToArray( serialized_proto.data(), serialized_proto.size() ); + if (!bmsg.SerializeToArray( serialized_proto.data(), serialized_proto.size() )) + { + m_logger->error("Failed to serialize broadcast message"); + return std::errc::bad_message; + } for ( auto &topic : broadcastTopicsCopy ) { diff --git a/src/crdt/impl/atomic_transaction.cpp b/src/crdt/impl/atomic_transaction.cpp index 0a00033ed..454cf48d3 100644 --- a/src/crdt/impl/atomic_transaction.cpp +++ b/src/crdt/impl/atomic_transaction.cpp @@ -105,13 +105,13 @@ namespace sgns::crdt std::shared_ptr delta; if ( op.type == Operation::PUT ) { - OUTCOME_TRY( ( auto &&, result ), + BOOST_OUTCOME_TRY( auto result, datastore_->CreateDeltaToAdd( op.key.GetKey(), std::string( op.value.toString() ) ) ); delta = result; } else // REMOVE { - OUTCOME_TRY( ( auto &&, result ), datastore_->CreateDeltaToRemove( op.key.GetKey() ) ); + BOOST_OUTCOME_TRY( auto result, datastore_->CreateDeltaToRemove( op.key.GetKey() ) ); delta = result; } diff --git a/src/crdt/impl/crdt_datastore.cpp b/src/crdt/impl/crdt_datastore.cpp index 6650494dd..6e019ad89 100644 --- 
a/src/crdt/impl/crdt_datastore.cpp +++ b/src/crdt/impl/crdt_datastore.cpp @@ -585,7 +585,7 @@ namespace sgns::crdt { logger_->debug( "{}: Creating the Root Job for CID {}", __func__, aRootCID.toString().value() ); dagSyncer_->InitCIDBlock( aRootCID ); - OUTCOME_TRY( auto &&root_node, dagSyncer_->getNode( aRootCID ) ); + BOOST_OUTCOME_TRY( auto root_node, dagSyncer_->getNode( aRootCID ) ); logger_->debug( "{}: Root Job created for CID {}", __func__, aRootCID.toString().value() ); @@ -699,7 +699,7 @@ namespace sgns::crdt } dagSyncer_->InitCIDBlock( cid ); - OUTCOME_TRY( auto &&node, dagSyncer_->getNode( cid ) ); + BOOST_OUTCOME_TRY( auto node, dagSyncer_->getNode( cid ) ); RootCIDJob newRootJob; @@ -749,9 +749,9 @@ namespace sgns::crdt outcome::result CrdtDatastore::MergeDataFromDelta( const CID &node_cid, const Delta &aDelta ) { - OUTCOME_TRY( auto &&cid_string, node_cid.toString() ); + BOOST_OUTCOME_TRY( auto cid_string, node_cid.toString() ); logger_->debug( "{}: Merging node {} On CRDT", __func__, cid_string ); - OUTCOME_TRY( set_->Merge( aDelta, cid_string ) ); + BOOST_OUTCOME_TRY( set_->Merge( aDelta, cid_string ) ); return outcome::success(); } @@ -759,7 +759,7 @@ namespace sgns::crdt { logger_->debug( "{}: Starting to process Root CID", __func__ ); - OUTCOME_TRY( auto &&root_cid_string, job_to_process.root_node_->getCID().toString() ); + BOOST_OUTCOME_TRY( auto root_cid_string, job_to_process.root_node_->getCID().toString() ); logger_->debug( "{}: Processing Root CID job {}", __func__, root_cid_string ); auto node_to_process = job_to_process.node_; @@ -770,20 +770,20 @@ namespace sgns::crdt is_root = true; } - OUTCOME_TRY( auto &&cid_string, node_to_process->getCID().toString() ); + BOOST_OUTCOME_TRY( auto cid_string, node_to_process->getCID().toString() ); - OUTCOME_TRY( auto &&delta, GetDeltaFromNode( *node_to_process, job_to_process.created_by_self_ ) ); + BOOST_OUTCOME_TRY( auto delta, GetDeltaFromNode( *node_to_process, job_to_process.created_by_self_ ) 
); logger_->debug( "{}: Merging Deltas from {}", __func__, cid_string ); - OUTCOME_TRY( MergeDataFromDelta( node_to_process->getCID(), delta ) ); + BOOST_OUTCOME_TRY( MergeDataFromDelta( node_to_process->getCID(), delta ) ); logger_->debug( "{}: Recording block on DAG Syncher {}", __func__, cid_string ); - OUTCOME_TRY( dagSyncer_->addNode( node_to_process ) ); + BOOST_OUTCOME_TRY( dagSyncer_->addNode( node_to_process ) ); (void)dagSyncer_->DeleteCIDBlock( node_to_process->getCID() ); - OUTCOME_TRY( auto &&links, GetLinksToFetch( job_to_process ) ); + BOOST_OUTCOME_TRY( auto links, GetLinksToFetch( job_to_process ) ); const bool should_fetch_links = !job_to_process.created_by_self_ && !links.empty(); if ( links.empty() && !is_root ) @@ -799,7 +799,7 @@ namespace sgns::crdt else if ( should_fetch_links ) { logger_->debug( "{}: Fetching {} links for Root job: {}", __func__, links.size(), root_cid_string ); - OUTCOME_TRY( FetchNodes( job_to_process, links ) ); + BOOST_OUTCOME_TRY( FetchNodes( job_to_process, links ) ); logger_->debug( "{}: Nodes fetched for Root job: {}", __func__, root_cid_string ); } else if ( is_root ) @@ -944,6 +944,11 @@ namespace sgns::crdt void CrdtDatastore::RebroadcastHeads() { + if ( !broadcast_enabled_.load( std::memory_order_relaxed ) ) + { + return; + } + std::unordered_set pending_topics; { std::lock_guard lock( pendingBroadcastMutex_ ); @@ -1018,6 +1023,12 @@ namespace sgns::crdt outcome::result CrdtDatastore::BroadcastHeadsForTopics( const std::set &topics ) { + if ( !broadcast_enabled_.load( std::memory_order_relaxed ) ) + { + logger_->debug( "BroadcastHeadsForTopics: broadcast suppressed" ); + return outcome::success(); + } + if ( topics.empty() ) { logger_->debug( "BroadcastHeadsForTopics: No topics requested" ); @@ -1082,6 +1093,16 @@ namespace sgns::crdt return set_->GetElement( aKey.GetKey() ); } + void CrdtDatastore::SetBroadcastEnabled( bool enabled ) + { + broadcast_enabled_.store( enabled, std::memory_order_relaxed ); + } + + 
bool CrdtDatastore::IsBroadcastEnabled() const + { + return broadcast_enabled_.load( std::memory_order_relaxed ); + } + std::string CrdtDatastore::GetKeysPrefix() const { return set_->KeysKey( "" ).GetKey(); @@ -1150,8 +1171,8 @@ namespace sgns::crdt outcome::result CrdtDatastore::Publish( const std::shared_ptr &aDelta, const std::unordered_set &topics ) { - OUTCOME_TRY( auto &&node, CreateDAGNode( aDelta, topics ) ); - OUTCOME_TRY( auto &&newCID, AddDAGNode( node ) ); + BOOST_OUTCOME_TRY( auto node, CreateDAGNode( aDelta, topics ) ); + BOOST_OUTCOME_TRY( auto newCID, AddDAGNode( node ) ); return newCID; } @@ -1159,6 +1180,11 @@ namespace sgns::crdt const std::string &topic, boost::optional peerInfo ) { + if ( !broadcast_enabled_.load( std::memory_order_relaxed ) ) + { + return outcome::success(); + } + if ( !broadcaster_ ) { logger_->error( "Broadcast: No broadcaster, Failed to broadcast" ); @@ -1240,7 +1266,7 @@ namespace sgns::crdt const std::shared_ptr &aDelta, const std::unordered_set &topics ) { - OUTCOME_TRY( auto &&head_list, heads_->GetList( topics ) ); + BOOST_OUTCOME_TRY( auto head_list, heads_->GetList( topics ) ); auto [head_map, height] = head_list; height = height + 1; // This implies our minimum height is 1 @@ -1256,7 +1282,7 @@ namespace sgns::crdt } } - OUTCOME_TRY( auto &&node, CreateIPLDNode( headsWithTopics, aDelta, topics ) ); + BOOST_OUTCOME_TRY( auto node, CreateIPLDNode( headsWithTopics, aDelta, topics ) ); //Log expensive toString only if trace enabled if ( logger_->level() == spdlog::level::debug ) @@ -1480,7 +1506,7 @@ namespace sgns::crdt for ( const auto &link : node->getLinks() ) { - PrintDAGRec( link.get().getCID(), aDepth + 1, aSet ); + BOOST_OUTCOME_TRY( PrintDAGRec( link.get().getCID(), aDepth + 1, aSet ) ); } return outcome::success(); diff --git a/src/crdt/impl/crdt_heads.cpp b/src/crdt/impl/crdt_heads.cpp index c20960281..b806ed67b 100644 --- a/src/crdt/impl/crdt_heads.cpp +++ b/src/crdt/impl/crdt_heads.cpp @@ -108,10 +108,10 
@@ namespace sgns::crdt outcome::result CrdtHeads::Remove( const CID &aCid, const std::string &topic ) { logger_->debug( "{}: Removing {} as head for topic {}", __func__, aCid.toString().value(), topic ); - OUTCOME_TRY( auto &&head_key, GetKey( topic, aCid ) ); + BOOST_OUTCOME_TRY( auto head_key, GetKey( topic, aCid ) ); Buffer keyBuffer; keyBuffer.put( head_key.GetKey() ); - OUTCOME_TRY( dataStore_->remove( keyBuffer ) ); + BOOST_OUTCOME_TRY( dataStore_->remove( keyBuffer ) ); logger_->debug( "{}: Removed {} as head for topic {}", __func__, aCid.toString().value(), topic ); diff --git a/src/crdt/impl/crdt_set.cpp b/src/crdt/impl/crdt_set.cpp index b4682f8c2..0d87e2b1d 100644 --- a/src/crdt/impl/crdt_set.cpp +++ b/src/crdt/impl/crdt_set.cpp @@ -90,7 +90,7 @@ namespace sgns::crdt Buffer keyPrefixBuffer; keyPrefixBuffer.put( strElemsPrefix ); - OUTCOME_TRY( auto queryResult, this->dataStore_->query( keyPrefixBuffer ) ); + BOOST_OUTCOME_TRY( auto queryResult, this->dataStore_->query( keyPrefixBuffer ) ); for ( const auto &[key, _] : queryResult ) { @@ -524,10 +524,10 @@ namespace sgns::crdt Buffer valueKeyBuffer; valueKeyBuffer.put( valueK.GetKey() ); - OUTCOME_TRY( aDataStore->put( valueKeyBuffer, aValue ) ); + BOOST_OUTCOME_TRY( aDataStore->put( valueKeyBuffer, aValue ) ); // store priority - OUTCOME_TRY( this->SetPriority( aKey, aPriority ) ); + BOOST_OUTCOME_TRY( this->SetPriority( aKey, aPriority ) ); // trigger add hook if ( putHookFunc_ != nullptr ) @@ -566,7 +566,7 @@ namespace sgns::crdt Buffer keyBuffer; keyBuffer.put( kNamespace.GetKey() ); - OUTCOME_TRY( batchDatastore->put( std::move( keyBuffer ), Buffer() ) ); + BOOST_OUTCOME_TRY( batchDatastore->put( std::move( keyBuffer ), Buffer() ) ); // update the value if applicable: // * higher priority than we currently have. // * not tombstoned before. 
@@ -615,7 +615,7 @@ namespace sgns::crdt Buffer keyBuffer; keyBuffer.put( kNamespace.GetKey() ); - OUTCOME_TRY( batchDatastore->put( std::move( keyBuffer ), Buffer() ) ); + BOOST_OUTCOME_TRY( batchDatastore->put( std::move( keyBuffer ), Buffer() ) ); // run delete hook only once for all // versions of the same element tombstoned @@ -623,7 +623,7 @@ namespace sgns::crdt deletedKeys.push_back( key ); } - OUTCOME_TRY( batchDatastore->commit() ); + BOOST_OUTCOME_TRY( batchDatastore->commit() ); if ( deleteHookFunc_ ) { @@ -638,7 +638,7 @@ namespace sgns::crdt outcome::result CrdtSet::Merge( const Delta &aDelta, const std::string &aID ) { - OUTCOME_TRY( this->PutTombs( std::vector( aDelta.tombstones().cbegin(), aDelta.tombstones().cend() ), aID ) ); + BOOST_OUTCOME_TRY( this->PutTombs( std::vector( aDelta.tombstones().cbegin(), aDelta.tombstones().cend() ), aID ) ); std::vector elements( aDelta.elements().cbegin(), aDelta.elements().cend() ); return this->PutElems( elements, aID, aDelta.priority() ); diff --git a/src/crdt/impl/graphsync_dagsyncer.cpp b/src/crdt/impl/graphsync_dagsyncer.cpp index ede543c4d..3f5fe9a71 100644 --- a/src/crdt/impl/graphsync_dagsyncer.cpp +++ b/src/crdt/impl/graphsync_dagsyncer.cpp @@ -84,7 +84,7 @@ namespace sgns::crdt return outcome::failure( Error::HOST_IS_NULL ); } - OUTCOME_TRY( host_->listen( listen_to ) ); + BOOST_OUTCOME_TRY( host_->listen( listen_to ) ); auto startResult = this->StartSync(); @@ -202,7 +202,7 @@ namespace sgns::crdt } ipfs_lite::ipfs::graphsync::Subscription curr_subscription; - OUTCOME_TRY( auto peerEntry, GetRoute( cid ) ); + BOOST_OUTCOME_TRY( auto peerEntry, GetRoute( cid ) ); auto &peerID = peerEntry.first; auto &address = peerEntry.second; @@ -217,7 +217,7 @@ namespace sgns::crdt if ( already_requested == false ) { logger_->debug( "Requesting CID {}", cid.toString().value() ); - OUTCOME_TRY( ( auto &&, subscription ), RequestNode( peerID, address, cid ) ); + BOOST_OUTCOME_TRY( auto subscription, RequestNode( 
peerID, address, cid ) ); curr_subscription = std::move( subscription ); } @@ -249,7 +249,7 @@ namespace sgns::crdt return result; } logger_->error( "Request state not found for CID {}", cid.toString().value() ); - OUTCOME_TRY( BlackListPeer( peerID ) ); + BOOST_OUTCOME_TRY( BlackListPeer( peerID ) ); return outcome::failure( Error::ROUTE_NOT_FOUND ); } @@ -533,7 +533,7 @@ namespace sgns::crdt }; std::deque work_queue; - + // Start with the root node const CID &root_cid = node.getCID(); @@ -628,7 +628,7 @@ namespace sgns::crdt // This ensures children are resolved before their parents, maintaining the original // recursive behavior where we don't mark a CID as complete until all its children are processed work_queue.push_front( WorkItem{ get_child_result.value() } ); - + logger_->trace( "TraverseCIDsLinks: Added child {} to work queue (queue size: {})", child.toString().value(), work_queue.size() ); diff --git a/src/crypto/secp256k1/secp256k1_provider_impl.cpp b/src/crypto/secp256k1/secp256k1_provider_impl.cpp index 249163b9d..b2a9652d3 100644 --- a/src/crypto/secp256k1/secp256k1_provider_impl.cpp +++ b/src/crypto/secp256k1/secp256k1_provider_impl.cpp @@ -12,7 +12,7 @@ namespace sgns::crypto { Secp256k1ProviderImpl::recoverPublickeyUncompressed( const secp256k1::RSVSignature &signature, const secp256k1::MessageHash &message_hash) const { - OUTCOME_TRY((auto &&, pubkey), recoverPublickey(signature, message_hash)); + BOOST_OUTCOME_TRY( auto pubkey, recoverPublickey(signature, message_hash)); secp256k1::UncompressedPublicKey pubkey_out; size_t outputlen = pubkey_out.size(); @@ -32,7 +32,7 @@ namespace sgns::crypto { Secp256k1ProviderImpl::recoverPublickeyCompressed( const secp256k1::RSVSignature &signature, const secp256k1::MessageHash &message_hash) const { - OUTCOME_TRY((auto &&, pubkey), recoverPublickey(signature, message_hash)); + BOOST_OUTCOME_TRY( auto pubkey, recoverPublickey(signature, message_hash)); secp256k1::CompressedPublicKey pubkey_out; size_t 
outputlen = secp256k1::CompressedPublicKey::size(); @@ -73,7 +73,7 @@ namespace sgns::crypto { outcome::result Secp256k1ProviderImpl::recoverPublickey( const secp256k1::RSVSignature &signature, const secp256k1::MessageHash &message_hash) const { - OUTCOME_TRY((auto &&, rec_id), validateRecoveryId(static_cast(signature[64]))); + BOOST_OUTCOME_TRY( auto rec_id, validateRecoveryId(static_cast(signature[64]))); secp256k1_ecdsa_recoverable_signature sig_rec; secp256k1_pubkey pubkey; diff --git a/src/local_secure_storage/CMakeLists.txt b/src/local_secure_storage/CMakeLists.txt index 1fa1c13f6..d39404cdf 100644 --- a/src/local_secure_storage/CMakeLists.txt +++ b/src/local_secure_storage/CMakeLists.txt @@ -17,9 +17,9 @@ if(ANDROID) target_sources(secure_storage PRIVATE impl/Android.cpp) target_link_libraries(secure_storage PUBLIC - ${android-lib} + android::android PRIVATE - ${log-lib} + android::log ) elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux") target_sources(secure_storage PRIVATE impl/Linux.cpp) diff --git a/src/local_secure_storage/impl/Android.cpp b/src/local_secure_storage/impl/Android.cpp index d298a206e..883f6f07f 100644 --- a/src/local_secure_storage/impl/Android.cpp +++ b/src/local_secure_storage/impl/Android.cpp @@ -1,5 +1,6 @@ #include "Android.hpp" +#include #include #include @@ -16,6 +17,96 @@ namespace { JavaVM *g_jvm = nullptr; + jclass g_keystore_helper_class = nullptr; + + // Helper function to find class using app ClassLoader instead of system ClassLoader + jclass FindClassUsingAppClassLoader( JNIEnv *env, jobject context, const char *className ) + { + // Get the application's ClassLoader through the Context + jclass contextClass = env->GetObjectClass( context ); + if ( contextClass == nullptr ) + { + LOGE( "Failed to get Context class" ); + return nullptr; + } + + jmethodID getClassLoaderMethod = env->GetMethodID( contextClass, "getClassLoader", "()Ljava/lang/ClassLoader;" ); + if ( getClassLoaderMethod == nullptr ) + { + LOGE( "Failed to get 
getClassLoader method" ); + env->DeleteLocalRef( contextClass ); + return nullptr; + } + + jobject classLoader = env->CallObjectMethod( context, getClassLoaderMethod ); + env->DeleteLocalRef( contextClass ); + + if ( classLoader == nullptr ) + { + LOGE( "Failed to get ClassLoader" ); + return nullptr; + } + + // Use ClassLoader.loadClass() to find our class + jclass classLoaderClass = env->GetObjectClass( classLoader ); + jmethodID loadClassMethod = env->GetMethodID( classLoaderClass, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;" ); + env->DeleteLocalRef( classLoaderClass ); + + if ( loadClassMethod == nullptr ) + { + LOGE( "Failed to get loadClass method" ); + env->DeleteLocalRef( classLoader ); + return nullptr; + } + + // Convert className from JNI format (ai/gnus/sdk/KeyStoreHelper) to Java format (ai.gnus.sdk.KeyStoreHelper) + std::string javaClassName( className ); + std::replace( javaClassName.begin(), javaClassName.end(), '/', '.' ); + + jstring classNameStr = env->NewStringUTF( javaClassName.c_str() ); + jclass foundClass = static_cast( env->CallObjectMethod( classLoader, loadClassMethod, classNameStr ) ); + + env->DeleteLocalRef( classNameStr ); + env->DeleteLocalRef( classLoader ); + + return foundClass; + } +} + +// JNI function callable from Java to initialize the KeyStoreHelper class reference +extern "C" JNIEXPORT void JNICALL +Java_ai_gnus_sdk_KeyStoreHelper_nativeInit( JNIEnv *env, jclass clazz, jobject context ) +{ + LOGI( "KeyStoreHelper native initialization called" ); + + if ( g_keystore_helper_class != nullptr ) + { + LOGI( "KeyStoreHelper class already initialized" ); + return; + } + + // Find the KeyStoreHelper class using app ClassLoader + jclass local_class = FindClassUsingAppClassLoader( env, context, "ai/gnus/sdk/KeyStoreHelper" ); + + if ( local_class == nullptr ) + { + LOGE( "Failed to find KeyStoreHelper class using app ClassLoader" ); + // Fallback to FindClass (may work on main thread) + local_class = env->FindClass( 
"ai/gnus/sdk/KeyStoreHelper" ); + if ( local_class == nullptr ) + { + LOGE( "Failed to find KeyStoreHelper class with FindClass fallback" ); + env->ExceptionDescribe(); + env->ExceptionClear(); + return; + } + } + + // Cache as global reference + g_keystore_helper_class = static_cast( env->NewGlobalRef( local_class ) ); + env->DeleteLocalRef( local_class ); + + LOGI( "KeyStoreHelper class cached successfully" ); } extern "C" JNIEXPORT jint JNICALL JNI_OnLoad( JavaVM *vm, void *_reserved ) @@ -55,16 +146,14 @@ namespace sgns throw std::runtime_error( "Failed to get JNI environment" ); } - jclass local_class = env->FindClass( "ai/gnus/sdk/KeyStoreHelper" ); - if ( local_class == nullptr ) + // Use cached global reference instead of FindClass + if ( g_keystore_helper_class == nullptr ) { - env->ExceptionDescribe(); - env->ExceptionClear(); - throw std::runtime_error( "Failed to find KeyStoreHelper class" ); + LOGE( "KeyStoreHelper class not cached. Did you call KeyStoreHelper.initialize() from Java?" ); + throw std::runtime_error( "KeyStoreHelper class not initialized. Call KeyStoreHelper.initialize(context) first." 
); } - - key_store_helper_class_ = static_cast( env->NewGlobalRef( local_class ) ); - env->DeleteLocalRef( local_class ); + + key_store_helper_class_ = g_keystore_helper_class; // Get STATIC method IDs (note the "Static" in the function names) load_method_ = env->GetStaticMethodID( key_store_helper_class_, "load", "()Ljava/lang/String;" ); @@ -83,15 +172,8 @@ namespace sgns AndroidSecureStorage::~AndroidSecureStorage() { - auto env = GetJNIEnv(); - - if ( env != nullptr ) - { - if ( key_store_helper_class_ != nullptr ) - { - env->DeleteGlobalRef( key_store_helper_class_ ); - } - } + // We don't delete key_store_helper_class_ here anymore since it's a shared global reference + // It will be cleaned up when the JVM unloads } outcome::result AndroidSecureStorage::LoadJSON() const diff --git a/src/local_secure_storage/impl/JSONBackend.cpp b/src/local_secure_storage/impl/JSONBackend.cpp index 4ce1d9aaf..0004bc0f4 100644 --- a/src/local_secure_storage/impl/JSONBackend.cpp +++ b/src/local_secure_storage/impl/JSONBackend.cpp @@ -8,7 +8,7 @@ namespace sgns { outcome::result JSONBackend::Load( const std::string &key ) { - OUTCOME_TRY( rj::Document d, LoadJSON() ); + BOOST_OUTCOME_TRY( rj::Document d, LoadJSON() ); if ( !d.HasMember( key.c_str() ) ) { @@ -28,7 +28,7 @@ namespace sgns outcome::result JSONBackend::Save( const std::string &key, const SecureBufferType &buffer ) { - OUTCOME_TRY( rj::Document d, LoadJSON() ); + BOOST_OUTCOME_TRY( rj::Document d, LoadJSON() ); rj::Value val( rj::StringRef( buffer.c_str(), buffer.length() ), d.GetAllocator() ); @@ -46,11 +46,11 @@ namespace sgns outcome::result JSONBackend::DeleteKey( const std::string &key ) { - OUTCOME_TRY( rj::Document d, LoadJSON() ); + BOOST_OUTCOME_TRY( rj::Document d, LoadJSON() ); bool ret = d.RemoveMember( key.c_str() ); - OUTCOME_TRY( SaveJSON( std::move( d ) ) ); + BOOST_OUTCOME_TRY( SaveJSON( std::move( d ) ) ); return ret; } diff --git a/src/local_secure_storage/impl/KeyStoreHelper.java 
b/src/local_secure_storage/impl/KeyStoreHelper.java index 6e780e0cd..a28ed6b5f 100644 --- a/src/local_secure_storage/impl/KeyStoreHelper.java +++ b/src/local_secure_storage/impl/KeyStoreHelper.java @@ -45,6 +45,8 @@ public static void initialize(Context context) { synchronized (sLock) { if (sInstance == null) { sInstance = new KeyStoreHelper(context); + // Initialize native side with app ClassLoader + nativeInit(context); Log.i(TAG, "KeyStoreHelper initialized"); } else { Log.w(TAG, "KeyStoreHelper already initialized"); @@ -52,6 +54,12 @@ public static void initialize(Context context) { } } + /** + * Native initialization - called from Java to cache the class reference + * using the app's ClassLoader instead of system ClassLoader + */ + private static native void nativeInit(Context context); + /** * Get the singleton instance. * Throws IllegalStateException if not initialized. diff --git a/src/local_secure_storage/impl/json/JSONSecureStorage.cpp b/src/local_secure_storage/impl/json/JSONSecureStorage.cpp index b937c44a8..7fe747496 100644 --- a/src/local_secure_storage/impl/json/JSONSecureStorage.cpp +++ b/src/local_secure_storage/impl/json/JSONSecureStorage.cpp @@ -1,6 +1,6 @@ /** * @file JSONSecureStorage.cpp - * @brief + * @brief * @date 2024-06-06 * @author Henrique A. 
Klein (hklein@gnus.ai) */ @@ -44,7 +44,7 @@ namespace sgns outcome::result JSONSecureStorage::Load( const std::string &key ) { - OUTCOME_TRY( auto document, LoadJSON() ); + BOOST_OUTCOME_TRY( auto document, LoadJSON() ); auto maybe_field = document.FindMember( "GeniusAccount" ); if ( maybe_field == document.MemberEnd() || !maybe_field->value.IsObject() ) diff --git a/src/outcome/CMakeLists.txt b/src/outcome/CMakeLists.txt index c44dab920..90462f515 100644 --- a/src/outcome/CMakeLists.txt +++ b/src/outcome/CMakeLists.txt @@ -1,5 +1,5 @@ add_library(outcome INTERFACE) -set_target_properties(outcome PROPERTIES PUBLIC_HEADER "outcome.hpp") +#set_target_properties(outcome PROPERTIES PUBLIC_HEADER "outcome.hpp") target_link_libraries(outcome INTERFACE Boost::headers p2p::p2p_event diff --git a/src/processing/impl/processing_core_impl.cpp b/src/processing/impl/processing_core_impl.cpp index f0e7fa96c..edce960c1 100644 --- a/src/processing/impl/processing_core_impl.cpp +++ b/src/processing/impl/processing_core_impl.cpp @@ -57,13 +57,16 @@ namespace sgns::processing libp2p::injector::makeKademliaInjector( libp2p::injector::useKademliaConfig( kademlia_config ) ) ); auto ioc = injector.create>(); - task.ParseFromArray( queryTasks.value().data(), queryTasks.value().size() ); + if ( !task.ParseFromArray( queryTasks.value().data(), queryTasks.value().size() ) ) + { + return std::errc::bad_message; + } //Parse main json data - OUTCOME_TRY( auto procmgr, sgns::sgprocessing::ProcessingManager::Create( task.json_data() ) ); + BOOST_OUTCOME_TRY( auto procmgr, sgns::sgprocessing::ProcessingManager::Create( task.json_data() ) ); m_currentProcessingManager = procmgr; // Store for progress tracking //Parse subtask json - auto subtaskjson = nlohmann::json::parse( subTask.json_data() ); - sgns::ModelNode model; + auto subtaskjson = nlohmann::json::parse( subTask.json_data() ); + sgns::ModelNode model; sgns::from_json( subtaskjson, model ); std::vector> chunkhashes; auto tempResult = 
procmgr->Process( ioc, chunkhashes, model ); @@ -93,10 +96,11 @@ namespace sgns::processing float ProcessingCoreImpl::GetProgress() const { - if (m_currentProcessingManager) { + if ( m_currentProcessingManager ) + { return m_currentProcessingManager->GetProgress(); } return 0.0f; } - + } diff --git a/src/processing/impl/processing_task_queue_impl.cpp b/src/processing/impl/processing_task_queue_impl.cpp index 6a74196f0..90dc2c9a8 100644 --- a/src/processing/impl/processing_task_queue_impl.cpp +++ b/src/processing/impl/processing_task_queue_impl.cpp @@ -38,7 +38,7 @@ namespace sgns::processing sgns::crdt::HierarchicalKey key( complete_subtask_path.str() ); sgns::base::Buffer value; value.put( subTask.SerializeAsString() ); - BOOST_OUTCOME_TRYV2( auto &&, job_crdt_transaction_->Put( std::move( key ), std::move( value ) ) ); + BOOST_OUTCOME_TRY( job_crdt_transaction_->Put( std::move( key ), std::move( value ) ) ); m_logger->debug( "[{}] placed to GlobalDB ", complete_subtask_path.str() ); } @@ -49,7 +49,7 @@ namespace sgns::processing sgns::base::Buffer value; value.put( task.SerializeAsString() ); - BOOST_OUTCOME_TRYV2( auto &&, job_crdt_transaction_->Put( std::move( key ), std::move( value ) ) ); + BOOST_OUTCOME_TRY( job_crdt_transaction_->Put( std::move( key ), std::move( value ) ) ); m_logger->debug( "[{}] placed to GlobalDB ", complete_task_path.str() ); return outcome::success(); @@ -103,7 +103,7 @@ namespace sgns::processing outcome::result> ProcessingTaskQueueImpl::GrabTask() { m_logger->info( "GRAB_TASK called - blacklist has {} items", m_badjobs.size() ); - OUTCOME_TRY( ( auto &&, queryTasks ), m_db->QueryKeyValues( std::string( TASK_LIST_KEY ) ) ); + BOOST_OUTCOME_TRY( auto queryTasks, m_db->QueryKeyValues( std::string( TASK_LIST_KEY ) ) ); //m_logger->info( "Task list grabbed from CRDT datastore" ); @@ -197,7 +197,7 @@ namespace sgns::processing auto job_completion_transaction = m_db->BeginTransaction(); data.put( taskResult.SerializeAsString() ); - 
BOOST_OUTCOME_TRYV2( auto &&, job_completion_transaction->Put( std::move( result_key ), std::move( data ) ) ); + BOOST_OUTCOME_TRY( job_completion_transaction->Put( std::move( result_key ), std::move( data ) ) ); m_logger->debug( "TASK_COMPLETED: {}, results stored", taskKey ); return job_completion_transaction; @@ -220,7 +220,7 @@ namespace sgns::processing complete_task_path % taskId; sgns::crdt::HierarchicalKey task_key( complete_task_path.str() ); - OUTCOME_TRY( ( auto &&, task_buffer ), m_db->Get( task_key ) ); + BOOST_OUTCOME_TRY( auto task_buffer, m_db->Get( task_key ) ); SGProcessing::Task task; @@ -243,7 +243,7 @@ namespace sgns::processing outcome::result ProcessingTaskQueueImpl::IsTaskValid( const std::string taskJson ) { - OUTCOME_TRY( auto procmgr, sgns::sgprocessing::ProcessingManager::Create( taskJson ) ); + BOOST_OUTCOME_TRY( auto procmgr, sgns::sgprocessing::ProcessingManager::Create( taskJson ) ); return outcome::success(); } @@ -351,8 +351,8 @@ namespace sgns::processing sgns::crdt::HierarchicalKey key( path ); - BOOST_OUTCOME_TRYV2( auto &&, job_crdt_transaction_->Put( std::move( key ), std::move( value ) ) ); - BOOST_OUTCOME_TRYV2( auto &&, job_crdt_transaction_->Commit( { m_processing_topic } ) ); + BOOST_OUTCOME_TRY( job_crdt_transaction_->Put( std::move( key ), std::move( value ) ) ); + BOOST_OUTCOME_TRY( job_crdt_transaction_->Commit( { m_processing_topic } ) ); ResetAtomicTransaction(); @@ -369,7 +369,7 @@ namespace sgns::processing m_logger->info( "MARKING_TASK_BAD: {} (total bad jobs: {}) - instance at {}", taskKey, m_badjobs.size(), static_cast(this) ); m_badjobs.insert( taskKey ); m_logger->info( "MARKED_TASK_BAD: {} (total bad jobs now: {}) - instance at {}", taskKey, m_badjobs.size(), static_cast(this) ); - + // Dump current blacklist for debugging if ( m_badjobs.size() <= 10 ) // Only dump if list is small { diff --git a/src/processing/processing_subtask_enqueuer_impl.cpp b/src/processing/processing_subtask_enqueuer_impl.cpp index 
6a91c9f09..c92dee6ad 100644 --- a/src/processing/processing_subtask_enqueuer_impl.cpp +++ b/src/processing/processing_subtask_enqueuer_impl.cpp @@ -13,7 +13,7 @@ namespace sgns::processing std::string &subTaskQueueId, std::list &subTasks ) { - OUTCOME_TRY( ( auto &&, task_result ), m_taskQueue->GrabTask() ); + BOOST_OUTCOME_TRY( auto task_result, m_taskQueue->GrabTask() ); auto [taskKey, task] = task_result; @@ -27,7 +27,7 @@ namespace sgns::processing { m_logger->debug( "SUBTASK: id={}, ipfsblock={}", subtask.subtaskid(), subtask.ipfsblock() ); } - + return task; } diff --git a/src/proof/GeniusProver.cpp b/src/proof/GeniusProver.cpp index 6e8f94ff3..7a97cc61a 100644 --- a/src/proof/GeniusProver.cpp +++ b/src/proof/GeniusProver.cpp @@ -91,7 +91,7 @@ namespace sgns { return outcome::failure( ProverError::TABLE_PATH_ERROR ); } - OUTCOME_TRY( ( auto &&, plonk_table ), + BOOST_OUTCOME_TRY( auto plonk_table, NilFileHelper::DecodeMarshalledData( itable ) ); itable.close(); std::ifstream icircuit; @@ -100,7 +100,7 @@ namespace sgns { return outcome::failure( ProverError::CIRCUIT_PATH_ERROR ); } - OUTCOME_TRY( ( auto &&, plonk_constrains ), + BOOST_OUTCOME_TRY( auto plonk_constrains, NilFileHelper::DecodeMarshalledData( icircuit ) ); icircuit.close(); GeniusAssigner::AssignerOutput assigner_outputs( plonk_constrains, plonk_table ); diff --git a/src/proof/IBasicProof.cpp b/src/proof/IBasicProof.cpp index b8f743b32..825b9dfa6 100644 --- a/src/proof/IBasicProof.cpp +++ b/src/proof/IBasicProof.cpp @@ -1,6 +1,6 @@ /** * @file IBasicProof.cpp - * @brief + * @brief * @date 2024-10-08 * @author Henrique A. 
Klein (hklein@gnus.ai) */ @@ -66,11 +66,11 @@ namespace sgns GeniusAssigner assigner; GeniusProver prover; - OUTCOME_TRY( ( auto &&, assign_value ), + BOOST_OUTCOME_TRY( auto assign_value, assigner.GenerateCircuitAndTable( public_inputs_json_array, private_inputs_json_array, bytecode_payload_ ) ); - OUTCOME_TRY( ( auto &&, proof_value ), prover.CreateProof( assign_value.at( 0 ) ) ); + BOOST_OUTCOME_TRY( auto proof_value, prover.CreateProof( assign_value.at( 0 ) ) ); auto proof_vector = prover.WriteProofToVector( proof_value.proof ); retval.set_snark( std::string( proof_vector.begin(), proof_vector.end() ) ); @@ -80,8 +80,8 @@ namespace sgns outcome::result> IBasicProof::GenerateFullProof() { - OUTCOME_TRY( ( auto &&, proof_value ), GenerateProof() ); - OUTCOME_TRY( ( auto &&, full_proof_data ), SerializeFullProof( proof_value ) ); + BOOST_OUTCOME_TRY( auto proof_value, GenerateProof() ); + BOOST_OUTCOME_TRY( auto full_proof_data, SerializeFullProof( proof_value ) ); return full_proof_data; } @@ -99,7 +99,7 @@ namespace sgns outcome::result IBasicProof::VerifyFullProof( const std::vector &proof_data ) { - OUTCOME_TRY( ( auto &&, base_proof ), DeSerializeBaseProof( proof_data ) ); + BOOST_OUTCOME_TRY( auto base_proof, DeSerializeBaseProof( proof_data ) ); auto ParameterDeserializer = PublicParamDeSerializers.find( base_proof.proof_data().type() ); @@ -118,7 +118,7 @@ namespace sgns return true; } - OUTCOME_TRY( ( auto &&, parameter_pair ), ParameterDeserializer->second( proof_data ) ); + BOOST_OUTCOME_TRY( auto parameter_pair, ParameterDeserializer->second( proof_data ) ); return VerifyFullProof( parameter_pair, base_proof.proof_data(), bytecode->second ); } @@ -135,7 +135,7 @@ namespace sgns GeniusAssigner assigner; - OUTCOME_TRY( ( auto &&, assign_value ), + BOOST_OUTCOME_TRY( auto assign_value, assigner.GenerateCircuitAndTable( public_inputs_json_array, private_inputs_json_array, std::move( proof_bytecode ) ) ); diff --git a/src/proof/NilFileHelper.hpp 
b/src/proof/NilFileHelper.hpp index 524716525..863e92d9a 100644 --- a/src/proof/NilFileHelper.hpp +++ b/src/proof/NilFileHelper.hpp @@ -1,6 +1,6 @@ /** * @file NilFileHelper.hpp - * @brief + * @brief * @date 2024-09-19 * @author Henrique A. Klein (hklein@gnus.ai) */ @@ -52,12 +52,12 @@ namespace sgns std::vector v; if ( binary ) { - OUTCOME_TRY( ( auto &&, result ), ReadFromBin( in ) ); + BOOST_OUTCOME_TRY( auto result, ReadFromBin( in ) ); v = std::move( result ); } else { - OUTCOME_TRY( ( auto &&, result ), ReadFromHex( in ) ); + BOOST_OUTCOME_TRY( auto result, ReadFromHex( in ) ); v = std::move( result ); } diff --git a/src/proof/TransferProof.cpp b/src/proof/TransferProof.cpp index dbe7b52d8..f6fa0e799 100644 --- a/src/proof/TransferProof.cpp +++ b/src/proof/TransferProof.cpp @@ -19,7 +19,7 @@ namespace sgns TransferProof::TransferProof( uint64_t balance, uint64_t amount, - std::optional bytecode) : + std::optional bytecode ) : #ifdef RELEASE_BYTECODE_CIRCUITS IBasicProof( bytecode.value_or( std::string( TransactionCircuit ) ) ), // #else @@ -105,7 +105,11 @@ namespace sgns size_t size = transfer_proof_proto.ByteSizeLong(); std::vector serialized_proto( size ); - transfer_proof_proto.SerializeToArray( serialized_proto.data(), serialized_proto.size() ); + if ( !transfer_proof_proto.SerializeToArray( serialized_proto.data(), serialized_proto.size() ) ) + { + return std::errc::bad_message; + } + return serialized_proto; } diff --git a/src/storage/changes_trie/impl/changes_trie.cpp b/src/storage/changes_trie/impl/changes_trie.cpp index 4c59b6d13..b5dbd9cde 100644 --- a/src/storage/changes_trie/impl/changes_trie.cpp +++ b/src/storage/changes_trie/impl/changes_trie.cpp @@ -18,8 +18,8 @@ namespace sgns::storage::changes_trie { auto &changers = change.second; auto current_number = parent_number + 1; KeyIndexVariant keyIndex{ExtrinsicsChangesKey{{current_number, key}}}; - OUTCOME_TRY((auto &&, key_enc), scale::encode(keyIndex)); - OUTCOME_TRY((auto &&, value), 
scale::encode(changers)); + BOOST_OUTCOME_TRY( auto key_enc, scale::encode(keyIndex)); + BOOST_OUTCOME_TRY( auto value, scale::encode(changers)); base::Buffer value_buf {std::move(value)}; BOOST_OUTCOME_TRYV2(auto &&, changes_storage->put(base::Buffer{std::move(key_enc)}, std::move(value_buf))); diff --git a/src/storage/changes_trie/impl/storage_changes_tracker_impl.cpp b/src/storage/changes_trie/impl/storage_changes_tracker_impl.cpp index 63b259de0..9ece5a9f0 100644 --- a/src/storage/changes_trie/impl/storage_changes_tracker_impl.cpp +++ b/src/storage/changes_trie/impl/storage_changes_tracker_impl.cpp @@ -53,8 +53,8 @@ namespace sgns::storage::changes_trie { const base::Buffer &value, bool is_new_entry) { auto change_it = extrinsics_changes_.find(key); - OUTCOME_TRY((auto &&, idx_bytes), get_extrinsic_index_()); - OUTCOME_TRY((auto &&, idx), scale::decode(idx_bytes)); + BOOST_OUTCOME_TRY( auto idx_bytes, get_extrinsic_index_()); + BOOST_OUTCOME_TRY( auto idx, scale::decode(idx_bytes)); // if key was already changed in the same block, just add extrinsic to // the changers list @@ -73,8 +73,8 @@ namespace sgns::storage::changes_trie { outcome::result StorageChangesTrackerImpl::onRemove( const base::Buffer &key) { auto change_it = extrinsics_changes_.find(key); - OUTCOME_TRY((auto &&, idx_bytes), get_extrinsic_index_()); - OUTCOME_TRY((auto &&, idx), scale::decode(idx_bytes)); + BOOST_OUTCOME_TRY( auto idx_bytes, get_extrinsic_index_()); + BOOST_OUTCOME_TRY( auto idx, scale::decode(idx_bytes)); // if key was already changed in the same block, just add extrinsic to // the changers list @@ -100,7 +100,7 @@ namespace sgns::storage::changes_trie { if (parent != parent_hash_) { return Error::INVALID_PARENT_HASH; } - OUTCOME_TRY((auto &&, + BOOST_OUTCOME_TRY((auto &&, trie), ChangesTrie::buildFromChanges( parent_number_, trie_factory_, codec_, extrinsics_changes_, conf)); diff --git a/src/storage/trie/impl/persistent_trie_batch_impl.cpp 
b/src/storage/trie/impl/persistent_trie_batch_impl.cpp index 43dfaaca6..94bd79aaf 100644 --- a/src/storage/trie/impl/persistent_trie_batch_impl.cpp +++ b/src/storage/trie/impl/persistent_trie_batch_impl.cpp @@ -43,7 +43,7 @@ namespace sgns::storage::trie { } outcome::result PersistentTrieBatchImpl::commit() { - OUTCOME_TRY((auto &&, root), serializer_->storeTrie(*trie_)); + BOOST_OUTCOME_TRY( auto root, serializer_->storeTrie(*trie_)); root_changed_handler_(root); return root; } diff --git a/src/storage/trie/impl/trie_storage_impl.cpp b/src/storage/trie/impl/trie_storage_impl.cpp index f80ac1108..0c372638b 100644 --- a/src/storage/trie/impl/trie_storage_impl.cpp +++ b/src/storage/trie/impl/trie_storage_impl.cpp @@ -19,7 +19,7 @@ namespace sgns::storage::trie { auto empty_trie = trie_factory->createEmpty( [](const auto &branch, auto idx) { return nullptr; }); // ensure retrieval of empty trie succeeds - OUTCOME_TRY((auto &&, empty_root), serializer->storeTrie(*empty_trie)); + BOOST_OUTCOME_TRY( auto empty_root, serializer->storeTrie(*empty_trie)); return std::unique_ptr( new TrieStorageImpl(std::move(empty_root), std::move(codec), @@ -82,7 +82,7 @@ namespace sgns::storage::trie { TrieStorageImpl::getEphemeralBatch() const { logger_->info("Initialize ephemeral trie batch with root: {}", root_hash_.toHex()); - OUTCOME_TRY((auto &&, trie), serializer_->retrieveTrie(Buffer{root_hash_})); + BOOST_OUTCOME_TRY( auto trie, serializer_->retrieveTrie(Buffer{root_hash_})); return std::make_unique(codec_, std::move(trie)); } @@ -111,7 +111,7 @@ namespace sgns::storage::trie { TrieStorageImpl::getEphemeralBatchAt(const base::Hash256 &root) const { logger_->debug("Initialize ephemeral trie batch with root: {}", root_hash_.toHex()); - OUTCOME_TRY((auto &&, trie), serializer_->retrieveTrie(Buffer{root})); + BOOST_OUTCOME_TRY( auto trie, serializer_->retrieveTrie(Buffer{root})); return std::make_unique(codec_, std::move(trie)); } diff --git 
a/src/storage/trie/serialization/ordered_trie_hash.hpp b/src/storage/trie/serialization/ordered_trie_hash.hpp index c6e3186e5..fd2123669 100644 --- a/src/storage/trie/serialization/ordered_trie_hash.hpp +++ b/src/storage/trie/serialization/ordered_trie_hash.hpp @@ -32,11 +32,11 @@ namespace sgns::storage::trie { It it = begin; scale::CompactInteger key = 0; while (it != end) { - OUTCOME_TRY((auto &&, enc), scale::encode(key++)); + BOOST_OUTCOME_TRY( auto enc, scale::encode(key++)); BOOST_OUTCOME_TRYV2(auto &&, trie.put(base::Buffer{enc}, *it)); it++; } - OUTCOME_TRY((auto &&, enc), codec.encodeNode(*trie.getRoot())); + BOOST_OUTCOME_TRY( auto enc, codec.encodeNode(*trie.getRoot())); return base::Buffer{codec.hash256(enc)}; } diff --git a/src/storage/trie/serialization/supergenius_codec.cpp b/src/storage/trie/serialization/supergenius_codec.cpp index cb4a6173b..378f94e2a 100644 --- a/src/storage/trie/serialization/supergenius_codec.cpp +++ b/src/storage/trie/serialization/supergenius_codec.cpp @@ -159,7 +159,7 @@ namespace sgns::storage::trie { outcome::result SuperGeniusCodec::encodeBranch( const BranchNode &node) const { // node header - OUTCOME_TRY((auto &&, encoding), encodeHeader(node)); + BOOST_OUTCOME_TRY( auto encoding, encodeHeader(node)); // key encoding += nibblesToKey(node.key_nibbles); @@ -169,7 +169,7 @@ namespace sgns::storage::trie { if (node.getTrieType() == SuperGeniusNode::Type::BranchWithValue) { // scale encoded value - OUTCOME_TRY((auto &&, encNodeValue), scale::encode(node.value.get())); + BOOST_OUTCOME_TRY( auto encNodeValue, scale::encode(node.value.get())); encoding += Buffer(std::move(encNodeValue)); } @@ -179,11 +179,11 @@ namespace sgns::storage::trie { if (child->isDummy()) { auto merkle_value = std::dynamic_pointer_cast(child)->db_key; - OUTCOME_TRY((auto &&, scale_enc), scale::encode(std::move(merkle_value))); + BOOST_OUTCOME_TRY( auto scale_enc, scale::encode(std::move(merkle_value))); encoding.put(scale_enc); } else { - 
OUTCOME_TRY((auto &&, enc), encodeNode(*child)); - OUTCOME_TRY((auto &&, scale_enc), scale::encode(merkleValue(enc))); + BOOST_OUTCOME_TRY( auto enc, encodeNode(*child)); + BOOST_OUTCOME_TRY( auto scale_enc, scale::encode(merkleValue(enc))); encoding.put(scale_enc); } } @@ -194,14 +194,14 @@ namespace sgns::storage::trie { outcome::result SuperGeniusCodec::encodeLeaf( const LeafNode &node) const { - OUTCOME_TRY((auto &&, encoding), encodeHeader(node)); + BOOST_OUTCOME_TRY( auto encoding, encodeHeader(node)); // key encoding += nibblesToKey(node.key_nibbles); if (!node.value) return Error::NO_NODE_VALUE; // scale encoded value - OUTCOME_TRY((auto &&, encNodeValue), scale::encode(node.value.get())); + BOOST_OUTCOME_TRY( auto encNodeValue, scale::encode(node.value.get())); encoding += Buffer(std::move(encNodeValue)); return outcome::success(std::move(encoding)); @@ -211,15 +211,15 @@ namespace sgns::storage::trie { const base::Buffer &encoded_data) const { BufferStream stream{encoded_data}; // decode the header with the node type and the partial key length - OUTCOME_TRY((auto &&, header), decodeHeader(stream)); + BOOST_OUTCOME_TRY( auto header, decodeHeader(stream)); auto [type, pk_length] = header; // decode the partial key - OUTCOME_TRY((auto &&, partial_key), decodePartialKey(pk_length, stream)); + BOOST_OUTCOME_TRY( auto partial_key, decodePartialKey(pk_length, stream)); // decode the node subvalue (see Definition 28 of the supergenius // specification) switch (type) { case SuperGeniusNode::Type::Leaf: { - OUTCOME_TRY((auto &&, value), scale::decode(stream.leftBytes())); + BOOST_OUTCOME_TRY( auto value, scale::decode(stream.leftBytes())); return std::make_shared(partial_key, value); } case SuperGeniusNode::Type::BranchEmptyValue: diff --git a/src/storage/trie/serialization/trie_serializer_impl.cpp b/src/storage/trie/serialization/trie_serializer_impl.cpp index 6c45d4ae3..c1e9538fc 100644 --- a/src/storage/trie/serialization/trie_serializer_impl.cpp +++ 
b/src/storage/trie/serialization/trie_serializer_impl.cpp @@ -34,7 +34,7 @@ namespace sgns::storage::trie { if (db_key == getEmptyRootHash()) { return trie_factory_->createEmpty(std::move(f)); } - OUTCOME_TRY((auto &&, root), retrieveNode(db_key)); + BOOST_OUTCOME_TRY( auto root, retrieveNode(db_key)); return trie_factory_->createFromRoot(std::move(root), std::move(f)); } @@ -52,7 +52,7 @@ namespace sgns::storage::trie { BOOST_OUTCOME_TRYV2(auto &&, storeChildren(branch, *batch)); } - OUTCOME_TRY((auto &&, enc), codec_->encodeNode(node)); + BOOST_OUTCOME_TRY( auto enc, codec_->encodeNode(node)); auto key = Buffer{codec_->hash256(enc)}; BOOST_OUTCOME_TRYV2(auto &&, batch->put(key, enc)); BOOST_OUTCOME_TRYV2(auto &&, batch->commit()); @@ -72,7 +72,7 @@ namespace sgns::storage::trie { auto &branch = dynamic_cast(node); BOOST_OUTCOME_TRYV2(auto &&, storeChildren(branch, batch)); } - OUTCOME_TRY((auto &&, enc), codec_->encodeNode(node)); + BOOST_OUTCOME_TRY( auto enc, codec_->encodeNode(node)); auto key = Buffer{codec_->merkleValue(enc)}; BOOST_OUTCOME_TRYV2(auto &&, batch.put(key, enc)); return key; @@ -82,7 +82,7 @@ namespace sgns::storage::trie { BufferBatch &batch) { for (auto &child : branch.children) { if (child && !child->isDummy()) { - OUTCOME_TRY((auto &&, hash), storeNode(*child, batch)); + BOOST_OUTCOME_TRY( auto hash, storeNode(*child, batch)); // when a node is written to the storage, it is replaced with a dummy // node to avoid memory waste child = std::make_shared(hash); @@ -99,7 +99,7 @@ namespace sgns::storage::trie { if (parent->children.at(idx)->isDummy()) { auto dummy = std::dynamic_pointer_cast(parent->children.at(idx)); - OUTCOME_TRY((auto &&, n), retrieveNode(dummy->db_key)); + BOOST_OUTCOME_TRY( auto n, retrieveNode(dummy->db_key)); parent->children.at(idx) = n; } return parent->children.at(idx); @@ -110,8 +110,8 @@ namespace sgns::storage::trie { if (db_key.empty() || db_key == getEmptyRootHash()) { return nullptr; } - OUTCOME_TRY((auto &&, 
enc), backend_->get(db_key)); - OUTCOME_TRY((auto &&, n), codec_->decodeNode(enc)); + BOOST_OUTCOME_TRY( auto enc, backend_->get(db_key)); + BOOST_OUTCOME_TRY( auto n, codec_->decodeNode(enc)); return std::dynamic_pointer_cast(n); } diff --git a/src/storage/trie/supergenius_trie/supergenius_trie_cursor.cpp b/src/storage/trie/supergenius_trie/supergenius_trie_cursor.cpp index ab2cfaf4a..95ffa362b 100644 --- a/src/storage/trie/supergenius_trie/supergenius_trie_cursor.cpp +++ b/src/storage/trie/supergenius_trie/supergenius_trie_cursor.cpp @@ -29,11 +29,11 @@ namespace sgns::storage::trie { outcome::result> SuperGeniusTrieCursor::createAt(const base::Buffer &key, const SuperGeniusTrie &trie) { auto c = std::make_unique(trie); - OUTCOME_TRY((auto &&, node), + BOOST_OUTCOME_TRY( auto node, trie.getNode(trie.getRoot(), c->codec_.keyToNibbles(key))); c->visited_root_ = true; // root is always visited first c->current_ = node; - OUTCOME_TRY((auto &&, last_child_path), c->constructLastVisitedChildPath(key)); + BOOST_OUTCOME_TRY( auto last_child_path, c->constructLastVisitedChildPath(key)); c->last_visited_child_ = std::move(last_child_path); return c; @@ -51,9 +51,9 @@ namespace sgns::storage::trie { return Error::NULL_ROOT; } visited_root_ = true; // root is always visited first - OUTCOME_TRY((auto &&, last_child_path), constructLastVisitedChildPath(key)); + BOOST_OUTCOME_TRY( auto last_child_path, constructLastVisitedChildPath(key)); auto nibbles = SuperGeniusCodec::keyToNibbles(key); - OUTCOME_TRY((auto &&, node), trie_.getNode(trie_.getRoot(), nibbles)); + BOOST_OUTCOME_TRY( auto node, trie_.getNode(trie_.getRoot(), nibbles)); bool node_has_value = node != nullptr && node->value.has_value(); if (node_has_value) { @@ -83,7 +83,7 @@ namespace sgns::storage::trie { // find the rightmost child for (int8_t i = branch->kMaxChildren - 1; i >= 0; i--) { if (branch->children.at(i) != nullptr) { - OUTCOME_TRY((auto &&, c), trie_.retrieveChild(branch, i)); + BOOST_OUTCOME_TRY( auto 
c, trie_.retrieveChild(branch, i)); last_visited_child_.emplace_back(branch, i); current = c; } @@ -134,7 +134,7 @@ namespace sgns::storage::trie { p = last_visited_child_.back().parent; // p.parent } auto i = getNextChildIdx(p, last_visited_child_.back().child_idx); - OUTCOME_TRY((auto &&, c), trie_.retrieveChild(p, i)); + BOOST_OUTCOME_TRY( auto c, trie_.retrieveChild(p, i)); current_ = c; updateLastVisitedChild(p, i); @@ -154,7 +154,7 @@ namespace sgns::storage::trie { p = last_visited_child_.back().parent; // p.parent } auto i = getNextChildIdx(p, last_visited_child_.back().child_idx); - OUTCOME_TRY((auto &&, c), trie_.retrieveChild(p, i)); + BOOST_OUTCOME_TRY( auto c, trie_.retrieveChild(p, i)); current_ = c; updateLastVisitedChild(p, i); } @@ -239,7 +239,7 @@ namespace sgns::storage::trie { const base::Buffer &key) -> outcome::result< std::list> { - OUTCOME_TRY((auto &&, path), trie_.getPath(trie_.getRoot(), codec_.keyToNibbles(key))); + BOOST_OUTCOME_TRY( auto path, trie_.getPath(trie_.getRoot(), codec_.keyToNibbles(key))); std::list last_visited_child; for (auto &&[branch, idx] : path) { diff --git a/src/storage/trie/supergenius_trie/supergenius_trie_impl.cpp b/src/storage/trie/supergenius_trie/supergenius_trie_impl.cpp index 1d1c15cbd..40ac0c1b9 100644 --- a/src/storage/trie/supergenius_trie/supergenius_trie_impl.cpp +++ b/src/storage/trie/supergenius_trie/supergenius_trie_impl.cpp @@ -47,7 +47,7 @@ namespace sgns::storage::trie { // insert fetches a sequence of nodes (a path) from the storage and // these nodes are processed in memory, so any changes applied to them // will be written back to the storage only on storeNode call - OUTCOME_TRY((auto &&, n), + BOOST_OUTCOME_TRY( auto n, insert(root, k_enc, std::make_shared(k_enc, value))); root_ = n; @@ -60,7 +60,7 @@ namespace sgns::storage::trie { return outcome::success(); } auto key_nibbles = SuperGeniusCodec::keyToNibbles(prefix); - OUTCOME_TRY((auto &&, new_root), detachNode(root_, key_nibbles)); + 
BOOST_OUTCOME_TRY( auto new_root, detachNode(root_, key_nibbles)); root_ = new_root; return outcome::success(); @@ -144,9 +144,9 @@ namespace sgns::storage::trie { parent->value = node->value; return parent; } - OUTCOME_TRY((auto &&, child), retrieveChild(parent, key_nibbles[length])); + BOOST_OUTCOME_TRY( auto child, retrieveChild(parent, key_nibbles[length])); if (child) { - OUTCOME_TRY((auto &&, n), insert(child, key_nibbles.subspan(length + 1), node)); + BOOST_OUTCOME_TRY( auto n, insert(child, key_nibbles.subspan(length + 1), node)); parent->children.at(key_nibbles[length]) = n; return parent; } @@ -156,14 +156,13 @@ namespace sgns::storage::trie { } auto br = std::make_shared(key_nibbles.subspan(0, length)); auto parentIdx = parent->key_nibbles[length]; - OUTCOME_TRY((auto &&, - new_branch), + BOOST_OUTCOME_TRY( auto new_branch, insert(nullptr, parent->key_nibbles.subspan(length + 1), parent)); br->children.at(parentIdx) = new_branch; if (key_nibbles.size() <= length) { br->value = node->value; } else { - OUTCOME_TRY((auto &&, new_child), + BOOST_OUTCOME_TRY( auto new_child, insert(nullptr, key_nibbles.subspan(length + 1), node)); br->children.at(key_nibbles[length]) = new_child; } @@ -176,7 +175,7 @@ namespace sgns::storage::trie { return TrieError::NO_VALUE; } auto nibbles = SuperGeniusCodec::keyToNibbles(key); - OUTCOME_TRY((auto &&, node), getNode(root_, nibbles)); + BOOST_OUTCOME_TRY( auto node, getNode(root_, nibbles)); if (node && node->value) { return node->value.get(); } @@ -201,7 +200,7 @@ namespace sgns::storage::trie { return nullptr; } auto parent_as_branch = std::dynamic_pointer_cast(parent); - OUTCOME_TRY((auto &&, n), retrieveChild(parent_as_branch, key_nibbles[length])); + BOOST_OUTCOME_TRY( auto n, retrieveChild(parent_as_branch, key_nibbles[length])); return getNode(n, key_nibbles.subspan(length + 1)); } case T::Leaf: @@ -235,8 +234,8 @@ namespace sgns::storage::trie { return Path{}; } auto parent_as_branch = 
std::dynamic_pointer_cast(parent); - OUTCOME_TRY((auto &&, n), retrieveChild(parent_as_branch, key_nibbles[length])); - OUTCOME_TRY((auto &&, path), getPath(n, key_nibbles.subspan(length + 1))); + BOOST_OUTCOME_TRY( auto n, retrieveChild(parent_as_branch, key_nibbles[length])); + BOOST_OUTCOME_TRY( auto path, getPath(n, key_nibbles.subspan(length + 1))); path.emplace_front( parent_as_branch, key_nibbles[length] ); return path; } @@ -274,7 +273,7 @@ namespace sgns::storage::trie { auto key_nibbles = SuperGeniusCodec::keyToNibbles(key); // delete node will fetch nodes that it needs from the storage (the nodes // typically are a path in the trie) and work on them in memory - OUTCOME_TRY((auto &&, n), deleteNode(root_, key_nibbles)); + BOOST_OUTCOME_TRY( auto n, deleteNode(root_, key_nibbles)); // afterwards, the nodes are written back to the storage and the new trie // root hash is obtained root_ = n; @@ -298,13 +297,13 @@ namespace sgns::storage::trie { parent->value = boost::none; newRoot = parent; } else { - OUTCOME_TRY((auto &&, child), + BOOST_OUTCOME_TRY( auto child, retrieveChild(parent_as_branch, key_nibbles[length])); - OUTCOME_TRY((auto &&, n), deleteNode(child, key_nibbles.subspan(length + 1))); + BOOST_OUTCOME_TRY( auto n, deleteNode(child, key_nibbles.subspan(length + 1))); newRoot = parent; parent_as_branch->children.at(key_nibbles[length]) = n; } - OUTCOME_TRY((auto &&, n), handleDeletion(parent_as_branch, newRoot, key_nibbles)); + BOOST_OUTCOME_TRY( auto n, handleDeletion(parent_as_branch, newRoot, key_nibbles)); return n; } case T::Leaf: @@ -336,7 +335,7 @@ namespace sgns::storage::trie { break; } } - OUTCOME_TRY((auto &&, child), retrieveChild(parent, idx)); + BOOST_OUTCOME_TRY( auto child, retrieveChild(parent, idx)); using T = SuperGeniusNode::Type; if (child->getTrieType() == T::Leaf) { auto newKey = parent->key_nibbles; @@ -386,11 +385,11 @@ namespace sgns::storage::trie { || parent->getTrieType() == T::BranchEmptyValue) { auto branch = 
std::dynamic_pointer_cast(parent); auto length = getCommonPrefixLength(parent->key_nibbles, prefix_nibbles); - OUTCOME_TRY((auto &&, child), retrieveChild(branch, prefix_nibbles[length])); + BOOST_OUTCOME_TRY( auto child, retrieveChild(branch, prefix_nibbles[length])); if (child == nullptr) { return parent; } - OUTCOME_TRY((auto &&, n), detachNode(child, prefix_nibbles.subspan(length + 1))); + BOOST_OUTCOME_TRY( auto n, detachNode(child, prefix_nibbles.subspan(length + 1))); branch->children.at(prefix_nibbles[length]) = n; return branch; } diff --git a/test/src/CMakeLists.txt b/test/src/CMakeLists.txt index 4a8ee65e8..38103675f 100644 --- a/test/src/CMakeLists.txt +++ b/test/src/CMakeLists.txt @@ -8,6 +8,7 @@ add_subdirectory(local_secure_storage) add_subdirectory(multiaccount) add_subdirectory(price_retrieval) add_subdirectory(primitives) +add_subdirectory(processing_datatypes) add_subdirectory(processing_nodes) add_subdirectory(processing_schema) add_subdirectory(processing) diff --git a/test/src/account/utxo_manager_test.cpp b/test/src/account/utxo_manager_test.cpp index ab3d17100..f1dc09808 100644 --- a/test/src/account/utxo_manager_test.cpp +++ b/test/src/account/utxo_manager_test.cpp @@ -55,30 +55,35 @@ class UTXOManagerTest : public test::CRDTFixture TEST_F( UTXOManagerTest, InitialUTXOCount ) { // Insert four unique UTXOs - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 2, 20, sgns::TokenID::FromBytes( { 0x03 } ) ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 3, 40, sgns::TokenID::FromBytes( { 0x04 } ) ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ).value() ); + EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ).value() ); + 
EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 2, 20, sgns::TokenID::FromBytes( { 0x03 } ) ) ).value() ); + EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 3, 40, sgns::TokenID::FromBytes( { 0x04 } ) ) ).value() ); // Duplicate should be ignored - EXPECT_FALSE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ) ); + EXPECT_FALSE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ).value() ); EXPECT_EQ( utxo_manager->GetUTXOs().size(), 4u ); } TEST_F( UTXOManagerTest, TotalBalance ) { - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ).value() ); EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 2, 20, TOKEN_1 ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 2, 20, TOKEN_1 ) ).value() ); EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 3, 40, sgns::TokenID::FromBytes( { 0x03 } ) ) ) ); EXPECT_EQ( utxo_manager->GetBalance(), 140ull ); } TEST_F( UTXOManagerTest, BalanceByToken ) { - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 2, 20, TOKEN_1 ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 3, 40, sgns::TokenID::FromBytes( { 0x03 } ) ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ).value() ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 2, 20, TOKEN_1 ) ).value() ); + EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ).value() ); + EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 3, 40, 
sgns::TokenID::FromBytes( { 0x03 } ) ) ).value() ); EXPECT_EQ( utxo_manager->GetBalance( TOKEN_1 ), 70ull ); EXPECT_EQ( utxo_manager->GetBalance( sgns::TokenID::FromBytes( { 0x02 } ) ), 30ull ); EXPECT_EQ( utxo_manager->GetBalance( sgns::TokenID::FromBytes( { 0x03 } ) ), 40ull ); @@ -86,25 +91,29 @@ TEST_F( UTXOManagerTest, BalanceByToken ) TEST_F( UTXOManagerTest, BalanceByTokenNonexistent ) { - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 2, 20, TOKEN_1 ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 3, 40, sgns::TokenID::FromBytes( { 0x03 } ) ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ).value() ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 2, 20, TOKEN_1 ) ).value() ); + EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ).value() ); + EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 3, 40, sgns::TokenID::FromBytes( { 0x03 } ) ) ).value() ); EXPECT_EQ( utxo_manager->GetBalance( sgns::TokenID::FromBytes( { 0xFF } ) ), 0ull ); } TEST_F( UTXOManagerTest, StringTemplateBalance ) { - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 50, sgns::TokenID::FromBytes( { 0x02 } ) ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ).value() ); + EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 50, sgns::TokenID::FromBytes( { 0x02 } ) ) ).value() ); std::string s = std::to_string( utxo_manager->GetBalance() ); EXPECT_EQ( s, std::to_string( utxo_manager->GetBalance() ) ); } TEST_F( UTXOManagerTest, RefreshNoUTXOsLeavesAll ) { - EXPECT_TRUE( utxo_manager->PutUTXO( 
GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ).value() ); + EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ).value() ); size_t before = utxo_manager->GetUTXOs().size(); utxo_manager->ConsumeUTXOs( {} ); EXPECT_EQ( utxo_manager->GetUTXOs().size(), before ); @@ -112,9 +121,10 @@ TEST_F( UTXOManagerTest, RefreshNoUTXOsLeavesAll ) TEST_F( UTXOManagerTest, RefreshPartialUTXOsRemovesOnlySpecified ) { - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 2, 20, TOKEN_1 ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ).value() ); + EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ).value() ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 2, 20, TOKEN_1 ) ).value() ); InputUTXOInfo info; info.txid_hash_ = DUMMY_HASH; info.output_idx_ = 1; // remove idx 1 @@ -125,8 +135,9 @@ TEST_F( UTXOManagerTest, RefreshPartialUTXOsRemovesOnlySpecified ) TEST_F( UTXOManagerTest, RefreshAllUTXOsRemovesAll ) { - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 50, TOKEN_1 ) ).value() ); + EXPECT_TRUE( + utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 30, sgns::TokenID::FromBytes( { 0x02 } ) ) ).value() ); std::vector infos; for ( const auto &utxo : utxo_manager->GetUTXOs() ) { @@ -135,14 +146,14 @@ TEST_F( 
UTXOManagerTest, RefreshAllUTXOsRemovesAll ) i.output_idx_ = utxo.GetOutputIdx(); infos.push_back( i ); } - utxo_manager->ConsumeUTXOs( infos ); + ASSERT_FALSE( utxo_manager->ConsumeUTXOs( infos ).has_error() ); EXPECT_TRUE( utxo_manager->GetUTXOs().empty() ); EXPECT_EQ( utxo_manager->GetBalance(), 0ull ); } TEST_F( UTXOManagerTest, VerifyParameters ) { - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( HASHER.sha2_256( {} ), 0, 420, TOKEN_1 ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( HASHER.sha2_256( {} ), 0, 420, TOKEN_1 ) ).value() ); auto tx = utxo_manager->CreateTxParameter( 69, "foobar", TOKEN_1 ); EXPECT_TRUE( tx.has_value() ); EXPECT_TRUE( utxo_manager->VerifyParameters( tx.value() ) ); @@ -159,8 +170,8 @@ TEST_F( UTXOManagerTest, VerifyParameters ) TEST_F( UTXOManagerTest, Storage ) { - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 420, TOKEN_1 ) ) ); - EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 420, TOKEN_1 ) ) ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 0, 420, TOKEN_1 ) ).value() ); + EXPECT_TRUE( utxo_manager->PutUTXO( GeniusUTXO( DUMMY_HASH, 1, 420, TOKEN_1 ) ).value() ); auto res = utxo_manager->LoadUTXOs( db_->GetDataStore() ); EXPECT_TRUE( res.has_value() ); diff --git a/test/src/account_creation/account_creation_test.cpp b/test/src/account_creation/account_creation_test.cpp index 6b1c841fc..3a4c6c045 100644 --- a/test/src/account_creation/account_creation_test.cpp +++ b/test/src/account_creation/account_creation_test.cpp @@ -33,43 +33,38 @@ TEST_F( AccountCreationTest, CreationWithEthereumKey ) fs::remove_all( "./account2" ); }; - auto account = GeniusAccount::New( sgns::TokenID::FromBytes( { 0x00 } ), - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", - fs::path( "./account1" ) ); + auto account1 = GeniusAccount::New( sgns::TokenID::FromBytes( { 0x00 } ), + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", + fs::path( "./account1" ) ); auto 
account2 = GeniusAccount::New( sgns::TokenID::FromBytes( { 0x00 } ), - "deedbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", - fs::path( "./account2" ) ); - std::string address_main = account->GetAddress(); + "deedbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", + fs::path( "./account2" ) ); + std::string address_main1 = account1->GetAddress(); std::string address_main2 = account2->GetAddress(); - + EXPECT_EQ( - address_main, + address_main1, "c865650410bdc1328cf99dc011c14cb52dc0aeb43b5f49dbf64a478fe2f6eafd2056ed0155770ba0a2832c1adb65c75df043c62e772d167437e4532d1b4e788f" ) - << " Address is not expected" << address_main; - EXPECT_NE( address_main, address_main2 ) << "Addresses are equal even though they should not be"; + << " Address is not expected" << address_main1; + EXPECT_NE( address_main1, address_main2 ) << "Addresses are equal even though they should not be"; } -TEST_F( AccountCreationTest, CreationWithCredentials ) +TEST_F( AccountCreationTest, CreationWithMnemonic ) { - this->cleanup = [] - { - fs::remove_all( "./account1" ); - fs::remove_all( "./account2" ); - }; + this->cleanup = [] { fs::remove_all( "./mnemonic" ); }; - auto account = GeniusAccount::New( sgns::TokenID::FromBytes( { 0x00 } ), - { "account1@gnus.ai", "1234" }, - fs::path( "./account1" ) ); - auto account2 = GeniusAccount::New( sgns::TokenID::FromBytes( { 0x00 } ), - { "account2@gnus.ai", "4321" }, - fs::path( "./account2" ) ); - std::string address_main = account->GetAddress(); - std::string address_main2 = account2->GetAddress(); + auto account = GeniusAccount::NewFromMnemonic( + sgns::TokenID::FromBytes( { 0x00 } ), + "picture tooth meat version snack comic tribe craft switch cricket vacuum squeeze", + fs::path( "./mnemonic" ) ); - EXPECT_NE( address_main, address_main2 ) << "Addresses are equal even though they should not be"; + ASSERT_FALSE( account == nullptr ) << "Could not create account from mnemonic"; + ASSERT_EQ( + account->GetAddress(), + 
"27d36713d68c35403832cc321199dac8ab5d2e66bea4d72718b84f6acb1fa69fb716991b5a39f7b3707822ba9eef059624c3bfde74b025f03e591d32c6d7b3ab" ); } -TEST_F( AccountCreationTest, CreationWithRandomKeys ) +TEST_F( AccountCreationTest, CreationWithRandomKey ) { this->cleanup = [] { @@ -77,10 +72,10 @@ TEST_F( AccountCreationTest, CreationWithRandomKeys ) fs::remove_all( "./account2" ); }; - auto account = GeniusAccount::New( sgns::TokenID::FromBytes( { 0x00 } ), fs::path( "./account1" ) ); + auto account1 = GeniusAccount::New( sgns::TokenID::FromBytes( { 0x00 } ), fs::path( "./account1" ) ); auto account2 = GeniusAccount::New( sgns::TokenID::FromBytes( { 0x00 } ), fs::path( "./account2" ) ); - std::string address_main = account->GetAddress(); + std::string address_main1 = account1->GetAddress(); std::string address_main2 = account2->GetAddress(); - EXPECT_NE( address_main, address_main2 ) << "Addresses are equal even though they should not be"; + EXPECT_NE( address_main1, address_main2 ) << "Addresses are equal even though they should not be"; } diff --git a/test/src/base/CMakeLists.txt b/test/src/base/CMakeLists.txt index a7d32d173..6519bae72 100644 --- a/test/src/base/CMakeLists.txt +++ b/test/src/base/CMakeLists.txt @@ -19,15 +19,6 @@ target_link_libraries(blob_test blob ) -addtest(mp_utils_test - mp_utils_test.cpp -) -target_link_libraries(mp_utils_test - mp_utils - blob -) - - addtest(scaled_integer_test scaled_integer_test.cpp ) diff --git a/test/src/base/mp_utils_test.cpp b/test/src/base/mp_utils_test.cpp deleted file mode 100644 index ea9e3755c..000000000 --- a/test/src/base/mp_utils_test.cpp +++ /dev/null @@ -1,76 +0,0 @@ -#include "base/mp_utils.hpp" - -#include - -#include - -using boost::multiprecision::uint128_t; -using boost::multiprecision::uint256_t; -using sgns::base::bytes_to_uint128_t; -using sgns::base::bytes_to_uint256_t; -using sgns::base::uint128_t_to_bytes; -using sgns::base::uint256_t_to_bytes; - -#define ASSERT_TO_FROM_BYTES_EQUAL(value, integer_size) \ 
- { \ - auto v = value; \ - auto v_bytes = uint##integer_size##_t_to_bytes(v); \ - ASSERT_EQ(bytes_to_uint##integer_size##_t(v_bytes), v); \ - } - -/** - * @given a uint128 - * @when converting it to and then from bytes - * @then the result matches with the original one - */ -TEST(MpUtilsTest, Uint128) { - ASSERT_TO_FROM_BYTES_EQUAL(std::numeric_limits::max(), 128); - ASSERT_TO_FROM_BYTES_EQUAL(std::numeric_limits::min(), 128); - ASSERT_TO_FROM_BYTES_EQUAL(static_cast(std::numeric_limits::max())*4+1, 128); - ASSERT_TO_FROM_BYTES_EQUAL(1337, 128); -} - -/** - * @given a uint256 - * @when converting it to and then from bytes - * @then the result matches with the original one - */ -TEST(MpUtilsTest, Uint256) { - ASSERT_TO_FROM_BYTES_EQUAL(std::numeric_limits::max(), 256); - ASSERT_TO_FROM_BYTES_EQUAL(std::numeric_limits::min(), 256); - ASSERT_TO_FROM_BYTES_EQUAL(static_cast(std::numeric_limits::max())*4+1, 256); - ASSERT_TO_FROM_BYTES_EQUAL(1337, 256); -} - -/** - * @given bigint value and known serialized representation of it - * @when serialize bigint to bytes - * @then expected serialized bytes are returned - */ -TEST(MPUtilsTest, UInt128ConvertTest) { - boost::multiprecision::uint128_t a{"4961875008018162238211470133173564236"}; - - std::array encoded; - encoded[0] = 'L'; - encoded[1] = '3'; - encoded[2] = '\xa2'; - encoded[3] = '\n'; - encoded[4] = 'C'; - encoded[5] = '\xf4'; - encoded[6] = '5'; - encoded[7] = '\x93'; - encoded[8] = '\xc5'; - encoded[9] = '\x05'; - encoded[10] = '\xe0'; - encoded[11] = ']'; - encoded[12] = 'S'; - encoded[13] = '\x9f'; - encoded[14] = '\xbb'; - encoded[15] = '\x03'; - - auto a_encoded = uint128_t_to_bytes(a); - ASSERT_EQ(encoded, a_encoded) << "a = " << a; - - auto a_decoded = bytes_to_uint128_t(a_encoded); - ASSERT_EQ(a_decoded, a); -} diff --git a/test/src/blockchain/blockchain_genesis_test.cpp b/test/src/blockchain/blockchain_genesis_test.cpp index a4a3c762e..264d9ee0a 100644 --- 
a/test/src/blockchain/blockchain_genesis_test.cpp +++ b/test/src/blockchain/blockchain_genesis_test.cpp @@ -304,11 +304,7 @@ TEST_F( BlockchainGenesisTest, WithAuthorizationCanSyncAndProcessTransactions ) auto balance_regular_2_before = node_regular_2->GetBalance(); // Mint tokens on the first regular node after sync is confirmed - auto mint_result = node_regular_1->MintTokens( mint_amount, - "", - "", - token_id, - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mint_result = node_regular_1->MintTokens( mint_amount, "", "", token_id ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction failed or timed out"; auto [mint_tx_id, mint_duration] = mint_result.value(); diff --git a/test/src/crdt/globaldb_integration.cpp b/test/src/crdt/globaldb_integration.cpp index 0f6482cc7..f00c97f0f 100644 --- a/test/src/crdt/globaldb_integration.cpp +++ b/test/src/crdt/globaldb_integration.cpp @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include #include #include @@ -32,7 +34,7 @@ #include #include #include -#include +#include namespace { @@ -112,9 +114,10 @@ class GlobalDBIntegrationTest : public ::testing::Test const std::string listenIp = "127.0.0.1"; pubsub->Start( currentPubsubPort, {}, listenIp, {} ); - auto io = std::make_shared(); - auto scheduler = std::make_shared( io, - libp2p::protocol::SchedulerConfig{} ); + auto io = std::make_shared(); + auto scheduler = std::make_shared( + std::make_shared( io ), + libp2p::basic::Scheduler::Config{ std::chrono::milliseconds( 100 ) } ); auto graphsyncnetwork = std::make_shared( pubsub->GetHost(), scheduler ); auto generator = std::make_shared(); @@ -130,7 +133,7 @@ class GlobalDBIntegrationTest : public ::testing::Test { return; } - auto db = std::move(globaldb_ret.value()); + auto db = std::move( globaldb_ret.value() ); ++currentPubsubPort; @@ -146,8 +149,7 @@ class GlobalDBIntegrationTest : public ::testing::Test { for ( size_t j = i + 1; j < nodes_.size(); ++j ) { - 
nodes_[i].pubsub->AddPeers( - { nodes_[j].pubsub->GetInterfaceAddress() } ); + nodes_[i].pubsub->AddPeers( { nodes_[j].pubsub->GetInterfaceAddress() } ); } } std::this_thread::sleep_for( delay ); @@ -222,7 +224,7 @@ TEST_F( GlobalDBIntegrationTest, ReplicationWithoutTopicSuccessfulTest ) for ( auto &node : testNodes->getNodes() ) { - node.db->AddBroadcastTopic( "firstTopic" ); + ASSERT_FALSE( node.db->AddBroadcastTopic( "firstTopic" ).has_error() ); node.db->AddListenTopic( "firstTopic" ); } @@ -266,7 +268,7 @@ TEST_F( GlobalDBIntegrationTest, ReplicationViaTopicBroadcastTest ) for ( auto &node : testNodes->getNodes() ) { - node.db->AddBroadcastTopic( "test_topic" ); + ASSERT_FALSE( node.db->AddBroadcastTopic( "test_topic" ).has_error() ); node.db->AddListenTopic( "test_topic" ); } @@ -310,13 +312,13 @@ TEST_F( GlobalDBIntegrationTest, ReplicationAcrossMultipleTopicsTest ) for ( auto &node : testNodes->getNodes() ) { - node.db->AddBroadcastTopic( "firstTopic" ); + ASSERT_FALSE( node.db->AddBroadcastTopic( "firstTopic" ).has_error() ); node.db->AddListenTopic( "firstTopic" ); - node.db->AddBroadcastTopic( "topic_A" ); + ASSERT_FALSE( node.db->AddBroadcastTopic( "topic_A" ).has_error() ); node.db->AddListenTopic( "topic_A" ); - node.db->AddBroadcastTopic( "topic_B" ); + ASSERT_FALSE( node.db->AddBroadcastTopic( "topic_B" ).has_error() ); node.db->AddListenTopic( "topic_B" ); } @@ -365,7 +367,7 @@ TEST_F( GlobalDBIntegrationTest, PreventDoubleCommitTest ) { auto testNodes = std::make_unique(); testNodes->addNode( "globaldb_node1" ); - testNodes->getNodes()[0].db->AddBroadcastTopic( "firstTopic" ); + ASSERT_FALSE( testNodes->getNodes()[0].db->AddBroadcastTopic( "firstTopic" ).has_error() ); testNodes->connectNodes(); using sgns::crdt::HierarchicalKey; sgns::base::Buffer value; @@ -408,8 +410,8 @@ TEST_F( GlobalDBIntegrationTest, DirectPutWithTopicBroadcastTest ) for ( auto &node : testNodes->getNodes() ) { - node.db->AddBroadcastTopic( "firstTopic" ); - 
node.db->AddBroadcastTopic( "direct_topic" ); + ASSERT_FALSE( node.db->AddBroadcastTopic( "firstTopic" ).has_error() ); + ASSERT_FALSE( node.db->AddBroadcastTopic( "direct_topic" ).has_error() ); node.db->AddListenTopic( "direct_topic" ); } testNodes->connectNodes(); @@ -449,7 +451,7 @@ TEST_F( GlobalDBIntegrationTest, DirectPutWithoutTopicBroadcastTest ) for ( auto &node : testNodes->getNodes() ) { - node.db->AddBroadcastTopic( "firstTopic" ); + ASSERT_FALSE( node.db->AddBroadcastTopic( "firstTopic" ).has_error() ); node.db->AddListenTopic( "firstTopic" ); } testNodes->connectNodes(); @@ -489,11 +491,11 @@ TEST_F( GlobalDBIntegrationTest, NonSubscriberDoesNotReceiveTopicMessageTest ) for ( auto &node : testNodes->getNodes() ) { - node.db->AddBroadcastTopic( "first_topic" ); + ASSERT_FALSE( node.db->AddBroadcastTopic( "first_topic" ).has_error() ); } - testNodes->getNodes()[0].db->AddBroadcastTopic( "test_topic" ); + ASSERT_FALSE( testNodes->getNodes()[0].db->AddBroadcastTopic( "test_topic" ).has_error() ); testNodes->getNodes()[0].db->AddListenTopic( "test_topic" ); - testNodes->getNodes()[1].db->AddBroadcastTopic( "test_topic" ); + ASSERT_FALSE( testNodes->getNodes()[1].db->AddBroadcastTopic( "test_topic" ).has_error() ); testNodes->getNodes()[1].db->AddListenTopic( "test_topic" ); testNodes->connectNodes(); using sgns::crdt::HierarchicalKey; @@ -534,7 +536,7 @@ TEST_F( GlobalDBIntegrationTest, UnconnectedNodeDoesNotReplicateBroadcastMessage for ( auto &node : testNodes->getNodes() ) { - node.db->AddBroadcastTopic( "isolated_topic" ); + ASSERT_FALSE( node.db->AddBroadcastTopic( "isolated_topic" ).has_error() ); node.db->AddListenTopic( "isolated_topic" ); } diff --git a/test/src/graphsync/pubsub_graphsync_test.cpp b/test/src/graphsync/pubsub_graphsync_test.cpp index d81adab36..9b4e0473e 100644 --- a/test/src/graphsync/pubsub_graphsync_test.cpp +++ b/test/src/graphsync/pubsub_graphsync_test.cpp @@ -1,7 +1,9 @@ +#include #include #include #include +#include 
#include #include #include @@ -28,7 +30,7 @@ #include "testutil/wait_condition.hpp" #include #include -#include "libp2p/protocol/common/asio/asio_scheduler.hpp" +#include std::string GetLoggingSystem( const std::string & ) { @@ -128,12 +130,14 @@ TEST_F( PubsubGraphsyncTest, MultiGlobalDBTest ) &resultTime ); - auto scheduler = std::make_shared( io_context, - libp2p::protocol::SchedulerConfig{} ); - auto graphsyncnetwork = std::make_shared( pubs1->GetHost(), scheduler ); - auto generator = std::make_shared(); - auto scheduler2 = std::make_shared( io_context, - libp2p::protocol::SchedulerConfig{} ); + auto scheduler = std::make_shared( + std::make_shared( io_context ), + libp2p::basic::Scheduler::Config{ std::chrono::milliseconds( 100 ) } ); + auto graphsyncnetwork = std::make_shared( pubs1->GetHost(), scheduler ); + auto generator = std::make_shared(); + auto scheduler2 = std::make_shared( + std::make_shared( io_context ), + libp2p::basic::Scheduler::Config{ std::chrono::milliseconds( 100 ) } ); auto graphsyncnetwork2 = std::make_shared( pubs2->GetHost(), scheduler2 ); diff --git a/test/src/multiaccount/multi_account_sync.cpp b/test/src/multiaccount/multi_account_sync.cpp index 882208475..0b08d24f9 100644 --- a/test/src/multiaccount/multi_account_sync.cpp +++ b/test/src/multiaccount/multi_account_sync.cpp @@ -32,6 +32,8 @@ class MultiAccountTest : public ::testing::Test { protected: + static constexpr std::string_view FILE_PREFIX = "node_multi_account_"; + std::shared_ptr CreateNode( const std::string &self_address, const std::string &dev_addr, const std::string &tokenValue, @@ -43,14 +45,13 @@ class MultiAccountTest : public ::testing::Test static std::atomic nodeCounter{ 0 }; int id = nodeCounter.fetch_add( 1 ); - std::string binaryPath = boost::dll::program_location().parent_path().string(); - const char *filePath = ::testing::UnitTest::GetInstance()->current_test_info()->file(); - std::string fileStem = std::filesystem::path( filePath ).stem().string(); - auto 
outPath = binaryPath + "/node_multi_account_" + std::to_string( id ) + "/"; + auto binaryPath = boost::dll::program_location().parent_path(); + auto outPath = binaryPath / ( std::string( FILE_PREFIX ) + std::to_string( id ) ); + auto outPathStr = outPath.generic_string() + '/'; DevConfig_st devConfig = { "", "0.65", tokenValue, tokenId, "" }; std::strncpy( devConfig.Addr, dev_addr.c_str(), sizeof( devConfig.Addr ) - 1 ); - std::strncpy( devConfig.BaseWritePath, outPath.c_str(), sizeof( devConfig.BaseWritePath ) - 1 ); + std::strncpy( devConfig.BaseWritePath, outPathStr.c_str(), sizeof( devConfig.BaseWritePath ) - 1 ); devConfig.Addr[sizeof( devConfig.Addr ) - 1] = '\0'; devConfig.BaseWritePath[sizeof( devConfig.BaseWritePath ) - 1] = '\0'; @@ -96,9 +97,6 @@ class MultiAccountTest : public ::testing::Test void SetUp() override { - // Clean up any previous test runs - std::string binaryPath = boost::dll::program_location().parent_path().string(); - // Helper to remove directory with retry on Windows (file locks may not be immediately released) auto removeWithRetry = []( const std::string &path ) { @@ -122,9 +120,16 @@ class MultiAccountTest : public ::testing::Test } }; - removeWithRetry( binaryPath + "/node_multi_account_0/" ); - removeWithRetry( binaryPath + "/node_multi_account_1/" ); - removeWithRetry( binaryPath + "/node_multi_account_2/" ); + auto binaryPath = boost::dll::program_location().parent_path(); + + // Clean up any previous test runs + for ( auto &entry : boost::filesystem::directory_iterator( binaryPath ) ) + { + if ( entry.is_directory() && entry.path().filename().string().find( FILE_PREFIX ) != std::string::npos ) + { + removeWithRetry( entry.path().string() ); + } + } } void TearDown() override @@ -165,24 +170,12 @@ TEST_F( MultiAccountTest, SyncThroughEachOther ) auto balance_original_start = node_original->GetBalance(); // Mint some tokens - auto mint_result = node_original->MintTokens( 100, - "", - "", - TokenID::FromBytes( { 0x00 } ), - 
std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mint_result = node_original->MintTokens( 100, "", "", TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction failed or timed out on node_original"; - mint_result = node_original->MintTokens( 2000, - "", - "", - TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + mint_result = node_original->MintTokens( 2000, "", "", TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction failed or timed out on node_original"; - mint_result = node_original->MintTokens( 30, - "", - "", - TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + mint_result = node_original->MintTokens( 30, "", "", TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction failed or timed out on node_original"; @@ -202,11 +195,7 @@ TEST_F( MultiAccountTest, SyncThroughEachOther ) std::chrono::milliseconds( 30000 ), "node_duplicated not synced" ); - mint_result = node_duplicated->MintTokens( 60000, - "", - "", - TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + mint_result = node_duplicated->MintTokens( 60000, "", "", TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction failed or timed out on node_duplicated"; test::assertWaitForCondition( @@ -295,8 +284,7 @@ TEST_F( MultiAccountTest, CRDTFilterDuplicateTx ) auto mint_result_1 = node_same_addr_1->MintTokens( 50000000000, // 50 GNUS "", "", - sgns::TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + sgns::TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result_1.has_value() ) << "Mint transaction failed on node_same_addr_1"; std::cout << "Mint transaction 1 ID: " << mint_result_1.value().first << std::endl; diff --git 
a/test/src/processing_datatypes/CMakeLists.txt b/test/src/processing_datatypes/CMakeLists.txt new file mode 100644 index 000000000..9d763229f --- /dev/null +++ b/test/src/processing_datatypes/CMakeLists.txt @@ -0,0 +1,80 @@ +addtest(processing_datatypes_test + processing_datatypes_test.cpp +) + +target_include_directories(processing_datatypes_test PRIVATE ${AsyncIOManager_INCLUDE_DIR}) + +target_link_libraries(processing_datatypes_test + nlohmann_json + ProcessingBase +) + +add_custom_command(TARGET processing_datatypes_test POST_BUILD + COMMAND ${CMAKE_COMMAND} -E make_directory + "$/processing_datatypes" + COMMENT "Creating processing_datatypes directory" +) + +add_custom_command(TARGET processing_datatypes_test POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_if_different + "${CMAKE_CURRENT_SOURCE_DIR}/bert-tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/string-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/test_input.txt" + "${CMAKE_CURRENT_SOURCE_DIR}/spleen_ct_seg.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/spleen_15.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/texture3d-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/texture1d_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/texture1d_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/texture1d_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/texture1d-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/bool_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/bool_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/bool_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/bool-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/buffer_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/buffer_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/buffer_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/buffer-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/float_model.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/float_input.bin" + "${CMAKE_CURRENT_SOURCE_DIR}/float_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/float-processing-definition.json" + 
"${CMAKE_CURRENT_SOURCE_DIR}/int_model.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/int_input.bin" + "${CMAKE_CURRENT_SOURCE_DIR}/int_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/int-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/mat2_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/mat2_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/mat2_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/mat2-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/mat3_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/mat3_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/mat3_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/mat3-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/mat4_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/mat4_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/mat4_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/mat4-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/vec2_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/vec2_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/vec2_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/vec2-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/vec3_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/vec3_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/vec3_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/vec3-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/vec4_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/vec4_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/vec4_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/vec4-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/tensor_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/tensor_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/tensor_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/tensor-processing-definition.json" + "${CMAKE_CURRENT_SOURCE_DIR}/texturecube_tiny.mnn" + "${CMAKE_CURRENT_SOURCE_DIR}/texturecube_input.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/texturecube_output_pt.raw" + "${CMAKE_CURRENT_SOURCE_DIR}/texturecube-processing-definition.json" + "$/processing_datatypes/" + COMMENT "Copying test files" +) diff --git 
a/test/src/processing_datatypes/bert-tiny.mnn b/test/src/processing_datatypes/bert-tiny.mnn new file mode 100644 index 000000000..20a8a9333 Binary files /dev/null and b/test/src/processing_datatypes/bert-tiny.mnn differ diff --git a/test/src/processing_datatypes/bool-processing-definition.json b/test/src/processing_datatypes/bool-processing-definition.json new file mode 100644 index 000000000..9e68914ae --- /dev/null +++ b/test/src/processing_datatypes/bool-processing-definition.json @@ -0,0 +1,76 @@ +{ + "name": "bool-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for bool input using a tiny MLP model", + "tags": ["bool", "signal", "test"], + + "inputs": [ + { + "name": "inputBool", + "source_uri_param": "file://processing_datatypes/bool_input.raw", + "type": "bool", + "description": "Input bool vector", + "dimensions": { + "width": 8, + "block_len": 8, + "chunk_stride": 8 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "boolOutput", + "source_uri_param": "file://processing_datatypes/bool_output.raw", + "type": "tensor", + "description": "Output tensor from bool model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/bool_tiny.mnn", + "description": "URI to the bool MNN model" + } + ], + + "passes": [ + { + "name": "bool_inference", + "type": "inference", + "description": "Bool model inference", + "model": { + "source_uri_param": "file://processing_datatypes/bool_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputBool", + "shape": [1, 8] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:boolOutput", + "shape": [1, 8] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-16", + "framework": "MNN", + "test_case": "bool_datatype" + } +} diff --git 
a/test/src/processing_datatypes/bool_input.raw b/test/src/processing_datatypes/bool_input.raw new file mode 100644 index 000000000..4c2c25cd1 Binary files /dev/null and b/test/src/processing_datatypes/bool_input.raw differ diff --git a/test/src/processing_datatypes/bool_output_pt.raw b/test/src/processing_datatypes/bool_output_pt.raw new file mode 100644 index 000000000..6a22deb67 --- /dev/null +++ b/test/src/processing_datatypes/bool_output_pt.raw @@ -0,0 +1 @@ +E>Mږ>^=;i@ \ No newline at end of file diff --git a/test/src/processing_datatypes/bool_tiny.mnn b/test/src/processing_datatypes/bool_tiny.mnn new file mode 100644 index 000000000..f3a85d059 Binary files /dev/null and b/test/src/processing_datatypes/bool_tiny.mnn differ diff --git a/test/src/processing_datatypes/buffer-processing-definition.json b/test/src/processing_datatypes/buffer-processing-definition.json new file mode 100644 index 000000000..e5973ee1c --- /dev/null +++ b/test/src/processing_datatypes/buffer-processing-definition.json @@ -0,0 +1,76 @@ +{ + "name": "buffer-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for buffer input using a tiny MLP model", + "tags": ["buffer", "int8", "test"], + + "inputs": [ + { + "name": "inputBuffer", + "source_uri_param": "file://processing_datatypes/buffer_input.raw", + "type": "buffer", + "description": "Input int8 buffer", + "dimensions": { + "width": 16, + "block_len": 16, + "chunk_stride": 16 + }, + "format": "INT8" + } + ], + + "outputs": [ + { + "name": "bufferOutput", + "source_uri_param": "file://processing_datatypes/buffer_output.raw", + "type": "tensor", + "description": "Output tensor from buffer model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/buffer_tiny.mnn", + "description": "URI to the buffer MNN model" + } + ], + + "passes": [ + { + "name": "buffer_inference", + "type": "inference", + 
"description": "Buffer model inference", + "model": { + "source_uri_param": "file://processing_datatypes/buffer_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputBuffer", + "shape": [1, 16] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:bufferOutput", + "shape": [1, 16] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-16", + "framework": "MNN", + "test_case": "buffer_datatype" + } +} diff --git a/test/src/processing_datatypes/buffer_input.raw b/test/src/processing_datatypes/buffer_input.raw new file mode 100644 index 000000000..436c4589c Binary files /dev/null and b/test/src/processing_datatypes/buffer_input.raw differ diff --git a/test/src/processing_datatypes/buffer_output_pt.raw b/test/src/processing_datatypes/buffer_output_pt.raw new file mode 100644 index 000000000..4ad00041e --- /dev/null +++ b/test/src/processing_datatypes/buffer_output_pt.raw @@ -0,0 +1 @@ +zh "@P@@r>"?zN@,7@d å<8?s \ No newline at end of file diff --git a/test/src/processing_datatypes/buffer_tiny.mnn b/test/src/processing_datatypes/buffer_tiny.mnn new file mode 100644 index 000000000..3970e82c5 Binary files /dev/null and b/test/src/processing_datatypes/buffer_tiny.mnn differ diff --git a/test/src/processing_datatypes/convert_nii_to_raw.py b/test/src/processing_datatypes/convert_nii_to_raw.py new file mode 100644 index 000000000..e250fc2fa --- /dev/null +++ b/test/src/processing_datatypes/convert_nii_to_raw.py @@ -0,0 +1,71 @@ +import argparse +from pathlib import Path + +import nibabel as nib +import numpy as np + + +def _load_with_monai(input_path: Path) -> np.ndarray: + try: + from monai.transforms import ( + Compose, + LoadImaged, + EnsureChannelFirstd, + Orientationd, + Spacingd, + ScaleIntensityRanged, + EnsureTyped, + ) + import torch + except Exception as exc: + raise RuntimeError("monai is required for --monai: pip install monai") from 
exc + + preprocessing = Compose([ + LoadImaged(keys=["image"]), + EnsureChannelFirstd(keys=["image"]), + Orientationd(keys=["image"], axcodes="RAS"), + Spacingd(keys=["image"], pixdim=[1.5, 1.5, 2.0], mode="bilinear"), + ScaleIntensityRanged(keys=["image"], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True), + EnsureTyped(keys=["image"]), + ]) + + sample = preprocessing({"image": str(input_path)}) + data = sample["image"] + if isinstance(data, torch.Tensor): + data = data.cpu().numpy() + return np.asarray(data, dtype=np.float32) + + +def main() -> int: + parser = argparse.ArgumentParser(description="Convert NIfTI to raw float32") + parser.add_argument("input_nii", help="Path to input .nii or .nii.gz") + parser.add_argument("output_raw", help="Path to output .raw") + parser.add_argument("--monai", action="store_true", help="Apply MONAI preprocessing for spleen_ct_segmentation") + args = parser.parse_args() + + input_path = Path(args.input_nii) + output_path = Path(args.output_raw) + + if not input_path.exists(): + print(f"Input not found: {input_path}") + return 1 + + if args.monai: + data = _load_with_monai(input_path) + # MONAI output is typically (C, H, W, D). Drop channel and keep (H, W, D). 
+ if data.ndim == 4 and data.shape[0] == 1: + data = data[0] + else: + img = nib.load(str(input_path)) + data = img.get_fdata(dtype=np.float32) + + print(f"shape: {data.shape}, dtype: {data.dtype}") + + output_path.parent.mkdir(parents=True, exist_ok=True) + data.tofile(str(output_path)) + print(f"wrote raw: {output_path}") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/test/src/processing_datatypes/create_bool_model.py b/test/src/processing_datatypes/create_bool_model.py new file mode 100644 index 000000000..6bc5e2f37 --- /dev/null +++ b/test/src/processing_datatypes/create_bool_model.py @@ -0,0 +1,76 @@ +import argparse +import os +import numpy as np + +try: + import torch + import torch.nn as nn +except ImportError as exc: + raise SystemExit("This script requires torch. Install with: pip install torch") from exc + + +class TinyBoolNet(nn.Module): + def __init__(self, length): + super().__init__() + self.fc1 = nn.Linear(length, length) + self.act = nn.ReLU() + self.fc2 = nn.Linear(length, length) + + def forward(self, x): + return self.fc2(self.act(self.fc1(x))) + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny bool ONNX model and sample input.") + parser.add_argument("--length", type=int, default=8, help="Input length") + parser.add_argument("--out-dir", default=".", help="Output directory") + parser.add_argument("--onnx", default="bool_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="bool_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="bool_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="bool_output_pt.raw", help="Reference output filename") + args = parser.parse_args() + + os.makedirs(args.out_dir, exist_ok=True) + + torch.manual_seed(0) + model = TinyBoolNet(args.length).eval() + + # Create a deterministic 0/1 pattern + input_bits = np.array([0, 1, 1, 0, 1, 0, 0, 1], dtype=np.float32) + if 
args.length != input_bits.size: + input_bits = np.resize(input_bits, args.length).astype(np.float32) + + dummy = torch.from_numpy(input_bits).reshape(1, args.length) + + onnx_path = os.path.join(args.out_dir, args.onnx) + torch.onnx.export( + model, + dummy, + onnx_path, + input_names=["input"], + output_names=["output"], + opset_version=11 + ) + + raw_path = os.path.join(args.out_dir, args.raw) + input_bits.astype(np.float32).tofile(raw_path) + + with torch.no_grad(): + output = model(dummy).cpu().numpy().astype(np.float32).reshape(-1) + + ref_path = os.path.join(args.out_dir, args.ref) + output.tofile(ref_path) + + pt_path = os.path.join(args.out_dir, args.pt) + torch.save(model.state_dict(), pt_path) + + print(f"Wrote ONNX model: {onnx_path}") + print(f"Wrote raw input: {raw_path} (length {args.length})") + print(f"Wrote reference output: {ref_path}") + print(f"Wrote PyTorch state dict: {pt_path}") + print("Convert to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {onnx_path} --MNNModel {os.path.splitext(onnx_path)[0]}.mnn") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/create_buffer_model.py b/test/src/processing_datatypes/create_buffer_model.py new file mode 100644 index 000000000..7bf991262 --- /dev/null +++ b/test/src/processing_datatypes/create_buffer_model.py @@ -0,0 +1,77 @@ +import argparse +import os +import numpy as np + +try: + import torch + import torch.nn as nn +except ImportError as exc: + raise SystemExit("This script requires torch. 
Install with: pip install torch") from exc + + +class TinyBufferNet(nn.Module): + def __init__(self, length): + super().__init__() + self.fc1 = nn.Linear(length, length) + self.act = nn.ReLU() + self.fc2 = nn.Linear(length, length) + + def forward(self, x): + x = x.float() + return self.fc2(self.act(self.fc1(x))) + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny buffer ONNX model and sample input.") + parser.add_argument("--length", type=int, default=16, help="Input length") + parser.add_argument("--out-dir", default=".", help="Output directory") + parser.add_argument("--onnx", default="buffer_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="buffer_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="buffer_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="buffer_output_pt.raw", help="Reference output filename") + args = parser.parse_args() + + os.makedirs(args.out_dir, exist_ok=True) + + torch.manual_seed(0) + model = TinyBufferNet(args.length).eval() + + # Deterministic int8 pattern + input_bytes = np.array([0, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15], dtype=np.int8) + if args.length != input_bytes.size: + input_bytes = np.resize(input_bytes, args.length).astype(np.int8) + + dummy = torch.from_numpy(input_bytes.astype(np.float32)).reshape(1, args.length) + + onnx_path = os.path.join(args.out_dir, args.onnx) + torch.onnx.export( + model, + dummy, + onnx_path, + input_names=["input"], + output_names=["output"], + opset_version=11 + ) + + raw_path = os.path.join(args.out_dir, args.raw) + input_bytes.tofile(raw_path) + + with torch.no_grad(): + output = model(dummy).cpu().numpy().astype(np.float32).reshape(-1) + + ref_path = os.path.join(args.out_dir, args.ref) + output.tofile(ref_path) + + pt_path = os.path.join(args.out_dir, args.pt) + torch.save(model.state_dict(), pt_path) + + print(f"Wrote ONNX model: {onnx_path}") + 
print(f"Wrote raw input: {raw_path} (length {args.length})") + print(f"Wrote reference output: {ref_path}") + print(f"Wrote PyTorch state dict: {pt_path}") + print("Convert to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {onnx_path} --MNNModel {os.path.splitext(onnx_path)[0]}.mnn") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/create_float_model.py b/test/src/processing_datatypes/create_float_model.py new file mode 100644 index 000000000..11b56207a --- /dev/null +++ b/test/src/processing_datatypes/create_float_model.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 +""" +Generate a small MLP model for float vector processing (1D). +This processes float32 input vectors with a simple feed-forward network. +""" + +import torch +import torch.nn as nn +import numpy as np +import sys + +class FloatMLP(nn.Module): + """Simple MLP for float vector processing""" + def __init__(self, input_size=64, hidden_size=32, output_size=64): + super().__init__() + self.fc1 = nn.Linear(input_size, hidden_size) + self.fc2 = nn.Linear(hidden_size, output_size) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.relu(self.fc1(x)) + x = self.fc2(x) + return x + +def main(): + print("Creating float MLP model...") + + # Model parameters + input_size = 64 + hidden_size = 32 + output_size = 64 + + # Create model + model = FloatMLP(input_size, hidden_size, output_size) + model.eval() + + # Create dummy input + dummy_input = torch.randn(1, input_size) + + # Test model + with torch.no_grad(): + output = model(dummy_input) + print(f"Model test: input shape {dummy_input.shape} -> output shape {output.shape}") + + # Export to ONNX + onnx_path = "float_model.onnx" + torch.onnx.export( + model, + dummy_input, + onnx_path, + export_params=True, + opset_version=11, + do_constant_folding=True, + input_names=['input'], + output_names=['output'], + dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}} + ) + print(f"Saved ONNX model to 
{onnx_path}") + + # Generate test input (512 floats for sliding window test) + test_width = 512 + test_input = np.random.randn(test_width).astype(np.float32) + test_input_bytes = test_input.tobytes() + + with open('float_input.bin', 'wb') as f: + f.write(test_input_bytes) + print(f"Saved test input ({test_width} floats) to float_input.bin") + + # Generate reference output using sliding window with stride + patch_width = input_size + stride = patch_width // 2 # Default stride + num_patches = ((test_width - patch_width) // stride) + 1 + output_width = (num_patches - 1) * stride + output_size + + print(f"Reference generation: {num_patches} patches, output width {output_width}") + + output_accum = np.zeros(output_width, dtype=np.float32) + weight_accum = np.zeros(output_width, dtype=np.float32) + + with torch.no_grad(): + for p in range(num_patches): + start_pos = p * stride + end_pos = min(start_pos + patch_width, test_width) + + patch_input = np.zeros(patch_width, dtype=np.float32) + patch_input[:end_pos - start_pos] = test_input[start_pos:end_pos] + + patch_tensor = torch.from_numpy(patch_input).unsqueeze(0) + patch_output = model(patch_tensor).squeeze(0).numpy() + + out_start = start_pos + for w in range(output_size): + dst_pos = out_start + w + if dst_pos < output_width: + output_accum[dst_pos] += patch_output[w] + weight_accum[dst_pos] += 1.0 + + # Average overlapping regions + reference_output = np.divide(output_accum, weight_accum, where=weight_accum > 0) + reference_bytes = reference_output.tobytes() + + with open('float_reference.bin', 'wb') as f: + f.write(reference_bytes) + print(f"Saved reference output ({output_width} floats) to float_reference.bin") + + print("\nConvert ONNX to MNN with:") + print(" MNNConvert -f ONNX --modelFile float_model.onnx --MNNModel float_model.mnn --bizCode biz") + print("\nStats:") + print(f" Input: min={test_input.min():.3f}, max={test_input.max():.3f}, mean={test_input.mean():.3f}") + print(f" Reference: 
min={reference_output.min():.3f}, max={reference_output.max():.3f}, mean={reference_output.mean():.3f}") + +if __name__ == '__main__': + main() diff --git a/test/src/processing_datatypes/create_int_model.py b/test/src/processing_datatypes/create_int_model.py new file mode 100644 index 000000000..60b756b07 --- /dev/null +++ b/test/src/processing_datatypes/create_int_model.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 +""" +Generate a small MLP model for integer vector processing (1D). +This processes int32 input vectors with a simple feed-forward network. +""" + +import torch +import torch.nn as nn +import numpy as np +import sys + +class IntMLP(nn.Module): + """Simple MLP for integer vector processing""" + def __init__(self, input_size=64, hidden_size=32, output_size=64): + super().__init__() + self.fc1 = nn.Linear(input_size, hidden_size) + self.fc2 = nn.Linear(hidden_size, output_size) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.relu(self.fc1(x)) + x = self.fc2(x) + return x + +def main(): + print("Creating int MLP model...") + + # Model parameters + input_size = 64 + hidden_size = 32 + output_size = 64 + + # Create model + model = IntMLP(input_size, hidden_size, output_size) + model.eval() + + # Create dummy input (as float for model) + dummy_input = torch.randn(1, input_size) + + # Test model + with torch.no_grad(): + output = model(dummy_input) + print(f"Model test: input shape {dummy_input.shape} -> output shape {output.shape}") + + # Export to ONNX + onnx_path = "int_model.onnx" + torch.onnx.export( + model, + dummy_input, + onnx_path, + export_params=True, + opset_version=11, + do_constant_folding=True, + input_names=['input'], + output_names=['output'], + dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}} + ) + print(f"Saved ONNX model to {onnx_path}") + + # Generate test input (512 int32 values for sliding window test) + test_width = 512 + test_input_int = np.random.randint(-100, 100, size=test_width, dtype=np.int32) + 
test_input_bytes = test_input_int.tobytes() + + with open('int_input.bin', 'wb') as f: + f.write(test_input_bytes) + print(f"Saved test input ({test_width} int32s) to int_input.bin") + + # Convert to float for model inference + test_input_float = test_input_int.astype(np.float32) + + # Generate reference output using sliding window with stride + patch_width = input_size + stride = patch_width // 2 # Default stride + num_patches = ((test_width - patch_width) // stride) + 1 + output_width = (num_patches - 1) * stride + output_size + + print(f"Reference generation: {num_patches} patches, output width {output_width}") + + output_accum = np.zeros(output_width, dtype=np.float32) + weight_accum = np.zeros(output_width, dtype=np.float32) + + with torch.no_grad(): + for p in range(num_patches): + start_pos = p * stride + end_pos = min(start_pos + patch_width, test_width) + + patch_input = np.zeros(patch_width, dtype=np.float32) + patch_input[:end_pos - start_pos] = test_input_float[start_pos:end_pos] + + patch_tensor = torch.from_numpy(patch_input).unsqueeze(0) + patch_output = model(patch_tensor).squeeze(0).numpy() + + out_start = start_pos + for w in range(output_size): + dst_pos = out_start + w + if dst_pos < output_width: + output_accum[dst_pos] += patch_output[w] + weight_accum[dst_pos] += 1.0 + + # Average overlapping regions + reference_output = np.divide(output_accum, weight_accum, where=weight_accum > 0) + reference_bytes = reference_output.tobytes() + + with open('int_reference.bin', 'wb') as f: + f.write(reference_bytes) + print(f"Saved reference output ({output_width} floats) to int_reference.bin") + + print("\nConvert ONNX to MNN with:") + print(" MNNConvert -f ONNX --modelFile int_model.onnx --MNNModel int_model.mnn --bizCode biz") + print("\nStats:") + print(f" Input (int32): min={test_input_int.min()}, max={test_input_int.max()}, mean={test_input_int.mean():.3f}") + print(f" Reference (float): min={reference_output.min():.3f}, 
max={reference_output.max():.3f}, mean={reference_output.mean():.3f}") + +if __name__ == '__main__': + main() diff --git a/test/src/processing_datatypes/create_mat2_model.py b/test/src/processing_datatypes/create_mat2_model.py new file mode 100644 index 000000000..d9d473469 --- /dev/null +++ b/test/src/processing_datatypes/create_mat2_model.py @@ -0,0 +1,107 @@ +import argparse +import os +import numpy as np + +try: + import torch + import torch.nn as nn +except ImportError as exc: + raise SystemExit("This script requires torch. Install with: pip install torch") from exc + + +class TinyMat2Net(nn.Module): + def __init__(self): + super().__init__() + self.net = nn.Sequential( + nn.Conv1d(4, 4, kernel_size=1), + nn.ReLU(), + nn.Conv1d(4, 4, kernel_size=1) + ) + + def forward(self, x): + return self.net(x) + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny mat2 ONNX model and reference output.") + parser.add_argument("--matrices", type=int, default=16, help="Number of mat2 entries") + parser.add_argument("--patch", type=int, default=8, help="Patch length (block_len)") + parser.add_argument("--stride", type=int, default=4, help="Stride in matrices") + parser.add_argument("--out-dir", default=".", help="Output directory") + parser.add_argument("--onnx", default="mat2_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="mat2_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="mat2_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="mat2_output_pt.raw", help="Reference output filename") + args = parser.parse_args() + + os.makedirs(args.out_dir, exist_ok=True) + + torch.manual_seed(0) + rng = np.random.default_rng(0) + + model = TinyMat2Net().eval() + + input_mats = rng.normal(size=(args.matrices, 4)).astype(np.float32) + raw_path = os.path.join(args.out_dir, args.raw) + input_mats.reshape(-1).tofile(raw_path) + + dummy = torch.randn(1, 4, args.patch, 
dtype=torch.float32) + onnx_path = os.path.join(args.out_dir, args.onnx) + torch.onnx.export( + model, + dummy, + onnx_path, + input_names=["input"], + output_names=["output"], + opset_version=11 + ) + + output_accum = np.zeros((4, args.matrices), dtype=np.float32) + weight_accum = np.zeros((args.matrices,), dtype=np.float32) + + with torch.no_grad(): + starts = list(range(0, max(1, args.matrices - args.patch + 1), max(1, args.stride))) + if not starts or starts[-1] != args.matrices - args.patch: + if args.matrices > args.patch: + starts.append(args.matrices - args.patch) + else: + starts = [0] + + for start in starts: + patch = np.zeros((4, args.patch), dtype=np.float32) + for i in range(args.patch): + idx = start + i + if idx >= args.matrices: + break + patch[:, i] = input_mats[idx, :] + + patch_tensor = torch.from_numpy(patch).unsqueeze(0) + patch_output = model(patch_tensor).squeeze(0).cpu().numpy() + + for i in range(args.patch): + out_index = start + i + if out_index >= args.matrices: + break + output_accum[:, out_index] += patch_output[:, i] + weight_accum[out_index] += 1.0 + + for i in range(args.matrices): + if weight_accum[i] > 0.0: + output_accum[:, i] /= weight_accum[i] + + ref_path = os.path.join(args.out_dir, args.ref) + output_accum.reshape(-1).tofile(ref_path) + + pt_path = os.path.join(args.out_dir, args.pt) + torch.save(model.state_dict(), pt_path) + + print(f"Wrote ONNX model: {onnx_path}") + print(f"Wrote raw input: {raw_path} (matrices {args.matrices})") + print(f"Wrote reference output: {ref_path}") + print(f"Wrote PyTorch state dict: {pt_path}") + print("Convert to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {onnx_path} --MNNModel {os.path.splitext(onnx_path)[0]}.mnn") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/create_mat3_model.py b/test/src/processing_datatypes/create_mat3_model.py new file mode 100644 index 000000000..89cda6997 --- /dev/null +++ 
b/test/src/processing_datatypes/create_mat3_model.py @@ -0,0 +1,107 @@ +import argparse +import os +import numpy as np + +try: + import torch + import torch.nn as nn +except ImportError as exc: + raise SystemExit("This script requires torch. Install with: pip install torch") from exc + + +class TinyMat3Net(nn.Module): + def __init__(self): + super().__init__() + self.net = nn.Sequential( + nn.Conv1d(9, 9, kernel_size=1), + nn.ReLU(), + nn.Conv1d(9, 9, kernel_size=1) + ) + + def forward(self, x): + return self.net(x) + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny mat3 ONNX model and reference output.") + parser.add_argument("--matrices", type=int, default=12, help="Number of mat3 entries") + parser.add_argument("--patch", type=int, default=6, help="Patch length (block_len)") + parser.add_argument("--stride", type=int, default=3, help="Stride in matrices") + parser.add_argument("--out-dir", default=".", help="Output directory") + parser.add_argument("--onnx", default="mat3_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="mat3_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="mat3_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="mat3_output_pt.raw", help="Reference output filename") + args = parser.parse_args() + + os.makedirs(args.out_dir, exist_ok=True) + + torch.manual_seed(0) + rng = np.random.default_rng(0) + + model = TinyMat3Net().eval() + + input_mats = rng.normal(size=(args.matrices, 9)).astype(np.float32) + raw_path = os.path.join(args.out_dir, args.raw) + input_mats.reshape(-1).tofile(raw_path) + + dummy = torch.randn(1, 9, args.patch, dtype=torch.float32) + onnx_path = os.path.join(args.out_dir, args.onnx) + torch.onnx.export( + model, + dummy, + onnx_path, + input_names=["input"], + output_names=["output"], + opset_version=11 + ) + + output_accum = np.zeros((9, args.matrices), dtype=np.float32) + weight_accum = 
np.zeros((args.matrices,), dtype=np.float32) + + with torch.no_grad(): + starts = list(range(0, max(1, args.matrices - args.patch + 1), max(1, args.stride))) + if not starts or starts[-1] != args.matrices - args.patch: + if args.matrices > args.patch: + starts.append(args.matrices - args.patch) + else: + starts = [0] + + for start in starts: + patch = np.zeros((9, args.patch), dtype=np.float32) + for i in range(args.patch): + idx = start + i + if idx >= args.matrices: + break + patch[:, i] = input_mats[idx, :] + + patch_tensor = torch.from_numpy(patch).unsqueeze(0) + patch_output = model(patch_tensor).squeeze(0).cpu().numpy() + + for i in range(args.patch): + out_index = start + i + if out_index >= args.matrices: + break + output_accum[:, out_index] += patch_output[:, i] + weight_accum[out_index] += 1.0 + + for i in range(args.matrices): + if weight_accum[i] > 0.0: + output_accum[:, i] /= weight_accum[i] + + ref_path = os.path.join(args.out_dir, args.ref) + output_accum.reshape(-1).tofile(ref_path) + + pt_path = os.path.join(args.out_dir, args.pt) + torch.save(model.state_dict(), pt_path) + + print(f"Wrote ONNX model: {onnx_path}") + print(f"Wrote raw input: {raw_path} (matrices {args.matrices})") + print(f"Wrote reference output: {ref_path}") + print(f"Wrote PyTorch state dict: {pt_path}") + print("Convert to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {onnx_path} --MNNModel {os.path.splitext(onnx_path)[0]}.mnn") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/create_mat4_model.py b/test/src/processing_datatypes/create_mat4_model.py new file mode 100644 index 000000000..e8d2980b2 --- /dev/null +++ b/test/src/processing_datatypes/create_mat4_model.py @@ -0,0 +1,107 @@ +import argparse +import os +import numpy as np + +try: + import torch + import torch.nn as nn +except ImportError as exc: + raise SystemExit("This script requires torch. 
Install with: pip install torch") from exc + + +class TinyMat4Net(nn.Module): + def __init__(self): + super().__init__() + self.net = nn.Sequential( + nn.Conv1d(16, 16, kernel_size=1), + nn.ReLU(), + nn.Conv1d(16, 16, kernel_size=1) + ) + + def forward(self, x): + return self.net(x) + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny mat4 ONNX model and reference output.") + parser.add_argument("--matrices", type=int, default=10, help="Number of mat4 entries") + parser.add_argument("--patch", type=int, default=5, help="Patch length (block_len)") + parser.add_argument("--stride", type=int, default=2, help="Stride in matrices") + parser.add_argument("--out-dir", default=".", help="Output directory") + parser.add_argument("--onnx", default="mat4_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="mat4_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="mat4_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="mat4_output_pt.raw", help="Reference output filename") + args = parser.parse_args() + + os.makedirs(args.out_dir, exist_ok=True) + + torch.manual_seed(0) + rng = np.random.default_rng(0) + + model = TinyMat4Net().eval() + + input_mats = rng.normal(size=(args.matrices, 16)).astype(np.float32) + raw_path = os.path.join(args.out_dir, args.raw) + input_mats.reshape(-1).tofile(raw_path) + + dummy = torch.randn(1, 16, args.patch, dtype=torch.float32) + onnx_path = os.path.join(args.out_dir, args.onnx) + torch.onnx.export( + model, + dummy, + onnx_path, + input_names=["input"], + output_names=["output"], + opset_version=11 + ) + + output_accum = np.zeros((16, args.matrices), dtype=np.float32) + weight_accum = np.zeros((args.matrices,), dtype=np.float32) + + with torch.no_grad(): + starts = list(range(0, max(1, args.matrices - args.patch + 1), max(1, args.stride))) + if not starts or starts[-1] != args.matrices - args.patch: + if args.matrices > args.patch: + 
starts.append(args.matrices - args.patch) + else: + starts = [0] + + for start in starts: + patch = np.zeros((16, args.patch), dtype=np.float32) + for i in range(args.patch): + idx = start + i + if idx >= args.matrices: + break + patch[:, i] = input_mats[idx, :] + + patch_tensor = torch.from_numpy(patch).unsqueeze(0) + patch_output = model(patch_tensor).squeeze(0).cpu().numpy() + + for i in range(args.patch): + out_index = start + i + if out_index >= args.matrices: + break + output_accum[:, out_index] += patch_output[:, i] + weight_accum[out_index] += 1.0 + + for i in range(args.matrices): + if weight_accum[i] > 0.0: + output_accum[:, i] /= weight_accum[i] + + ref_path = os.path.join(args.out_dir, args.ref) + output_accum.reshape(-1).tofile(ref_path) + + pt_path = os.path.join(args.out_dir, args.pt) + torch.save(model.state_dict(), pt_path) + + print(f"Wrote ONNX model: {onnx_path}") + print(f"Wrote raw input: {raw_path} (matrices {args.matrices})") + print(f"Wrote reference output: {ref_path}") + print(f"Wrote PyTorch state dict: {pt_path}") + print("Convert to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {onnx_path} --MNNModel {os.path.splitext(onnx_path)[0]}.mnn") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/create_tensor_model.py b/test/src/processing_datatypes/create_tensor_model.py new file mode 100644 index 000000000..7c250a0f9 --- /dev/null +++ b/test/src/processing_datatypes/create_tensor_model.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 +""" +Generate a small MLP model for generic tensor vector processing (1D). +""" + +import argparse +import os +import numpy as np + +try: + import torch + import torch.nn as nn +except ImportError as exc: + raise SystemExit("This script requires torch. 
Install with: pip install torch") from exc + + +class TensorMLP(nn.Module): + def __init__(self, input_size=64, hidden_size=32, output_size=64): + super().__init__() + self.fc1 = nn.Linear(input_size, hidden_size) + self.fc2 = nn.Linear(hidden_size, output_size) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.relu(self.fc1(x)) + x = self.fc2(x) + return x + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny tensor ONNX model and sample input.") + parser.add_argument("--width", type=int, default=256, help="Input width (total elements)") + parser.add_argument("--patch", type=int, default=64, help="Patch length (block_len)") + parser.add_argument("--stride", type=int, default=32, help="Stride in elements") + parser.add_argument("--out-dir", default=".", help="Output directory") + parser.add_argument("--onnx", default="tensor_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="tensor_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="tensor_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="tensor_output_pt.raw", help="Reference output filename") + args = parser.parse_args() + + os.makedirs(args.out_dir, exist_ok=True) + + torch.manual_seed(0) + rng = np.random.default_rng(0) + + model = TensorMLP(args.patch, args.patch // 2, args.patch).eval() + + test_input = rng.normal(size=(args.width,)).astype(np.float32) + raw_path = os.path.join(args.out_dir, args.raw) + test_input.tofile(raw_path) + + dummy_input = torch.randn(1, args.patch) + onnx_path = os.path.join(args.out_dir, args.onnx) + torch.onnx.export( + model, + dummy_input, + onnx_path, + export_params=True, + opset_version=11, + do_constant_folding=True, + input_names=["input"], + output_names=["output"], + dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}} + ) + + output_accum = np.zeros((args.width,), dtype=np.float32) + weight_accum = np.zeros((args.width,), 
dtype=np.float32) + + with torch.no_grad(): + starts = list(range(0, max(1, args.width - args.patch + 1), max(1, args.stride))) + if not starts or starts[-1] != args.width - args.patch: + if args.width > args.patch: + starts.append(args.width - args.patch) + else: + starts = [0] + + for start in starts: + patch = np.zeros((args.patch,), dtype=np.float32) + end_pos = min(start + args.patch, args.width) + patch[:end_pos - start] = test_input[start:end_pos] + + patch_tensor = torch.from_numpy(patch).unsqueeze(0) + patch_output = model(patch_tensor).squeeze(0).numpy() + + out_start = start + for i in range(args.patch): + dst_pos = out_start + i + if dst_pos >= args.width: + break + output_accum[dst_pos] += patch_output[i] + weight_accum[dst_pos] += 1.0 + + reference_output = np.divide(output_accum, weight_accum, where=weight_accum > 0) + ref_path = os.path.join(args.out_dir, args.ref) + reference_output.tofile(ref_path) + + pt_path = os.path.join(args.out_dir, args.pt) + torch.save(model.state_dict(), pt_path) + + print(f"Wrote ONNX model: {onnx_path}") + print(f"Wrote raw input: {raw_path} (length {args.width})") + print(f"Wrote reference output: {ref_path}") + print(f"Wrote PyTorch state dict: {pt_path}") + print("Convert to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {onnx_path} --MNNModel {os.path.splitext(onnx_path)[0]}.mnn") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/create_texture1d_model.py b/test/src/processing_datatypes/create_texture1d_model.py new file mode 100644 index 000000000..bf4b0da80 --- /dev/null +++ b/test/src/processing_datatypes/create_texture1d_model.py @@ -0,0 +1,72 @@ +import argparse +import os +import numpy as np + +try: + import torch + import torch.nn as nn +except ImportError as exc: + raise SystemExit("This script requires torch. 
Install with: pip install torch") from exc + + +class TinyConv1D(nn.Module): + def __init__(self): + super().__init__() + self.net = nn.Sequential( + nn.Conv1d(1, 4, kernel_size=3, padding=1), + nn.ReLU(), + nn.Conv1d(4, 2, kernel_size=3, padding=1) + ) + + def forward(self, x): + return self.net(x) + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny texture1D ONNX model and sample input.") + parser.add_argument("--length", type=int, default=256, help="Input length") + parser.add_argument("--out-dir", default=".", help="Output directory") + parser.add_argument("--onnx", default="texture1d_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="texture1d_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="texture1d_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="texture1d_output_pt.raw", help="Reference output filename") + args = parser.parse_args() + + os.makedirs(args.out_dir, exist_ok=True) + + model = TinyConv1D().eval() + dummy = torch.randn(1, 1, args.length, dtype=torch.float32) + + onnx_path = os.path.join(args.out_dir, args.onnx) + torch.onnx.export( + model, + dummy, + onnx_path, + input_names=["input"], + output_names=["output"], + opset_version=11 + ) + + raw_path = os.path.join(args.out_dir, args.raw) + np_input = dummy.numpy().astype(np.float32).reshape(-1) + np_input.tofile(raw_path) + + with torch.no_grad(): + output = model(dummy).cpu().numpy().astype(np.float32).reshape(-1) + + ref_path = os.path.join(args.out_dir, args.ref) + output.tofile(ref_path) + + pt_path = os.path.join(args.out_dir, args.pt) + torch.save(model.state_dict(), pt_path) + + print(f"Wrote ONNX model: {onnx_path}") + print(f"Wrote raw input: {raw_path} (length {args.length})") + print(f"Wrote reference output: {ref_path}") + print(f"Wrote PyTorch state dict: {pt_path}") + print("Convert to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {onnx_path} --MNNModel 
{os.path.splitext(onnx_path)[0]}.mnn") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/create_texturecube_model.py b/test/src/processing_datatypes/create_texturecube_model.py new file mode 100644 index 000000000..12c3273a4 --- /dev/null +++ b/test/src/processing_datatypes/create_texturecube_model.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +""" +Generate a tiny Conv2D model for textureCube processing and reference outputs. +""" + +import argparse +import os +import numpy as np + +try: + import torch + import torch.nn as nn +except ImportError as exc: + raise SystemExit("This script requires torch. Install with: pip install torch") from exc + + +class TinyCubeNet(nn.Module): + def __init__(self): + super().__init__() + self.net = nn.Sequential( + nn.Conv2d(3, 4, kernel_size=3, padding=1), + nn.ReLU(), + nn.Conv2d(4, 3, kernel_size=3, padding=1) + ) + + def forward(self, x): + return self.net(x) + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny textureCube ONNX model and sample input.") + parser.add_argument("--width", type=int, default=64, help="Face width") + parser.add_argument("--height", type=int, default=64, help="Face height") + parser.add_argument("--out-dir", default=".", help="Output directory") + parser.add_argument("--onnx", default="texturecube_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="texturecube_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="texturecube_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="texturecube_output_pt.raw", help="Reference output filename") + args = parser.parse_args() + + os.makedirs(args.out_dir, exist_ok=True) + + torch.manual_seed(0) + rng = np.random.default_rng(0) + + model = TinyCubeNet().eval() + + onnx_path = os.path.join(args.out_dir, args.onnx) + dummy = torch.randn(1, 3, args.height, args.width, dtype=torch.float32) + torch.onnx.export( + model, + 
dummy, + onnx_path, + input_names=["input"], + output_names=["output"], + opset_version=11 + ) + + faces = [] + for _ in range(6): + face = rng.integers(0, 256, size=(args.height, args.width, 3), dtype=np.uint8) + faces.append(face) + + raw_path = os.path.join(args.out_dir, args.raw) + with open(raw_path, "wb") as f: + for face in faces: + f.write(face.tobytes()) + + outputs = [] + with torch.no_grad(): + for face in faces: + face_tensor = torch.from_numpy(face.astype(np.float32) / 1.0) + face_tensor = face_tensor.permute(2, 0, 1).unsqueeze(0) + output = model(face_tensor).cpu().numpy().astype(np.float32) + outputs.append(output) + + ref_path = os.path.join(args.out_dir, args.ref) + with open(ref_path, "wb") as f: + for output in outputs: + f.write(output.tobytes()) + + pt_path = os.path.join(args.out_dir, args.pt) + torch.save(model.state_dict(), pt_path) + + print(f"Wrote ONNX model: {onnx_path}") + print(f"Wrote raw input: {raw_path} (6 faces)") + print(f"Wrote reference output: {ref_path}") + print(f"Wrote PyTorch state dict: {pt_path}") + print("Convert to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {onnx_path} --MNNModel {os.path.splitext(onnx_path)[0]}.mnn") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/create_vec2_model.py b/test/src/processing_datatypes/create_vec2_model.py new file mode 100644 index 000000000..314df8b79 --- /dev/null +++ b/test/src/processing_datatypes/create_vec2_model.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +""" +Create a tiny vec2 ONNX model and reference output for testing. +Generates vec2 input, runs PyTorch inference, and exports to ONNX. 
+""" + +import torch +import torch.nn as nn +import torch.onnx +import numpy as np +import argparse + + +class TinyVec2Net(nn.Module): + """Simple 1D conv-based model for vec2 processing""" + def __init__(self): + super(TinyVec2Net, self).__init__() + # Input: (1, 2, N) where 2 is vec2 components, N is vector count + self.conv1 = nn.Conv1d(2, 8, kernel_size=3, padding=1) + self.relu = nn.ReLU() + self.conv2 = nn.Conv1d(8, 1, kernel_size=3, padding=1) + + def forward(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + return x.view(-1) # Flatten to 1D output + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny vec2 ONNX model and reference output.") + parser.add_argument("--vectors", type=int, default=16, help="Number of vec2 entries") + parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") + parser.add_argument("--onnx", default="vec2_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="vec2_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="vec2_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="vec2_output_pt.raw", help="Reference output filename") + + args = parser.parse_args() + + torch.manual_seed(args.seed) + np.random.seed(args.seed) + + # Create model + model = TinyVec2Net().eval() + print(f"Model created: {model}") + + # Create input: vec2 array (vectors, 2) + input_data = np.random.randn(args.vectors, 2).astype(np.float32) + print(f"Input shape (vectors, components): {input_data.shape}") + print(f"Input range: [{input_data.min():.4f}, {input_data.max():.4f}]") + + # Save raw input + with open(args.raw, "wb") as f: + input_data.astype(np.float32).tofile(f) + print(f"Saved raw input to {args.raw}") + + # Convert to model format: (batch=1, channels=2, length=vectors) + model_input = torch.from_numpy(input_data.T[np.newaxis, :, :]) # (1, 2, vectors) + print(f"Model input tensor shape: 
{model_input.shape}") + + # Run inference + with torch.no_grad(): + output = model(model_input) + output_np = output.cpu().numpy().astype(np.float32) + print(f"Output shape: {output_np.shape}") + print(f"Output range: [{output_np.min():.4f}, {output_np.max():.4f}]") + + # Save reference output + with open(args.ref, "wb") as f: + output_np.astype(np.float32).tofile(f) + print(f"Saved reference output to {args.ref}") + + # Save model state + torch.save(model.state_dict(), args.pt) + print(f"Saved model state to {args.pt}") + + # Export to ONNX + dummy_input = torch.randn(1, 2, args.vectors) + torch.onnx.export( + model, + dummy_input, + args.onnx, + input_names=["input"], + output_names=["output"], + opset_version=13, + do_constant_folding=True, + verbose=False, + ) + print(f"Exported ONNX model to {args.onnx}") + print("\nConvert ONNX to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {args.onnx} --MNNModel {args.onnx.replace('.onnx', '.mnn')} --bizCode biz") + print("\nStats:") + print(f" Input: min={input_data.min():.3f}, max={input_data.max():.3f}, mean={input_data.mean():.3f}") + print(f" Reference: min={output_np.min():.3f}, max={output_np.max():.3f}, mean={output_np.mean():.3f}") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/create_vec3_model.py b/test/src/processing_datatypes/create_vec3_model.py new file mode 100644 index 000000000..afc637efa --- /dev/null +++ b/test/src/processing_datatypes/create_vec3_model.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +""" +Create a tiny vec3 ONNX model and reference output for testing. +Generates vec3 input, runs PyTorch inference, and exports to ONNX. 
+""" + +import torch +import torch.nn as nn +import torch.onnx +import numpy as np +import argparse + + +class TinyVec3Net(nn.Module): + """Simple 1D conv-based model for vec3 processing""" + def __init__(self): + super(TinyVec3Net, self).__init__() + # Input: (1, 3, N) where 3 is vec3 components, N is vector count + self.conv1 = nn.Conv1d(3, 8, kernel_size=3, padding=1) + self.relu = nn.ReLU() + self.conv2 = nn.Conv1d(8, 1, kernel_size=3, padding=1) + + def forward(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + return x.view(-1) # Flatten to 1D output + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny vec3 ONNX model and reference output.") + parser.add_argument("--vectors", type=int, default=16, help="Number of vec3 entries") + parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") + parser.add_argument("--onnx", default="vec3_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="vec3_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="vec3_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="vec3_output_pt.raw", help="Reference output filename") + + args = parser.parse_args() + + torch.manual_seed(args.seed) + np.random.seed(args.seed) + + # Create model + model = TinyVec3Net().eval() + print(f"Model created: {model}") + + # Create input: vec3 array (vectors, 3) + input_data = np.random.randn(args.vectors, 3).astype(np.float32) + print(f"Input shape (vectors, components): {input_data.shape}") + print(f"Input range: [{input_data.min():.4f}, {input_data.max():.4f}]") + + # Save raw input + with open(args.raw, "wb") as f: + input_data.astype(np.float32).tofile(f) + print(f"Saved raw input to {args.raw}") + + # Convert to model format: (batch=1, channels=3, length=vectors) + model_input = torch.from_numpy(input_data.T[np.newaxis, :, :]) # (1, 3, vectors) + print(f"Model input tensor shape: 
{model_input.shape}") + + # Run inference + with torch.no_grad(): + output = model(model_input) + output_np = output.cpu().numpy().astype(np.float32) + print(f"Output shape: {output_np.shape}") + print(f"Output range: [{output_np.min():.4f}, {output_np.max():.4f}]") + + # Save reference output + with open(args.ref, "wb") as f: + output_np.astype(np.float32).tofile(f) + print(f"Saved reference output to {args.ref}") + + # Save model state + torch.save(model.state_dict(), args.pt) + print(f"Saved model state to {args.pt}") + + # Export to ONNX + dummy_input = torch.randn(1, 3, args.vectors) + torch.onnx.export( + model, + dummy_input, + args.onnx, + input_names=["input"], + output_names=["output"], + opset_version=13, + do_constant_folding=True, + verbose=False, + ) + print(f"Exported ONNX model to {args.onnx}") + print("\nConvert ONNX to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {args.onnx} --MNNModel {args.onnx.replace('.onnx', '.mnn')} --bizCode biz") + print("\nStats:") + print(f" Input: min={input_data.min():.3f}, max={input_data.max():.3f}, mean={input_data.mean():.3f}") + print(f" Reference: min={output_np.min():.3f}, max={output_np.max():.3f}, mean={output_np.mean():.3f}") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/create_vec4_model.py b/test/src/processing_datatypes/create_vec4_model.py new file mode 100644 index 000000000..b5a8e0373 --- /dev/null +++ b/test/src/processing_datatypes/create_vec4_model.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +""" +Create a tiny vec4 ONNX model and reference output for testing. +Generates vec4 input, runs PyTorch inference, and exports to ONNX. 
+""" + +import torch +import torch.nn as nn +import torch.onnx +import numpy as np +import argparse + + +class TinyVec4Net(nn.Module): + """Simple 1D conv-based model for vec4 processing""" + def __init__(self): + super(TinyVec4Net, self).__init__() + # Input: (1, 4, N) where 4 is vec4 components, N is vector count + self.conv1 = nn.Conv1d(4, 8, kernel_size=3, padding=1) + self.relu = nn.ReLU() + self.conv2 = nn.Conv1d(8, 1, kernel_size=3, padding=1) + + def forward(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + return x.view(-1) # Flatten to 1D output + + +def main(): + parser = argparse.ArgumentParser(description="Create a tiny vec4 ONNX model and reference output.") + parser.add_argument("--vectors", type=int, default=16, help="Number of vec4 entries") + parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") + parser.add_argument("--onnx", default="vec4_tiny.onnx", help="ONNX filename") + parser.add_argument("--raw", default="vec4_input.raw", help="Raw input filename") + parser.add_argument("--pt", default="vec4_tiny_state.pt", help="Torch state dict filename") + parser.add_argument("--ref", default="vec4_output_pt.raw", help="Reference output filename") + + args = parser.parse_args() + + torch.manual_seed(args.seed) + np.random.seed(args.seed) + + # Create model + model = TinyVec4Net().eval() + print(f"Model created: {model}") + + # Create input: vec4 array (vectors, 4) + input_data = np.random.randn(args.vectors, 4).astype(np.float32) + print(f"Input shape (vectors, components): {input_data.shape}") + print(f"Input range: [{input_data.min():.4f}, {input_data.max():.4f}]") + + # Save raw input + with open(args.raw, "wb") as f: + input_data.astype(np.float32).tofile(f) + print(f"Saved raw input to {args.raw}") + + # Convert to model format: (batch=1, channels=4, length=vectors) + model_input = torch.from_numpy(input_data.T[np.newaxis, :, :]) # (1, 4, vectors) + print(f"Model input tensor shape: 
{model_input.shape}") + + # Run inference + with torch.no_grad(): + output = model(model_input) + output_np = output.cpu().numpy().astype(np.float32) + print(f"Output shape: {output_np.shape}") + print(f"Output range: [{output_np.min():.4f}, {output_np.max():.4f}]") + + # Save reference output + with open(args.ref, "wb") as f: + output_np.astype(np.float32).tofile(f) + print(f"Saved reference output to {args.ref}") + + # Save model state + torch.save(model.state_dict(), args.pt) + print(f"Saved model state to {args.pt}") + + # Export to ONNX + dummy_input = torch.randn(1, 4, args.vectors) + torch.onnx.export( + model, + dummy_input, + args.onnx, + input_names=["input"], + output_names=["output"], + opset_version=13, + do_constant_folding=True, + verbose=False, + ) + print(f"Exported ONNX model to {args.onnx}") + print("\nConvert ONNX to MNN with:") + print(f" MNNConvert -f ONNX --modelFile {args.onnx} --MNNModel {args.onnx.replace('.onnx', '.mnn')} --bizCode biz") + print("\nStats:") + print(f" Input: min={input_data.min():.3f}, max={input_data.max():.3f}, mean={input_data.mean():.3f}") + print(f" Reference: min={output_np.min():.3f}, max={output_np.max():.3f}, mean={output_np.mean():.3f}") + + +if __name__ == "__main__": + main() diff --git a/test/src/processing_datatypes/float-processing-definition.json b/test/src/processing_datatypes/float-processing-definition.json new file mode 100644 index 000000000..c9f37eeae --- /dev/null +++ b/test/src/processing_datatypes/float-processing-definition.json @@ -0,0 +1,76 @@ +{ + "name": "float-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for float input using a tiny MLP model", + "tags": ["float", "signal", "test"], + + "inputs": [ + { + "name": "inputFloat", + "source_uri_param": "file://processing_datatypes/float_input.bin", + "type": "float", + "description": "Input float vector", + "dimensions": { + "width": 512, + "block_len": 64, 
+ "chunk_stride": 32 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "floatOutput", + "source_uri_param": "file://processing_datatypes/float_output.raw", + "type": "tensor", + "description": "Output tensor from float model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/float_model.mnn", + "description": "URI to the float MNN model" + } + ], + + "passes": [ + { + "name": "float_inference", + "type": "inference", + "description": "Float model inference", + "model": { + "source_uri_param": "file://processing_datatypes/float_model.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputFloat", + "shape": [1, 64] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:floatOutput", + "shape": [1, 64] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-16", + "framework": "MNN", + "test_case": "float_datatype" + } +} \ No newline at end of file diff --git a/test/src/processing_datatypes/float_input.bin b/test/src/processing_datatypes/float_input.bin new file mode 100644 index 000000000..a8710505c Binary files /dev/null and b/test/src/processing_datatypes/float_input.bin differ diff --git a/test/src/processing_datatypes/float_model.mnn b/test/src/processing_datatypes/float_model.mnn new file mode 100644 index 000000000..ebee97351 Binary files /dev/null and b/test/src/processing_datatypes/float_model.mnn differ diff --git a/test/src/processing_datatypes/float_output_pt.raw b/test/src/processing_datatypes/float_output_pt.raw new file mode 100644 index 000000000..6f00c57d0 Binary files /dev/null and b/test/src/processing_datatypes/float_output_pt.raw differ diff --git a/test/src/processing_datatypes/int-processing-definition.json b/test/src/processing_datatypes/int-processing-definition.json new file mode 100644 index 000000000..056b57eef --- /dev/null +++ 
b/test/src/processing_datatypes/int-processing-definition.json @@ -0,0 +1,76 @@ +{ + "name": "int-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for int input using a tiny MLP model", + "tags": ["int", "signal", "test"], + + "inputs": [ + { + "name": "inputInt", + "source_uri_param": "file://processing_datatypes/int_input.bin", + "type": "int", + "description": "Input int vector", + "dimensions": { + "width": 512, + "block_len": 64, + "chunk_stride": 32 + }, + "format": "INT32" + } + ], + + "outputs": [ + { + "name": "intOutput", + "source_uri_param": "file://processing_datatypes/int_output.raw", + "type": "tensor", + "description": "Output tensor from int model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/int_model.mnn", + "description": "URI to the int MNN model" + } + ], + + "passes": [ + { + "name": "int_inference", + "type": "inference", + "description": "Int model inference", + "model": { + "source_uri_param": "file://processing_datatypes/int_model.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputInt", + "shape": [1, 64] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:intOutput", + "shape": [1, 64] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-16", + "framework": "MNN", + "test_case": "int_datatype" + } +} \ No newline at end of file diff --git a/test/src/processing_datatypes/int_input.bin b/test/src/processing_datatypes/int_input.bin new file mode 100644 index 000000000..e5f83126b Binary files /dev/null and b/test/src/processing_datatypes/int_input.bin differ diff --git a/test/src/processing_datatypes/int_model.mnn b/test/src/processing_datatypes/int_model.mnn new file mode 100644 index 000000000..cebc6c697 Binary files /dev/null and 
b/test/src/processing_datatypes/int_model.mnn differ diff --git a/test/src/processing_datatypes/int_output_pt.raw b/test/src/processing_datatypes/int_output_pt.raw new file mode 100644 index 000000000..4cc0794a3 Binary files /dev/null and b/test/src/processing_datatypes/int_output_pt.raw differ diff --git a/test/src/processing_datatypes/mat2-processing-definition.json b/test/src/processing_datatypes/mat2-processing-definition.json new file mode 100644 index 000000000..670b46372 --- /dev/null +++ b/test/src/processing_datatypes/mat2-processing-definition.json @@ -0,0 +1,76 @@ +{ + "name": "mat2-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for mat2 input using a tiny Conv1D model", + "tags": ["mat2", "matrix", "test"], + + "inputs": [ + { + "name": "inputMat2", + "source_uri_param": "file://processing_datatypes/mat2_input.raw", + "type": "mat2", + "description": "Input mat2 array (row-major)", + "dimensions": { + "width": 16, + "block_len": 8, + "chunk_stride": 4 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "mat2Output", + "source_uri_param": "file://processing_datatypes/mat2_output.raw", + "type": "tensor", + "description": "Output tensor from mat2 model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/mat2_tiny.mnn", + "description": "URI to the mat2 MNN model" + } + ], + + "passes": [ + { + "name": "mat2_inference", + "type": "inference", + "description": "Mat2 model inference", + "model": { + "source_uri_param": "file://processing_datatypes/mat2_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputMat2", + "shape": [1, 4, 8] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:mat2Output", + "shape": [1, 4, 8] + } + ] + } + } + ], + + "metadata": { + "created_date": 
"2026-02-16", + "framework": "MNN", + "test_case": "mat2_datatype" + } +} diff --git a/test/src/processing_datatypes/mat2_input.raw b/test/src/processing_datatypes/mat2_input.raw new file mode 100644 index 000000000..e2cc25710 Binary files /dev/null and b/test/src/processing_datatypes/mat2_input.raw differ diff --git a/test/src/processing_datatypes/mat2_output_pt.raw b/test/src/processing_datatypes/mat2_output_pt.raw new file mode 100644 index 000000000..f946d9af6 Binary files /dev/null and b/test/src/processing_datatypes/mat2_output_pt.raw differ diff --git a/test/src/processing_datatypes/mat2_tiny.mnn b/test/src/processing_datatypes/mat2_tiny.mnn new file mode 100644 index 000000000..99470134d Binary files /dev/null and b/test/src/processing_datatypes/mat2_tiny.mnn differ diff --git a/test/src/processing_datatypes/mat3-processing-definition.json b/test/src/processing_datatypes/mat3-processing-definition.json new file mode 100644 index 000000000..be5a155c5 --- /dev/null +++ b/test/src/processing_datatypes/mat3-processing-definition.json @@ -0,0 +1,76 @@ +{ + "name": "mat3-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for mat3 input using a tiny Conv1D model", + "tags": ["mat3", "matrix", "test"], + + "inputs": [ + { + "name": "inputMat3", + "source_uri_param": "file://processing_datatypes/mat3_input.raw", + "type": "mat3", + "description": "Input mat3 array (row-major)", + "dimensions": { + "width": 12, + "block_len": 6, + "chunk_stride": 3 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "mat3Output", + "source_uri_param": "file://processing_datatypes/mat3_output.raw", + "type": "tensor", + "description": "Output tensor from mat3 model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/mat3_tiny.mnn", + "description": "URI to the mat3 MNN model" + } + ], + + "passes": [ + { + "name": 
"mat3_inference", + "type": "inference", + "description": "Mat3 model inference", + "model": { + "source_uri_param": "file://processing_datatypes/mat3_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputMat3", + "shape": [1, 9, 6] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:mat3Output", + "shape": [1, 9, 6] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-16", + "framework": "MNN", + "test_case": "mat3_datatype" + } +} diff --git a/test/src/processing_datatypes/mat3_input.raw b/test/src/processing_datatypes/mat3_input.raw new file mode 100644 index 000000000..cd3932ddb Binary files /dev/null and b/test/src/processing_datatypes/mat3_input.raw differ diff --git a/test/src/processing_datatypes/mat3_output_pt.raw b/test/src/processing_datatypes/mat3_output_pt.raw new file mode 100644 index 000000000..253159069 Binary files /dev/null and b/test/src/processing_datatypes/mat3_output_pt.raw differ diff --git a/test/src/processing_datatypes/mat3_tiny.mnn b/test/src/processing_datatypes/mat3_tiny.mnn new file mode 100644 index 000000000..cfb5287e7 Binary files /dev/null and b/test/src/processing_datatypes/mat3_tiny.mnn differ diff --git a/test/src/processing_datatypes/mat4-processing-definition.json b/test/src/processing_datatypes/mat4-processing-definition.json new file mode 100644 index 000000000..60a133d35 --- /dev/null +++ b/test/src/processing_datatypes/mat4-processing-definition.json @@ -0,0 +1,76 @@ +{ + "name": "mat4-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for mat4 input using a tiny Conv1D model", + "tags": ["mat4", "matrix", "test"], + + "inputs": [ + { + "name": "inputMat4", + "source_uri_param": "file://processing_datatypes/mat4_input.raw", + "type": "mat4", + "description": "Input mat4 array (row-major)", + "dimensions": 
{ + "width": 10, + "block_len": 5, + "chunk_stride": 2 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "mat4Output", + "source_uri_param": "file://processing_datatypes/mat4_output.raw", + "type": "tensor", + "description": "Output tensor from mat4 model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/mat4_tiny.mnn", + "description": "URI to the mat4 MNN model" + } + ], + + "passes": [ + { + "name": "mat4_inference", + "type": "inference", + "description": "Mat4 model inference", + "model": { + "source_uri_param": "file://processing_datatypes/mat4_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputMat4", + "shape": [1, 16, 5] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:mat4Output", + "shape": [1, 16, 5] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-16", + "framework": "MNN", + "test_case": "mat4_datatype" + } +} diff --git a/test/src/processing_datatypes/mat4_input.raw b/test/src/processing_datatypes/mat4_input.raw new file mode 100644 index 000000000..e3a07c2a6 Binary files /dev/null and b/test/src/processing_datatypes/mat4_input.raw differ diff --git a/test/src/processing_datatypes/mat4_output_pt.raw b/test/src/processing_datatypes/mat4_output_pt.raw new file mode 100644 index 000000000..18d902aec Binary files /dev/null and b/test/src/processing_datatypes/mat4_output_pt.raw differ diff --git a/test/src/processing_datatypes/mat4_tiny.mnn b/test/src/processing_datatypes/mat4_tiny.mnn new file mode 100644 index 000000000..122707719 Binary files /dev/null and b/test/src/processing_datatypes/mat4_tiny.mnn differ diff --git a/test/src/processing_datatypes/mnn_logits_to_nii.py b/test/src/processing_datatypes/mnn_logits_to_nii.py new file mode 100644 index 000000000..10e0dc85d --- /dev/null +++ 
b/test/src/processing_datatypes/mnn_logits_to_nii.py @@ -0,0 +1,105 @@ +import argparse +from pathlib import Path + +import numpy as np +import nibabel as nib +import torch + +try: + from monai.transforms import ( + Compose, + LoadImaged, + EnsureChannelFirstd, + Orientationd, + Spacingd, + ScaleIntensityRanged, + EnsureTyped, + Invertd, + AsDiscreted, + Activationsd, + ) + from monai.data import MetaTensor +except Exception as exc: + raise RuntimeError("monai is required: pip install monai") from exc + + +def main() -> int: + parser = argparse.ArgumentParser(description="Convert stitched MNN logits to NIfTI using MONAI inverse transforms") + parser.add_argument("--image", required=True, help="Path to original input .nii.gz") + parser.add_argument("--logits", required=True, help="Path to stitched_logits.raw") + parser.add_argument("--h", type=int, required=True, help="Height of preprocessed volume") + parser.add_argument("--w", type=int, required=True, help="Width of preprocessed volume") + parser.add_argument("--d", type=int, required=True, help="Depth of preprocessed volume") + parser.add_argument("--out", required=True, help="Output .nii.gz path") + parser.add_argument("--channels", type=int, default=2, help="Number of output channels") + parser.add_argument("--no-invert", action="store_true", help="Skip Invertd and save in preprocessed space") + args = parser.parse_args() + + logits_path = Path(args.logits) + if not logits_path.exists(): + raise FileNotFoundError(f"Logits not found: {logits_path}") + + data = np.fromfile(str(logits_path), dtype=np.float32) + expected = args.channels * args.h * args.w * args.d + if data.size != expected: + raise ValueError(f"Expected {expected} floats, got {data.size}") + + logits = data.reshape((args.channels, args.h, args.w, args.d)) + logits = torch.from_numpy(logits) + + preprocessing = Compose([ + LoadImaged(keys=["image"]), + EnsureChannelFirstd(keys=["image"]), + Orientationd(keys=["image"], axcodes="RAS"), + 
Spacingd(keys=["image"], pixdim=[1.5, 1.5, 2.0], mode="bilinear"), + ScaleIntensityRanged(keys=["image"], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True), + EnsureTyped(keys=["image"]), + ]) + + sample = preprocessing({"image": args.image}) + + if args.no_invert: + post = Compose([ + Activationsd(keys="pred", softmax=True), + AsDiscreted(keys="pred", argmax=True), + ]) + else: + post = Compose([ + Activationsd(keys="pred", softmax=True), + Invertd(keys="pred", transform=preprocessing, orig_keys="image", nearest_interp=False, to_tensor=True), + AsDiscreted(keys="pred", argmax=True), + ]) + + image_meta = sample["image"] + if isinstance(image_meta, MetaTensor): + pred = MetaTensor(logits, meta=image_meta.meta) + else: + pred = logits + + output = post({"pred": pred, "image": image_meta}) + pred = output["pred"] + if isinstance(pred, torch.Tensor): + pred_np = pred.cpu().numpy().astype(np.uint8) + else: + pred_np = np.asarray(pred, dtype=np.uint8) + + if pred_np.ndim == 4 and pred_np.shape[0] == 1: + pred_np = pred_np[0] + + print(f"Output shape: {pred_np.shape}") + + img = nib.load(args.image) + if args.no_invert and isinstance(image_meta, MetaTensor) and "affine" in image_meta.meta: + affine = image_meta.meta["affine"] + else: + affine = img.affine + out_img = nib.Nifti1Image(pred_np, affine=affine) + out_path = Path(args.out) + out_path.parent.mkdir(parents=True, exist_ok=True) + nib.save(out_img, str(out_path)) + print(f"Wrote NIfTI: {out_path}") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/test/src/processing_datatypes/processing_datatypes_test.cpp b/test/src/processing_datatypes/processing_datatypes_test.cpp new file mode 100644 index 000000000..57a71d9b9 --- /dev/null +++ b/test/src/processing_datatypes/processing_datatypes_test.cpp @@ -0,0 +1,2098 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "SGNSProcMain.hpp" +#include 
"Generators.hpp" +#include + +namespace sgns +{ + // Helper function to patch relative file:// URIs in JSON to absolute paths + // This ensures tests work correctly regardless of working directory + static std::string PatchJsonUrisToAbsolute( const std::string &json_str, const std::string &bin_path ) + { + std::string result; + std::string normalized_bin_path = bin_path; + + // Normalize backslashes to forward slashes in bin_path + for ( auto &c : normalized_bin_path ) + { + if ( c == '\\' ) + c = '/'; + } + + // Pattern to match file:// URIs with relative paths (not already absolute) + // Matches: file://path/to/file + // Excludes: file:///absolute/path (triple slash) or file://C:/windows/path + std::regex relative_file_uri_pattern( R"delim("(file://(?!/)(?![A-Za-z]:)[^"]+)")delim" ); + + // Use regex_iterator to find and replace all matches manually + size_t last_pos = 0; + std::sregex_iterator iter( json_str.begin(), json_str.end(), relative_file_uri_pattern ); + std::sregex_iterator end; + + while ( iter != end ) + { + // Add text before the match + result += json_str.substr( last_pos, iter->position() - last_pos ); + + std::string original_uri = (*iter)[1].str(); + // Extract the relative part after "file://" + std::string relative_path = original_uri.substr( 7 ); // Skip "file://" + + // Build absolute URI using bin_path + std::string absolute_uri = "file://" + normalized_bin_path + relative_path; + + // Add the replacement + result += "\"" + absolute_uri + "\""; + + last_pos = iter->position() + iter->length(); + ++iter; + } + + // Add any remaining text after the last match + result += json_str.substr( last_pos ); + + return result; + } + + class ProcessingDatatypesTest : public ::testing::Test + { + protected: + static inline std::string binary_path = ""; + + static void SetUpTestSuite() + { + } + + static void TearDownTestSuite() + { + } + }; + + TEST_F( ProcessingDatatypesTest, StringInputValidationTest ) + { + std::string bin_path = 
boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + // Load test instance file + std::string instance_file = data_path + "string-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + // Create ProcessingManager and initialize with JSON + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + // Get the processing data to validate + auto processing = manager->GetProcessingData(); + + // Test basic fields + ASSERT_EQ( processing.get_name(), "bert-tiny-string-test" ); + ASSERT_EQ( processing.get_version(), "1.0.0" ); + ASSERT_EQ( processing.get_gnus_spec_version(), 1.0 ); + + // Test inputs array - should have one string input + const auto &inputs = processing.get_inputs(); + ASSERT_EQ( inputs.size(), 1 ) << "Should have exactly 1 input"; + ASSERT_EQ( inputs[0].get_name(), "inputText" ); + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::STRING ); + // After patching, the URI should be absolute + ASSERT_TRUE( inputs[0].get_source_uri_param().find( "file://" ) == 0 ); + ASSERT_TRUE( inputs[0].get_source_uri_param().find( "test_input.txt" ) != std::string::npos ); + + // Validate string input does NOT require dimensions (unlike texture2D) + ASSERT_FALSE( 
inputs[0].get_dimensions().has_value() ) + << "String input should not have dimensions"; + + // Test outputs array + const auto &outputs = processing.get_outputs(); + ASSERT_EQ( outputs.size(), 1 ); + ASSERT_EQ( outputs[0].get_name(), "textEmbedding" ); + ASSERT_EQ( outputs[0].get_type(), sgns::DataType::TENSOR ); + + // Test parameters + ASSERT_TRUE( processing.get_parameters().has_value() ); + auto parameters = processing.get_parameters().value(); + ASSERT_EQ( parameters.size(), 4 ); + + // Validate modelUri parameter + ASSERT_EQ( parameters[0].get_name(), "modelUri" ); + ASSERT_EQ( parameters[0].get_type(), sgns::ParameterType::URI ); + + // Validate tokenizerMode parameter + ASSERT_EQ( parameters[1].get_name(), "tokenizerMode" ); + ASSERT_EQ( parameters[1].get_type(), sgns::ParameterType::STRING ); + + // Validate textInput parameter + ASSERT_EQ( parameters[2].get_name(), "textInput" ); + ASSERT_EQ( parameters[2].get_type(), sgns::ParameterType::STRING ); + + // Validate maxLength parameter + ASSERT_EQ( parameters[3].get_name(), "maxLength" ); + ASSERT_EQ( parameters[3].get_type(), sgns::ParameterType::INT ); + + // Test passes array + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + ASSERT_EQ( passes[0].get_name(), "text_inference" ); + ASSERT_EQ( passes[0].get_type(), sgns::PassType::INFERENCE ); + + // Validate model configuration + ASSERT_TRUE( passes[0].get_model().has_value() ); + // Note: Must be a copy, not reference - get_model().value() may return temporary + const auto model = passes[0].get_model().value(); + ASSERT_EQ( model.get_format(), sgns::ModelFormat::MNN ); + ASSERT_TRUE( !model.get_source_uri_param().empty() ); + // After patching, the URI should be absolute and contain the filename + auto model_uri = model.get_source_uri_param(); + ASSERT_TRUE( model_uri.find( "file://" ) == 0 ) << "URI should start with file://"; + ASSERT_TRUE( model_uri.find( "bert-tiny.mnn" ) != std::string::npos ) << "URI should contain 
the filename"; + + // Validate input nodes + const auto &input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + ASSERT_EQ( input_nodes[0].get_name(), "input_ids" ); + ASSERT_EQ( input_nodes[0].get_type(), sgns::DataType::TENSOR ); + ASSERT_TRUE( input_nodes[0].get_source().has_value() ); + ASSERT_EQ( input_nodes[0].get_source().value(), "input:inputText" ); + + // Validate output nodes + const auto &output_nodes = model.get_output_nodes(); + ASSERT_EQ( output_nodes.size(), 1 ); + ASSERT_EQ( output_nodes[0].get_name(), "output" ); + ASSERT_EQ( output_nodes[0].get_type(), sgns::DataType::TENSOR ); + ASSERT_TRUE( output_nodes[0].get_target().has_value() ); + ASSERT_EQ( output_nodes[0].get_target().value(), "output:textEmbedding" ); + + std::cout << "String input validation test passed successfully" << std::endl; + std::cout << "Input type: " << static_cast(inputs[0].get_type()) << " (STRING)" << std::endl; + std::cout << "Model format: " << static_cast(model.get_format()) << " (MNN)" << std::endl; + } + + TEST_F( ProcessingDatatypesTest, StringInputConstraintsTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + // Load test instance file + std::string instance_file = data_path + "string-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ); + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + + // Patch relative file:// URIs to absolute paths for debugger compatibility + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + // Parse and verify CheckProcessValidity succeeds for string inputs + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) + << "ProcessingManager validation should pass 
for valid string input: " + << manager_result.error().message(); + + std::cout << "String input passed CheckProcessValidity (no dimension requirements)" << std::endl; + } + + TEST_F( ProcessingDatatypesTest, StringInputProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + // Load test instance file + std::string instance_file = data_path + "string-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + // Create ProcessingManager and initialize with JSON + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + // Get the processing data to access model nodes + auto processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + // Note: Must be a copy, not reference - get_model().value() may return temporary + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + // Create IO context for async operations + auto ioc = std::make_shared(); + + // Create vector to store chunk hashes + std::vector> chunkhashes; + + // 
Get a mutable copy of the first input node + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager..." << std::endl; + + // Call Process() - this will load the model and text file and run inference + auto process_result = manager->Process( ioc, chunkhashes, model_node ); + + if ( process_result.has_value() ) { + std::cout << "Process() succeeded!" << std::endl; + std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl; + std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl; + + ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + } else { + std::cout << "Process() failed: " << process_result.error().message() << std::endl; + FAIL() << "Process() should succeed: " << process_result.error().message(); + } + } + + TEST_F( ProcessingDatatypesTest, Texture3DValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "texture3d-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + 
ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &inputs = processing.get_inputs(); + ASSERT_EQ( inputs.size(), 1 ); + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::TEXTURE3_D ); + ASSERT_TRUE( inputs[0].get_dimensions().has_value() ); + auto dims = inputs[0].get_dimensions().value(); + ASSERT_EQ( dims.get_width().value(), 253 ); + ASSERT_EQ( dims.get_height().value(), 253 ); + ASSERT_EQ( dims.get_chunk_count().value(), 94 ); + ASSERT_EQ( dims.get_chunk_subchunk_width().value(), 96 ); + ASSERT_EQ( dims.get_chunk_subchunk_height().value(), 96 ); + ASSERT_EQ( dims.get_block_len().value(), 96 ); + } + + TEST_F( ProcessingDatatypesTest, Texture3DProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "texture3d-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + 
const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (texture3D)..." << std::endl; + + auto process_result = manager->Process( ioc, chunkhashes, model_node ); + + if ( process_result.has_value() ) { + std::cout << "Process() succeeded!" << std::endl; + std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl; + std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl; + + ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + } else { + std::cout << "Process() failed: " << process_result.error().message() << std::endl; + FAIL() << "Process() should succeed: " << process_result.error().message(); + } + } + + TEST_F( ProcessingDatatypesTest, Texture1DValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "texture1d-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create 
ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &inputs = processing.get_inputs(); + ASSERT_EQ( inputs.size(), 1 ); + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::TEXTURE1_D ); + ASSERT_TRUE( inputs[0].get_dimensions().has_value() ); + auto dims = inputs[0].get_dimensions().value(); + ASSERT_EQ( dims.get_width().value(), 256 ); + ASSERT_EQ( dims.get_block_len().value(), 256 ); + ASSERT_EQ( dims.get_chunk_stride().value(), 256 ); + ASSERT_TRUE( inputs[0].get_format().has_value() ); + ASSERT_EQ( inputs[0].get_format().value(), sgns::InputFormat::FLOAT32 ); + } + + TEST_F( ProcessingDatatypesTest, Texture1DProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "texture1d-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( 
passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (texture1D)..." << std::endl; + + auto process_result = manager->Process( ioc, chunkhashes, model_node ); + + if ( process_result.has_value() ) { + std::cout << "Process() succeeded!" << std::endl; + std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl; + std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl; + + ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + } else { + std::cout << "Process() failed: " << process_result.error().message() << std::endl; + FAIL() << "Process() should succeed: " << process_result.error().message(); + } + + const std::string output_file = data_path + "texture1d_output.raw"; + const std::string reference_file = data_path + "texture1d_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg ); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( 
output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + reference_stream.read( reinterpret_cast( reference_data.data() ), reference_size ); + + double max_abs_diff = 0.0; + double mean_abs_diff = 0.0; + float output_min = std::numeric_limits::infinity(); + float output_max = -std::numeric_limits::infinity(); + size_t nonzero_count = 0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const double diff = std::abs( static_cast( output_data[i] ) - + static_cast( reference_data[i] ) ); + mean_abs_diff += diff; + if ( diff > max_abs_diff ) + { + max_abs_diff = diff; + } + + output_min = std::min( output_min, output_data[i] ); + output_max = std::max( output_max, output_data[i] ); + if ( std::abs( output_data[i] ) > 1e-8f ) + { + ++nonzero_count; + } + } + mean_abs_diff /= static_cast( output_data.size() ); + + std::cout << "Texture1D output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl; + std::cout << "Texture1D output stats: min=" << output_min << " max=" << output_max + << " nonzero=" << nonzero_count << "/" << output_data.size() << std::endl; + + ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large"; + ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large"; + } + + TEST_F( ProcessingDatatypesTest, BoolValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "bool-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << 
"Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &inputs = processing.get_inputs(); + ASSERT_EQ( inputs.size(), 1 ); + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::BOOL ); + ASSERT_TRUE( inputs[0].get_dimensions().has_value() ); + auto dims = inputs[0].get_dimensions().value(); + ASSERT_EQ( dims.get_width().value(), 8 ); + ASSERT_EQ( dims.get_block_len().value(), 8 ); + ASSERT_EQ( dims.get_chunk_stride().value(), 8 ); + ASSERT_TRUE( inputs[0].get_format().has_value() ); + ASSERT_EQ( inputs[0].get_format().value(), sgns::InputFormat::FLOAT32 ); + } + + TEST_F( ProcessingDatatypesTest, BoolProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "bool-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) 
<< "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (bool)..." << std::endl; + + auto process_result = manager->Process( ioc, chunkhashes, model_node ); + + if ( process_result.has_value() ) { + std::cout << "Process() succeeded!" << std::endl; + std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl; + std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl; + + ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + } else { + std::cout << "Process() failed: " << process_result.error().message() << std::endl; + FAIL() << "Process() should succeed: " << process_result.error().message(); + } + + const std::string output_file = data_path + "bool_output.raw"; + const std::string reference_file = data_path + "bool_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = 
output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg ); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + reference_stream.read( reinterpret_cast( reference_data.data() ), reference_size ); + + double max_abs_diff = 0.0; + double mean_abs_diff = 0.0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const double diff = std::abs( static_cast( output_data[i] ) - + static_cast( reference_data[i] ) ); + mean_abs_diff += diff; + if ( diff > max_abs_diff ) + { + max_abs_diff = diff; + } + } + mean_abs_diff /= static_cast( output_data.size() ); + + std::cout << "Bool output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl; + + ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large"; + ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large"; + } + + TEST_F( ProcessingDatatypesTest, BufferValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "buffer-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = 
sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &inputs = processing.get_inputs(); + ASSERT_EQ( inputs.size(), 1 ); + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::BUFFER ); + ASSERT_TRUE( inputs[0].get_dimensions().has_value() ); + auto dims = inputs[0].get_dimensions().value(); + ASSERT_EQ( dims.get_width().value(), 16 ); + ASSERT_EQ( dims.get_block_len().value(), 16 ); + ASSERT_EQ( dims.get_chunk_stride().value(), 16 ); + ASSERT_TRUE( inputs[0].get_format().has_value() ); + ASSERT_EQ( inputs[0].get_format().value(), sgns::InputFormat::INT8 ); + } + + TEST_F( ProcessingDatatypesTest, BufferProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "buffer-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto 
processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (buffer)..." << std::endl; + + auto process_result = manager->Process( ioc, chunkhashes, model_node ); + + if ( process_result.has_value() ) { + std::cout << "Process() succeeded!" << std::endl; + std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl; + std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl; + + ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + } else { + std::cout << "Process() failed: " << process_result.error().message() << std::endl; + FAIL() << "Process() should succeed: " << process_result.error().message(); + } + + const std::string output_file = data_path + "buffer_output.raw"; + const std::string reference_file = data_path + "buffer_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg 
); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + reference_stream.read( reinterpret_cast( reference_data.data() ), reference_size ); + + double max_abs_diff = 0.0; + double mean_abs_diff = 0.0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const double diff = std::abs( static_cast( output_data[i] ) - + static_cast( reference_data[i] ) ); + mean_abs_diff += diff; + if ( diff > max_abs_diff ) + { + max_abs_diff = diff; + } + } + mean_abs_diff /= static_cast( output_data.size() ); + + std::cout << "Buffer output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl; + + ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large"; + ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large"; + } + + TEST_F( ProcessingDatatypesTest, FloatValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "float-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto 
manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &inputs = processing.get_inputs(); + ASSERT_EQ( inputs.size(), 1 ); + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::FLOAT ); + ASSERT_TRUE( inputs[0].get_dimensions().has_value() ); + auto dims = inputs[0].get_dimensions().value(); + ASSERT_EQ( dims.get_width().value(), 512 ); + ASSERT_TRUE( inputs[0].get_format().has_value() ); + ASSERT_EQ( inputs[0].get_format().value(), sgns::InputFormat::FLOAT32 ); + } + + TEST_F( ProcessingDatatypesTest, FloatProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "float-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( 
input_nodes.size(), 1 ); + + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (float)..." << std::endl; + + auto process_result = manager->Process( ioc, chunkhashes, model_node ); + + if ( process_result.has_value() ) { + std::cout << "Process() succeeded!" << std::endl; + std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl; + std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl; + + ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + } else { + std::cout << "Process() failed: " << process_result.error().message() << std::endl; + FAIL() << "Process() should succeed: " << process_result.error().message(); + } + + const std::string output_file = data_path + "float_output.raw"; + const std::string reference_file = data_path + "float_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg ); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + 
reference_stream.read( reinterpret_cast( reference_data.data() ), reference_size ); + + double max_abs_diff = 0.0; + double mean_abs_diff = 0.0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const double diff = std::abs( static_cast( output_data[i] ) - + static_cast( reference_data[i] ) ); + mean_abs_diff += diff; + if ( diff > max_abs_diff ) + { + max_abs_diff = diff; + } + } + mean_abs_diff /= static_cast( output_data.size() ); + + std::cout << "Float output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl; + + ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large"; + ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large"; + } + + TEST_F( ProcessingDatatypesTest, IntValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "int-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &inputs = processing.get_inputs(); + ASSERT_EQ( inputs.size(), 1 ); + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::INT ); + 
ASSERT_TRUE( inputs[0].get_dimensions().has_value() ); + auto dims = inputs[0].get_dimensions().value(); + ASSERT_EQ( dims.get_width().value(), 512 ); + ASSERT_TRUE( inputs[0].get_format().has_value() ); + ASSERT_EQ( inputs[0].get_format().value(), sgns::InputFormat::INT32 ); + } + + TEST_F( ProcessingDatatypesTest, IntProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "int-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (int)..." 
<< std::endl; + + auto process_result = manager->Process( ioc, chunkhashes, model_node ); + + if ( process_result.has_value() ) { + std::cout << "Process() succeeded!" << std::endl; + std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl; + std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl; + + ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + } else { + std::cout << "Process() failed: " << process_result.error().message() << std::endl; + FAIL() << "Process() should succeed: " << process_result.error().message(); + } + + const std::string output_file = data_path + "int_output.raw"; + const std::string reference_file = data_path + "int_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg ); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + reference_stream.read( reinterpret_cast( reference_data.data() ), reference_size ); + + double max_abs_diff = 0.0; + double mean_abs_diff = 0.0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const double 
diff = std::abs( static_cast<double>( output_data[i] ) -
+                                      static_cast<double>( reference_data[i] ) );
+        mean_abs_diff += diff;
+        if ( diff > max_abs_diff )
+        {
+            max_abs_diff = diff;
+        }
+    }
+    mean_abs_diff /= static_cast<double>( output_data.size() );
+
+    std::cout << "Int output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl;
+
+    ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large";
+    ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large";
+}
+
+TEST_F( ProcessingDatatypesTest, Mat2ValidationTest )
+{
+    std::string bin_path = boost::dll::program_location().parent_path().string() + "/";
+    std::string data_path = bin_path + "./processing_datatypes/";
+
+    std::string instance_file = data_path + "mat2-processing-definition.json";
+    std::ifstream instance_stream( instance_file );
+    ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file;
+
+    std::string instance_str( ( std::istreambuf_iterator<char>( instance_stream ) ),
+                              std::istreambuf_iterator<char>() );
+    instance_stream.close();
+    ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty";
+
+    // Patch relative file:// URIs to absolute paths for debugger compatibility
+
+    instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path );
+
+    auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str );
+    ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: "
+                                              << manager_result.error().message();
+
+    auto manager = manager_result.value();
+    ASSERT_NE( manager, nullptr ) << "ProcessingManager is null";
+
+    auto processing = manager->GetProcessingData();
+    const auto &inputs = processing.get_inputs();
+    ASSERT_EQ( inputs.size(), 1 );
+    ASSERT_EQ( inputs[0].get_type(), sgns::DataType::MAT2 );
+    ASSERT_TRUE( inputs[0].get_dimensions().has_value() );
+    auto dims = inputs[0].get_dimensions().value();
+    ASSERT_EQ( dims.get_width().value(), 16 );
+    ASSERT_EQ( dims.get_block_len().value(), 8 );
+    ASSERT_EQ( dims.get_chunk_stride().value(), 4 );
+    ASSERT_TRUE( inputs[0].get_format().has_value() );
+    ASSERT_EQ( inputs[0].get_format().value(), sgns::InputFormat::FLOAT32 );
+}
+
+TEST_F( ProcessingDatatypesTest, Mat2ProcessingTest )
+{
+    std::string bin_path = boost::dll::program_location().parent_path().string() + "/";
+    std::string data_path = bin_path + "./processing_datatypes/";
+
+    std::string instance_file = data_path + "mat2-processing-definition.json";
+    std::ifstream instance_stream( instance_file );
+    ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file;
+
+    std::string instance_str( ( std::istreambuf_iterator<char>( instance_stream ) ),
+                              std::istreambuf_iterator<char>() );
+    instance_stream.close();
+    ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty";
+
+    // Patch relative file:// URIs to absolute paths for debugger compatibility
+
+    instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path );
+
+    auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str );
+    ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: "
+                                              << manager_result.error().message();
+
+    auto manager = manager_result.value();
+    ASSERT_NE( manager, nullptr ) << "ProcessingManager is null";
+
+    auto processing = manager->GetProcessingData();
+    const auto &passes = processing.get_passes();
+    ASSERT_EQ( passes.size(), 1 );
+
+    ASSERT_TRUE( passes[0].get_model().has_value() );
+    const auto model = passes[0].get_model().value();
+    const auto input_nodes = model.get_input_nodes();
+    ASSERT_EQ( input_nodes.size(), 1 );
+
+    auto ioc = std::make_shared<boost::asio::io_context>();
+    std::vector<std::vector<uint8_t>> chunkhashes;
+    sgns::ModelNode model_node = input_nodes[0];
+
+    std::cout << "Calling Process() on ProcessingManager (mat2)..." << std::endl;
+
+    auto process_result = manager->Process( ioc, chunkhashes, model_node );
+
+    if ( process_result.has_value() ) {
+        std::cout << "Process() succeeded!" << std::endl;
+        std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl;
+        std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl;
+
+        ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty";
+        ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash";
+    } else {
+        std::cout << "Process() failed: " << process_result.error().message() << std::endl;
+        FAIL() << "Process() should succeed: " << process_result.error().message();
+    }
+
+    const std::string output_file = data_path + "mat2_output.raw";
+    const std::string reference_file = data_path + "mat2_output_pt.raw";
+
+    std::ifstream output_stream( output_file, std::ios::binary );
+    ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file;
+
+    std::ifstream reference_stream( reference_file, std::ios::binary );
+    if ( !reference_stream.is_open() )
+    {
+        GTEST_SKIP() << "Reference output file not found: " << reference_file;
+    }
+
+    output_stream.seekg( 0, std::ios::end );
+    reference_stream.seekg( 0, std::ios::end );
+    const auto output_size = output_stream.tellg();
+    const auto reference_size = reference_stream.tellg();
+    ASSERT_EQ( output_size, reference_size ) << "Output size mismatch";
+
+    output_stream.seekg( 0, std::ios::beg );
+    reference_stream.seekg( 0, std::ios::beg );
+
+    std::vector<float> output_data( static_cast<size_t>( output_size ) / sizeof( float ) );
+    std::vector<float> reference_data( static_cast<size_t>( reference_size ) / sizeof( float ) );
+
+    output_stream.read( reinterpret_cast<char *>( output_data.data() ), output_size );
+    reference_stream.read( reinterpret_cast<char *>( reference_data.data() ), reference_size );
+
+    double max_abs_diff = 0.0;
+    double mean_abs_diff = 0.0;
+    for ( size_t i = 0; i < output_data.size(); ++i )
+    {
+        const double diff = std::abs( static_cast<double>( output_data[i] ) -
+                                      static_cast<double>( reference_data[i] ) );
+        mean_abs_diff += diff;
+        if ( diff > max_abs_diff )
+        {
+            max_abs_diff = diff;
+        }
+    }
+    mean_abs_diff /= static_cast<double>( output_data.size() );
+
+    std::cout << "Mat2 output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl;
+
+    ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large";
+    ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large";
+}
+
+TEST_F( ProcessingDatatypesTest, Mat3ValidationTest )
+{
+    std::string bin_path = boost::dll::program_location().parent_path().string() + "/";
+    std::string data_path = bin_path + "./processing_datatypes/";
+
+    std::string instance_file = data_path + "mat3-processing-definition.json";
+    std::ifstream instance_stream( instance_file );
+    ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file;
+
+    std::string instance_str( ( std::istreambuf_iterator<char>( instance_stream ) ),
+                              std::istreambuf_iterator<char>() );
+    instance_stream.close();
+    ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty";
+
+    // Patch relative file:// URIs to absolute paths for debugger compatibility
+
+    instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path );
+
+    auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str );
+    ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: "
+                                              << manager_result.error().message();
+
+    auto manager = manager_result.value();
+    ASSERT_NE( manager, nullptr ) << "ProcessingManager is null";
+
+    auto processing = manager->GetProcessingData();
+    const auto &inputs = processing.get_inputs();
+    ASSERT_EQ( inputs.size(), 1 );
+    ASSERT_EQ( inputs[0].get_type(), sgns::DataType::MAT3 );
+    ASSERT_TRUE( inputs[0].get_dimensions().has_value() );
+    auto dims = inputs[0].get_dimensions().value();
+    ASSERT_EQ( dims.get_width().value(), 12 );
+    ASSERT_EQ( dims.get_block_len().value(), 6 );
+    ASSERT_EQ( dims.get_chunk_stride().value(), 3 );
+    ASSERT_TRUE( inputs[0].get_format().has_value() );
+    ASSERT_EQ( inputs[0].get_format().value(), sgns::InputFormat::FLOAT32 );
+}
+
+TEST_F( ProcessingDatatypesTest, Mat3ProcessingTest )
+{
+    std::string bin_path = boost::dll::program_location().parent_path().string() + "/";
+    std::string data_path = bin_path + "./processing_datatypes/";
+
+    std::string instance_file = data_path + "mat3-processing-definition.json";
+    std::ifstream instance_stream( instance_file );
+    ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file;
+
+    std::string instance_str( ( std::istreambuf_iterator<char>( instance_stream ) ),
+                              std::istreambuf_iterator<char>() );
+    instance_stream.close();
+    ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty";
+
+    // Patch relative file:// URIs to absolute paths for debugger compatibility
+
+    instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path );
+
+    auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str );
+    ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: "
+                                              << manager_result.error().message();
+
+    auto manager = manager_result.value();
+    ASSERT_NE( manager, nullptr ) << "ProcessingManager is null";
+
+    auto processing = manager->GetProcessingData();
+    const auto &passes = processing.get_passes();
+    ASSERT_EQ( passes.size(), 1 );
+
+    ASSERT_TRUE( passes[0].get_model().has_value() );
+    const auto model = passes[0].get_model().value();
+    const auto input_nodes = model.get_input_nodes();
+    ASSERT_EQ( input_nodes.size(), 1 );
+
+    auto ioc = std::make_shared<boost::asio::io_context>();
+    std::vector<std::vector<uint8_t>> chunkhashes;
+    sgns::ModelNode model_node = input_nodes[0];
+
+    std::cout << "Calling Process() on ProcessingManager (mat3)..." << std::endl;
+
+    auto process_result = manager->Process( ioc, chunkhashes, model_node );
+
+    if ( process_result.has_value() ) {
+        std::cout << "Process() succeeded!" << std::endl;
+        std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl;
+        std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl;
+
+        ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty";
+        ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash";
+    } else {
+        std::cout << "Process() failed: " << process_result.error().message() << std::endl;
+        FAIL() << "Process() should succeed: " << process_result.error().message();
+    }
+
+    const std::string output_file = data_path + "mat3_output.raw";
+    const std::string reference_file = data_path + "mat3_output_pt.raw";
+
+    std::ifstream output_stream( output_file, std::ios::binary );
+    ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file;
+
+    std::ifstream reference_stream( reference_file, std::ios::binary );
+    if ( !reference_stream.is_open() )
+    {
+        GTEST_SKIP() << "Reference output file not found: " << reference_file;
+    }
+
+    output_stream.seekg( 0, std::ios::end );
+    reference_stream.seekg( 0, std::ios::end );
+    const auto output_size = output_stream.tellg();
+    const auto reference_size = reference_stream.tellg();
+    ASSERT_EQ( output_size, reference_size ) << "Output size mismatch";
+
+    output_stream.seekg( 0, std::ios::beg );
+    reference_stream.seekg( 0, std::ios::beg );
+
+    std::vector<float> output_data( static_cast<size_t>( output_size ) / sizeof( float ) );
+    std::vector<float> reference_data( static_cast<size_t>( reference_size ) / sizeof( float ) );
+
+    output_stream.read( reinterpret_cast<char *>( output_data.data() ), output_size );
+    reference_stream.read( reinterpret_cast<char *>( reference_data.data() ), reference_size );
+
+    double max_abs_diff = 0.0;
+    double mean_abs_diff = 0.0;
+    for ( size_t i = 0; i < output_data.size(); ++i )
+    {
+        const double diff = std::abs( static_cast<double>( output_data[i] ) -
+                                      static_cast<double>( reference_data[i] ) );
+        mean_abs_diff += diff;
+        if ( diff > max_abs_diff )
+        {
+            max_abs_diff = diff;
+        }
+    }
+    mean_abs_diff /= static_cast<double>( output_data.size() );
+
+    std::cout << "Mat3 output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl;
+
+    ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large";
+    ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large";
+}
+
+TEST_F( ProcessingDatatypesTest, Mat4ValidationTest )
+{
+    std::string bin_path = boost::dll::program_location().parent_path().string() + "/";
+    std::string data_path = bin_path + "./processing_datatypes/";
+
+    std::string instance_file = data_path + "mat4-processing-definition.json";
+    std::ifstream instance_stream( instance_file );
+    ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file;
+
+    std::string instance_str( ( std::istreambuf_iterator<char>( instance_stream ) ),
+                              std::istreambuf_iterator<char>() );
+    instance_stream.close();
+    ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty";
+
+    // Patch relative file:// URIs to absolute paths for debugger compatibility
+
+    instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path );
+
+    auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str );
+    ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: "
+                                              << manager_result.error().message();
+
+    auto manager = manager_result.value();
+    ASSERT_NE( manager, nullptr ) << "ProcessingManager is null";
+
+    auto processing = manager->GetProcessingData();
+    const auto &inputs = processing.get_inputs();
+    ASSERT_EQ( inputs.size(), 1 );
+    ASSERT_EQ( inputs[0].get_type(), sgns::DataType::MAT4 );
+    ASSERT_TRUE( inputs[0].get_dimensions().has_value() );
+    auto dims = inputs[0].get_dimensions().value();
+    ASSERT_EQ( dims.get_width().value(), 10 );
+    ASSERT_EQ( dims.get_block_len().value(), 5 );
+    ASSERT_EQ( dims.get_chunk_stride().value(), 2 );
+    ASSERT_TRUE( inputs[0].get_format().has_value() );
+    ASSERT_EQ( inputs[0].get_format().value(), sgns::InputFormat::FLOAT32 );
+}
+
+ TEST_F( ProcessingDatatypesTest, Mat4ProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "mat4-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (mat4)..." << std::endl; + + auto process_result = manager->Process( ioc, chunkhashes, model_node ); + + if ( process_result.has_value() ) { + std::cout << "Process() succeeded!" 
<< std::endl; + std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl; + std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl; + + ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + } else { + std::cout << "Process() failed: " << process_result.error().message() << std::endl; + FAIL() << "Process() should succeed: " << process_result.error().message(); + } + + const std::string output_file = data_path + "mat4_output.raw"; + const std::string reference_file = data_path + "mat4_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg ); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + reference_stream.read( reinterpret_cast( reference_data.data() ), reference_size ); + + double max_abs_diff = 0.0; + double mean_abs_diff = 0.0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const double diff = std::abs( static_cast( output_data[i] ) - + static_cast( reference_data[i] ) ); + mean_abs_diff += diff; + if ( diff > max_abs_diff ) + { + max_abs_diff = diff; 
+ } + } + mean_abs_diff /= static_cast( output_data.size() ); + + std::cout << "Mat4 output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl; + + ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large"; + ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large"; + } + + TEST_F( ProcessingDatatypesTest, Vec2ValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "processing_datatypes/"; + + std::string instance_file = data_path + "vec2-processing-definition.json"; + std::ifstream ifs( instance_file ); + ASSERT_TRUE( ifs ) << "Could not open " << instance_file; + + std::string json_data( ( std::istreambuf_iterator( ifs ) ), + std::istreambuf_iterator() ); + + auto result = sgns::sgprocessing::ProcessingManager::Create( json_data ); + ASSERT_TRUE( result ) << result.error().message(); + + auto manager = result.value(); + auto inputs = manager->GetProcessingData().get_inputs(); + ASSERT_EQ( inputs.size(), 1 ) << "Expected 1 input"; + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::VEC2 ); + } + + TEST_F( ProcessingDatatypesTest, Vec2ProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "processing_datatypes/"; + + std::string instance_file = data_path + "vec2-processing-definition.json"; + std::ifstream ifs( instance_file ); + ASSERT_TRUE( ifs ) << "Could not open " << instance_file; + + std::string json_data( ( std::istreambuf_iterator( ifs ) ), + std::istreambuf_iterator() ); + + // Patch relative file:// URIs to absolute paths for debugger compatibility + json_data = PatchJsonUrisToAbsolute( json_data, bin_path ); + + auto result = sgns::sgprocessing::ProcessingManager::Create( json_data ); + ASSERT_TRUE( result ) << result.error().message(); + + auto manager = result.value(); + + auto processing = manager->GetProcessingData(); + const auto &passes = 
processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + // Create mock model node + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (vec2)..." << std::endl; + auto proc_result = manager->Process( ioc, chunkhashes, model_node ); + ASSERT_TRUE( proc_result ) << "Process failed: " << proc_result.error().message(); + + ASSERT_FALSE( proc_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + + const std::string output_file = data_path + "vec2_output.raw"; + const std::string reference_file = data_path + "vec2_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg ); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + reference_stream.read( reinterpret_cast( reference_data.data() ), reference_size ); + + double mean_abs_diff = 0.0; + 
double max_abs_diff = 0.0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const double diff = std::abs( static_cast( output_data[i] ) - + static_cast( reference_data[i] ) ); + mean_abs_diff += diff; + max_abs_diff = std::max( max_abs_diff, diff ); + } + + mean_abs_diff /= static_cast( output_data.size() ); + + std::cout << "Vec2 output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl; + + ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large"; + ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large"; + } + + TEST_F( ProcessingDatatypesTest, TensorValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "tensor-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &inputs = processing.get_inputs(); + ASSERT_EQ( inputs.size(), 1 ); + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::TENSOR ); + ASSERT_TRUE( inputs[0].get_dimensions().has_value() ); + auto dims = inputs[0].get_dimensions().value(); + ASSERT_EQ( 
dims.get_width().value(), 256 ); + ASSERT_EQ( dims.get_block_len().value(), 64 ); + ASSERT_EQ( dims.get_chunk_stride().value(), 32 ); + ASSERT_TRUE( inputs[0].get_format().has_value() ); + ASSERT_EQ( inputs[0].get_format().value(), sgns::InputFormat::FLOAT32 ); + } + + TEST_F( ProcessingDatatypesTest, TensorProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "tensor-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (tensor)..." 
<< std::endl; + + auto process_result = manager->Process( ioc, chunkhashes, model_node ); + + if ( process_result.has_value() ) { + std::cout << "Process() succeeded!" << std::endl; + std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl; + std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl; + + ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + } else { + std::cout << "Process() failed: " << process_result.error().message() << std::endl; + FAIL() << "Process() should succeed: " << process_result.error().message(); + } + + const std::string output_file = data_path + "tensor_output.raw"; + const std::string reference_file = data_path + "tensor_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg ); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + reference_stream.read( reinterpret_cast( reference_data.data() ), reference_size ); + + double max_abs_diff = 0.0; + double mean_abs_diff = 0.0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const 
double diff = std::abs( static_cast( output_data[i] ) - + static_cast( reference_data[i] ) ); + mean_abs_diff += diff; + if ( diff > max_abs_diff ) + { + max_abs_diff = diff; + } + } + mean_abs_diff /= static_cast( output_data.size() ); + + std::cout << "Tensor output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl; + + ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large"; + ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large"; + } + + TEST_F( ProcessingDatatypesTest, TextureCubeValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "texturecube-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &inputs = processing.get_inputs(); + ASSERT_EQ( inputs.size(), 1 ); + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::TEXTURE_CUBE ); + ASSERT_TRUE( inputs[0].get_dimensions().has_value() ); + auto dims = inputs[0].get_dimensions().value(); + ASSERT_EQ( dims.get_width().value(), 64 ); + ASSERT_EQ( 
dims.get_height().value(), 64 ); + ASSERT_TRUE( inputs[0].get_format().has_value() ); + ASSERT_EQ( inputs[0].get_format().value(), sgns::InputFormat::RGB8 ); + } + + TEST_F( ProcessingDatatypesTest, TextureCubeProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "./processing_datatypes/"; + + std::string instance_file = data_path + "texturecube-processing-definition.json"; + std::ifstream instance_stream( instance_file ); + ASSERT_TRUE( instance_stream.is_open() ) << "Failed to open instance file: " << instance_file; + + std::string instance_str( ( std::istreambuf_iterator( instance_stream ) ), + std::istreambuf_iterator() ); + instance_stream.close(); + ASSERT_FALSE( instance_str.empty() ) << "Instance file is empty"; + + // Patch relative file:// URIs to absolute paths for debugger compatibility + + instance_str = PatchJsonUrisToAbsolute( instance_str, bin_path ); + + auto manager_result = sgns::sgprocessing::ProcessingManager::Create( instance_str ); + ASSERT_TRUE( manager_result.has_value() ) << "Failed to create ProcessingManager: " + << manager_result.error().message(); + + auto manager = manager_result.value(); + ASSERT_NE( manager, nullptr ) << "ProcessingManager is null"; + + auto processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (textureCube)..." << std::endl; + + auto process_result = manager->Process( ioc, chunkhashes, model_node ); + + if ( process_result.has_value() ) { + std::cout << "Process() succeeded!" 
<< std::endl; + std::cout << "Result hash size: " << process_result.value().size() << " bytes" << std::endl; + std::cout << "Number of chunk hashes: " << chunkhashes.size() << std::endl; + + ASSERT_FALSE( process_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + } else { + std::cout << "Process() failed: " << process_result.error().message() << std::endl; + FAIL() << "Process() should succeed: " << process_result.error().message(); + } + + const std::string output_file = data_path + "texturecube_output.raw"; + const std::string reference_file = data_path + "texturecube_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg ); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + reference_stream.read( reinterpret_cast( reference_data.data() ), reference_size ); + + double max_abs_diff = 0.0; + double mean_abs_diff = 0.0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const double diff = std::abs( static_cast( output_data[i] ) - + static_cast( reference_data[i] ) ); + mean_abs_diff += diff; + if ( diff > max_abs_diff ) + { + 
max_abs_diff = diff; + } + } + mean_abs_diff /= static_cast( output_data.size() ); + + std::cout << "TextureCube output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl; + + ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large"; + ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large"; + } + + TEST_F( ProcessingDatatypesTest, Vec3ValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "processing_datatypes/"; + + std::string instance_file = data_path + "vec3-processing-definition.json"; + std::ifstream ifs( instance_file ); + ASSERT_TRUE( ifs ) << "Could not open " << instance_file; + + std::string json_data( ( std::istreambuf_iterator( ifs ) ), + std::istreambuf_iterator() ); + + auto result = sgns::sgprocessing::ProcessingManager::Create( json_data ); + ASSERT_TRUE( result ) << result.error().message(); + + auto manager = result.value(); + auto inputs = manager->GetProcessingData().get_inputs(); + ASSERT_EQ( inputs.size(), 1 ) << "Expected 1 input"; + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::VEC3 ); + } + + TEST_F( ProcessingDatatypesTest, Vec3ProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "processing_datatypes/"; + + std::string instance_file = data_path + "vec3-processing-definition.json"; + std::ifstream ifs( instance_file ); + ASSERT_TRUE( ifs ) << "Could not open " << instance_file; + + std::string json_data( ( std::istreambuf_iterator( ifs ) ), + std::istreambuf_iterator() ); + + // Patch relative file:// URIs to absolute paths for debugger compatibility + json_data = PatchJsonUrisToAbsolute( json_data, bin_path ); + + auto result = sgns::sgprocessing::ProcessingManager::Create( json_data ); + ASSERT_TRUE( result ) << result.error().message(); + + auto manager = result.value(); + + auto processing = 
manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + // Create mock model node + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (vec3)..." << std::endl; + auto proc_result = manager->Process( ioc, chunkhashes, model_node ); + ASSERT_TRUE( proc_result ) << "Process failed: " << proc_result.error().message(); + + ASSERT_FALSE( proc_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + + const std::string output_file = data_path + "vec3_output.raw"; + const std::string reference_file = data_path + "vec3_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg ); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + reference_stream.read( reinterpret_cast( reference_data.data() ), 
reference_size ); + + double mean_abs_diff = 0.0; + double max_abs_diff = 0.0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const double diff = std::abs( static_cast( output_data[i] ) - + static_cast( reference_data[i] ) ); + mean_abs_diff += diff; + max_abs_diff = std::max( max_abs_diff, diff ); + } + + mean_abs_diff /= static_cast( output_data.size() ); + + std::cout << "Vec3 output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl; + + ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large"; + ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large"; + } + + TEST_F( ProcessingDatatypesTest, Vec4ValidationTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "processing_datatypes/"; + + std::string instance_file = data_path + "vec4-processing-definition.json"; + std::ifstream ifs( instance_file ); + ASSERT_TRUE( ifs ) << "Could not open " << instance_file; + + std::string json_data( ( std::istreambuf_iterator( ifs ) ), + std::istreambuf_iterator() ); + + auto result = sgns::sgprocessing::ProcessingManager::Create( json_data ); + ASSERT_TRUE( result ) << result.error().message(); + + auto manager = result.value(); + auto inputs = manager->GetProcessingData().get_inputs(); + ASSERT_EQ( inputs.size(), 1 ) << "Expected 1 input"; + ASSERT_EQ( inputs[0].get_type(), sgns::DataType::VEC4 ); + } + + TEST_F( ProcessingDatatypesTest, Vec4ProcessingTest ) + { + std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string data_path = bin_path + "processing_datatypes/"; + + std::string instance_file = data_path + "vec4-processing-definition.json"; + std::ifstream ifs( instance_file ); + ASSERT_TRUE( ifs ) << "Could not open " << instance_file; + + std::string json_data( ( std::istreambuf_iterator( ifs ) ), + std::istreambuf_iterator() ); + + // Patch relative file:// URIs to absolute paths for debugger 
compatibility + json_data = PatchJsonUrisToAbsolute( json_data, bin_path ); + + auto result = sgns::sgprocessing::ProcessingManager::Create( json_data ); + ASSERT_TRUE( result ) << result.error().message(); + + auto manager = result.value(); + + auto processing = manager->GetProcessingData(); + const auto &passes = processing.get_passes(); + ASSERT_EQ( passes.size(), 1 ); + + ASSERT_TRUE( passes[0].get_model().has_value() ); + const auto model = passes[0].get_model().value(); + const auto input_nodes = model.get_input_nodes(); + ASSERT_EQ( input_nodes.size(), 1 ); + + // Create mock model node + auto ioc = std::make_shared(); + std::vector> chunkhashes; + sgns::ModelNode model_node = input_nodes[0]; + + std::cout << "Calling Process() on ProcessingManager (vec4)..." << std::endl; + auto proc_result = manager->Process( ioc, chunkhashes, model_node ); + ASSERT_TRUE( proc_result ) << "Process failed: " << proc_result.error().message(); + + ASSERT_FALSE( proc_result.value().empty() ) << "Result hash should not be empty"; + ASSERT_GT( chunkhashes.size(), 0 ) << "Should have at least one chunk hash"; + + const std::string output_file = data_path + "vec4_output.raw"; + const std::string reference_file = data_path + "vec4_output_pt.raw"; + + std::ifstream output_stream( output_file, std::ios::binary ); + ASSERT_TRUE( output_stream.is_open() ) << "Failed to open output file: " << output_file; + + std::ifstream reference_stream( reference_file, std::ios::binary ); + if ( !reference_stream.is_open() ) + { + GTEST_SKIP() << "Reference output file not found: " << reference_file; + } + + output_stream.seekg( 0, std::ios::end ); + reference_stream.seekg( 0, std::ios::end ); + const auto output_size = output_stream.tellg(); + const auto reference_size = reference_stream.tellg(); + ASSERT_EQ( output_size, reference_size ) << "Output size mismatch"; + + output_stream.seekg( 0, std::ios::beg ); + reference_stream.seekg( 0, std::ios::beg ); + + std::vector output_data( static_cast( 
output_size ) / sizeof( float ) ); + std::vector reference_data( static_cast( reference_size ) / sizeof( float ) ); + + output_stream.read( reinterpret_cast( output_data.data() ), output_size ); + reference_stream.read( reinterpret_cast( reference_data.data() ), reference_size ); + + double mean_abs_diff = 0.0; + double max_abs_diff = 0.0; + for ( size_t i = 0; i < output_data.size(); ++i ) + { + const double diff = std::abs( static_cast( output_data[i] ) - + static_cast( reference_data[i] ) ); + mean_abs_diff += diff; + max_abs_diff = std::max( max_abs_diff, diff ); + } + + mean_abs_diff /= static_cast( output_data.size() ); + + std::cout << "Vec4 output diff: mean=" << mean_abs_diff << " max=" << max_abs_diff << std::endl; + + ASSERT_LT( mean_abs_diff, 1e-3 ) << "Mean absolute diff too large"; + ASSERT_LT( max_abs_diff, 1e-2 ) << "Max absolute diff too large"; + } +} + + diff --git a/test/src/processing_datatypes/spleen_15.raw b/test/src/processing_datatypes/spleen_15.raw new file mode 100644 index 000000000..d6a6fe624 Binary files /dev/null and b/test/src/processing_datatypes/spleen_15.raw differ diff --git a/test/src/processing_datatypes/spleen_ct_seg.mnn b/test/src/processing_datatypes/spleen_ct_seg.mnn new file mode 100644 index 000000000..d1372e762 Binary files /dev/null and b/test/src/processing_datatypes/spleen_ct_seg.mnn differ diff --git a/test/src/processing_datatypes/string-processing-definition.json b/test/src/processing_datatypes/string-processing-definition.json new file mode 100644 index 000000000..7d235e1a1 --- /dev/null +++ b/test/src/processing_datatypes/string-processing-definition.json @@ -0,0 +1,92 @@ +{ + "name": "bert-tiny-string-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for string input using bert-tiny", + "tags": ["string", "nlp", "test"], + + "inputs": [ + { + "name": "inputText", + "source_uri_param": "file://processing_datatypes/test_input.txt", + 
"type": "string", + "description": "Input text string for BERT processing" + } + ], + + "outputs": [ + { + "name": "textEmbedding", + "source_uri_param": "file://processing_datatypes/embedding_output.raw", + "type": "tensor", + "description": "Output text embedding from BERT" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/bert-tiny.mnn", + "description": "URI to the BERT tiny model" + }, + { + "name": "tokenizerMode", + "type": "string", + "default": "token_ids", + "description": "Tokenizer mode: token_ids or raw_text" + }, + { + "name": "textInput", + "type": "string", + "default": "Hello world this is a test sentence", + "description": "Input text to process" + }, + { + "name": "maxLength", + "type": "int", + "default": 128, + "description": "Maximum sequence length", + "constraints": { + "min": 1, + "max": 512 + } + } + ], + + "passes": [ + { + "name": "text_inference", + "type": "inference", + "description": "BERT text embedding inference", + "model": { + "source_uri_param": "file://processing_datatypes/bert-tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input_ids", + "type": "tensor", + "source": "input:inputText", + "shape": [1, 128] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:textEmbedding", + "shape": [1, 128, 128] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-11", + "framework": "MNN", + "test_case": "string_datatype" + } +} diff --git a/test/src/processing_datatypes/tensor-processing-definition.json b/test/src/processing_datatypes/tensor-processing-definition.json new file mode 100644 index 000000000..44f85ae8a --- /dev/null +++ b/test/src/processing_datatypes/tensor-processing-definition.json @@ -0,0 +1,76 @@ +{ + "name": "tensor-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for tensor input using 
a tiny MLP model", + "tags": ["tensor", "test"], + + "inputs": [ + { + "name": "inputTensor", + "source_uri_param": "file://processing_datatypes/tensor_input.raw", + "type": "tensor", + "description": "Input tensor data (flat)", + "dimensions": { + "width": 256, + "block_len": 64, + "chunk_stride": 32 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "tensorOutput", + "source_uri_param": "file://processing_datatypes/tensor_output.raw", + "type": "tensor", + "description": "Output tensor from tensor model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/tensor_tiny.mnn", + "description": "URI to the tensor MNN model" + } + ], + + "passes": [ + { + "name": "tensor_inference", + "type": "inference", + "description": "Tensor model inference", + "model": { + "source_uri_param": "file://processing_datatypes/tensor_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputTensor", + "shape": [1, 64] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:tensorOutput", + "shape": [1, 64] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-17", + "framework": "MNN", + "test_case": "tensor_datatype" + } +} diff --git a/test/src/processing_datatypes/tensor_input.raw b/test/src/processing_datatypes/tensor_input.raw new file mode 100644 index 000000000..350762db7 Binary files /dev/null and b/test/src/processing_datatypes/tensor_input.raw differ diff --git a/test/src/processing_datatypes/tensor_output_pt.raw b/test/src/processing_datatypes/tensor_output_pt.raw new file mode 100644 index 000000000..2d6954eb8 Binary files /dev/null and b/test/src/processing_datatypes/tensor_output_pt.raw differ diff --git a/test/src/processing_datatypes/tensor_tiny.mnn b/test/src/processing_datatypes/tensor_tiny.mnn new file mode 100644 index 000000000..102dcbc56 Binary files /dev/null and 
b/test/src/processing_datatypes/tensor_tiny.mnn differ diff --git a/test/src/processing_datatypes/test_input.txt b/test/src/processing_datatypes/test_input.txt new file mode 100644 index 000000000..50bc23fcf --- /dev/null +++ b/test/src/processing_datatypes/test_input.txt @@ -0,0 +1 @@ +101 7592 2088 2023 2003 1037 3231 6251 2005 14324 6364 102 \ No newline at end of file diff --git a/test/src/processing_datatypes/texture1d-processing-definition.json b/test/src/processing_datatypes/texture1d-processing-definition.json new file mode 100644 index 000000000..badb1fb45 --- /dev/null +++ b/test/src/processing_datatypes/texture1d-processing-definition.json @@ -0,0 +1,82 @@ +{ + "name": "texture1d-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for texture1D input using a tiny Conv1D model", + "tags": ["texture1d", "signal", "test"], + + "inputs": [ + { + "name": "inputSignal", + "source_uri_param": "file://processing_datatypes/texture1d_input.raw", + "type": "texture1D", + "description": "Input 1D signal", + "dimensions": { + "width": 256, + "block_len": 256, + "chunk_stride": 256 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "signalOutput", + "source_uri_param": "file://processing_datatypes/texture1d_output.raw", + "type": "tensor", + "description": "Output tensor from 1D model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/texture1d_tiny.mnn", + "description": "URI to the tiny 1D MNN model" + }, + { + "name": "volumeLayout", + "type": "string", + "default": "HWD", + "description": "Accepted but ignored for 1D" + } + ], + + "passes": [ + { + "name": "texture1d_inference", + "type": "inference", + "description": "1D model inference", + "model": { + "source_uri_param": "file://processing_datatypes/texture1d_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + 
"type": "tensor", + "source": "input:inputSignal", + "shape": [1, 1, 256] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:signalOutput", + "shape": [1, 2, 256] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-13", + "framework": "MNN", + "test_case": "texture1d_datatype" + } +} diff --git a/test/src/processing_datatypes/texture1d_input.raw b/test/src/processing_datatypes/texture1d_input.raw new file mode 100644 index 000000000..14597b9b1 Binary files /dev/null and b/test/src/processing_datatypes/texture1d_input.raw differ diff --git a/test/src/processing_datatypes/texture1d_output_pt.raw b/test/src/processing_datatypes/texture1d_output_pt.raw new file mode 100644 index 000000000..4ac48aab2 Binary files /dev/null and b/test/src/processing_datatypes/texture1d_output_pt.raw differ diff --git a/test/src/processing_datatypes/texture1d_tiny.mnn b/test/src/processing_datatypes/texture1d_tiny.mnn new file mode 100644 index 000000000..17030b601 Binary files /dev/null and b/test/src/processing_datatypes/texture1d_tiny.mnn differ diff --git a/test/src/processing_datatypes/texture3d-processing-definition.json b/test/src/processing_datatypes/texture3d-processing-definition.json new file mode 100644 index 000000000..67a37cb99 --- /dev/null +++ b/test/src/processing_datatypes/texture3d-processing-definition.json @@ -0,0 +1,82 @@ +{ + "name": "spleen-ct-texture3d-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for texture3D input using spleen_ct_seg", + "tags": ["texture3d", "volume", "test"], + + "inputs": [ + { + "name": "inputVolume", + "source_uri_param": "file://processing_datatypes/spleen_15.raw", + "type": "texture3D", + "description": "Input volume for 3D segmentation", + "dimensions": { + "width": 253, + "height": 253, + "chunk_count": 94, + "chunk_subchunk_width": 96, + "chunk_subchunk_height": 96, + "block_len": 96, + 
"chunk_stride": 48, + "chunk_line_stride": 48, + "block_stride": 48 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "segmentationOutput", + "source_uri_param": "file://processing_datatypes/stitched_logits.raw", + "type": "tensor", + "description": "Output segmentation tensor" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/spleen_ct_seg.mnn", + "description": "URI to the spleen CT segmentation model" + } + ], + + "passes": [ + { + "name": "volume_inference", + "type": "inference", + "description": "3D segmentation inference", + "model": { + "source_uri_param": "file://processing_datatypes/spleen_ct_seg.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputVolume", + "shape": [1, 1, 96, 96, 96] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:segmentationOutput", + "shape": [1, 2, 96, 96, 96] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-11", + "framework": "MNN", + "test_case": "texture3d_datatype" + } +} diff --git a/test/src/processing_datatypes/texturecube-processing-definition.json b/test/src/processing_datatypes/texturecube-processing-definition.json new file mode 100644 index 000000000..88fbab69b --- /dev/null +++ b/test/src/processing_datatypes/texturecube-processing-definition.json @@ -0,0 +1,81 @@ +{ + "name": "texturecube-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for textureCube input using a tiny Conv2D model", + "tags": ["textureCube", "image", "test"], + + "inputs": [ + { + "name": "inputCube", + "source_uri_param": "file://processing_datatypes/texturecube_input.raw", + "type": "textureCube", + "description": "Input texture cube faces (6 faces in order)", + "dimensions": { + "width": 64, + "height": 64 + }, + "format": "RGB8" + } + ], + + 
"outputs": [ + { + "name": "cubeOutput", + "source_uri_param": "file://processing_datatypes/texturecube_output.raw", + "type": "tensor", + "description": "Output tensor from textureCube model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/texturecube_tiny.mnn", + "description": "URI to the textureCube MNN model" + }, + { + "name": "cubeLayout", + "type": "string", + "default": "faces_in_order", + "description": "faces_in_order or atlas_3x2" + } + ], + + "passes": [ + { + "name": "texturecube_inference", + "type": "inference", + "description": "TextureCube model inference", + "model": { + "source_uri_param": "file://processing_datatypes/texturecube_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputCube", + "shape": [1, 3, 64, 64] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:cubeOutput", + "shape": [1, 3, 64, 64] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-17", + "framework": "MNN", + "test_case": "texturecube_datatype" + } +} diff --git a/test/src/processing_datatypes/texturecube_input.raw b/test/src/processing_datatypes/texturecube_input.raw new file mode 100644 index 000000000..1272d19a5 Binary files /dev/null and b/test/src/processing_datatypes/texturecube_input.raw differ diff --git a/test/src/processing_datatypes/texturecube_output_pt.raw b/test/src/processing_datatypes/texturecube_output_pt.raw new file mode 100644 index 000000000..764d50606 Binary files /dev/null and b/test/src/processing_datatypes/texturecube_output_pt.raw differ diff --git a/test/src/processing_datatypes/texturecube_tiny.mnn b/test/src/processing_datatypes/texturecube_tiny.mnn new file mode 100644 index 000000000..5945fcee0 Binary files /dev/null and b/test/src/processing_datatypes/texturecube_tiny.mnn differ diff --git 
a/test/src/processing_datatypes/vec2-processing-definition.json b/test/src/processing_datatypes/vec2-processing-definition.json new file mode 100644 index 000000000..0b830ff04 --- /dev/null +++ b/test/src/processing_datatypes/vec2-processing-definition.json @@ -0,0 +1,74 @@ +{ + "name": "vec2-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for vec2 input using a tiny Conv1D model", + "tags": ["vec2", "vector", "test"], + + "inputs": [ + { + "name": "inputVec2", + "source_uri_param": "file://processing_datatypes/vec2_input.raw", + "type": "vec2", + "description": "Input vec2 array (X, Y components)", + "dimensions": { + "width": 16 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "vec2Output", + "source_uri_param": "file://processing_datatypes/vec2_output.raw", + "type": "tensor", + "description": "Output tensor from vec2 model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/vec2_tiny.mnn", + "description": "URI to the vec2 MNN model" + } + ], + + "passes": [ + { + "name": "vec2_inference", + "type": "inference", + "description": "Vec2 model inference", + "model": { + "source_uri_param": "file://processing_datatypes/vec2_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputVec2", + "shape": [1, 2, 16] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:vec2Output", + "shape": [1, 16] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-17", + "framework": "MNN", + "test_case": "vec2_datatype" + } +} diff --git a/test/src/processing_datatypes/vec2_input.raw b/test/src/processing_datatypes/vec2_input.raw new file mode 100644 index 000000000..ddf626ec5 --- /dev/null +++ b/test/src/processing_datatypes/vec2_input.raw @@ -0,0 +1,2 @@ +QQ>( %??oo#?vD?^7 
+?E!tw>^ܿs>Dth^ƴa?1g:L=&^\ b+=S~[>uXr t? \ No newline at end of file diff --git a/test/src/processing_datatypes/vec2_output_pt.raw b/test/src/processing_datatypes/vec2_output_pt.raw new file mode 100644 index 000000000..a61030b03 --- /dev/null +++ b/test/src/processing_datatypes/vec2_output_pt.raw @@ -0,0 +1,2 @@ +QK>N +k>=v6=2D]M>оDx]pi/|Q>'=,=^ \ No newline at end of file diff --git a/test/src/processing_datatypes/vec2_tiny.mnn b/test/src/processing_datatypes/vec2_tiny.mnn new file mode 100644 index 000000000..b8186fb4d Binary files /dev/null and b/test/src/processing_datatypes/vec2_tiny.mnn differ diff --git a/test/src/processing_datatypes/vec3-processing-definition.json b/test/src/processing_datatypes/vec3-processing-definition.json new file mode 100644 index 000000000..25ce91375 --- /dev/null +++ b/test/src/processing_datatypes/vec3-processing-definition.json @@ -0,0 +1,74 @@ +{ + "name": "vec3-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for vec3 input using a tiny Conv1D model", + "tags": ["vec3", "vector", "test"], + + "inputs": [ + { + "name": "inputVec3", + "source_uri_param": "file://processing_datatypes/vec3_input.raw", + "type": "vec3", + "description": "Input vec3 array (X, Y, Z components)", + "dimensions": { + "width": 16 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "vec3Output", + "source_uri_param": "file://processing_datatypes/vec3_output.raw", + "type": "tensor", + "description": "Output tensor from vec3 model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/vec3_tiny.mnn", + "description": "URI to the vec3 MNN model" + } + ], + + "passes": [ + { + "name": "vec3_inference", + "type": "inference", + "description": "Vec3 model inference", + "model": { + "source_uri_param": "file://processing_datatypes/vec3_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": 
[ + { + "name": "input", + "type": "tensor", + "source": "input:inputVec3", + "shape": [1, 3, 16] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:vec3Output", + "shape": [1, 16] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-17", + "framework": "MNN", + "test_case": "vec3_datatype" + } +} diff --git a/test/src/processing_datatypes/vec3_input.raw b/test/src/processing_datatypes/vec3_input.raw new file mode 100644 index 000000000..a95e3dc22 Binary files /dev/null and b/test/src/processing_datatypes/vec3_input.raw differ diff --git a/test/src/processing_datatypes/vec3_output_pt.raw b/test/src/processing_datatypes/vec3_output_pt.raw new file mode 100644 index 000000000..27823ec42 --- /dev/null +++ b/test/src/processing_datatypes/vec3_output_pt.raw @@ -0,0 +1 @@ +)S4E޾˾ /wQ|-d8~7Aph \ No newline at end of file diff --git a/test/src/processing_datatypes/vec3_tiny.mnn b/test/src/processing_datatypes/vec3_tiny.mnn new file mode 100644 index 000000000..c299c007e Binary files /dev/null and b/test/src/processing_datatypes/vec3_tiny.mnn differ diff --git a/test/src/processing_datatypes/vec4-processing-definition.json b/test/src/processing_datatypes/vec4-processing-definition.json new file mode 100644 index 000000000..93dafedbf --- /dev/null +++ b/test/src/processing_datatypes/vec4-processing-definition.json @@ -0,0 +1,74 @@ +{ + "name": "vec4-tiny-test", + "version": "1.0.0", + "gnus_spec_version": 1.0, + "author": "Test Author", + "description": "A test processing definition for vec4 input using a tiny Conv1D model", + "tags": ["vec4", "vector", "test"], + + "inputs": [ + { + "name": "inputVec4", + "source_uri_param": "file://processing_datatypes/vec4_input.raw", + "type": "vec4", + "description": "Input vec4 array (X, Y, Z, W components)", + "dimensions": { + "width": 16 + }, + "format": "FLOAT32" + } + ], + + "outputs": [ + { + "name": "vec4Output", + "source_uri_param": 
"file://processing_datatypes/vec4_output.raw", + "type": "tensor", + "description": "Output tensor from vec4 model" + } + ], + + "parameters": [ + { + "name": "modelUri", + "type": "uri", + "default": "file://processing_datatypes/vec4_tiny.mnn", + "description": "URI to the vec4 MNN model" + } + ], + + "passes": [ + { + "name": "vec4_inference", + "type": "inference", + "description": "Vec4 model inference", + "model": { + "source_uri_param": "file://processing_datatypes/vec4_tiny.mnn", + "format": "MNN", + "batch_size": 1, + "input_nodes": [ + { + "name": "input", + "type": "tensor", + "source": "input:inputVec4", + "shape": [1, 4, 16] + } + ], + "output_nodes": [ + { + "name": "output", + "type": "tensor", + "target": "output:vec4Output", + "shape": [1, 16] + } + ] + } + } + ], + + "metadata": { + "created_date": "2026-02-17", + "framework": "MNN", + "test_case": "vec4_datatype" + } +} diff --git a/test/src/processing_datatypes/vec4_input.raw b/test/src/processing_datatypes/vec4_input.raw new file mode 100644 index 000000000..9ffaecaaf Binary files /dev/null and b/test/src/processing_datatypes/vec4_input.raw differ diff --git a/test/src/processing_datatypes/vec4_output_pt.raw b/test/src/processing_datatypes/vec4_output_pt.raw new file mode 100644 index 000000000..9c8f954ba Binary files /dev/null and b/test/src/processing_datatypes/vec4_output_pt.raw differ diff --git a/test/src/processing_datatypes/vec4_tiny.mnn b/test/src/processing_datatypes/vec4_tiny.mnn new file mode 100644 index 000000000..d575a297a Binary files /dev/null and b/test/src/processing_datatypes/vec4_tiny.mnn differ diff --git a/test/src/processing_nodes/CMakeLists.txt b/test/src/processing_nodes/CMakeLists.txt index 6eb64392c..e3c460940 100644 --- a/test/src/processing_nodes/CMakeLists.txt +++ b/test/src/processing_nodes/CMakeLists.txt @@ -30,7 +30,6 @@ target_include_directories(child_tokens_test PRIVATE ${AsyncIOManager_INCLUDE_DI target_link_libraries(child_tokens_test genius_node - mp_utils 
json_secure_storage ) if(WIN32) @@ -56,7 +55,6 @@ target_include_directories(full_node_test PRIVATE ${AsyncIOManager_INCLUDE_DIR}) target_link_libraries(full_node_test genius_node - mp_utils json_secure_storage ) if(WIN32) diff --git a/test/src/processing_nodes/child_tokens_test.cpp b/test/src/processing_nodes/child_tokens_test.cpp index 6d634ca67..7b03ab63c 100644 --- a/test/src/processing_nodes/child_tokens_test.cpp +++ b/test/src/processing_nodes/child_tokens_test.cpp @@ -17,7 +17,6 @@ #include #include "local_secure_storage/impl/json/JSONSecureStorage.hpp" - using namespace sgns::test; using boost::multiprecision::cpp_dec_float_50; @@ -182,19 +181,11 @@ TEST( TransferTokenValue, ThreeNodeTransferTest ) } // Ensure enough balance with +1 change - auto mintRes51 = node51->MintTokens( totalMint51 + 1, - "", - "", - sgns::TokenID::FromBytes( { 0x51 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mintRes51 = node51->MintTokens( totalMint51 + 1, "", "", sgns::TokenID::FromBytes( { 0x51 } ) ); ASSERT_TRUE( mintRes51.has_value() ) << "Grouped mint failed on token51"; std::cout << "Minted total " << ( totalMint51 + 1 ) << " of token51 on node51\n"; - auto mintRes52 = node52->MintTokens( totalMint52 + 1, - "", - "", - sgns::TokenID::FromBytes( { 0x52 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mintRes52 = node52->MintTokens( totalMint52 + 1, "", "", sgns::TokenID::FromBytes( { 0x52 } ) ); ASSERT_TRUE( mintRes52.has_value() ) << "Grouped mint failed on token52"; std::cout << "Minted total " << ( totalMint52 + 1 ) << " of token52 on node52\n"; @@ -284,11 +275,7 @@ TEST_P( GeniusNodeMintMainTest, MintMainBalance ) auto parsedInitialChild = node->ParseTokens( initialChildStr, p.TokenID ); ASSERT_TRUE( parsedInitialChild.has_value() ); - auto res = node->MintTokens( p.mintMain, - "", - "", - p.TokenID, - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto res = node->MintTokens( p.mintMain, "", "", 
p.TokenID ); ASSERT_TRUE( res.has_value() ); auto finalFmtRes = node->FormatTokens( node->GetBalance(), p.TokenID ); @@ -333,9 +320,7 @@ inline std::ostream &operator<<( std::ostream &os, MintChildCase_s const &c ) class GeniusNodeMintChildTest : public ::testing::TestWithParam { protected: - static void SetUpTestSuite() - { - } + static void SetUpTestSuite() {} }; TEST_P( GeniusNodeMintChildTest, MintChildBalance ) @@ -364,11 +349,7 @@ TEST_P( GeniusNodeMintChildTest, MintChildBalance ) auto parsedMint = node->ParseTokens( p.mintChild, p.TokenID ); ASSERT_TRUE( parsedMint.has_value() ); - auto res = node->MintTokens( parsedMint.value(), - "", - "", - p.TokenID, - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto res = node->MintTokens( parsedMint.value(), "", "", p.TokenID ); ASSERT_TRUE( res.has_value() ); auto finalFmtRes = node->FormatTokens( node->GetBalance(), p.TokenID ); @@ -445,11 +426,7 @@ TEST( GeniusNodeMultiTokenMintTest, MintMultipleTokenIds ) for ( const auto &tm : mints ) { - auto res = node->MintTokens( tm.amount, - "", - "", - tm.tokenId, - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto res = node->MintTokens( tm.amount, "", "", tm.tokenId ); ASSERT_TRUE( res.has_value() ); // << "MintTokens failed for token=" << tm.tokenId << " amount=" << tm.amount; expectedTotals[tm.tokenId] += tm.amount; @@ -504,14 +481,10 @@ TEST_F( ProcessingNodesModuleTest, SinglePostProcessing ) std::chrono::milliseconds( 30000 ), "node_proc2 not synched" ); - auto mintResMain = node_main->MintTokens( 1000, - "", - "", - sgns::TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mintResMain = node_main->MintTokens( 1000, "", "", sgns::TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mintResMain.has_value() ) << "Mint failed on node_main"; - std::string bin_path = boost::dll::program_location().parent_path().string() + "/"; + std::string bin_path = 
boost::dll::program_location().parent_path().string() + "/"; std::string json_data = R"( { "name": "posenet-inference", diff --git a/test/src/processing_nodes/processing_nodes_test.cpp b/test/src/processing_nodes/processing_nodes_test.cpp index fa7b77d7c..9405ca4f9 100644 --- a/test/src/processing_nodes/processing_nodes_test.cpp +++ b/test/src/processing_nodes/processing_nodes_test.cpp @@ -472,11 +472,7 @@ TEST_F( ProcessingNodesTest, PostProcessing ) auto procmgr = sgns::sgprocessing::ProcessingManager::Create( json_data ); auto cost = node_main->GetProcessCost( procmgr.value() ); - auto mint_result = node_main->MintTokens( 50000000000, - "", - "", - sgns::TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mint_result = node_main->MintTokens( 50000000000, "", "", sgns::TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction failed or timed out"; diff --git a/test/src/processing_schema/gnus-processing-schema.json b/test/src/processing_schema/gnus-processing-schema.json index 79195c3ac..065b4af82 100644 --- a/test/src/processing_schema/gnus-processing-schema.json +++ b/test/src/processing_schema/gnus-processing-schema.json @@ -453,7 +453,6 @@ "type": "string", "enum": [ "texture1D", "texture2D", "texture3D", "textureCube", - "texture1D_array", "texture2D_array", "texture3D_array", "tensor", "float", "int", "bool", "vec2", "vec3", "vec4", "mat2", "mat3", "mat4", "buffer", "string" ] diff --git a/test/src/transaction_sync/transaction_crash_test.cpp b/test/src/transaction_sync/transaction_crash_test.cpp index e6afe16f4..34aa7a80e 100644 --- a/test/src/transaction_sync/transaction_crash_test.cpp +++ b/test/src/transaction_sync/transaction_crash_test.cpp @@ -61,14 +61,14 @@ namespace sgns CONFIG2.BaseWritePath[sizeof( CONFIG2.BaseWritePath ) - 1] = '\0'; node1 = sgns::GeniusNode::New( CONFIG1, - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", - false, - false ); 
+ "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", + false, + false ); std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); node2 = sgns::GeniusNode::New( CONFIG2, - "cafebeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", - false, - false ); + "cafebeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", + false, + false ); std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); } @@ -86,12 +86,12 @@ namespace sgns */ void RestartNode2() { - node2.reset(); + node2.reset(); std::this_thread::sleep_for( std::chrono::milliseconds( 5000 ) ); node2 = sgns::GeniusNode::New( CONFIG2, - "cafebeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", - false, - false ); + "cafebeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", + false, + false ); std::this_thread::sleep_for( std::chrono::milliseconds( 1000 ) ); } }; @@ -115,11 +115,7 @@ namespace sgns } std::cout << "Minting the required tokens" << std::endl; - auto mint_result = node1->MintTokens( total_amount, - "", - "", - TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mint_result = node1->MintTokens( total_amount, "", "", TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction failed or timed out"; auto [mint_tx_id, mint_duration] = mint_result.value(); std::cout << "Mint transaction " << mint_tx_id << " completed in " << mint_duration << " ms" << std::endl; @@ -142,11 +138,11 @@ namespace sgns node2->GetPubSub()->AddPeers( { node1->GetPubSub()->GetLocalAddress() } ); std::cout << "Waiting for the first batch of incoming transactions" << std::endl; - for ( int i = 0; i < INITIAL_WAIT_TRANSFERS ; i++ ) + for ( int i = 0; i < INITIAL_WAIT_TRANSFERS; i++ ) { - EXPECT_EQ( - node2->WaitForTransactionIncoming( tx_ids[i], - std::chrono::milliseconds( INCOMING_TIMEOUT_MILLISECONDS ) ),TransactionManager::TransactionStatus::CONFIRMED ) + EXPECT_EQ( 
node2->WaitForTransactionIncoming( tx_ids[i], + std::chrono::milliseconds( INCOMING_TIMEOUT_MILLISECONDS ) ), + TransactionManager::TransactionStatus::CONFIRMED ) << "Failed to receive initial transaction " << tx_ids[i] << " on node2"; } @@ -155,12 +151,14 @@ namespace sgns node1->GetPubSub()->AddPeers( { node2->GetPubSub()->GetLocalAddress() } ); node2->GetPubSub()->AddPeers( { node1->GetPubSub()->GetLocalAddress() } ); - std::cout << "****************************Waiting for the remaining transactions after recovery****************************" << std::endl; + std::cout + << "****************************Waiting for the remaining transactions after recovery****************************" + << std::endl; for ( int i = 0; i < TOTAL_TRANSFERS; i++ ) { - EXPECT_EQ( - node2->WaitForTransactionIncoming( tx_ids[i], - std::chrono::milliseconds( INCOMING_TIMEOUT_MILLISECONDS ) ),TransactionManager::TransactionStatus::CONFIRMED ) + EXPECT_EQ( node2->WaitForTransactionIncoming( tx_ids[i], + std::chrono::milliseconds( INCOMING_TIMEOUT_MILLISECONDS ) ), + TransactionManager::TransactionStatus::CONFIRMED ) << "Missing post-recovery transaction " << tx_ids[i]; } } diff --git a/test/src/transaction_sync/transaction_sync_test.cpp b/test/src/transaction_sync/transaction_sync_test.cpp index a32c309b6..1a4324ee0 100644 --- a/test/src/transaction_sync/transaction_sync_test.cpp +++ b/test/src/transaction_sync/transaction_sync_test.cpp @@ -121,7 +121,7 @@ namespace sgns uint64_t amount, const std::string &destination ) { - OUTCOME_TRY( auto &¶ms, + BOOST_OUTCOME_TRY( auto params, utxo_manager.CreateTxParameter( amount, destination, sgns::TokenID::FromBytes( { 0x00 } ) ) ); auto timestamp = std::chrono::system_clock::now(); @@ -139,7 +139,7 @@ namespace sgns std::optional> maybe_proof; TransferProof prover( static_cast( utxo_manager.GetBalance() ), static_cast( amount ) ); - OUTCOME_TRY( ( auto &&, proof_result ), prover.GenerateFullProof() ); + BOOST_OUTCOME_TRY( auto proof_result, 
prover.GenerateFullProof() ); maybe_proof = std::move( proof_result ); @@ -170,11 +170,7 @@ TEST_F( TransactionSyncTest, TransactionSimpleTransfer ) auto balance_2_before = node_proc2->GetBalance(); // Mint tokens with timeout - auto mint_result = node_proc1->MintTokens( 10000000000, - "", - "", - sgns::TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mint_result = node_proc1->MintTokens( 10000000000, "", "", sgns::TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction failed or timed out"; auto [mint_tx_id, mint_duration] = mint_result.value(); @@ -247,11 +243,7 @@ TEST_F( TransactionSyncTest, TransactionMintSync ) for ( auto amount : mint_amounts ) { - auto mint_result = node_proc1->MintTokens( amount, - "", - "", - sgns::TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mint_result = node_proc1->MintTokens( amount, "", "", sgns::TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction of " << amount << " failed or timed out"; auto [tx_id, duration] = mint_result.value(); @@ -259,18 +251,10 @@ TEST_F( TransactionSyncTest, TransactionMintSync ) } // Mint tokens on node_proc2 - auto mint_result1 = node_proc2->MintTokens( 10000000000, - "", - "", - sgns::TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mint_result1 = node_proc2->MintTokens( 10000000000, "", "", sgns::TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result1.has_value() ) << "Mint transaction failed or timed out"; - auto mint_result2 = node_proc2->MintTokens( 20000000000, - "", - "", - sgns::TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mint_result2 = node_proc2->MintTokens( 20000000000, "", "", sgns::TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result2.has_value() ) << "Mint transaction failed or timed 
out"; // Verify balances after minting @@ -439,17 +423,9 @@ TEST_F( TransactionSyncTest, InvalidTransactionTest ) "full_node not synched" ); // Mint tokens with timeout - auto mint_result = node_proc1->MintTokens( 10000000000, - "", - "", - TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + auto mint_result = node_proc1->MintTokens( 10000000000, "", "", TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction failed or timed out"; - mint_result = node_proc1->MintTokens( 10000000000, - "", - "", - TokenID::FromBytes( { 0x00 } ), - std::chrono::milliseconds( OUTGOING_TIMEOUT_MILLISECONDS ) ); + mint_result = node_proc1->MintTokens( 10000000000, "", "", TokenID::FromBytes( { 0x00 } ) ); ASSERT_TRUE( mint_result.has_value() ) << "Mint transaction failed or timed out"; auto [mint_tx_id, mint_duration] = mint_result.value(); diff --git a/test/testutil/primitives/CMakeLists.txt b/test/testutil/primitives/CMakeLists.txt index e1ec50fc9..3aca896f0 100644 --- a/test/testutil/primitives/CMakeLists.txt +++ b/test/testutil/primitives/CMakeLists.txt @@ -1,9 +1,6 @@ - - add_library(testutil_primitives_generator - mp_utils.hpp hash_creator.cpp - ) +) target_link_libraries(testutil_primitives_generator blob - ) +) diff --git a/test/testutil/storage/base_crdt_test.cpp b/test/testutil/storage/base_crdt_test.cpp index 9d20f6f58..da88bc758 100644 --- a/test/testutil/storage/base_crdt_test.cpp +++ b/test/testutil/storage/base_crdt_test.cpp @@ -1,5 +1,8 @@ #include "testutil/storage/base_crdt_test.hpp" +#include +#include +#include #include #include @@ -15,7 +18,7 @@ #include #include #include -#include "libp2p/protocol/common/asio/asio_scheduler.hpp" +#include using boost::asio::io_context; using sgns::crdt::GlobalDB; @@ -55,7 +58,7 @@ namespace test BOOST_ASSERT_MSG( pubs_ != nullptr, "could not create GossibPubSub for some reason" ); auto crdtOptions = sgns::crdt::CrdtOptions::DefaultOptions(); - auto 
scheduler = std::make_shared( io_, libp2p::protocol::SchedulerConfig{} ); + auto scheduler = std::make_shared( std::make_shared(io_), libp2p::basic::Scheduler::Config{std::chrono::milliseconds(100)} ); auto generator = std::make_shared(); auto graphsyncnetwork = std::make_shared( pubs_->GetHost(), scheduler );