diff --git a/.github/workflows/clippy-lint.yaml b/.github/workflows/clippy-lint.yaml index e73915f245..d4ef46216a 100644 --- a/.github/workflows/clippy-lint.yaml +++ b/.github/workflows/clippy-lint.yaml @@ -27,5 +27,6 @@ jobs: toolchain: 1.89 override: true components: clippy - - name: Run Clippy on different workspaces and crates - run: ./scripts/rust/clippy.sh + - name: Run Clippy + run: | + cargo clippy --all-features -- -D warnings diff --git a/.github/workflows/coverage-protocols.yaml b/.github/workflows/coverage-protocols.yaml index 4b0239e12d..5b33615af8 100644 --- a/.github/workflows/coverage-protocols.yaml +++ b/.github/workflows/coverage-protocols.yaml @@ -24,95 +24,119 @@ jobs: - name: Upload protocols coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports - file: ./protocols/target/tarpaulin-reports/cobertura.xml + directory: ./target/tarpaulin-reports + file: ./target/tarpaulin-reports/cobertura.xml flags: protocols token: ${{ secrets.CODECOV_TOKEN }} - + + - name: Upload binary_codec_sv2-coverage to codecov.io + uses: codecov/codecov-action@v4 + with: + directory: ./target/tarpaulin-reports/codec-coverage + file: ./target/tarpaulin-reports/codec-coverage/cobertura.xml + flags: binary_codec_sv2-coverage + token: ${{ secrets.CODECOV_TOKEN }} + - name: Upload binary_sv2-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/binary-sv2-coverage - file: ./protocols/target/tarpaulin-reports/binary-sv2-coverage/cobertura.xml + directory: ./target/tarpaulin-reports/binary-sv2-coverage + file: ./target/tarpaulin-reports/binary-sv2-coverage/cobertura.xml flags: binary_sv2-coverage token: ${{ secrets.CODECOV_TOKEN }} - name: Upload codec_sv2-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/codec-sv2-coverage - file: ./protocols/target/tarpaulin-reports/codec-sv2-coverage/cobertura.xml + 
directory: ./target/tarpaulin-reports/codec-sv2-coverage + file: ./target/tarpaulin-reports/codec-sv2-coverage/cobertura.xml flags: codec_sv2-coverage token: ${{ secrets.CODECOV_TOKEN }} - name: Upload channels_sv2-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/channels-sv2-coverage - file: ./protocols/target/tarpaulin-reports/channels-sv2-coverage/cobertura.xml + directory: ./target/tarpaulin-reports/channels-sv2-coverage + file: ./target/tarpaulin-reports/channels-sv2-coverage/cobertura.xml flags: channels_sv2-coverage token: ${{ secrets.CODECOV_TOKEN }} - name: Upload common_messages_sv2-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/common-messages-coverage - file: ./protocols/target/tarpaulin-reports/common-messages-coverage/cobertura.xml + directory: ./target/tarpaulin-reports/common-messages-coverage + file: ./target/tarpaulin-reports/common-messages-coverage/cobertura.xml flags: common_messages_sv2-coverage token: ${{ secrets.CODECOV_TOKEN }} - name: Upload framing_sv2-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/framing-sv2-coverage - file: ./protocols/target/tarpaulin-reports/framing-sv2-coverage/cobertura.xml + directory: ./target/tarpaulin-reports/framing-sv2-coverage + file: ./target/tarpaulin-reports/framing-sv2-coverage/cobertura.xml flags: framing_sv2-coverage token: ${{ secrets.CODECOV_TOKEN }} - name: Upload job_declaration_sv2-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/job-declaration-coverage - file: ./protocols/target/tarpaulin-reports/job-declaration-coverage/cobertura.xml + directory: ./target/tarpaulin-reports/job-declaration-coverage + file: ./target/tarpaulin-reports/job-declaration-coverage/cobertura.xml flags: job_declaration_sv2-coverage token: ${{ secrets.CODECOV_TOKEN }} - 
name: Upload noise_sv2-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/noise-sv2-coverage - file: ./protocols/target/tarpaulin-reports/noise-sv2-coverage/cobertura.xml + directory: ./target/tarpaulin-reports/noise-sv2-coverage + file: ./target/tarpaulin-reports/noise-sv2-coverage/cobertura.xml flags: noise_sv2-coverage token: ${{ secrets.CODECOV_TOKEN }} - name: Upload parsers_sv2-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/parsers-sv2-coverage - file: ./protocols/target/tarpaulin-reports/parsers-sv2-coverage/cobertura.xml + directory: ./target/tarpaulin-reports/parsers-sv2-coverage + file: ./target/tarpaulin-reports/parsers-sv2-coverage/cobertura.xml flags: parsers_sv2-coverage token: ${{ secrets.CODECOV_TOKEN }} + - name: Upload roles_logic_sv2-coverage to codecov.io + uses: codecov/codecov-action@v4 + with: + directory: ./target/tarpaulin-reports/roles-logic-sv2-coverage + file: ./target/tarpaulin-reports/roles-logic-sv2-coverage/cobertura.xml + flags: roles_logic_sv2-coverage + token: ${{ secrets.CODECOV_TOKEN }} + - name: Upload v1-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/v1-coverage - file: ./protocols/target/tarpaulin-reports/v1-coverage/cobertura.xml + directory: ./target/tarpaulin-reports/v1-coverage + file: ./target/tarpaulin-reports/v1-coverage/cobertura.xml flags: v1-coverage token: ${{ secrets.CODECOV_TOKEN }} - name: Upload template_distribution_sv2-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/template-distribution-coverage - file: ./protocols/target/tarpaulin-reports/template-distribution-coverage/cobertura.xml + directory: ./target/tarpaulin-reports/template-distribution-coverage + file: ./target/tarpaulin-reports/template-distribution-coverage/cobertura.xml flags: 
template_distribution_sv2-coverage token: ${{ secrets.CODECOV_TOKEN }} - name: Upload mining-coverage to codecov.io uses: codecov/codecov-action@v4 with: - directory: ./protocols/target/tarpaulin-reports/mining-coverage - file: ./protocols/target/tarpaulin-reports/mining-coverage/cobertura.xml + directory: ./target/tarpaulin-reports/mining-coverage + file: ./target/tarpaulin-reports/mining-coverage/cobertura.xml flags: mining-coverage token: ${{ secrets.CODECOV_TOKEN }} + + - name: Upload handlers_sv2-coverage to codecov.io + uses: codecov/codecov-action@v4 + with: + directory: ./target/tarpaulin-reports/handlers-sv2-coverage + file: ./target/tarpaulin-reports/handlers-sv2-coverage/cobertura.xml + flags: handlers_sv2-coverage + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/coverage-roles.yaml b/.github/workflows/coverage-roles.yaml deleted file mode 100644 index ce2c51e376..0000000000 --- a/.github/workflows/coverage-roles.yaml +++ /dev/null @@ -1,86 +0,0 @@ -name: Roles test Coverage - -on: - push: - branches: - - main - -jobs: - roles-coverage: - - name: tarpaulin Test - runs-on: ubuntu-latest - container: - image: xd009642/tarpaulin:0.27.1-nightly - options: --security-opt seccomp=unconfined - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Generate code coverage - run: | - ./scripts/coverage-roles.sh - - - name: Upload roles coverage to codecov.io - uses: codecov/codecov-action@v4 - with: - directory: ./roles/target/tarpaulin-reports - file: ./roles/target/tarpaulin-reports/cobertura.xml - flags: roles - token: ${{ secrets.CODECOV_TOKEN }} - - - name: Upload jd_client-coverage to codecov.io - uses: codecov/codecov-action@v4 - with: - directory: ./roles/target/tarpaulin-reports/jd-client-coverage - file: ./roles/target/tarpaulin-reports/jd-client-coverage/cobertura.xml - flags: jd_client-coverage - token: ${{ secrets.CODECOV_TOKEN }} - - - name: Upload jd_server-coverage to codecov.io - uses: 
codecov/codecov-action@v4 - with: - directory: ./roles/target/tarpaulin-reports/jd-server-coverage - file: ./roles/target/tarpaulin-reports/jd-server-coverage/cobertura.xml - flags: jd_server-coverage - token: ${{ secrets.CODECOV_TOKEN }} - - - name: Upload mining_device-coverage to codecov.io - uses: codecov/codecov-action@v4 - with: - directory: ./roles/target/tarpaulin-reports/mining-device-coverage - file: ./roles/target/tarpaulin-reports/mining-device-coverage/cobertura.xml - flags: mining_device-coverage - token: ${{ secrets.CODECOV_TOKEN }} - - - name: Upload pool_sv2-coverage to codecov.io - uses: codecov/codecov-action@v4 - with: - directory: ./roles/target/tarpaulin-reports/pool-coverage - file: ./roles/target/tarpaulin-reports/pool-coverage/cobertura.xml - flags: pool_sv2-coverage - token: ${{ secrets.CODECOV_TOKEN }} - - - name: Upload sv1-mining-device-coverage to codecov.io - uses: codecov/codecov-action@v4 - with: - directory: ./roles/target/tarpaulin-reports/sv1-mining-device-coverage - file: ./roles/target/tarpaulin-reports/sv1-mining-device-coverage/cobertura.xml - flags: sv1-mining-device-coverage - token: ${{ secrets.CODECOV_TOKEN }} - - - name: Upload translator_sv2-coverage to codecov.io - uses: codecov/codecov-action@v4 - with: - directory: ./roles/target/tarpaulin-reports/translator-coverage - file: ./roles/target/tarpaulin-reports/translator-coverage/cobertura.xml - flags: translator_sv2-coverage - token: ${{ secrets.CODECOV_TOKEN }} - - - name: Upload stratum-apps-coverage to codecov.io - uses: codecov/codecov-action@v4 - with: - directory: ./roles/target/tarpaulin-reports/stratum-apps-coverage - file: ./roles/target/tarpaulin-reports/stratum-apps-coverage/cobertura.xml - flags: stratum-apps-coverage - token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/coverage-utils.yaml b/.github/workflows/coverage-utils.yaml deleted file mode 100644 index a3c6611263..0000000000 --- a/.github/workflows/coverage-utils.yaml +++ /dev/null @@ 
-1,39 +0,0 @@ -name: Util Test Coverage - -on: - push: - branches: - - main - -jobs: - utils-coverage: - - name: tarpaulin Test - runs-on: ubuntu-latest - container: - image: xd009642/tarpaulin:0.27.1-nightly - options: --security-opt seccomp=unconfined - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Generate code coverage - run: | - ./scripts/coverage-utils.sh - - - name: Upload utils coverage to codecov.io - uses: codecov/codecov-action@v4 - with: - directory: ./utils/target/tarpaulin-reports - file: ./utils/target/tarpaulin-reports/cobertura.xml - flags: utils - token: ${{ secrets.CODECOV_TOKEN }} - - - name: Upload buffer_sv2-coverage to codecov.io - uses: codecov/codecov-action@v4 - with: - directory: ./utils/target/tarpaulin-reports/buffer-coverage - file: ./utils/target/tarpaulin-reports/buffer-coverage/cobertura.xml - flags: buffer_sv2-coverage - - token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 347b4a8293..10bbec46f7 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -31,65 +31,70 @@ jobs: - name: Rust Docs crate buffer_sv2 run: | - cd utils/buffer + cd sv2/buffer-sv2 cargo doc - name: Rust Docs crate binary_sv2 derive_codec run: | - cd protocols/v2/binary-sv2/derive_codec + cd sv2/binary-sv2/derive_codec cargo doc - name: Rust Docs crate binary_sv2 run: | - cd protocols/v2/binary-sv2 + cd sv2/binary-sv2 cargo doc --features with_buffer_pool - name: Rust Docs crate channels_sv2 run: | - cd protocols/v2/channels-sv2 + cd sv2/channels-sv2 cargo doc - name: Rust Docs crate parsers_sv2 run: | - cd protocols/v2/parsers-sv2 + cd sv2/parsers-sv2 cargo doc - name: Rust Docs crate framing_sv2 run: | - cd protocols/v2/framing-sv2 + cd sv2/framing-sv2 cargo doc --features with_buffer_pool - name: Rust Docs crate noise_sv2 run: | - cd protocols/v2/noise-sv2 + cd sv2/noise-sv2 cargo doc --features std - name: Rust Docs crate codec_sv2 run: | - cd 
protocols/v2/codec-sv2 + cd sv2/codec-sv2 cargo doc --features with_buffer_pool,noise_sv2 + - name: Rust Docs crate handlers_sv2 + run: | + cd sv2/handlers-sv2 + cargo doc + - name: Rust Docs crate common_messages run: | - cd protocols/v2/subprotocols/common-messages + cd sv2/subprotocols/common-messages cargo doc - name: Rust Docs crate job_declaration run: | - cd protocols/v2/subprotocols/job-declaration + cd sv2/subprotocols/job-declaration cargo doc --all-features - name: Rust Docs crate mining run: | - cd protocols/v2/subprotocols/mining + cd sv2/subprotocols/mining cargo doc --all-features - name: Rust Docs crate template_distribution run: | - cd protocols/v2/subprotocols/template-distribution + cd sv2/subprotocols/template-distribution cargo doc - name: Rust Docs crate sv1_api run: | - cd protocols/v1 + cd sv1 cargo doc diff --git a/.github/workflows/fmt.yaml b/.github/workflows/fmt.yaml index 0006a96765..91bf9254df 100644 --- a/.github/workflows/fmt.yaml +++ b/.github/workflows/fmt.yaml @@ -29,8 +29,4 @@ jobs: components: rustfmt - name: Run fmt in different workspaces and crates run: | - cargo fmt --all --manifest-path=stratum-core/Cargo.toml -- --check - cargo fmt --all --manifest-path=protocols/Cargo.toml -- --check - cargo fmt --all --manifest-path=roles/Cargo.toml -- --check - cargo fmt --all --manifest-path=utils/Cargo.toml -- --check - cargo fmt --all --manifest-path=test/integration-tests/Cargo.toml -- --check + cargo fmt --all -- --check --verbose diff --git a/.github/workflows/integration-tests.yaml b/.github/workflows/integration-tests.yaml index 23e3017950..9b3a5d5ed1 100644 --- a/.github/workflows/integration-tests.yaml +++ b/.github/workflows/integration-tests.yaml @@ -24,9 +24,6 @@ jobs: toolchain: stable override: true - - name: Install cargo-nextest - run: cargo install cargo-nextest --locked - - - name: Integration Tests + - name: Run Integration Tests Script run: | - RUST_BACKTRACE=1 RUST_LOG=debug cargo nextest run 
--manifest-path=test/integration-tests/Cargo.toml --nocapture + ./scripts/run-integration-tests.sh diff --git a/.github/workflows/lockfiles.yaml b/.github/workflows/lockfiles.yaml deleted file mode 100644 index cfe52da48d..0000000000 --- a/.github/workflows/lockfiles.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Lockfiles - -# Trigger the workflow on pull request events for the main branch -on: - pull_request: - branches: - - main - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - - name: Build with locked dependencies - run: | - cargo build --manifest-path=roles/Cargo.toml --locked - cargo build --manifest-path=utils/Cargo.toml --locked - cargo build --manifest-path=test/integration-tests/Cargo.toml --locked diff --git a/.github/workflows/release-libs.yaml b/.github/workflows/release-libs.yaml index 7fbc12fa1a..6d8d0782f6 100644 --- a/.github/workflows/release-libs.yaml +++ b/.github/workflows/release-libs.yaml @@ -32,77 +32,73 @@ jobs: # Base dependencies with no local dependencies - name: Publish crate buffer_sv2 run: | - ./scripts/release-libs.sh utils/buffer + ./scripts/release-libs.sh sv2/buffer-sv2 - name: Publish crate noise_sv2 run: | - ./scripts/release-libs.sh protocols/v2/noise-sv2 + ./scripts/release-libs.sh sv2/noise-sv2 - name: Publish crate binary_sv2 derive_codec run: | - ./scripts/release-libs.sh protocols/v2/binary-sv2/derive_codec + ./scripts/release-libs.sh sv2/binary-sv2/derive_codec - name: Publish crate binary_sv2 run: | - ./scripts/release-libs.sh protocols/v2/binary-sv2 + ./scripts/release-libs.sh sv2/binary-sv2 # framing_sv2(depends on binary_sv2, buffer_sv2, noise_sv2) - name: Publish crate framing_sv2 run: | - ./scripts/release-libs.sh protocols/v2/framing-sv2 + ./scripts/release-libs.sh sv2/framing-sv2 # codec_sv2 (depends on framing_sv2, noise_sv2, binary_sv2, buffer_sv2) - name: Publish crate 
codec_sv2 run: | - ./scripts/release-libs.sh protocols/v2/codec-sv2 + ./scripts/release-libs.sh sv2/codec-sv2 # Subprotocols (depend on binary_sv2) - name: Publish crate common_messages run: | - ./scripts/release-libs.sh protocols/v2/subprotocols/common-messages + ./scripts/release-libs.sh sv2/subprotocols/common-messages - name: Publish crate job_declaration run: | - ./scripts/release-libs.sh protocols/v2/subprotocols/job-declaration + ./scripts/release-libs.sh sv2/subprotocols/job-declaration - name: Publish crate mining run: | - ./scripts/release-libs.sh protocols/v2/subprotocols/mining + ./scripts/release-libs.sh sv2/subprotocols/mining - name: Publish crate template_distribution run: | - ./scripts/release-libs.sh protocols/v2/subprotocols/template-distribution + ./scripts/release-libs.sh sv2/subprotocols/template-distribution # channels_sv2 (depends on binary_sv2, common_messages_sv2, mining_sv2, template_distribution_sv2, job_declaration_sv2) - name: Publish crate channels_sv2 run: | - ./scripts/release-libs.sh protocols/v2/channels-sv2 + ./scripts/release-libs.sh sv2/channels-sv2 # parsers_sv2 (depends on binary_sv2, framing_sv2, common_messages, mining, template_distribution, job_declaration) - name: Publish crate parsers_sv2 run: | - ./scripts/release-libs.sh protocols/v2/parsers-sv2 + ./scripts/release-libs.sh sv2/parsers-sv2 # sv1_api (depends on binary_sv2) - name: Publish crate v1 run: | - ./scripts/release-libs.sh protocols/v1 + ./scripts/release-libs.sh sv1 # stratum_translation (depends on binary_sv2, mining_sv2, channels_sv2, v1) - name: Publish crate stratum_translation run: | - ./scripts/release-libs.sh protocols/stratum-translation + ./scripts/release-libs.sh stratum-core/stratum-translation # handlers_sv2 (depends on parsers_sv2, binary_sv2, common_messages_sv2, mining_sv2, template_distribution_sv2, job_declaration_sv2) - name: Publish crate handlers_sv2 run: | - ./scripts/release-libs.sh protocols/v2/handlers-sv2 + ./scripts/release-libs.sh 
sv2/handlers-sv2 # Stratum Core (re-exports all the protocol crates) - name: Publish crate stratum-core run: | ./scripts/release-libs.sh stratum-core/stratum-core - # stratum-apps (depends on stratum-core and external crates) - - name: Publish crate stratum-apps - run: | - ./scripts/release-libs.sh roles/stratum-apps diff --git a/.github/workflows/rust-msrv.yaml b/.github/workflows/rust-msrv.yaml index ae1a84cf7c..17c98b53fb 100644 --- a/.github/workflows/rust-msrv.yaml +++ b/.github/workflows/rust-msrv.yaml @@ -22,25 +22,9 @@ jobs: with: toolchain: ${{ matrix.rust }} override: true - - name: Build stratum-core - run: cargo build --manifest-path=stratum-core/Cargo.toml - - name: Build Protocols - run: cargo build --manifest-path=protocols/Cargo.toml - - name: Build Roles - run: cargo build --locked --manifest-path=roles/Cargo.toml - - name: Build Utils - run: cargo build --locked --manifest-path=utils/Cargo.toml - - name: Build Integration Tests - run: cargo build --locked --manifest-path=test/integration-tests/Cargo.toml + - name: Build Workspace + run: cargo build # also check test compilation without running tests - - name: Check Test Compilation for Protocols - run: cargo test --manifest-path=protocols/Cargo.toml --no-run - - name: Check Test Compilation for Stratum Core - run: cargo test --manifest-path=stratum-core/Cargo.toml --no-run - - name: Check Test Compilation for Roles - run: cargo test --locked --manifest-path=roles/Cargo.toml --no-run - - name: Check Test Compilation for Utils - run: cargo test --locked --manifest-path=utils/Cargo.toml --no-run - - name: Check Test Compilation for Integration Tests - run: cargo test --locked --manifest-path=test/integration-tests/Cargo.toml --no-run + - name: Check Test Compilation for Workspace + run: cargo test --no-run diff --git a/.github/workflows/semver-check.yaml b/.github/workflows/semver-check.yaml index d7865da3d5..5b845d2606 100644 --- a/.github/workflows/semver-check.yaml +++ 
b/.github/workflows/semver-check.yaml @@ -41,60 +41,60 @@ jobs: - name: Install cargo-semver-checks run: cargo install cargo-semver-checks --version 0.37.0 --locked - - name: Run semver checks for utils/buffer - working-directory: utils/buffer + - name: Run semver checks for sv2/buffer-sv2 + working-directory: sv2/buffer-sv2 run: cargo semver-checks - - name: Run semver checks for protocols/v2/binary-sv2 - working-directory: protocols/v2/binary-sv2 + - name: Run semver checks for sv2/binary-sv2 + working-directory: sv2/binary-sv2 run: cargo semver-checks - - name: Run semver checks for protocols/v2/framing-sv2 - working-directory: protocols/v2/framing-sv2 + - name: Run semver checks for sv2/framing-sv2 + working-directory: sv2/framing-sv2 run: cargo semver-checks - - name: Run semver checks for protocols/v2/noise-sv2 - working-directory: protocols/v2/noise-sv2 + - name: Run semver checks for sv2/noise-sv2 + working-directory: sv2/noise-sv2 run: cargo semver-checks - - name: Run semver checks for protocols/v2/codec-sv2 - working-directory: protocols/v2/codec-sv2 + - name: Run semver checks for sv2/codec-sv2 + working-directory: sv2/codec-sv2 run: cargo semver-checks - - name: Run semver checks for protocols/v2/subprotocols/common-messages - working-directory: protocols/v2/subprotocols/common-messages + - name: Run semver checks for sv2/subprotocols/common-messages + working-directory: sv2/subprotocols/common-messages run: cargo semver-checks - - name: Run semver checks for protocols/v2/subprotocols/job-declaration - working-directory: protocols/v2/subprotocols/job-declaration + - name: Run semver checks for sv2/subprotocols/job-declaration + working-directory: sv2/subprotocols/job-declaration run: cargo semver-checks - - name: Run semver checks for protocols/v2/subprotocols/mining - working-directory: protocols/v2/subprotocols/mining + - name: Run semver checks for sv2/subprotocols/mining + working-directory: sv2/subprotocols/mining run: cargo semver-checks - - 
name: Run semver checks for protocols/v2/subprotocols/template-distribution - working-directory: protocols/v2/subprotocols/template-distribution + - name: Run semver checks for sv2/subprotocols/template-distribution + working-directory: sv2/subprotocols/template-distribution run: cargo semver-checks - - name: Run semver checks for protocols/v2/channels-sv2 - working-directory: protocols/v2/channels-sv2 + - name: Run semver checks for sv2/channels-sv2 + working-directory: sv2/channels-sv2 run: cargo semver-checks - - name: Run semver checks for protocols/v2/parsers-sv2 - working-directory: protocols/v2/parsers-sv2 + - name: Run semver checks for sv2/parsers-sv2 + working-directory: sv2/parsers-sv2 run: cargo semver-checks - - name: Run semver checks for protocols/v2/handlers-sv2 - working-directory: protocols/v2/handlers-sv2 + - name: Run semver checks for sv2/handlers-sv2 + working-directory: sv2/handlers-sv2 run: cargo semver-checks - name: Run semver checks for protocols/v1 - working-directory: protocols/v1 + working-directory: sv1 run: cargo semver-checks - - name: Run semver checks for protocols/stratum-translation - working-directory: protocols/stratum-translation + - name: Run semver checks for stratum-core/stratum-translation + working-directory: stratum-core/stratum-translation run: cargo semver-checks # TODO: Uncomment this when the stratum-core crate is published to crates.io @@ -102,7 +102,3 @@ jobs: # working-directory: stratum-core # run: cargo semver-checks - # TODO: Uncomment this when the stratum-apps crate is published to crates.io - # - name: Run semver checks for roles/stratum-apps - # working-directory: roles/stratum-apps - # run: cargo semver-checks diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 68bac3d762..c0e8c03576 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -1,18 +1,49 @@ +name: Rust CI + on: pull_request: branches: - main -name: Test, Prop Tests, Example Tests - jobs: - ci: + 
test-core: runs-on: ${{ matrix.os }} strategy: matrix: os: + - ubuntu-latest - macos-latest + + steps: + - uses: actions/checkout@v4 + + - name: Setup Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + profile: minimal + override: true + + - name: Cache cargo registry + uses: actions/cache@v3 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Run tests + run: | + cargo test --verbose + + run_examples: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: - ubuntu-latest + - macos-latest include: - os: macos-latest target: aarch64-apple-darwin @@ -20,69 +51,36 @@ jobs: target: x86_64-unknown-linux-musl steps: - - name: Install stable toolchain & components - uses: actions/checkout@v4 + - uses: actions/checkout@v4 + + - name: Setup Rust + uses: actions-rs/toolchain@v1 with: - profile: minimal toolchain: nightly + profile: minimal override: true - name: Build run: | - cargo build --manifest-path=stratum-core/Cargo.toml - cargo build --manifest-path=protocols/Cargo.toml - cargo build --manifest-path=roles/Cargo.toml - cargo build --manifest-path=utils/Cargo.toml - cargo build --manifest-path=roles/stratum-apps/Cargo.toml - - - name: Run sv1-client-and-server example - run: | - cargo run --manifest-path=protocols/v1/Cargo.toml --example client_and_server 30 - - - name: Run framing-sv2 example - run: | - cargo run --manifest-path=protocols/v2/framing-sv2/Cargo.toml --example sv2_frame - - - name: Run codec-sv2 examples - run: | - cargo run --manifest-path=protocols/v2/codec-sv2/Cargo.toml --example unencrypted - cargo run --manifest-path=protocols/v2/codec-sv2/Cargo.toml --example encrypted --features=noise_sv2 + cargo build --workspace - - name: Run binary-sv2 examples + - name: Run examples run: | - cargo run --manifest-path=protocols/v2/binary-sv2/Cargo.toml --example encode_decode + cargo run --manifest-path=sv1/Cargo.toml --example client_and_server 30 + cargo 
run --manifest-path=sv2/framing-sv2/Cargo.toml --example sv2_frame + cargo run --manifest-path=sv2/codec-sv2/Cargo.toml --example unencrypted + cargo run --manifest-path=sv2/codec-sv2/Cargo.toml --example encrypted --features=noise_sv2 + cargo run --manifest-path=sv2/binary-sv2/Cargo.toml --example encode_decode + cargo run --manifest-path=sv2/noise-sv2/Cargo.toml --example handshake - - name: Run noise-sv2 examples - run: | - cargo run --manifest-path=protocols/v2/noise-sv2/Cargo.toml --example handshake - - - name: fuzz tests + - name: Fuzz tests run: | if [ ${{ matrix.os }} == "ubuntu-latest" ]; then ./run.sh 30 else echo "Skipping fuzz test on ${{ matrix.os }} - not supported" fi - working-directory: utils/buffer/fuzz + working-directory: sv2/buffer-sv2/fuzz - - name: Test - run: | - cargo test --manifest-path=stratum-core/Cargo.toml - cargo test --manifest-path=protocols/Cargo.toml - cargo test --manifest-path=roles/Cargo.toml - cargo test --manifest-path=utils/Cargo.toml - cargo test --manifest-path=roles/stratum-apps/Cargo.toml --features config - cargo test --manifest-path=roles/stratum-apps/Cargo.toml sv1_connection::tests::test_sv1_connection --features sv1 - cargo test --manifest-path=protocols/stratum-translation/Cargo.toml - - - name: Property based testing - run: | - cargo test --manifest-path=protocols/Cargo.toml --features prop_test - - - name: Run ping-pong-encrypted example - run: | - cargo run --manifest-path=examples/ping-pong-encrypted/Cargo.toml - - - name: Run ping-pong example - run: | - cargo run --manifest-path=examples/ping-pong/Cargo.toml + - name: Property-based testing + run: cargo test --features prop_test diff --git a/.gitignore b/.gitignore index 70c24c10d0..44edb9a6d2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,28 +1,5 @@ .idea -*/**/target -/protocols/guix-example/guix-example.h -/protocols/Cargo.lock -/stratum-core/Cargo.lock -/roles/stratum-apps/Cargo.lock -/protocols/v2/binary-sv2/derive_codec/Cargo.lock -/benches/Cargo.lock 
-/ignore -/vendor/ed25519-dalek/target -/utils/buffer/target -/sv2.h -/test/bitcoin_data/regtest -lcov.info -/target +/integration-test-framework +Cargo.lock +target/ .vscode -*.py -**/conf/** -cobertura.xml -/roles/*/*-config.toml -/examples/*/Cargo.lock -/scripts/sv2.h -/test/integration-tests/template-provider -/test/integration-tests/minerd -**/template-provider -stratum-message-generator -*.log -**/minerd \ No newline at end of file diff --git a/roles/Cargo.toml b/Cargo.toml similarity index 53% rename from roles/Cargo.toml rename to Cargo.toml index d71deeb381..7274f43df2 100644 --- a/roles/Cargo.toml +++ b/Cargo.toml @@ -1,13 +1,12 @@ [workspace] -resolver="2" +resolver = "2" members = [ - "pool", - "test-utils/mining-device", - "translator", - "jd-client", - "jd-server", - "stratum-apps" + "stratum-core", +] + +exclude = [ + "integration-test-framework" ] [profile.dev] diff --git a/README.md b/README.md index 0dc2c554f6..463ddfd118 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,11 @@ -


SRI
-Stratum V2 Reference Implementation (SRI) +SV2 Libraries

- -

SRI is a reference implementation of the Stratum V2 protocol written in Rust πŸ¦€.

- +

Stratum V2 protocol libraries from the SRI project πŸ¦€

codecov @@ -18,89 +15,65 @@ Stratum V2 Reference Implementation (SRI)

-## πŸ’Ό Table of Contents - -

- Introduction β€’ - Getting Started β€’ - Use Cases β€’ - Roadmap β€’ - Contribute β€’ - Support β€’ - Donate β€’ - Supporters β€’ - License - MSRV -

- -## πŸ‘‹ Introduction - -Welcome to the official GitHub repository for the **SRI - Stratum V2 Reference Implementation**. - -[Stratum V2](https://stratumprotocol.org) is a next-generation bitcoin mining protocol designed to enhance the efficiency, security, flexibility and decentralization. -SRI is fully open-source, community-developed, independent of any single entity, aiming to be fully compatible with [Stratum V2 Specification](https://github.com/stratum-mining/sv2-spec). - -## ⛏️ Getting Started - -To get started with the Stratum V2 Reference Implementation (SRI), please follow the detailed setup instructions available on the official website: +# Stratum Repository -[Getting Started with Stratum V2](https://stratumprotocol.org/blog/getting-started/) +This repository contains the low-level crates. +If you’re looking to run Sv2 applications at the most recent changes, check out the [`sv2-apps` repository](https://github.com/stratum-mining/sv2-apps). Those crates are application-level, currently in **alpha** stage. -This guide provides all the necessary information on prerequisites, installation, and configuration to help you begin using, testing or contributing to SRI. 
+## Contents -## πŸš€ Use Cases +- `sv1/` - Stratum V1 protocol implementation and utilities +- `sv2/` - Stratum V2 protocol implementations + - `binary-sv2/` - Binary encoding/decoding for SV2 messages + - `buffer-sv2/` - Buffer management and pooling + - `channels-sv2/` - Channel management for SV2 + - `codec-sv2/` - SV2 message codec with encryption support + - `framing-sv2/` - SV2 message framing utilities + - `handlers-sv2/` - SV2 message handler traits + - `noise-sv2/` - Noise protocol implementation for SV2 + - `parsers-sv2/` - Message parsing utilities + - `subprotocols/` - SV2 subprotocol implementations +- `stratum-core/` - Entrypoint for all the low-level crates in `sv2/` and `sv1/` implementations + - `stratum-translation/` - Stratum V1 ↔ Stratum V2 translation utilities -The library is modular to address different use-cases and desired functionality. Examples include: ## Local Integration Testing -### πŸ‘· Miners +To run integration tests locally: -- SV1 Miners can use the translator proxy (`roles/translator`) to connect with a SV2-compatible pool. -- SV1 mining farms mining to a SV2-compatible pool gain some of the security and efficiency improvements SV2 offers over Stratum V1 (SV1). The SV1<->SV2 translator proxy does not support _all_ the features of SV2, but works as a temporary measure before upgrading completely to SV2-compatible firmware. (The SV1<->SV2 translation proxy implementation is a work in progress.) +```bash +./scripts/run-integration-tests.sh +``` -### πŸ› οΈ Pools - -- Pools supporting SV2 can deploy the open source binary crate (`roles/pool`) to offer their clients (miners participating in said pool) an SV2-compatible pool. -- The Rust helper library provides a suite of tools for mining pools to build custom SV2 compatible pool implementations. +This will: +1. Clone/update the integration test framework +2. Update dependencies to use your local changes +3. Run the full integration test suite +4. 
Restore the original configuration ## πŸ›£ Roadmap -Our roadmap is publicly available, outlining current and future plans. Decisions on the roadmap are made through a consensus-driven approach, through participation on dev meetings, Discord or GitHub. - -[View the SRI Roadmap](https://github.com/orgs/stratum-mining/projects/5) - -### πŸ… Project Maturity - -Low-level crates (`protocols` directory) are considered **beta** software. Rust API Docs is a [work-in-progress](https://github.com/stratum-mining/stratum/issues/845), and the community should still expect small breaking API changes and patches. - -Application-level crates (`roles` directory) are considered **alpha** software, and bugs are expected. They should be used as a guide on how to consume the low-level crates as dependencies. +Our roadmap is publicly available as part of the broader SRI project, outlining current and future plans. Decisions are made through a consensus-driven approach via dev meetings, Discord, and GitHub. -### 🎯 Goals - -The goals of this project are to provide: - -1. A robust set of Stratum V2 (SV2) primitives as Rust library crates which anyone can use - to expand the protocol or implement a role. For example: - - Pools supporting SV2 - - Mining-device/hashrate producers integrating SV2 into their firmware - - Bitcoin nodes implementing Template Provider to build the `blocktemplate` -2. A set of helpers built on top of the above primitives and the external Bitcoin-related Rust crates for anyone to implement the SV2 roles. -3. An open-source implementation of a SV2 proxy for miners. -4. An open-source implementation of a SV2 pool for mining pool operators. 
+[View the SRI Roadmap](https://github.com/orgs/stratum-mining/projects/15) ## πŸ’» Contribute -If you are a developer looking to help, but you're not sure where to begin, check the [good first issue label](https://github.com/stratum-mining/stratum/labels/good%20first%20issue), which contains small pieces of work that have been specifically flagged as being friendly to new contributors. +We welcome contributions to improve these SV2 libraries! Here's how you can help: -Contributors looking to do something a bit more challenging, before opening a pull request, please join [our community chat](https://discord.gg/fsEW23wFYs) or [start a GitHub issue](https://github.com/stratum-mining/stratum/issues) to get early feedback, discuss the best ways to tackle the problem, and ensure there is no work duplication and consensus. +1. **Start small**: Check the [good first issue label](https://github.com/stratum-mining/stratum/labels/good%20first%20issue) in the main SRI repository +2. **Join the community**: Connect with us on [Discord](https://discord.gg/fsEW23wFYs) before starting larger contributions +3. **Open issues**: [Create GitHub issues](https://github.com/stratum-mining/stratum/issues) for bugs, feature requests, or questions +4. **Follow standards**: Ensure code follows Rust best practices and includes appropriate tests ## 🀝 Support -Join our Discord community to get help, share your ideas, or discuss anything related to Stratum V2 and its reference implementation. - -Whether you're looking for technical support, want to contribute, or are just interested in learning more about the project, our community is the place to be. 
+Join our Discord community for technical support, discussions, and collaboration: [Join the Stratum V2 Discord Community](https://discord.gg/fsEW23wFYs) +For detailed documentation and guides, visit: +[Stratum V2 Documentation](https://stratumprotocol.org) + ## 🎁 Donate ### πŸ‘€ Individual Donations @@ -111,8 +84,6 @@ If you wish to support the development and maintenance of the Stratum V2 Referen ### 🏒 Corporate Donations For corporate entities interested in providing more substantial support, such as grants to SRI contributors, please get in touch with us directly. Your support can make a significant difference in accelerating development, research, and innovation. -Email us at: stratumv2@gmail.com - ## πŸ™ Supporters SRI contributors are independently, financially supported by following entities: @@ -134,4 +105,4 @@ Minimum Supported Rust Version: 1.75.0 > Website [stratumprotocol.org](https://www.stratumprotocol.org)  ·  > Discord [SV2 Discord](https://discord.gg/fsEW23wFYs)  ·  -> Twitter [@Stratumv2](https://twitter.com/StratumV2) +> Twitter [@Stratumv2](https://twitter.com/StratumV2)  ·  \ No newline at end of file diff --git a/examples/ping-pong-encrypted/Cargo.toml b/examples/ping-pong-encrypted/Cargo.toml deleted file mode 100644 index a59daf9441..0000000000 --- a/examples/ping-pong-encrypted/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "ping-pong-encrypted" -version = "0.1.0" -edition = "2021" -authors = [ "SRI Community" ] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -stratum-apps = { path = "../../roles/stratum-apps" } -rand = "0.8" -tokio = { version = "1.44.1", features = [ "full" ] } -async-channel = "1.5.1" diff --git a/examples/ping-pong-encrypted/README.md b/examples/ping-pong-encrypted/README.md deleted file mode 100644 index ab4da5813e..0000000000 --- a/examples/ping-pong-encrypted/README.md +++ /dev/null @@ -1,20 +0,0 @@ -`ping-pong-encrypted` is an 
example of how to encode and decode SV2 binary frames (without any encryption layer) while leveraging the following crates: -- [`binary_sv2`](http://docs.rs/binary_sv2) -- [`codec_sv2`](http://docs.rs/codec_sv2) -- [`framing_sv2`](http://docs.rs/framing_sv2) (which is actually just re-exported by `codec_sv2`) -- [`noise_sv2`](http://docs.rs/noise_sv2) - -We establish a simple `Ping`-`Pong` protocol with a server and a client communicating over a TCP socket. - -The server expects to receive a `Ping` message encoded as a SV2 binary frame. -The `Ping` message contains a `nonce`, which is a `u8` generated randomly by the client. - -The client expects to get a `Pong` message in response, also encoded as a SV2 binary frame, with the same `nonce`. - -The messages are assigned arbitrary values for binary encoding: -```rust -pub const PING_MSG_TYPE: u8 = 0xfe; -pub const PONG_MSG_TYPE: u8 = 0xff; -``` - -All communication is encrypted with [SV2 Noise Protocol](https://stratumprotocol.org/specification/04-Protocol-Security/). 
\ No newline at end of file diff --git a/examples/ping-pong-encrypted/src/client.rs b/examples/ping-pong-encrypted/src/client.rs deleted file mode 100644 index 1fd3985513..0000000000 --- a/examples/ping-pong-encrypted/src/client.rs +++ /dev/null @@ -1,82 +0,0 @@ -use crate::messages::{Message, Ping, Pong, PING_MSG_TYPE, PONG_MSG_TYPE}; -use stratum_apps::{ - key_utils::Secp256k1PublicKey, - network_helpers::noise_connection::Connection, - stratum_core::{ - binary_sv2, - codec_sv2::{HandshakeRole, StandardSv2Frame}, - noise_sv2::Initiator, - }, -}; -use tokio::net::TcpStream; - -use crate::error::Error; - -pub async fn start_client(address: &str, k_pub: String) -> Result<(), Error> { - let stream = TcpStream::connect(address).await?; - - println!("CLIENT: Connected to server on {}", address); - - // parse server pubkey - let k_pub: Secp256k1PublicKey = k_pub.try_into()?; - - // noise handshake initiator - let initiator = Initiator::from_raw_k(k_pub.into_bytes())?; - - // channels for encrypted connection - let (receiver, sender) = Connection::new(stream, HandshakeRole::Initiator(initiator)).await?; - - // create Ping message - let ping = Ping::new()?; - let ping_nonce = ping.get_nonce(); - let message = Message::Ping(ping); - - // create Ping frame - let ping_frame = - StandardSv2Frame::::from_message(message.clone(), PING_MSG_TYPE, 0, false) - .ok_or(Error::FrameFromMessage)?; - - // send Ping frame (sender takes care of encryption) - println!( - "CLIENT: Sending encrypted Ping to server with nonce: {}", - ping_nonce - ); - sender - .send(ping_frame.into()) - .await - .map_err(|_| Error::Sender)?; - - // ok, we have successfully sent the ping message - // now it's time to receive and verify the pong response - // receiver already took care of decryption - let mut frame: StandardSv2Frame = match receiver.recv().await { - Ok(f) => f.try_into()?, - Err(_) => return Err(Error::Receiver), - }; - - let frame_header = frame.get_header().ok_or(Error::FrameHeader)?; - - // 
check message type on header - if frame_header.msg_type() != PONG_MSG_TYPE { - return Err(Error::FrameHeader); - } - - // decode frame payload - let decoded_payload: Pong = match binary_sv2::from_bytes(frame.payload()) { - Ok(pong) => pong, - Err(e) => return Err(Error::BinarySv2(e)), - }; - - // check if nonce is the same as ping - let pong_nonce = decoded_payload.get_nonce(); - if ping_nonce == pong_nonce { - println!( - "CLIENT: Received encrypted Pong with identical nonce as Ping: {}", - pong_nonce - ); - } else { - return Err(Error::Nonce); - } - - Ok(()) -} diff --git a/examples/ping-pong-encrypted/src/error.rs b/examples/ping-pong-encrypted/src/error.rs deleted file mode 100644 index a9b5eb6982..0000000000 --- a/examples/ping-pong-encrypted/src/error.rs +++ /dev/null @@ -1,64 +0,0 @@ -use stratum_apps::{ - key_utils, network_helpers, - stratum_core::{binary_sv2, codec_sv2, framing_sv2, noise_sv2}, -}; - -#[derive(std::fmt::Debug)] -pub enum Error { - Io(std::io::Error), - CodecSv2(codec_sv2::Error), - FramingSv2(framing_sv2::Error), - BinarySv2(binary_sv2::Error), - NoiseSv2(noise_sv2::Error), - NetworkHelpersSv2(network_helpers::Error), - KeyUtils(key_utils::Error), - Receiver, - Sender, - FrameHeader, - FrameFromMessage, - Nonce, - WrongMessage, - Tcp(std::io::Error), -} - -impl From for Error { - fn from(e: std::io::Error) -> Error { - Error::Io(e) - } -} - -impl From for Error { - fn from(e: codec_sv2::Error) -> Error { - Error::CodecSv2(e) - } -} - -impl From for Error { - fn from(e: network_helpers::Error) -> Error { - Error::NetworkHelpersSv2(e) - } -} - -impl From for Error { - fn from(e: binary_sv2::Error) -> Error { - Error::BinarySv2(e) - } -} - -impl From for Error { - fn from(e: noise_sv2::Error) -> Error { - Error::NoiseSv2(e) - } -} - -impl From for Error { - fn from(e: key_utils::Error) -> Error { - Error::KeyUtils(e) - } -} - -impl From for Error { - fn from(e: framing_sv2::Error) -> Error { - Error::FramingSv2(e) - } -} diff --git 
a/examples/ping-pong-encrypted/src/main.rs b/examples/ping-pong-encrypted/src/main.rs deleted file mode 100644 index 1afa72a316..0000000000 --- a/examples/ping-pong-encrypted/src/main.rs +++ /dev/null @@ -1,33 +0,0 @@ -mod client; -mod error; -mod messages; -mod server; - -const ADDR: &str = "127.0.0.1:3333"; -const SERVER_PUBLIC_K: &str = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72"; -const SERVER_PRIVATE_K: &str = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n"; -const SERVER_CERT_VALIDITY: std::time::Duration = std::time::Duration::from_secs(3600); - -#[tokio::main] -async fn main() { - // start the server in a separate thread - tokio::spawn(async { - server::start_server( - ADDR, - SERVER_PUBLIC_K.to_string(), - SERVER_PRIVATE_K.to_string(), - SERVER_CERT_VALIDITY, - ) - .await - .expect("Server failed"); - }); - - // give the server a moment to start up - std::thread::sleep(std::time::Duration::from_secs(1)); - - // start the client - // note: it only knows the server's pubkey! 
- client::start_client(ADDR, SERVER_PUBLIC_K.to_string()) - .await - .expect("Client failed"); -} diff --git a/examples/ping-pong-encrypted/src/messages.rs b/examples/ping-pong-encrypted/src/messages.rs deleted file mode 100644 index 53dd5c1d3f..0000000000 --- a/examples/ping-pong-encrypted/src/messages.rs +++ /dev/null @@ -1,83 +0,0 @@ -use crate::error::Error; -use stratum_apps::stratum_core::binary_sv2::{ - self as binary_sv2, - decodable::{DecodableField, FieldMarker}, - Deserialize, Serialize, -}; - -use rand::Rng; - -pub const PING_MSG_TYPE: u8 = 0xfe; -pub const PONG_MSG_TYPE: u8 = 0xff; - -// we derive binary_sv2::{Serialize, Deserialize} -// to allow for binary encoding -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct Ping { - nonce: u8, -} - -impl Ping { - pub fn new() -> Result { - let mut rng = rand::thread_rng(); - let random: u8 = rng.gen(); - Ok(Self { nonce: random }) - } - - pub fn get_nonce(&self) -> u8 { - self.nonce - } -} - -// we derive binary_sv2::{Serialize, Deserialize} -// to allow for binary encoding -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct Pong { - nonce: u8, -} - -impl Pong { - pub fn new(nonce: u8) -> Result { - Ok(Self { nonce }) - } - - pub fn get_nonce(&self) -> u8 { - self.nonce - } -} - -// unifies message types for noise_connection_tokio::Connection -#[derive(Clone)] -pub enum Message { - Ping(Ping), - Pong(Pong), -} - -impl binary_sv2::GetSize for Message { - fn get_size(&self) -> usize { - match self { - Self::Ping(ping) => ping.get_size(), - Self::Pong(pong) => pong.get_size(), - } - } -} - -impl From for binary_sv2::encodable::EncodableField<'_> { - fn from(m: Message) -> Self { - match m { - Message::Ping(p) => p.into(), - Message::Pong(p) => p.into(), - } - } -} - -impl Deserialize<'_> for Message { - fn get_structure(_v: &[u8]) -> std::result::Result, binary_sv2::Error> { - unimplemented!() - } - fn from_decoded_fields( - _v: Vec, - ) -> std::result::Result { - unimplemented!() - } -} diff 
--git a/examples/ping-pong-encrypted/src/server.rs b/examples/ping-pong-encrypted/src/server.rs deleted file mode 100644 index 3dc8a1913e..0000000000 --- a/examples/ping-pong-encrypted/src/server.rs +++ /dev/null @@ -1,105 +0,0 @@ -use crate::{ - error::Error, - messages::{Message, Ping, Pong, PING_MSG_TYPE, PONG_MSG_TYPE}, -}; -use stratum_apps::{ - key_utils::{Secp256k1PublicKey, Secp256k1SecretKey}, - network_helpers::noise_connection::Connection, - stratum_core::{ - binary_sv2, - codec_sv2::{HandshakeRole, StandardEitherFrame, StandardSv2Frame}, - noise_sv2::Responder, - }, -}; - -use async_channel::{Receiver, Sender}; -use tokio::net::TcpListener; - -pub async fn start_server( - address: &str, - k_pub: String, - k_priv: String, - cert_validity: std::time::Duration, -) -> Result<(), Error> { - let listener = TcpListener::bind(address).await?; - - // parse keys - let k_pub: Secp256k1PublicKey = k_pub.to_string().try_into()?; - let k_priv: Secp256k1SecretKey = k_priv.to_string().try_into()?; - - println!("SERVER: Listening on {}", address); - - loop { - let (stream, _) = listener.accept().await?; - tokio::spawn(async move { - // noise handshake responder - let responder = Responder::from_authority_kp( - &k_pub.into_bytes(), - &k_priv.into_bytes(), - cert_validity, - )?; - - // channels for encrypted connection - let (receiver, sender) = - Connection::new(stream, HandshakeRole::Responder(responder)).await?; - - // handle encrypted connection - handle_connection(receiver, sender).await?; - Ok::<(), Error>(()) - }); - } -} - -async fn handle_connection( - receiver: Receiver>, - sender: Sender>, -) -> Result<(), Error> { - // first, we need to read the ping frame - // receiver already took care of decryption - let mut frame: StandardSv2Frame = match receiver.recv().await { - Ok(f) => f.try_into()?, - Err(_) => return Err(Error::Receiver), - }; - - let frame_header = frame.get_header().ok_or(Error::FrameHeader)?; - - // check message type on header - if 
frame_header.msg_type() != PING_MSG_TYPE { - return Err(Error::WrongMessage); - } - - // decode frame payload - let decoded_payload: Ping = match binary_sv2::from_bytes(frame.payload()) { - Ok(ping) => ping, - Err(e) => return Err(Error::BinarySv2(e)), - }; - - // ok, we have successfully received the ping message - // now it's time to send the pong response - - // we need the ping nonce to create our pong response - let ping_nonce = decoded_payload.get_nonce(); - - println!("SERVER: Received encrypted Ping with nonce: {}", ping_nonce); - - // create Pong message - let pong = Pong::new(ping_nonce)?; - let message = Message::Pong(pong.clone()); - - // create Pong frame - let pong_frame = - StandardSv2Frame::::from_message(message.clone(), PONG_MSG_TYPE, 0, false) - .ok_or(Error::FrameFromMessage)?; - - // respond Pong (sender takes care of encryption) - println!( - "SERVER: Sending encrypted Pong to client with nonce: {}", - pong.get_nonce() - ); - sender - .send(pong_frame.into()) - .await - .map_err(|_| Error::Sender)?; - - Ok(()) -} diff --git a/examples/ping-pong/Cargo.toml b/examples/ping-pong/Cargo.toml deleted file mode 100644 index fef89b57c7..0000000000 --- a/examples/ping-pong/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "ping-pong" -version = "0.1.0" -edition = "2021" -authors = [ "SRI Community" ] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -stratum-apps = { path = "../../roles/stratum-apps" } -rand = "0.8" diff --git a/examples/ping-pong/README.md b/examples/ping-pong/README.md deleted file mode 100644 index e10b42e069..0000000000 --- a/examples/ping-pong/README.md +++ /dev/null @@ -1,17 +0,0 @@ -`ping-pong` is an example of how to encode and decode SV2 binary frames (without any encryption layer) while leveraging the following crates: -- [`binary_sv2`](http://docs.rs/binary_sv2) -- [`codec_sv2`](http://docs.rs/codec_sv2) -- 
[`framing_sv2`](http://docs.rs/framing_sv2) (which is actually just re-exported by `codec_sv2`) - -We establish a simple `Ping`-`Pong` protocol with a server and a client communicating over a TCP socket. - -The server expects to receive a `Ping` message encoded as a SV2 binary frame. -The `Ping` message contains a `nonce`, which is a `u8` generated randomly by the client. - -The client expects to get a `Pong` message in response, also encoded as a SV2 binary frame, with the same `nonce`. - -The messages are assigned arbitrary values for binary encoding: -```rust -pub const PING_MSG_TYPE: u8 = 0xfe; -pub const PONG_MSG_TYPE: u8 = 0xff; -``` \ No newline at end of file diff --git a/examples/ping-pong/src/client.rs b/examples/ping-pong/src/client.rs deleted file mode 100644 index 557091cfb4..0000000000 --- a/examples/ping-pong/src/client.rs +++ /dev/null @@ -1,86 +0,0 @@ -use crate::messages::{Ping, Pong, PING_MSG_TYPE, PONG_MSG_TYPE}; -use std::{ - io::{Read, Write}, - net::TcpStream, -}; -use stratum_apps::stratum_core::{ - binary_sv2, - codec_sv2::{self, StandardDecoder, StandardSv2Frame}, -}; - -use crate::error::Error; - -pub fn start_client(address: &str) -> Result<(), Error> { - let mut stream = TcpStream::connect(address)?; - - println!("CLIENT: Connected to server on {}", address); - - // create Ping message - let ping_message = Ping::new()?; - let ping_nonce = ping_message.get_nonce(); - - // create Ping frame - let ping_frame = - StandardSv2Frame::::from_message(ping_message.clone(), PING_MSG_TYPE, 0, false) - .ok_or(Error::FrameFromMessage)?; - - // encode Ping frame - let mut encoder = codec_sv2::Encoder::::new(); - let ping_encoded = encoder.encode(ping_frame)?; - - println!("CLIENT: Sending Ping to server with nonce: {}", ping_nonce); - stream.write_all(ping_encoded)?; - - // ok, we have successfully sent the ping message - // now it's time to receive and verify the pong response - - // initialize decoder - let mut decoder = StandardDecoder::::new(); - 
- // right now, the decoder buffer can only read a frame header - // because decoder.missing_b is initialized with a header size - let decoder_buf = decoder.writable(); - - // read frame header into decoder_buf - stream.read_exact(decoder_buf)?; - - // this returns an error (MissingBytes), because it only read the header, and there's no payload - // in memory yet therefore, we safely ignore the error - // the important thing here is that we loaded decoder.missing_b with the expected frame payload - // size - let _ = decoder.next_frame(); - - // now, the decoder buffer has the expected size of the frame payload - let decoder_buf = decoder.writable(); - - // read the payload into the decoder_buf - stream.read_exact(decoder_buf)?; - - // finally read the frame - let mut frame = decoder.next_frame()?; - let frame_header = frame.get_header().ok_or(Error::FrameHeader)?; - - // check message type on header - if frame_header.msg_type() != PONG_MSG_TYPE { - return Err(Error::FrameHeader); - } - - // decode frame payload - let decoded_payload: Pong = match binary_sv2::from_bytes(frame.payload()) { - Ok(pong) => pong, - Err(e) => return Err(Error::BinarySv2(e)), - }; - - // check if nonce is the same as ping - let pong_nonce = decoded_payload.get_nonce(); - if ping_nonce == pong_nonce { - println!( - "CLIENT: Received Pong with identical nonce as Ping: {}", - pong_nonce - ); - } else { - return Err(Error::Nonce); - } - - Ok(()) -} diff --git a/examples/ping-pong/src/error.rs b/examples/ping-pong/src/error.rs deleted file mode 100644 index 4794c903db..0000000000 --- a/examples/ping-pong/src/error.rs +++ /dev/null @@ -1,31 +0,0 @@ -use stratum_apps::stratum_core::{binary_sv2, codec_sv2}; - -#[derive(std::fmt::Debug)] -pub enum Error { - Io(std::io::Error), - Codec(codec_sv2::Error), - BinarySv2(binary_sv2::Error), - FrameHeader, - FrameFromMessage, - Nonce, - WrongMessage, - Tcp(std::io::Error), -} - -impl From for Error { - fn from(e: std::io::Error) -> Error { - Error::Io(e) 
- } -} - -impl From for Error { - fn from(e: codec_sv2::Error) -> Error { - Error::Codec(e) - } -} - -impl From for Error { - fn from(e: binary_sv2::Error) -> Error { - Error::BinarySv2(e) - } -} diff --git a/examples/ping-pong/src/main.rs b/examples/ping-pong/src/main.rs deleted file mode 100644 index d1edd9c776..0000000000 --- a/examples/ping-pong/src/main.rs +++ /dev/null @@ -1,19 +0,0 @@ -mod client; -mod error; -mod messages; -mod server; - -const ADDR: &str = "127.0.0.1:3333"; - -fn main() { - // Start the server in a separate thread - std::thread::spawn(|| { - server::start_server(ADDR).expect("Server failed"); - }); - - // Give the server a moment to start up - std::thread::sleep(std::time::Duration::from_secs(1)); - - // Start the client - client::start_client(ADDR).expect("Client failed"); -} diff --git a/examples/ping-pong/src/messages.rs b/examples/ping-pong/src/messages.rs deleted file mode 100644 index f71c8e3664..0000000000 --- a/examples/ping-pong/src/messages.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::error::Error; -use stratum_apps::stratum_core::binary_sv2::{self, Deserialize, Serialize}; - -use rand::Rng; - -pub const PING_MSG_TYPE: u8 = 0xfe; -pub const PONG_MSG_TYPE: u8 = 0xff; - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct Ping { - nonce: u8, -} - -impl Ping { - pub fn new() -> Result { - let mut rng = rand::thread_rng(); - let random: u8 = rng.gen(); - Ok(Self { nonce: random }) - } - - pub fn get_nonce(&self) -> u8 { - self.nonce - } -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct Pong { - nonce: u8, -} - -impl Pong { - pub fn new(nonce: u8) -> Result { - Ok(Self { nonce }) - } - - pub fn get_nonce(&self) -> u8 { - self.nonce - } -} diff --git a/examples/ping-pong/src/server.rs b/examples/ping-pong/src/server.rs deleted file mode 100644 index e4b0c11472..0000000000 --- a/examples/ping-pong/src/server.rs +++ /dev/null @@ -1,104 +0,0 @@ -use crate::{ - error::Error, - messages::{Ping, Pong, PING_MSG_TYPE, 
PONG_MSG_TYPE}, -}; -use std::{ - io::{Read, Write}, - net::{TcpListener, TcpStream}, - thread, -}; -use stratum_apps::stratum_core::{ - binary_sv2, - codec_sv2::{self, StandardDecoder, StandardSv2Frame}, -}; - -use stratum_apps::stratum_core::framing_sv2::header::Header as StandardSv2Header; - -pub fn start_server(address: &str) -> Result<(), Error> { - let listener = TcpListener::bind(address)?; - - println!("SERVER: Listening on {}", address); - - for stream in listener.incoming() { - match stream { - Ok(stream) => { - thread::spawn(|| { - handle_connection(stream)?; - Ok::<(), Error>(()) - }); - } - Err(e) => return Err(Error::Tcp(e)), - } - } - - Ok(()) -} - -fn handle_connection(mut stream: TcpStream) -> Result<(), Error> { - // first, we need to read the ping message - - // initialize decoder - let mut decoder = StandardDecoder::::new(); - - // right now, the decoder buffer can only read a frame header - // because decoder.missing_b is initialized with a header size - let decoder_buf = decoder.writable(); - - // read frame header into decoder_buf - stream.read_exact(decoder_buf)?; - - // this returns an error (MissingBytes), because it only read the header, and there's no payload - // in memory yet therefore, we safely ignore the error - // the important thing here is that we loaded decoder.missing_b with the expected frame payload - // size - let _ = decoder.next_frame(); - - // now, the decoder buffer has the expected size of the frame payload - let decoder_buf = decoder.writable(); - - // read from stream into decoder_buf again, loading the payload into memory - stream.read_exact(decoder_buf)?; - - // parse into a Sv2Frame - let mut frame: StandardSv2Frame = decoder.next_frame()?; - let frame_header: StandardSv2Header = frame.get_header().ok_or(Error::FrameHeader)?; - - // check message type on header - if frame_header.msg_type() != PING_MSG_TYPE { - return Err(Error::WrongMessage); - } - - // decode frame payload - let decoded_payload: Ping = match 
binary_sv2::from_bytes(frame.payload()) { - Ok(ping) => ping, - Err(e) => return Err(Error::BinarySv2(e)), - }; - - // ok, we have successfully received the ping message - // now it's time to send the pong response - - // we need the ping nonce to create our pong response - let ping_nonce = decoded_payload.get_nonce(); - - println!("SERVER: Received Ping message with nonce: {}", ping_nonce); - - // create Pong message - let pong_message = Pong::new(ping_nonce)?; - - // create Pong frame - let pong_frame = - StandardSv2Frame::::from_message(pong_message.clone(), PONG_MSG_TYPE, 0, false) - .ok_or(Error::FrameFromMessage)?; - - // encode Pong frame - let mut encoder = codec_sv2::Encoder::::new(); - let pong_encoded = encoder.encode(pong_frame)?; - - println!( - "SERVER: Sending Pong to client with nonce: {}", - pong_message.get_nonce() - ); - stream.write_all(pong_encoded)?; - - Ok(()) -} diff --git a/protocols/fuzz-tests/Cargo.lock b/fuzz-tests/Cargo.lock similarity index 56% rename from protocols/fuzz-tests/Cargo.lock rename to fuzz-tests/Cargo.lock index 7c93c90277..2ed7a968a1 100644 --- a/protocols/fuzz-tests/Cargo.lock +++ b/fuzz-tests/Cargo.lock @@ -58,110 +58,27 @@ dependencies = [ "derive_arbitrary", ] -[[package]] -name = "arrayvec" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" - -[[package]] -name = "base58ck" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c8d66485a3a2ea485c1913c4572ce0256067a5377ac8c75c4960e1cda98605f" -dependencies = [ - "bitcoin-internals 0.3.0", - "bitcoin_hashes 0.14.0", -] - -[[package]] -name = "bech32" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" - [[package]] name = "binary_sv2" -version = "4.0.0" +version = "5.0.0" dependencies = [ - "buffer_sv2", 
"derive_codec_sv2", ] -[[package]] -name = "bitcoin" -version = "0.32.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda569d741b895131a88ee5589a467e73e9c4718e958ac9308e4f7dc44b6945" -dependencies = [ - "base58ck", - "bech32", - "bitcoin-internals 0.3.0", - "bitcoin-io", - "bitcoin-units", - "bitcoin_hashes 0.14.0", - "hex-conservative 0.2.1", - "hex_lit", - "secp256k1 0.29.1", -] - [[package]] name = "bitcoin-internals" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" -[[package]] -name = "bitcoin-internals" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bdbe14aa07b06e6cfeffc529a1f099e5fbe249524f8125358604df99a4bed2" - -[[package]] -name = "bitcoin-io" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" - -[[package]] -name = "bitcoin-units" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5285c8bcaa25876d07f37e3d30c303f2609179716e11d688f51e8f1fe70063e2" -dependencies = [ - "bitcoin-internals 0.3.0", -] - [[package]] name = "bitcoin_hashes" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" dependencies = [ - "bitcoin-internals 0.2.0", - "hex-conservative 0.1.2", -] - -[[package]] -name = "bitcoin_hashes" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" -dependencies = [ - "bitcoin-io", - "hex-conservative 0.2.1", -] - -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", + "bitcoin-internals", + "hex-conservative", ] [[package]] @@ -172,18 +89,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "byte-slice-cast" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - [[package]] name = "cc" version = "1.2.41" @@ -226,20 +131,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "channels_sv2" -version = "2.0.0" -dependencies = [ - "binary_sv2", - "bitcoin", - "common_messages_sv2", - "job_declaration_sv2", - "mining_sv2", - "primitive-types", - "template_distribution_sv2", - "tracing", -] - [[package]] name = "cipher" version = "0.4.4" @@ -265,31 +156,11 @@ dependencies = [ [[package]] name = "common_messages_sv2" -version = "6.0.1" +version = "6.0.2" dependencies = [ "binary_sv2", ] -[[package]] -name = "const_format" -version = "0.2.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" -dependencies = [ - "const_format_proc_macros", -] - -[[package]] -name = "const_format_proc_macros" -version = "0.2.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - [[package]] name = "cpufeatures" version = "0.2.17" @@ -299,12 +170,6 @@ dependencies = [ "libc", ] -[[package]] -name = "crunchy" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" - [[package]] name = "crypto-common" version = "0.1.6" @@ -312,7 +177,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core", "typenum", ] @@ -340,12 +204,6 @@ dependencies = [ name = "derive_codec_sv2" version = "1.1.1" -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - [[package]] name = "errno" version = "0.3.14" @@ -362,33 +220,14 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" -[[package]] -name = "fixed-hash" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" -dependencies = [ - "byteorder", - "rand", - "rustc-hex", - "static_assertions", -] - [[package]] name = "framing_sv2" -version = "5.0.1" +version = "5.0.2" dependencies = [ "binary_sv2", - "buffer_sv2", "noise_sv2", ] -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - [[package]] name = "fuzz-tests" version = "1.0.1" @@ -402,7 +241,6 @@ dependencies = [ "libfuzzer-sys", "parsers_sv2", "rand", - "roles_logic_sv2", "threadpool", ] @@ -449,97 +287,18 @@ dependencies = [ "polyval", ] -[[package]] -name = "handlers_sv2" -version = "0.2.0" -dependencies = [ - "binary_sv2", - "common_messages_sv2", - "job_declaration_sv2", - "mining_sv2", - "parsers_sv2", - "template_distribution_sv2", - "trait-variant", -] - -[[package]] -name = "hashbrown" -version = "0.16.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" - [[package]] name = "hermit-abi" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - [[package]] name = "hex-conservative" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" -[[package]] -name = "hex-conservative" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" -dependencies = [ - "arrayvec", -] - -[[package]] -name = "hex-conservative" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afe881d0527571892c4034822e59bb10c6c991cce6abe8199b6f5cf10766f55" -dependencies = [ - "arrayvec", -] - -[[package]] -name = "hex_lit" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" - -[[package]] -name = "impl-codec" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d40b9d5e17727407e55028eafc22b2dc68781786e6d7eb8a21103f5058e3a14" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "indexmap" -version = "2.11.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" -dependencies = [ - "equivalent", - "hashbrown", -] - [[package]] name = "inout" version = "0.1.4" @@ -551,7 +310,7 @@ dependencies = [ [[package]] name = "job_declaration_sv2" -version = "5.0.1" +version = "5.0.2" dependencies = [ "binary_sv2", ] @@ -588,25 +347,13 @@ dependencies = [ "cc", ] -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" - [[package]] name = "mining_sv2" -version = "5.0.1" +version = "6.0.0" dependencies = [ "binary_sv2", ] -[[package]] -name = "nohash-hasher" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" - [[package]] name = "noise_sv2" version = "1.4.0" @@ -616,7 +363,7 @@ dependencies = [ "generic-array", "rand", "rand_chacha", - "secp256k1 0.28.2", + "secp256k1", ] [[package]] @@ -641,37 +388,9 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "parity-scale-codec" -version = "3.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" -dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - "const_format", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "rustversion", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "parsers_sv2" 
-version = "0.1.1" +version = "0.1.2" dependencies = [ "binary_sv2", "common_messages_sv2", @@ -719,26 +438,6 @@ dependencies = [ "zerocopy", ] -[[package]] -name = "primitive-types" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d15600a7d856470b7d278b3fe0e311fe28c2526348549f8ef2ff7db3299c87f5" -dependencies = [ - "fixed-hash", - "impl-codec", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" -dependencies = [ - "toml_edit", -] - [[package]] name = "proc-macro2" version = "1.0.101" @@ -763,12 +462,6 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - [[package]] name = "rand" version = "0.8.5" @@ -799,58 +492,15 @@ dependencies = [ "getrandom 0.2.16", ] -[[package]] -name = "roles_logic_sv2" -version = "5.0.0" -dependencies = [ - "binary_sv2", - "bitcoin", - "chacha20poly1305", - "channels_sv2", - "codec_sv2", - "common_messages_sv2", - "handlers_sv2", - "hex-conservative 0.3.0", - "job_declaration_sv2", - "mining_sv2", - "nohash-hasher", - "parsers_sv2", - "primitive-types", - "template_distribution_sv2", - "tracing", -] - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustversion" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" - [[package]] name = 
"secp256k1" version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ - "bitcoin_hashes 0.13.0", + "bitcoin_hashes", "rand", - "secp256k1-sys 0.9.2", -] - -[[package]] -name = "secp256k1" -version = "0.29.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" -dependencies = [ - "bitcoin_hashes 0.14.0", - "secp256k1-sys 0.10.1", + "secp256k1-sys", ] [[package]] @@ -862,56 +512,12 @@ dependencies = [ "cc", ] -[[package]] -name = "secp256k1-sys" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" -dependencies = [ - "cc", -] - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -dependencies = [ - "serde_core", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - 
[[package]] name = "subtle" version = "2.6.1" @@ -929,15 +535,9 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - [[package]] name = "template_distribution_sv2" -version = "4.0.1" +version = "4.0.2" dependencies = [ "binary_sv2", ] @@ -951,36 +551,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "toml_datetime" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" -dependencies = [ - "serde_core", -] - -[[package]] -name = "toml_edit" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" -dependencies = [ - "indexmap", - "toml_datetime", - "toml_parser", - "winnow", -] - -[[package]] -name = "toml_parser" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" -dependencies = [ - "winnow", -] - [[package]] name = "tracing" version = "0.1.41" @@ -1012,47 +582,18 @@ dependencies = [ "once_cell", ] -[[package]] -name = "trait-variant" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "typenum" version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" -[[package]] -name = "uint" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" -dependencies = [ - 
"byteorder", - "crunchy", - "hex", - "static_assertions", -] - [[package]] name = "unicode-ident" version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" -[[package]] -name = "unicode-xid" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" - [[package]] name = "universal-hash" version = "0.5.1" @@ -1099,30 +640,12 @@ dependencies = [ "windows-link", ] -[[package]] -name = "winnow" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" -dependencies = [ - "memchr", -] - [[package]] name = "wit-bindgen" version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - [[package]] name = "zerocopy" version = "0.8.27" diff --git a/protocols/fuzz-tests/Cargo.toml b/fuzz-tests/Cargo.toml similarity index 77% rename from protocols/fuzz-tests/Cargo.toml rename to fuzz-tests/Cargo.toml index 5d28609b50..8fc366159d 100644 --- a/protocols/fuzz-tests/Cargo.toml +++ b/fuzz-tests/Cargo.toml @@ -17,10 +17,10 @@ cargo-fuzz = true libfuzzer-sys = { version = "0.4.0", features = ["arbitrary-derive"] } arbitrary = { version = "1", features = ["derive"] } rand = "0.8.3" -binary_sv2 = { path = "../v2/binary-sv2"} -parsers_sv2 = { path = "../v2/parsers-sv2" } -framing_sv2 = { path = "../v2/framing-sv2" } -codec_sv2 = { path = "../v2/codec-sv2", features = ["noise_sv2"]} +binary_sv2 = { path = "../sv2/binary-sv2"} +parsers_sv2 = { path = 
"../sv2/parsers-sv2" } +framing_sv2 = { path = "../sv2/framing-sv2" } +codec_sv2 = { path = "../sv2/codec-sv2", features = ["noise_sv2"]} affinity = "0.1.1" threadpool = "1.8.1" lazy_static = "1.4.0" diff --git a/protocols/fuzz-tests/src/main.rs b/fuzz-tests/src/main.rs similarity index 100% rename from protocols/fuzz-tests/src/main.rs rename to fuzz-tests/src/main.rs diff --git a/protocols/Cargo.toml b/protocols/Cargo.toml deleted file mode 100644 index 8095ce1205..0000000000 --- a/protocols/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[workspace] - -resolver="2" - -members = [ - "v1", - "v2/binary-sv2/derive_codec", - "v2/binary-sv2", - "v2/noise-sv2", - "v2/framing-sv2", - "v2/codec-sv2", - "v2/subprotocols/common-messages", - "v2/subprotocols/template-distribution", - "v2/subprotocols/mining", - "v2/subprotocols/job-declaration", - "v2/channels-sv2", - "v2/parsers-sv2", - "v2/handlers-sv2", - "stratum-translation", -] - -[profile.dev] -# Required by super_safe_lock -opt-level = 1 - -[profile.test] -# Required by super_safe_lock -opt-level = 1 diff --git a/protocols/v2/binary-sv2/derive_codec/.gitignore b/protocols/v2/binary-sv2/derive_codec/.gitignore deleted file mode 100644 index ea8c4bf7f3..0000000000 --- a/protocols/v2/binary-sv2/derive_codec/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/target diff --git a/roles/Cargo.lock b/roles/Cargo.lock deleted file mode 100644 index acdbd055fd..0000000000 --- a/roles/Cargo.lock +++ /dev/null @@ -1,3790 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "addr2line" -version = "0.25.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array", -] - -[[package]] -name = "aes" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" -dependencies = [ - "cfg-if", - "cipher", - "cpufeatures", -] - -[[package]] -name = "aes-gcm" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" -dependencies = [ - "aead", - "aes", - "cipher", - "ctr", - "ghash", - "subtle", -] - -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom", - "once_cell", - "version_check", -] - -[[package]] -name = "ahash" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", -] - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - 
"memchr", -] - -[[package]] -name = "allocator-api2" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" - -[[package]] -name = "anes" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" - -[[package]] -name = "anstream" -version = "0.6.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" - -[[package]] -name = "anstyle-parse" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" -dependencies = [ - "windows-sys 0.60.2", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" -dependencies = [ - "anstyle", - "once_cell_polyfill", - "windows-sys 0.60.2", -] - -[[package]] -name = "arraydeque" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" - -[[package]] -name = "arrayvec" -version = "0.7.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" - -[[package]] -name = "async-channel" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] - -[[package]] -name = "async-channel" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" -dependencies = [ - "concurrent-queue", - "event-listener-strategy", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-executor" -version = "1.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497c00e0fd83a72a79a39fcbd8e3e2f055d6f6c7e025f3b3d91f4f8e76527fb8" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand 2.3.0", - "futures-lite 2.6.1", - "pin-project-lite", - "slab", -] - -[[package]] -name = "async-fs" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "blocking", - "futures-lite 1.13.0", -] - -[[package]] -name = "async-global-executor" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" -dependencies = [ - "async-channel 2.5.0", - "async-executor", - "async-io 2.6.0", - "async-lock 3.4.1", - "blocking", - "futures-lite 2.6.1", - "once_cell", -] - -[[package]] -name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - 
"cfg-if", - "concurrent-queue", - "futures-lite 1.13.0", - "log", - "parking", - "polling 2.8.0", - "rustix 0.37.28", - "slab", - "socket2 0.4.10", - "waker-fn", -] - -[[package]] -name = "async-io" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" -dependencies = [ - "autocfg", - "cfg-if", - "concurrent-queue", - "futures-io", - "futures-lite 2.6.1", - "parking", - "polling 3.11.0", - "rustix 1.1.2", - "slab", - "windows-sys 0.61.2", -] - -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", -] - -[[package]] -name = "async-lock" -version = "3.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" -dependencies = [ - "event-listener 5.4.1", - "event-listener-strategy", - "pin-project-lite", -] - -[[package]] -name = "async-net" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0434b1ed18ce1cf5769b8ac540e33f01fa9471058b5e89da9e06f3c882a8c12f" -dependencies = [ - "async-io 1.13.0", - "blocking", - "futures-lite 1.13.0", -] - -[[package]] -name = "async-process" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6438ba0a08d81529c69b36700fa2f95837bfe3e776ab39cde9c14d9149da88" -dependencies = [ - "async-io 1.13.0", - "async-lock 2.8.0", - "async-signal", - "blocking", - "cfg-if", - "event-listener 3.1.0", - "futures-lite 1.13.0", - "rustix 0.38.44", - "windows-sys 0.48.0", -] - -[[package]] -name = "async-recursion" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7d78656ba01f1b93024b7c3a0467f1608e4be67d725749fdcd7d2c7678fd7a2" 
-dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "async-signal" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43c070bbf59cd3570b6b2dd54cd772527c7c3620fce8be898406dd3ed6adc64c" -dependencies = [ - "async-io 2.6.0", - "async-lock 3.4.1", - "atomic-waker", - "cfg-if", - "futures-core", - "futures-io", - "rustix 1.1.2", - "signal-hook-registry", - "slab", - "windows-sys 0.61.2", -] - -[[package]] -name = "async-std" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c8e079a4ab67ae52b7403632e4618815d6db36d2a010cfe41b02c1b1578f93b" -dependencies = [ - "async-channel 1.9.0", - "async-global-executor", - "async-io 2.6.0", - "async-lock 3.4.1", - "crossbeam-utils", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite 2.6.1", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - -[[package]] -name = "async-task" -version = "4.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" - -[[package]] -name = "async-trait" -version = "0.1.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - -[[package]] -name = "autocfg" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" - -[[package]] -name = "backtrace" -version = "0.3.76" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-link 0.2.1", -] - -[[package]] -name = "base58ck" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c8d66485a3a2ea485c1913c4572ce0256067a5377ac8c75c4960e1cda98605f" -dependencies = [ - "bitcoin-internals 0.3.0", - "bitcoin_hashes 0.14.0", -] - -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - -[[package]] -name = "base64" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" - -[[package]] -name = "bech32" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" - -[[package]] -name = "binary_codec_sv2" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad24342e0abdcc463ad6ad4ac7b0ec606122c11eddf92de186a657df0114eb7" - -[[package]] -name = "binary_codec_sv2" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16415a0a9ccee2f71820da352c1f2a7f16d9f8e3ae6fb5e97834c6d732e98cd" -dependencies = [ - "buffer_sv2 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "binary_sv2" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba8295945d51b707f3a49e17810dddef858549e2b52383c7f2c4dd036f6bc1e6" -dependencies = [ - "binary_codec_sv2 3.0.0", - "derive_codec_sv2 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = 
"binary_sv2" -version = "5.0.0" -dependencies = [ - "buffer_sv2 2.0.0", - "derive_codec_sv2 1.1.1", -] - -[[package]] -name = "bitcoin" -version = "0.32.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda569d741b895131a88ee5589a467e73e9c4718e958ac9308e4f7dc44b6945" -dependencies = [ - "base58ck", - "bech32", - "bitcoin-internals 0.3.0", - "bitcoin-io", - "bitcoin-units", - "bitcoin_hashes 0.14.0", - "hex-conservative 0.2.1", - "hex_lit", - "secp256k1 0.29.1", -] - -[[package]] -name = "bitcoin-internals" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" - -[[package]] -name = "bitcoin-internals" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bdbe14aa07b06e6cfeffc529a1f099e5fbe249524f8125358604df99a4bed2" - -[[package]] -name = "bitcoin-io" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" - -[[package]] -name = "bitcoin-units" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5285c8bcaa25876d07f37e3d30c303f2609179716e11d688f51e8f1fe70063e2" -dependencies = [ - "bitcoin-internals 0.3.0", -] - -[[package]] -name = "bitcoin_hashes" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b7a2e9773ee7ae7f2560f0426c938f57902dcb9e39321b0cbd608f47ed579a4" -dependencies = [ - "byteorder", -] - -[[package]] -name = "bitcoin_hashes" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" -dependencies = [ - "bitcoin-internals 0.2.0", - "hex-conservative 0.1.2", -] - -[[package]] -name = "bitcoin_hashes" -version = "0.14.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" -dependencies = [ - "bitcoin-io", - "hex-conservative 0.2.1", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" -dependencies = [ - "serde", -] - -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "blocking" -version = "1.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83f8d02be6967315521be875afa792a316e28d57b5a2d401897e2a7921b7f21" -dependencies = [ - "async-channel 2.5.0", - "async-task", - "futures-io", - "futures-lite 2.6.1", - "piper", -] - -[[package]] -name = "bs58" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" -dependencies = [ - "sha2 0.9.9", -] - -[[package]] -name = "buffer_sv2" -version = "2.0.0" -dependencies = [ - "aes-gcm", 
- "generic-array", -] - -[[package]] -name = "buffer_sv2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19781425841d2e217eb7ded68089b693b47c8f756eb02231c92122dbf505bcf0" -dependencies = [ - "aes-gcm", -] - -[[package]] -name = "bumpalo" -version = "3.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" - -[[package]] -name = "byte-slice-cast" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" - -[[package]] -name = "cast" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" - -[[package]] -name = "cc" -version = "1.2.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac9fe6cdbb24b6ade63616c0a0688e45bb56732262c158df3c0c4bea4ca47cb7" -dependencies = [ - "find-msvc-tools", - "shlex", -] - -[[package]] -name = "cfg-if" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" - -[[package]] -name = "chacha20" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" -dependencies = [ - "cfg-if", - "cipher", - "cpufeatures", -] - -[[package]] -name = "chacha20poly1305" 
-version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" -dependencies = [ - "aead", - "chacha20", - "cipher", - "poly1305", - "zeroize", -] - -[[package]] -name = "channels_sv2" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11ac02b93b5bd92a7dda2bc4b8c9d1f087e1fffc8b1018b532109135629051fc" -dependencies = [ - "binary_sv2 4.0.0", - "bitcoin", - "common_messages_sv2 6.0.1", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "primitive-types", - "template_distribution_sv2 4.0.1", - "tracing", -] - -[[package]] -name = "channels_sv2" -version = "2.0.0" -dependencies = [ - "binary_sv2 5.0.0", - "bitcoin", - "common_messages_sv2 6.0.2", - "job_declaration_sv2 5.0.2", - "mining_sv2 6.0.0", - "primitive-types", - "template_distribution_sv2 4.0.2", - "tracing", -] - -[[package]] -name = "ciborium" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" -dependencies = [ - "ciborium-io", - "ciborium-ll", - "serde", -] - -[[package]] -name = "ciborium-io" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" - -[[package]] -name = "ciborium-ll" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" -dependencies = [ - "ciborium-io", - "half", -] - -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", - "zeroize", -] - -[[package]] -name = "clap" -version = "4.5.49" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4512b90fa68d3a9932cea5184017c5d200f5921df706d45e853537dea51508f" -dependencies = [ - "clap_builder", - "clap_derive", -] - -[[package]] -name = "clap_builder" -version = "4.5.49" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0025e98baa12e766c67ba13ff4695a887a1eba19569aad00a472546795bd6730" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim", -] - -[[package]] -name = "clap_derive" -version = "4.5.49" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "clap_lex" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" - -[[package]] -name = "codec_sv2" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e6d43e79e66d0f98038922157db8b6101594921be87ac2cca3754d669f2a05" -dependencies = [ - "binary_sv2 4.0.0", - "buffer_sv2 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "framing_sv2 5.0.1", - "noise_sv2 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand", - "tracing", -] - -[[package]] -name = "codec_sv2" -version = "4.0.0" -dependencies = [ - "binary_sv2 5.0.0", - "buffer_sv2 2.0.0", - "framing_sv2 5.0.2", - "noise_sv2 1.4.0", - "rand", - "tracing", -] - -[[package]] -name = "colorchoice" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" - -[[package]] -name = "common_messages_sv2" -version = "6.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e6ec6ab527aeebf8ead273d6ab712ff181c050ee5e1082f3f6a2c65c0a10bf6" -dependencies = 
[ - "binary_sv2 4.0.0", -] - -[[package]] -name = "common_messages_sv2" -version = "6.0.2" -dependencies = [ - "binary_sv2 5.0.0", -] - -[[package]] -name = "concurrent-queue" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "config" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68578f196d2a33ff61b27fae256c3164f65e36382648e30666dde05b8cc9dfdf" -dependencies = [ - "async-trait", - "convert_case", - "json5", - "nom", - "pathdiff", - "ron", - "rust-ini", - "serde", - "serde_json", - "toml", - "yaml-rust2", -] - -[[package]] -name = "const-random" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" -dependencies = [ - "const-random-macro", -] - -[[package]] -name = "const-random-macro" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" -dependencies = [ - "getrandom", - "once_cell", - "tiny-keccak", -] - -[[package]] -name = "const_format" -version = "0.2.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" -dependencies = [ - "const_format_proc_macros", -] - -[[package]] -name = "const_format_proc_macros" -version = "0.2.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "convert_case" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" - -[[package]] -name = "cpufeatures" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" -dependencies = [ - "libc", -] - -[[package]] -name = "criterion" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" -dependencies = [ - "anes", - "async-std", - "cast", - "ciborium", - "clap", - "criterion-plot", - "csv", - "futures", - "is-terminal", - "itertools", - "num-traits", - "once_cell", - "oorandom", - "regex", - "serde", - "serde_derive", - "serde_json", - "smol", - "tinytemplate", - "tokio", - "walkdir", -] - -[[package]] -name = "criterion-plot" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" -dependencies = [ - "cast", - "itertools", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" - -[[package]] -name = "crunchy" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" - -[[package]] -name = 
"crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "rand_core", - "typenum", -] - -[[package]] -name = "csv" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" -dependencies = [ - "csv-core", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d" -dependencies = [ - "memchr", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher", -] - -[[package]] -name = "derive_codec_sv2" -version = "1.1.1" - -[[package]] -name = "derive_codec_sv2" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "924f288d967a5cd37956b195269ee7f710999169895cf670a736e1b2267d6137" -dependencies = [ - "binary_codec_sv2 1.2.0", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer 0.10.4", - "crypto-common", -] - -[[package]] -name = "dlv-list" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" -dependencies = [ - 
"const-random", -] - -[[package]] -name = "either" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" - -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "errno" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" -dependencies = [ - "libc", - "windows-sys 0.61.2", -] - -[[package]] -name = "error_handling" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdf3be9049288001eb8a37f21b0f4e922598a6fa0098630fd3a6a14459ef217" - -[[package]] -name = "event-listener" -version = "2.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - -[[package]] -name = "event-listener" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener" -version = "5.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.5.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" -dependencies = [ - "event-listener 5.4.1", - "pin-project-lite", -] - -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" - -[[package]] -name = "find-msvc-tools" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" - -[[package]] -name = "fixed-hash" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" -dependencies = [ - "byteorder", - "rand", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "framing_sv2" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc6543955264144174b93780e0e76623ee4293037c9e180cfde3e2c155b59fa9" -dependencies = [ - "binary_sv2 4.0.0", - "buffer_sv2 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "noise_sv2 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "framing_sv2" -version = "5.0.2" -dependencies = [ - "binary_sv2 5.0.0", - "buffer_sv2 2.0.0", - "noise_sv2 1.4.0", -] - -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - -[[package]] -name = "futures" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" - -[[package]] -name = "futures-executor" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" - -[[package]] -name = "futures-lite" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" -dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] - -[[package]] -name = "futures-lite" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" -dependencies = [ - "fastrand 2.3.0", - "futures-core", - "futures-io", - "parking", - 
"pin-project-lite", -] - -[[package]] -name = "futures-macro" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "futures-sink" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" - -[[package]] -name = "futures-task" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" - -[[package]] -name = "futures-util" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "ghash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" -dependencies = [ - "opaque-debug", - "polyval", -] - -[[package]] -name = "gimli" -version = "0.32.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" - -[[package]] -name = "gloo-timers" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "h2" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" -dependencies = [ - "atomic-waker", - "bytes", - "fnv", - "futures-core", - "futures-sink", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "half" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" -dependencies = [ - "cfg-if", - "crunchy", -] - -[[package]] -name = "handlers_sv2" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "472824f98b68a963dbf4c77625a8b5525c322abe49afa9403dfb816e35dd4d93" -dependencies = [ - "binary_sv2 4.0.0", - "common_messages_sv2 6.0.1", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "parsers_sv2 0.1.1", - "template_distribution_sv2 4.0.1", - "trait-variant", -] - -[[package]] -name = "handlers_sv2" -version = "0.2.0" -dependencies = [ - "binary_sv2 5.0.0", - "common_messages_sv2 6.0.2", - "job_declaration_sv2 5.0.2", - "mining_sv2 6.0.0", - "parsers_sv2 0.1.2", - "template_distribution_sv2 4.0.2", - "trait-variant", -] - -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash 0.7.8", - "serde", -] - -[[package]] -name = "hashbrown" -version = "0.14.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -dependencies = [ - "ahash 0.8.12", - "allocator-api2", -] - -[[package]] -name = "hashbrown" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" - -[[package]] -name = "hashlink" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" -dependencies = [ - "hashbrown 0.14.5", -] - -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "hermit-abi" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hex-conservative" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" - -[[package]] -name = "hex-conservative" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" -dependencies = [ - "arrayvec", -] - -[[package]] -name = "hex-conservative" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "4afe881d0527571892c4034822e59bb10c6c991cce6abe8199b6f5cf10766f55" -dependencies = [ - "arrayvec", -] - -[[package]] -name = "hex_lit" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" - -[[package]] -name = "http" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" -dependencies = [ - "bytes", - "http", -] - -[[package]] -name = "http-body-util" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" -dependencies = [ - "bytes", - "futures-core", - "http", - "http-body", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" - -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - -[[package]] -name = "hyper" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" -dependencies = [ - "atomic-waker", - "bytes", - "futures-channel", - "futures-core", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "pin-utils", - "smallvec", - "tokio", - "want", -] - -[[package]] -name = "hyper-util" -version = "0.1.17" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" -dependencies = [ - "base64 0.22.1", - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "http", - "http-body", - "hyper", - "ipnet", - "libc", - "percent-encoding", - "pin-project-lite", - "socket2 0.6.1", - "system-configuration", - "tokio", - "tower-service", - "tracing", - "windows-registry", -] - -[[package]] -name = "impl-codec" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d40b9d5e17727407e55028eafc22b2dc68781786e6d7eb8a21103f5058e3a14" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "indexmap" -version = "2.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" -dependencies = [ - "equivalent", - "hashbrown 0.16.0", -] - -[[package]] -name = "inout" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" -dependencies = [ - "generic-array", -] - -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.9", - "libc", - "windows-sys 0.48.0", -] - -[[package]] 
-name = "io-uring" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" -dependencies = [ - "bitflags 2.9.4", - "cfg-if", - "libc", -] - -[[package]] -name = "ipnet" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" - -[[package]] -name = "is-terminal" -version = "0.4.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" -dependencies = [ - "hermit-abi 0.5.2", - "libc", - "windows-sys 0.59.0", -] - -[[package]] -name = "is_terminal_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name = "jd_client_sv2" -version = "0.1.0" -dependencies = [ - "async-channel 1.9.0", - "clap", - "config", - "serde", - "stratum-apps", - "tokio", - "tracing", -] - -[[package]] -name = "jd_server" -version = "0.1.3" -dependencies = [ - "async-channel 1.9.0", - "binary_sv2 4.0.0", - "bitcoin", - "clap", - "codec_sv2 3.0.1", - "common_messages_sv2 6.0.1", - "config", - "error_handling", - "framing_sv2 5.0.1", - "hashbrown 0.11.2", - "hex", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "network_helpers_sv2", - "nohash-hasher", - "noise_sv2 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - 
"parsers_sv2 0.1.1", - "rand", - "roles_logic_sv2 5.0.0", - "rpc_sv2", - "serde", - "serde_json", - "stratum-apps", - "tokio", - "tracing", -] - -[[package]] -name = "job_declaration_sv2" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d4edc436d29e8dcac178539222de2b3681d629f9884191bd7db8831e49dd24" -dependencies = [ - "binary_sv2 4.0.0", -] - -[[package]] -name = "job_declaration_sv2" -version = "5.0.2" -dependencies = [ - "binary_sv2 5.0.0", -] - -[[package]] -name = "js-sys" -version = "0.3.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" -dependencies = [ - "once_cell", - "wasm-bindgen", -] - -[[package]] -name = "json5" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" -dependencies = [ - "pest", - "pest_derive", - "serde", -] - -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - -[[package]] -name = "lazy_static" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" - -[[package]] -name = "libc" -version = "0.2.177" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" - -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - -[[package]] -name = "linux-raw-sys" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" - -[[package]] -name = "lock_api" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" -dependencies = [ - "value-bag", -] - -[[package]] -name = "matchers" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" -dependencies = [ - "regex-automata", -] - -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "mining_device" -version = "0.1.3" -dependencies = [ - "async-channel 1.9.0", - "async-recursion", - "binary_sv2 4.0.0", - "clap", - "codec_sv2 3.0.1", - "common_messages_sv2 6.0.1", - "criterion", - "framing_sv2 5.0.1", - "futures", - "half", - "mining_sv2 5.0.1", - "network_helpers_sv2", - "noise_sv2 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num-format", - "num_cpus", - "parsers_sv2 0.1.1", - "primitive-types", - "rand", - "roles_logic_sv2 5.0.0", - "sha2 0.10.9", - "stratum-apps", - "tokio", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = 
"mining_sv2" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1eb3c055232f64d36e3eee4296adcaa584fb3185a57e0de11ad5807766c45edc" -dependencies = [ - "binary_sv2 4.0.0", -] - -[[package]] -name = "mining_sv2" -version = "6.0.0" -dependencies = [ - "binary_sv2 5.0.0", -] - -[[package]] -name = "miniscript" -version = "12.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487906208f38448e186e3deb02f2b8ef046a9078b0de00bdb28bf4fb9b76951c" -dependencies = [ - "bech32", - "bitcoin", -] - -[[package]] -name = "miniz_oxide" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", -] - -[[package]] -name = "mio" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" -dependencies = [ - "libc", - "wasi", - "windows-sys 0.59.0", -] - -[[package]] -name = "network_helpers_sv2" -version = "4.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d720d6a31532fb4f08e59b71669084d06462f42e9d2c2aede7368d221d36db" -dependencies = [ - "async-channel 1.9.0", - "codec_sv2 3.0.1", - "futures", - "tokio", - "tracing", -] - -[[package]] -name = "nohash-hasher" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" - -[[package]] -name = "noise_sv2" -version = "1.4.0" -dependencies = [ - "aes-gcm", - "chacha20poly1305", - "generic-array", - "rand", - "rand_chacha", - "secp256k1 0.28.2", -] - -[[package]] -name = "noise_sv2" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30964f9fbc4572bb5a1b0046176331d20e9ce6de0ca18afc3cfd42c6e91a94aa" -dependencies = [ - "aes-gcm", - 
"chacha20poly1305", - "rand", - "rand_chacha", - "secp256k1 0.28.2", -] - -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "nu-ansi-term" -version = "0.50.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" -dependencies = [ - "windows-sys 0.61.2", -] - -[[package]] -name = "num-format" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" -dependencies = [ - "arrayvec", - "itoa", -] - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" -dependencies = [ - "hermit-abi 0.5.2", - "libc", -] - -[[package]] -name = "object" -version = "0.37.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" - -[[package]] -name = "once_cell_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" - -[[package]] -name = "oorandom" -version = "11.1.5" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" - -[[package]] -name = "opaque-debug" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" - -[[package]] -name = "ordered-multimap" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79" -dependencies = [ - "dlv-list", - "hashbrown 0.14.5", -] - -[[package]] -name = "parity-scale-codec" -version = "3.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" -dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - "const_format", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "rustversion", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "parking" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" - -[[package]] -name = "parking_lot" -version = "0.12.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" -dependencies = [ - "cfg-if", - "libc", - 
"redox_syscall", - "smallvec", - "windows-link 0.2.1", -] - -[[package]] -name = "parsers_sv2" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109e80bc77241a729f61cad15f3f246f3de12e1b741b31e419fc7e02f20c2ccb" -dependencies = [ - "binary_sv2 4.0.0", - "common_messages_sv2 6.0.1", - "framing_sv2 5.0.1", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "template_distribution_sv2 4.0.1", -] - -[[package]] -name = "parsers_sv2" -version = "0.1.2" -dependencies = [ - "binary_sv2 5.0.0", - "common_messages_sv2 6.0.2", - "framing_sv2 5.0.2", - "job_declaration_sv2 5.0.2", - "mining_sv2 6.0.0", - "template_distribution_sv2 4.0.2", -] - -[[package]] -name = "pathdiff" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" - -[[package]] -name = "percent-encoding" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" - -[[package]] -name = "pest" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6" -dependencies = [ - "memchr", - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d725d9cfd79e87dccc9341a2ef39d1b6f6353d68c4b33c177febbe1a402c97c5" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db7d01726be8ab66ab32f9df467ae8b1148906685bbe75c82d1e65d7f5b3f841" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "pest_meta" -version = "2.8.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9f832470494906d1fca5329f8ab5791cc60beb230c74815dff541cbd2b5ca0" -dependencies = [ - "once_cell", - "pest", - "sha2 0.10.9", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "piper" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" -dependencies = [ - "atomic-waker", - "fastrand 2.3.0", - "futures-io", -] - -[[package]] -name = "polling" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.48.0", -] - -[[package]] -name = "polling" -version = "3.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" -dependencies = [ - "cfg-if", - "concurrent-queue", - "hermit-abi 0.5.2", - "pin-project-lite", - "rustix 1.1.2", - "windows-sys 0.61.2", -] - -[[package]] -name = "poly1305" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" -dependencies = [ - "cpufeatures", - "opaque-debug", - "universal-hash", -] - -[[package]] -name = "polyval" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug", - "universal-hash", -] - -[[package]] -name = "pool_sv2" -version = "0.2.0" -dependencies = [ - "async-channel 1.9.0", - "clap", - "config", - "rand", - "secp256k1 0.28.2", - "serde", - "stratum-apps", - "tokio", - "tracing", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -dependencies = [ - "zerocopy", -] - -[[package]] -name = "primitive-types" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d15600a7d856470b7d278b3fe0e311fe28c2526348549f8ef2ff7db3299c87f5" -dependencies = [ - "fixed-hash", - "impl-codec", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" -dependencies = [ - "toml_edit 0.23.7", -] - -[[package]] -name = "proc-macro2" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ 
- "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "redox_syscall" -version = "0.5.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" -dependencies = [ - "bitflags 2.9.4", -] - -[[package]] -name = "regex" -version = "1.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" - -[[package]] -name = "roles_logic_sv2" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7241840512841396df29ede2094619ad06cbbd1a0dc342553c7a5901506d096b" -dependencies = [ - "bitcoin", - "chacha20poly1305", - "channels_sv2 1.0.2", - "codec_sv2 3.0.1", - "common_messages_sv2 6.0.1", - "handlers_sv2 0.1.0", - "hex-conservative 0.3.0", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "nohash-hasher", - 
"parsers_sv2 0.1.1", - "primitive-types", - "template_distribution_sv2 4.0.1", - "tracing", -] - -[[package]] -name = "roles_logic_sv2" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88850ead16993f86cb4616d154ddd37b9c0d739ea23711b1cc51f40484e0e39a" -dependencies = [ - "binary_sv2 4.0.0", - "bitcoin", - "chacha20poly1305", - "channels_sv2 1.0.2", - "codec_sv2 3.0.1", - "common_messages_sv2 6.0.1", - "handlers_sv2 0.1.0", - "hex-conservative 0.3.0", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "nohash-hasher", - "parsers_sv2 0.1.1", - "primitive-types", - "template_distribution_sv2 4.0.1", - "tracing", -] - -[[package]] -name = "ron" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" -dependencies = [ - "base64 0.21.7", - "bitflags 2.9.4", - "serde", - "serde_derive", -] - -[[package]] -name = "rpc_sv2" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af9b3a6c43d03c5cc6ca9f40797cbf17a9a30b8db236be6c87f5243bd404d6af" -dependencies = [ - "base64 0.21.7", - "hex", - "http-body-util", - "hyper", - "hyper-util", - "serde", - "serde_json", - "stratum-common", -] - -[[package]] -name = "rust-ini" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a" -dependencies = [ - "cfg-if", - "ordered-multimap", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustix" -version = 
"0.37.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "519165d378b97752ca44bbe15047d5d3409e875f39327546b42ac81d7e18c1b6" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags 2.9.4", - "errno", - "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", -] - -[[package]] -name = "rustix" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" -dependencies = [ - "bitflags 2.9.4", - "errno", - "libc", - "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", -] - -[[package]] -name = "rustversion" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "secp256k1" -version = "0.28.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" -dependencies = [ - "bitcoin_hashes 0.13.0", - "rand", - 
"secp256k1-sys 0.9.2", -] - -[[package]] -name = "secp256k1" -version = "0.29.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" -dependencies = [ - "bitcoin_hashes 0.14.0", - "secp256k1-sys 0.10.1", -] - -[[package]] -name = "secp256k1-sys" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" -dependencies = [ - "cc", -] - -[[package]] -name = "secp256k1-sys" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" -dependencies = [ - "cc", -] - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -dependencies = [ - "serde_core", - "serde_derive", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "serde_json" -version = "1.0.145" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", - "serde_core", -] - -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha2" -version = "0.10.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.7", - "sha2-asm", -] - -[[package]] -name = "sha2-asm" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" -dependencies = [ - "cc", -] - -[[package]] -name = "sharded-slab" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "signal-hook-registry" -version = "1.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" -dependencies = [ - "libc", -] - -[[package]] -name = "slab" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" - -[[package]] -name = "smallvec" -version = "1.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" - -[[package]] -name 
= "smol" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f2b548cd8447f8de0fdf1c592929f70f4fc7039a05e47404b0d096ec6987a1" -dependencies = [ - "async-channel 1.9.0", - "async-executor", - "async-fs", - "async-io 1.13.0", - "async-lock 2.8.0", - "async-net", - "async-process", - "blocking", - "futures-lite 1.13.0", -] - -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" -dependencies = [ - "libc", - "windows-sys 0.60.2", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "stratum-apps" -version = "0.1.0" -dependencies = [ - "async-channel 1.9.0", - "base64 0.21.7", - "bs58", - "clap", - "config", - "futures", - "generic-array", - "hex", - "http-body-util", - "hyper", - "hyper-util", - "miniscript", - "rand", - "rustversion", - "secp256k1 0.28.2", - "serde", - "serde_json", - "stratum-core", - "tokio", - "tokio-util", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "stratum-common" -version = "4.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77b7dc7a762d19aba6f355599a61440b29603ceece5a158914888691b9867ebe" -dependencies = [ - "roles_logic_sv2 4.0.0", -] - -[[package]] -name = "stratum-core" -version = "0.1.0" -dependencies = [ - "binary_sv2 5.0.0", - "bitcoin", - "buffer_sv2 2.0.0", - "channels_sv2 2.0.0", - "codec_sv2 4.0.0", - "common_messages_sv2 6.0.2", - "framing_sv2 5.0.2", - "handlers_sv2 0.2.0", - 
"job_declaration_sv2 5.0.2", - "mining_sv2 6.0.0", - "noise_sv2 1.4.0", - "parsers_sv2 0.1.2", - "stratum_translation", - "sv1_api", - "template_distribution_sv2 4.0.2", -] - -[[package]] -name = "stratum_translation" -version = "0.1.1" -dependencies = [ - "binary_sv2 5.0.0", - "bitcoin", - "channels_sv2 2.0.0", - "mining_sv2 6.0.0", - "sv1_api", - "tracing", -] - -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "subtle" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" - -[[package]] -name = "sv1_api" -version = "2.1.2" -dependencies = [ - "binary_sv2 5.0.0", - "bitcoin_hashes 0.3.2", - "byteorder", - "hex", - "serde", - "serde_json", - "tracing", -] - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "system-configuration" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" -dependencies = [ - "bitflags 2.9.4", - "core-foundation", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" -dependencies 
= [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "template_distribution_sv2" -version = "4.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6298fc9f339b1c3b654ef3590857d5d3eff6d709891f003b7f7a701b8a64a3a4" -dependencies = [ - "binary_sv2 4.0.0", -] - -[[package]] -name = "template_distribution_sv2" -version = "4.0.2" -dependencies = [ - "binary_sv2 5.0.0", -] - -[[package]] -name = "thiserror" -version = "2.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "2.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "thread_local" -version = "1.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "tokio" -version = "1.47.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" -dependencies = [ - "backtrace", - "bytes", - "io-uring", - "libc", - "mio", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "slab", - "socket2 0.6.1", - "tokio-macros", - "windows-sys 0.59.0", -] - -[[package]] -name = "tokio-macros" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "tokio-util" -version = "0.7.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "toml" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime 0.6.11", - "toml_edit 0.22.27", -] - -[[package]] -name = "toml_datetime" -version = "0.6.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_datetime" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" -dependencies = [ - "serde_core", -] - -[[package]] -name = "toml_edit" -version = "0.22.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime 0.6.11", - "toml_write", - "winnow", -] - -[[package]] -name = "toml_edit" -version = "0.23.7" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" -dependencies = [ - "indexmap", - "toml_datetime 0.7.3", - "toml_parser", - "winnow", -] - -[[package]] -name = "toml_parser" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" -dependencies = [ - "winnow", -] - -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - -[[package]] -name = "tower-service" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" - -[[package]] -name = "tracing" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" -dependencies = [ - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "tracing-core" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" -dependencies = [ - "once_cell", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version 
= "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" -dependencies = [ - "matchers", - "nu-ansi-term", - "once_cell", - "regex-automata", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", -] - -[[package]] -name = "trait-variant" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "translator_sv2" -version = "2.0.0" -dependencies = [ - "async-channel 1.9.0", - "clap", - "config", - "serde", - "serde_json", - "sha2 0.10.9", - "stratum-apps", - "tokio", - "tracing", -] - -[[package]] -name = "try-lock" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" - -[[package]] -name = "typenum" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" - -[[package]] -name = "ucd-trie" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" - -[[package]] -name = "uint" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unicode-ident" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" - -[[package]] -name = "unicode-segmentation" -version = "1.12.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" - -[[package]] -name = "unicode-xid" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" - -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - -[[package]] -name = "utf8parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - -[[package]] -name = "valuable" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" - -[[package]] -name = "value-bag" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" - -[[package]] -name = "version_check" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" - -[[package]] -name = "waker-fn" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - -[[package]] -name = "wasm-bindgen" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" -dependencies = [ - "cfg-if", - "once_cell", - "rustversion", - "wasm-bindgen-macro", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.106", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" -dependencies = [ - "cfg-if", - "js-sys", - "once_cell", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.104" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "web-sys" -version = "0.3.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" -dependencies = [ - "windows-sys 0.61.2", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" - -[[package]] -name = "windows-registry" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" 
-dependencies = [ - "windows-link 0.1.3", - "windows-result", - "windows-strings", -] - -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link 0.1.3", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link 0.1.3", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.5", -] - -[[package]] -name = "windows-sys" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -dependencies = [ - "windows-link 0.2.1", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 
0.48.5", - "windows_x86_64_msvc 0.48.5", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.53.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" -dependencies = [ - "windows-link 0.2.1", - "windows_aarch64_gnullvm 0.53.1", - "windows_aarch64_msvc 0.53.1", - "windows_i686_gnu 0.53.1", - "windows_i686_gnullvm 0.53.1", - "windows_i686_msvc 0.53.1", - "windows_x86_64_gnu 0.53.1", - "windows_x86_64_gnullvm 0.53.1", - "windows_x86_64_msvc 0.53.1", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_i686_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" - -[[package]] -name = "winnow" 
-version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" -dependencies = [ - "memchr", -] - -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - -[[package]] -name = "yaml-rust2" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8902160c4e6f2fb145dbe9d6760a75e3c9522d8bf796ed7047c85919ac7115f8" -dependencies = [ - "arraydeque", - "encoding_rs", - "hashlink", -] - -[[package]] -name = "zerocopy" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" -dependencies = [ - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "zeroize" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" diff --git a/roles/jd-client/Cargo.toml b/roles/jd-client/Cargo.toml deleted file mode 100644 index a161bdce71..0000000000 --- a/roles/jd-client/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "jd_client_sv2" -version = "0.1.0" -authors = ["The Stratum V2 Developers"] -edition = "2021" -description = "Job Declarator Client (JDC) role" -documentation = "https://docs.rs/jd_client" -readme = "README.md" -homepage = "https://stratumprotocol.org" -repository = "https://github.com/stratum-mining/stratum" -license = "MIT OR Apache-2.0" -keywords = ["stratum", "mining", "bitcoin", 
"protocol"] - -[lib] -name = "jd_client_sv2" -path = "src/lib/mod.rs" - -[dependencies] -stratum-apps = { path = "../stratum-apps", features = ["jd_client"] } -async-channel = "1.5.1" -serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] } -tokio = { version = "1.44.1", features = ["full"] } -ext-config = { version = "0.14.0", features = ["toml"], package = "config" } -tracing = { version = "0.1" } -clap = { version = "4.5.39", features = ["derive"] } diff --git a/roles/jd-client/README.md b/roles/jd-client/README.md deleted file mode 100644 index e08a767fae..0000000000 --- a/roles/jd-client/README.md +++ /dev/null @@ -1,188 +0,0 @@ - -# Job Declarator Client - -The **Job Declarator Client (JDC)** is responsible for: - -* Connecting to the **Pool** and **JD Server**. -* Connecting to the **Template Provider**. -* Receiving custom block templates from the Template Provider and declaring them to the pool via the **Job Declaration Protocol**. -* Sending jobs to downstream clients. -* Forwarding shares to the pool. - -## Architecture Overview - -The JDC sits between **SV2 downstream clients** (e.g., SV2 mining devices or Translator Proxies) and **SV2 upstream servers** (the Pool and JD Server). - -* It obtains templates from the Bitcoin node. -* It creates and broadcasts jobs to downstream clients. -* It declares and sets custom jobs to the pool side. -* It also supports solo mining mode in case no upstream is available or the upstream is fraudulent - -Note: while JDC can cater for multiple downstream clients, with either one or multiple channels per client, it only opens one single extended channel with the upstream Pool server. 
- -``` -<--- Most Downstream ------------------------------------------------------------------------------------------------ Most Upstream ---> - -+----------------------------------------------------------------------------------------------------+ +------------------------------+ -| Mining Farm | | Remote Pool | -| | | | -| +-------------------+ +------------------+ | | +-----------------+ | -| | SV1 Mining Device | <-> | Translator Proxy |-------| |-------------------------------> | SV2 Pool Server | | -| +-------------------+ +------------------+ | | | | +-----------------+ | -| | | | | | -| | | | | | -| +-----------------------+| | | | -| | Job Declarator Client | | | | -| +-----------------------+| | | +-----------------------+ | -| | |--------------------------------> | Job Declarator Server | | -| +-------------------+ | | | +-----------------------+ | -| | SV2 Mining Device |-----------------------------| | | | -| +-------------------+ | | | -| | | | -| | | | -| | | | -+----------------------------------------------------------------------------------------------------+ +------------------------------+ - - -``` -## Setup - -### Configuration File - -The configuration file contains the following information: - -1. The downstream socket information, which includes the listening IP address (`downstream_address`) and port (`downstream_port`). -2. The maximum and minimum protocol versions (`max_supported_version` and `min_supported_version`) with size as (`min_extranonce2_size`) -3. The authentication keys used for the downstream connections (`authority_public_key`, `authority_secret_key`) -4. The Template Provider address (`tp_address`). - -## Configuration - -The JDC is configured via a `.toml` file. -See [`config-examples/jdc-config-local-example.toml`](./config-examples/jdc-config-local-example.toml) for a full example. 
- -### Example Configuration - -```toml -# Listening address for downstream clients -listening_address = "127.0.0.1:34265" - -# Version support -max_supported_version = 2 -min_supported_version = 2 - -# Authentication keys for encrypted downstream connections -authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" -cert_validity_sec = 3600 - -user_identity = "your_username_here" - -# Target shares per minute & batching -shares_per_minute = 1.0 -share_batch_size = 1 -min_extranonce_size = 4 - -# Template Provider -tp_address = "127.0.0.1:8442" -jdc_signature = "Sv2MinerSignature" - -# Coinbase output for solo mining fallback -coinbase_reward_script = "addr(tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8)" - -[[upstreams]] -authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -pool_address = "127.0.0.1" -pool_port = 34254 -jd_address = "127.0.0.1" -jd_port = 34264 -``` - -For a complete, annotated config, see the [full example](./config-examples/jdc-config-hosted-example.toml). - - -## Usage - -### Installation & Build - -```bash -# Clone the repository -git clone https://github.com/stratum-mining/stratum.git -cd stratum - -# Build JDC -cargo build --release -p jd_client -``` - -### Running JDC - -#### With Local Pool and Job Declarator Server - -```bash -cd roles/jd_client -cargo run -- -c config-examples/jdc-config-local-example.toml -``` - -#### With Hosted Pool and Job Declarator Server - -```bash -cd roles/jd_client -cargo run -- -c config-examples/jdc-config-hosted-example.toml -``` - -### Command Line Options - -```bash -# Use specific config file -jd_client -c /path/to/config.toml -jd_client --config /path/to/config.toml - -# Show help -jd_client -h -jd_client --help -``` - -## Architecture Details - -### **Component Overview** - -1. **Channel Manager**: Orchestrates message routing among sub-systems in JDC -2. 
**Task Manager**: Manages async task lifecycle and coordination -3. **Status System**: Provides real-time monitoring and health reporting - -## Internal Architecture - -JDC is built from several modules that divide responsibility for handling different roles and protocols: - -### **Modules** - -1. **Upstream** - - * Connects to the **pool**. - * Handles messages coming from the Pool (the ones defined in the Common Protocol are directly handled, others are forwarded to the Channel Manager). - -2. **Downstream** - - * Accepts connections from Sv2 Mining Devices or Translator Proxies. - * Includes a **ChannelState**, which provisions new channels when `OpenStandard/ExtendedChannel` messages arrive from the downstreams. - -3. **Template Receiver** - - * Connects to the **Template Provider**. - * Handles messages received by the TP (the ones defined in the Common Protocol are directly handled, while the others are forwarded to the Channel Manager). - -4. **Job Declarator** - - * Connects to the **Job Declarator Server (JDS)**. - * Handles messages received by the JDS (the ones defined in the Common Protocol are directly handled, while the others are forwarded to the Channel Manager). - -5. **Channel Manager (Orchestrator)** - - * Central coordination point. - * Responsibilities: - - * Handles **non-common messages** forwarded from all modules. - * Maintains **upstream channel state**. - * Maintains most of the **Job Declarator state**. - * Orchestrates job lifecycle and state synchronization across upstream and downstream roles. 
- diff --git a/roles/jd-client/config-examples/jdc-config-hosted-example.toml b/roles/jd-client/config-examples/jdc-config-hosted-example.toml deleted file mode 100644 index 3b9d9887f8..0000000000 --- a/roles/jd-client/config-examples/jdc-config-hosted-example.toml +++ /dev/null @@ -1,66 +0,0 @@ -# SRI JDC config -listening_address = "127.0.0.1:34265" - -# Version support -max_supported_version = 2 -min_supported_version = 2 - -# Auth keys for open encrypted connection downstream -authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" -cert_validity_sec = 3600 - - -# User identity/username for pool connection -user_identity = "your_username_here" - -# target number of shares per minute applied to every downstream channel -shares_per_minute = 6.0 - -# Share batch size -share_batch_size = 10 - -# JDC supports two modes: -# "FULLTEMPLATE" - full template mining -# "COINBASEONLY" - coinbase-only mining -mode = "FULLTEMPLATE" - -# Template Provider config -# Local TP (this is pointing to localhost so you must run a TP locally for this configuration to work) -# tp_address = "127.0.0.1:8442" -# Hosted testnet TP -tp_address = "75.119.150.111:8442" -tp_authority_public_key = "9bwHCYnjhbHm4AS3pWg9MtAH83mzWohoJJJDELYBqZhDNqszDLc" - -# string to be added into the Coinbase scriptSig -jdc_signature = "Sv2MinerSignature" - -# Solo Mining config -# Coinbase output used to build the coinbase tx in case of Solo Mining (as last-resort solution of the pools fallback system) -# -# Coinbase outputs are specified as descriptors. A full list of descriptors is available at -# https://github.com/bitcoin/bips/blob/master/bip-0380.mediawiki#appendix-b-index-of-script-expressions -# Although the `musig` descriptor is not yet supported and the legacy `combo` descriptor never -# will be. If you have an address, embed it in a descriptor like `addr(
)`. -coinbase_reward_script = "addr(tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8)" - -# Enable this option to set a predefined log file path. -# When enabled, logs will always be written to this file. -# The CLI option --log-file (or -f) will override this setting if provided. -# log_file = "./jd-client.log" - -# List of upstreams (JDS) used as backup endpoints -# In case of shares refused by the JDS, the fallback system will propose the same job to the next upstream in this list -[[upstreams]] -authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -pool_address = "75.119.150.111" -pool_port = "34254" -jds_address = "75.119.150.111" -jds_port = "34264" - -# [[upstreams]] -# authority_pubkey = "2di19GHYQnAZJmEpoUeP7C3Eg9TCcksHr23rZCC83dvUiZgiDL" -# pool_address = "127.0.0.1:34254" -# pool_port = "34254" -# jds_address = "127.0.0.1:34264" -# jds_port = "34264" diff --git a/roles/jd-client/config-examples/jdc-config-local-example.toml b/roles/jd-client/config-examples/jdc-config-local-example.toml deleted file mode 100644 index f550a5fd6d..0000000000 --- a/roles/jd-client/config-examples/jdc-config-local-example.toml +++ /dev/null @@ -1,66 +0,0 @@ -# SRI JDC config -listening_address = "127.0.0.1:34265" - -# Version support -max_supported_version = 2 -min_supported_version = 2 - -# Auth keys for open encrypted connection downstream -authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" -cert_validity_sec = 3600 - - -# User identity/username for pool connection -user_identity = "your_username_here" - -# target number of shares per minute applied to every downstream channel -shares_per_minute = 6.0 - -# Share batch size -share_batch_size = 10 - -# JDC supports two modes: -# "FULLTEMPLATE" - full template mining -# "COINBASEONLY" - coinbase-only mining -mode = "FULLTEMPLATE" - -# Template Provider config -# Local TP (this is pointing to localhost so you 
must run a TP locally for this configuration to work) -tp_address = "127.0.0.1:8442" -# Hosted testnet TP -# tp_address = "75.119.150.111:8442" - -# string to be added into the Coinbase scriptSig -jdc_signature = "Sv2MinerSignature" - -# Solo Mining config -# Coinbase output used to build the coinbase tx in case of Solo Mining (as last-resort solution of the pools fallback system) -# -# Coinbase outputs are specified as descriptors. A full list of descriptors is available at -# https://github.com/bitcoin/bips/blob/master/bip-0380.mediawiki#appendix-b-index-of-script-expressions -# Although the `musig` descriptor is not yet supported and the legacy `combo` descriptor never -# will be. If you have an address, embed it in a descriptor like `addr(
)`. -coinbase_reward_script = "addr(tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8)" - -# Enable this option to set a predefined log file path. -# When enabled, logs will always be written to this file. -# The CLI option --log-file (or -f) will override this setting if provided. -# log_file = "./jd-client.log" - - -# List of upstreams (JDS) used as backup endpoints -# In case of shares refused by the JDS, the fallback system will propose the same job to the next upstream in this list -[[upstreams]] -authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -pool_address = "127.0.0.1" -pool_port = 34254 -jds_address = "127.0.0.1" -jds_port = 34264 - -[[upstreams]] -authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -pool_address = "75.119.150.111" -pool_port = "34254" -jds_address = "75.119.150.111" -jds_port = "34264" diff --git a/roles/jd-client/src/args.rs b/roles/jd-client/src/args.rs deleted file mode 100644 index 1836b2d1c1..0000000000 --- a/roles/jd-client/src/args.rs +++ /dev/null @@ -1,43 +0,0 @@ -use clap::Parser; -use ext_config::{Config, File, FileFormat}; -use jd_client_sv2::{config::JobDeclaratorClientConfig, error::JDCError}; - -use std::path::PathBuf; -use tracing::error; -#[derive(Debug, Parser)] -#[command(author, version, about = "JD Client", long_about = None)] -pub struct Args { - #[arg( - short = 'c', - long = "config", - help = "Path to the TOML configuration file", - default_value = "jdc-config.toml" - )] - pub config_path: PathBuf, - #[arg( - short = 'f', - long = "log-file", - help = "Path to the log file. If not set, logs will only be written to stdout." 
- )] - pub log_file: Option, -} - -#[allow(clippy::result_large_err)] -pub fn process_cli_args() -> Result { - let args = Args::parse(); - - let config_path = args.config_path.to_str().ok_or_else(|| { - error!("Invalid configuration path."); - JDCError::BadCliArgs - })?; - - let settings = Config::builder() - .add_source(File::new(config_path, FileFormat::Toml)) - .build()?; - - let mut config = settings.try_deserialize::()?; - - config.set_log_file(args.log_file); - - Ok(config) -} diff --git a/roles/jd-client/src/lib/channel_manager/downstream_message_handler.rs b/roles/jd-client/src/lib/channel_manager/downstream_message_handler.rs deleted file mode 100644 index 1075485d3d..0000000000 --- a/roles/jd-client/src/lib/channel_manager/downstream_message_handler.rs +++ /dev/null @@ -1,1317 +0,0 @@ -use std::sync::atomic::Ordering; - -use stratum_apps::stratum_core::{ - binary_sv2::Str0255, - bitcoin::{Amount, Target}, - channels_sv2::{ - client, - outputs::deserialize_outputs, - server::{ - error::{ExtendedChannelError, StandardChannelError}, - extended::ExtendedChannel, - group::GroupChannel, - jobs::job_store::DefaultJobStore, - share_accounting::{ShareValidationError, ShareValidationResult}, - standard::StandardChannel, - }, - Vardiff, VardiffState, - }, - handlers_sv2::{HandleMiningMessagesFromClientAsync, SupportedChannelTypes}, - job_declaration_sv2::PushSolution, - mining_sv2::*, - parsers_sv2::{AnyMessage, JobDeclaration, Mining, TemplateDistribution}, - template_distribution_sv2::SubmitSolution, -}; -use tracing::{debug, error, info, warn}; - -use crate::{ - channel_manager::{ChannelManager, ChannelManagerChannel}, - error::{ChannelSv2Error, JDCError}, - jd_mode::{get_jd_mode, JdMode}, - utils::StdFrame, -}; - -/// `RouteMessageTo` is an abstraction used to route protocol messages -/// to the appropriate subsystem connected to the JDC. -/// -/// Instead of manually handling routing logic for each message type, -/// this enum provides a unified interface. 
Each variant represents -/// a possible destination: -/// -/// - [`RouteMessageTo::Upstream`] β†’ For messages intended for the upstream. -/// - [`RouteMessageTo::JobDeclarator`] β†’ For job declaration messages sent to the JDS. -/// - [`RouteMessageTo::TemplateProvider`] β†’ For template distribution messages sent to the template -/// provider. -/// - [`RouteMessageTo::Downstream`] β†’ For messages destined to a specific downstream client, -/// identified by its `u32` downstream ID. -#[derive(Clone)] -pub enum RouteMessageTo<'a> { - /// Route to the upstream (mining) channel. - Upstream(Mining<'a>), - /// Route to the job declarator subsystem. - JobDeclarator(JobDeclaration<'a>), - /// Route to the template provider subsystem. - TemplateProvider(TemplateDistribution<'a>), - /// Route to a specific downstream client by ID, along with its mining message. - Downstream((u32, Mining<'a>)), -} - -impl<'a> From> for RouteMessageTo<'a> { - fn from(value: Mining<'a>) -> Self { - Self::Upstream(value) - } -} - -impl<'a> From> for RouteMessageTo<'a> { - fn from(value: JobDeclaration<'a>) -> Self { - Self::JobDeclarator(value) - } -} - -impl<'a> From> for RouteMessageTo<'a> { - fn from(value: TemplateDistribution<'a>) -> Self { - Self::TemplateProvider(value) - } -} - -impl<'a> From<(u32, Mining<'a>)> for RouteMessageTo<'a> { - fn from(value: (u32, Mining<'a>)) -> Self { - Self::Downstream(value) - } -} - -impl RouteMessageTo<'_> { - /// Forwards the message to its corresponding destination channel. - /// - /// The routing is handled as follows: - /// - [`RouteMessageTo::Downstream`] β†’ Sends the mining message to the specified downstream - /// client. - /// - [`RouteMessageTo::Upstream`] β†’ Sends the mining message upstream, unless in - /// [`JdMode::SoloMining`]. - /// - [`RouteMessageTo::JobDeclarator`] β†’ Sends the job declaration message to the JDS. - /// - [`RouteMessageTo::TemplateProvider`] β†’ Sends the template distribution message to the - /// template provider. 
- /// - /// Messages are automatically converted into the appropriate - /// [`AnyMessage`] variant and wrapped into a [`StdFrame`]. - pub async fn forward(self, channel_manager_channel: &ChannelManagerChannel) { - match self { - RouteMessageTo::Downstream((downstream_id, message)) => { - _ = channel_manager_channel - .downstream_sender - .send((downstream_id, AnyMessage::Mining(message).into_static())); - } - RouteMessageTo::Upstream(message) => { - if get_jd_mode() != JdMode::SoloMining { - let message = AnyMessage::Mining(message).into_static(); - let frame: StdFrame = message.try_into().unwrap(); - _ = channel_manager_channel.upstream_sender.send(frame).await; - } - } - RouteMessageTo::JobDeclarator(message) => { - let message = AnyMessage::JobDeclaration(message).into_static(); - let frame: StdFrame = message.try_into().unwrap(); - _ = channel_manager_channel.jd_sender.send(frame).await; - } - RouteMessageTo::TemplateProvider(message) => { - let message = AnyMessage::TemplateDistribution(message).into_static(); - let frame: StdFrame = message.try_into().unwrap(); - _ = channel_manager_channel.tp_sender.send(frame).await; - } - } - } -} - -impl HandleMiningMessagesFromClientAsync for ChannelManager { - type Error = JDCError; - - fn get_channel_type_for_client(&self, _client_id: Option) -> SupportedChannelTypes { - SupportedChannelTypes::GroupAndExtended - } - fn is_work_selection_enabled_for_client(&self, _client_id: Option) -> bool { - false - } - fn is_client_authorized( - &self, - _client_id: Option, - _user_identity: &Str0255, - ) -> Result { - Ok(true) - } - - // Handles a `CloseChannel` message: - // - Look up the downstream associated with the given `channel_id`. - // - If found, remove the channel from its `extended_channels` and `standard_channels`. - // - If not found, return an appropriate error. 
- async fn handle_close_channel( - &mut self, - _client_id: Option, - msg: CloseChannel<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - self.channel_manager_data - .super_safe_lock(|channel_manager_data| { - let Some(downstream_id) = channel_manager_data - .channel_id_to_downstream_id - .get(&msg.channel_id) - else { - error!( - "No downstream_id related to channel_id: {:?}, found", - msg.channel_id - ); - return Err(JDCError::DownstreamNotFoundWithChannelId(msg.channel_id)); - }; - let Some(downstream) = channel_manager_data.downstream.get(downstream_id) else { - error!( - "No downstream with channel_id: {:?} and downstream_id: {:?}, found", - msg.channel_id, downstream_id - ); - return Err(JDCError::DownstreamNotFound(*downstream_id)); - }; - downstream.downstream_data.super_safe_lock(|data| { - data.extended_channels.remove(&msg.channel_id); - data.standard_channels.remove(&msg.channel_id); - }); - Ok(()) - }) - } - - // Handles an `OpenStandardMiningChannel` message from a downstream. - // - // Steps: - // 1. Parse the `downstream_id` from the `user_identity`. - // 2. Create a new `StandardChannel` for the downstream. - // 3. Ensure a valid `GroupChannel` exists (create one if needed). - // 4. Apply the latest future template and prevhash to both group and standard channels. - // 5. Send the following messages back to the downstream: - // - `OpenStandardMiningChannelSuccess` - // - `NewMiningJob` - // - `SetNewPrevHash` - // 6. Update the downstream state, including: - // - Channel manager mappings - // - Standard and group channel registrations - // - Vardiff state - // - // Returns an error if any step fails, such as missing templates, invalid identity, - // or failure to apply updates to channels. 
- async fn handle_open_standard_mining_channel( - &mut self, - _client_id: Option, - msg: OpenStandardMiningChannel<'_>, - ) -> Result<(), Self::Error> { - let request_id = msg.get_request_id_as_u32(); - let user_string = msg.user_identity.as_utf8_or_hex(); - - let coinbase_outputs = self - .channel_manager_data - .super_safe_lock(|data| data.coinbase_outputs.clone()); - - let mut coinbase_outputs = deserialize_outputs(coinbase_outputs) - .map_err(|_| JDCError::ChannelManagerHasBadCoinbaseOutputs)?; - - let (user_identity, downstream_id) = match user_string.rsplit_once('#') { - Some((user_identity, id)) => match id.parse::() { - Ok(id) => (user_identity, id), - Err(e) => { - warn!( - ?e, - user_string, "Failed to parse downstream_id from user_identity" - ); - return Err(JDCError::ParseInt(e)); - } - }, - None => { - warn!(user_string, "User identity missing downstream_id"); - return Err(JDCError::DownstreamIdNotFound); - } - }; - - info!(downstream_id, "Received: {}", msg); - - let build_error = |code: &str| { - Mining::OpenMiningChannelError(OpenMiningChannelError { - request_id, - error_code: code.to_string().try_into().expect("valid error code"), - }) - }; - - let messages: Vec = - self.channel_manager_data - .super_safe_lock(|channel_manager_data| { - let Some(last_future_template) = - channel_manager_data.last_future_template.clone() - else { - error!("Missing last_future_template, cannot open channel"); - return Err(JDCError::FutureTemplateNotPresent); - }; - - let Some(last_new_prev_hash) = channel_manager_data.last_new_prev_hash.clone() - else { - error!("Missing last_new_prev_hash, cannot open channel"); - return Err(JDCError::LastNewPrevhashNotFound); - }; - - let Some(downstream) = channel_manager_data.downstream.get(&downstream_id) - else { - error!(downstream_id, "Downstream not registered"); - return Err(JDCError::DownstreamNotFound(downstream_id)); - }; - - coinbase_outputs[0].value = - 
Amount::from_sat(last_future_template.coinbase_tx_value_remaining); - - downstream.downstream_data.super_safe_lock(|data| { - let mut messages: Vec = vec![]; - - if !data.require_std_job && data.group_channels.is_none() { - let group_channel_id = channel_manager_data - .channel_id_factory - .fetch_add(1, Ordering::Relaxed); - let job_store = DefaultJobStore::new(); - let full_extranonce_size = channel_manager_data - .upstream_channel - .as_ref() - .map(|channel| channel.get_full_extranonce_size()) - .unwrap_or(32); - let mut group_channel = - match GroupChannel::new_for_job_declaration_client( - group_channel_id, - job_store, - full_extranonce_size, - channel_manager_data.pool_tag_string.clone(), - self.miner_tag_string.clone(), - ) { - Ok(channel) => channel, - Err(e) => { - error!(?e, "Failed to create group channel"); - return Err(JDCError::FailedToCreateGroupChannel(e)); - } - }; - - if let Err(e) = group_channel.on_new_template( - last_future_template.clone(), - coinbase_outputs.clone(), - ) { - error!(?e, "Failed to apply template to group channel"); - return Err(JDCError::ChannelSv2( - ChannelSv2Error::GroupChannelServerSide(e), - )); - } - - if let Err(e) = - group_channel.on_set_new_prev_hash(last_new_prev_hash.clone()) - { - error!(?e, "Failed to apply prevhash to group channel"); - return Err(JDCError::ChannelSv2( - ChannelSv2Error::GroupChannelServerSide(e), - )); - }; - - data.group_channels = Some(group_channel); - } - - let nominal_hash_rate = msg.nominal_hash_rate; - let requested_max_target = Target::from_le_bytes( - msg.max_target.inner_as_ref().try_into().unwrap(), - ); - - let group_channel_id = data - .group_channels - .as_ref() - .map(|gc| gc.get_group_channel_id()) - .unwrap_or(0); - let standard_channel_id = channel_manager_data - .channel_id_factory - .fetch_add(1, Ordering::Relaxed); - - let extranonce_prefix = match channel_manager_data - .extranonce_prefix_factory_standard - .next_prefix_standard() - { - Ok(p) => p, - Err(e) => { - 
error!(?e, "Failed to get extranonce prefix"); - return Err(JDCError::ExtranoncePrefixFactoryError(e)); - } - }; - - let job_store = DefaultJobStore::new(); - let mut standard_channel = - match StandardChannel::new_for_job_declaration_client( - standard_channel_id, - user_identity.to_string(), - extranonce_prefix.to_vec(), - requested_max_target, - nominal_hash_rate, - self.share_batch_size, - self.shares_per_minute, - job_store, - channel_manager_data.pool_tag_string.clone(), - self.miner_tag_string.clone(), - ) { - Ok(channel) => channel, - Err(e) => { - error!(?e, "Failed to create standard channel"); - return match e { - StandardChannelError::InvalidNominalHashrate => Ok(vec![( - downstream_id, - build_error("invalid-nominal-hashrate"), - ) - .into()]), - StandardChannelError::RequestedMaxTargetOutOfRange => { - Ok(vec![( - downstream_id, - build_error("max-target-out-of-range"), - ) - .into()]) - } - other => Err(JDCError::ChannelSv2( - ChannelSv2Error::StandardChannelServerSide(other), - )), - }; - } - }; - - let open_standard_mining_channel_success = - OpenStandardMiningChannelSuccess { - request_id: msg.request_id.clone(), - channel_id: standard_channel_id, - target: standard_channel.get_target().to_le_bytes().into(), - extranonce_prefix: standard_channel - .get_extranonce_prefix() - .clone() - .try_into() - .expect("extranonce_prefix must be valid"), - group_channel_id, - } - .into_static(); - - messages.push( - ( - downstream_id, - Mining::OpenStandardMiningChannelSuccess( - open_standard_mining_channel_success, - ), - ) - .into(), - ); - - if let Err(e) = standard_channel - .on_new_template(last_future_template.clone(), coinbase_outputs.clone()) - { - error!(?e, "Failed to apply template to standard channel"); - return Err(JDCError::ChannelSv2( - ChannelSv2Error::StandardChannelServerSide(e), - )); - } - - let future_standard_job_id = standard_channel - .get_future_template_to_job_id() - .get(&last_future_template.template_id) - .cloned() - 
.expect("future job id must exist"); - - let future_standard_job = standard_channel - .get_future_jobs() - .get(&future_standard_job_id) - .expect("future job must exist"); - - let future_standard_job_message = - future_standard_job.get_job_message().clone().into_static(); - - messages.push( - ( - downstream_id, - Mining::NewMiningJob(future_standard_job_message), - ) - .into(), - ); - - let prev_hash = last_new_prev_hash.prev_hash.clone(); - let header_timestamp = last_new_prev_hash.header_timestamp; - let n_bits = last_new_prev_hash.n_bits; - let set_new_prev_hash_mining = SetNewPrevHash { - channel_id: standard_channel_id, - job_id: future_standard_job_id, - prev_hash, - min_ntime: header_timestamp, - nbits: n_bits, - }; - - if let Err(e) = - standard_channel.on_set_new_prev_hash(last_new_prev_hash.clone()) - { - error!(?e, "Failed to apply prevhash to standard channel"); - return Err(JDCError::ChannelSv2( - ChannelSv2Error::StandardChannelServerSide(e), - )); - } - messages.push( - ( - downstream_id, - Mining::SetNewPrevHash(set_new_prev_hash_mining), - ) - .into(), - ); - - let vardiff = - VardiffState::new().expect("Vardiff state should instantiate."); - - channel_manager_data - .vardiff - .insert((standard_channel_id, downstream_id), vardiff); - data.standard_channels - .insert(standard_channel_id, standard_channel); - channel_manager_data - .channel_id_to_downstream_id - .insert(standard_channel_id, downstream_id); - - channel_manager_data - .downstream_channel_id_and_job_id_to_template_id - .insert( - (standard_channel_id, future_standard_job_id), - last_future_template.template_id, - ); - if let Some(group_channel) = data.group_channels.as_mut() { - group_channel.add_standard_channel_id(standard_channel_id); - } - - Ok(messages) - }) - })?; - - for messages in messages { - messages.forward(&self.channel_manager_channel).await; - } - Ok(()) - } - - // Handles an `OpenExtendedMiningChannel` request from a downstream. - // - // Workflow: - // 1. 
Extract the `downstream_id` from `user_identity`. - // 2. Create a new `ExtendedChannel` with the requested parameters. - // 3. Send back to the downstream: - // - `OpenExtendedMiningChannelSuccess` - // - `NewExtendedMiningJob` (based on the latest future template) - // - `SetNewPrevHash` (to immediately activate the job) - // 4. Update internal state, including: - // - Extended channel registry - // - Downstream/channel mappings - // - Vardiff state - // - // Returns an error if the downstream is missing, template/prevhash are unavailable, - // or if extended channel creation fails. - async fn handle_open_extended_mining_channel( - &mut self, - _client_id: Option, - msg: OpenExtendedMiningChannel<'_>, - ) -> Result<(), Self::Error> { - let user_string = msg.user_identity.as_utf8_or_hex(); - let (user_identity, downstream_id) = match user_string.rsplit_once('#') { - Some((user_identity, id)) => match id.parse::() { - Ok(v) => (user_identity, v), - Err(e) => { - warn!(?e, user_string, "Invalid downstream_id in user_identity"); - return Err(JDCError::ParseInt(e)); - } - }, - None => { - warn!(user_string, "Missing downstream_id in user_identity"); - return Err(JDCError::DownstreamIdNotFound); - } - }; - - info!(downstream_id, "Received: {}", msg); - let request_id = msg.get_request_id_as_u32(); - - let nominal_hash_rate = msg.nominal_hash_rate; - let requested_max_target = - Target::from_le_bytes(msg.max_target.inner_as_ref().try_into().unwrap()); - let requested_min_rollable_extranonce_size = msg.min_extranonce_size; - - let build_error = |code: &str| { - Mining::OpenMiningChannelError(OpenMiningChannelError { - request_id, - error_code: code.to_string().try_into().expect("valid error code"), - }) - }; - - let messages = - self.channel_manager_data - .super_safe_lock(|channel_manager_data| { - - let Some(downstream) = channel_manager_data.downstream.get_mut(&downstream_id) else { - error!(downstream_id, "Downstream not found"); - return 
Err(JDCError::DownstreamNotFound(downstream_id)); - }; - - downstream.downstream_data.super_safe_lock(|data| { - - let mut messages: Vec = vec![]; - let extended_channel_id = channel_manager_data.channel_id_factory.fetch_add(1, Ordering::Relaxed); - - let extranonce_prefix = match channel_manager_data.extranonce_prefix_factory_extended - .next_prefix_extended(requested_min_rollable_extranonce_size.into()) - { - Ok(p) => p, - Err(e) => { - error!(?e, "Extranonce prefix error"); - return Err(JDCError::ExtranoncePrefixFactoryError(e)); - } - }; - - let Some(last_future_template) = channel_manager_data.last_future_template.clone() else { - error!("No template to share"); - return Err(JDCError::FutureTemplateNotPresent); - }; - - let Some(last_new_prev_hash) = channel_manager_data.last_new_prev_hash.clone() else { - error!("No prevhash in system"); - return Err(JDCError::LastNewPrevhashNotFound); - }; - - let job_store = DefaultJobStore::new(); - - let mut extended_channel = match ExtendedChannel::new_for_job_declaration_client( - extended_channel_id, - user_identity.to_string(), - extranonce_prefix.into(), - requested_max_target, - nominal_hash_rate, - true, - requested_min_rollable_extranonce_size, - self.share_batch_size, - self.shares_per_minute, - job_store, - channel_manager_data.pool_tag_string.clone(), - self.miner_tag_string.clone(), - ) { - Ok(c) => c, - Err(e) => { - error!(?e, "Failed to create ExtendedChannel"); - return match e { - ExtendedChannelError::InvalidNominalHashrate => { - Ok(vec![(downstream_id, build_error("invalid-nominal-hashrate")).into()]) - } - ExtendedChannelError::RequestedMaxTargetOutOfRange => { - Ok(vec![(downstream_id, build_error("max-target-out-of-range")).into()]) - } - ExtendedChannelError::RequestedMinExtranonceSizeTooLarge => { - Ok(vec![(downstream_id, build_error("min-extranonce-size-too-large")).into()]) - } - other => Err( - JDCError::ChannelSv2( - ChannelSv2Error::ExtendedChannelServerSide(other) - ) - ), - } - } - }; - - 
let open_extended_mining_channel_success = - OpenExtendedMiningChannelSuccess { - request_id, - channel_id: extended_channel_id, - target: extended_channel.get_target().to_le_bytes().into(), - extranonce_prefix: extended_channel - .get_extranonce_prefix() - .clone() - .try_into() - .expect("valid extranonce prefix"), - extranonce_size: extended_channel.get_rollable_extranonce_size(), - } - .into_static(); - - messages.push(( - downstream_id, - Mining::OpenExtendedMiningChannelSuccess( - open_extended_mining_channel_success, - ), - ).into()); - - let mut coinbase_outputs = match deserialize_outputs(channel_manager_data.coinbase_outputs.clone()) { - Ok(outputs) => outputs, - Err(_) => return Err(JDCError::ChannelManagerHasBadCoinbaseOutputs), - }; - coinbase_outputs[0].value = - Amount::from_sat(last_future_template.coinbase_tx_value_remaining); - - - // create a future extended job based on the last future template - if let Err(e) = - extended_channel.on_new_template(last_future_template.clone(), coinbase_outputs) - { - error!(?e, "Failed to apply template to extended channel"); - return Err(JDCError::ChannelSv2(ChannelSv2Error::ExtendedChannelServerSide(e))); - } - - let future_extended_job_id = extended_channel - .get_future_template_to_job_id() - .get(&last_future_template.template_id) - .cloned() - .expect("future job id must exist"); - let future_extended_job = extended_channel - .get_future_jobs() - .get(&future_extended_job_id) - .expect("future job must exist"); - - let future_extended_job_message = - future_extended_job.get_job_message().clone().into_static(); - - // send this future job as new job message - // to be immediately activated with the subsequent SetNewPrevHash message - messages.push(( - downstream_id, - Mining::NewExtendedMiningJob( - future_extended_job_message, - ), - ).into()); - - // SetNewPrevHash message activates the future job - let prev_hash = last_new_prev_hash.prev_hash.clone(); - let header_timestamp = 
last_new_prev_hash.header_timestamp; - let n_bits = last_new_prev_hash.n_bits; - let set_new_prev_hash_mining = SetNewPrevHash { - channel_id: extended_channel_id, - job_id: future_extended_job_id, - prev_hash, - min_ntime: header_timestamp, - nbits: n_bits, - }; - if let Err(e) = extended_channel.on_set_new_prev_hash(last_new_prev_hash) { - error!(?e, "Failed to set prevhash on extended channel"); - return Err(JDCError::ChannelSv2(ChannelSv2Error::ExtendedChannelServerSide(e))); - } - messages.push(( - downstream_id, - Mining::SetNewPrevHash(set_new_prev_hash_mining), - ).into()); - - let vardiff = VardiffState::new().expect("Vardiff should instantiate."); - data.extended_channels.insert(extended_channel_id, extended_channel); - - channel_manager_data.downstream_channel_id_and_job_id_to_template_id.insert((extended_channel_id, future_extended_job_id), last_future_template.template_id); - channel_manager_data - .channel_id_to_downstream_id - .insert(extended_channel_id, downstream_id); - channel_manager_data.vardiff.insert((extended_channel_id, downstream_id), vardiff); - - Ok(messages) - }) - })?; - - for messages in messages { - messages.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - // Handles an `UpdateChannel` message from a downstream. - // - // Workflow: - // 1. Update the target for the corresponding downstream channel (standard or extended). - // - On success, reply with a `SetTarget`. - // - On failure, return an `UpdateChannelError`. - // 2. Recompute aggregate downstream state: - // - Sum all downstream nominal hashrates. - // - Determine the minimum target across all downstream channels. - // 3. Propagate the update upstream by sending an `UpdateChannel` with the aggregated hashrate - // and minimum target. - // - // Returns an error if the downstream channel is missing or update - // validation fails. 
- async fn handle_update_channel( - &mut self, - _client_id: Option, - msg: UpdateChannel<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - let channel_id = msg.channel_id; - let new_nominal_hash_rate = msg.nominal_hash_rate; - let requested_maximum_target = - Target::from_le_bytes(msg.maximum_target.inner_as_ref().try_into().unwrap()); - - let messages = self - .channel_manager_data - .super_safe_lock(|channel_manager_data| { - let mut messages: Vec = vec![]; - - let downstream_id = match channel_manager_data - .channel_id_to_downstream_id - .get(&channel_id) - { - Some(id) => *id, - None => { - error!( - channel_id, - "UpdateChannelError: invalid-channel-id (no downstream_id mapping)" - ); - return Err(JDCError::DownstreamNotFoundWithChannelId(channel_id)); - } - }; - - if let Some(downstream) = channel_manager_data.downstream.get_mut(&downstream_id) { - messages.extend_from_slice(&downstream.downstream_data.super_safe_lock( - |data| { - let mut messages: Vec = vec![]; - - let build_error = |code: &str| { - error!(channel_id, error_code = code, "UpdateChannelError"); - Mining::UpdateChannelError(UpdateChannelError { - channel_id, - error_code: code - .to_string() - .try_into() - .expect("valid error code"), - }) - }; - - if let Some(standard_channel) = - data.standard_channels.get_mut(&channel_id) - { - let update_channel = standard_channel.update_channel( - new_nominal_hash_rate, - Some(requested_maximum_target), - ); - let new_target = standard_channel.get_target(); - - if let Err(e) = update_channel { - error!(channel_id, ?e, "StandardChannel update failed"); - - let err_code = match e { - StandardChannelError::InvalidNominalHashrate => { - "invalid-nominal-hashrate" - } - StandardChannelError::RequestedMaxTargetOutOfRange => { - "requested-max-target-out-of-range" - } - _ => "internal-error", - }; - if err_code == "internal-error" { - warn!("Failed to update extended channel {channel_id}"); - } else { - return vec![(downstream_id, 
build_error(err_code)).into()]; - } - } - - messages.push( - ( - downstream_id, - Mining::SetTarget(SetTarget { - channel_id, - maximum_target: new_target.to_le_bytes().into(), - }), - ) - .into(), - ); - } else if let Some(extended_channel) = - data.extended_channels.get_mut(&channel_id) - { - let update_channel = extended_channel.update_channel( - new_nominal_hash_rate, - Some(requested_maximum_target), - ); - let new_target = extended_channel.get_target(); - - if let Err(e) = update_channel { - error!(channel_id, ?e, "StandardChannel update failed"); - let err_code = match e { - ExtendedChannelError::InvalidNominalHashrate => { - "invalid-nominal-hashrate" - } - ExtendedChannelError::RequestedMaxTargetOutOfRange => { - "requested-max-target-out-of-range" - } - _ => "internal-error", - }; - if err_code == "internal-error" { - warn!("Failed to update extended channel {channel_id}"); - } else { - return vec![(downstream_id, build_error(err_code)).into()]; - } - } - - messages.push( - ( - downstream_id, - Mining::SetTarget(SetTarget { - channel_id, - maximum_target: new_target.to_le_bytes().into(), - }), - ) - .into(), - ); - } else { - error!("UpdateChannelError: invalid-channel-id"); - return vec![ - (downstream_id, build_error("invalid-channel-id")).into() - ]; - } - - messages - }, - )); - } - - let mut downstream_hashrate = 0.0; - let mut min_target = Target::from_le_bytes([0xff; 32]); - - for (_, downstream) in channel_manager_data.downstream.iter() { - downstream.downstream_data.super_safe_lock(|data| { - let mut update_from_channel = |hashrate: f32, target: &Target| { - downstream_hashrate += hashrate; - min_target = std::cmp::min(*target, min_target); - }; - - for (_, channel) in data.standard_channels.iter() { - update_from_channel( - channel.get_nominal_hashrate(), - channel.get_target(), - ); - } - - for (_, channel) in data.extended_channels.iter() { - update_from_channel( - channel.get_nominal_hashrate(), - channel.get_target(), - ); - } - }); - } - - 
if let Some(ref upstream_channel) = channel_manager_data.upstream_channel { - debug!( - "Checking upstream channel {} with hashrate {} and target {:?}", - upstream_channel.get_channel_id(), - upstream_channel.get_nominal_hashrate(), - upstream_channel.get_target() - ); - - info!("Sending update channel message upstream"); - messages.push( - Mining::UpdateChannel(UpdateChannel { - channel_id: upstream_channel.get_channel_id(), - nominal_hash_rate: downstream_hashrate, - maximum_target: min_target.to_le_bytes().into(), - }) - .into(), - ) - } - - Ok(messages) - })?; - - for messages in messages { - messages.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - // Handles a `SubmitSharesStandard` message from a downstream. - // - // Steps: - // 1. Validate the share against the downstream channel. - // - On error, respond with `SubmitSharesError`. - // - On success, acknowledge with `SubmitSharesSuccess` (and optionally a block found). - // - // 2. If the share is valid, attempt to forward it upstream: - // - Translate the share into an upstream `SubmitSharesExtended`. - // - Validate with the upstream channel. - // - Forward valid shares (or block solutions) upstream. 
- async fn handle_submit_shares_standard( - &mut self, - _client_id: Option, - msg: SubmitSharesStandard, - ) -> Result<(), Self::Error> { - info!("Received SubmitSharesStandard"); - let channel_id = msg.channel_id; - let job_id = msg.job_id; - - let build_error = |code: &str| { - Mining::SubmitSharesError(SubmitSharesError { - channel_id, - sequence_number: msg.sequence_number, - error_code: code.to_string().try_into().expect("valid error code"), - }) - }; - - let messages = self.channel_manager_data.super_safe_lock(|channel_manager_data| { - let Some(downstream_id) = channel_manager_data.channel_id_to_downstream_id.get(&channel_id) else { - warn!("No downstream_id found for channel_id={channel_id}"); - return Err(JDCError::DownstreamNotFoundWithChannelId(channel_id)) - }; - let Some(downstream) = channel_manager_data.downstream.get_mut(downstream_id) else { - warn!("No downstream found for downstream_id={downstream_id}"); - return Err(JDCError::DownstreamNotFound(*downstream_id)); - }; - let Some(prev_hash) = channel_manager_data.last_new_prev_hash.as_ref() else { - warn!("No prev_hash available yet, ignoring share"); - return Err(JDCError::LastNewPrevhashNotFound); - }; - - downstream.downstream_data.super_safe_lock(|data| { - let mut messages: Vec = vec![]; - - let Some(standard_channel) = data.standard_channels.get_mut(&channel_id) else { - error!("SubmitSharesError: channel_id: {channel_id}, sequence_number: {}, error_code: invalid-channel-id", msg.sequence_number); - return Ok(vec![(*downstream_id, build_error("invalid-channel-id")).into()]); - }; - - let Some(vardiff) = channel_manager_data.vardiff.get_mut(&(channel_id, *downstream_id)) else { - return Err(JDCError::VardiffNotFound(channel_id)); - }; - vardiff.increment_shares_since_last_update(); - let res = standard_channel.validate_share(msg.clone()); - let mut is_downstream_share_valid = false; - match res { - Ok(ShareValidationResult::Valid(share_hash)) => { - let share_accounting = 
standard_channel.get_share_accounting(); - if share_accounting.should_acknowledge() { - let success = SubmitSharesSuccess { - channel_id, - last_sequence_number: share_accounting.get_last_share_sequence_number(), - new_submits_accepted_count: share_accounting.get_last_batch_accepted(), - new_shares_sum: share_accounting.get_last_batch_work_sum() as u64, - }; - info!("SubmitSharesStandard on downstream channel: {} βœ…", success); - messages.push((downstream.downstream_id, Mining::SubmitSharesSuccess(success)).into()); - } else { - info!( - "SubmitSharesStandard on downstream channel: valid share | channel_id: {}, sequence_number: {}, share_hash: {} β˜‘οΈ", - channel_id, msg.sequence_number, share_hash - ); - } - is_downstream_share_valid = true; - } - Ok(ShareValidationResult::BlockFound(share_hash, template_id, coinbase)) => { - info!("SubmitSharesStandard on downstream channel: πŸ’° Block Found!!! πŸ’°{share_hash}"); - is_downstream_share_valid = true; - if let Some(template_id) = template_id { - info!("SubmitSharesStandard: Propagating solution to the Template Provider."); - let solution = SubmitSolution { - template_id, - version: msg.version, - header_timestamp: msg.ntime, - header_nonce: msg.nonce, - coinbase_tx: coinbase.try_into()?, - }; - - messages.push(TemplateDistribution::SubmitSolution(solution.clone()).into()); - } - let share_accounting = standard_channel.get_share_accounting().clone(); - let success = SubmitSharesSuccess { - channel_id, - last_sequence_number: share_accounting.get_last_share_sequence_number(), - new_submits_accepted_count: share_accounting.get_last_batch_accepted(), - new_shares_sum: share_accounting.get_last_batch_work_sum() as u64, - }; - messages.push(( - downstream.downstream_id, - Mining::SubmitSharesSuccess(success), - ).into()); - } - Err(err) => { - let code = match err { - ShareValidationError::Invalid => "invalid-share", - ShareValidationError::Stale => "stale-share", - ShareValidationError::InvalidJobId => 
"invalid-job-id", - ShareValidationError::DoesNotMeetTarget => "difficulty-too-low", - ShareValidationError::DuplicateShare => "duplicate-share", - _ => unreachable!(), - }; - error!("❌ SubmitSharesError: ch={}, seq={}, error={code}", channel_id, msg.sequence_number); - messages.push((*downstream_id, build_error(code)).into()); - } - } - - if !is_downstream_share_valid { - return Ok(messages); - } - - if let Some(upstream_channel) = channel_manager_data.upstream_channel.as_mut() { - let prefix = standard_channel.get_extranonce_prefix().clone(); - let mut extranonce_parts = Vec::new(); - let up_prefix = upstream_channel.get_extranonce_prefix(); - extranonce_parts.extend_from_slice(&prefix[up_prefix.len()..]); - - let upstream_message = channel_manager_data - .downstream_channel_id_and_job_id_to_template_id - .get(&(channel_id, job_id)) - .and_then(|tid| channel_manager_data.template_id_to_upstream_job_id.get(tid)) - .map(|&upstream_job_id| { - SubmitSharesExtended { - channel_id: upstream_channel.get_channel_id(), - job_id: upstream_job_id as u32, - extranonce: extranonce_parts.try_into().unwrap(), - nonce: msg.nonce, - ntime: msg.ntime, - // We assign sequence number later, when we validate the share - // and send it to upstream. 
- sequence_number: 0, - version: msg.version, - } - }); - - if let Some(mut upstream_message) = upstream_message { - let res = upstream_channel.validate_share(upstream_message.clone()); - match res { - Ok(client::share_accounting::ShareValidationResult::Valid(share_hash)) => { - upstream_message.sequence_number = channel_manager_data.sequence_number_factory.fetch_add(1, Ordering::Relaxed); - info!( - "SubmitSharesStandard, forwarding it to upstream: valid share | channel_id: {}, sequence_number: {}, share_hash: {} βœ…", - channel_id, upstream_message.sequence_number, share_hash - ); - messages.push(Mining::SubmitSharesExtended(upstream_message).into()); - } - Ok(client::share_accounting::ShareValidationResult::BlockFound(share_hash)) => { - upstream_message.sequence_number = channel_manager_data.sequence_number_factory.fetch_add(1, Ordering::Relaxed); - info!("SubmitSharesStandard forwarding it to upstream: πŸ’° Block Found!!! πŸ’°{share_hash}"); - let push_solution = PushSolution { - extranonce: standard_channel.get_extranonce_prefix().to_vec().try_into()?, - ntime: upstream_message.ntime, - nonce: upstream_message.nonce, - version: upstream_message.version, - nbits: prev_hash.n_bits, - prev_hash: prev_hash.prev_hash.clone(), - }; - messages.push(JobDeclaration::PushSolution(push_solution).into()); - messages.push(Mining::SubmitSharesExtended(upstream_message).into()); - } - Err(err) => { - let code = match err { - client::share_accounting::ShareValidationError::Invalid => "invalid-share", - client::share_accounting::ShareValidationError::Stale => "stale-share", - client::share_accounting::ShareValidationError::InvalidJobId => "invalid-job-id", - client::share_accounting::ShareValidationError::DoesNotMeetTarget => "difficulty-too-low", - client::share_accounting::ShareValidationError::DuplicateShare => "duplicate-share", - _ => unreachable!(), - }; - debug!("❌ SubmitSharesError not forwarding it to upstream: ch={}, seq={}, error={code}", channel_id, 
upstream_message.sequence_number); - } - } - } - } - - Ok(messages) - }) - })?; - - for messages in messages { - messages.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - // Handles a `SubmitSharesExtended` message from a downstream. - // - // Steps: - // 1. Validate the share against the downstream channel. - // - On error, respond with `SubmitSharesError`. - // - On success, acknowledge with `SubmitSharesSuccess` (and optionally a block found). - // - // 2. If the share is valid, attempt to forward it upstream: - // - Translate the share into an upstream `SubmitSharesExtended`. - // - Validate with the upstream channel. - // - Forward valid shares (or block solutions) upstream. - async fn handle_submit_shares_extended( - &mut self, - _client_id: Option, - msg: SubmitSharesExtended<'_>, - ) -> Result<(), Self::Error> { - info!("Received SubmitSharesExtended"); - let channel_id = msg.channel_id; - let job_id = msg.job_id; - - let build_error = |code: &str| { - Mining::SubmitSharesError(SubmitSharesError { - channel_id, - sequence_number: msg.sequence_number, - error_code: code.to_string().try_into().expect("valid error code"), - }) - }; - - let messages = self.channel_manager_data.super_safe_lock(|channel_manager_data| { - let Some(downstream_id) = channel_manager_data.channel_id_to_downstream_id.get(&channel_id) else { - warn!("No downstream_id found for channel_id={channel_id}"); - return Err(JDCError::DownstreamNotFoundWithChannelId(channel_id)); - }; - let Some(downstream) = channel_manager_data.downstream.get_mut(downstream_id) else { - warn!("No downstream found for downstream_id={downstream_id}"); - return Err(JDCError::DownstreamNotFound(*downstream_id)); - }; - let Some(prev_hash) = channel_manager_data.last_new_prev_hash.as_ref() else { - warn!("No prev_hash available yet, ignoring share"); - return Err(JDCError::LastNewPrevhashNotFound); - }; - downstream.downstream_data.super_safe_lock(|data| { - let mut messages: Vec = vec![]; - - 
let Some(extended_channel) = data.extended_channels.get_mut(&channel_id) else { - error!("SubmitSharesError: channel_id: {channel_id}, sequence_number: {}, error_code: invalid-channel-id", msg.sequence_number); - return Ok(vec![(*downstream_id, build_error("invalid-channel-id")).into()]); - }; - - let Some(vardiff) = channel_manager_data.vardiff.get_mut(&(channel_id, *downstream_id)) else { - return Err(JDCError::VardiffNotFound(channel_id)); - }; - vardiff.increment_shares_since_last_update(); - let res = extended_channel.validate_share(msg.clone()); - let mut is_downstream_share_valid = false; - match res { - Ok(ShareValidationResult::Valid(share_hash)) => { - let share_accounting = extended_channel.get_share_accounting(); - if share_accounting.should_acknowledge() { - let success = SubmitSharesSuccess { - channel_id, - last_sequence_number: share_accounting.get_last_share_sequence_number(), - new_submits_accepted_count: share_accounting.get_last_batch_accepted(), - new_shares_sum: share_accounting.get_last_batch_work_sum() as u64, - }; - info!("SubmitSharesExtended on downstream channel: {} βœ…", success); - messages.push((downstream.downstream_id, Mining::SubmitSharesSuccess(success)).into()); - } else { - info!( - "SubmitSharesExtended on downstream channel: valid share | channel_id: {}, sequence_number: {}, share_hash: {} β˜‘οΈ", - channel_id, msg.sequence_number, share_hash - ); - } - is_downstream_share_valid = true; - } - Ok(ShareValidationResult::BlockFound(share_hash, template_id, coinbase)) => { - info!("SubmitSharesExtended on downstream channel: πŸ’° Block Found!!! 
πŸ’°{share_hash}"); - if let Some(template_id) = template_id { - info!("SubmitSharesExtended: Propagating solution to the Template Provider."); - let solution = SubmitSolution { - template_id, - version: msg.version, - header_timestamp: msg.ntime, - header_nonce: msg.nonce, - coinbase_tx: coinbase.try_into()?, - }; - messages.push(TemplateDistribution::SubmitSolution(solution.clone()).into()); - } - let share_accounting = extended_channel.get_share_accounting().clone(); - let success = SubmitSharesSuccess { - channel_id, - last_sequence_number: share_accounting.get_last_share_sequence_number(), - new_submits_accepted_count: share_accounting.get_last_batch_accepted(), - new_shares_sum: share_accounting.get_last_batch_work_sum() as u64, - }; - is_downstream_share_valid = true; - messages.push(( - downstream.downstream_id, - Mining::SubmitSharesSuccess(success), - ).into()); - } - Err(err) => { - let code = match err { - ShareValidationError::Invalid => "invalid-share", - ShareValidationError::Stale => "stale-share", - ShareValidationError::InvalidJobId => "invalid-job-id", - ShareValidationError::DoesNotMeetTarget => "difficulty-too-low", - ShareValidationError::DuplicateShare => "duplicate-share", - _ => unreachable!(), - }; - error!("❌ SubmitSharesError on downstream channel: ch={}, seq={}, error={code}", channel_id, msg.sequence_number); - messages.push((*downstream_id, build_error(code)).into()); - } - } - - if !is_downstream_share_valid{ - return Ok(messages); - } - - if let Some(upstream_channel) = channel_manager_data.upstream_channel.as_mut() { - let prefix = extended_channel.get_extranonce_prefix().clone(); - let mut extranonce_parts = Vec::new(); - let up_prefix = upstream_channel.get_extranonce_prefix(); - extranonce_parts.extend_from_slice(&prefix[up_prefix.len()..]); - - let upstream_message = channel_manager_data - .downstream_channel_id_and_job_id_to_template_id - .get(&(channel_id, job_id)) - .and_then(|tid| 
channel_manager_data.template_id_to_upstream_job_id.get(tid)) - .map(|&upstream_job_id| { - let mut new_msg = msg.clone(); - new_msg.channel_id = upstream_channel.get_channel_id(); - new_msg.job_id = upstream_job_id as u32; - // We assign sequence number later, when we validate the share - // and send it to upstream. - new_msg.sequence_number = 0; - - extranonce_parts.extend_from_slice(&msg.extranonce.to_vec()); - new_msg.extranonce = extranonce_parts.try_into().unwrap(); - - new_msg - }); - if let Some(mut upstream_message) = upstream_message{ - let res = upstream_channel.validate_share(upstream_message.clone()); - match res { - Ok(client::share_accounting::ShareValidationResult::Valid(share_hash)) => { - upstream_message.sequence_number = channel_manager_data.sequence_number_factory.fetch_add(1, Ordering::Relaxed); - info!( - "SubmitSharesExtended forwarding it to upstream: valid share | channel_id: {}, sequence_number: {}, share_hash: {} βœ…", - channel_id, upstream_message.sequence_number, share_hash - ); - messages.push( - Mining::SubmitSharesExtended(upstream_message.into_static()).into(), - ); - } - Ok(client::share_accounting::ShareValidationResult::BlockFound(share_hash)) => { - upstream_message.sequence_number = channel_manager_data.sequence_number_factory.fetch_add(1, Ordering::Relaxed); - info!("SubmitSharesExtended forwarding it to upstream: πŸ’° Block Found!!! 
πŸ’°{share_hash}"); - let mut channel_extranonce = upstream_channel.get_extranonce_prefix().to_vec(); - channel_extranonce.extend_from_slice(&upstream_message.extranonce.to_vec()); - let push_solution = PushSolution { - extranonce: channel_extranonce.try_into()?, - ntime: upstream_message.ntime, - nonce: upstream_message.nonce, - version: upstream_message.version, - nbits: prev_hash.n_bits, - prev_hash: prev_hash.prev_hash.clone(), - }; - messages.push(JobDeclaration::PushSolution(push_solution.clone()).into()); - messages.push(Mining::SubmitSharesExtended(upstream_message.into_static()).into()); - } - Err(err) => { - let code = match err { - client::share_accounting::ShareValidationError::Invalid=>"invalid-share", - client::share_accounting::ShareValidationError::Stale=>"stale-share", - client::share_accounting::ShareValidationError::InvalidJobId=>"invalid-job-id", - client::share_accounting::ShareValidationError::DoesNotMeetTarget=>"difficulty-too-low", - client::share_accounting::ShareValidationError::DuplicateShare=>"duplicate-share", - _ => unreachable!(), - }; - debug!("❌ SubmitSharesError not forwarding it to upstream: ch={}, seq={}, error={code}", channel_id, upstream_message.sequence_number); - } - } - } - } - - Ok(messages) - }) - })?; - - for messages in messages { - messages.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - // Handles an incoming `SetCustomMiningJob` message from a downstream. 
- async fn handle_set_custom_mining_job( - &mut self, - _client_id: Option, - msg: SetCustomMiningJob<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - Err(Self::Error::UnexpectedMessage( - MESSAGE_TYPE_SET_CUSTOM_MINING_JOB, - )) - } -} diff --git a/roles/jd-client/src/lib/channel_manager/jd_message_handler.rs b/roles/jd-client/src/lib/channel_manager/jd_message_handler.rs deleted file mode 100644 index 36460f10f8..0000000000 --- a/roles/jd-client/src/lib/channel_manager/jd_message_handler.rs +++ /dev/null @@ -1,294 +0,0 @@ -use stratum_apps::stratum_core::{ - binary_sv2::{self, Sv2DataType, B016M}, - bitcoin::{ - self, absolute::LockTime, transaction::Version, OutPoint, ScriptBuf, Sequence, Transaction, - TxIn, TxOut, Witness, - }, - channels_sv2::outputs::deserialize_outputs, - handlers_sv2::HandleJobDeclarationMessagesFromServerAsync, - job_declaration_sv2::{ - AllocateMiningJobTokenSuccess, DeclareMiningJobError, DeclareMiningJobSuccess, - ProvideMissingTransactions, ProvideMissingTransactionsSuccess, - }, - parsers_sv2::{AnyMessage, JobDeclaration, Mining, TemplateDistribution}, - template_distribution_sv2::CoinbaseOutputConstraints, -}; -use tracing::{debug, error, info, warn}; - -use crate::{ - channel_manager::ChannelManager, - error::JDCError, - status::{State, Status}, - utils::StdFrame, -}; - -impl HandleJobDeclarationMessagesFromServerAsync for ChannelManager { - type Error = JDCError; - - // Handles a successful `AllocateMiningJobToken` response from the JDS. - // - // When the JDS confirms job token allocation: - // - Updates the channel manager state with the newly issued token. - // - Checks whether the JDS has provided updated coinbase outputs. - // - If outputs have changed, recalculates the corresponding size and sigops constraints. - // - Sends an updated `CoinbaseOutputConstraints` message to the Template Provider to ensure - // the new coinbase rules are enforced. 
- // - If outputs are unchanged, skips recomputation and continues as normal. - async fn handle_allocate_mining_job_token_success( - &mut self, - _server_id: Option, - msg: AllocateMiningJobTokenSuccess<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - let coinbase_changed = self.channel_manager_data.super_safe_lock(|data| { - let changed = data.coinbase_outputs != msg.coinbase_outputs.to_vec(); - data.coinbase_outputs = msg.coinbase_outputs.to_vec(); - data.allocate_tokens = Some(msg.clone().into_static()); - changed - }); - - if coinbase_changed { - info!("Coinbase outputs from JDS changed, recalculating constraints"); - let deserialized_jds_coinbase_outputs: Vec = - bitcoin::consensus::deserialize(&msg.coinbase_outputs.to_vec()) - .map_err(JDCError::BitcoinEncodeError)?; - - let max_additional_size: usize = deserialized_jds_coinbase_outputs - .iter() - .map(|o| o.size()) - .sum(); - - // create a dummy coinbase transaction with the empty output - // this is used to calculate the sigops of the coinbase output - let dummy_coinbase = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { - previous_output: OutPoint::null(), - script_sig: ScriptBuf::new(), - sequence: Sequence::MAX, - witness: Witness::from(vec![vec![0; 32]]), - }], - output: deserialized_jds_coinbase_outputs, - }; - - let max_additional_sigops = dummy_coinbase.total_sigop_cost(|_| None) as u16; - - debug!( - max_additional_size, - max_additional_sigops, "Computed coinbase output constraints" - ); - - let coinbase_output_contraints_message = AnyMessage::TemplateDistribution( - TemplateDistribution::CoinbaseOutputConstraints(CoinbaseOutputConstraints { - coinbase_output_max_additional_size: max_additional_size as u32, - coinbase_output_max_additional_sigops: max_additional_sigops, - }), - ); - - let frame: StdFrame = coinbase_output_contraints_message.try_into()?; - - self.channel_manager_channel - .tp_sender - .send(frame) - .await - 
.map_err(|_e| JDCError::ChannelErrorSender)?; - - info!("Sent updated CoinbaseOutputConstraints to TP channel"); - } else { - debug!("Coinbase outputs unchanged, skipping constraints update"); - } - - Ok(()) - } - - // Handles a `DeclareMiningJobError` response from the JDS. - // - // Receiving this error is treated as a malicious or invalid upstream behavior, - // since it indicates the JDS has rejected a declared mining job request. - // - // Upon receiving it: - // - Triggers the fallback mechanism by signaling a shutdown through the status channel, causing - // the Job Declarator Client to enter `JobDeclaratorShutdownFallback`. - // - // This ensures that the system does not continue relying on a potentially - // untrustworthy or misbehaving JDS, and instead fails over to a safer state. - async fn handle_declare_mining_job_error( - &mut self, - _server_id: Option, - msg: DeclareMiningJobError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - warn!("⚠️ JDS refused the declared job with a DeclareMiningJobError ❌. Starting fallback mechanism."); - self.channel_manager_channel - .status_sender - .send(Status { - state: State::JobDeclaratorShutdownFallback(JDCError::Shutdown), - }) - .await - .map_err(|_e| JDCError::ChannelErrorSender)?; - - Ok(()) - } - - // Handles a `DeclareMiningJobSuccess` message from the JDS. - // - // Receiving this message means the JDS has accepted the declared mining job, - // giving us the green light to propagate it upstream. - // - // The steps are: - // 1. Look up the last declared job using the `request_id`. - // 2. Validate that a `prevhash` exists and retrieve job details. - // 3. Use the job factory to create a new `SetCustomMiningJob` request, embedding the token - // provided by the JDS. - // 4. Update the channel manager state with the newly created custom job. - // 5. Send the `SetCustomMiningJob` message to the upstream, ensuring the job is now distributed - // across the mining network. 
- // - // If any required data (like `prevhash` or the last declared job) is missing, - // this handler returns an error to prevent propagation of an incomplete job. - async fn handle_declare_mining_job_success( - &mut self, - _server_id: Option, - msg: DeclareMiningJobSuccess<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - let Some(last_declare_job) = self - .channel_manager_data - .super_safe_lock(|data| data.last_declare_job_store.get(&msg.request_id).cloned()) - else { - error!( - "No last_declare_job found for request_id={}", - msg.request_id - ); - return Err(JDCError::LastDeclareJobNotFound(msg.request_id)); - }; - - let Some(prevhash) = last_declare_job.prev_hash else { - error!("Prevhash not found for request_id = {}", msg.request_id); - return Err(JDCError::LastNewPrevhashNotFound); - }; - - let outputs = match deserialize_outputs(last_declare_job.coinbase_output.clone()) { - Ok(outputs) => outputs, - Err(_) => return Err(JDCError::ChannelManagerHasBadCoinbaseOutputs), - }; - - let Some(custom_job) = self - .channel_manager_data - .super_safe_lock(|channel_manager_data| { - let job_factory = channel_manager_data.job_factory.as_mut()?; - let upstream_channel = channel_manager_data.upstream_channel.as_ref()?; - let full_extranonce_size = upstream_channel.get_full_extranonce_size(); - let custom_job = job_factory.new_custom_job( - upstream_channel.get_channel_id(), - msg.request_id, - msg.new_mining_job_token, - prevhash.into(), - last_declare_job.template, - outputs, - full_extranonce_size, - ); - Some(custom_job) - }) - else { - return Err(JDCError::FailedToCreateCustomJob); - }; - - let custom_job = custom_job.map_err(|_e| JDCError::FailedToCreateCustomJob)?; - - self.channel_manager_data.super_safe_lock(|data| { - if let Some(value) = data.last_declare_job_store.get_mut(&msg.request_id) { - value.set_custom_mining_job = Some(custom_job.clone().into_static()); - } - }); - - let channel_id = custom_job.channel_id; - - 
debug!("Sending SetCustomMiningJob to the upstream with channel_id: {channel_id}"); - let message = AnyMessage::Mining(Mining::SetCustomMiningJob(custom_job)).into_static(); - let frame: StdFrame = message.try_into()?; - - self.channel_manager_channel - .upstream_sender - .send(frame) - .await - .map_err(|_e| JDCError::ChannelErrorSender)?; - - info!("Successfully sent SetCustomMiningJob to the upstream with channel_id: {channel_id}"); - Ok(()) - } - - // Handles a `ProvideMissingTransactions` request from the JDS. - // - // The JDS provides a list of transaction positions it could not resolve. - // We then: - // - Retrieve the full transaction list for the given `request_id`. - // - Identify which transactions are missing based on the provided positions. - // - Collect and package those transactions into a `ProvideMissingTransactionsSuccess`. - // - Send the response back to the JDS. - async fn handle_provide_missing_transactions( - &mut self, - _server_id: Option, - msg: ProvideMissingTransactions<'_>, - ) -> Result<(), Self::Error> { - let request_id = msg.request_id; - - info!("Received: {}", msg); - - let tx_store_entry = self - .channel_manager_data - .super_safe_lock(|data| data.last_declare_job_store.get(&request_id).cloned()); - - let Some(entry) = tx_store_entry else { - warn!( - "No transaction list found for request_id={}", - msg.request_id - ); - return Err(JDCError::LastDeclareJobNotFound(msg.request_id)); - }; - - let full_tx_list: Vec = entry - .tx_list - .iter() - .map(|raw| B016M::from_vec_unchecked(raw.clone())) - .collect(); - - let unknown_positions: Vec = msg.unknown_tx_position_list.into_inner(); - debug!( - total_known = full_tx_list.len(), - unknown_positions = unknown_positions.len(), - "Resolving missing transactions" - ); - - let missing_txns: Vec = unknown_positions - .iter() - .filter_map(|&pos| full_tx_list.get(pos as usize).cloned()) - .collect(); - - if missing_txns.is_empty() { - warn!("No matching transactions found for 
request_id={request_id}"); - } - - let response = ProvideMissingTransactionsSuccess { - request_id: msg.request_id, - transaction_list: binary_sv2::Seq064K::new(missing_txns) - .map_err(JDCError::BinarySv2)?, - }; - let frame: StdFrame = - AnyMessage::JobDeclaration(JobDeclaration::ProvideMissingTransactionsSuccess(response)) - .try_into()?; - - self.channel_manager_channel - .jd_sender - .send(frame) - .await - .map_err(|_e| JDCError::ChannelErrorSender)?; - - info!("Successfully sent ProvideMissingTransactionsSuccess to the JDS with request_id: {request_id}"); - - Ok(()) - } -} diff --git a/roles/jd-client/src/lib/channel_manager/mod.rs b/roles/jd-client/src/lib/channel_manager/mod.rs deleted file mode 100644 index d35c01f38c..0000000000 --- a/roles/jd-client/src/lib/channel_manager/mod.rs +++ /dev/null @@ -1,1119 +0,0 @@ -use std::{ - collections::{HashMap, VecDeque}, - net::SocketAddr, - sync::{ - atomic::{AtomicU32, Ordering}, - Arc, - }, -}; - -use async_channel::{Receiver, Sender}; -use stratum_apps::{ - custom_mutex::Mutex, - key_utils::{Secp256k1PublicKey, Secp256k1SecretKey}, - network_helpers::noise_stream::NoiseTcpStream, - stratum_core::{ - bitcoin::Target, - channels_sv2::{ - client::extended::ExtendedChannel, - server::{ - jobs::{ - extended::ExtendedJob, factory::JobFactory, job_store::DefaultJobStore, - standard::StandardJob, - }, - standard::StandardChannel, - }, - Vardiff, VardiffState, - }, - framing_sv2::framing::Sv2Frame, - handlers_sv2::{ - HandleJobDeclarationMessagesFromServerAsync, HandleMiningMessagesFromClientAsync, - HandleMiningMessagesFromServerAsync, HandleTemplateDistributionMessagesFromServerAsync, - }, - job_declaration_sv2::{ - AllocateMiningJobToken, AllocateMiningJobTokenSuccess, DeclareMiningJob, - }, - mining_sv2::{ - ExtendedExtranonce, OpenExtendedMiningChannel, SetCustomMiningJob, SetTarget, - UpdateChannel, MAX_EXTRANONCE_LEN, MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL, - MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL, - }, - 
noise_sv2::Responder, - parsers_sv2::{AnyMessage, JobDeclaration, Mining}, - template_distribution_sv2::{NewTemplate, SetNewPrevHash as SetNewPrevHashTdp}, - }, -}; -use tokio::{net::TcpListener, select, sync::broadcast}; -use tracing::{debug, error, info, warn}; - -use crate::{ - channel_manager::downstream_message_handler::RouteMessageTo, - config::JobDeclaratorClientConfig, - downstream::Downstream, - error::JDCError, - status::{handle_error, Status, StatusSender}, - task_manager::TaskManager, - utils::{ - AtomicUpstreamState, Message, PendingChannelRequest, SV2Frame, ShutdownMessage, StdFrame, - UpstreamState, - }, -}; -mod downstream_message_handler; -mod jd_message_handler; -mod template_message_handler; -mod upstream_message_handler; - -pub const JDC_SEARCH_SPACE_BYTES: usize = 4; - -/// A `DeclaredJob` encapsulates all the relevant data associated with a single -/// job declaration, including its template, optional messages, coinbase output, -/// and transaction list. -#[derive(Clone, Debug)] -pub struct DeclaredJob { - // The original `DeclareMiningJob` message associated with this job, - // if one was sent. - declare_mining_job: Option>, - // The template associated with the declared job. - template: NewTemplate<'static>, - // The `SetNewPrevHashTdp` message associated with this job, if available. - prev_hash: Option>, - // The `SetCustomMiningJob` message associated with this job, - // if a custom job was created. - set_custom_mining_job: Option>, - // The coinbase output for this job. - coinbase_output: Vec, - // The list of transactions included in the job’s template. - tx_list: Vec>, -} - -/// Central state container for the **Channel Manager**. -/// -/// `ChannelManagerData` holds all runtime state that the JDC -/// needs to manage downstream clients, upstream connections, extranonce allocation, -/// job tracking, and various ID factories. 
-pub struct ChannelManagerData { - // Mapping of `downstream_id` β†’ `Downstream` object, - // used by the channel manager to locate and interact with downstream clients. - downstream: HashMap, - // Extranonce prefix factory for **extended downstream channels**. - // Each new extended downstream receives a unique extranonce prefix. - extranonce_prefix_factory_extended: ExtendedExtranonce, - // Extranonce prefix factory for **standard downstream channels**. - // Each new standard downstream receives a unique extranonce prefix. - extranonce_prefix_factory_standard: ExtendedExtranonce, - // Factory that generates **monotonically increasing request IDs** - // for messages sent from the JDC. - request_id_factory: AtomicU32, - // Factory that assigns a unique ID to each new **downstream connection**. - downstream_id_factory: AtomicU32, - // Factory that assigns a unique **channel ID** to each channel. - // - // ⚠️ Note: In this version of the JDC, channel IDs are unique - // across *all downstreams*, not scoped per downstream. - channel_id_factory: AtomicU32, - // Factory that assigns a unique **sequence number** to each share - // submitted from the JDC to the upstream. - sequence_number_factory: AtomicU32, - // The last **future template** received from the upstream. - last_future_template: Option>, - // The last **new prevhash** received from the upstream. - last_new_prev_hash: Option>, - // The most recent set of **allocation tokens** received from the JDS. - allocate_tokens: Option>, - // Stores new templates as they arrive, mapped by their **template ID**. - template_store: HashMap>, - // Stores the last declared job, keyed by the `request_id` used when - // declaring the job to the JDS. - // This is later used to send a `SetCustomMiningJob`. - last_declare_job_store: HashMap, - // Maps a template ID β†’ corresponding upstream job ID. - template_id_to_upstream_job_id: HashMap, - // Maps a downstream channel ID + job ID β†’ corresponding template ID. 
- downstream_channel_id_and_job_id_to_template_id: HashMap<(u32, u32), u64>, - // The coinbase outputs currently in use. - coinbase_outputs: Vec, - // Maps channel ID β†’ downstream ID. - channel_id_to_downstream_id: HashMap, - // The active upstream extended channel (client-side instance), if any. - upstream_channel: Option>, - // Optional "pool tag" string, identifying the pool. - pool_tag_string: Option, - // List of pending downstream connection requests, - // persisted while the JDC is opening a channel with the upstream. - pending_downstream_requests: VecDeque, - // Factory for creating **custom mining jobs**, if available. - job_factory: Option, - // Mapping of `(downstream_id, channel_id)` β†’ vardiff controller. - // Each entry manages variable difficulty for a specific downstream channel. - vardiff: HashMap<(u32, u32), VardiffState>, -} - -impl ChannelManagerData { - /// Resets the internal state of the Channel Manager. - /// - /// This method is primarily used during **fallback scenarios** to clear and - /// reinitialize all internal data structures. It ensures that the Channel Manager - /// returns to a clean state, ready to handle fresh upstream or downstream connections. 
- pub fn reset(&mut self, coinbase_outputs: Vec) { - self.downstream.clear(); - self.template_store.clear(); - self.last_declare_job_store.clear(); - self.template_id_to_upstream_job_id.clear(); - self.downstream_channel_id_and_job_id_to_template_id.clear(); - self.channel_id_to_downstream_id.clear(); - self.pending_downstream_requests.clear(); - - self.downstream_id_factory = AtomicU32::new(0); - self.request_id_factory = AtomicU32::new(0); - self.channel_id_factory = AtomicU32::new(0); - - let (range_0, range_1, range_2) = { - let range_1 = 0..JDC_SEARCH_SPACE_BYTES; - ( - 0..range_1.start, - range_1.clone(), - range_1.end..MAX_EXTRANONCE_LEN, - ) - }; - self.extranonce_prefix_factory_extended = - ExtendedExtranonce::new(range_0.clone(), range_1.clone(), range_2.clone(), None) - .expect("valid ranges"); - self.extranonce_prefix_factory_standard = - ExtendedExtranonce::new(range_0, range_1, range_2, None).expect("valid ranges"); - - self.allocate_tokens = None; - self.upstream_channel = None; - self.pool_tag_string = None; - - self.coinbase_outputs = coinbase_outputs; - } -} - -/// Represents all communication channels managed by the Channel Manager. -/// -/// The `ChannelManagerChannel` holds all the asynchronous communication primitives -/// required for message exchange between the **Channel Manager** and other subsystems. -/// It ensures decoupled, structured communication between upstreams, downstreams, -/// the Job Dispatcher Service (JDS), and the Template Provider (TP). -/// -/// # Channels -/// 1. **Upstream**: -/// - `(upstream_sender, upstream_receiver)` Used to send and receive messages from the upstream -/// subsystem. -/// -/// 2. **JDS**: -/// - `(jd_sender, jd_receiver)` Handles communication with JDS. -/// -/// 3. **Template Provider**: -/// - `(tp_sender, tp_receiver)` Manages communication with the Template Provider. -/// -/// 4. 
**Downstream**: -/// - `(downstream_sender, downstream_receiver)` Broadcasts messages to all downstream clients -/// and receives messages from them. -/// -/// 5. **Status**: -/// - `status_sender` Allows the Channel Manager to notify the main status loop of critical state -/// changes. - -#[derive(Clone)] -pub struct ChannelManagerChannel { - upstream_sender: Sender, - upstream_receiver: Receiver, - jd_sender: Sender, - jd_receiver: Receiver, - tp_sender: Sender, - tp_receiver: Receiver, - downstream_sender: broadcast::Sender<(u32, Message)>, - downstream_receiver: Receiver<(u32, SV2Frame)>, - status_sender: Sender, -} - -/// Contains all the state of mutable and immutable data required -/// by channel manager to process its task along with channels -/// to perform message traversal. -#[derive(Clone)] -pub struct ChannelManager { - channel_manager_data: Arc>, - channel_manager_channel: ChannelManagerChannel, - miner_tag_string: String, - share_batch_size: usize, - shares_per_minute: f32, - user_identity: String, - /// This represent the current state of Upstream channel - /// 1. NoChannel: No active upstream connection. - /// 2. Pending: A channel request has been sent, awaiting response. - /// 3. Connected: An upstream channel is successfully established. - /// 4. SoloMining: No upstream is available; the JDC operates in solo mining mode. case. 
- pub upstream_state: AtomicUpstreamState, -} - -impl ChannelManager { - /// Constructor method used to instantiate the Channel Manager - #[allow(clippy::too_many_arguments)] - pub async fn new( - config: JobDeclaratorClientConfig, - upstream_sender: Sender, - upstream_receiver: Receiver, - jd_sender: Sender, - jd_receiver: Receiver, - tp_sender: Sender, - tp_receiver: Receiver, - downstream_sender: broadcast::Sender<(u32, Message)>, - downstream_receiver: Receiver<(u32, SV2Frame)>, - status_sender: Sender, - coinbase_outputs: Vec, - ) -> Result { - let (range_0, range_1, range_2) = { - let range_1 = 0..JDC_SEARCH_SPACE_BYTES; - ( - 0..range_1.start, - range_1.clone(), - range_1.end..MAX_EXTRANONCE_LEN, - ) - }; - - let make_extranonce_factory = || { - ExtendedExtranonce::new(range_0.clone(), range_1.clone(), range_2.clone(), None) - .expect("Failed to create ExtendedExtranonce with valid ranges") - }; - - let extranonce_prefix_factory_extended = make_extranonce_factory(); - let extranonce_prefix_factory_standard = make_extranonce_factory(); - - let channel_manager_data = Arc::new(Mutex::new(ChannelManagerData { - downstream: HashMap::new(), - extranonce_prefix_factory_extended, - extranonce_prefix_factory_standard, - downstream_id_factory: AtomicU32::new(0), - request_id_factory: AtomicU32::new(0), - channel_id_factory: AtomicU32::new(0), - sequence_number_factory: AtomicU32::new(0), - last_future_template: None, - last_new_prev_hash: None, - allocate_tokens: None, - template_store: HashMap::new(), - last_declare_job_store: HashMap::new(), - template_id_to_upstream_job_id: HashMap::new(), - downstream_channel_id_and_job_id_to_template_id: HashMap::new(), - coinbase_outputs, - channel_id_to_downstream_id: HashMap::new(), - upstream_channel: None, - pool_tag_string: None, - pending_downstream_requests: VecDeque::new(), - job_factory: None, - vardiff: HashMap::new(), - })); - - let channel_manager_channel = ChannelManagerChannel { - upstream_sender, - 
upstream_receiver, - jd_sender, - jd_receiver, - tp_sender, - tp_receiver, - downstream_sender, - downstream_receiver, - status_sender, - }; - - let channel_manager = ChannelManager { - channel_manager_data, - channel_manager_channel, - share_batch_size: config.share_batch_size() as usize, - shares_per_minute: config.shares_per_minute() as f32, - miner_tag_string: config.jdc_signature().to_string(), - user_identity: config.user_identity().to_string(), - upstream_state: AtomicUpstreamState::new(UpstreamState::SoloMining), - }; - - Ok(channel_manager) - } - - /// Starts the downstream server, and accepts new connection request. - #[allow(clippy::too_many_arguments)] - pub async fn start_downstream_server( - self, - authority_public_key: Secp256k1PublicKey, - authority_secret_key: Secp256k1SecretKey, - cert_validity_sec: u64, - listening_address: SocketAddr, - task_manager: Arc, - notify_shutdown: broadcast::Sender, - status_sender: Sender, - channel_manager_sender: Sender<(u32, SV2Frame)>, - channel_manager_receiver: broadcast::Sender<(u32, Message)>, - ) -> Result<(), JDCError> { - info!("Starting downstream server at {listening_address}"); - let server = TcpListener::bind(listening_address).await.map_err(|e| { - error!(error = ?e, "Failed to bind downstream server at {listening_address}"); - e - })?; - - let mut shutdown_rx = notify_shutdown.subscribe(); - - let task_manager_clone = task_manager.clone(); - task_manager.spawn(async move { - - loop { - select! 
{ - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Channel Manager: received shutdown signal"); - break; - } - Ok(ShutdownMessage::JobDeclaratorShutdownFallback(_)) => { - info!("Downstream Server: received job declarator shutdown signal"); - break; - } - Ok(ShutdownMessage::UpstreamShutdownFallback(_)) => { - info!("Downstream Server: received upstream shutdown signal"); - break; - } - Err(e) => { - warn!(error = ?e, "shutdown channel closed unexpectedly"); - break; - } - _ => {} - } - } - res = server.accept() => { - match res { - Ok((stream, socket_address)) => { - info!(%socket_address, "New downstream connection"); - let responder = match Responder::from_authority_kp( - &authority_public_key.into_bytes(), - &authority_secret_key.into_bytes(), - std::time::Duration::from_secs(cert_validity_sec), - ) { - Ok(r) => r, - Err(e) => { - error!(error = ?e, "Failed to create responder"); - continue; - } - }; - let noise_stream = match NoiseTcpStream::::new( - stream, - stratum_apps::stratum_core::codec_sv2::HandshakeRole::Responder(responder), - ) - .await - { - Ok(ns) => ns, - Err(e) => { - error!(error = ?e, "Noise handshake failed"); - continue; - } - }; - - let downstream_id = self - .channel_manager_data - .super_safe_lock(|data| data.downstream_id_factory.fetch_add(1, Ordering::Relaxed)); - - let downstream = Downstream::new( - downstream_id, - channel_manager_sender.clone(), - channel_manager_receiver.clone(), - noise_stream, - notify_shutdown.clone(), - task_manager_clone.clone(), - status_sender.clone(), - ); - - self.channel_manager_data.super_safe_lock(|data| { - data.downstream.insert(downstream_id, downstream.clone()); - }); - - downstream - .start( - notify_shutdown.clone(), - status_sender.clone(), - task_manager_clone.clone(), - ) - .await; - } - - Err(e) => { - error!(error = ?e, "Failed to accept new downstream connection"); - } - } - } - } - } - info!("Downstream server: Unified loop break"); - 
}); - Ok(()) - } - - /// The central orchestrator of the Channel Manager. - /// - /// Responsible for receiving messages from all subsystems, processing them, - /// and either forwarding them to the appropriate subsystem or updating - /// the internal state of the Channel Manager as needed. - pub async fn start( - mut self, - notify_shutdown: broadcast::Sender, - status_sender: Sender, - task_manager: Arc, - ) { - let status_sender = StatusSender::ChannelManager(status_sender); - let mut shutdown_rx = notify_shutdown.subscribe(); - - task_manager.spawn(async move { - let cm = self.clone(); - let vd = self.clone(); - let vardiff_future = vd.run_vardiff_loop(); - tokio::pin!(vardiff_future); - loop { - let mut cm_jds = cm.clone(); - let mut cm_pool = cm.clone(); - let mut cm_template = cm.clone(); - let mut cm_downstreams = cm.clone(); - tokio::select! { - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Channel Manager: received shutdown signal"); - break; - } - Ok(ShutdownMessage::DownstreamShutdown(downstream_id)) => { - info!(%downstream_id, "Channel Manager: removing downstream after shutdown"); - if let Err(e) = self.remove_downstream(downstream_id) { - tracing::error!(%downstream_id, error = ?e, "Failed to remove downstream"); - } - } - Ok(ShutdownMessage::JobDeclaratorShutdownFallback((coinbase_outputs,tx))) => { - info!("Channel Manager: Job declarator shutdown signal"); - self.upstream_state.set(UpstreamState::SoloMining); - self.channel_manager_data.super_safe_lock(|data| data.reset(coinbase_outputs)); - drop(tx); - } - Ok(ShutdownMessage::UpstreamShutdownFallback((coinbase_outputs,tx))) => { - info!("Channel Manager: Upstream shutdown signal"); - self.upstream_state.set(UpstreamState::SoloMining); - self.channel_manager_data.super_safe_lock(|data| data.reset(coinbase_outputs)); - drop(tx); - } - Err(e) => { - warn!(error = ?e, "shutdown channel closed unexpectedly"); - break; - } - _ => {} - } - } - res 
= &mut vardiff_future => { - info!("Vardiff loop completed with: {res:?}"); - } - res = cm_jds.handle_jds_message() => { - if let Err(e) = res { - if !e.is_critical() { - continue; - } - error!(error = ?e, "Error handling JDS message"); - handle_error(&status_sender, e).await; - break; - } - } - res = cm_pool.handle_pool_message() => { - if let Err(e) = res { - if !e.is_critical() { - continue; - } - error!(error = ?e, "Error handling Pool message"); - handle_error(&status_sender, e).await; - break; - } - } - res = cm_template.handle_template_provider_message() => { - if let Err(e) = res { - if !e.is_critical() { - continue; - } - error!(error = ?e, "Error handling Template Receiver message"); - handle_error(&status_sender, e).await; - break; - } - } - res = cm_downstreams.handle_downstream_message() => { - if let Err(e) = res { - if !e.is_critical() { - continue; - } - error!(error = ?e, "Error handling Downstreams message"); - handle_error(&status_sender, e).await; - break; - } - } - } - } - }); - } - - // Removes a downstream entry from the Channel Manager’s state. - // - // Given a `downstream_id`, this method: - // 1. Removes the corresponding downstream from the `downstream` map. - // 2. Cleans up all associated channel mappings (both standard and extended) by removing their - // entries from `channel_id_to_downstream_id`. - fn remove_downstream(&mut self, downstream_id: u32) -> Result<(), JDCError> { - self.channel_manager_data.super_safe_lock(|cm_data| { - if let Some(downstream) = cm_data.downstream.remove(&downstream_id) { - downstream.downstream_data.super_safe_lock(|ds_data| { - for k in ds_data - .standard_channels - .keys() - .chain(ds_data.extended_channels.keys()) - { - cm_data.channel_id_to_downstream_id.remove(k); - } - }); - } - }); - Ok(()) - } - - /// Handles messages received from the JDS subsystem. - /// - /// This method listens for incoming frames on the `jd_receiver` channel. 
- /// - If the frame contains a JobDeclaration message, it forwards it to the job declaration - /// message handler. - /// - If the frame contains any unsupported message type, an error is returned. - async fn handle_jds_message(&mut self) -> Result<(), JDCError> { - if let Ok(mut sv2_frame) = self.channel_manager_channel.jd_receiver.recv().await { - let Some(message_type) = sv2_frame.get_header().map(|m| m.msg_type()) else { - return Ok(()); - }; - - self.handle_job_declaration_message_frame_from_server( - None, - message_type, - sv2_frame.payload(), - ) - .await?; - } - Ok(()) - } - - /// Handles messages received from the Upstream subsystem. - /// - /// This method listens for incoming frames on the `upstream_receiver` channel. - /// - If the frame contains a **Mining** message, it forwards it to the mining message - /// handler. - /// - If the frame contains any unsupported message type, an error is returned. - async fn handle_pool_message(&mut self) -> Result<(), JDCError> { - if let Ok(mut sv2_frame) = self.channel_manager_channel.upstream_receiver.recv().await { - let Some(message_type) = sv2_frame.get_header().map(|m| m.msg_type()) else { - return Ok(()); - }; - - self.handle_mining_message_frame_from_server(None, message_type, sv2_frame.payload()) - .await?; - } - Ok(()) - } - - // Handles messages received from the TP subsystem. - // - // This method listens for incoming frames on the `tp_receiver` channel. - // - If the frame contains a TemplateDistribution message, it forwards it to the template - // distribution message handler. - // - If the frame contains any unsupported message type, an error is returned. 
- async fn handle_template_provider_message(&mut self) -> Result<(), JDCError> { - if let Ok(mut sv2_frame) = self.channel_manager_channel.tp_receiver.recv().await { - let Some(message_type) = sv2_frame.get_header().map(|m| m.msg_type()) else { - return Ok(()); - }; - self.handle_template_distribution_message_frame_from_server( - None, - message_type, - sv2_frame.payload(), - ) - .await?; - } - Ok(()) - } - - // Handles messages received from downstream clients and routes them appropriately. - // - // # Overview - // This method is similar to the upstream JDS message handler, but introduces additional - // logic for handling OpenChannel requests (both standard and extended). - // - // # Message Flow - // - For most mining messages: The message is forwarded directly to - // `handle_mining_message_from_client`, and the `channel_id_to_downstream_id` map is used to - // determine the origin downstream. - // - // - For OpenChannel messages: At the time of request, the `channel_id` is not yet assigned, so - // we cannot map the message back to the downstream. To solve this: - // 1. The `downstream_id` is appended to the `user_identity` (e.g., - // `"identity#downstream_id"`). - // 2. Later, the appended downstream ID is stripped and used by the message handler to - // correctly attribute the request. - // - // # Channel Establishment Logic - // - NoChannel β†’ Pending: - // - The first downstream OpenChannel request is stored in `pending_downstream_requests`. - // - The upstream state transitions from `NoChannel` to `Pending`. - // - A single channel request is then sent to the upstream (JDC β†’ upstream). - // - // - Pending: - // - Additional downstream OpenChannel requests are stored in `pending_downstream_requests` - // until the upstream connection is established. - // - // - Connected / SoloMining: - // - Downstream OpenChannel requests are immediately forwarded to the mining handler. - // - // # Notes - // - Only one upstream channel is created per JDC instance. 
- // - After the upstream channel is established, all new downstream requests bypass the pending - // mechanism and are sent directly to the mining handler. - async fn handle_downstream_message(&mut self) -> Result<(), JDCError> { - if let Ok((downstream_id, mut sv2_frame)) = self - .channel_manager_channel - .downstream_receiver - .recv() - .await - { - let Some(message_type) = sv2_frame.get_header().map(|m| m.msg_type()) else { - return Err(JDCError::UnexpectedMessage(0)); - }; - - match message_type { - MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL => { - let message: Mining = (message_type, sv2_frame.payload()).try_into()?; - let Mining::OpenExtendedMiningChannel(mut downstream_channel_request) = message - else { - return Err(JDCError::UnexpectedMessage(message_type)); - }; - let user_identity = format!( - "{}#{}", - downstream_channel_request.user_identity.as_utf8_or_hex(), - downstream_id - ); - downstream_channel_request.user_identity = user_identity.try_into()?; - - let downstream_msg = downstream_channel_request.clone().into_static(); - - match self.upstream_state.get() { - UpstreamState::NoChannel => { - self.channel_manager_data.super_safe_lock(|data| { - data.pending_downstream_requests - .push_front(downstream_msg.into()); - }); - - if self - .upstream_state - .compare_and_set(UpstreamState::NoChannel, UpstreamState::Pending) - .is_ok() - { - let mut upstream_message = downstream_channel_request; - upstream_message.user_identity = - self.user_identity.clone().try_into()?; - upstream_message.request_id = 1; - upstream_message.min_extranonce_size += - JDC_SEARCH_SPACE_BYTES as u16; - let upstream_message = AnyMessage::Mining( - Mining::OpenExtendedMiningChannel(upstream_message) - .into_static(), - ); - let frame: StdFrame = upstream_message.try_into()?; - - self.channel_manager_channel - .upstream_sender - .send(frame) - .await - .map_err(|_| JDCError::ChannelErrorSender)?; - } - } - UpstreamState::Pending => { - 
self.channel_manager_data.super_safe_lock(|data| { - data.pending_downstream_requests - .push_back(downstream_msg.into()); - }); - } - UpstreamState::Connected => { - self.send_open_channel_request_to_mining_handler( - Mining::OpenExtendedMiningChannel(downstream_msg), - message_type, - ) - .await?; - } - UpstreamState::SoloMining => { - self.send_open_channel_request_to_mining_handler( - Mining::OpenExtendedMiningChannel(downstream_msg), - message_type, - ) - .await?; - } - } - } - MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL => { - let message: Mining = (message_type, sv2_frame.payload()).try_into()?; - let Mining::OpenStandardMiningChannel(mut downstream_channel_request) = message - else { - return Err(JDCError::UnexpectedMessage(message_type)); - }; - - let user_identity = format!( - "{:?}#{}", - downstream_channel_request.user_identity, downstream_id - ); - downstream_channel_request.user_identity = user_identity.try_into()?; - - let downstream_msg = downstream_channel_request.clone().into_static(); - - match self.upstream_state.get() { - UpstreamState::NoChannel => { - self.channel_manager_data.super_safe_lock(|data| { - data.pending_downstream_requests - .push_front(downstream_msg.into()) - }); - - if self - .upstream_state - .compare_and_set(UpstreamState::NoChannel, UpstreamState::Pending) - .is_ok() - { - let upstream_open = OpenExtendedMiningChannel { - user_identity: self.user_identity.clone().try_into().unwrap(), - request_id: 1, - nominal_hash_rate: downstream_channel_request.nominal_hash_rate, - max_target: downstream_channel_request.max_target, - min_extranonce_size: JDC_SEARCH_SPACE_BYTES as u16, - }; - - let frame: StdFrame = AnyMessage::Mining( - Mining::OpenExtendedMiningChannel(upstream_open).into_static(), - ) - .try_into()?; - self.channel_manager_channel - .upstream_sender - .send(frame) - .await - .map_err(|_| JDCError::ChannelErrorSender)?; - } - } - UpstreamState::Pending => { - self.channel_manager_data.super_safe_lock(|data| { - 
data.pending_downstream_requests - .push_back(downstream_msg.into()) - }); - } - UpstreamState::Connected => { - self.send_open_channel_request_to_mining_handler( - Mining::OpenStandardMiningChannel(downstream_msg), - message_type, - ) - .await?; - } - UpstreamState::SoloMining => { - self.send_open_channel_request_to_mining_handler( - Mining::OpenStandardMiningChannel(downstream_msg), - message_type, - ) - .await?; - } - } - } - _ => { - self.handle_mining_message_frame_from_client( - None, - message_type, - sv2_frame.payload(), - ) - .await?; - } - } - } - - Ok(()) - } - - // Utility method to send open channel request from downstream to message handler. - async fn send_open_channel_request_to_mining_handler( - &mut self, - mining_msg: Mining<'static>, - message_type: u8, - ) -> Result<(), JDCError> { - let sv2_frame: Sv2Frame, Vec> = match Sv2Frame::from_message( - mining_msg, - message_type, - 0, - false, - ) { - Some(f) => f, - None => { - warn!(%message_type, "Failed to build Sv2Frame from mining message; dropping request"); - return Err(JDCError::FrameConversionError); - } - }; - - let mut serialized = vec![0u8; sv2_frame.encoded_length()]; - if let Err(e) = sv2_frame.serialize(&mut serialized) { - warn!(?e, %message_type, len = serialized.len(), "Failed to serialize Sv2Frame; dropping request"); - return Err(JDCError::FramingSv2(e)); - } - - let mut deserialized_frame = - match Sv2Frame::, Vec>::from_bytes(serialized) { - Ok(f) => f, - Err(e) => { - warn!(?e, %message_type, "Failed to deserialize Sv2Frame; dropping request"); - return Err(JDCError::FrameConversionError); - } - }; - - let payload = deserialized_frame.payload(); - self.handle_mining_message_frame_from_client(None, message_type, payload) - .await?; - Ok(()) - } - - /// Utility method to request for more token to JDS. 
- pub async fn allocate_tokens(&self, token_to_allocate: u32) -> Result<(), JDCError> { - debug!("Allocating {} job tokens", token_to_allocate); - - for i in 0..token_to_allocate { - let request_id = self - .channel_manager_data - .super_safe_lock(|data| data.request_id_factory.fetch_add(1, Ordering::Relaxed)); - - debug!( - request_id, - "Allocating token {}/{}", - i + 1, - token_to_allocate - ); - - let message = JobDeclaration::AllocateMiningJobToken(AllocateMiningJobToken { - user_identifier: self - .user_identity - .to_string() - .try_into() - .expect("Static string should always convert"), - request_id, - }); - - let frame: StdFrame = AnyMessage::JobDeclaration(message) - .try_into() - .map_err(|e| { - info!(error = ?e, "Failed to convert AllocateMiningJobToken to frame"); - e - })?; - - self.channel_manager_channel - .jd_sender - .send(frame) - .await - .map_err(|e| { - info!(error = ?e, "Failed to send AllocateMiningJobToken frame"); - JDCError::ChannelErrorSender - })?; - } - - info!("Requested allocation of {token_to_allocate} mining job tokens to JDS"); - Ok(()) - } - - // Runs the vardiff on extended channel. 
- fn run_vardiff_on_extended_channel( - downstream_id: u32, - channel_id: u32, - channel_state: &mut stratum_apps::stratum_core::channels_sv2::server::extended::ExtendedChannel< - 'static, - DefaultJobStore>, - >, - vardiff_state: &mut VardiffState, - updates: &mut Vec, - ) { - let (hashrate, target, shares_per_minute) = ( - channel_state.get_nominal_hashrate(), - channel_state.get_target(), - channel_state.get_shares_per_minute(), - ); - - let Ok(new_hashrate_opt) = vardiff_state.try_vardiff(hashrate, target, shares_per_minute) - else { - debug!("Vardiff computation failed for extended channel {channel_id}"); - return; - }; - - let Some(new_hashrate) = new_hashrate_opt else { - return; - }; - - match channel_state.update_channel(new_hashrate, None) { - Ok(()) => { - let updated_target = channel_state.get_target(); - updates.push( - ( - downstream_id, - Mining::SetTarget(SetTarget { - channel_id, - maximum_target: updated_target.to_le_bytes().into(), - }), - ) - .into(), - ); - debug!("Updated target for extended channel_id={channel_id} to {updated_target:?}",); - } - Err(e) => warn!( - "Failed to update extended channel channel_id={channel_id} during vardiff {e:?}" - ), - } - } - - // Runs the vardiff on the standard channel. 
- fn run_vardiff_on_standard_channel( - downstream_id: u32, - channel_id: u32, - channel: &mut StandardChannel<'static, DefaultJobStore>>, - vardiff_state: &mut VardiffState, - updates: &mut Vec, - ) { - let hashrate = channel.get_nominal_hashrate(); - let target = channel.get_target(); - let shares_per_minute = channel.get_shares_per_minute(); - - let Ok(new_hashrate_opt) = vardiff_state.try_vardiff(hashrate, target, shares_per_minute) - else { - debug!("Vardiff computation failed for standard channel {channel_id}"); - return; - }; - - if let Some(new_hashrate) = new_hashrate_opt { - match channel.update_channel(new_hashrate, None) { - Ok(()) => { - let updated_target = channel.get_target(); - updates.push( - ( - downstream_id, - Mining::SetTarget(SetTarget { - channel_id, - maximum_target: updated_target.to_le_bytes().into(), - }), - ) - .into(), - ); - debug!("Updated target for standard channel channel_id={channel_id} to {updated_target:?}"); - } - Err(e) => warn!( - "Failed to update standard channel channel_id={channel_id} during vardiff {e:?}" - ), - } - } - } - - // Periodic vardiff task loop. - // - // # Purpose - // - Executes the vardiff cycle every 60 seconds for all downstreams. - // - Delegates to [`Self::run_vardiff`] on each tick. - async fn run_vardiff_loop(&self) -> Result<(), JDCError> { - let mut ticker = tokio::time::interval(std::time::Duration::from_secs(60)); - loop { - ticker.tick().await; - info!("Starting vardiff loop for downstreams"); - - if let Err(e) = self.run_vardiff().await { - error!(error = ?e, "Vardiff iteration failed"); - } - } - } - - // Runs vardiff across **all channels** and generates updates. - // - // # Purpose - // - Iterates through all downstream channels (both standard and extended). - // - Runs vardiff for each channel and collects the resulting updates. - // - Propagates difficulty changes to downstreams and also sends an `UpdateChannel` message - // upstream if applicable. 
- async fn run_vardiff(&self) -> Result<(), JDCError> { - let mut messages: Vec = vec![]; - self.channel_manager_data - .super_safe_lock(|channel_manager_data| { - for ((channel_id, downstream_id), vardiff_state) in - channel_manager_data.vardiff.iter_mut() - { - let Some(downstream) = channel_manager_data.downstream.get_mut(downstream_id) - else { - continue; - }; - downstream.downstream_data.super_safe_lock(|data| { - if let Some(standard_channel) = data.standard_channels.get_mut(channel_id) { - Self::run_vardiff_on_standard_channel( - *downstream_id, - *channel_id, - standard_channel, - vardiff_state, - &mut messages, - ); - } - if let Some(extended_channel) = data.extended_channels.get_mut(channel_id) { - Self::run_vardiff_on_extended_channel( - *downstream_id, - *channel_id, - extended_channel, - vardiff_state, - &mut messages, - ); - } - }); - } - - if !messages.is_empty() { - let mut downstream_hashrate = 0.0; - let mut min_target = [0xff; 32]; - - for (_, downstream) in channel_manager_data.downstream.iter() { - downstream.downstream_data.super_safe_lock(|data| { - let mut update_from_channel = |hashrate: f32, target: &Target| { - downstream_hashrate += hashrate; - min_target = std::cmp::min(target.to_le_bytes(), min_target); - }; - - for (_, channel) in data.standard_channels.iter() { - update_from_channel( - channel.get_nominal_hashrate(), - channel.get_target(), - ); - } - - for (_, channel) in data.extended_channels.iter() { - update_from_channel( - channel.get_nominal_hashrate(), - channel.get_target(), - ); - } - }); - } - - if let Some(ref upstream_channel) = channel_manager_data.upstream_channel { - debug!( - "Checking upstream channel {} with hashrate {} and target {:?}", - upstream_channel.get_channel_id(), - upstream_channel.get_nominal_hashrate(), - upstream_channel.get_target() - ); - - info!("Sending update channel message upstream"); - messages.push( - Mining::UpdateChannel(UpdateChannel { - channel_id: upstream_channel.get_channel_id(), - 
nominal_hash_rate: downstream_hashrate, - maximum_target: min_target.into(), - }) - .into(), - ) - } - } - }); - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - info!("Vardiff update cycle complete"); - Ok(()) - } -} diff --git a/roles/jd-client/src/lib/channel_manager/template_message_handler.rs b/roles/jd-client/src/lib/channel_manager/template_message_handler.rs deleted file mode 100644 index e0e97a4bc3..0000000000 --- a/roles/jd-client/src/lib/channel_manager/template_message_handler.rs +++ /dev/null @@ -1,611 +0,0 @@ -use std::sync::atomic::Ordering; - -use stratum_apps::stratum_core::{ - binary_sv2::{Seq064K, U256}, - bitcoin::{consensus, hashes::Hash, Amount, Transaction}, - channels_sv2::{chain_tip::ChainTip, outputs::deserialize_outputs}, - handlers_sv2::HandleTemplateDistributionMessagesFromServerAsync, - job_declaration_sv2::DeclareMiningJob, - mining_sv2::SetNewPrevHash as SetNewPrevHashMp, - parsers_sv2::{AnyMessage, JobDeclaration, Mining, TemplateDistribution}, - template_distribution_sv2::*, -}; -use tracing::{error, info, warn}; - -use crate::{ - channel_manager::{downstream_message_handler::RouteMessageTo, ChannelManager, DeclaredJob}, - error::JDCError, - jd_mode::{get_jd_mode, JdMode}, - utils::StdFrame, -}; - -impl HandleTemplateDistributionMessagesFromServerAsync for ChannelManager { - type Error = JDCError; - - // Handles a `NewTemplate` message from the Template Provider. - // - // Behavior depends on the JD mode: - // - FullTemplate: sends a `RequestTransactionData` to start the declare-mining-job flow. - // - CoinbaseOnly: sends a `SetCustomMiningJob` and continues with that flow. - // - // In both modes, the new template is stored and propagated to all - // downstream channels, updating their state and dispatching the - // appropriate mining job messages (standard, group, or extended). - // - // Also updates future/active template state and triggers token - // allocation if needed. 
- async fn handle_new_template( - &mut self, - _server_id: Option, - msg: NewTemplate<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - let coinbase_outputs = self.channel_manager_data.super_safe_lock(|data| { - data.template_store - .insert(msg.template_id, msg.clone().into_static()); - if msg.future_template { - data.last_future_template = Some(msg.clone().into_static()); - } - data.coinbase_outputs.clone() - }); - - let mut coinbase_outputs = deserialize_outputs(coinbase_outputs) - .map_err(|_| JDCError::ChannelManagerHasBadCoinbaseOutputs)?; - - if get_jd_mode() == JdMode::FullTemplate { - let tx_data_request = AnyMessage::TemplateDistribution( - TemplateDistribution::RequestTransactionData(RequestTransactionData { - template_id: msg.template_id, - }), - ); - let frame: StdFrame = tx_data_request.try_into()?; - self.channel_manager_channel - .tp_sender - .send(frame) - .await - .map_err(|_e| JDCError::ChannelErrorSender)?; - } - - let messages = self.channel_manager_data.super_safe_lock(|channel_manager_data| { - let mut messages: Vec = Vec::new(); - coinbase_outputs[0].value = Amount::from_sat(msg.coinbase_tx_value_remaining); - - for (downstream_id, downstream) in channel_manager_data.downstream.iter_mut() { - - let messages_ = downstream.downstream_data.super_safe_lock(|data| { - - let mut messages: Vec = vec![]; - - let group_channel_job = if let Some(ref mut group_channel) = data.group_channels { - if group_channel.on_new_template(msg.clone().into_static(), coinbase_outputs.clone()).is_ok() { - match msg.future_template { - true => { - let future_job_id = group_channel - .get_future_template_to_job_id() - .get(&msg.template_id) - .expect("job_id must exist"); - Some(group_channel - .get_future_jobs() - .get(future_job_id) - .expect("future job must exist")).cloned() - }, - false => { - Some(group_channel - .get_active_job() - .expect("active job must exist")).cloned() - } - } - } else { - tracing::error!("Some issue with downstream: 
{downstream_id}, group channel"); - None - } - } else { - None - }; - - if let Some(upstream_channel) = channel_manager_data.upstream_channel.as_mut() { - if !msg.future_template && get_jd_mode() == JdMode::CoinbaseOnly { - if let (Some(token), Some(prevhash)) = ( - channel_manager_data.allocate_tokens.clone(), - channel_manager_data.last_new_prev_hash.clone(), - ) { - let request_id = channel_manager_data.request_id_factory.fetch_add(1, Ordering::Relaxed); - let job_factory = channel_manager_data.job_factory.as_mut().unwrap(); - let full_extranonce_size = upstream_channel.get_full_extranonce_size(); - let custom_job = job_factory.new_custom_job(upstream_channel.get_channel_id(), request_id, token.clone().mining_job_token, prevhash.clone().into(), msg.clone(), coinbase_outputs.clone(), full_extranonce_size); - - if let Ok(custom_job) = custom_job{ - let last_declare = DeclaredJob { - declare_mining_job: None, - template: msg.clone().into_static(), - prev_hash: Some(prevhash), - set_custom_mining_job: Some(custom_job.clone().into_static()), - coinbase_output: channel_manager_data.coinbase_outputs.clone(), - tx_list: Vec::new(), - }; - channel_manager_data - .last_declare_job_store - .insert(request_id, last_declare); - messages.push( - Mining::SetCustomMiningJob(custom_job).into() - ); - } - } - } - } - match msg.future_template { - true => { - for (channel_id, standard_channel) in data.standard_channels.iter_mut() { - if data.group_channels.is_none() { - if let Err(e) = standard_channel.on_new_template(msg.clone().into_static(), coinbase_outputs.clone()) { - tracing::error!("Error while adding template to standard channel: {channel_id:?} {e:?}"); - continue; - } - let standard_job_id = standard_channel.get_future_template_to_job_id().get(&msg.template_id).expect("job_id must exist"); - let standard_job = standard_channel.get_future_jobs().get(standard_job_id).expect("standard job must exist"); - 
channel_manager_data.downstream_channel_id_and_job_id_to_template_id.insert((*channel_id, *standard_job_id), msg.template_id); - let standard_job_message = standard_job.get_job_message(); - messages.push((*downstream_id, Mining::NewMiningJob(standard_job_message.clone())).into()); - } - if let Some(ref group_channel_job) = group_channel_job { - if let Err(e) = standard_channel.on_new_template(msg.clone().into_static(), coinbase_outputs.clone()) { - tracing::error!("Error while adding template to standard channel: {channel_id:?} {e:?}"); - continue; - } - _ = standard_channel - .on_group_channel_job(group_channel_job.clone()); - } - } - if let Some(group_channel_job) = group_channel_job { - let job_message = group_channel_job.get_job_message(); - messages.push((*downstream_id, Mining::NewExtendedMiningJob(job_message.clone())).into()); - } - - for (channel_id, extended_channel) in data.extended_channels.iter_mut() { - if let Err(e) = extended_channel.on_new_template(msg.clone().into_static(), coinbase_outputs.clone()) { - tracing::error!("Error while adding template to standard channel: {channel_id:?} {e:?}"); - continue; - } - let extended_job_id = extended_channel - .get_future_template_to_job_id() - .get(&msg.template_id) - .expect("job_id must exist"); - - let extended_job = extended_channel - .get_future_jobs() - .get(extended_job_id) - .expect("extended job must exist"); - - channel_manager_data.downstream_channel_id_and_job_id_to_template_id.insert((*channel_id, *extended_job_id), msg.template_id); - let extended_job_message = extended_job.get_job_message(); - - messages.push((*downstream_id,Mining::NewExtendedMiningJob(extended_job_message.clone())).into()); - } - } - false => { - for (channel_id, standard_channel) in data.standard_channels.iter_mut() { - if data.group_channels.is_none() { - if let Err(e) = standard_channel.on_new_template(msg.clone().into_static(), coinbase_outputs.clone()) { - tracing::error!("Error while adding template to standard 
channel: {channel_id:?} {e:?}"); - continue; - } - let standard_job = standard_channel.get_active_job().expect("standard job must exist"); - channel_manager_data.downstream_channel_id_and_job_id_to_template_id.insert((*channel_id, standard_job.get_job_id()), msg.template_id); - let standard_job_message = standard_job.get_job_message(); - messages.push((*downstream_id, Mining::NewMiningJob(standard_job_message.clone())).into()); - } - if let Some(ref group_channel_job) = group_channel_job { - if let Err(e) = standard_channel.on_new_template(msg.clone().into_static(), coinbase_outputs.clone()) { - tracing::error!("Error while adding template to standard channel: {channel_id:?} {e:?}"); - continue; - } - _ = standard_channel - .on_group_channel_job(group_channel_job.clone()); - } - } - if let Some(group_channel_job) = group_channel_job { - let job_message = group_channel_job.get_job_message(); - messages.push((*downstream_id, Mining::NewExtendedMiningJob(job_message.clone())).into()); - } - - for (channel_id, extended_channel) in data.extended_channels.iter_mut() { - if let Err(e) = extended_channel.on_new_template(msg.clone().into_static(), coinbase_outputs.clone()) { - tracing::error!("Error while adding template to standard channel: {channel_id:?} {e:?}"); - continue; - } - let extended_job = extended_channel - .get_active_job() - .expect("extended job must exist"); - - channel_manager_data.downstream_channel_id_and_job_id_to_template_id.insert((*channel_id, extended_job.get_job_id()), msg.template_id); - let extended_job_message = extended_job.get_job_message(); - - messages.push((*downstream_id,Mining::NewExtendedMiningJob(extended_job_message.clone())).into()); - } - } - } - - messages - - }); - messages.extend(messages_); - } - messages - }); - - if get_jd_mode() == JdMode::CoinbaseOnly && !msg.future_template { - _ = self.allocate_tokens(1).await; - } - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - 
// Handles a `RequestTransactionDataError` message from the Template Provider. - async fn handle_request_tx_data_error( - &mut self, - _server_id: Option, - msg: RequestTransactionDataError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - let error_code = msg.error_code.as_utf8_or_hex(); - - if matches!( - error_code.as_str(), - "template-id-not-found" | "stale-template-id" - ) { - return Ok(()); - } - Err(JDCError::TxDataError) - } - - // Handles a `RequestTransactionDataSuccess` message from the Template Provider. - // - // Flow: - // - If the template is not a future template, immediately declare a mining job to JDS. - // - If the template is a future template: - // - Check if the current `prevhash` activates this template. - // - If activated β†’ proceed with the normal declare job flow. - // - If not activated β†’ cache it as a declare job for later propagation. - async fn handle_request_tx_data_success( - &mut self, - _server_id: Option, - msg: RequestTransactionDataSuccess<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - let transactions_data = msg.transaction_list; - let excess_data = msg.excess_data; - - let coinbase_outputs = self - .channel_manager_data - .super_safe_lock(|data| data.coinbase_outputs.clone()); - - let mut deserialized_outputs = deserialize_outputs(coinbase_outputs) - .map_err(|_| JDCError::ChannelManagerHasBadCoinbaseOutputs)?; - - let (token, template_message, request_id, prevhash) = - self.channel_manager_data.super_safe_lock(|data| { - ( - data.allocate_tokens.clone(), - data.template_store.remove(&msg.template_id), - data.request_id_factory.fetch_add(1, Ordering::Relaxed), - data.last_new_prev_hash.clone(), - ) - }); - - _ = self.allocate_tokens(1).await; - let Some(token) = token else { - error!("Token not found, template id: {}", msg.template_id); - return Err(JDCError::TokenNotFound); - }; - - let Some(template_message) = template_message else { - error!("Template not found, template id: 
{}", msg.template_id); - return Err(JDCError::TemplateNotFound(msg.template_id)); - }; - - let mining_token = token.mining_job_token.clone(); - deserialized_outputs[0].value = - Amount::from_sat(template_message.coinbase_tx_value_remaining); - let reserialized_outputs = consensus::serialize(&deserialized_outputs); - - let tx_list: Vec = transactions_data - .to_vec() - .iter() - .map(|raw_tx| consensus::deserialize(raw_tx).expect("invalid tx")) - .collect(); - - let txids_as_u256: Vec> = tx_list - .iter() - .map(|tx| { - let txid = tx.compute_txid(); - let byte_array: [u8; 32] = *txid.as_byte_array(); - U256::Owned(byte_array.to_vec()) - }) - .collect(); - - let tx_ids = Seq064K::new(txids_as_u256).map_err(JDCError::BinarySv2)?; - let is_activated_future_template = template_message.future_template - && prevhash - .map(|prev_hash| prev_hash.template_id != template_message.template_id) - .unwrap_or(true); - - let declare_job = self.channel_manager_data.super_safe_lock(|data| { - let job_factory = data.job_factory.as_mut()?; - - let full_extranonce_size = data - .upstream_channel - .as_ref() - .map(|channel| channel.get_full_extranonce_size()) - .unwrap_or(32); - - if let Ok((coinbase_tx_prefix, coinbase_tx_suffix)) = job_factory - .new_coinbase_tx_prefix_and_suffix( - template_message.clone(), - deserialized_outputs.clone(), - full_extranonce_size, - ) - { - let version = template_message.version; - - let declare_job = DeclareMiningJob { - request_id, - mining_job_token: mining_token.to_vec().try_into().unwrap(), - version, - coinbase_tx_prefix: coinbase_tx_prefix.try_into().unwrap(), - coinbase_tx_suffix: coinbase_tx_suffix.try_into().unwrap(), - tx_ids_list: tx_ids, - excess_data: excess_data.to_vec().try_into().unwrap(), - }; - - let last_declare = DeclaredJob { - declare_mining_job: Some(declare_job.clone()), - template: template_message, - prev_hash: data.last_new_prev_hash.clone(), - set_custom_mining_job: None, - coinbase_output: reserialized_outputs, - 
tx_list: transactions_data.to_vec(), - }; - - data.last_declare_job_store.insert(request_id, last_declare); - - return Some(declare_job); - } - None - }); - - if is_activated_future_template { - return Ok(()); - } - - if let Some(declare_job) = declare_job { - let frame: StdFrame = - AnyMessage::JobDeclaration(JobDeclaration::DeclareMiningJob(declare_job)) - .try_into()?; - - _ = self.channel_manager_channel.jd_sender.send(frame).await; - } - - Ok(()) - } - - // Handles a `SetNewPrevHash` message: - // - // - Check `declare_job_cache` to see if the `prevhash` activates a future template. - // - In FullTemplate mode β†’ send a `DeclareMiningJob`. - // - In CoinbaseOnly mode β†’ send a `CustomMiningJob` for the activated future template. - // - Update the upstream channel state. - // - Update all downstream channels and propagate the new `prevhash` via `SetNewPrevHash`. - async fn handle_set_new_prev_hash( - &mut self, - _server_id: Option, - msg: SetNewPrevHash<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - let coinbase_outputs = self - .channel_manager_data - .super_safe_lock(|data| data.coinbase_outputs.clone()); - - let outputs = deserialize_outputs(coinbase_outputs) - .map_err(|_| JDCError::ChannelManagerHasBadCoinbaseOutputs)?; - - let (future_template, declare_job) = self.channel_manager_data.super_safe_lock(|data| { - if let Some(upstream_channel) = data.upstream_channel.as_mut() { - if let Err(e) = upstream_channel.on_chain_tip_update(msg.clone().into()) { - error!( - "Couldn't update chaintip of the upstream channel: {msg}, error: {e:#?}" - ); - } - } - - let declare_job = data - .last_declare_job_store - .values() - .find(|declared_job| { - Some(declared_job.template.template_id) - == data.last_future_template.as_ref().map(|t| t.template_id) - }) - .map(|declared_job| declared_job.declare_mining_job.clone()); - - (data.last_future_template.clone(), declare_job) - }); - - if get_jd_mode() == JdMode::FullTemplate { - if let 
Some(Some(job)) = declare_job { - let frame: StdFrame = - AnyMessage::JobDeclaration(JobDeclaration::DeclareMiningJob(job)).try_into()?; - - self.channel_manager_channel - .jd_sender - .send(frame) - .await - .map_err(|_e| JDCError::ChannelErrorSender)?; - } - } - - let messages = self.channel_manager_data.super_safe_lock(|data| { - data.last_new_prev_hash = Some(msg.clone().into_static()); - data.last_declare_job_store.iter_mut().for_each(|(_k, v)| { - if v.template.future_template && v.template.template_id == msg.template_id { - v.prev_hash = Some(msg.clone().into_static()); - v.template.future_template = false; - } - }); - - let mut messages: Vec = vec![]; - - if let Some(ref mut upstream_channel) = data.upstream_channel { - _ = upstream_channel.on_chain_tip_update(msg.clone().into()); - - if get_jd_mode() == JdMode::CoinbaseOnly { - if let (Some(job_factory), Some(token), Some(template)) = ( - data.job_factory.as_mut(), - data.allocate_tokens.clone(), - future_template.clone(), - ) { - let request_id = data.request_id_factory.fetch_add(1, Ordering::Relaxed); - let chain_tip = ChainTip::new( - msg.prev_hash.clone().into_static(), - msg.n_bits, - msg.header_timestamp, - ); - - let full_extranonce_size = upstream_channel.get_full_extranonce_size(); - - if let Ok(custom_job) = job_factory.new_custom_job( - upstream_channel.get_channel_id(), - request_id, - token.clone().mining_job_token, - chain_tip, - template.clone(), - outputs, - full_extranonce_size, - ) { - let last_declare = DeclaredJob { - declare_mining_job: None, - template: template.into_static(), - prev_hash: Some(msg.clone().into_static()), - set_custom_mining_job: Some(custom_job.clone().into_static()), - coinbase_output: data.coinbase_outputs.clone(), - tx_list: vec![], - }; - - data.last_declare_job_store.insert(request_id, last_declare); - messages.push(Mining::SetCustomMiningJob(custom_job).into()); - } - } - } - } - - for (downstream_id, downstream) in data.downstream.iter_mut() { - let 
downstream_messages = downstream.downstream_data.super_safe_lock(|data| { - let mut messages: Vec = vec![]; - if let Some(ref mut group_channel) = data.group_channels { - _ = group_channel.on_set_new_prev_hash(msg.clone().into_static()); - let group_channel_id = group_channel.get_group_channel_id(); - let activated_group_job_id = group_channel - .get_active_job() - .expect("active job must exist") - .get_job_id(); - - let set_new_prev_hash_message = SetNewPrevHashMp { - channel_id: group_channel_id, - job_id: activated_group_job_id, - prev_hash: msg.prev_hash.clone(), - min_ntime: msg.header_timestamp, - nbits: msg.n_bits, - }; - messages.push( - ( - *downstream_id, - Mining::SetNewPrevHash(set_new_prev_hash_message), - ) - .into(), - ); - } - - for (channel_id, standard_channel) in data.standard_channels.iter_mut() { - if let Err(_e) = - standard_channel.on_set_new_prev_hash(msg.clone().into_static()) - { - continue; - }; - - // did SetupConnection have the REQUIRES_STANDARD_JOBS flag set? 
- // if yes, there's no group channel, so we need to send the SetNewPrevHashMp - // to each standard channel - if data.group_channels.is_none() { - let activated_standard_job_id = standard_channel - .get_active_job() - .expect("active job must exist") - .get_job_id(); - let set_new_prev_hash_message = SetNewPrevHashMp { - channel_id: *channel_id, - job_id: activated_standard_job_id, - prev_hash: msg.prev_hash.clone(), - min_ntime: msg.header_timestamp, - nbits: msg.n_bits, - }; - messages.push( - ( - *downstream_id, - Mining::SetNewPrevHash(set_new_prev_hash_message), - ) - .into(), - ); - } - } - - for (channel_id, extended_channel) in data.extended_channels.iter_mut() { - if let Err(_e) = - extended_channel.on_set_new_prev_hash(msg.clone().into_static()) - { - continue; - }; - - let activated_extended_job_id = extended_channel - .get_active_job() - .expect("active job must exist") - .get_job_id(); - let set_new_prev_hash_message = SetNewPrevHashMp { - channel_id: *channel_id, - job_id: activated_extended_job_id, - prev_hash: msg.prev_hash.clone(), - min_ntime: msg.header_timestamp, - nbits: msg.n_bits, - }; - messages.push( - ( - *downstream_id, - Mining::SetNewPrevHash(set_new_prev_hash_message), - ) - .into(), - ); - } - - messages - }); - - messages.extend(downstream_messages); - } - - messages - }); - - if get_jd_mode() == JdMode::CoinbaseOnly { - _ = self.allocate_tokens(1).await; - } - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } -} diff --git a/roles/jd-client/src/lib/channel_manager/upstream_message_handler.rs b/roles/jd-client/src/lib/channel_manager/upstream_message_handler.rs deleted file mode 100644 index d1d95116ef..0000000000 --- a/roles/jd-client/src/lib/channel_manager/upstream_message_handler.rs +++ /dev/null @@ -1,619 +0,0 @@ -use std::sync::atomic::Ordering; - -use stratum_apps::stratum_core::{ - bitcoin::Target, - channels_sv2::{ - client::extended::ExtendedChannel, 
outputs::deserialize_outputs, - server::jobs::factory::JobFactory, - }, - handlers_sv2::{HandleMiningMessagesFromServerAsync, SupportedChannelTypes}, - mining_sv2::*, - parsers_sv2::{AnyMessage, Mining, TemplateDistribution}, - template_distribution_sv2::RequestTransactionData, -}; -use tracing::{debug, error, info, warn}; - -use crate::{ - channel_manager::{ - downstream_message_handler::RouteMessageTo, ChannelManager, DeclaredJob, - JDC_SEARCH_SPACE_BYTES, - }, - error::{ChannelSv2Error, JDCError}, - jd_mode::{get_jd_mode, JdMode}, - status::{State, Status}, - utils::{create_close_channel_msg, PendingChannelRequest, StdFrame, UpstreamState}, -}; - -impl HandleMiningMessagesFromServerAsync for ChannelManager { - type Error = JDCError; - - fn get_channel_type_for_server(&self, _server_id: Option) -> SupportedChannelTypes { - SupportedChannelTypes::Extended - } - fn is_work_selection_enabled_for_server(&self, _server_id: Option) -> bool { - true - } - - // Handles an unexpected `OpenStandardMiningChannelSuccess` message from the upstream. - // - // The Job Declarator Client (JDC) only supports extended channel when - // communicating with upstream peer. Receiving a standard channel success - // indicates either misbehavior or a protocol violation by the upstream. - // - // In such cases, the event is treated as malicious, and a fallback - // (`UpstreamShutdownFallback`) is immediately triggered to protect the system. - async fn handle_open_standard_mining_channel_success( - &mut self, - _server_id: Option, - msg: OpenStandardMiningChannelSuccess<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - info!( - "⚠️ JDC can only open extended channels with the upstream server, preparing fallback." - ); - _ = self - .channel_manager_channel - .status_sender - .send(Status { - state: State::UpstreamShutdownFallback(JDCError::Shutdown), - }) - .await; - Ok(()) - } - - // Handles `OpenExtendedMiningChannelSuccess` messages from upstream. 
- // - // On success, this establishes a client-side extended channel: - // - If initialization fails at any step, the upstream state is reverted from `Pending` to - // `NoChannel`. - // - If initialization succeeds, we configure the extranonce factory, create a new - // `ExtendedChannel` and `JobFactory`, and update the upstream state from `Pending` to - // `Connected`. - // - // Once the upstream state transitions to `Connected`, all pending downstream requests are - // processed, and downstream channels are opened accordingly. - async fn handle_open_extended_mining_channel_success( - &mut self, - _server_id: Option, - msg: OpenExtendedMiningChannelSuccess<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - let coinbase_outputs = self - .channel_manager_data - .super_safe_lock(|data| data.coinbase_outputs.clone()); - - let outputs = deserialize_outputs(coinbase_outputs) - .map_err(|_| JDCError::DeclaredJobHasBadCoinbaseOutputs)?; - - let (channel_state, template, custom_job, close_channel) = - self.channel_manager_data.super_safe_lock(|data| { - let Some(pending_request) = data.pending_downstream_requests.front() else { - self.upstream_state.set(UpstreamState::NoChannel); - let close_channel = - create_close_channel_msg(msg.channel_id, "downstream not available"); - return (self.upstream_state.get(), None, None, Some(close_channel)); - }; - - let hashrate = match pending_request { - PendingChannelRequest::ExtendedChannel(m) => m.nominal_hash_rate, - PendingChannelRequest::StandardChannel(m) => m.nominal_hash_rate, - }; - - let prefix_len = msg.extranonce_prefix.len(); - - let total_len = prefix_len + msg.extranonce_size as usize; - let range_0 = 0..prefix_len; - let range_1 = prefix_len..prefix_len + JDC_SEARCH_SPACE_BYTES; - let range_2 = prefix_len + JDC_SEARCH_SPACE_BYTES..total_len; - - debug!( - prefix_len, - extranonce_size = msg.extranonce_size, - total_len, - "Calculated extranonce ranges" - ); - - let extranonces = match 
ExtendedExtranonce::from_upstream_extranonce( - msg.extranonce_prefix.clone().into(), - range_0, - range_1, - range_2, - ) { - Ok(e) => e, - Err(e) => { - warn!("Failed to build extranonce factory: {e:?}"); - self.upstream_state.set(UpstreamState::NoChannel); - let close_channel = - create_close_channel_msg(msg.channel_id, "downstream not available"); - return (self.upstream_state.get(), None, None, Some(close_channel)); - } - }; - - let job_factory = JobFactory::new( - true, - data.pool_tag_string.clone(), - Some(self.miner_tag_string.clone()), - ); - - let mut extended_channel = ExtendedChannel::new( - msg.channel_id, - self.user_identity.clone(), - msg.extranonce_prefix.to_vec(), - Target::from_le_bytes(msg.target.inner_as_ref().try_into().unwrap()), - hashrate, - true, - msg.extranonce_size, - ); - - if let Some(ref mut prevhash) = data.last_new_prev_hash { - _ = extended_channel.on_chain_tip_update(prevhash.clone().into()); - debug!("Applied last_new_prev_hash to new extended channel"); - } - - let set_custom_job = if get_jd_mode() == JdMode::CoinbaseOnly { - if let (Some(job_factory), Some(token), Some(template), Some(prevhash)) = ( - data.job_factory.as_mut(), - data.allocate_tokens.clone(), - data.last_future_template.clone(), - data.last_new_prev_hash.clone(), - ) { - let request_id = data.request_id_factory.fetch_add(1, Ordering::Relaxed); - - let full_extranonce_size = extended_channel.get_full_extranonce_size(); - - if let Ok(custom_job) = job_factory.new_custom_job( - extended_channel.get_channel_id(), - request_id, - token.clone().mining_job_token, - prevhash.clone().into(), - template.clone(), - outputs, - full_extranonce_size, - ) { - let last_declare = DeclaredJob { - declare_mining_job: None, - template: template.into_static(), - prev_hash: Some(prevhash.into_static()), - set_custom_mining_job: Some(custom_job.clone().into_static()), - coinbase_output: data.coinbase_outputs.clone(), - tx_list: vec![], - }; - - 
data.last_declare_job_store.insert(request_id, last_declare); - Some(custom_job) - } else { - None - } - } else { - None - } - } else { - None - }; - - data.extranonce_prefix_factory_extended = extranonces.clone(); - data.extranonce_prefix_factory_standard = extranonces; - data.upstream_channel = Some(extended_channel); - data.job_factory = Some(job_factory); - self.upstream_state.set(UpstreamState::Connected); - - info!("Extended mining channel successfully initialized"); - ( - self.upstream_state.get(), - data.last_future_template.clone(), - set_custom_job, - None, - ) - }); - - if channel_state == UpstreamState::Connected { - if get_jd_mode() == JdMode::FullTemplate { - if let Some(template) = template { - let tx_data_request = AnyMessage::TemplateDistribution( - TemplateDistribution::RequestTransactionData(RequestTransactionData { - template_id: template.template_id, - }), - ); - let frame: StdFrame = tx_data_request.try_into()?; - self.channel_manager_channel - .tp_sender - .send(frame) - .await - .map_err(|_e| JDCError::ChannelErrorSender)?; - } - } - - if get_jd_mode() == JdMode::CoinbaseOnly { - if let Some(custom_job) = custom_job { - let set_custom_job = AnyMessage::Mining(Mining::SetCustomMiningJob(custom_job)); - let frame: StdFrame = set_custom_job.try_into()?; - self.channel_manager_channel - .jd_sender - .send(frame) - .await - .map_err(|_e| JDCError::ChannelErrorSender)?; - _ = self.allocate_tokens(1).await; - } - } - - let pending_downstreams = self - .channel_manager_data - .super_safe_lock(|data| std::mem::take(&mut data.pending_downstream_requests)); - - for pending_downstream in pending_downstreams { - let message_type = pending_downstream.message_type(); - self.send_open_channel_request_to_mining_handler( - pending_downstream.into(), - message_type, - ) - .await?; - } - } - - // In case of failure, close the channel with upstream. 
- if let Some(close_channel) = close_channel { - let close_channel = AnyMessage::Mining(Mining::CloseChannel(close_channel)); - let frame: StdFrame = close_channel.try_into()?; - self.channel_manager_channel - .upstream_sender - .send(frame) - .await - .map_err(|_e| JDCError::ChannelErrorSender)?; - _ = self.allocate_tokens(1).await; - } - - Ok(()) - } - - // Handles `OpenMiningChannelError` messages received from upstream. - // - // Receiving this message is treated as malicious behavior, since JDC only supports - // extended channels. When encountered, we immediately trigger the fallback mechanism - // by transitioning the upstream state into a shutdown-fallback mode. - async fn handle_open_mining_channel_error( - &mut self, - _server_id: Option, - msg: OpenMiningChannelError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - warn!("⚠️ Cannot open extended channel with the upstream server, preparing fallback."); - - _ = self - .channel_manager_channel - .status_sender - .send(Status { - state: State::UpstreamShutdownFallback(JDCError::Shutdown), - }) - .await; - Ok(()) - } - - // Handles `UpdateChannelError` messages from upstream. - async fn handle_update_channel_error( - &mut self, - _server_id: Option, - msg: UpdateChannelError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - Ok(()) - } - - // Handles `CloseChannel` messages from upstream. - // - // Upon receiving this message, the upstream channel is immediately closed and - // the system transitions into the upstream shutdown fallback state. 
- async fn handle_close_channel( - &mut self, - _server_id: Option, - msg: CloseChannel<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - self.channel_manager_data.super_safe_lock(|data| { - data.upstream_channel = None; - }); - _ = self - .channel_manager_channel - .status_sender - .send(Status { - state: State::UpstreamShutdownFallback(JDCError::Shutdown), - }) - .await; - Ok(()) - } - - // Handles `SetExtranoncePrefix` messages from upstream. - // - // When received, this updates the current extranonce prefix and rebuilds both the - // standard and extended extranonce factories. Each active downstream channel is then - // assigned a new extranonce prefix, and a corresponding `SetExtranoncePrefix` message - // is sent downstream to synchronize state. - async fn handle_set_extranonce_prefix( - &mut self, - _server_id: Option, - msg: SetExtranoncePrefix<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - let messages_results = - self.channel_manager_data - .super_safe_lock(|channel_manager_data| { - let mut messages_results: Vec> = vec![]; - if let Some(upstream_channel) = channel_manager_data.upstream_channel.as_mut() { - if let Err(e) = - upstream_channel.set_extranonce_prefix(msg.extranonce_prefix.to_vec()) - { - return Err(JDCError::ChannelSv2( - ChannelSv2Error::ExtendedChannelClientSide(e), - )); - } - - let new_prefix_len = msg.extranonce_prefix.len(); - let rollable_extranonce_size = - upstream_channel.get_rollable_extranonce_size(); - let full_extranonce_size = - new_prefix_len + rollable_extranonce_size as usize; - if full_extranonce_size > MAX_EXTRANONCE_LEN { - return Err(JDCError::ExtranonceSizeTooLarge); - } - - let range_0 = 0..new_prefix_len; - let range_1 = new_prefix_len..new_prefix_len + JDC_SEARCH_SPACE_BYTES; - let range_2 = new_prefix_len + JDC_SEARCH_SPACE_BYTES..full_extranonce_size; - - debug!( - new_prefix_len, - rollable_extranonce_size, - full_extranonce_size, - "Calculated extranonce ranges" - ); 
- let extranonces = match ExtendedExtranonce::from_upstream_extranonce( - msg.extranonce_prefix.clone().into(), - range_0, - range_1, - range_2, - ) { - Ok(e) => e, - Err(e) => { - warn!("Failed to build extranonce factory: {e:?}"); - return Err(JDCError::ExtranoncePrefixFactoryError(e)); - } - }; - - channel_manager_data.extranonce_prefix_factory_extended = - extranonces.clone(); - channel_manager_data.extranonce_prefix_factory_standard = extranonces; - - for (downstream_id, downstream) in - channel_manager_data.downstream.iter_mut() - { - downstream.downstream_data.super_safe_lock(|data| { - for (channel_id, standard_channel) in - data.standard_channels.iter_mut() - { - match channel_manager_data - .extranonce_prefix_factory_standard - .next_prefix_standard() - { - Ok(prefix) => match standard_channel - .set_extranonce_prefix(prefix.clone().to_vec()) - { - Ok(_) => { - messages_results.push(Ok(( - *downstream_id, - Mining::SetExtranoncePrefix( - SetExtranoncePrefix { - channel_id: *channel_id, - extranonce_prefix: prefix.into(), - }, - ), - ) - .into())); - } - Err(e) => { - messages_results.push(Err(JDCError::ChannelSv2( - ChannelSv2Error::StandardChannelServerSide(e), - ))); - } - }, - Err(e) => { - messages_results.push(Err( - JDCError::ExtranoncePrefixFactoryError(e), - )); - } - } - } - for (channel_id, extended_channel) in - data.extended_channels.iter_mut() - { - match channel_manager_data - .extranonce_prefix_factory_extended - .next_prefix_extended( - extended_channel.get_rollable_extranonce_size() - as usize, - ) { - Ok(prefix) => match extended_channel - .set_extranonce_prefix(prefix.clone().to_vec()) - { - Ok(_) => { - messages_results.push(Ok(( - *downstream_id, - Mining::SetExtranoncePrefix( - SetExtranoncePrefix { - channel_id: *channel_id, - extranonce_prefix: prefix.into(), - }, - ), - ) - .into())); - } - Err(e) => { - messages_results.push(Err(JDCError::ChannelSv2( - ChannelSv2Error::ExtendedChannelServerSide(e), - ))); - } - }, - Err(e) => { - 
messages_results.push(Err( - JDCError::ExtranoncePrefixFactoryError(e), - )); - } - } - } - }); - } - } - Ok(messages_results) - })?; - - for message in messages_results.into_iter().flatten() { - message.forward(&self.channel_manager_channel).await; - } - Ok(()) - } - - // Handles `SubmitSharesSuccess` messages from upstream. - async fn handle_submit_shares_success( - &mut self, - _server_id: Option, - msg: SubmitSharesSuccess, - ) -> Result<(), Self::Error> { - info!("Received: {} βœ…", msg); - Ok(()) - } - - // Handles `SubmitSharesError` messages from upstream. - async fn handle_submit_shares_error( - &mut self, - _server_id: Option, - msg: SubmitSharesError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {} ❌", msg); - Ok(()) - } - - // Handles `NewMiningJob` messages from upstream. JDC ignores it. - async fn handle_new_mining_job( - &mut self, - _server_id: Option, - msg: NewMiningJob<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - warn!("⚠️ JDC does not expect jobs from the upstream server β€” ignoring."); - Ok(()) - } - - // Handles `NewExtendedMiningJob` messages from upstream. JDC ignores it. - async fn handle_new_extended_mining_job( - &mut self, - _server_id: Option, - msg: NewExtendedMiningJob<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - warn!("⚠️ JDC does not expect jobs from the upstream server β€” ignoring."); - Ok(()) - } - - // Handles `SetNewPrevHash` messages from upstream. JDC ignores it. - async fn handle_set_new_prev_hash( - &mut self, - _server_id: Option, - msg: SetNewPrevHash<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - warn!("⚠️ JDC does not expect prevhash updates from the upstream server β€” ignoring."); - Ok(()) - } - - // Handles `SetCustomMiningJobSuccess` messages from upstream. - // - // On success: - // - Updates the `job_id_to_template_id` mapping. - // - Updates the channel state accordingly. 
- // - Removes the associated `last_declare_job`, completing its lifecycle. - async fn handle_set_custom_mining_job_success( - &mut self, - _server_id: Option, - msg: SetCustomMiningJobSuccess, - ) -> Result<(), Self::Error> { - info!("Received: {} βœ…", msg); - self.channel_manager_data.super_safe_lock(|data| { - if let Some(last_declare_job) = data.last_declare_job_store.remove(&msg.request_id) { - let template_id = last_declare_job.template.template_id; - data.last_declare_job_store - .retain(|_, job| job.template.template_id != template_id); - - data.template_id_to_upstream_job_id - .insert(last_declare_job.template.template_id, msg.job_id as u64); - debug!(job_id = msg.job_id, "Mapped custom job into template store"); - if let (Some(upstream_channel), Some(set_custom_job)) = ( - data.upstream_channel.as_mut(), - last_declare_job.set_custom_mining_job, - ) { - if let Err(e) = - upstream_channel.on_set_custom_mining_job_success(set_custom_job, msg) - { - error!("Custom mining job success validation failed: {e:#?}"); - } - } - } else { - warn!( - request_id = msg.request_id, - "No matching declare job found for custom job success" - ); - } - }); - Ok(()) - } - - // Handles a `SetCustomMiningJobError` from upstream. - // - // Receiving this is treated as malicious behavior, so we immediately - // trigger the fallback mechanism. - async fn handle_set_custom_mining_job_error( - &mut self, - _server_id: Option, - msg: SetCustomMiningJobError<'_>, - ) -> Result<(), Self::Error> { - warn!("⚠️ Received: {} ❌", msg); - warn!("⚠️ Starting fallback mechanism."); - _ = self - .channel_manager_channel - .status_sender - .send(Status { - state: State::UpstreamShutdownFallback(JDCError::Shutdown), - }) - .await; - Ok(()) - } - - // Handles a `SetTarget` message from upstream. - // - // Updates the corresponding upstream channel's target state. 
- async fn handle_set_target( - &mut self, - _server_id: Option, - msg: SetTarget<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - self.channel_manager_data.super_safe_lock(|data| { - if let Some(ref mut upstream) = data.upstream_channel { - upstream.set_target(Target::from_le_bytes( - msg.maximum_target.clone().as_ref().try_into().unwrap(), - )); - } - }); - Ok(()) - } - - // Handles `SetGroupChannel` messages from upstream. JDC ignores it. - async fn handle_set_group_channel( - &mut self, - _server_id: Option, - msg: SetGroupChannel<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - warn!("⚠️ JDC does not expect group channel updates from the upstream server β€” ignoring."); - Ok(()) - } -} diff --git a/roles/jd-client/src/lib/config.rs b/roles/jd-client/src/lib/config.rs deleted file mode 100644 index de72efb5be..0000000000 --- a/roles/jd-client/src/lib/config.rs +++ /dev/null @@ -1,294 +0,0 @@ -use serde::Deserialize; -use std::{ - net::SocketAddr, - path::{Path, PathBuf}, - str::FromStr, -}; -use stratum_apps::{ - config_helpers::CoinbaseRewardScript, - key_utils::{Secp256k1PublicKey, Secp256k1SecretKey}, - stratum_core::bitcoin::{Amount, TxOut}, -}; - -#[derive(Debug, Deserialize, Clone)] -pub struct JobDeclaratorClientConfig { - // The address on which the JDC will listen for incoming connections when acting as an - // upstream. - listening_address: SocketAddr, - // The maximum supported SV2 protocol version. - max_supported_version: u16, - // The minimum supported SV2 protocol version. - min_supported_version: u16, - // The public key used by this JDC for noise encryption. - authority_public_key: Secp256k1PublicKey, - /// The secret key used by this JDC for noise encryption. - authority_secret_key: Secp256k1SecretKey, - /// The validity period (in seconds) for the certificate used in noise. - cert_validity_sec: u64, - /// The address of the TP that this JDC will connect to. 
- tp_address: String, - /// The expected public key of the TP's authority for authentication (optional). - tp_authority_public_key: Option, - /// A list of upstream Job Declarator Servers (JDS) that this JDC can connect to. - /// JDC can fallover between these upstreams. - upstreams: Vec, - /// This is only used during solo-mining. - pub coinbase_reward_script: CoinbaseRewardScript, - /// A signature string identifying this JDC instance. - jdc_signature: String, - /// The path to the log file where JDC will write logs. - log_file: Option, - /// User Identity - user_identity: String, - /// Shares per minute - shares_per_minute: f64, - /// share batch size - share_batch_size: u64, - /// JDC mode: FullTemplate or CoinbaseOnly - #[serde(deserialize_with = "deserialize_jdc_mode", default)] - pub mode: ConfigJDCMode, -} - -impl JobDeclaratorClientConfig { - #[allow(clippy::too_many_arguments)] - pub fn new( - listening_address: SocketAddr, - protocol_config: ProtocolConfig, - user_identity: String, - shares_per_minute: f64, - share_batch_size: u64, - pool_config: PoolConfig, - tp_config: TPConfig, - upstreams: Vec, - jdc_signature: String, - jdc_mode: Option, - ) -> Self { - Self { - listening_address, - max_supported_version: protocol_config.max_supported_version, - min_supported_version: protocol_config.min_supported_version, - authority_public_key: pool_config.authority_public_key, - authority_secret_key: pool_config.authority_secret_key, - cert_validity_sec: tp_config.cert_validity_sec, - tp_address: tp_config.tp_address, - tp_authority_public_key: tp_config.tp_authority_public_key, - upstreams, - coinbase_reward_script: protocol_config.coinbase_reward_script, - jdc_signature, - log_file: None, - user_identity, - shares_per_minute, - share_batch_size, - mode: jdc_mode - .map(|s| s.parse::().unwrap_or_default()) - .unwrap_or_default(), - } - } - - /// Returns the listening address of the Job Declartor Client. 
- pub fn listening_address(&self) -> &SocketAddr { - &self.listening_address - } - - /// Returns the list of upstreams. - /// - /// JDC will try to fallback to the next upstream in case of failure of the current one. - pub fn upstreams(&self) -> &Vec { - &self.upstreams - } - - /// Returns the authority public key. - pub fn authority_public_key(&self) -> &Secp256k1PublicKey { - &self.authority_public_key - } - - /// Returns the authority secret key. - pub fn authority_secret_key(&self) -> &Secp256k1SecretKey { - &self.authority_secret_key - } - - /// Returns the certificate validity in seconds. - pub fn cert_validity_sec(&self) -> u64 { - self.cert_validity_sec - } - - /// Returns Template Provider address. - pub fn tp_address(&self) -> &str { - &self.tp_address - } - - /// Returns Template Provider authority public key. - pub fn tp_authority_public_key(&self) -> Option<&Secp256k1PublicKey> { - self.tp_authority_public_key.as_ref() - } - - /// Returns the minimum supported version. - pub fn min_supported_version(&self) -> u16 { - self.min_supported_version - } - - /// Returns the maximum supported version. - pub fn max_supported_version(&self) -> u16 { - self.max_supported_version - } - - /// Returns the JDC signature. 
- pub fn jdc_signature(&self) -> &str { - &self.jdc_signature - } - - pub fn get_txout(&self) -> TxOut { - TxOut { - value: Amount::from_sat(0), - script_pubkey: self.coinbase_reward_script.script_pubkey().to_owned(), - } - } - - pub fn log_file(&self) -> Option<&Path> { - self.log_file.as_deref() - } - pub fn set_log_file(&mut self, log_file: Option) { - if let Some(log_file) = log_file { - self.log_file = Some(log_file); - } - } - pub fn user_identity(&self) -> &str { - &self.user_identity - } - - pub fn shares_per_minute(&self) -> f64 { - self.shares_per_minute - } - - pub fn share_batch_size(&self) -> u64 { - self.share_batch_size - } -} - -#[derive(Debug, Deserialize, Clone, Default)] -#[serde(rename_all = "UPPERCASE")] -pub enum ConfigJDCMode { - #[default] - FullTemplate, - CoinbaseOnly, -} - -impl std::str::FromStr for ConfigJDCMode { - type Err = (); - - fn from_str(s: &str) -> Result { - match s.to_uppercase().as_str() { - "COINBASEONLY" => Ok(ConfigJDCMode::CoinbaseOnly), - _ => Ok(ConfigJDCMode::FullTemplate), - } - } -} - -fn deserialize_jdc_mode<'de, D>(deserializer: D) -> Result -where - D: serde::Deserializer<'de>, -{ - let s: String = String::deserialize(deserializer)?; - Ok(ConfigJDCMode::from_str(&s).unwrap_or_default()) -} - -/// Represents pool specific encryption keys. -pub struct PoolConfig { - authority_public_key: Secp256k1PublicKey, - authority_secret_key: Secp256k1SecretKey, -} - -impl PoolConfig { - /// Creates a new instance of [`PoolConfig`]. - pub fn new( - authority_public_key: Secp256k1PublicKey, - authority_secret_key: Secp256k1SecretKey, - ) -> Self { - Self { - authority_public_key, - authority_secret_key, - } - } -} - -/// Represent template provider config for JDC to connect. -pub struct TPConfig { - // The validity period (in seconds) expected for the Template Provider's certificate. - cert_validity_sec: u64, - // The network address of the Template Provider. 
- tp_address: String, - // The expected public key of the Template Provider's authority (optional). - tp_authority_public_key: Option, -} - -impl TPConfig { - // Creates a new instance of [`TPConfig`]. - pub fn new( - cert_validity_sec: u64, - tp_address: String, - tp_authority_public_key: Option, - ) -> Self { - Self { - cert_validity_sec, - tp_address, - tp_authority_public_key, - } - } -} - -/// Represent protocol versioning the JDC supports. -pub struct ProtocolConfig { - // The maximum supported SV2 protocol version. - max_supported_version: u16, - // The minimum supported SV2 protocol version. - min_supported_version: u16, - // A coinbase output to be included in block templates. - coinbase_reward_script: CoinbaseRewardScript, -} - -impl ProtocolConfig { - // Creates a new instance of [`ProtocolConfig`]. - pub fn new( - max_supported_version: u16, - min_supported_version: u16, - coinbase_reward_script: CoinbaseRewardScript, - ) -> Self { - Self { - max_supported_version, - min_supported_version, - coinbase_reward_script, - } - } -} - -/// Represents necessary fields required to connect to JDS -#[derive(Debug, Deserialize, Clone)] -pub struct Upstream { - // The public key of the upstream pool's authority for authentication. - pub authority_pubkey: Secp256k1PublicKey, - // The address of the upstream pool's main server. - pub pool_address: String, - pub pool_port: u16, - // The network address of the JDS. - pub jds_address: String, - pub jds_port: u16, -} - -impl Upstream { - /// Creates a new instance of [`Upstream`]. 
- pub fn new( - authority_pubkey: Secp256k1PublicKey, - pool_address: String, - pool_port: u16, - jds_address: String, - jds_port: u16, - ) -> Self { - Self { - authority_pubkey, - pool_address, - pool_port, - jds_address, - jds_port, - } - } -} diff --git a/roles/jd-client/src/lib/downstream/message_handler.rs b/roles/jd-client/src/lib/downstream/message_handler.rs deleted file mode 100644 index 552fec7601..0000000000 --- a/roles/jd-client/src/lib/downstream/message_handler.rs +++ /dev/null @@ -1,89 +0,0 @@ -use crate::{downstream::Downstream, error::JDCError, utils::StdFrame}; -use std::convert::TryInto; -use stratum_apps::stratum_core::{ - common_messages_sv2::{ - has_requires_std_job, has_work_selection, Protocol, SetupConnection, SetupConnectionError, - SetupConnectionSuccess, - }, - handlers_sv2::HandleCommonMessagesFromClientAsync, - parsers_sv2::AnyMessage, -}; -use tracing::info; - -impl HandleCommonMessagesFromClientAsync for Downstream { - type Error = JDCError; - // Handles the initial [`SetupConnection`] message from a downstream client. - // - // This method validates that the connection request is compatible with the - // supported mining protocol and feature set. The flow is: - // - // 1. Protocol validation - // - Only the `MiningProtocol` is supported. - // - If the client requests another protocol, the connection is rejected with a - // [`SetupConnectionError`] (`unsupported-protocol`). - // - // 2. Feature flag validation - // - Work selection (`work_selection`) is not allowed. - // - If requested, the connection is rejected with a [`SetupConnectionError`] - // (`unsupported-feature-flags`). - // - // 3. Standard job requirement - // - If the downstream sets the `requires_standard_job` flag, it is recorded in - // [`DownstreamData::require_std_job`]. - // - // 4. 
Successful setup - // - If all validations pass, a [`SetupConnectionSuccess`] message is - async fn handle_setup_connection( - &mut self, - _client_id: Option, - msg: SetupConnection<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - if msg.protocol != Protocol::MiningProtocol { - info!("Rejecting connection: SetupConnection asking for other protocols than mining protocol."); - let response = SetupConnectionError { - flags: 0, - error_code: "unsupported-protocol" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - let frame: StdFrame = AnyMessage::Common(response.into_static().into()).try_into()?; - _ = self.downstream_channel.downstream_sender.send(frame).await; - - return Err(JDCError::Shutdown); - } - - if has_work_selection(msg.flags) { - info!("Rejecting: work selection not allowed."); - let response = SetupConnectionError { - flags: 0b0000_0000_0000_0010, - error_code: "unsupported-feature-flags" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - let frame: StdFrame = AnyMessage::Common(response.into_static().into()) - .try_into() - .unwrap(); - _ = self.downstream_channel.downstream_sender.send(frame).await; - - return Err(JDCError::Shutdown); - } - - if has_requires_std_job(msg.flags) { - self.downstream_data - .super_safe_lock(|data| data.require_std_job = true); - } - let response = SetupConnectionSuccess { - used_version: 2, - flags: msg.flags, - }; - let frame: StdFrame = AnyMessage::Common(response.into_static().into()).try_into()?; - - _ = self.downstream_channel.downstream_sender.send(frame).await; - - Ok(()) - } -} diff --git a/roles/jd-client/src/lib/downstream/mod.rs b/roles/jd-client/src/lib/downstream/mod.rs deleted file mode 100644 index a9a37d8014..0000000000 --- a/roles/jd-client/src/lib/downstream/mod.rs +++ /dev/null @@ -1,284 +0,0 @@ -use std::{collections::HashMap, sync::Arc}; - -use async_channel::{unbounded, Receiver, Sender}; -use stratum_apps::{ - 
custom_mutex::Mutex, - network_helpers::noise_stream::NoiseTcpStream, - stratum_core::{ - channels_sv2::server::{ - extended::ExtendedChannel, - group::GroupChannel, - jobs::{extended::ExtendedJob, job_store::DefaultJobStore, standard::StandardJob}, - standard::StandardChannel, - }, - common_messages_sv2::MESSAGE_TYPE_SETUP_CONNECTION, - handlers_sv2::HandleCommonMessagesFromClientAsync, - parsers_sv2::{AnyMessage, IsSv2Message}, - }, -}; - -use tokio::sync::broadcast; -use tracing::{debug, error, warn}; - -use crate::{ - error::JDCError, - status::{handle_error, Status, StatusSender}, - task_manager::TaskManager, - utils::{ - protocol_message_type, spawn_io_tasks, Message, MessageType, SV2Frame, ShutdownMessage, - StdFrame, - }, -}; - -mod message_handler; - -/// Holds state related to a downstream connection's mining channels. -/// -/// This includes: -/// - Whether the downstream requires a standard job (`require_std_job`). -/// - An optional [`GroupChannel`] if group channeling is used. -/// - Active [`ExtendedChannel`]s keyed by channel ID. -/// - Active [`StandardChannel`]s keyed by channel ID. -pub struct DownstreamData { - pub require_std_job: bool, - pub group_channels: Option>>>, - pub extended_channels: - HashMap>>>, - pub standard_channels: - HashMap>>>, -} - -/// Communication layer for a downstream connection. -/// -/// Provides the messaging primitives for interacting with the -/// channel manager and the downstream peer: -/// - `channel_manager_sender`: sends frames to the channel manager. -/// - `channel_manager_receiver`: receives messages from the channel manager. -/// - `downstream_sender`: sends frames to the downstream. -/// - `downstream_receiver`: receives frames from the downstream. 
-#[derive(Clone)] -pub struct DownstreamChannel { - channel_manager_sender: Sender<(u32, SV2Frame)>, - channel_manager_receiver: broadcast::Sender<(u32, Message)>, - downstream_sender: Sender, - downstream_receiver: Receiver, -} - -/// Represents a downstream client connected to this node. -#[derive(Clone)] -pub struct Downstream { - pub downstream_data: Arc>, - downstream_channel: DownstreamChannel, - pub downstream_id: u32, -} - -impl Downstream { - /// Creates a new [`Downstream`] instance and spawns the necessary I/O tasks. - pub fn new( - downstream_id: u32, - channel_manager_sender: Sender<(u32, SV2Frame)>, - channel_manager_receiver: broadcast::Sender<(u32, Message)>, - noise_stream: NoiseTcpStream, - notify_shutdown: broadcast::Sender, - task_manager: Arc, - status_sender: Sender, - ) -> Self { - let (noise_stream_reader, noise_stream_writer) = noise_stream.into_split(); - let status_sender = StatusSender::Downstream { - downstream_id, - tx: status_sender, - }; - let (inbound_tx, inbound_rx) = unbounded::(); - let (outbound_tx, outbound_rx) = unbounded::(); - spawn_io_tasks( - task_manager, - noise_stream_reader, - noise_stream_writer, - outbound_rx, - inbound_tx, - notify_shutdown, - status_sender, - ); - - let downstream_channel = DownstreamChannel { - channel_manager_receiver, - channel_manager_sender, - downstream_sender: outbound_tx, - downstream_receiver: inbound_rx, - }; - let downstream_data = Arc::new(Mutex::new(DownstreamData { - require_std_job: false, - extended_channels: HashMap::new(), - standard_channels: HashMap::new(), - group_channels: None, - })); - Downstream { - downstream_channel, - downstream_data, - downstream_id, - } - } - - /// Starts the downstream loop. - /// - /// Responsibilities: - /// - Performs the initial `SetupConnection` handshake with the downstream. - /// - Forwards mining-related messages to the channel manager. - /// - Forwards channel manager messages back to the downstream peer. 
- pub async fn start( - mut self, - notify_shutdown: broadcast::Sender, - status_sender: Sender, - task_manager: Arc, - ) { - let status_sender = StatusSender::Downstream { - downstream_id: self.downstream_id, - tx: status_sender, - }; - - let mut shutdown_rx = notify_shutdown.subscribe(); - - // Setup initial connection - if let Err(e) = self.setup_connection_with_downstream().await { - error!(?e, "Failed to set up downstream connection"); - handle_error(&status_sender, e).await; - return; - } - - let mut receiver = self.downstream_channel.channel_manager_receiver.subscribe(); - task_manager.spawn(async move { - loop { - let self_clone_1 = self.clone(); - let downstream_id = self_clone_1.downstream_id; - let self_clone_2 = self.clone(); - tokio::select! { - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - debug!("Downstream {downstream_id}: Received global shutdown"); - break; - } - Ok(ShutdownMessage::DownstreamShutdown(id)) if downstream_id == id => { - debug!("Downstream {downstream_id}: Received downstream {id} shutdown"); - break; - } - Ok(ShutdownMessage::JobDeclaratorShutdownFallback(_)) => { - debug!("Downstream {downstream_id}: Received job declaratorShutdown shutdown"); - break; - } - Ok(ShutdownMessage::UpstreamShutdownFallback(_)) => { - debug!("Downstream {downstream_id}: Received job Upstream shutdown"); - break; - } - _ => {} - } - } - res = self_clone_1.handle_downstream_message() => { - if let Err(e) = res { - error!(?e, "Error handling downstream message for {downstream_id}"); - handle_error(&status_sender, e).await; - break; - } - } - res = self_clone_2.handle_channel_manager_message(&mut receiver) => { - if let Err(e) = res { - error!(?e, "Error handling channel manager message for {downstream_id}"); - handle_error(&status_sender, e).await; - break; - } - } - - } - } - warn!("Downstream: unified message loop exited."); - }); - } - - // Performs the initial handshake with a downstream peer. 
- async fn setup_connection_with_downstream(&mut self) -> Result<(), JDCError> { - let mut frame = self.downstream_channel.downstream_receiver.recv().await?; - - let Some(message_type) = frame.get_header().map(|m| m.msg_type()) else { - return Err(JDCError::UnexpectedMessage(0)); - }; - if message_type == MESSAGE_TYPE_SETUP_CONNECTION { - self.handle_common_message_frame_from_client(None, message_type, frame.payload()) - .await?; - return Ok(()); - } - Err(JDCError::UnexpectedMessage(message_type)) - } - - // Handles messages sent from the channel manager to this downstream. - async fn handle_channel_manager_message( - self, - receiver: &mut broadcast::Receiver<(u32, AnyMessage<'static>)>, - ) -> Result<(), JDCError> { - let (downstream_id, frame) = match receiver.recv().await { - Ok(msg) => msg, - Err(e) => { - warn!(?e, "Broadcast receive failed"); - return Ok(()); - } - }; - - if downstream_id != self.downstream_id { - debug!( - ?downstream_id, - "Message ignored for non-matching downstream" - ); - return Ok(()); - } - - let message_type = frame.message_type(); - let std_frame = match StdFrame::from_message(frame, message_type, 0, true) { - Some(f) => f, - None => { - debug!("Invalid frame conversion; skipping message"); - return Ok(()); - } - }; - - self.downstream_channel - .downstream_sender - .send(std_frame) - .await - .map_err(|e| { - error!(?e, "Downstream send failed"); - JDCError::CodecNoise( - stratum_apps::stratum_core::noise_sv2::Error::ExpectedIncomingHandshakeMessage, - ) - })?; - - Ok(()) - } - - // Handles incoming messages from the downstream peer. - async fn handle_downstream_message(self) -> Result<(), JDCError> { - let sv2_frame = self.downstream_channel.downstream_receiver.recv().await?; - - let Some(message_type) = sv2_frame.get_header().map(|h| h.msg_type()) else { - return Ok(()); - }; - - if protocol_message_type(message_type) != MessageType::Mining { - warn!( - ?message_type, - "Received unsupported message type from downstream." 
- ); - return Ok(()); - } - - debug!("Received mining SV2 frame from downstream."); - self.downstream_channel - .channel_manager_sender - .send((self.downstream_id, sv2_frame)) - .await - .map_err(|e| { - error!(error=?e, "Failed to send mining message to channel manager."); - JDCError::ChannelErrorSender - })?; - - Ok(()) - } -} diff --git a/roles/jd-client/src/lib/error.rs b/roles/jd-client/src/lib/error.rs deleted file mode 100644 index f9a10997c2..0000000000 --- a/roles/jd-client/src/lib/error.rs +++ /dev/null @@ -1,336 +0,0 @@ -//! ## Error Module -//! -//! Defines [`Error`], the central error enum used throughout the Job Declarator Client (JDC). -//! -//! It unifies errors from: -//! - I/O operations -//! - Channels (send/recv) -//! - SV2 stack: Binary, Codec, Noise, Framing, RolesLogic -//! - Locking logic (PoisonError) -//! - Domain-specific issues -//! -//! This module ensures that all errors can be passed around consistently, including across async -//! boundaries. -use ext_config::ConfigError; -use std::fmt; -use stratum_apps::{ - network_helpers, - stratum_core::{ - binary_sv2, bitcoin, - channels_sv2::{ - client::error::ExtendedChannelError as ExtendedChannelClientError, - server::error::{ - ExtendedChannelError as ExtendedChannelServerError, GroupChannelError, - StandardChannelError, - }, - }, - framing_sv2, - handlers_sv2::HandlerErrorType, - mining_sv2::ExtendedExtranonceError, - noise_sv2, - parsers_sv2::ParserError, - }, -}; -use tokio::{sync::broadcast, time::error::Elapsed}; - -#[derive(Debug)] -pub enum ChannelSv2Error { - ExtendedChannelClientSide(ExtendedChannelClientError), - ExtendedChannelServerSide(ExtendedChannelServerError), - StandardChannelServerSide(StandardChannelError), - GroupChannelServerSide(GroupChannelError), -} - -#[derive(Debug)] -pub enum JDCError { - #[allow(dead_code)] - VecToSlice32(Vec), - /// Errors on bad CLI argument input. - BadCliArgs, - /// Errors on bad `config` TOML deserialize. 
- BadConfigDeserialize(ConfigError), - /// Errors from `binary_sv2` crate. - BinarySv2(binary_sv2::Error), - /// Errors on bad noise handshake. - CodecNoise(noise_sv2::Error), - /// Errors from `framing_sv2` crate. - FramingSv2(framing_sv2::Error), - /// Errors on bad `TcpStream` connection. - Io(std::io::Error), - /// Errors on bad `String` to `int` conversion. - ParseInt(std::num::ParseIntError), - #[allow(dead_code)] - SubprotocolMining(String), - // Locking Errors - PoisonLock, - TokioChannelErrorRecv(tokio::sync::broadcast::error::RecvError), - Infallible(std::convert::Infallible), - Parser(ParserError), - /// Channel receiver error - ChannelErrorReceiver(async_channel::RecvError), - /// Channel sender error - ChannelErrorSender, - /// Broadcast channel receiver error - BroadcastChannelErrorReceiver(broadcast::error::RecvError), - Shutdown, - NetworkHelpersError(network_helpers::Error), - UnexpectedMessage(u8), - InvalidUserIdentity(String), - BitcoinEncodeError(bitcoin::consensus::encode::Error), - InvalidSocketAddress(String), - Timeout, - LastDeclareJobNotFound(u32), - ActiveJobNotFound(u32), - TokenNotFound, - TemplateNotFound(u64), - DownstreamNotFoundWithChannelId(u32), - DownstreamNotFound(u32), - DownstreamIdNotFound, - FutureTemplateNotPresent, - LastNewPrevhashNotFound, - VardiffNotFound(u32), - TxDataError, - FrameConversionError, - FailedToCreateCustomJob, - AllocateMiningJobTokenSuccessCoinbaseOutputsError, - ChannelManagerHasBadCoinbaseOutputs, - DeclaredJobHasBadCoinbaseOutputs, - ExtranonceSizeTooLarge, - FailedToCreateGroupChannel(GroupChannelError), - ///Channel Errors - ChannelSv2(ChannelSv2Error), - ExtranoncePrefixFactoryError(ExtendedExtranonceError), -} - -impl std::error::Error for JDCError {} - -impl fmt::Display for JDCError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use JDCError::*; - match self { - BadCliArgs => write!(f, "Bad CLI arg input"), - BadConfigDeserialize(ref e) => write!(f, "Bad `config` TOML 
deserialize: `{e:?}`"), - BinarySv2(ref e) => write!(f, "Binary SV2 error: `{e:?}`"), - CodecNoise(ref e) => write!(f, "Noise error: `{e:?}"), - FramingSv2(ref e) => write!(f, "Framing SV2 error: `{e:?}`"), - Io(ref e) => write!(f, "I/O error: `{e:?}"), - ParseInt(ref e) => write!(f, "Bad convert from `String` to `int`: `{e:?}`"), - SubprotocolMining(ref e) => write!(f, "Subprotocol Mining Error: `{e:?}`"), - PoisonLock => write!(f, "Poison Lock error"), - ChannelErrorReceiver(ref e) => write!(f, "Channel receive error: `{e:?}`"), - TokioChannelErrorRecv(ref e) => write!(f, "Channel receive error: `{e:?}`"), - VecToSlice32(ref e) => write!(f, "Standard Error: `{e:?}`"), - Infallible(ref e) => write!(f, "Infallible Error:`{e:?}`"), - Parser(ref e) => write!(f, "Parser error: `{e:?}`"), - BroadcastChannelErrorReceiver(ref e) => { - write!(f, "Broadcast channel receive error: {e:?}") - } - ChannelErrorSender => write!(f, "Sender error"), - Shutdown => write!(f, "Shutdown"), - NetworkHelpersError(ref e) => write!(f, "Network error: {e:?}"), - UnexpectedMessage(message_type) => write!(f, "Unexpected Message: {message_type}"), - InvalidUserIdentity(_) => write!(f, "User ID is invalid"), - BitcoinEncodeError(_) => write!(f, "Error generated during encoding"), - InvalidSocketAddress(ref s) => write!(f, "Invalid socket address: {s}"), - Timeout => write!(f, "Time out error"), - LastDeclareJobNotFound(request_id) => { - write!(f, "last declare job not found for request id: {request_id}") - } - ActiveJobNotFound(request_id) => { - write!(f, "Active Job not found for request_id: {request_id}") - } - TokenNotFound => { - write!(f, "Token Not found") - } - TemplateNotFound(template_id) => { - write!(f, "Template not found, template_id: {template_id}") - } - DownstreamNotFoundWithChannelId(channel_id) => { - write!(f, "Downstream not found with channel id: {channel_id}") - } - DownstreamNotFound(downstream_id) => { - write!( - f, - "Downstream not found with downstream_id: 
{downstream_id}" - ) - } - DownstreamIdNotFound => { - write!(f, "Downstream id not found") - } - FutureTemplateNotPresent => { - write!(f, "Future template not present") - } - LastNewPrevhashNotFound => { - write!(f, "Last new prevhash not found") - } - VardiffNotFound(channel_id) => { - write!(f, "Vardiff not found for channel id: {channel_id:?}") - } - TxDataError => { - write!(f, "Transaction data error") - } - FrameConversionError => { - write!(f, "Could not convert message to frame") - } - FailedToCreateCustomJob => { - write!(f, "failed to create custom job") - } - AllocateMiningJobTokenSuccessCoinbaseOutputsError => { - write!( - f, - "AllocateMiningJobToken.Success coinbase outputs are not deserializable" - ) - } - ChannelManagerHasBadCoinbaseOutputs => { - write!(f, "Channel Manager coinbase outputs are not deserializable") - } - DeclaredJobHasBadCoinbaseOutputs => { - write!(f, "Declared job coinbase outputs are not deserializable") - } - ExtranonceSizeTooLarge => { - write!(f, "Extranonce size too large") - } - FailedToCreateGroupChannel(ref e) => { - write!(f, "Failed to create group channel: {e:?}") - } - ExtranoncePrefixFactoryError(e) => { - write!(f, "Failed to create ExtranoncePrefixFactory: {e:?}") - } - ChannelSv2(channel_error) => { - write!(f, "Channel error: {channel_error:?}") - } - } - } -} - -impl JDCError { - fn is_non_critical_variant(&self) -> bool { - matches!( - self, - JDCError::LastNewPrevhashNotFound - | JDCError::FutureTemplateNotPresent - | JDCError::LastDeclareJobNotFound(_) - | JDCError::ActiveJobNotFound(_) - | JDCError::TokenNotFound - | JDCError::TemplateNotFound(_) - | JDCError::DownstreamNotFoundWithChannelId(_) - | JDCError::DownstreamNotFound(_) - | JDCError::DownstreamIdNotFound - | JDCError::VardiffNotFound(_) - | JDCError::TxDataError - | JDCError::FrameConversionError - | JDCError::FailedToCreateCustomJob - ) - } - - /// Adds basic priority to error types: - /// todo: design a better error priority system. 
- pub fn is_critical(&self) -> bool { - if self.is_non_critical_variant() { - tracing::error!("Non-critical error: {self}"); - return false; - } - - true - } -} - -impl From for JDCError { - fn from(e: ParserError) -> Self { - JDCError::Parser(e) - } -} - -impl From for JDCError { - fn from(e: binary_sv2::Error) -> Self { - JDCError::BinarySv2(e) - } -} - -impl From for JDCError { - fn from(e: noise_sv2::Error) -> Self { - JDCError::CodecNoise(e) - } -} - -impl From for JDCError { - fn from(e: framing_sv2::Error) -> Self { - JDCError::FramingSv2(e) - } -} - -impl From for JDCError { - fn from(e: std::io::Error) -> Self { - JDCError::Io(e) - } -} - -impl From for JDCError { - fn from(e: std::num::ParseIntError) -> Self { - JDCError::ParseInt(e) - } -} - -impl From for JDCError { - fn from(e: ConfigError) -> Self { - JDCError::BadConfigDeserialize(e) - } -} - -impl From for JDCError { - fn from(e: async_channel::RecvError) -> Self { - JDCError::ChannelErrorReceiver(e) - } -} - -impl From for JDCError { - fn from(e: tokio::sync::broadcast::error::RecvError) -> Self { - JDCError::TokioChannelErrorRecv(e) - } -} - -impl From for JDCError { - fn from(value: network_helpers::Error) -> Self { - JDCError::NetworkHelpersError(value) - } -} - -impl From for JDCError { - fn from(value: stratum_apps::stratum_core::bitcoin::consensus::encode::Error) -> Self { - JDCError::BitcoinEncodeError(value) - } -} - -impl From for JDCError { - fn from(_value: Elapsed) -> Self { - Self::Timeout - } -} - -impl HandlerErrorType for JDCError { - fn parse_error(error: ParserError) -> Self { - JDCError::Parser(error) - } - - fn unexpected_message(message_type: u8) -> Self { - JDCError::UnexpectedMessage(message_type) - } -} - -impl From for JDCError { - fn from(value: ExtendedChannelClientError) -> Self { - JDCError::ChannelSv2(ChannelSv2Error::ExtendedChannelClientSide(value)) - } -} - -impl From for JDCError { - fn from(value: ExtendedChannelServerError) -> Self { - 
JDCError::ChannelSv2(ChannelSv2Error::ExtendedChannelServerSide(value)) - } -} - -impl From for JDCError { - fn from(value: StandardChannelError) -> Self { - JDCError::ChannelSv2(ChannelSv2Error::StandardChannelServerSide(value)) - } -} diff --git a/roles/jd-client/src/lib/jd_mode.rs b/roles/jd-client/src/lib/jd_mode.rs deleted file mode 100644 index 0533afc718..0000000000 --- a/roles/jd-client/src/lib/jd_mode.rs +++ /dev/null @@ -1,61 +0,0 @@ -//! Global configuration for Job Declarator (JD) operating mode. -//! -//! This module defines different operating modes for the Job Declarator -//! and provides atomic accessors for setting and retrieving the current mode. -//! -//! Modes are stored in a global [`AtomicU8`] to allow safe concurrent access -//! across threads. -use std::sync::atomic::{AtomicU8, Ordering}; - -/// Operating modes for the Job Declarator. -#[repr(u8)] -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum JdMode { - /// Runs in Coinbase only mode. - CoinbaseOnly = 0, - /// Runs in Full template mode, - FullTemplate = 1, - /// Runs in solo mining mode, - SoloMining = 2, -} - -impl From for JdMode { - fn from(val: u8) -> Self { - match val { - 0 => JdMode::CoinbaseOnly, - 1 => JdMode::FullTemplate, - 2 => JdMode::SoloMining, - _ => JdMode::SoloMining, - } - } -} - -impl From for JdMode { - fn from(val: u32) -> Self { - match val { - 0 => JdMode::CoinbaseOnly, - 1 => JdMode::FullTemplate, - 2 => JdMode::SoloMining, - _ => JdMode::SoloMining, - } - } -} - -impl From for u8 { - fn from(mode: JdMode) -> Self { - mode as u8 - } -} - -/// Global atomic variable storing the current JD mode. -pub static JD_MODE: AtomicU8 = AtomicU8::new(JdMode::SoloMining as u8); - -/// Updates the global JD mode. -pub fn set_jd_mode(mode: JdMode) { - JD_MODE.store(mode as u8, Ordering::SeqCst); -} - -/// Returns the current global JD mode. 
-pub fn get_jd_mode() -> JdMode { - JD_MODE.load(Ordering::SeqCst).into() -} diff --git a/roles/jd-client/src/lib/job_declarator/message_handler.rs b/roles/jd-client/src/lib/job_declarator/message_handler.rs deleted file mode 100644 index 932e0645aa..0000000000 --- a/roles/jd-client/src/lib/job_declarator/message_handler.rs +++ /dev/null @@ -1,65 +0,0 @@ -use stratum_apps::stratum_core::{ - common_messages_sv2::{ - ChannelEndpointChanged, Reconnect, SetupConnectionError, SetupConnectionSuccess, - }, - handlers_sv2::HandleCommonMessagesFromServerAsync, -}; -use tracing::{info, warn}; - -use crate::{ - error::JDCError, - jd_mode::{set_jd_mode, JdMode}, - job_declarator::JobDeclarator, -}; - -impl HandleCommonMessagesFromServerAsync for JobDeclarator { - type Error = JDCError; - - async fn handle_setup_connection_success( - &mut self, - _server_id: Option, - msg: SetupConnectionSuccess, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - let jd_mode = match msg.flags { - 0 => JdMode::CoinbaseOnly, - 1 => JdMode::FullTemplate, - _ => JdMode::SoloMining, - }; - set_jd_mode(jd_mode); - - if jd_mode == JdMode::SoloMining { - return Err(JDCError::Shutdown); - } - - Ok(()) - } - - async fn handle_channel_endpoint_changed( - &mut self, - _server_id: Option, - msg: ChannelEndpointChanged, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - Ok(()) - } - - async fn handle_reconnect( - &mut self, - _server_id: Option, - msg: Reconnect<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - Ok(()) - } - - async fn handle_setup_connection_error( - &mut self, - _server_id: Option, - msg: SetupConnectionError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - Err(JDCError::Shutdown) - } -} diff --git a/roles/jd-client/src/lib/job_declarator/mod.rs b/roles/jd-client/src/lib/job_declarator/mod.rs deleted file mode 100644 index 3fc1fb6fea..0000000000 --- a/roles/jd-client/src/lib/job_declarator/mod.rs +++ /dev/null @@ -1,313 
+0,0 @@ -use std::{net::SocketAddr, sync::Arc}; - -use async_channel::{unbounded, Receiver, Sender}; -use stratum_apps::{ - custom_mutex::Mutex, - key_utils::Secp256k1PublicKey, - network_helpers::noise_stream::NoiseTcpStream, - stratum_core::{ - codec_sv2::HandshakeRole, framing_sv2, handlers_sv2::HandleCommonMessagesFromServerAsync, - noise_sv2::Initiator, - }, -}; -use tokio::{ - net::TcpStream, - sync::{broadcast, mpsc}, -}; -use tracing::{debug, error, info, warn}; - -use crate::{ - config::ConfigJDCMode, - error::JDCError, - status::{handle_error, Status, StatusSender}, - task_manager::TaskManager, - utils::{ - get_setup_connection_message_jds, protocol_message_type, spawn_io_tasks, Message, - MessageType, SV2Frame, ShutdownMessage, StdFrame, - }, -}; - -mod message_handler; - -/// Shared state for Job Declarator -pub struct JobDeclaratorData; - -/// Holds all channels required for Job Declarator communication. -#[derive(Clone)] -pub struct JobDeclaratorChannel { - channel_manager_sender: Sender, - channel_manager_receiver: Receiver, - jds_sender: Sender, - jds_receiver: Receiver, -} - -/// Manages the lifecycle and communication with a Job Declarator (JDS) -#[allow(warnings)] -#[derive(Clone)] -pub struct JobDeclarator { - /// Internal state - job_declarator_data: Arc>, - /// Messaging channels to/from the channel manager and JD. - job_declarator_channel: JobDeclaratorChannel, - /// Socket address of the Job Declarator server. - socket_address: SocketAddr, - /// Config JDC mode - mode: ConfigJDCMode, -} - -impl JobDeclarator { - /// Creates a new JobDeclarator instance by connecting and performing a Noise handshake. - /// - /// - Establishes TCP connection. - /// - Performs SV2 Noise handshake. - /// - Spawns background IO tasks for reading/writing frames. 
- pub async fn new( - upstreams: &(SocketAddr, SocketAddr, Secp256k1PublicKey, bool), - channel_manager_sender: Sender, - channel_manager_receiver: Receiver, - notify_shutdown: broadcast::Sender, - mode: ConfigJDCMode, - task_manager: Arc, - status_sender: Sender, - ) -> Result { - let (_, addr, pubkey, _) = upstreams; - info!("Connecting to JD Server at {addr}"); - let stream = tokio::time::timeout( - tokio::time::Duration::from_secs(5), - TcpStream::connect(addr), - ) - .await??; - info!("Connection established with JD Server at {addr} in mode: {mode:?}"); - let initiator = Initiator::from_raw_k(pubkey.into_bytes())?; - let (noise_stream_reader, noise_stream_writer) = - NoiseTcpStream::::new(stream, HandshakeRole::Initiator(initiator)) - .await? - .into_split(); - - let status_sender = StatusSender::JobDeclarator(status_sender); - let (inbound_tx, inbound_rx) = unbounded::(); - let (outbound_tx, outbound_rx) = unbounded::(); - - spawn_io_tasks( - task_manager, - noise_stream_reader, - noise_stream_writer, - outbound_rx, - inbound_tx, - notify_shutdown, - status_sender, - ); - let job_declarator_data = Arc::new(Mutex::new(JobDeclaratorData)); - let job_declarator_channel = JobDeclaratorChannel { - channel_manager_receiver, - channel_manager_sender, - jds_sender: outbound_tx, - jds_receiver: inbound_rx, - }; - Ok(JobDeclarator { - job_declarator_channel, - job_declarator_data, - socket_address: *addr, - mode, - }) - } - - /// Starts the JobDeclarator message loop. - /// - /// - Waits for shutdown signals. - /// - Handles incoming messages from Job Declarator and Channel Manager. - /// - Cleans up on termination. 
- pub async fn start( - mut self, - notify_shutdown: broadcast::Sender, - shutdown_complete_tx: mpsc::Sender<()>, - status_sender: Sender, - task_manager: Arc, - ) { - let status_sender = StatusSender::JobDeclarator(status_sender); - let mut shutdown_rx = notify_shutdown.subscribe(); - - if let Err(e) = self.setup_connection().await { - handle_error(&status_sender, e).await; - return; - } - - task_manager.spawn( - async move { - loop { - let mut self_clone_1 = self.clone(); - let self_clone_2 = self.clone(); - tokio::select! { - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Job Declarator: received shutdown signal."); - break; - } - Ok(ShutdownMessage::JobDeclaratorShutdownFallback(_)) => { - info!("Job Declarator: Received Job declarator shutdown."); - break; - } - Ok(ShutdownMessage::UpstreamShutdownFallback(_)) => { - info!("Job Declarator: Received Upstream shutdown."); - break; - } - Ok(ShutdownMessage::UpstreamShutdown(tx)) => { - info!("Job declarator shutdown requested"); - drop(tx); - break; - } - Ok(ShutdownMessage::JobDeclaratorShutdown(tx)) => { - info!("Job declarator shutdown requested"); - drop(tx); - break; - } - Err(e) => { - warn!(error = ?e, "Job Declarator: shutdown channel closed unexpectedly"); - break; - } - _ => {} - } - } - res = self_clone_1.handle_job_declarator_message() => { - if let Err(e) = res { - error!(error = ?e, "Job Declarator message handling failed"); - handle_error(&status_sender, e).await; - break; - } - } - res = self_clone_2.handle_channel_manager_message() => { - if let Err(e) = res { - error!(error = ?e, "Channel Manager message handling failed"); - handle_error(&status_sender, e).await; - break; - } - }, - } - } - drop(shutdown_complete_tx); - warn!("JobDeclarator: unified message loop exited."); - }, - ); - } - - /// Performs SV2 setup connection handshake with Job Declarator server. - /// - /// - Sends `SetupConnection` message. 
- /// - Waits for and validates server response. - /// - Completes SV2 protocol handshake. - pub async fn setup_connection(&mut self) -> Result<(), JDCError> { - info!("Sending SetupConnection to JDS at {}", self.socket_address); - - let setup_connection = get_setup_connection_message_jds(&self.socket_address, &self.mode); - let sv2_frame: StdFrame = Message::Common(setup_connection.into()) - .try_into() - .map_err(|e| { - error!(error=?e, "Failed to serialize SetupConnection message."); - JDCError::CodecNoise( - stratum_apps::stratum_core::noise_sv2::Error::ExpectedIncomingHandshakeMessage, - ) - })?; - - if let Err(e) = self.job_declarator_channel.jds_sender.send(sv2_frame).await { - error!(error=?e, "Failed to send SetupConnection frame."); - return Err(JDCError::ChannelErrorSender); - } - debug!("SetupConnection frame sent successfully."); - - let mut incoming = self - .job_declarator_channel - .jds_receiver - .recv() - .await - .map_err(|e| { - error!(error=?e, "No handshake response received from Job declarator."); - JDCError::CodecNoise( - stratum_apps::stratum_core::noise_sv2::Error::ExpectedIncomingHandshakeMessage, - ) - })?; - - let message_type = incoming - .get_header() - .ok_or_else(|| { - error!("Handshake frame missing header."); - framing_sv2::Error::ExpectedHandshakeFrame - })? - .msg_type(); - - debug!(?message_type, "Processing handshake response."); - - self.handle_common_message_frame_from_server(None, message_type, incoming.payload()) - .await?; - - info!("Job declarator: SV2 handshake completed successfully."); - Ok(()) - } - - // Handles messages coming from the Channel Manager and forwards them to the Job Declarator. 
- async fn handle_channel_manager_message(&self) -> Result<(), JDCError> { - match self - .job_declarator_channel - .channel_manager_receiver - .recv() - .await - { - Ok(msg) => { - debug!("Forwarding message from channel manager to JDS."); - self.job_declarator_channel - .jds_sender - .send(msg) - .await - .map_err(|e| { - error!("Failed to send message to outbound channel: {:?}", e); - JDCError::ChannelErrorSender - })?; - } - Err(e) => { - warn!("Channel manager receiver closed or errored: {:?}", e); - } - } - Ok(()) - } - - // Handles messages received from the Job Declarator. - // - // - Forwards `JobDeclaration` messages to Channel Manager. - // - Processes `Common` messages via handler. - // - Rejects unsupported message types. - async fn handle_job_declarator_message(&mut self) -> Result<(), JDCError> { - let mut sv2_frame = self.job_declarator_channel.jds_receiver.recv().await?; - - debug!("Received SV2 frame from JDS."); - let Some(message_type) = sv2_frame.get_header().map(|m| m.msg_type()) else { - return Ok(()); - }; - - match protocol_message_type(message_type) { - MessageType::Common => { - info!(?message_type, "Handling common message from Upstream."); - self.handle_common_message_frame_from_server( - None, - message_type, - sv2_frame.payload(), - ) - .await?; - } - MessageType::JobDeclaration => { - self.job_declarator_channel - .channel_manager_sender - .send(sv2_frame) - .await - .map_err(|e| { - error!(error=?e, "Failed to send Job declaration message to channel manager."); - JDCError::ChannelErrorSender - })?; - } - _ => { - warn!("Received unsupported message type from Job declarator: {message_type}"); - } - } - - Ok(()) - } -} diff --git a/roles/jd-client/src/lib/mod.rs b/roles/jd-client/src/lib/mod.rs deleted file mode 100644 index c228508f85..0000000000 --- a/roles/jd-client/src/lib/mod.rs +++ /dev/null @@ -1,473 +0,0 @@ -use std::{net::SocketAddr, sync::Arc, time::Duration}; - -use async_channel::{unbounded, Receiver, Sender}; -use 
stratum_apps::{key_utils::Secp256k1PublicKey, stratum_core::bitcoin::consensus::Encodable}; -use tokio::sync::{broadcast, mpsc}; -use tracing::{debug, info, warn}; - -use crate::{ - channel_manager::ChannelManager, - config::{ConfigJDCMode, JobDeclaratorClientConfig}, - error::JDCError, - jd_mode::{set_jd_mode, JdMode}, - job_declarator::JobDeclarator, - status::{State, Status}, - task_manager::TaskManager, - template_receiver::TemplateReceiver, - upstream::Upstream, - utils::{SV2Frame, ShutdownMessage, UpstreamState}, -}; - -mod channel_manager; -pub mod config; -mod downstream; -pub mod error; -pub mod jd_mode; -mod job_declarator; -mod status; -mod task_manager; -mod template_receiver; -mod upstream; -pub mod utils; - -/// Represent Job Declarator Client -#[derive(Clone)] -pub struct JobDeclaratorClient { - config: JobDeclaratorClientConfig, - notify_shutdown: broadcast::Sender, -} - -impl JobDeclaratorClient { - /// Creates a new [`JobDeclaratorClient`] instance. - pub fn new(config: JobDeclaratorClientConfig) -> Self { - let (notify_shutdown, _) = tokio::sync::broadcast::channel::(100); - Self { - config, - notify_shutdown, - } - } - - /// Starts the Job Declarator Client (JDC) main loop. - pub async fn start(&self) { - info!( - "Job declarator client starting... 
setting up subsystems, User Identity: {}", - self.config.user_identity() - ); - - let miner_coinbase_outputs = vec![self.config.get_txout()]; - let mut encoded_outputs = vec![]; - - miner_coinbase_outputs - .consensus_encode(&mut encoded_outputs) - .expect("Invalid coinbase output in config"); - - let notify_shutdown = self.notify_shutdown.clone(); - let (shutdown_complete_tx, mut shutdown_complete_rx) = mpsc::channel::<()>(1); - let task_manager = Arc::new(TaskManager::new()); - - let (status_sender, status_receiver) = async_channel::unbounded::(); - - let (channel_manager_to_upstream_sender, channel_manager_to_upstream_receiver) = - unbounded::(); - let (upstream_to_channel_manager_sender, upstream_to_channel_manager_receiver) = - unbounded::(); - - let (channel_manager_to_jd_sender, channel_manager_to_jd_receiver) = - unbounded::(); - let (jd_to_channel_manager_sender, jd_to_channel_manager_receiver) = - unbounded::(); - - let (channel_manager_to_downstream_sender, _channel_manager_to_downstream_receiver) = - broadcast::channel(10); - let (downstream_to_channel_manager_sender, downstream_to_channel_manager_receiver) = - unbounded(); - - let (channel_manager_to_tp_sender, channel_manager_to_tp_receiver) = - unbounded::(); - let (tp_to_channel_manager_sender, tp_to_channel_manager_receiver) = - unbounded::(); - - debug!("Channels initialized."); - - let channel_manager = ChannelManager::new( - self.config.clone(), - channel_manager_to_upstream_sender.clone(), - upstream_to_channel_manager_receiver.clone(), - channel_manager_to_jd_sender.clone(), - jd_to_channel_manager_receiver.clone(), - channel_manager_to_tp_sender.clone(), - tp_to_channel_manager_receiver.clone(), - channel_manager_to_downstream_sender.clone(), - downstream_to_channel_manager_receiver, - status_sender.clone(), - encoded_outputs.clone(), - ) - .await - .unwrap(); - - let channel_manager_clone = channel_manager.clone(); - - // Initialize the template Receiver - let tp_address = 
self.config.tp_address().to_string(); - let tp_pubkey = self.config.tp_authority_public_key().copied(); - - let template_receiver = TemplateReceiver::new( - tp_address.clone(), - tp_pubkey, - channel_manager_to_tp_receiver, - tp_to_channel_manager_sender, - notify_shutdown.clone(), - task_manager.clone(), - status_sender.clone(), - ) - .await - .unwrap(); - - info!("Template provider setup done"); - - let notify_shutdown_cl = notify_shutdown.clone(); - let status_sender_cl = status_sender.clone(); - let task_manager_cl = task_manager.clone(); - - template_receiver - .start( - tp_address, - notify_shutdown_cl, - status_sender_cl, - task_manager_cl, - encoded_outputs.clone(), - ) - .await; - - let mut upstream_addresses: Vec<_> = self - .config - .upstreams() - .iter() - .map(|u| { - let pool_addr = SocketAddr::new( - u.pool_address.parse().expect("Invalid pool address"), - u.pool_port, - ); - let jd_addr = SocketAddr::new( - u.jds_address.parse().expect("Invalid JD address"), - u.jds_port, - ); - (pool_addr, jd_addr, u.authority_pubkey, false) - }) - .collect(); - - channel_manager - .start( - notify_shutdown.clone(), - status_sender.clone(), - task_manager.clone(), - ) - .await; - - info!("Attempting to initialize upstream..."); - - match self - .initialize_jd( - &mut upstream_addresses, - channel_manager_to_upstream_receiver.clone(), - upstream_to_channel_manager_sender.clone(), - channel_manager_to_jd_receiver.clone(), - jd_to_channel_manager_sender.clone(), - notify_shutdown.clone(), - status_sender.clone(), - self.config.mode.clone(), - task_manager.clone(), - ) - .await - { - Ok((upstream, job_declarator)) => { - upstream - .start( - self.config.min_supported_version(), - self.config.max_supported_version(), - notify_shutdown.clone(), - shutdown_complete_tx.clone(), - status_sender.clone(), - task_manager.clone(), - ) - .await; - - job_declarator - .start( - notify_shutdown.clone(), - shutdown_complete_tx, - status_sender.clone(), - task_manager.clone(), - ) - 
.await; - - channel_manager_clone - .upstream_state - .set(UpstreamState::NoChannel); - _ = channel_manager_clone.allocate_tokens(1).await; - } - Err(e) => { - tracing::error!("Failed to initialize upstream: {:?}", e); - set_jd_mode(jd_mode::JdMode::SoloMining); - } - }; - - _ = channel_manager_clone - .clone() - .start_downstream_server( - *self.config.authority_public_key(), - *self.config.authority_secret_key(), - self.config.cert_validity_sec(), - *self.config.listening_address(), - task_manager.clone(), - notify_shutdown.clone(), - status_sender.clone(), - downstream_to_channel_manager_sender.clone(), - channel_manager_to_downstream_sender.clone(), - ) - .await; - - info!("Spawning status listener task..."); - let notify_shutdown_clone = notify_shutdown.clone(); - - loop { - tokio::select! { - _ = tokio::signal::ctrl_c() => { - info!("Ctrl+C received β€” initiating graceful shutdown..."); - let _ = notify_shutdown_clone.send(ShutdownMessage::ShutdownAll); - break; - } - message = status_receiver.recv() => { - if let Ok(status) = message { - match status.state { - State::DownstreamShutdown{downstream_id,..} => { - warn!("Downstream {downstream_id:?} disconnected β€” Channel manager."); - let _ = notify_shutdown_clone.send(ShutdownMessage::DownstreamShutdown(downstream_id)); - } - State::TemplateReceiverShutdown(_) => { - warn!("Template Receiver shutdown requested β€” initiating full shutdown."); - let _ = notify_shutdown_clone.send(ShutdownMessage::ShutdownAll); - break; - } - State::ChannelManagerShutdown(_) => { - warn!("Channel Manager shutdown requested β€” initiating full shutdown."); - let _ = notify_shutdown_clone.send(ShutdownMessage::ShutdownAll); - break; - } - State::UpstreamShutdownFallback(_) | State::JobDeclaratorShutdownFallback(_) => { - warn!("Upstream/Job Declarator connection dropped β€” attempting reconnection..."); - let (tx, mut rx) = mpsc::channel::<()>(1); - let _ = 
notify_shutdown_clone.send(ShutdownMessage::UpstreamShutdownFallback((encoded_outputs.clone(), tx))); - set_jd_mode(JdMode::SoloMining); - shutdown_complete_rx.recv().await; - tracing::error!("Existing Upstream or JD instance taken out"); - rx.recv().await; - tracing::error!("All entities acknowledged Upstream fallback. Preparing fallback."); - - let (shutdown_complete_tx_fallback, shutdown_complete_rx_fallback) = mpsc::channel::<()>(1); - - shutdown_complete_rx = shutdown_complete_rx_fallback; - - info!("Attempting to initialize Jd and upstream..."); - - match self - .initialize_jd( - &mut upstream_addresses, - channel_manager_to_upstream_receiver.clone(), - upstream_to_channel_manager_sender.clone(), - channel_manager_to_jd_receiver.clone(), - jd_to_channel_manager_sender.clone(), - notify_shutdown.clone(), - status_sender.clone(), - self.config.mode.clone(), - task_manager.clone(), - ) - .await - { - Ok((upstream, job_declarator)) => { - upstream - .start( - self.config.min_supported_version(), - self.config.max_supported_version(), - notify_shutdown.clone(), - shutdown_complete_tx_fallback.clone(), - status_sender.clone(), - task_manager.clone(), - ) - .await; - - job_declarator - .start( - notify_shutdown.clone(), - shutdown_complete_tx_fallback, - status_sender.clone(), - task_manager.clone(), - ) - .await; - - channel_manager_clone.upstream_state.set(UpstreamState::NoChannel); - - _ = channel_manager_clone.allocate_tokens(1).await; - } - Err(e) => { - tracing::error!("Failed to initialize upstream: {:?}", e); - channel_manager_clone.upstream_state.set(UpstreamState::SoloMining); - set_jd_mode(jd_mode::JdMode::SoloMining); - info!("Fallback to solo mining mode"); - } - }; - - _ = channel_manager_clone.clone() - .start_downstream_server( - *self.config.authority_public_key(), - *self.config.authority_secret_key(), - self.config.cert_validity_sec(), - *self.config.listening_address(), - task_manager.clone(), - notify_shutdown.clone(), - status_sender.clone(), - 
downstream_to_channel_manager_sender.clone(), - channel_manager_to_downstream_sender.clone(), - ) - .await; - } - } - } - } - } - } - - warn!("Graceful shutdown"); - task_manager.abort_all().await; - - info!("Joining remaining tasks..."); - task_manager.join_all().await; - info!("JD Client shutdown complete."); - } - - /// Initializes an upstream pool + JD connection pair. - #[allow(clippy::too_many_arguments)] - pub async fn initialize_jd( - &self, - upstreams: &mut [(SocketAddr, SocketAddr, Secp256k1PublicKey, bool)], - channel_manager_to_upstream_receiver: Receiver, - upstream_to_channel_manager_sender: Sender, - channel_manager_to_jd_receiver: Receiver, - jd_to_channel_manager_sender: Sender, - notify_shutdown: broadcast::Sender, - status_sender: Sender, - mode: ConfigJDCMode, - task_manager: Arc, - ) -> Result<(Upstream, JobDeclarator), JDCError> { - const MAX_RETRIES: usize = 3; - let upstream_len = upstreams.len(); - for (i, upstream_addr) in upstreams.iter_mut().enumerate() { - info!( - "Trying upstream {} of {}: {:?}", - i + 1, - upstream_len, - upstream_addr - ); - - tokio::time::sleep(Duration::from_secs(1)).await; - - if upstream_addr.3 { - info!( - "Upstream previously marked as malicious, skipping initial attempt warnings." 
- ); - continue; - } - - for attempt in 1..=MAX_RETRIES { - info!("Connection attempt {}/{}...", attempt, MAX_RETRIES); - - match try_initialize_single( - upstream_addr, - upstream_to_channel_manager_sender.clone(), - channel_manager_to_upstream_receiver.clone(), - jd_to_channel_manager_sender.clone(), - channel_manager_to_jd_receiver.clone(), - notify_shutdown.clone(), - status_sender.clone(), - mode.clone(), - task_manager.clone(), - ) - .await - { - Ok(pair) => { - upstream_addr.3 = true; - return Ok(pair); - } - Err(e) => { - let (tx, mut rx) = mpsc::channel::<()>(1); - let _ = notify_shutdown.send(ShutdownMessage::JobDeclaratorShutdown(tx)); - rx.recv().await; - tracing::error!("All sparsed upstream and JDS connection is be terminated"); - tokio::time::sleep(Duration::from_secs(1)).await; - warn!( - "Attempt {}/{} failed for {:?}: {:?}", - attempt, MAX_RETRIES, upstream_addr, e - ); - if attempt == MAX_RETRIES { - warn!( - "Max retries reached for {:?}, moving to next upstream", - upstream_addr - ); - } - } - } - } - upstream_addr.3 = true; - } - - tracing::error!("All upstreams failed after {} retries each", MAX_RETRIES); - Err(JDCError::Shutdown) - } -} - -// Attempts to initialize a single upstream (pool + JDS pair). 
-#[allow(clippy::too_many_arguments)] -async fn try_initialize_single( - upstream_addr: &(SocketAddr, SocketAddr, Secp256k1PublicKey, bool), - upstream_to_channel_manager_sender: Sender, - channel_manager_to_upstream_receiver: Receiver, - jd_to_channel_manager_sender: Sender, - channel_manager_to_jd_receiver: Receiver, - notify_shutdown: broadcast::Sender, - status_sender: Sender, - mode: ConfigJDCMode, - task_manager: Arc, -) -> Result<(Upstream, JobDeclarator), JDCError> { - info!("Upstream connection in-progress at initialize single"); - let upstream = Upstream::new( - upstream_addr, - upstream_to_channel_manager_sender, - channel_manager_to_upstream_receiver, - notify_shutdown.clone(), - task_manager.clone(), - status_sender.clone(), - ) - .await?; - - info!("Upstream connection done at initialize single"); - - let job_declarator = JobDeclarator::new( - upstream_addr, - jd_to_channel_manager_sender, - channel_manager_to_jd_receiver, - notify_shutdown, - mode, - task_manager.clone(), - status_sender.clone(), - ) - .await?; - - Ok((upstream, job_declarator)) -} - -impl Drop for JobDeclaratorClient { - fn drop(&mut self) { - info!("JobDeclaratorClient dropped"); - let _ = self.notify_shutdown.send(ShutdownMessage::ShutdownAll); - } -} diff --git a/roles/jd-client/src/lib/status.rs b/roles/jd-client/src/lib/status.rs deleted file mode 100644 index feea1d2015..0000000000 --- a/roles/jd-client/src/lib/status.rs +++ /dev/null @@ -1,154 +0,0 @@ -//! Status reporting and error propagation Utility. -//! -//! This module provides mechanisms for communicating shutdown events and -//! component state changes across the system. Each component (downstream, -//! upstream, job declarator, template receiver, channel manager) can send -//! and receive status updates via typed channels. Errors are automatically -//! converted into shutdown signals, allowing coordinated teardown of tasks. 
- -use tracing::{debug, error, warn}; - -use crate::error::JDCError; - -/// Sender type for propagating status updates from different system components. -#[derive(Debug, Clone)] -pub enum StatusSender { - /// Status updates from a specific downstream connection. - Downstream { - downstream_id: u32, - tx: async_channel::Sender, - }, - /// Status updates from the template receiver. - TemplateReceiver(async_channel::Sender), - /// Status updates from the channel manager. - ChannelManager(async_channel::Sender), - /// Status updates from the upstream. - Upstream(async_channel::Sender), - /// Status updates from the job declarator. - JobDeclarator(async_channel::Sender), -} - -/// High-level identifier of a component type that can send status updates. -#[derive(Debug, PartialEq, Eq)] -pub enum StatusType { - /// A downstream connection identified by its ID. - Downstream(u32), - /// The template receiver component. - TemplateReceiver, - /// The channel manager component. - ChannelManager, - /// The upstream component. - Upstream, - /// The job declarator component. - JobDeclarator, -} - -impl From<&StatusSender> for StatusType { - fn from(value: &StatusSender) -> Self { - match value { - StatusSender::ChannelManager(_) => StatusType::ChannelManager, - StatusSender::Downstream { - downstream_id, - tx: _, - } => StatusType::Downstream(*downstream_id), - StatusSender::JobDeclarator(_) => StatusType::JobDeclarator, - StatusSender::Upstream(_) => StatusType::Upstream, - StatusSender::TemplateReceiver(_) => StatusType::TemplateReceiver, - } - } -} - -impl StatusSender { - /// Sends a status update for the associated component. 
- pub async fn send(&self, status: Status) -> Result<(), async_channel::SendError> { - match self { - Self::Downstream { downstream_id, tx } => { - debug!( - "Sending status from Downstream [{}]: {:?}", - downstream_id, status.state - ); - tx.send(status).await - } - Self::TemplateReceiver(tx) => { - debug!("Sending status from TemplateReceiver: {:?}", status.state); - tx.send(status).await - } - Self::ChannelManager(tx) => { - debug!("Sending status from ChannelManager: {:?}", status.state); - tx.send(status).await - } - Self::Upstream(tx) => { - debug!("Sending status from Upstream: {:?}", status.state); - tx.send(status).await - } - Self::JobDeclarator(tx) => { - debug!("Sending status from JobDeclarator: {:?}", status.state); - tx.send(status).await - } - } - } -} - -/// Represents the state of a component, typically triggered by an error or shutdown event. -#[derive(Debug)] -pub enum State { - /// A downstream connection has shut down with a reason. - DownstreamShutdown { - downstream_id: u32, - reason: JDCError, - }, - /// Template receiver has shut down with a reason. - TemplateReceiverShutdown(JDCError), - /// Job declarator has shut down during fallback with a reason. - JobDeclaratorShutdownFallback(JDCError), - /// Channel manager has shut down with a reason. - ChannelManagerShutdown(JDCError), - /// Upstream has shut down during fallback with a reason. - UpstreamShutdownFallback(JDCError), -} - -/// Wrapper around a component’s state, sent as status updates across the system. -#[derive(Debug)] -pub struct Status { - /// The current state being reported. - pub state: State, -} - -/// Sends a shutdown status for the given component, logging the error cause. -async fn send_status(sender: &StatusSender, error: JDCError) { - let state = match sender { - StatusSender::Downstream { downstream_id, .. 
} => { - warn!("Downstream [{downstream_id}] shutting down due to error: {error:?}"); - State::DownstreamShutdown { - downstream_id: *downstream_id, - reason: error, - } - } - StatusSender::TemplateReceiver(_) => { - warn!("Template Receiver shutting down due to error: {error:?}"); - State::TemplateReceiverShutdown(error) - } - StatusSender::ChannelManager(_) => { - warn!("ChannelManager shutting down due to error: {error:?}"); - State::ChannelManagerShutdown(error) - } - StatusSender::Upstream(_) => { - warn!("Upstream shutting down due to error: {error:?}"); - State::UpstreamShutdownFallback(error) - } - StatusSender::JobDeclarator(_) => { - warn!("Job declarator shutting down due to error: {error:?}"); - State::JobDeclaratorShutdownFallback(error) - } - }; - - if let Err(e) = sender.send(Status { state }).await { - tracing::error!("Failed to send status update from {sender:?}: {e:?}"); - } -} - -/// Logs an error and propagates a corresponding shutdown status for the component. -pub async fn handle_error(sender: &StatusSender, e: JDCError) { - error!("Error in {:?}: {:?}", sender, e); - send_status(sender, e).await; -} diff --git a/roles/jd-client/src/lib/task_manager.rs b/roles/jd-client/src/lib/task_manager.rs deleted file mode 100644 index 95435a020c..0000000000 --- a/roles/jd-client/src/lib/task_manager.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::sync::Mutex as StdMutex; -use tokio::task::JoinHandle; - -/// Manages a collection of spawned tokio tasks. -/// -/// This struct provides a centralized way to spawn, track, and manage the lifecycle -/// of async tasks. It maintains a list of join handles that can -/// be used to wait for all tasks to complete or abort them during shutdown. -pub struct TaskManager { - tasks: StdMutex>>, -} - -impl Default for TaskManager { - fn default() -> Self { - Self::new() - } -} - -impl TaskManager { - /// Creates a new TaskManager instance. - /// - /// Initializes an empty task manager ready to spawn and track tasks. 
- pub fn new() -> Self { - Self { - tasks: StdMutex::new(Vec::new()), - } - } - - /// Spawns a new async task and adds it to the managed collection. - /// - /// The task will be tracked by this manager and can be waited for or aborted - /// using the other methods. - /// - /// # Arguments - /// * `fut` - The future to spawn as a task - pub fn spawn(&self, fut: F) - where - F: std::future::Future + Send + 'static, - { - let handle = tokio::spawn(async move { - fut.await; - }); - self.tasks.lock().unwrap().push(handle); - } - - /// Waits for all managed tasks to complete. - /// - /// This method will block until all tasks that were spawned through this - /// manager have finished executing. Tasks are joined in reverse order - /// (most recently spawned first). - pub async fn join_all(&self) { - let handles = { - let mut tasks = self.tasks.lock().unwrap(); - std::mem::take(&mut *tasks) - }; - - for handle in handles { - let _ = handle.await; - } - } - - /// Aborts all managed tasks. - /// - /// This method immediately cancels all tasks that were spawned through this - /// manager. The tasks will be terminated without waiting for them to complete. - pub async fn abort_all(&self) { - let mut tasks = self.tasks.lock().unwrap(); - for handle in tasks.drain(..) 
{ - handle.abort(); - } - } -} diff --git a/roles/jd-client/src/lib/template_receiver/message_handler.rs b/roles/jd-client/src/lib/template_receiver/message_handler.rs deleted file mode 100644 index 61a8f56a4f..0000000000 --- a/roles/jd-client/src/lib/template_receiver/message_handler.rs +++ /dev/null @@ -1,50 +0,0 @@ -use stratum_apps::stratum_core::{ - common_messages_sv2::{ - ChannelEndpointChanged, Reconnect, SetupConnectionError, SetupConnectionSuccess, - }, - handlers_sv2::HandleCommonMessagesFromServerAsync, -}; -use tracing::{info, warn}; - -use crate::{error::JDCError, template_receiver::TemplateReceiver}; - -impl HandleCommonMessagesFromServerAsync for TemplateReceiver { - type Error = JDCError; - - async fn handle_setup_connection_success( - &mut self, - _server_id: Option, - msg: SetupConnectionSuccess, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - Ok(()) - } - - async fn handle_channel_endpoint_changed( - &mut self, - _server_id: Option, - msg: ChannelEndpointChanged, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - Ok(()) - } - - async fn handle_reconnect( - &mut self, - _server_id: Option, - msg: Reconnect<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - Ok(()) - } - - async fn handle_setup_connection_error( - &mut self, - _server_id: Option, - msg: SetupConnectionError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - Err(JDCError::Shutdown) - } -} diff --git a/roles/jd-client/src/lib/template_receiver/mod.rs b/roles/jd-client/src/lib/template_receiver/mod.rs deleted file mode 100644 index 9512680b7f..0000000000 --- a/roles/jd-client/src/lib/template_receiver/mod.rs +++ /dev/null @@ -1,426 +0,0 @@ -//! Template Receiver module -//! -//! This module defines the [`TemplateReceiver`] struct, which manages a connection -//! to a Template Provider (TP). -//! -//! Responsibilities: -//! - Establish TCP + Noise encrypted connection to the template provider -//! 
- Perform `SetupConnection` handshake -//! - Forward SV2 `TemplateDistribution` messages to the channel manager -//! - Forward messages from the channel manager upstream to the template provider -//! - Send [`CoinbaseOutputConstraints`] to the template provider - -use std::{net::SocketAddr, sync::Arc}; - -use async_channel::{unbounded, Receiver, Sender}; -use stratum_apps::{ - custom_mutex::Mutex, - key_utils::Secp256k1PublicKey, - network_helpers::noise_stream::NoiseTcpStream, - stratum_core::{ - bitcoin::{ - self, absolute::LockTime, transaction::Version, OutPoint, ScriptBuf, Sequence, - Transaction, TxIn, TxOut, Witness, - }, - codec_sv2::HandshakeRole, - framing_sv2, - handlers_sv2::HandleCommonMessagesFromServerAsync, - noise_sv2::Initiator, - parsers_sv2::{AnyMessage, TemplateDistribution}, - template_distribution_sv2::CoinbaseOutputConstraints, - }, -}; -use tokio::{net::TcpStream, sync::broadcast}; -use tracing::{debug, error, info, warn}; - -use crate::{ - error::JDCError, - status::{handle_error, Status, StatusSender}, - task_manager::TaskManager, - utils::{ - get_setup_connection_message_tp, protocol_message_type, spawn_io_tasks, Message, - MessageType, SV2Frame, ShutdownMessage, StdFrame, - }, -}; - -mod message_handler; - -/// Placeholder for future template receiver–specific state. -pub struct TemplateReceiverData; - -/// Holds communication channels between the template receiver, channel manager, -/// and upstream template provider. 
-/// -/// - `channel_manager_sender` β†’ sends frames to the channel manager -/// - `channel_manager_receiver` β†’ receives frames from the channel manager -/// - `outbound_tx` β†’ sends frames upstream to the template provider -/// - `inbound_rx` β†’ receives frames from the template provider -#[derive(Clone)] -pub struct TemplateReceiverChannel { - channel_manager_sender: Sender, - channel_manager_receiver: Receiver, - tp_sender: Sender, - tp_receiver: Receiver, -} - -/// Manages communication with a Stratum V2 Template Provider. -/// -/// Responsibilities: -/// - Establishes TCP + Noise connection to TP -/// - Performs handshake (`SetupConnection`) -/// - Sends [`CoinbaseOutputConstraints`] to TP -/// - Routes messages between TP and channel manager -/// - Handles shutdown/fallback notifications -#[allow(warnings)] -#[derive(Clone)] -pub struct TemplateReceiver { - /// Internal state - template_receiver_data: Arc>, - /// Messaging channels to/from the channel manager and TP. - template_receiver_channel: TemplateReceiverChannel, - /// Address of the template provider (string form) - tp_address: String, -} - -impl TemplateReceiver { - /// Establish a new connection to a Template Provider. - /// - /// - Opens a TCP connection - /// - Performs Noise handshake - /// - Spawns IO tasks for inbound/outbound frames - /// - /// Retries up to 3 times before returning [`JDCError::Shutdown`]. 
- pub async fn new( - tp_address: String, - public_key: Option, - channel_manager_receiver: Receiver, - channel_manager_sender: Sender, - notify_shutdown: broadcast::Sender, - task_manager: Arc, - status_sender: Sender, - ) -> Result { - const MAX_RETRIES: usize = 3; - - for attempt in 1..=MAX_RETRIES { - info!(attempt, MAX_RETRIES, "Connecting to template provider"); - - let initiator = match public_key { - Some(pub_key) => { - debug!(attempt, "Using public key for initiator handshake"); - Initiator::from_raw_k(pub_key.into_bytes()) - } - None => { - debug!(attempt, "Using anonymous initiator (no public key)"); - Initiator::without_pk() - } - }?; - - match TcpStream::connect(tp_address.as_str()).await { - Ok(stream) => { - info!( - attempt, - "TCP connection established, starting Noise handshake" - ); - - match NoiseTcpStream::::new( - stream, - HandshakeRole::Initiator(initiator), - ) - .await - { - Ok(noise_stream) => { - info!(attempt, "Noise handshake completed successfully"); - - let (noise_stream_reader, noise_stream_writer) = - noise_stream.into_split(); - - let status_sender = StatusSender::TemplateReceiver(status_sender); - let (inbound_tx, inbound_rx) = unbounded::(); - let (outbound_tx, outbound_rx) = unbounded::(); - - info!(attempt, "Spawning IO tasks for template receiver"); - spawn_io_tasks( - task_manager.clone(), - noise_stream_reader, - noise_stream_writer, - outbound_rx, - inbound_tx, - notify_shutdown, - status_sender, - ); - - let template_receiver_data = Arc::new(Mutex::new(TemplateReceiverData)); - let template_receiver_channel = TemplateReceiverChannel { - channel_manager_receiver, - channel_manager_sender, - tp_receiver: inbound_rx, - tp_sender: outbound_tx, - }; - - info!(attempt, "TemplateReceiver initialized successfully"); - return Ok(TemplateReceiver { - template_receiver_channel, - template_receiver_data, - tp_address, - }); - } - Err(e) => { - error!(attempt, error = ?e, "Noise handshake failed"); - } - } - } - Err(e) => { - 
warn!(attempt, MAX_RETRIES, error = ?e, "Failed to connect to template provider"); - } - } - - if attempt < MAX_RETRIES { - debug!(attempt, "Retrying connection after backoff"); - tokio::time::sleep(std::time::Duration::from_secs(2)).await; - } - } - - error!("Exhausted all connection attempts, shutting down TemplateReceiver"); - Err(JDCError::Shutdown) - } - - /// Start unified message loop for template receiver. - /// - /// Responsibilities: - /// - Run handshake (`setup_connection`) - /// - Send [`CoinbaseOutputConstraints`] - /// - Handle: - /// - Messages from template provider - /// - Messages from channel manager - /// - Shutdown signals (upstream/job-declarator fallback) - pub async fn start( - mut self, - socket_address: String, - notify_shutdown: broadcast::Sender, - status_sender: Sender, - task_manager: Arc, - coinbase_outputs: Vec, - ) { - let status_sender = StatusSender::TemplateReceiver(status_sender); - let mut shutdown_rx = notify_shutdown.subscribe(); - - info!("Initialized state for starting template receiver"); - _ = self.setup_connection(socket_address).await; - - _ = self.coinbase_constraints(coinbase_outputs).await; - - info!("Setup Connection done. connection with template receiver is now done"); - task_manager.spawn( - async move { - loop { - let mut self_clone_1 = self.clone(); - let self_clone_2 = self.clone(); - tokio::select! 
{ - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Template Receiver: received shutdown signal"); - break; - }, - Ok(ShutdownMessage::UpstreamShutdownFallback((coinbase_outputs,tx))) => { - info!("Template provider: Received Upstream shutdown."); - _ = self.coinbase_constraints(coinbase_outputs).await; - drop(tx); - } - Ok(ShutdownMessage::JobDeclaratorShutdownFallback((coinbase_outputs, tx))) => { - info!("Template provider: Received Job declarator shutdown."); - _ = self.coinbase_constraints(coinbase_outputs).await; - drop(tx); - } - Err(e) => { - warn!(error = ?e, "Template Receiver: shutdown channel closed unexpectedly"); - break; - } - _ => {} - } - } - res = self_clone_1.handle_template_provider_message() => { - if let Err(e) = res { - error!("TemplateReceiver template provider handler failed: {e:?}"); - handle_error(&status_sender, e).await; - break; - } - } - res = self_clone_2.handle_channel_manager_message() => { - if let Err(e) = res { - error!("TemplateReceiver channel manager handler failed: {e:?}"); - handle_error(&status_sender, e).await; - break; - } - }, - } - } - warn!("TemplateReceiver: unified message loop exited."); - }, - ); - } - - /// Handle inbound messages from the template provider. - /// - /// Routes: - /// - `Common` messages β†’ handled locally - /// - `TemplateDistribution` messages β†’ forwarded to channel manager - /// - Unsupported messages β†’ logged and ignored - pub async fn handle_template_provider_message(&mut self) -> Result<(), JDCError> { - let mut sv2_frame = self.template_receiver_channel.tp_receiver.recv().await?; - - debug!("Received SV2 frame from Template provider."); - let Some(message_type) = sv2_frame.get_header().map(|m| m.msg_type()) else { - return Ok(()); - }; - match protocol_message_type(message_type) { - MessageType::Common => { - info!( - ?message_type, - "Handling common message from Template provider." 
- ); - self.handle_common_message_frame_from_server( - None, - message_type, - sv2_frame.payload(), - ) - .await?; - } - MessageType::TemplateDistribution => { - self.template_receiver_channel - .channel_manager_sender - .send(sv2_frame) - .await - .map_err(|e| { - error!(error=?e, "Failed to send template distribution message to channel manager."); - JDCError::ChannelErrorSender - })?; - } - _ => { - warn!("Received unsupported message type from template provider: {message_type}"); - } - } - Ok(()) - } - - /// Handle messages from channel manager β†’ template provider. - /// - /// Forwards outbound frames upstream - pub async fn handle_channel_manager_message(&self) -> Result<(), JDCError> { - let msg = self - .template_receiver_channel - .channel_manager_receiver - .recv() - .await?; - debug!("Forwarding message from channel manager to outbound_tx"); - self.template_receiver_channel - .tp_sender - .send(msg) - .await - .map_err(|_| JDCError::ChannelErrorSender)?; - - Ok(()) - } - - /// Build and send [`CoinbaseOutputConstraints`] upstream TP. 
- pub async fn coinbase_constraints( - &mut self, - coinbase_outputs: Vec, - ) -> Result<(), JDCError> { - debug!( - "Deserializing coinbase outputs ({} bytes)", - coinbase_outputs.len() - ); - let outputs: Vec = bitcoin::consensus::deserialize(&coinbase_outputs)?; - - let max_size: u32 = outputs.iter().map(|o| o.size() as u32).sum(); - debug!( - max_size, - outputs_count = outputs.len(), - "Calculated max coinbase output size" - ); - - let dummy_coinbase = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { - previous_output: OutPoint::null(), - script_sig: ScriptBuf::new(), - sequence: Sequence::MAX, - witness: Witness::from(vec![vec![0; 32]]), - }], - output: outputs, - }; - - let max_sigops = dummy_coinbase.total_sigop_cost(|_| None) as u16; - debug!(max_sigops, "Calculated max sigops for coinbase"); - - let constraints = CoinbaseOutputConstraints { - coinbase_output_max_additional_size: max_size, - coinbase_output_max_additional_sigops: max_sigops, - }; - - let msg = AnyMessage::TemplateDistribution( - TemplateDistribution::CoinbaseOutputConstraints(constraints), - ); - - let frame: StdFrame = msg.try_into()?; - info!("Sending CoinbaseOutputConstraints message upstream"); - self.template_receiver_channel - .tp_sender - .send(frame) - .await - .map_err(|_| { - error!("Failed to send CoinbaseOutputConstraints message upstream"); - JDCError::ChannelErrorSender - })?; - - Ok(()) - } - - // Performs the initial handshake with template provider. 
- pub async fn setup_connection(&mut self, addr: String) -> Result<(), JDCError> { - let socket: SocketAddr = addr.parse().map_err(|_| { - error!(%addr, "Invalid socket address"); - JDCError::InvalidSocketAddress(addr.clone()) - })?; - - info!(%socket, "Building setup connection message for upstream"); - let setup_msg = get_setup_connection_message_tp(socket); - let frame: StdFrame = Message::Common(setup_msg.into()).try_into()?; - - info!("Sending setup connection message to upstream"); - self.template_receiver_channel - .tp_sender - .send(frame) - .await - .map_err(|_| { - error!("Failed to send setup connection message upstream"); - JDCError::ChannelErrorSender - })?; - - info!("Waiting for upstream handshake response"); - let mut incoming: StdFrame = self - .template_receiver_channel - .tp_receiver - .recv() - .await - .map_err(|e| { - error!(?e, "Upstream connection closed during handshake"); - JDCError::CodecNoise( - stratum_apps::stratum_core::noise_sv2::Error::ExpectedIncomingHandshakeMessage, - ) - })?; - - let msg_type = incoming - .get_header() - .ok_or(framing_sv2::Error::ExpectedHandshakeFrame)? 
- .msg_type(); - debug!(?msg_type, "Received upstream handshake response"); - - self.handle_common_message_frame_from_server(None, msg_type, incoming.payload()) - .await?; - info!("Handshake with upstream completed successfully"); - Ok(()) - } -} diff --git a/roles/jd-client/src/lib/upstream/message_handler.rs b/roles/jd-client/src/lib/upstream/message_handler.rs deleted file mode 100644 index ac7d663fb6..0000000000 --- a/roles/jd-client/src/lib/upstream/message_handler.rs +++ /dev/null @@ -1,50 +0,0 @@ -use stratum_apps::stratum_core::{ - common_messages_sv2::{ - ChannelEndpointChanged, Reconnect, SetupConnectionError, SetupConnectionSuccess, - }, - handlers_sv2::HandleCommonMessagesFromServerAsync, -}; -use tracing::{info, warn}; - -use crate::{error::JDCError, upstream::Upstream}; - -impl HandleCommonMessagesFromServerAsync for Upstream { - type Error = JDCError; - - async fn handle_setup_connection_success( - &mut self, - _server_id: Option, - msg: SetupConnectionSuccess, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - Ok(()) - } - - async fn handle_channel_endpoint_changed( - &mut self, - _server_id: Option, - msg: ChannelEndpointChanged, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - Ok(()) - } - - async fn handle_reconnect( - &mut self, - _server_id: Option, - msg: Reconnect<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - Ok(()) - } - - async fn handle_setup_connection_error( - &mut self, - _server_id: Option, - msg: SetupConnectionError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - Err(JDCError::Shutdown) - } -} diff --git a/roles/jd-client/src/lib/upstream/mod.rs b/roles/jd-client/src/lib/upstream/mod.rs deleted file mode 100644 index 15d9903c1a..0000000000 --- a/roles/jd-client/src/lib/upstream/mod.rs +++ /dev/null @@ -1,322 +0,0 @@ -//! Upstream module -//! -//! This module defines the [`Upstream`] struct, which manages communication -//! 
with an upstream SV2 server (e.g., pool). -//! -//! Responsibilities: -//! - Establish a TCP + Noise encrypted connection to upstream -//! - Perform `SetupConnection` handshake -//! - Forward SV2 mining messages between upstream and channel manager -//! - Handle common messages from upstream - -use std::{net::SocketAddr, sync::Arc}; - -use async_channel::{unbounded, Receiver, Sender}; -use stratum_apps::{ - custom_mutex::Mutex, - key_utils::Secp256k1PublicKey, - network_helpers::noise_stream::NoiseTcpStream, - stratum_core::{ - codec_sv2::HandshakeRole, framing_sv2, handlers_sv2::HandleCommonMessagesFromServerAsync, - noise_sv2::Initiator, - }, -}; -use tokio::{ - net::TcpStream, - sync::{broadcast, mpsc}, -}; -use tracing::{debug, error, info, warn}; - -use crate::{ - error::JDCError, - status::{handle_error, Status, StatusSender}, - task_manager::TaskManager, - utils::{ - get_setup_connection_message, protocol_message_type, spawn_io_tasks, Message, MessageType, - SV2Frame, ShutdownMessage, StdFrame, - }, -}; - -mod message_handler; - -/// Placeholder for future upstream-specific data/state. -pub struct UpstreamData; - -/// Holds channels for communication between upstream and channel manager. -/// -/// - `channel_manager_sender` β†’ sends frames to channel manager -/// - `channel_manager_receiver` β†’ receives frames from channel manager -/// - `outbound_tx` β†’ sends frames outbound to upstream -/// - `inbound_rx` β†’ receives frames inbound from upstream -#[derive(Clone)] -pub struct UpstreamChannel { - channel_manager_sender: Sender, - channel_manager_receiver: Receiver, - upstream_sender: Sender, - upstream_receiver: Receiver, -} - -/// Represents an upstream connection (e.g., a pool). -#[derive(Clone)] -pub struct Upstream { - #[allow(dead_code)] - /// Internal state - upstream_data: Arc>, - /// Messaging channels to/from the channel manager and Upstream. 
- upstream_channel: UpstreamChannel, -} - -impl Upstream { - /// Create a new [`Upstream`] connection to the given address. - /// - /// - Establishes TCP + Noise connection - /// - Spawns IO tasks to handle inbound/outbound traffic - pub async fn new( - upstreams: &(SocketAddr, SocketAddr, Secp256k1PublicKey, bool), - channel_manager_sender: Sender, - channel_manager_receiver: Receiver, - notify_shutdown: broadcast::Sender, - task_manager: Arc, - status_sender: Sender, - ) -> Result { - let (addr, _, pubkey, _) = upstreams; - let stream = tokio::time::timeout( - tokio::time::Duration::from_secs(5), - TcpStream::connect(addr), - ) - .await??; - info!("Connected to upstream at {}", addr); - let initiator = Initiator::from_raw_k(pubkey.into_bytes())?; - debug!("Begin with noise setup in upstream connection"); - let (noise_stream_reader, noise_stream_writer) = - NoiseTcpStream::::new(stream, HandshakeRole::Initiator(initiator)) - .await? - .into_split(); - - let status_sender = StatusSender::Upstream(status_sender); - let (inbound_tx, inbound_rx) = unbounded::(); - let (outbound_tx, outbound_rx) = unbounded::(); - - spawn_io_tasks( - task_manager, - noise_stream_reader, - noise_stream_writer, - outbound_rx, - inbound_tx, - notify_shutdown, - status_sender, - ); - - debug!("Noise setup done in upstream connection"); - let upstream_data = Arc::new(Mutex::new(UpstreamData)); - let upstream_channel = UpstreamChannel { - channel_manager_receiver, - channel_manager_sender, - upstream_sender: outbound_tx, - upstream_receiver: inbound_rx, - }; - Ok(Upstream { - upstream_data, - upstream_channel, - }) - } - - /// Perform `SetupConnection` handshake with upstream. - /// - /// Sends [`SetupConnection`] and awaits response. 
- pub async fn setup_connection( - &mut self, - min_version: u16, - max_version: u16, - ) -> Result<(), JDCError> { - info!("Upstream: initiating SV2 handshake..."); - let setup_connection = get_setup_connection_message(min_version, max_version)?; - debug!(?setup_connection, "Prepared `SetupConnection` message"); - let sv2_frame: StdFrame = Message::Common(setup_connection.into()).try_into()?; - debug!(?sv2_frame, "Encoded `SetupConnection` frame"); - - // Send SetupConnection - if let Err(e) = self.upstream_channel.upstream_sender.send(sv2_frame).await { - error!(?e, "Failed to send `SetupConnection` frame to upstream"); - return Err(JDCError::CodecNoise( - stratum_apps::stratum_core::noise_sv2::Error::ExpectedIncomingHandshakeMessage, - )); - } - info!("Sent `SetupConnection` to upstream, awaiting response..."); - - let incoming_frame = match self.upstream_channel.upstream_receiver.recv().await { - Ok(frame) => { - debug!(?frame, "Received raw inbound frame during handshake"); - frame - } - Err(e) => { - error!(?e, "Upstream closed connection during handshake"); - return Err(JDCError::CodecNoise( - stratum_apps::stratum_core::noise_sv2::Error::ExpectedIncomingHandshakeMessage, - )); - } - }; - - let mut incoming: StdFrame = incoming_frame; - debug!(?incoming, "Decoded inbound handshake frame"); - - let message_type = incoming - .get_header() - .ok_or(framing_sv2::Error::ExpectedHandshakeFrame)? - .msg_type(); - - info!(?message_type, "Dispatching inbound handshake message"); - self.handle_common_message_frame_from_server(None, message_type, incoming.payload()) - .await?; - Ok(()) - } - - /// Start unified upstream loop. - /// - /// Responsibilities: - /// - Run `setup_connection` - /// - Handle messages from upstream (pool) and channel manager - /// - React to shutdown signals - /// - /// This function spawns an async task and returns immediately. 
- pub async fn start( - mut self, - min_version: u16, - max_version: u16, - notify_shutdown: broadcast::Sender, - shutdown_complete_tx: mpsc::Sender<()>, - status_sender: Sender, - task_manager: Arc, - ) { - let status_sender = StatusSender::Upstream(status_sender); - let mut shutdown_rx = notify_shutdown.subscribe(); - - if let Err(e) = self.setup_connection(min_version, max_version).await { - error!(error = ?e, "Upstream: connection setup failed."); - return; - } - - task_manager.spawn(async move { - let mut self_clone_1 = self.clone(); - let mut self_clone_2 = self.clone(); - loop { - tokio::select! { - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Upstream: received shutdown signal."); - break; - } - Ok(ShutdownMessage::JobDeclaratorShutdownFallback(_)) => { - info!("Upstream: Received Job declarator shutdown."); - break; - } - Ok(ShutdownMessage::UpstreamShutdownFallback(_)) => { - info!("Upstream: Received Upstream shutdown."); - break; - } - Ok(ShutdownMessage::UpstreamShutdown(tx)) => { - info!("Upstream shutdown requested"); - drop(tx); - break; - } - Ok(ShutdownMessage::JobDeclaratorShutdown(tx)) => { - info!("Upstream shutdown requested"); - drop(tx); - break; - } - Err(_) => { - warn!("Upstream: shutdown channel closed unexpectedly."); - break; - } - _ => {} - } - } - res = self_clone_1.handle_pool_message() => { - if let Err(e) = res { - error!(error = ?e, "Upstream: error handling pool message."); - handle_error(&status_sender, e).await; - break; - } - } - res = self_clone_2.handle_channel_manager_message() => { - if let Err(e) = res { - error!(error = ?e, "Upstream: error handling channel manager message."); - handle_error(&status_sender, e).await; - break; - } - } - - } - } - drop(shutdown_complete_tx); - warn!("Upstream: unified message loop exited."); - }); - } - - // Handle incoming frames from upstream (pool). 
- // - // Routes: - // - `Common` messages β†’ handled locally - // - `Mining` messages β†’ forwarded to channel manager - // - Unsupported β†’ error - async fn handle_pool_message(&mut self) -> Result<(), JDCError> { - let mut sv2_frame = self.upstream_channel.upstream_receiver.recv().await?; - - debug!("Received SV2 frame from upstream."); - let Some(message_type) = sv2_frame.get_header().map(|m| m.msg_type()) else { - return Ok(()); - }; - - match protocol_message_type(message_type) { - MessageType::Common => { - info!(?message_type, "Handling common message from Upstream."); - self.handle_common_message_frame_from_server( - None, - message_type, - sv2_frame.payload(), - ) - .await?; - } - MessageType::Mining => { - self.upstream_channel - .channel_manager_sender - .send(sv2_frame) - .await - .map_err(|e| { - error!(error=?e, "Failed to send mining message to channel manager."); - JDCError::ChannelErrorSender - })?; - } - _ => { - warn!("Received unsupported message type from upstream: {message_type}"); - } - } - Ok(()) - } - - // Handle outbound frames from channel manager β†’ upstream. - // - // Forwards messages upstream. - async fn handle_channel_manager_message(&mut self) -> Result<(), JDCError> { - match self.upstream_channel.channel_manager_receiver.recv().await { - Ok(msg) => { - debug!("Received message from channel manager, forwarding upstream."); - self.upstream_channel - .upstream_sender - .send(msg) - .await - .map_err(|e| { - error!(error=?e, "Failed to send outbound message to upstream."); - JDCError::CodecNoise( - stratum_apps::stratum_core::noise_sv2::Error::ExpectedIncomingHandshakeMessage, - ) - })?; - } - Err(e) => { - warn!(error=?e, "Channel manager receiver closed or errored."); - } - } - Ok(()) - } -} diff --git a/roles/jd-client/src/lib/utils.rs b/roles/jd-client/src/lib/utils.rs deleted file mode 100644 index 6d19a894c2..0000000000 --- a/roles/jd-client/src/lib/utils.rs +++ /dev/null @@ -1,585 +0,0 @@ -//! 
Utilities for managing JDC communication, connection setup, -//! shutdown signaling, and upstream state tracking. -//! -//! This module provides: -//! - Construction of `SetupConnection` messages for mining, job declarator, and template -//! distribution protocols. -//! - Helpers for parsing frames into typed Stratum messages. -//! - An async I/O task spawner for handling framed network communication with shutdown -//! coordination. -//! - Deserialization of coinbase transaction outputs. -//! - Shutdown signaling types for orchestrating controlled shutdown of upstream, downstream, and -//! job declarator components. -//! - An atomic wrapper for managing the upstream connection state safely across threads. -use std::{ - net::SocketAddr, - sync::{ - atomic::{AtomicU8, Ordering}, - Arc, - }, -}; - -use async_channel::{Receiver, Sender}; -use stratum_apps::{ - network_helpers::noise_stream::{NoiseTcpReadHalf, NoiseTcpWriteHalf}, - stratum_core::{ - binary_sv2::Str0255, - buffer_sv2, - codec_sv2::{StandardEitherFrame, StandardSv2Frame}, - common_messages_sv2::{ - Protocol, SetupConnection, MESSAGE_TYPE_CHANNEL_ENDPOINT_CHANGED, - MESSAGE_TYPE_RECONNECT, MESSAGE_TYPE_SETUP_CONNECTION, - MESSAGE_TYPE_SETUP_CONNECTION_ERROR, MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - }, - framing_sv2::framing::{Frame, Sv2Frame}, - job_declaration_sv2::{ - MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN, MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN_SUCCESS, - MESSAGE_TYPE_DECLARE_MINING_JOB, MESSAGE_TYPE_DECLARE_MINING_JOB_ERROR, - MESSAGE_TYPE_DECLARE_MINING_JOB_SUCCESS, MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS, - MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS, MESSAGE_TYPE_PUSH_SOLUTION, - }, - mining_sv2::{ - CloseChannel, OpenExtendedMiningChannel, OpenStandardMiningChannel, - MESSAGE_TYPE_CLOSE_CHANNEL, MESSAGE_TYPE_MINING_SET_NEW_PREV_HASH, - MESSAGE_TYPE_NEW_EXTENDED_MINING_JOB, MESSAGE_TYPE_NEW_MINING_JOB, - MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL, - 
MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL_SUCCESS, - MESSAGE_TYPE_OPEN_MINING_CHANNEL_ERROR, MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL, - MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL_SUCCESS, MESSAGE_TYPE_SET_CUSTOM_MINING_JOB, - MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_ERROR, MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_SUCCESS, - MESSAGE_TYPE_SET_EXTRANONCE_PREFIX, MESSAGE_TYPE_SET_GROUP_CHANNEL, - MESSAGE_TYPE_SET_TARGET, MESSAGE_TYPE_SUBMIT_SHARES_ERROR, - MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, MESSAGE_TYPE_SUBMIT_SHARES_STANDARD, - MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS, MESSAGE_TYPE_UPDATE_CHANNEL, - MESSAGE_TYPE_UPDATE_CHANNEL_ERROR, - }, - parsers_sv2::{AnyMessage, Mining}, - template_distribution_sv2::{ - MESSAGE_TYPE_COINBASE_OUTPUT_CONSTRAINTS, MESSAGE_TYPE_NEW_TEMPLATE, - MESSAGE_TYPE_REQUEST_TRANSACTION_DATA, MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_ERROR, - MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_SUCCESS, MESSAGE_TYPE_SET_NEW_PREV_HASH, - MESSAGE_TYPE_SUBMIT_SOLUTION, - }, - }, -}; -use tokio::sync::broadcast; -use tracing::{error, trace, warn, Instrument}; - -use crate::{ - config::ConfigJDCMode, - error::JDCError, - status::{StatusSender, StatusType}, - task_manager::TaskManager, -}; - -pub type Message = AnyMessage<'static>; -pub type StdFrame = StandardSv2Frame; -pub type EitherFrame = StandardEitherFrame; -pub type SV2Frame = Sv2Frame; -/// Represents a message that can trigger shutdown of various system components. -#[derive(Debug, Clone)] -pub enum ShutdownMessage { - /// Shutdown all components immediately - ShutdownAll, - /// Shutdown all downstream connections - DownstreamShutdownAll, - /// Shutdown a specific downstream connection by ID - DownstreamShutdown(u32), - /// Shutdown Upstream and JD part of JDC during fallback - JobDeclaratorShutdownFallback((Vec, tokio::sync::mpsc::Sender<()>)), - /// Shutdown Upstream and JD part during fallback - UpstreamShutdownFallback((Vec, tokio::sync::mpsc::Sender<()>)), - /// Shutdown Job Declarator during initialization. 
- JobDeclaratorShutdown(tokio::sync::mpsc::Sender<()>), - /// Shutdown Job Declarator during initialization. - UpstreamShutdown(tokio::sync::mpsc::Sender<()>), -} - -/// Constructs a `SetupConnection` message for the mining protocol. -pub fn get_setup_connection_message( - min_version: u16, - max_version: u16, -) -> Result, JDCError> { - let endpoint_host = "0.0.0.0".to_string().into_bytes().try_into()?; - let vendor = String::new().try_into()?; - let hardware_version = String::new().try_into()?; - let firmware = String::new().try_into()?; - let device_id = String::new().try_into()?; - let flags = 0b0000_0000_0000_0000_0000_0000_0000_0110; - Ok(SetupConnection { - protocol: Protocol::MiningProtocol, - min_version, - max_version, - flags, - endpoint_host, - endpoint_port: 50, - vendor, - hardware_version, - firmware, - device_id, - }) -} - -/// Constructs a `SetupConnection` message for the Job Declarator (JDS). -pub fn get_setup_connection_message_jds( - proxy_address: &SocketAddr, - mode: &ConfigJDCMode, -) -> SetupConnection<'static> { - let endpoint_host = proxy_address - .ip() - .to_string() - .into_bytes() - .try_into() - .unwrap(); - let vendor = String::new().try_into().unwrap(); - let hardware_version = String::new().try_into().unwrap(); - let firmware = String::new().try_into().unwrap(); - let device_id = String::new().try_into().unwrap(); - let mut setup_connection = SetupConnection { - protocol: Protocol::JobDeclarationProtocol, - min_version: 2, - max_version: 2, - flags: 0b0000_0000_0000_0000_0000_0000_0000_0000, - endpoint_host, - endpoint_port: proxy_address.port(), - vendor, - hardware_version, - firmware, - device_id, - }; - - if matches!(mode, ConfigJDCMode::FullTemplate) { - setup_connection.allow_full_template_mode(); - } - - setup_connection -} - -/// Constructs a `SetupConnection` message for the Template Provider (TP). 
-pub fn get_setup_connection_message_tp(address: SocketAddr) -> SetupConnection<'static> { - let endpoint_host = address.ip().to_string().into_bytes().try_into().unwrap(); - let vendor = String::new().try_into().unwrap(); - let hardware_version = String::new().try_into().unwrap(); - let firmware = String::new().try_into().unwrap(); - let device_id = String::new().try_into().unwrap(); - SetupConnection { - protocol: Protocol::TemplateDistributionProtocol, - min_version: 2, - max_version: 2, - flags: 0b0000_0000_0000_0000_0000_0000_0000_0000, - endpoint_host, - endpoint_port: address.port(), - vendor, - hardware_version, - firmware, - device_id, - } -} - -/// Spawns async reader and writer tasks for handling framed I/O with shutdown support. -#[track_caller] -#[allow(clippy::too_many_arguments)] -pub fn spawn_io_tasks( - task_manager: Arc, - mut reader: NoiseTcpReadHalf, - mut writer: NoiseTcpWriteHalf, - outbound_rx: Receiver, - inbound_tx: Sender, - notify_shutdown: broadcast::Sender, - status_sender: StatusSender, -) { - let caller = std::panic::Location::caller(); - let inbound_tx_clone = inbound_tx.clone(); - let outbound_rx_clone = outbound_rx.clone(); - { - let mut shutdown_rx = notify_shutdown.subscribe(); - let status_sender = status_sender.clone(); - let status_type: StatusType = StatusType::from(&status_sender); - - task_manager.spawn(async move { - trace!("Reader task started"); - loop { - tokio::select! 
{ - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - trace!("Received global shutdown"); - inbound_tx.close(); - break; - } - Ok(ShutdownMessage::DownstreamShutdown(down_id)) if matches!(status_type, StatusType::Downstream(id) if id == down_id) => { - trace!(down_id, "Received downstream shutdown"); - if status_type != StatusType::TemplateReceiver { - inbound_tx.close(); - break; - } - } - Ok(ShutdownMessage::JobDeclaratorShutdownFallback(_)) if !matches!(status_type, StatusType::TemplateReceiver) => { - trace!("Received job declarator shutdown"); - if status_type != StatusType::TemplateReceiver { - inbound_tx.close(); - break; - } - } - Ok(ShutdownMessage::UpstreamShutdownFallback(_)) if !matches!(status_type, StatusType::TemplateReceiver) => { - trace!("Received upstream shutdown"); - if status_type != StatusType::TemplateReceiver { - inbound_tx.close(); - break; - } - } - - Ok(ShutdownMessage::UpstreamShutdown(tx)) if !matches!(status_type, StatusType::TemplateReceiver) => { - trace!("Received upstream shutdown"); - if status_type != StatusType::TemplateReceiver { - inbound_tx.close(); - break; - } - drop(tx); - } - Ok(ShutdownMessage::JobDeclaratorShutdown(tx)) if !matches!(status_type, StatusType::TemplateReceiver) => { - trace!("Received upstream shutdown"); - if status_type != StatusType::TemplateReceiver { - inbound_tx.close(); - break; - } - drop(tx); - } - _ => {} - } - } - res = reader.read_frame() => { - match res { - Ok(frame) => { - match frame { - Frame::HandShake(frame) => { - error!(?frame, "Received handshake frame"); - drop(frame); - break; - }, - Frame::Sv2(sv2_frame) => { - trace!("Received inbound frame"); - if let Err(e) = inbound_tx.send(sv2_frame).await { - inbound_tx.close(); - error!(error=?e, "Failed to forward inbound frame"); - break; - } - }, - } - } - Err(e) => { - error!(error=?e, "Reader error"); - inbound_tx.close(); - break; - } - } - } - } - } - inbound_tx.close(); - 
outbound_rx_clone.close(); - drop(inbound_tx); - drop(outbound_rx_clone); - warn!("Reader task exited."); - }.instrument(tracing::trace_span!( - "reader_task", - spawned_at = %format!("{}:{}", caller.file(), caller.line()) - ))); - } - - { - let mut shutdown_rx = notify_shutdown.subscribe(); - let status_type: StatusType = StatusType::from(&status_sender); - - task_manager.spawn(async move { - trace!("Writer task started"); - loop { - tokio::select! { - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - trace!("Received global shutdown"); - outbound_rx.close(); - break; - } - Ok(ShutdownMessage::DownstreamShutdown(down_id)) if matches!(status_type, StatusType::Downstream(id) if id == down_id) => { - trace!(down_id, "Received downstream shutdown"); - if status_type != StatusType::TemplateReceiver { - outbound_rx.close(); - break; - } - } - Ok(ShutdownMessage::JobDeclaratorShutdownFallback(_)) if !matches!(status_type, StatusType::TemplateReceiver) => { - trace!("Received job declarator shutdown"); - if status_type != StatusType::TemplateReceiver { - outbound_rx.close(); - break; - } - } - Ok(ShutdownMessage::UpstreamShutdownFallback(_)) if !matches!(status_type, StatusType::TemplateReceiver) => { - trace!("Received upstream shutdown"); - if status_type != StatusType::TemplateReceiver { - outbound_rx.close(); - break; - } - } - Ok(ShutdownMessage::UpstreamShutdown(tx)) if !matches!(status_type, StatusType::TemplateReceiver) => { - trace!("Received upstream shutdown"); - if status_type != StatusType::TemplateReceiver { - outbound_rx.close(); - break; - } - drop(tx); - } - Ok(ShutdownMessage::JobDeclaratorShutdown(tx)) if !matches!(status_type, StatusType::TemplateReceiver) => { - trace!("Received upstream shutdown"); - if status_type != StatusType::TemplateReceiver { - outbound_rx.close(); - break; - } - drop(tx); - } - _ => {} - } - } - res = outbound_rx.recv() => { - match res { - Ok(frame) => { - trace!("Sending outbound 
frame"); - if let Err(e) = writer.write_frame(frame.into()).await { - error!(error=?e, "Writer error"); - outbound_rx.close(); - break; - } - } - Err(_) => { - outbound_rx.close(); - warn!("Outbound channel closed"); - break; - } - } - } - } - } - outbound_rx.close(); - inbound_tx_clone.close(); - drop(outbound_rx); - drop(inbound_tx_clone); - warn!("Writer task exited."); - }.instrument(tracing::trace_span!( - "writer_task", - spawned_at = %format!("{}:{}", caller.file(), caller.line()) - ))); - } -} - -/// Represents the state of the upstream connection. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum UpstreamState { - /// No channel established with upstream. - NoChannel = 0, - /// Channel is being established undergoing. - Pending = 1, - /// Channel is active and connected. - Connected = 2, - /// Running in solo mining mode. - SoloMining = 3, -} - -/// Atomic wrapper for managing upstream connection state safely across threads. -#[derive(Clone)] -pub struct AtomicUpstreamState { - inner: Arc, -} - -impl AtomicUpstreamState { - /// Creates a new atomic upstream state. - pub fn new(state: UpstreamState) -> Self { - Self { - inner: Arc::new(AtomicU8::new(state as u8)), - } - } - - /// Returns the current upstream state. - pub fn get(&self) -> UpstreamState { - match self.inner.load(Ordering::SeqCst) { - 0 => UpstreamState::NoChannel, - 1 => UpstreamState::Pending, - 2 => UpstreamState::Connected, - 3 => UpstreamState::SoloMining, - _ => unreachable!("invalid upstream state"), - } - } - - /// Updates the upstream state - pub fn set(&self, state: UpstreamState) { - self.inner.store(state as u8, Ordering::SeqCst); - } - - /// Conditionally updates the upstream state if the current value matches. 
- pub fn compare_and_set( - &self, - current: UpstreamState, - new: UpstreamState, - ) -> Result<(), UpstreamState> { - self.inner - .compare_exchange(current as u8, new as u8, Ordering::SeqCst, Ordering::SeqCst) - .map(|_| ()) - .map_err(|v| match v { - 0 => UpstreamState::NoChannel, - 1 => UpstreamState::Pending, - 2 => UpstreamState::Connected, - 3 => UpstreamState::SoloMining, - _ => unreachable!("invalid upstream state"), - }) - } -} - -/// Represents a pending channel request during the bootstrap phase -/// of the Job Declarator Client (JDC). -/// -/// These requests are created by downstreams that want to open -/// a mining channel but cannot proceed immediately. -/// They remain queued until an upstream channel is successfully opened, -/// at which point they can be processed. -/// -/// Two types of requests can be pending: -/// - [`OpenExtendedMiningChannel`] for extended mining channels -/// - [`OpenStandardMiningChannel`] for standard mining channels -pub enum PendingChannelRequest { - /// A request to open an extended mining channel. - ExtendedChannel(OpenExtendedMiningChannel<'static>), - /// A request to open a standard mining channel. 
- StandardChannel(OpenStandardMiningChannel<'static>), -} - -impl From> for PendingChannelRequest { - fn from(value: OpenExtendedMiningChannel<'static>) -> Self { - PendingChannelRequest::ExtendedChannel(value) - } -} - -impl From> for PendingChannelRequest { - fn from(value: OpenStandardMiningChannel<'static>) -> Self { - PendingChannelRequest::StandardChannel(value) - } -} - -impl From for Mining<'_> { - fn from(value: PendingChannelRequest) -> Self { - match value { - PendingChannelRequest::ExtendedChannel(m) => Mining::OpenExtendedMiningChannel(m), - PendingChannelRequest::StandardChannel(m) => Mining::OpenStandardMiningChannel(m), - } - } -} - -impl PendingChannelRequest { - pub fn message_type(&self) -> u8 { - match self { - PendingChannelRequest::ExtendedChannel(_) => MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL, - PendingChannelRequest::StandardChannel(_) => MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL, - } - } -} - -/// Creates a [`CloseChannel`] message for the given channel ID and reason. -/// -/// The `msg` is converted into a [`Str0255`] reason code. -/// If conversion fails, this function will panic. 
-pub(crate) fn create_close_channel_msg(channel_id: u32, msg: &str) -> CloseChannel<'_> { - CloseChannel { - channel_id, - reason_code: Str0255::try_from(msg.to_string()).expect("Could not convert message."), - } -} - -pub fn is_common_message(message_type: u8) -> bool { - matches!( - message_type, - MESSAGE_TYPE_SETUP_CONNECTION - | MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS - | MESSAGE_TYPE_SETUP_CONNECTION_ERROR - | MESSAGE_TYPE_CHANNEL_ENDPOINT_CHANGED - | MESSAGE_TYPE_RECONNECT - ) -} - -pub fn is_mining_message(message_type: u8) -> bool { - matches!( - message_type, - MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL - | MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL_SUCCESS - | MESSAGE_TYPE_OPEN_MINING_CHANNEL_ERROR - | MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL - | MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL_SUCCESS - | MESSAGE_TYPE_NEW_MINING_JOB - | MESSAGE_TYPE_UPDATE_CHANNEL - | MESSAGE_TYPE_UPDATE_CHANNEL_ERROR - | MESSAGE_TYPE_CLOSE_CHANNEL - | MESSAGE_TYPE_SET_EXTRANONCE_PREFIX - | MESSAGE_TYPE_SUBMIT_SHARES_STANDARD - | MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED - | MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS - | MESSAGE_TYPE_SUBMIT_SHARES_ERROR - // | MESSAGE_TYPE_RESERVED - | 0x1e - | MESSAGE_TYPE_NEW_EXTENDED_MINING_JOB - | MESSAGE_TYPE_MINING_SET_NEW_PREV_HASH - | MESSAGE_TYPE_SET_TARGET - | MESSAGE_TYPE_SET_CUSTOM_MINING_JOB - | MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_SUCCESS - | MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_ERROR - | MESSAGE_TYPE_SET_GROUP_CHANNEL - ) -} - -pub fn is_job_declaration_message(message_type: u8) -> bool { - matches!( - message_type, - MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN - | MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN_SUCCESS - | MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS - | MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS - | MESSAGE_TYPE_DECLARE_MINING_JOB - | MESSAGE_TYPE_DECLARE_MINING_JOB_SUCCESS - | MESSAGE_TYPE_DECLARE_MINING_JOB_ERROR - | MESSAGE_TYPE_PUSH_SOLUTION - ) -} - -pub fn is_template_distribution_message(message_type: u8) -> bool { - matches!( - 
message_type, - MESSAGE_TYPE_COINBASE_OUTPUT_CONSTRAINTS - | MESSAGE_TYPE_NEW_TEMPLATE - | MESSAGE_TYPE_SET_NEW_PREV_HASH - | MESSAGE_TYPE_REQUEST_TRANSACTION_DATA - | MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_SUCCESS - | MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_ERROR - | MESSAGE_TYPE_SUBMIT_SOLUTION - ) -} - -#[derive(Debug, PartialEq, Eq)] -pub enum MessageType { - Common, - Mining, - JobDeclaration, - TemplateDistribution, - Unknown, -} - -pub fn protocol_message_type(message_type: u8) -> MessageType { - if is_common_message(message_type) { - MessageType::Common - } else if is_mining_message(message_type) { - MessageType::Mining - } else if is_job_declaration_message(message_type) { - MessageType::JobDeclaration - } else if is_template_distribution_message(message_type) { - MessageType::TemplateDistribution - } else { - MessageType::Unknown - } -} diff --git a/roles/jd-client/src/main.rs b/roles/jd-client/src/main.rs deleted file mode 100644 index 9b6047d8f9..0000000000 --- a/roles/jd-client/src/main.rs +++ /dev/null @@ -1,17 +0,0 @@ -use jd_client_sv2::JobDeclaratorClient; -use stratum_apps::config_helpers::logging::init_logging; - -use crate::args::process_cli_args; - -mod args; - -#[tokio::main] -async fn main() { - let jdc_config = process_cli_args().unwrap_or_else(|e| { - eprintln!("Job Declarator Client config error: {e}"); - std::process::exit(1); - }); - - init_logging(jdc_config.log_file()); - JobDeclaratorClient::new(jdc_config).start().await; -} diff --git a/roles/jd-server/Cargo.toml b/roles/jd-server/Cargo.toml deleted file mode 100644 index 19f9568431..0000000000 --- a/roles/jd-server/Cargo.toml +++ /dev/null @@ -1,44 +0,0 @@ -[package] -name = "jd_server" -version = "0.1.3" -authors = ["The Stratum V2 Developers"] -edition = "2021" -description = "Job Declarator Server (JDS) role" -documentation = "https://github.com/stratum-mining/stratum" -readme = "README.md" -homepage = "https://stratumprotocol.org" -repository = 
"https://github.com/stratum-mining/stratum" -license = "MIT OR Apache-2.0" -keywords = ["stratum", "mining", "bitcoin", "protocol"] - - -[lib] -name = "jd_server" -path = "src/lib/mod.rs" - -[dependencies] -stratum-apps = { path = "../stratum-apps", features = ["jd_server"] } -roles_logic_sv2 = "5.0.0" -binary_sv2 = "4.0.0" -error_handling = "1.0.0" -codec_sv2 = "3.0.1" -framing_sv2 = "5.0.1" -mining_sv2 = "5.0.1" -noise_sv2 = "1.4.0" -parsers_sv2 = "0.1.1" -job_declaration_sv2 = "5.0.1" -common_messages_sv2 = "6.0.1" -network_helpers_sv2 = "4.0.1" -rpc_sv2 = "1.1.1" -bitcoin = "0.32.5" -async-channel = "1.5.1" -rand = "0.8.4" -tokio = { version = "1.44.1", features = ["full"] } -ext-config = { version = "0.14.0", features = ["toml"], package = "config" } -tracing = { version = "0.1" } -nohash-hasher = "0.2.0" -serde_json = { version = "1.0", default-features = false, features = ["alloc","raw_value"] } -serde = { version = "1.0.89", features = ["derive", "alloc"], default-features = false } -hashbrown = { version = "0.11", default-features = false, features = ["ahash", "serde"] } -hex = "0.4.3" -clap = { version = "4.5.39", features = ["derive"] } diff --git a/roles/jd-server/config-examples/jds-config-hosted-example.toml b/roles/jd-server/config-examples/jds-config-hosted-example.toml deleted file mode 100644 index 4f3b4039e6..0000000000 --- a/roles/jd-server/config-examples/jds-config-hosted-example.toml +++ /dev/null @@ -1,30 +0,0 @@ -# If set to true, JDS require JDC to reveal the transactions they are going to mine on -full_template_mode_required = true - -# SRI Pool config -authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" -cert_validity_sec = 3600 - -# Coinbase outputs are specified as descriptors. 
A full list of descriptors is available at -# https://github.com/bitcoin/bips/blob/master/bip-0380.mediawiki#appendix-b-index-of-script-expressions -# Although the `musig` descriptor is not yet supported and the legacy `combo` descriptor never -# will be. If you have an address, embed it in a descriptor like `addr(
)`. -coinbase_reward_script = "addr(tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8)" - -# Enable this option to set a predefined log file path. -# When enabled, logs will always be written to this file. -# The CLI option --log-file (or -f) will override this setting if provided. -# log_file = "./jd-server.log" - -# SRI Pool JD config -listen_jd_address = "0.0.0.0:34264" -# RPC config for mempool (it can be also the same TP if correctly configured) -core_rpc_url = "http://75.119.150.111" -core_rpc_port = 48332 -core_rpc_user = "username" -core_rpc_pass = "password" -# Time interval used for JDS mempool update -[mempool_update_interval] -unit = "secs" -value = 1 diff --git a/roles/jd-server/config-examples/jds-config-local-example.toml b/roles/jd-server/config-examples/jds-config-local-example.toml deleted file mode 100644 index f26adfbf48..0000000000 --- a/roles/jd-server/config-examples/jds-config-local-example.toml +++ /dev/null @@ -1,30 +0,0 @@ -# If set to true, JDS require JDC to reveal the transactions they are going to mine on -full_template_mode_required = true - -# SRI Pool config -authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" -cert_validity_sec = 3600 - -# Coinbase outputs are specified as descriptors. A full list of descriptors is available at -# https://github.com/bitcoin/bips/blob/master/bip-0380.mediawiki#appendix-b-index-of-script-expressions -# Although the `musig` descriptor is not yet supported and the legacy `combo` descriptor never -# will be. If you have an address, embed it in a descriptor like `addr(
)`. -coinbase_reward_script = "addr(tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8)" - -# Enable this option to set a predefined log file path. -# When enabled, logs will always be written to this file. -# The CLI option --log-file (or -f) will override this setting if provided. -# log_file = "./jd-server.log" - -# SRI Pool JD config -listen_jd_address = "127.0.0.1:34264" -# RPC config for mempool (it can be also the same TP if correctly configured) -core_rpc_url = "http://127.0.0.1" -core_rpc_port = 48332 -core_rpc_user = "username" -core_rpc_pass = "password" -# Time interval used for JDS mempool update -[mempool_update_interval] -unit = "secs" -value = 1 diff --git a/roles/jd-server/src/args.rs b/roles/jd-server/src/args.rs deleted file mode 100644 index e0e0d26400..0000000000 --- a/roles/jd-server/src/args.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::path::PathBuf; - -use clap::Parser; -use ext_config::{Config, File, FileFormat}; -use jd_server::{ - config::JobDeclaratorServerConfig, - error::JdsError, - // error::{Error, ProxyResult}, -}; - -use tracing::error; - -/// CLI argument parser for the JDS binary. -/// -/// Supports the following flags: -/// - `-c`, `--config`: specify a custom config file path -/// - `-h`, `--help`: print help and usage info -#[derive(Parser, Debug)] -#[command(author, version, about = "Job Declarator Server (JDS)", long_about = None)] -pub struct Args { - #[arg( - short = 'c', - long = "config", - help = "Path to the TOML configuration file", - default_value = "jds-config.toml" - )] - pub config_path: std::path::PathBuf, - #[arg( - short = 'f', - long = "log-file", - help = "Path to the log file. If not set, logs will only be written to stdout." - )] - pub log_file: Option, -} - -/// Process CLI args and load configuration. 
-#[allow(clippy::result_large_err)] -pub fn process_cli_args() -> Result { - // Parse CLI arguments - let args = Args::parse(); - - // Build configuration from the provided file path - let config_path = args.config_path.to_str().ok_or_else(|| { - error!("Invalid configuration path."); - JdsError::BadCliArgs - })?; - - let settings = Config::builder() - .add_source(File::new(config_path, FileFormat::Toml)) - .build() - .map_err(|e| { - error!("Failed to build config: {}", e); - JdsError::BadCliArgs - })?; - - // Deserialize settings into JobDeclaratorServerConfig - let mut config = settings - .try_deserialize::() - .map_err(|e| { - error!("Failed to deserialize config: {}", e); - JdsError::BadCliArgs - })?; - - config.set_log_file(args.log_file); - - Ok(config) -} diff --git a/roles/jd-server/src/lib/config.rs b/roles/jd-server/src/lib/config.rs deleted file mode 100644 index ee01a046ec..0000000000 --- a/roles/jd-server/src/lib/config.rs +++ /dev/null @@ -1,296 +0,0 @@ -//! ## Configuration Module -//! -//! Defines [`JobDeclaratorServerConfig`], the configuration structure for the Job Declarator Server -//! (JDS). -//! -//! This module handles: -//! - Parsing TOML files via `serde` -//! - Accessing Bitcoin Core RPC parameters -//! - Managing cryptographic keys for Noise authentication -//! - Setting networking and coinbase logic -//! -//! Also defines a helper struct [`CoreRpc`] to group RPC parameters. 
- -use serde::Deserialize; -use std::{ - path::{Path, PathBuf}, - time::Duration, -}; -use stratum_apps::{ - config_helpers::CoinbaseRewardScript, - key_utils::{Secp256k1PublicKey, Secp256k1SecretKey}, -}; - -#[derive(Debug, serde::Deserialize, Clone)] -pub struct JobDeclaratorServerConfig { - #[serde(default = "default_true")] - full_template_mode_required: bool, - listen_jd_address: String, - authority_public_key: Secp256k1PublicKey, - authority_secret_key: Secp256k1SecretKey, - cert_validity_sec: u64, - coinbase_reward_script: CoinbaseRewardScript, - core_rpc_url: String, - core_rpc_port: u16, - core_rpc_user: String, - core_rpc_pass: String, - #[serde(deserialize_with = "stratum_apps::config_helpers::duration_from_toml")] - mempool_update_interval: Duration, - log_file: Option, -} - -impl JobDeclaratorServerConfig { - /// Creates a new instance of [`JobDeclaratorServerConfig`]. - /// - /// # Panics - /// - /// Panics if `coinbase_reward_scripts` is empty. - pub fn new( - listen_jd_address: String, - authority_public_key: Secp256k1PublicKey, - authority_secret_key: Secp256k1SecretKey, - cert_validity_sec: u64, - coinbase_reward_script: CoinbaseRewardScript, - core_rpc: CoreRpc, - mempool_update_interval: Duration, - ) -> Self { - Self { - full_template_mode_required: true, - listen_jd_address, - authority_public_key, - authority_secret_key, - cert_validity_sec, - coinbase_reward_script, - core_rpc_url: core_rpc.url, - core_rpc_port: core_rpc.port, - core_rpc_user: core_rpc.user, - core_rpc_pass: core_rpc.pass, - mempool_update_interval, - log_file: None, - } - } - - /// Returns the listening address of the Job Declarator Server. - pub fn listen_jd_address(&self) -> &str { - &self.listen_jd_address - } - - /// Returns the public key of the authority. - pub fn authority_public_key(&self) -> &Secp256k1PublicKey { - &self.authority_public_key - } - - /// Returns the secret key of the authority. 
- pub fn authority_secret_key(&self) -> &Secp256k1SecretKey { - &self.authority_secret_key - } - - /// Returns the URL of the core RPC. - pub fn core_rpc_url(&self) -> &str { - &self.core_rpc_url - } - - /// Returns the port of the core RPC. - pub fn core_rpc_port(&self) -> u16 { - self.core_rpc_port - } - - /// Returns the user of the core RPC. - pub fn core_rpc_user(&self) -> &str { - &self.core_rpc_user - } - - /// Returns the password of the core RPC. - pub fn core_rpc_pass(&self) -> &str { - &self.core_rpc_pass - } - - /// Returns the coinbase outputs. - pub fn coinbase_reward_scripts(&self) -> &CoinbaseRewardScript { - &self.coinbase_reward_script - } - - /// Returns the certificate validity in seconds. - pub fn cert_validity_sec(&self) -> u64 { - self.cert_validity_sec - } - - /// Returns whether [`Full Template`] is required. Otherwise, [`Coinbase Only`] mode will be - /// used. - /// - /// [`Full Template`]: https://github.com/stratum-mining/sv2-spec/blob/main/06-Job-Declaration-Protocol.md#632-full-template-mode - /// [`Coinbase Only`]: https://github.com/stratum-mining/sv2-spec/blob/main/06-Job-Declaration-Protocol.md#631-coinbase-only-mode - pub fn full_template_mode_required(&self) -> bool { - self.full_template_mode_required - } - - /// Returns the mempool update interval. - pub fn mempool_update_interval(&self) -> Duration { - self.mempool_update_interval - } - - /// Sets the listening address of Bitcoin core RPC. - pub fn set_core_rpc_url(&mut self, url: String) { - self.core_rpc_url = url; - } - - /// Sets coinbase outputs. 
- pub fn set_coinbase_reward_scripts(&mut self, output: CoinbaseRewardScript) { - self.coinbase_reward_script = output; - } - - pub fn log_file(&self) -> Option<&Path> { - self.log_file.as_deref() - } - pub fn set_log_file(&mut self, log_file: Option) { - if let Some(path) = log_file { - self.log_file = Some(path); - } - } -} - -fn default_true() -> bool { - true -} - -#[derive(Debug, Deserialize, Clone)] -pub struct CoreRpc { - url: String, - port: u16, - user: String, - pass: String, -} - -impl CoreRpc { - pub fn new(url: String, port: u16, user: String, pass: String) -> Self { - Self { - url, - port, - user, - pass, - } - } -} - -#[cfg(test)] -mod tests { - use super::super::JobDeclaratorServer; - use ext_config::{Config, ConfigError, File, FileFormat}; - use std::path::PathBuf; - use stratum_apps::stratum_core::bitcoin::{self, Amount, ScriptBuf, TxOut}; - - use crate::config::JobDeclaratorServerConfig; - - const COINBASE_CONFIG_TEMPLATE: &'static str = r#" - full_template_mode_required = true - authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" - authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" - cert_validity_sec = 3600 - - coinbase_reward_script = %COINBASE_REWARD_SCRIPT% - - listen_jd_address = "127.0.0.1:34264" - core_rpc_url = "http://127.0.0.1" - core_rpc_port = 48332 - core_rpc_user = "username" - core_rpc_pass = "password" - [mempool_update_interval] - unit = "secs" - value = 1 - "#; - const TEST_PK_HEX: &'static str = - "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075"; - const TEST_INVALID_PK_HEX: &'static str = - "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7ffffff"; - - fn load_config(path: &str) -> JobDeclaratorServerConfig { - let config_path = PathBuf::from(path); - assert!( - config_path.exists(), - "No config file found at {:?}", - config_path - ); - - let config_path = config_path.to_str().unwrap(); - - let settings = Config::builder() - 
.add_source(File::new(config_path, FileFormat::Toml)) - .build() - .expect("Failed to build config"); - - settings.try_deserialize().expect("Failed to parse config") - } - - fn load_coinbase_config_str(path: &str) -> Result { - let s = COINBASE_CONFIG_TEMPLATE.replace("%COINBASE_REWARD_SCRIPT%", path); - let settings = Config::builder() - .add_source(File::from_str(&s, FileFormat::Toml)) - .build() - .expect("Failed to build config"); - - settings.try_deserialize() - } - - #[tokio::test] - async fn test_offline_rpc_url() { - let mut config = load_config("config-examples/jds-config-hosted-example.toml"); - config.set_core_rpc_url("http://127.0.0.1".to_string()); - let jd = JobDeclaratorServer::new(config); - assert!(jd.start().await.is_err()); - } - - #[test] - fn test_get_non_empty_coinbase_reward_script() { - let pk = TEST_PK_HEX - .parse::() - .expect("Failed to parse public key"); - let config = - load_coinbase_config_str(&format!("\"wpkh({pk})\"")).expect("Failed to parse config"); - - let output = TxOut { - value: Amount::from_sat(0), - script_pubkey: config.coinbase_reward_scripts().script_pubkey(), - }; - let expected_script = ScriptBuf::from_hex(&format!( - "0014{}", - pk.wpubkey_hash().expect("compressed key") - )) - .expect("hex"); - let expected_transaction_output = TxOut { - value: Amount::from_sat(0), - script_pubkey: expected_script, - }; - - assert_eq!(output, expected_transaction_output); - } - - #[test] - fn test_get_coinbase_reward_script_empty() { - let error = - load_coinbase_config_str("\"\"").expect_err("cannot parse config with empty txout"); - assert_eq!( - error.to_string(), - "Miniscript: unexpected Β«(0 args) while parsing MiniscriptΒ»", - ); - } - - #[test] - fn test_get_invalid_miniscript_in_coinbase_reward_script() { - let error = load_coinbase_config_str(&format!("\"INVALID\"")) - .expect_err("Cannot parse config with bad miniscript"); - assert_eq!( - error.to_string(), - "Miniscript: unexpected Β«INVALID(0 args) while parsing 
MiniscriptΒ»", - ); - } - - #[test] - fn test_get_invalid_value_in_coinbase_reward_script() { - let error = load_coinbase_config_str(&format!("\"wpkh({TEST_INVALID_PK_HEX})\"")) - .expect_err("Cannot parse config with bad pubkeys"); - assert_eq!( - error.to_string(), - "Miniscript: unexpected Β«Error while parsing simple public keyΒ»", - ); - } -} diff --git a/roles/jd-server/src/lib/error.rs b/roles/jd-server/src/lib/error.rs deleted file mode 100644 index 7f1ef71615..0000000000 --- a/roles/jd-server/src/lib/error.rs +++ /dev/null @@ -1,153 +0,0 @@ -//! ## Error Module -//! -//! Defines [`JdsError`], the central error enum used throughout the Job Declarator Server (JDS). -//! -//! It unifies errors from: -//! - I/O operations -//! - Channels (send/recv) -//! - SV2 stack: Binary, Codec, Noise, Framing, RolesLogic -//! - Mempool layer -//! - Locking logic (PoisonError) -//! - Domain-specific issues (e.g., missing job, invalid URL, reconstruction failures) -//! -//! This module ensures that all errors can be passed around consistently, including across async -//! boundaries. 
- -use binary_sv2; -use codec_sv2; -use framing_sv2; -use noise_sv2; -use parsers_sv2::Mining; -use roles_logic_sv2; -use std::{ - convert::From, - fmt::Debug, - sync::{MutexGuard, PoisonError}, -}; - -use crate::mempool::error::JdsMempoolError; - -#[derive(std::fmt::Debug)] -pub enum JdsError { - Io(std::io::Error), - ChannelSend(Box), - ChannelRecv(async_channel::RecvError), - BinarySv2(binary_sv2::Error), - Codec(codec_sv2::Error), - Noise(noise_sv2::Error), - RolesLogic(roles_logic_sv2::Error), - Framing(framing_sv2::Error), - PoisonLock(String), - Custom(String), - Sv2ProtocolError((u32, Mining<'static>)), - MempoolError(JdsMempoolError), - ImpossibleToReconstructBlock(String), - NoLastDeclaredJob, - InvalidRPCUrl, - BadCliArgs, - InvalidPrevHash, - InvalidCoinbase, - InvalidMerkleRoot, -} - -impl std::fmt::Display for JdsError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - use JdsError::*; - match self { - Io(ref e) => write!(f, "I/O error: `{e:?}"), - ChannelSend(ref e) => write!(f, "Channel send failed: `{e:?}`"), - ChannelRecv(ref e) => write!(f, "Channel recv failed: `{e:?}`"), - BinarySv2(ref e) => write!(f, "Binary SV2 error: `{e:?}`"), - Codec(ref e) => write!(f, "Codec SV2 error: `{e:?}"), - Framing(ref e) => write!(f, "Framing SV2 error: `{e:?}`"), - Noise(ref e) => write!(f, "Noise SV2 error: `{e:?}"), - RolesLogic(ref e) => write!(f, "Roles Logic SV2 error: `{e:?}`"), - PoisonLock(ref e) => write!(f, "Poison lock: {e:?}"), - Custom(ref e) => write!(f, "Custom SV2 error: `{e:?}`"), - Sv2ProtocolError(ref e) => { - write!(f, "Received Sv2 Protocol Error from upstream: `{e:?}`") - } - MempoolError(ref e) => write!(f, "Mempool error: `{e:?}`"), - ImpossibleToReconstructBlock(e) => { - write!(f, "Error in reconstructing the block: {e:?}") - } - NoLastDeclaredJob => write!(f, "Last declared job not found"), - InvalidRPCUrl => write!(f, "Invalid Template Provider RPC URL"), - BadCliArgs => write!(f, "Bad CLI arg input"), - 
InvalidPrevHash => write!(f, "Invalid previous hash"), - InvalidCoinbase => write!(f, "Invalid coinbase"), - InvalidMerkleRoot => write!(f, "Invalid merkle root"), - } - } -} - -impl From for JdsError { - fn from(e: std::io::Error) -> JdsError { - JdsError::Io(e) - } -} - -impl From for JdsError { - fn from(e: async_channel::RecvError) -> JdsError { - JdsError::ChannelRecv(e) - } -} - -impl From for JdsError { - fn from(e: binary_sv2::Error) -> JdsError { - JdsError::BinarySv2(e) - } -} - -impl From for JdsError { - fn from(e: codec_sv2::Error) -> JdsError { - JdsError::Codec(e) - } -} - -impl From for JdsError { - fn from(e: noise_sv2::Error) -> JdsError { - JdsError::Noise(e) - } -} - -impl From for JdsError { - fn from(e: roles_logic_sv2::Error) -> JdsError { - JdsError::RolesLogic(e) - } -} - -impl From> for JdsError { - fn from(e: async_channel::SendError) -> JdsError { - JdsError::ChannelSend(Box::new(e)) - } -} - -impl From for JdsError { - fn from(e: String) -> JdsError { - JdsError::Custom(e) - } -} -impl From for JdsError { - fn from(e: framing_sv2::Error) -> JdsError { - JdsError::Framing(e) - } -} - -impl From>> for JdsError { - fn from(e: PoisonError>) -> JdsError { - JdsError::PoisonLock(e.to_string()) - } -} - -impl From<(u32, Mining<'static>)> for JdsError { - fn from(e: (u32, Mining<'static>)) -> Self { - JdsError::Sv2ProtocolError(e) - } -} - -impl From for JdsError { - fn from(error: JdsMempoolError) -> Self { - JdsError::MempoolError(error) - } -} diff --git a/roles/jd-server/src/lib/job_declarator/message_handler.rs b/roles/jd-server/src/lib/job_declarator/message_handler.rs deleted file mode 100644 index b5644bad41..0000000000 --- a/roles/jd-server/src/lib/job_declarator/message_handler.rs +++ /dev/null @@ -1,296 +0,0 @@ -use binary_sv2::{Decodable, Serialize, U256}; -use bitcoin::{ - consensus::Decodable as BitcoinDecodable, - hashes::{sha256d, Hash}, - Transaction, Txid, -}; -use job_declaration_sv2::{ - AllocateMiningJobToken, 
AllocateMiningJobTokenSuccess, DeclareMiningJob, DeclareMiningJobError, - DeclareMiningJobSuccess, ProvideMissingTransactions, ProvideMissingTransactionsSuccess, - PushSolution, -}; -use parsers_sv2::JobDeclaration; -use roles_logic_sv2::{ - errors::Error, - handlers::{job_declaration::ParseJobDeclarationMessagesFromDownstream, SendTo_}, - utils::Mutex, -}; -use std::{ - convert::TryInto, - io::Cursor, - sync::{atomic::Ordering, Arc}, -}; -pub type SendTo = SendTo_, ()>; -use crate::mempool::JDsMempool; - -use super::{signed_token, TransactionState}; -use parsers_sv2::AnyMessage as AllMessages; -use tracing::{debug, info}; - -use super::JobDeclaratorDownstream; - -impl JobDeclaratorDownstream { - fn verify_job(&mut self, message: &DeclareMiningJob) -> bool { - // Convert token from B0255 to u32 - let four_byte_array: [u8; 4] = message - .mining_job_token - .clone() - .to_vec() - .as_slice() - .try_into() - .unwrap(); - let token_u32 = u32::from_le_bytes(four_byte_array); - // TODO Function to implement, it must be checked if the requested job has: - // 1. right coinbase - // 2. right version field - // 3. right prev-hash - // 4. 
right nbits - self.token_to_job_map.contains_key(&(token_u32)) - } -} - -impl ParseJobDeclarationMessagesFromDownstream for JobDeclaratorDownstream { - fn handle_allocate_mining_job_token( - &mut self, - message: AllocateMiningJobToken, - ) -> Result { - info!( - "Received `AllocateMiningJobToken` with id: {}", - message.request_id - ); - debug!("`AllocateMiningJobToken`: {:?}", message.request_id); - let token = self.tokens.fetch_add(1, Ordering::Relaxed); - self.token_to_job_map.insert(token, None); - let message_success = AllocateMiningJobTokenSuccess { - request_id: message.request_id, - mining_job_token: token.to_le_bytes().to_vec().try_into().unwrap(), - coinbase_outputs: self.coinbase_output.clone().try_into().unwrap(), - }; - let message_enum = JobDeclaration::AllocateMiningJobTokenSuccess(message_success); - info!( - "Sending AllocateMiningJobTokenSuccess to proxy {}", - message_enum - ); - Ok(SendTo::Respond(message_enum)) - } - - // Transactions that are present in the mempool are stored here, that is sent to the - // mempool which use the rpc client to retrieve the whole data for each transaction. 
- // The unknown transactions is a vector that contains the transactions that are not in the - // jds mempool, and will be non-empty in the ProvideMissingTransactionsSuccess message - fn handle_declare_mining_job(&mut self, message: DeclareMiningJob) -> Result { - info!( - "Received `DeclareMiningJob` with id: {}", - message.request_id - ); - debug!("`DeclareMiningJob`: {}", message); - if let Some(old_mining_job) = self.declared_mining_job.0.take() { - clear_declared_mining_job(old_mining_job, &message, self.mempool.clone())?; - } - let mut known_transactions: Vec = vec![]; - if self.verify_job(&message) { - let txids = message.tx_ids_list.inner_as_ref(); - let mempool = self.mempool.safe_lock(|x| x.mempool.clone())?; - let mut transactions_with_state = vec![TransactionState::Missing; txids.len()]; - let mut missing_txs: Vec = Vec::new(); - for (i, txid) in txids.iter().enumerate() { - let hash = sha256d::Hash::from_slice(txid)?; - let txid = Txid::from(hash); - match mempool.contains_key(&txid) { - true => { - transactions_with_state[i] = TransactionState::PresentInMempool(txid); - known_transactions.push(txid); - } - false => { - missing_txs.push(i as u16); - } - } - } - self.declared_mining_job = ( - Some(message.clone().into_static()), - transactions_with_state, - missing_txs.clone(), - ); - // here we send the transactions that we want to be stored in jds mempool with full data - - self.add_txs_to_mempool - .add_txs_to_mempool_inner - .known_transactions - .append(&mut known_transactions); - let mut full_token = [0u8; 255]; - message.mining_job_token.to_bytes(&mut full_token)?; - let mining_job_token = &mut full_token[..32]; - if missing_txs.is_empty() { - let message_success = DeclareMiningJobSuccess { - request_id: message.request_id, - new_mining_job_token: signed_token( - U256::from_bytes(mining_job_token)?, - &self.public_key.clone(), - &self.private_key.clone(), - ), - }; - let message_enum_success = 
JobDeclaration::DeclareMiningJobSuccess(message_success); - Ok(SendTo::Respond(message_enum_success)) - } else { - let message_provide_missing_transactions = ProvideMissingTransactions { - request_id: message.request_id, - unknown_tx_position_list: missing_txs.into(), - }; - let message_enum_provide_missing_transactions = - JobDeclaration::ProvideMissingTransactions( - message_provide_missing_transactions, - ); - Ok(SendTo::Respond(message_enum_provide_missing_transactions)) - } - } else { - let message_error = DeclareMiningJobError { - request_id: message.request_id, - error_code: Vec::new().try_into().unwrap(), - error_details: Vec::new().try_into().unwrap(), - }; - let message_enum_error = JobDeclaration::DeclareMiningJobError(message_error); - Ok(SendTo::Respond(message_enum_error)) - } - } - - fn handle_provide_missing_transactions_success( - &mut self, - message: ProvideMissingTransactionsSuccess, - ) -> Result { - info!( - "Received `ProvideMissingTransactionsSuccess` with id: {}", - message.request_id - ); - debug!("`ProvideMissingTransactionsSuccess`: {}", message); - let (declared_mining_job, ref mut transactions_with_state, missing_indexes) = - &mut self.declared_mining_job; - let mut unknown_transactions: Vec = vec![]; - match declared_mining_job { - Some(declared_job) => { - let id = declared_job.request_id; - // check request_id in order to ignore old ProvideMissingTransactionsSuccess (see - // issue #860) - if id == message.request_id { - for (i, tx) in message.transaction_list.inner_as_ref().iter().enumerate() { - let mut cursor = Cursor::new(tx); - let transaction = - Transaction::consensus_decode_from_finite_reader(&mut cursor) - .map_err(|e| Error::TxDecodingError(e.to_string()))?; - Vec::push(&mut unknown_transactions, transaction.clone()); - let index = - *missing_indexes - .get(i) - .ok_or(Error::LogicErrorMessage(Box::new( - AllMessages::JobDeclaration( - JobDeclaration::ProvideMissingTransactionsSuccess( - message.clone().into_static(), - ), 
- ), - )))? as usize; - // insert the missing transactions in the mempool - transactions_with_state[index] = - TransactionState::PresentInMempool(transaction.compute_txid()); - } - self.add_txs_to_mempool - .add_txs_to_mempool_inner - .unknown_transactions - .append(&mut unknown_transactions); - // if there still a missing transaction return an error - for tx_with_state in transactions_with_state { - match tx_with_state { - TransactionState::PresentInMempool(_) => continue, - TransactionState::Missing => return Err(Error::JDSMissingTransactions), - } - } - let mut full_token = [0u8; 255]; - declared_job - .mining_job_token - .clone() - .to_bytes(&mut full_token)?; - let mining_job_token = &mut full_token[..32]; - let message_success = DeclareMiningJobSuccess { - request_id: message.request_id, - new_mining_job_token: signed_token( - U256::from_bytes(mining_job_token)?, - &self.public_key.clone(), - &self.private_key.clone(), - ), - }; - let message_enum_success = - JobDeclaration::DeclareMiningJobSuccess(message_success); - return Ok(SendTo::Respond(message_enum_success)); - } - } - None => return Err(Error::NoValidJob), - } - Ok(SendTo::None(None)) - } - - fn handle_push_solution(&mut self, message: PushSolution<'_>) -> Result { - info!("Received PushSolution from JDC"); - debug!("`PushSolution`: {}", message); - let m = JobDeclaration::PushSolution(message.clone().into_static()); - Ok(SendTo::None(Some(m))) - } -} - -fn clear_declared_mining_job( - old_mining_job: DeclareMiningJob, - new_mining_job: &DeclareMiningJob, - mempool: Arc>, -) -> Result<(), Error> { - let old_transactions = old_mining_job.tx_ids_list.inner_as_ref(); - let new_transactions = new_mining_job.tx_ids_list.inner_as_ref(); - - if old_transactions.is_empty() { - info!("No transactions to remove from mempool"); - return Ok(()); - } - - let result = mempool.safe_lock(|mempool_| -> Result<(), Error> { - let mempool_txs = mempool_.mempool.clone(); - - for old_txid in old_transactions - .iter() - 
.filter(|&id| !new_transactions.contains(id)) - { - if let Some(tx) = mempool_txs.get(*old_txid) { - if let Some((transaction, _)) = tx.as_ref() { - let txid = transaction.compute_txid(); - match mempool_.mempool.get_mut(&txid) { - Some(Some((_transaction, counter))) => { - if *counter > 1 { - *counter -= 1; - debug!( - "Fat transaction {:?} counter decremented; job id {:?} dropped", - txid, old_mining_job.request_id - ); - } else { - mempool_.mempool.remove(&txid); - debug!( - "Fat transaction {:?} with job id {:?} removed from mempool", - txid, old_mining_job.request_id - ); - } - } - Some(None) => debug!( - "Thin transaction {:?} with job id {:?} removed from mempool", - txid, old_mining_job.request_id - ), - None => {} - } - } else { - debug!("Transaction with id {:?} is None in mempool", old_txid); - } - } else { - debug!( - "Transaction with id {:?} not found in mempool for old jobs", - old_txid - ); - } - } - Ok(()) - })?; - - result.map_err(|err| Error::PoisonLock(err.to_string())) -} diff --git a/roles/jd-server/src/lib/job_declarator/mod.rs b/roles/jd-server/src/lib/job_declarator/mod.rs deleted file mode 100644 index bc11a649cb..0000000000 --- a/roles/jd-server/src/lib/job_declarator/mod.rs +++ /dev/null @@ -1,645 +0,0 @@ -//! # Job Declarator Server - Protocol and Downstream Handling -//! -//! This module implements the core logic of the **Job Declarator Server (JDS)**. -//! -//! Responsibilities include: -//! - Listening for downstream client connections (JDCs) -//! - Handling the Job Declaration Protocol (AllocateMiningJobToken, DeclareMiningJob, PushSolution, -//! etc.) -//! - Tracking job state and transaction presence -//! - Managing transaction flow into the local mempool -//! - Assembling and submitting full blocks to the upstream node -//! -//! Structure: -//! - [`JobDeclarator`] handles server-level responsibilities like accepting new TCP connections. -//! - [`JobDeclaratorDownstream`] manages the per-client state and protocol interaction. -//! 
-//! The design is one-task-per-downstream, with communication via channels and internal -//! synchronization. - -pub mod message_handler; -use super::{ - error::JdsError, mempool::JDsMempool, status, EitherFrame, JobDeclaratorServerConfig, StdFrame, -}; -use async_channel::{Receiver, Sender}; -use binary_sv2::{self, B0255, U256}; -use bitcoin::{ - block::{Header, Version}, - consensus::{deserialize, encode::serialize}, - hashes::{sha256d::Hash as DHash, Hash}, - Amount, Block, BlockHash, CompactTarget, Transaction, TxOut, Txid, -}; -use codec_sv2::HandshakeRole; -use common_messages_sv2::{ - Protocol, SetupConnection, SetupConnectionError, SetupConnectionSuccess, -}; -use core::panic; -use error_handling::handle_result; -use job_declaration_sv2::{DeclareMiningJob, PushSolution}; -use network_helpers_sv2::noise_connection::Connection; -use nohash_hasher::BuildNoHashHasher; -use noise_sv2::Responder; -use parsers_sv2::{AnyMessage as JdsMessages, JobDeclaration}; -use roles_logic_sv2::{ - handlers::job_declaration::{ParseJobDeclarationMessagesFromDownstream, SendTo}, - utils::Mutex, -}; -use std::{ - collections::HashMap, - convert::TryInto, - sync::{atomic::AtomicU32, Arc}, -}; -use stratum_apps::key_utils::{Secp256k1PublicKey, Secp256k1SecretKey, SignatureService}; -use tokio::{net::TcpListener, time::Duration}; -use tracing::{debug, error, info}; - -/// Represents whether a transaction declared in a mining job is known to the JDS mempool -/// or still missing and needs to be fetched/provided. -#[derive(Clone, Debug)] -pub enum TransactionState { - PresentInMempool(Txid), - Missing, -} - -/// Contains transaction identifiers and full transaction data that need to be -/// added or completed in the JDS mempool. -/// -/// Used internally during the job declaration lifecycle. 
-#[derive(Clone, Debug)] -pub struct AddTrasactionsToMempoolInner { - pub known_transactions: Vec, - pub unknown_transactions: Vec, -} - -/// Wrapper struct enabling transaction updates to be sent via a channel to the mempool task. -#[derive(Clone, Debug)] -pub struct AddTrasactionsToMempool { - pub add_txs_to_mempool_inner: AddTrasactionsToMempoolInner, - pub sender_add_txs_to_mempool: Sender, -} - -/// Represents a single downstream connection to a JDC. -/// -/// This struct tracks all state relevant to one connection, including: -/// - The declared mining job and missing transactions -/// - Mapping between tokens and job IDs -/// - Interaction with the mempool -/// -/// It operates in its own async task and communicates with the rest of the system -/// via channels and locks. - -#[derive(Debug)] -pub struct JobDeclaratorDownstream { - #[allow(dead_code)] - full_template_mode_required: bool, - sender: Sender, - receiver: Receiver, - // TODO this should be computed for each new template so that fees are included - #[allow(dead_code)] - // TODO: use coinbase output - coinbase_output: Vec, - token_to_job_map: HashMap, BuildNoHashHasher>, - tokens: AtomicU32, - public_key: Secp256k1PublicKey, - private_key: Secp256k1SecretKey, - mempool: Arc>, - // Vec is the vector of missing transactions - declared_mining_job: ( - Option>, - Vec, - Vec, - ), - add_txs_to_mempool: AddTrasactionsToMempool, -} - -impl JobDeclaratorDownstream { - /// Creates a new downstream connection context. 
- pub fn new( - full_template_mode_required: bool, - receiver: Receiver, - sender: Sender, - config: &JobDeclaratorServerConfig, - mempool: Arc>, - sender_add_txs_to_mempool: Sender, - ) -> Self { - // TODO: use next variables - let token_to_job_map = HashMap::with_hasher(BuildNoHashHasher::default()); - let tokens = AtomicU32::new(0); - let add_txs_to_mempool_inner = AddTrasactionsToMempoolInner { - known_transactions: vec![], - unknown_transactions: vec![], - }; - let coinbase_output = serialize(&vec![TxOut { - value: Amount::from_sat(0), - script_pubkey: config.coinbase_reward_scripts().script_pubkey().to_owned(), - }]); - - Self { - full_template_mode_required, - receiver, - sender, - coinbase_output, - token_to_job_map, - tokens, - public_key: *config.authority_public_key(), - private_key: *config.authority_secret_key(), - mempool, - declared_mining_job: (None, Vec::new(), Vec::new()), - add_txs_to_mempool: AddTrasactionsToMempool { - add_txs_to_mempool_inner, - sender_add_txs_to_mempool, - }, - } - } - - fn get_block_hex( - self_mutex: Arc>, - message: PushSolution, - ) -> Result> { - let (last_declare_, _, _) = self_mutex - .clone() - .safe_lock(|x| x.declared_mining_job.clone()) - .map_err(|e| Box::new(JdsError::PoisonLock(e.to_string())))?; - let last_declare = last_declare_.ok_or(Box::new(JdsError::NoLastDeclaredJob))?; - let mut transactions_list = Self::collect_txs_in_job(self_mutex)?; - - let hash: [u8; 32] = message - .prev_hash - .to_vec() - .try_into() - .map_err(|_| Box::new(JdsError::InvalidPrevHash))?; - let hash = Hash::from_slice(&hash).expect("32 bytes should always be valid sha256d hash"); - let prev_blockhash = BlockHash::from_raw_hash(hash); - - let dummy_merkle_root = - DHash::from_slice(&[0u8; 32]).expect("32 bytes should always be valid sha256d hash"); - - let header = Header { - version: Version::from_consensus(message.version as i32), - prev_blockhash, - merkle_root: dummy_merkle_root.into(), - time: message.ntime, - bits: 
CompactTarget::from_consensus(message.nbits), - nonce: message.nonce, - }; - - let mut serialized_coinbase = Vec::new(); - serialized_coinbase.extend_from_slice(last_declare.coinbase_tx_prefix.to_vec().as_slice()); - serialized_coinbase.extend_from_slice(message.extranonce.to_vec().as_slice()); - serialized_coinbase.extend_from_slice(last_declare.coinbase_tx_suffix.to_vec().as_slice()); - let coinbase = deserialize(&serialized_coinbase[..]) - .map_err(|_| Box::new(JdsError::InvalidCoinbase))?; - transactions_list.insert(0, coinbase); - - let mut block = Block { - header, - txdata: transactions_list, - }; - - let merkle_root = block - .compute_merkle_root() - .ok_or(Box::new(JdsError::InvalidMerkleRoot))?; - block.header.merkle_root = merkle_root; - - Ok(hex::encode(serialize(&block))) - } - - fn collect_txs_in_job(self_mutex: Arc>) -> Result, Box> { - let (_, transactions_with_state, _) = self_mutex - .clone() - .safe_lock(|x| x.declared_mining_job.clone()) - .map_err(|e| Box::new(JdsError::PoisonLock(e.to_string())))?; - let mempool = self_mutex - .safe_lock(|x| x.mempool.clone()) - .map_err(|e| Box::new(JdsError::PoisonLock(e.to_string())))?; - let mut transactions_list: Vec = Vec::new(); - for tx_with_state in transactions_with_state.iter().enumerate() { - if let TransactionState::PresentInMempool(txid) = tx_with_state.1 { - let tx = mempool - .safe_lock(|x| x.mempool.get(txid).cloned()) - .map_err(|e| JdsError::PoisonLock(e.to_string()))? - .ok_or(Box::new(JdsError::ImpossibleToReconstructBlock( - "Txid not found in jds mempool".to_string(), - )))? 
- .ok_or(Box::new(JdsError::ImpossibleToReconstructBlock( - "Txid found in jds mempool but transactions not present".to_string(), - )))?; - transactions_list.push(tx.0); - } else { - return Err(Box::new(JdsError::ImpossibleToReconstructBlock( - "Unknown transaction".to_string(), - ))); - }; - } - Ok(transactions_list) - } - - async fn send_txs_to_mempool(self_mutex: Arc>) { - let add_txs_to_mempool = self_mutex - .safe_lock(|a| a.add_txs_to_mempool.clone()) - .unwrap(); - let sender_add_txs_to_mempool = add_txs_to_mempool.sender_add_txs_to_mempool; - let add_txs_to_mempool_inner = add_txs_to_mempool.add_txs_to_mempool_inner; - let _ = sender_add_txs_to_mempool - .send(add_txs_to_mempool_inner) - .await; - // the trasnactions sent to the mempool can be freed - let _ = self_mutex.safe_lock(|a| { - a.add_txs_to_mempool.add_txs_to_mempool_inner = AddTrasactionsToMempoolInner { - known_transactions: vec![], - unknown_transactions: vec![], - }; - }); - } - - fn get_transactions_in_job(self_mutex: Arc>) -> Vec { - let mut known_transactions: Vec = Vec::new(); - let job_transactions = self_mutex - .safe_lock(|a| a.declared_mining_job.1.clone()) - .unwrap(); - for transaction in job_transactions { - match transaction { - TransactionState::PresentInMempool(txid) => known_transactions.push(txid), - TransactionState::Missing => { - continue; - } - } - } - known_transactions - } - - /// Sends a single Job Declaration message back to the downstream client. - /// - /// Wraps the message into a `StdFrame` and sends it through the established channel. - pub async fn send( - self_mutex: Arc>, - message: parsers_sv2::JobDeclaration<'static>, - ) -> Result<(), ()> { - let sv2_frame: StdFrame = JdsMessages::JobDeclaration(message).try_into().unwrap(); - let sender = self_mutex.safe_lock(|self_| self_.sender.clone()).unwrap(); - sender.send(sv2_frame.into()).await.map_err(|_| ())?; - Ok(()) - } - - /// Starts the message processing loop for this downstream connection. 
- /// - /// - Waits for incoming SV2 messages - /// - Delegates message parsing to [`ParseJobDeclarationMessagesFromDownstream`] - /// - Sends appropriate responses back to the client - /// - Updates the JDS mempool as needed - /// - /// This loop runs until the client disconnects or a critical error is encountered. - pub fn start( - self_mutex: Arc>, - tx_status: status::Sender, - new_block_sender: Sender, - ) { - let recv = self_mutex.safe_lock(|s| s.receiver.clone()).unwrap(); - tokio::spawn(async move { - loop { - match recv.recv().await { - Ok(message) => { - let mut frame: StdFrame = handle_result!(tx_status, message.try_into()); - let header = frame - .get_header() - .ok_or_else(|| JdsError::Custom(String::from("No header set"))); - let header = handle_result!(tx_status, header); - let message_type = header.msg_type(); - let payload = frame.payload(); - let next_message_to_send = - ParseJobDeclarationMessagesFromDownstream::handle_message_job_declaration( - self_mutex.clone(), - message_type, - payload, - ); - // How works the txs recognition and txs storing in JDS mempool - // when a DMJ arrives, the JDS compares the received transactions with the - // ids in the the JDS mempool. Then there are two scenarios - // 1. the JDS recognizes all the transactions. Then, just before a DMJS is - // sent, the JDS mempool is triggered to fill in the JDS mempool the id - // of declared job with the full transaction (with send_tx_to_mempool - // method(), that eventually will ask the transactions to a bitcoin node - // via RPC) - // 2. there are some unknown txids. Just before sending PMT, the JDS mempool - // is triggered to fill the known txids with the full transactions. 
When - // a PMTS arrives, just before sending a DMJS, the unknown full - // transactions provided by the downstream are added to the JDS mempool - match next_message_to_send { - Ok(SendTo::Respond(m)) => { - match m { - JobDeclaration::AllocateMiningJobToken(_) => { - error!("Send unexpected message: AMJT"); - } - JobDeclaration::AllocateMiningJobTokenSuccess(_) => { - debug!("Send message: AMJTS"); - } - JobDeclaration::DeclareMiningJob(_) => { - error!("Send unexpected message: DMJ"); - } - JobDeclaration::DeclareMiningJobError(_) => { - debug!("Send nmessage: DMJE"); - } - JobDeclaration::DeclareMiningJobSuccess(_) => { - debug!("Send message: DMJS. Updating the JDS mempool."); - Self::send_txs_to_mempool(self_mutex.clone()).await; - } - JobDeclaration::ProvideMissingTransactions(_) => { - debug!("Send message: PMT. Updating the JDS mempool."); - Self::send_txs_to_mempool(self_mutex.clone()).await; - } - JobDeclaration::ProvideMissingTransactionsSuccess(_) => { - error!("Send unexpected PMTS"); - } - JobDeclaration::PushSolution(_) => todo!(), - } - Self::send(self_mutex.clone(), m).await.unwrap(); - } - Ok(SendTo::RelayNewMessage(message)) => { - error!("JD Server: unexpected relay new message {}", message); - } - Ok(SendTo::RelayNewMessageToRemote(remote, message)) => { - error!( - "JD Server: unexpected relay new message to remote. Remote: {:?}, Message: {}", - remote, - message - ); - } - Ok(SendTo::RelaySameMessageToRemote(remote)) => { - error!( - "JD Server: unexpected relay same message to remote. 
Remote: {:?}", - remote - ); - } - Ok(SendTo::Multiple(multiple)) => { - error!("JD Server: unexpected multiple messages: {:?}", multiple); - } - Ok(SendTo::None(m)) => { - match m { - Some(JobDeclaration::PushSolution(message)) => { - match Self::collect_txs_in_job(self_mutex.clone()) { - Ok(_) => { - info!( - "All transactions in downstream job are recognized correctly by the JD Server" - ); - let hexdata = - match JobDeclaratorDownstream::get_block_hex( - self_mutex.clone(), - message, - ) { - Ok(inner) => inner, - Err(e) => { - error!( - "Received solution but encountered error: {:?}", - e - ); - recv.close(); - //TODO should we brake it? - break; - } - }; - let _ = new_block_sender.send(hexdata).await; - } - Err(error) => { - error!("Missing transactions: {:?}", error); - // TODO print here the ip of the downstream - let known_transactions = - JobDeclaratorDownstream::get_transactions_in_job( - self_mutex.clone() - ); - let retrieve_transactions = - AddTrasactionsToMempoolInner { - known_transactions, - unknown_transactions: Vec::new(), - }; - let mempool = self_mutex - .clone() - .safe_lock(|a| a.mempool.clone()) - .unwrap(); - tokio::select! 
{ - _ = JDsMempool::add_tx_data_to_mempool(mempool, retrieve_transactions) => { - match JobDeclaratorDownstream::get_block_hex( - self_mutex.clone(), - message.clone(), - ) { - Ok(hexdata) => { - let _ = new_block_sender.send(hexdata).await; - }, - Err(e) => { - handle_result!( - tx_status, - Err(*e) - ); - } - }; - } - _ = tokio::time::sleep(Duration::from_secs(60)) => {} - } - } - }; - } - Some(JobDeclaration::DeclareMiningJob(_)) => { - error!("JD Server received an unexpected message {:?}", m); - } - Some(JobDeclaration::DeclareMiningJobSuccess(_)) => { - error!("JD Server received an unexpected message {:?}", m); - } - Some(JobDeclaration::DeclareMiningJobError(_)) => { - error!("JD Server received an unexpected message {:?}", m); - } - Some(JobDeclaration::AllocateMiningJobToken(_)) => { - error!("JD Server received an unexpected message {:?}", m); - } - Some(JobDeclaration::AllocateMiningJobTokenSuccess(_)) => { - error!("JD Server received an unexpected message {:?}", m); - } - Some(JobDeclaration::ProvideMissingTransactions(_)) => { - error!("JD Server received an unexpected message {:?}", m); - } - Some(JobDeclaration::ProvideMissingTransactionsSuccess(_)) => { - error!("JD Server received an unexpected message {:?}", m); - } - None => (), - } - } - Err(e) => { - error!("{:?}", e); - handle_result!( - tx_status, - Err(JdsError::Custom("Invalid message received".to_string())) - ); - recv.close(); - break; - } - } - } - Err(err) => { - handle_result!(tx_status, Err(JdsError::ChannelRecv(err))); - break; - } - } - } - }); - } -} - -pub fn signed_token( - tx_hash_list_hash: U256, - _pub_key: &Secp256k1PublicKey, - prv_key: &Secp256k1SecretKey, -) -> B0255<'static> { - let secp = SignatureService::default(); - - let signature = secp.sign(tx_hash_list_hash.to_vec(), prv_key.0); - - // Sign message - signature.as_ref().to_vec().try_into().unwrap() -} - -fn _get_random_token() -> B0255<'static> { - let inner: [u8; 32] = rand::random(); - 
inner.to_vec().try_into().unwrap() -} - -/// The entry point of the Job Declarator Server. -/// -/// Responsible for initializing server state and accepting incoming TCP connections -/// from downstream clients (JDCs). Each client gets a dedicated [`JobDeclaratorDownstream`] -/// instance. -/// -/// Responsibilities: -/// - Listening on the configured address -/// - Performing the SV2 Noise handshake -/// - Handling `SetupConnection` messages -/// - Spawning the downstream message loop -pub struct JobDeclarator {} - -impl JobDeclarator { - /// Starts the Job Declarator server. - /// - /// - Accepts configuration and shared components (status sender, mempool, etc.). - /// - Initializes internal state. - /// - Begins listening for downstream connections via - /// [`JobDeclarator::accept_incoming_connection`]. - pub async fn start( - config: JobDeclaratorServerConfig, - status_tx: crate::status::Sender, - mempool: Arc>, - new_block_sender: Sender, - sender_add_txs_to_mempool: Sender, - ) { - let self_ = Arc::new(Mutex::new(Self {})); - info!("JD INITIALIZED"); - Self::accept_incoming_connection( - self_, - config, - status_tx, - mempool, - new_block_sender, - sender_add_txs_to_mempool, - ) - .await; - } - async fn accept_incoming_connection( - _self_: Arc>, - config: JobDeclaratorServerConfig, - status_tx: crate::status::Sender, - mempool: Arc>, - new_block_sender: Sender, - sender_add_txs_to_mempool: Sender, - ) { - let listener = TcpListener::bind(config.listen_jd_address()).await.unwrap(); - - while let Ok((stream, _)) = listener.accept().await { - let responder = Responder::from_authority_kp( - &config.authority_public_key().into_bytes(), - &config.authority_secret_key().into_bytes(), - std::time::Duration::from_secs(config.cert_validity_sec()), - ) - .unwrap(); - - let addr = stream.peer_addr(); - - if let Ok((receiver, sender)) = - Connection::new(stream, HandshakeRole::Responder(responder)).await - { - match receiver.recv().await { - Ok(EitherFrame::Sv2(mut 
sv2_message)) => { - debug!("Received SV2 message: {:?}", sv2_message); - let payload = sv2_message.payload(); - - if let Ok(setup_connection) = - binary_sv2::from_bytes::(payload) - { - let flag = setup_connection.flags; - let is_valid = SetupConnection::check_flags( - Protocol::JobDeclarationProtocol, - config.full_template_mode_required() as u32, - flag, - ); - - if is_valid { - let success_message = SetupConnectionSuccess { - used_version: 2, - flags: (setup_connection.flags & 1u32), - }; - info!("Sending success message for proxy"); - let sv2_frame: StdFrame = JdsMessages::Common(success_message.into()) - .try_into() - .expect("Failed to convert setup connection response message to standard frame"); - - sender.send(sv2_frame.into()).await.unwrap(); - - let jddownstream = Arc::new(Mutex::new( - JobDeclaratorDownstream::new( - (setup_connection.flags & 1u32) != 0u32, /* this takes a - * bool instead - * of u32 */ - receiver.clone(), - sender.clone(), - &config, - mempool.clone(), - sender_add_txs_to_mempool.clone(), /* each downstream has its own sender (multi producer single consumer) */ - ), - )); - - JobDeclaratorDownstream::start( - jddownstream, - status_tx.clone(), - new_block_sender.clone(), - ); - } else { - let error_message = SetupConnectionError { - flags: flag, - error_code: "unsupported-feature-flags" - .to_string() - .into_bytes() - .try_into() - .unwrap(), - }; - info!("Sending error message for proxy"); - let sv2_frame: StdFrame = JdsMessages::Common(error_message.into()) - .try_into() - .expect("Failed to convert setup connection response message to standard frame"); - - sender.send(sv2_frame.into()).await.unwrap(); - } - } else { - error!("Error parsing SetupConnection message"); - } - } - Ok(EitherFrame::HandShake(handshake_message)) => { - error!( - "Unexpected handshake message from upstream: {:?} at {:?}", - handshake_message, addr - ); - } - Err(e) => { - error!("Error receiving message: {:?}", e); - } - } - } else { - error!("Cannot 
connect to {:?}", addr); - } - } - } -} diff --git a/roles/jd-server/src/lib/mempool/error.rs b/roles/jd-server/src/lib/mempool/error.rs deleted file mode 100644 index cb0e2a2971..0000000000 --- a/roles/jd-server/src/lib/mempool/error.rs +++ /dev/null @@ -1,66 +0,0 @@ -//! ## JDS Mempool Errors -//! -//! This module defines the error types and handling utilities related to the mempool logic in the -//! Job Declarator Server (JDS). -//! -//! These errors are mostly used when interacting with: -//! - the internal mempool data structure -//! - the RPC client that communicates with the Bitcoin node -//! - the synchronization/update routines -//! -//! It also includes a centralized error logging helper (`handle_error`) to standardize warnings -//! and diagnostics across components. - -use rpc_sv2::mini_rpc_client::RpcError; -use std::{convert::From, sync::PoisonError}; -use tracing::{error, warn}; - -/// Errors that may occur during JDS mempool operations. -#[derive(Debug)] -pub enum JdsMempoolError { - /// The mempool was found to be empty (likely due to testnet/signet conditions). - EmptyMempool, - /// Failed to construct a valid RPC client (e.g. invalid URL, malformed credentials). - NoClient, - /// An RPC call to the Bitcoin node failed. - Rpc(RpcError), - /// A poisoned lock was encountered while accessing the mempool - PoisonLock(String), -} - -impl From for JdsMempoolError { - fn from(value: RpcError) -> Self { - JdsMempoolError::Rpc(value) - } -} - -impl From> for JdsMempoolError { - fn from(value: PoisonError) -> Self { - JdsMempoolError::PoisonLock(value.to_string()) - } -} - -/// Logs a structured diagnostic message for a given mempool error. -/// -/// This function is used throughout the codebase to provide more meaningful context -/// in logs when mempool-related operations fail. 
-pub fn handle_error(err: &JdsMempoolError) { - match err { - JdsMempoolError::EmptyMempool => { - warn!("{:?}", err); - warn!("Template Provider is running, but its MEMPOOL is empty (possible reasons: you're testing in testnet, signet, or regtest)"); - } - JdsMempoolError::NoClient => { - error!("{:?}", err); - error!("Unable to establish RPC connection with Template Provider (possible reasons: not fully synced, down)"); - } - JdsMempoolError::Rpc(_) => { - error!("{:?}", err); - error!("Unable to establish RPC connection with Template Provider (possible reasons: not fully synced, down)"); - } - JdsMempoolError::PoisonLock(_) => { - error!("{:?}", err); - error!("Poison lock error)"); - } - } -} diff --git a/roles/jd-server/src/lib/mempool/mod.rs b/roles/jd-server/src/lib/mempool/mod.rs deleted file mode 100644 index 796463ae4a..0000000000 --- a/roles/jd-server/src/lib/mempool/mod.rs +++ /dev/null @@ -1,200 +0,0 @@ -//! ## Mempool Management for the Job Declarator Server (JDS) -//! -//! This module defines the internal mempool of the JDS, responsible for keeping track of known -//! transactions and interacting with the Bitcoin node via RPC. -//! -//! Its core responsibilities are: -//! - Keeping a local copy of txids and (optionally) their full transaction data -//! - Pulling known transactions from the Bitcoin node on demand (via `getrawtransaction`) -//! - Accepting and tracking raw transactions received from clients -//! - Forwarding valid blocks to the Bitcoin node via `submitblock` -//! -//! Internally, `JDsMempool` uses a `HashMap>`: -//! - `None`: transaction only known by ID, data is missing -//! - `Some`: full transaction is known, `u32` is a reference counter for eviction -//! -//! Most methods are `Arc>`-wrapped and should be reviewed for locking efficiency. 
- -pub mod error; -use super::job_declarator::AddTrasactionsToMempoolInner; -use crate::mempool::error::JdsMempoolError; -use async_channel::Receiver; -use bitcoin::{blockdata::transaction::Transaction, hash_types::Txid}; -use hashbrown::HashMap; -use rpc_sv2::{mini_rpc_client, mini_rpc_client::RpcError}; -use std::{str::FromStr, sync::Arc}; - -use roles_logic_sv2::utils::Mutex; -/// Wrapper around a known transaction and its hash. -#[derive(Clone, Debug)] -pub struct TransactionWithHash { - pub id: Txid, - pub tx: Option<(Transaction, u32)>, // Full data and ref count -} - -/// Internal representation of the JDS mempool. -#[derive(Clone, Debug)] -pub struct JDsMempool { - /// Local map of known txids and their associated data (if available). - pub mempool: HashMap>, - /// Auth for RPC connection to the node. - auth: mini_rpc_client::Auth, - /// URI of the Bitcoin node. - url: rpc_sv2::Uri, - /// Receiver for new block solutions coming from JDC. - new_block_receiver: Receiver, -} - -impl JDsMempool { - /// Returns a MiniRpcClient if the URL looks valid. - pub fn get_client(&self) -> Option { - let url = self.url.to_string(); - if url.contains("http") { - let client = mini_rpc_client::MiniRpcClient::new(self.url.clone(), self.auth.clone()); - Some(client) - } else { - None - } - } - - /// This function is used only for debug purposes and should not be used - /// in production code. - #[cfg(debug_assertions)] - pub fn _get_transaction_list(self_: Arc>) -> Vec { - let tx_list = self_.safe_lock(|x| x.mempool.clone()).unwrap(); - let tx_list_: Vec = tx_list.iter().map(|n| *n.0).collect(); - tx_list_ - } - - /// Instantiates a new empty mempool for JDS. 
- pub fn new( - url: rpc_sv2::Uri, - username: String, - password: String, - new_block_receiver: Receiver, - ) -> Self { - let auth = mini_rpc_client::Auth::new(username, password); - let empty_mempool: HashMap> = HashMap::new(); - JDsMempool { - mempool: empty_mempool, - auth, - url, - new_block_receiver, - } - } - - /// Simple RPC ping to verify connection to Bitcoin node. - pub async fn health(self_: Arc>) -> Result<(), JdsMempoolError> { - let client = self_ - .safe_lock(|a| a.get_client())? - .ok_or(JdsMempoolError::NoClient)?; - client.health().await.map_err(JdsMempoolError::Rpc) - } - - /// Inserts transactions into the mempool: - /// - known txids are fetched from the Bitcoin node - /// - unknown txs are directly inserted - pub async fn add_tx_data_to_mempool( - self_: Arc>, - add_txs_to_mempool_inner: AddTrasactionsToMempoolInner, - ) -> Result<(), JdsMempoolError> { - let txids = add_txs_to_mempool_inner.known_transactions; - let transactions = add_txs_to_mempool_inner.unknown_transactions; - let client = self_ - .safe_lock(|a| a.get_client())? - .ok_or(JdsMempoolError::NoClient)?; - // fill in the mempool the transactions id in the mempool with the full transactions - // retrieved from the jd client - for txid in txids { - if let Some(None) = self_ - .safe_lock(|a| a.mempool.get(&txid).cloned()) - .map_err(|e| JdsMempoolError::PoisonLock(e.to_string()))? 
- { - let transaction = client - .get_raw_transaction(&txid.to_string(), None) - .await - .map_err(JdsMempoolError::Rpc)?; - let _ = self_.safe_lock(|a| { - a.mempool - .entry(transaction.compute_txid()) - .and_modify(|entry| { - if let Some((_, count)) = entry { - *count += 1; - } else { - *entry = Some((transaction.clone(), 1)); - } - }) - .or_insert(Some((transaction, 1))); - }); - } - } - - // fill in the mempool the transactions given in input - for transaction in transactions { - let _ = self_.safe_lock(|a| { - a.mempool - .entry(transaction.compute_txid()) - .and_modify(|entry| { - if let Some((_, count)) = entry { - *count += 1; - } else { - *entry = Some((transaction.clone(), 1)); - } - }) - .or_insert(Some((transaction, 1))); - }); - } - Ok(()) - } - - /// Periodically synchronizes the mempool with the Bitcoin node. - /// This only inserts thin entries (`None` as value), not full transactions. - pub async fn update_mempool(self_: Arc>) -> Result<(), JdsMempoolError> { - let client = self_ - .safe_lock(|x| x.get_client())? - .ok_or(JdsMempoolError::NoClient)?; - - let mempool = client.get_raw_mempool().await?; - - let raw_mempool_txids: Result, _> = mempool - .into_iter() - .map(|id| { - Txid::from_str(&id) - .map_err(|err| JdsMempoolError::Rpc(RpcError::Deserialization(err.to_string()))) - }) - .collect(); - - let raw_mempool_txids = raw_mempool_txids?; - - // Holding the lock till the light mempool updation is complete. - let is_mempool_empty = self_.safe_lock(|x| { - raw_mempool_txids.iter().for_each(|txid| { - x.mempool.entry(*txid).or_insert(None); - }); - x.mempool.is_empty() - })?; - - if is_mempool_empty { - Err(JdsMempoolError::EmptyMempool) - } else { - Ok(()) - } - } - - /// Listens for block submissions (hex-encoded) and propagates them to the Bitcoin node. 
- pub async fn on_submit(self_: Arc>) -> Result<(), JdsMempoolError> { - let new_block_receiver: Receiver = - self_.safe_lock(|x| x.new_block_receiver.clone())?; - let client = self_ - .safe_lock(|x| x.get_client())? - .ok_or(JdsMempoolError::NoClient)?; - - while let Ok(block_hex) = new_block_receiver.recv().await { - match mini_rpc_client::MiniRpcClient::submit_block(&client, block_hex).await { - Ok(_) => return Ok(()), - Err(e) => JdsMempoolError::Rpc(e), - }; - } - Ok(()) - } -} diff --git a/roles/jd-server/src/lib/mod.rs b/roles/jd-server/src/lib/mod.rs deleted file mode 100644 index 28e3c72d6e..0000000000 --- a/roles/jd-server/src/lib/mod.rs +++ /dev/null @@ -1,251 +0,0 @@ -//! ## JDS Core Runtime Module -//! -//! This module serves as the central coordination layer of the Job Declarator Server (JDS). -//! -//! It connects all core components: -//! - `mempool`: a local cache of Bitcoin transactions, synchronized via RPC. -//! - `job_declarator`: protocol logic for handling downstream job declaration clients. -//! - `status`: a simple health/error propagation mechanism. -//! - `config`: configuration loader and accessor. -//! -//! The [`JobDeclaratorServer`] struct represents the entrypoint to the system's async runtime. -//! It is launched from `main.rs` and responsible for: -//! - validating config -//! - initializing the mempool -//! - spawning all background tasks -//! - handling graceful shutdowns and task health reporting -//! -//! All components communicate asynchronously using `async_channel`. 
- -pub mod config; -pub mod error; -pub mod job_declarator; -pub mod mempool; -pub mod status; -use async_channel::{bounded, unbounded, Receiver, Sender}; -use config::JobDeclaratorServerConfig; -use error::JdsError; -use error_handling::handle_result; -use job_declarator::JobDeclarator; -use mempool::error::JdsMempoolError; -pub use rpc_sv2::Uri; -use std::{ops::Sub, str::FromStr, sync::Arc}; - -use codec_sv2::{StandardEitherFrame, StandardSv2Frame}; -use parsers_sv2::AnyMessage as JdsMessages; -use roles_logic_sv2::utils::Mutex; -use tokio::{select, task}; -use tracing::{error, info, warn}; - -/// Type alias for incoming SV2 messages. -pub type Message = JdsMessages<'static>; - -/// SV2 frame carrying a parsed JDS message. -pub type StdFrame = StandardSv2Frame; - -/// SV2 frame that can be either a standard message or handshake frame. -pub type EitherFrame = StandardEitherFrame; - -/// The core runtime orchestrator for the JDS system. -/// -/// Starts all essential services (mempool polling, block submission, job declaration protocol) -/// and monitors for shutdown conditions or task failures via a `status` channel. -#[derive(Debug, Clone)] -pub struct JobDeclaratorServer { - config: JobDeclaratorServerConfig, -} - -impl JobDeclaratorServer { - /// Constructs a new instance using the given TOML configuration. - pub fn new(config: JobDeclaratorServerConfig) -> Self { - Self { config } - } - - /// Starts the Job Declarator Server runtime. 
- /// - /// This method spawns the following: - /// - a task for polling the Bitcoin Core mempool - /// - a task for processing new block submissions from downstream clients - /// - a task for listening to incoming downstream connections - /// - a task for integrating transaction data into the local mempool - /// - /// It concludes with a `select!` loop that reacts to: - /// - SIGINT (`tokio::signal::ctrl_c()`) - /// - messages from the `status` channel - /// - /// When a critical error or interrupt is received, the server shuts down cleanly. - pub async fn start(&self) -> Result<(), JdsError> { - let mut config = self.config.clone(); - // Normalize URL to avoid trailing slashes. - if config.core_rpc_url().ends_with('/') { - config.set_core_rpc_url(config.core_rpc_url().trim_end_matches('/').to_string()); - } - let url = config.core_rpc_url().to_string() + ":" + &config.core_rpc_port().to_string(); - let username = config.core_rpc_user(); - let password = config.core_rpc_pass(); - // Channel for sending new blocks to the Bitcoin node - let (new_block_sender, new_block_receiver): (Sender, Receiver) = - bounded(10); - let url = Uri::from_str(&url.clone()).expect("Invalid core rpc url"); - // Shared mempool instance - let mempool = Arc::new(Mutex::new(mempool::JDsMempool::new( - url, - username.to_string(), - password.to_string(), - new_block_receiver, - ))); - let mempool_update_interval = config.mempool_update_interval(); - let mempool_cloned_ = mempool.clone(); - let mempool_cloned_1 = mempool.clone(); - // Pre-flight check: can we reach the RPC node - if let Err(e) = mempool::JDsMempool::health(mempool_cloned_1.clone()).await { - error!("JDS Connection with bitcoin core failed {:?}", e); - return Err(JdsError::MempoolError(e)); - } - let (status_tx, status_rx) = unbounded(); - let sender = status::Sender::Downstream(status_tx.clone()); - let mut last_empty_mempool_warning = - std::time::Instant::now().sub(std::time::Duration::from_secs(60)); - - let 
sender_update_mempool = sender.clone(); - // ========== Task: Periodically update the mempool via RPC ========== // - task::spawn(async move { - loop { - let update_mempool_result: Result<(), mempool::error::JdsMempoolError> = - mempool::JDsMempool::update_mempool(mempool_cloned_.clone()).await; - if let Err(err) = update_mempool_result { - match err { - JdsMempoolError::EmptyMempool => { - if last_empty_mempool_warning.elapsed().as_secs() >= 60 { - warn!("{:?}", err); - warn!("Template Provider is running, but its mempool is empty (possible reasons: you're testing in testnet, signet, or regtest)"); - last_empty_mempool_warning = std::time::Instant::now(); - } - } - JdsMempoolError::NoClient => { - mempool::error::handle_error(&err); - handle_result!(sender_update_mempool, Err(err)); - } - JdsMempoolError::Rpc(_) => { - mempool::error::handle_error(&err); - handle_result!(sender_update_mempool, Err(err)); - } - JdsMempoolError::PoisonLock(_) => { - mempool::error::handle_error(&err); - handle_result!(sender_update_mempool, Err(err)); - } - } - } - tokio::time::sleep(mempool_update_interval).await; - // DO NOT REMOVE THIS LINE - //let _transactions = - // mempool::JDsMempool::_get_transaction_list(mempool_cloned_.clone()); - } - }); - - // ========== Task: Listen for SubmitSolution events ========== // - let mempool_cloned = mempool.clone(); - let sender_submit_solution = sender.clone(); - task::spawn(async move { - loop { - let result = mempool::JDsMempool::on_submit(mempool_cloned.clone()).await; - if let Err(err) = result { - match err { - JdsMempoolError::EmptyMempool => { - if last_empty_mempool_warning.elapsed().as_secs() >= 60 { - warn!("{:?}", err); - warn!("Template Provider is running, but its mempool is empty (possible reasons: you're testing in testnet, signet, or regtest)"); - last_empty_mempool_warning = std::time::Instant::now(); - } - } - _ => { - // TODO here there should be a better error managmenet - mempool::error::handle_error(&err); - 
handle_result!(sender_submit_solution, Err(err)); - } - } - } - } - }); - - // ========== Task: Launch Job Declarator server ========== // - let cloned = config.clone(); - let mempool_cloned = mempool.clone(); - let (sender_add_txs_to_mempool, receiver_add_txs_to_mempool) = unbounded(); - task::spawn(async move { - JobDeclarator::start( - cloned, - sender, - mempool_cloned, - new_block_sender, - sender_add_txs_to_mempool, - ) - .await - }); - - // ========== Task: Add transactions to mempool when received ========== // - task::spawn(async move { - loop { - if let Ok(add_transactions_to_mempool) = receiver_add_txs_to_mempool.recv().await { - let mempool_cloned = mempool.clone(); - task::spawn(async move { - match mempool::JDsMempool::add_tx_data_to_mempool( - mempool_cloned, - add_transactions_to_mempool, - ) - .await - { - Ok(_) => (), - Err(err) => { - // TODO - // here there should be a better error management - mempool::error::handle_error(&err); - } - } - }); - } - } - }); - - // ========== Central Runtime Loop: Shutdown and Error Reactions ========== // - loop { - let task_status = select! 
{ - task_status = status_rx.recv() => task_status, - interrupt_signal = tokio::signal::ctrl_c() => { - match interrupt_signal { - Ok(()) => { - info!("Interrupt received"); - }, - Err(err) => { - error!("Unable to listen for interrupt signal: {}", err); - // we also shut down in case of error - }, - } - break; - } - }; - let task_status: status::Status = task_status.unwrap(); - - match task_status.state { - // Should only be sent by the downstream listener - status::State::DownstreamShutdown(err) => { - error!( - "SHUTDOWN from Downstream: {}\nTry to restart the downstream listener", - err - ); - } - status::State::TemplateProviderShutdown(err) => { - error!("SHUTDOWN from Upstream: {}\nTry to reconnecting or connecting to a new upstream", err); - break; - } - status::State::Healthy(msg) => { - info!("HEALTHY message: {}", msg); - } - status::State::DownstreamInstanceDropped(downstream_id) => { - warn!("Dropping downstream instance {} from jds", downstream_id); - } - } - } - Ok(()) - } -} diff --git a/roles/jd-server/src/lib/status.rs b/roles/jd-server/src/lib/status.rs deleted file mode 100644 index 39b17fc2f6..0000000000 --- a/roles/jd-server/src/lib/status.rs +++ /dev/null @@ -1,451 +0,0 @@ -//! ## Status Reporting System for JDS -//! -//! This module defines how internal components of the Job Declarator Server (JDS) report -//! health, errors, and shutdown conditions back to the main runtime loop in `lib/mod.rs`. -//! -//! At the core, tasks send a [`Status`] (wrapping a [`State`]) through a channel, -//! which is tagged with a [`Sender`] enum to indicate the origin of the message. -//! -//! This allows for centralized, consistent error handling across the application. - -use error_handling; -use parsers_sv2::Mining; - -use super::error::JdsError; - -/// Identifies the component that originated a [`Status`] update. -/// -/// Each sender is associated with a dedicated side of the status channel. 
-/// This lets the central loop distinguish between errors from different parts of the system. -#[derive(Debug)] -pub enum Sender { - /// Downstream task (e.g. per-client connection handler) - Downstream(async_channel::Sender), - /// Listener for incoming downstream connections - DownstreamListener(async_channel::Sender), - /// Template Provider (Bitcoin Core RPC) - Upstream(async_channel::Sender), -} - -impl Clone for Sender { - fn clone(&self) -> Self { - match self { - Self::Downstream(inner) => Self::Downstream(inner.clone()), - Self::DownstreamListener(inner) => Self::DownstreamListener(inner.clone()), - Self::Upstream(inner) => Self::Upstream(inner.clone()), - } - } -} - -/// The kind of event or status being reported by a task. -#[derive(Debug)] -pub enum State { - /// A downstream component (e.g. client) failed and should be shut down. - DownstreamShutdown(JdsError), - /// The Template Provider (upstream Bitcoin Core) failed. - TemplateProviderShutdown(JdsError), - /// A specific downstream instance was dropped (e.g., due to protocol error). - DownstreamInstanceDropped(u32), - /// A generic message to indicate health or non-critical errors. - Healthy(String), -} - -/// Wraps a status update, to be passed through a status channel. -#[derive(Debug)] -pub struct Status { - pub state: State, -} - -/// Sends a [`Status`] message tagged with its [`Sender`] to the central loop. -/// -/// This is the core logic used to determine which status variant should be sent -/// based on the error type and sender context. 
-async fn send_status( - sender: &Sender, - e: JdsError, - outcome: error_handling::ErrorBranch, -) -> error_handling::ErrorBranch { - match sender { - Sender::Downstream(tx) => match e { - JdsError::Sv2ProtocolError((id, Mining::OpenMiningChannelError(_))) => { - tx.send(Status { - state: State::DownstreamInstanceDropped(id), - }) - .await - .unwrap_or(()); - } - JdsError::ChannelRecv(_) => { - tx.send(Status { - state: State::DownstreamShutdown(e), - }) - .await - .unwrap_or(()); - } - JdsError::MempoolError(_) => { - tx.send(Status { - state: State::TemplateProviderShutdown(e), - }) - .await - .unwrap_or(()); - } - _ => { - let string_err = e.to_string(); - tx.send(Status { - state: State::Healthy(string_err), - }) - .await - .unwrap_or(()); - } - }, - Sender::DownstreamListener(tx) => { - tx.send(Status { - state: State::DownstreamShutdown(e), - }) - .await - .unwrap_or(()); - } - Sender::Upstream(tx) => { - tx.send(Status { - state: State::TemplateProviderShutdown(e), - }) - .await - .unwrap_or(()); - } - } - outcome -} - -/// Centralized error dispatcher for the JDS. -/// -/// Used by the `handle_result!` macro across the codebase. -/// Decides whether the task should `Continue` or `Break` based on the error type and source. -pub async fn handle_error(sender: &Sender, e: JdsError) -> error_handling::ErrorBranch { - tracing::debug!("Error: {:?}", &e); - match e { - JdsError::Io(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - JdsError::ChannelSend(_) => { - //This should be a continue because if we fail to send to 1 downstream we should - // continue processing the other downstreams in the loop we are in. 
- // Otherwise if a downstream fails to send to then subsequent downstreams in - // the map won't get send called on them - send_status(sender, e, error_handling::ErrorBranch::Continue).await - } - JdsError::ChannelRecv(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - JdsError::BinarySv2(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - JdsError::Codec(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - JdsError::Noise(_) => send_status(sender, e, error_handling::ErrorBranch::Continue).await, - JdsError::RolesLogic(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - JdsError::Custom(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - JdsError::Framing(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - JdsError::PoisonLock(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - JdsError::Sv2ProtocolError(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - JdsError::MempoolError(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - JdsError::ImpossibleToReconstructBlock(_) => { - send_status(sender, e, error_handling::ErrorBranch::Continue).await - } - JdsError::NoLastDeclaredJob => { - send_status(sender, e, error_handling::ErrorBranch::Continue).await - } - JdsError::InvalidRPCUrl => send_status(sender, e, error_handling::ErrorBranch::Break).await, - JdsError::BadCliArgs => send_status(sender, e, error_handling::ErrorBranch::Break).await, - JdsError::InvalidPrevHash => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - JdsError::InvalidCoinbase => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - JdsError::InvalidMerkleRoot => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - } -} - -#[cfg(test)] -mod tests { - use std::{convert::TryInto, io::Error}; - - use super::*; - use 
async_channel::{bounded, RecvError}; - use binary_sv2; - use codec_sv2; - use framing_sv2; - use mining_sv2::OpenMiningChannelError; - use noise_sv2; - use roles_logic_sv2; - - #[tokio::test] - async fn test_send_status_downstream_listener_shutdown() { - let (tx, rx) = bounded(1); - let sender = Sender::DownstreamListener(tx); - let error = JdsError::ChannelRecv(async_channel::RecvError); - - send_status(&sender, error, error_handling::ErrorBranch::Continue).await; - match rx.recv().await { - Ok(status) => match status.state { - State::DownstreamShutdown(e) => { - assert_eq!(e.to_string(), "Channel recv failed: `RecvError`") - } - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_send_status_upstream_shutdown() { - let (tx, rx) = bounded(1); - let sender = Sender::Upstream(tx); - let error = JdsError::MempoolError(crate::mempool::error::JdsMempoolError::EmptyMempool); - let error_string = error.to_string(); - send_status(&sender, error, error_handling::ErrorBranch::Continue).await; - - match rx.recv().await { - Ok(status) => match status.state { - State::TemplateProviderShutdown(e) => assert_eq!(e.to_string(), error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_io_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let error = JdsError::Io(Error::new(std::io::ErrorKind::Interrupted, "IO error")); - let error_string = error.to_string(); - - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_channel_send_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let 
error = JdsError::ChannelSend(Box::new("error")); - let error_string = error.to_string(); - - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_channel_receive_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let error = JdsError::ChannelRecv(RecvError); - let error_string = error.to_string(); - - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::DownstreamShutdown(e) => assert_eq!(e.to_string(), error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_binary_sv2_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let error = JdsError::BinarySv2(binary_sv2::Error::IoError); - let error_string = error.to_string(); - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_codec_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let error = JdsError::Codec(codec_sv2::Error::InvalidStepForInitiator); - let error_string = error.to_string(); - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_noise_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - 
let error = JdsError::Noise(noise_sv2::Error::HandshakeNotFinalized); - let error_string = error.to_string(); - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_roles_logic_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let error = JdsError::RolesLogic(roles_logic_sv2::Error::BadPayloadSize); - let error_string = error.to_string(); - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_custom_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let error = JdsError::Custom("error".to_string()); - let error_string = error.to_string(); - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_framing_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let error = JdsError::Framing(framing_sv2::Error::ExpectedHandshakeFrame); - let error_string = error.to_string(); - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_poison_lock_error() { - let (tx, rx) = bounded(1); - let sender = 
Sender::Downstream(tx); - let error = JdsError::PoisonLock("error".to_string()); - let error_string = error.to_string(); - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_impossible_to_reconstruct_block_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let error = JdsError::ImpossibleToReconstructBlock("Impossible".to_string()); - let error_string = error.to_string(); - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_no_last_declared_job_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let error = JdsError::NoLastDeclaredJob; - let error_string = error.to_string(); - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::Healthy(e) => assert_eq!(e, error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn test_handle_error_last_mempool_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let error = JdsError::MempoolError(crate::mempool::error::JdsMempoolError::EmptyMempool); - let error_string = error.to_string(); - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::TemplateProviderShutdown(e) => assert_eq!(e.to_string(), error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } - - #[tokio::test] - async fn 
test_handle_error_sv2_protocol_error() { - let (tx, rx) = bounded(1); - let sender = Sender::Downstream(tx); - let inner: [u8; 32] = rand::random(); - let value = inner.to_vec().try_into().unwrap(); - let error = JdsError::Sv2ProtocolError(( - 12, - Mining::OpenMiningChannelError(OpenMiningChannelError { - request_id: 1, - error_code: value, - }), - )); - let error_string = "12"; - handle_error(&sender, error).await; - match rx.recv().await { - Ok(status) => match status.state { - State::DownstreamInstanceDropped(e) => assert_eq!(e.to_string(), error_string), - _ => panic!("Unexpected state received"), - }, - Err(_) => panic!("Failed to receive status"), - } - } -} diff --git a/roles/jd-server/src/main.rs b/roles/jd-server/src/main.rs deleted file mode 100644 index 762a6e5808..0000000000 --- a/roles/jd-server/src/main.rs +++ /dev/null @@ -1,28 +0,0 @@ -//! Entry point for the Job Declarator Server (JDS). -//! -//! This binary parses CLI arguments, loads the TOML configuration file, and -//! starts the main runtime defined in `jd_server::JobDeclaratorServer`. -//! -//! The actual task orchestration and shutdown logic are managed in `lib/mod.rs`. -mod args; -use args::process_cli_args; -use jd_server::JobDeclaratorServer; -use stratum_apps::config_helpers::logging::init_logging; -use tracing::error; - -/// Entrypoint for the Job Declarator Server binary. -/// -/// Loads the configuration from TOML and initializes the main runtime -/// defined in `jd_server::JobDeclaratorServer`. Errors during startup are logged. 
-#[tokio::main] -async fn main() { - let config = match process_cli_args() { - Ok(cfg) => cfg, - Err(e) => { - error!("Failed to process CLI arguments: {}", e); - return; - } - }; - init_logging(config.log_file()); - let _ = JobDeclaratorServer::new(config).start().await; -} diff --git a/roles/pool/Cargo.toml b/roles/pool/Cargo.toml deleted file mode 100644 index dffd724e02..0000000000 --- a/roles/pool/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "pool_sv2" -version = "0.2.0" -authors = ["The Stratum V2 Developers"] -edition = "2021" -description = "SV2 pool role" -documentation = "https://docs.rs/pool_sv2" -readme = "README.md" -homepage = "https://stratumprotocol.org" -repository = "https://github.com/stratum-mining/stratum" -license = "MIT OR Apache-2.0" -keywords = ["stratum", "mining", "bitcoin", "protocol"] - - -[lib] -name = "pool_sv2" -path = "src/lib/mod.rs" - -[dependencies] -stratum-apps = { path = "../stratum-apps", features = ["pool"] } -async-channel = "1.5.1" -rand = "0.8.4" -serde = { version = "1.0.89", features = ["derive", "alloc"], default-features = false } -secp256k1 = { version = "0.28.2", default-features = false, features = ["alloc", "rand", "rand-std"] } -tokio = { version = "1.44.1", features = ["full"] } -ext-config = { version = "0.14.0", features = ["toml"], package = "config" } -tracing = { version = "0.1" } -clap = { version = "4.5.39", features = ["derive"] } diff --git a/roles/pool/README.md b/roles/pool/README.md deleted file mode 100644 index 1e608b80d4..0000000000 --- a/roles/pool/README.md +++ /dev/null @@ -1,54 +0,0 @@ - -# SRI Pool - -SRI Pool is designed to communicate with Downstream role (most typically a Translator Proxy or a Mining Proxy) running SV2 protocol to exploit features introduced by its sub-protocols. 
- -The most typical high level configuration is: - -``` -<--- Most Downstream ----------------------------------------- Most Upstream ---> - -+---------------------------------------------------+ +------------------------+ -| Mining Farm | | Remote Pool | -| | | | -| +-------------------+ +------------------+ | | +-----------------+ | -| | SV1 Mining Device | <-> | Translator Proxy | <------> | SV2 Pool Server | | -| +-------------------+ +------------------+ | | +-----------------+ | -| | | | -+---------------------------------------------------+ +------------------------+ - -``` - -## Setup - -### Configuration File - -`pool-config-hosted-tp-example.toml` and `pool-config-local-tp-example.toml` are examples of configuration files. - -The configuration file contains the following information: - -1. The SRI Pool information which includes the SRI Pool authority public key - (`authority_public_key`), the SRI Pool authority secret key (`authority_secret_key`). -2. The address which it will use to listen to new connection from downstream roles (`listen_address`) -3. The list of uncompressed pubkeys for coinbase payout (`coinbase_outputs`) -4. A string that serves as signature on the coinbase tx (`pool_signature`). -5. The Template Provider address (`tp_address`). -6. Optionally, you may want to verify that your TP connection is authentic. You may get `tp_authority_public_key` from the logs of your TP, for example: - -``` -# 2024-02-13T14:59:24Z Template Provider authority key: EguTM8URcZDQVeEBsM4B5vg9weqEUnufA8pm85fG4bZd -``` - -### Run - -There are two files found in `roles/pool/config-examples` - -1. `pool-config-hosted-tp-example.toml` runs on our community hosted server. -2. `pool-config-example-tp-example.toml` runs with your local config. 
- -Run the Pool: - -```bash -cd roles/pool/config-examples -cargo run -- -c pool-config-hosted-tp-example.toml -``` diff --git a/roles/pool/config-examples/pool-config-hosted-tp-example.toml b/roles/pool/config-examples/pool-config-hosted-tp-example.toml deleted file mode 100644 index cb9747e9fb..0000000000 --- a/roles/pool/config-examples/pool-config-hosted-tp-example.toml +++ /dev/null @@ -1,32 +0,0 @@ -# SRI Pool config -authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" -cert_validity_sec = 3600 -test_only_listen_adress_plain = "0.0.0.0:34250" -listen_address = "0.0.0.0:34254" - -# Coinbase outputs are specified as descriptors. A full list of descriptors is available at -# https://github.com/bitcoin/bips/blob/master/bip-0380.mediawiki#appendix-b-index-of-script-expressions -# Although the `musig` descriptor is not yet supported and the legacy `combo` descriptor never -# will be. If you have an address, embed it in a descriptor like `addr(
)`. -coinbase_reward_script = "addr(tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8)" - -# Server Id (number to guarantee unique search space allocation across different Pool servers) -server_id = 1 - -# Pool signature (string to be included in coinbase tx) -pool_signature = "Stratum V2 SRI Pool" - -# Enable this option to set a predefined log file path. -# When enabled, logs will always be written to this file. -# The CLI option --log-file (or -f) will override this setting if provided. -# log_file = "./pool.log" - -# Template Provider config -# Local TP (this is pointing to localhost so you must run a TP locally for this configuration to work) -#tp_address = "127.0.0.1:8442" -# Hosted testnet TP -tp_address = "75.119.150.111:8442" -tp_authority_public_key = "9bwHCYnjhbHm4AS3pWg9MtAH83mzWohoJJJDELYBqZhDNqszDLc" -shares_per_minute = 6.0 -share_batch_size = 10 \ No newline at end of file diff --git a/roles/pool/config-examples/pool-config-local-tp-example.toml b/roles/pool/config-examples/pool-config-local-tp-example.toml deleted file mode 100644 index 000e3e0fd2..0000000000 --- a/roles/pool/config-examples/pool-config-local-tp-example.toml +++ /dev/null @@ -1,30 +0,0 @@ -# SRI Pool config -authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" -cert_validity_sec = 3600 -test_only_listen_adress_plain = "0.0.0.0:34250" -listen_address = "0.0.0.0:34254" - -# Coinbase outputs are specified as descriptors. A full list of descriptors is available at -# https://github.com/bitcoin/bips/blob/master/bip-0380.mediawiki#appendix-b-index-of-script-expressions -# Although the `musig` descriptor is not yet supported and the legacy `combo` descriptor never -# will be. If you have an address, embed it in a descriptor like `addr(
)`. -coinbase_reward_script = "addr(tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8)" - -# Server Id (number to guarantee unique search space allocation across different Pool servers) -server_id = 1 - -# Pool signature (string to be included in coinbase tx) -pool_signature = "Stratum V2 SRI Pool" - -# Enable this option to set a predefined log file path. -# When enabled, logs will always be written to this file. -# The CLI option --log-file (or -f) will override this setting if provided. -# log_file = "./pool.log" - - -# Template Provider config -# Local TP (this is pointing to localhost so you must run a TP locally for this configuration to work) -tp_address = "127.0.0.1:8442" -shares_per_minute = 6.0 -share_batch_size = 10 \ No newline at end of file diff --git a/roles/pool/src/args.rs b/roles/pool/src/args.rs deleted file mode 100644 index c2c2186b34..0000000000 --- a/roles/pool/src/args.rs +++ /dev/null @@ -1,42 +0,0 @@ -//! CLI argument parsing for the Pool binary. -//! -//! Defines the `Args` struct and a function to process CLI arguments into a PoolConfig. - -use clap::Parser; -use ext_config::{Config, File, FileFormat}; -use pool_sv2::config::PoolConfig; -use std::path::PathBuf; - -/// Holds the parsed CLI arguments for the Pool binary. -#[derive(Parser, Debug)] -#[command(author, version, about = "Pool CLI", long_about = None)] -pub struct Args { - #[arg( - short = 'c', - long = "config", - help = "Path to the TOML configuration file", - default_value = "pool-config.toml" - )] - pub config_path: PathBuf, - #[arg( - short = 'f', - long = "log-file", - help = "Path to the log file. If not set, logs will only be written to stdout." - )] - pub log_file: Option, -} - -/// Parses CLI arguments and loads the PoolConfig from the specified file. 
-pub fn process_cli_args() -> PoolConfig { - let args = Args::parse(); - let config_path = args.config_path.to_str().expect("Invalid config path"); - let mut config: PoolConfig = Config::builder() - .add_source(File::new(config_path, FileFormat::Toml)) - .build() - .and_then(|settings| settings.try_deserialize::()) - .expect("Failed to load or deserialize config"); - - config.set_log_dir(args.log_file); - - config -} diff --git a/roles/pool/src/lib/channel_manager/mining_message_handler.rs b/roles/pool/src/lib/channel_manager/mining_message_handler.rs deleted file mode 100644 index d3b713dec0..0000000000 --- a/roles/pool/src/lib/channel_manager/mining_message_handler.rs +++ /dev/null @@ -1,1030 +0,0 @@ -use std::sync::atomic::Ordering; - -use stratum_apps::stratum_core::{ - binary_sv2::Str0255, - bitcoin::{consensus::Decodable, Amount, Target, TxOut}, - channels_sv2::{ - server::{ - error::{ExtendedChannelError, StandardChannelError}, - extended::ExtendedChannel, - group::GroupChannel, - jobs::job_store::DefaultJobStore, - share_accounting::{ShareValidationError, ShareValidationResult}, - standard::StandardChannel, - }, - Vardiff, VardiffState, - }, - handlers_sv2::{HandleMiningMessagesFromClientAsync, SupportedChannelTypes}, - mining_sv2::*, - parsers_sv2::{Mining, TemplateDistribution}, - template_distribution_sv2::SubmitSolution, -}; -use tracing::{error, info}; - -use crate::{ - channel_manager::{ChannelManager, RouteMessageTo, FULL_EXTRANONCE_SIZE}, - error::PoolError, -}; - -impl HandleMiningMessagesFromClientAsync for ChannelManager { - type Error = PoolError; - - fn get_channel_type_for_client(&self, _client_id: Option) -> SupportedChannelTypes { - SupportedChannelTypes::GroupAndExtended - } - - fn is_work_selection_enabled_for_client(&self, _client_id: Option) -> bool { - true - } - - fn is_client_authorized( - &self, - _client_id: Option, - _user_identity: &Str0255, - ) -> Result { - Ok(true) - } - - async fn handle_close_channel( - &mut self, - 
client_id: Option, - msg: CloseChannel<'_>, - ) -> Result<(), Self::Error> { - info!("Received Close Channel: {msg}"); - let downstream_id = - client_id.expect("client_id must be present for downstream_id extraction"); - self.channel_manager_data - .super_safe_lock(|channel_manager_data| { - let Some(downstream) = channel_manager_data.downstream.get_mut(&downstream_id) - else { - return Err(PoolError::DownstreamNotFound(downstream_id)); - }; - - downstream - .downstream_data - .super_safe_lock(|downstream_data| { - downstream_data.standard_channels.remove(&msg.channel_id); - downstream_data.extended_channels.remove(&msg.channel_id); - }); - Ok(()) - }) - } - - async fn handle_open_standard_mining_channel( - &mut self, - client_id: Option, - msg: OpenStandardMiningChannel<'_>, - ) -> Result<(), Self::Error> { - let request_id = msg.get_request_id_as_u32(); - let user_identity = msg.user_identity.as_utf8_or_hex(); - let downstream_id = - client_id.expect("client_id must be present for downstream_id extraction"); - - info!("Received OpenStandardMiningChannel: {}", msg); - - let messages = self.channel_manager_data.super_safe_lock(|channel_manager_data| { - let Some(downstream) = channel_manager_data.downstream.get_mut(&downstream_id) else { - return Err(PoolError::DownstreamIdNotFound); - }; - - if downstream.requires_custom_work.load(Ordering::SeqCst) { - error!("OpenStandardMiningChannel: Standard Channels are not supported for this connection"); - let open_standard_mining_channel_error = OpenMiningChannelError { - request_id, - error_code: "standard-channels-not-supported-for-custom-work" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - return Ok(vec![(downstream_id, Mining::OpenMiningChannelError(open_standard_mining_channel_error)).into()]); - } - - let Some(last_future_template) = channel_manager_data.last_future_template.clone() else { - return Err(PoolError::FutureTemplateNotPresent); - }; - - let 
Some(last_set_new_prev_hash_tdp) = channel_manager_data.last_new_prev_hash.clone() else { - return Err(PoolError::LastNewPrevhashNotFound); - }; - - - let pool_coinbase_output = TxOut { - value: Amount::from_sat(last_future_template.coinbase_tx_value_remaining), - script_pubkey: self.coinbase_reward_script.script_pubkey(), - }; - - downstream.downstream_data.super_safe_lock(|downstream_data| { - if !downstream.requires_standard_jobs.load(Ordering::SeqCst) && downstream_data.group_channels.is_none() { - let group_channel_id = downstream_data.channel_id_factory.fetch_add(1, Ordering::SeqCst); - let job_store = DefaultJobStore::new(); - - let mut group_channel = match GroupChannel::new_for_pool(group_channel_id as u32, job_store, FULL_EXTRANONCE_SIZE, self.pool_tag_string.clone()) { - Ok(channel) => channel, - Err(e) => { - error!(?e, "Failed to create group channel"); - return Err(PoolError::FailedToCreateGroupChannel(e)); - } - }; - group_channel.on_new_template(last_future_template.clone(), vec![pool_coinbase_output.clone()])?; - - group_channel.on_set_new_prev_hash(last_set_new_prev_hash_tdp.clone())?; - downstream_data.group_channels = Some(group_channel); - } - let nominal_hash_rate = msg.nominal_hash_rate; - let requested_max_target = Target::from_le_bytes(msg.max_target.inner_as_ref().try_into().unwrap()); - let extranonce_prefix = channel_manager_data.extranonce_prefix_factory_standard.next_prefix_standard()?; - - let channel_id = downstream_data.channel_id_factory.fetch_add(1, Ordering::SeqCst); - let job_store = DefaultJobStore::new(); - - let mut standard_channel = match StandardChannel::new_for_pool(channel_id as u32, user_identity.to_string(), extranonce_prefix.to_vec(), requested_max_target, nominal_hash_rate, self.share_batch_size, self.shares_per_minute, job_store, self.pool_tag_string.clone()) { - Ok(channel) => channel, - Err(e) => match e { - StandardChannelError::InvalidNominalHashrate => { - error!("OpenMiningChannelError: 
invalid-nominal-hashrate"); - let open_standard_mining_channel_error = OpenMiningChannelError { - request_id, - error_code: "invalid-nominal-hashrate" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - return Ok(vec![(downstream_id, Mining::OpenMiningChannelError(open_standard_mining_channel_error)).into()]); - } - StandardChannelError::RequestedMaxTargetOutOfRange => { - error!("OpenMiningChannelError: max-target-out-of-range"); - let open_standard_mining_channel_error = OpenMiningChannelError { - request_id, - error_code: "max-target-out-of-range" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - return Ok(vec![(downstream_id, Mining::OpenMiningChannelError(open_standard_mining_channel_error)).into()]); - } - _ => { - error!("error in handle_open_standard_mining_channel: {:?}", e); - return Err(PoolError::ChannelErrorSender); - } - }, - }; - - let group_channel_id = downstream_data.group_channels.as_ref().map(|channel| channel.get_group_channel_id()).unwrap_or(0); - - let open_standard_mining_channel_success = OpenStandardMiningChannelSuccess { - request_id: msg.request_id, - channel_id: channel_id as u32, - target: standard_channel.get_target().to_le_bytes().into(), - extranonce_prefix: standard_channel.get_extranonce_prefix().clone().try_into().expect("Extranonce_prefix must be valid"), - group_channel_id - }.into_static(); - - let mut messages: Vec = Vec::new(); - - messages.push((downstream_id, Mining::OpenStandardMiningChannelSuccess(open_standard_mining_channel_success)).into()); - - let template_id = last_future_template.template_id; - - // create a future standard job based on the last future template - standard_channel.on_new_template(last_future_template, vec![pool_coinbase_output.clone()])?; - let future_standard_job_id = standard_channel - .get_future_template_to_job_id() - .get(&template_id) - .expect("future job id must exist"); - let future_standard_job = standard_channel - 
.get_future_jobs() - .get(future_standard_job_id) - .expect("future job must exist"); - let future_standard_job_message = - future_standard_job.get_job_message().clone().into_static(); - - messages.push((downstream_id, Mining::NewMiningJob(future_standard_job_message)).into()); - let prev_hash = last_set_new_prev_hash_tdp.prev_hash.clone(); - let header_timestamp = last_set_new_prev_hash_tdp.header_timestamp; - let n_bits = last_set_new_prev_hash_tdp.n_bits; - let set_new_prev_hash_mining = SetNewPrevHash { - channel_id: channel_id as u32, - job_id: *future_standard_job_id, - prev_hash, - min_ntime: header_timestamp, - nbits: n_bits, - }; - - - standard_channel - .on_set_new_prev_hash(last_set_new_prev_hash_tdp.clone())?; - - messages.push((downstream_id, Mining::SetNewPrevHash(set_new_prev_hash_mining)).into()); - - downstream_data.standard_channels.insert(channel_id as u32, standard_channel); - if let Some(group_channel) = downstream_data.group_channels.as_mut() { - group_channel.add_standard_channel_id(channel_id as u32); - } - let vardiff = VardiffState::new()?; - channel_manager_data.vardiff.insert((downstream_id, channel_id as u32).into(), vardiff); - - Ok(messages) - }) - })?; - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - async fn handle_open_extended_mining_channel( - &mut self, - client_id: Option, - msg: OpenExtendedMiningChannel<'_>, - ) -> Result<(), Self::Error> { - let request_id = msg.get_request_id_as_u32(); - let user_identity = msg.user_identity.as_utf8_or_hex(); - let downstream_id = - client_id.expect("client_id must be present for downstream_id extraction"); - info!("Received OpenExtendedMiningChannel: {}", msg); - - let nominal_hash_rate = msg.nominal_hash_rate; - let requested_max_target = - Target::from_le_bytes(msg.max_target.inner_as_ref().try_into().unwrap()); - let requested_min_rollable_extranonce_size = msg.min_extranonce_size; - - let messages = self - 
.channel_manager_data - .super_safe_lock(|channel_manager_data| { - let Some(downstream) = channel_manager_data.downstream.get_mut(&downstream_id) - else { - return Err(PoolError::DownstreamIdNotFound); - }; - downstream - .downstream_data - .super_safe_lock(|downstream_data| { - let mut messages: Vec = Vec::new(); - - let extranonce_prefix = match channel_manager_data - .extranonce_prefix_factory_extended - .next_prefix_extended(requested_min_rollable_extranonce_size.into()) - { - Ok(extranonce_prefix) => extranonce_prefix.to_vec(), - Err(_) => { - error!("OpenMiningChannelError: min-extranonce-size-too-large"); - let open_extended_mining_channel_error = OpenMiningChannelError { - request_id, - error_code: "min-extranonce-size-too-large" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - return Ok(vec![( - downstream_id, - Mining::OpenMiningChannelError( - open_extended_mining_channel_error, - ), - ) - .into()]); - } - }; - - let channel_id = downstream_data - .channel_id_factory - .fetch_add(1, Ordering::SeqCst); - let job_store = DefaultJobStore::new(); - - let mut extended_channel = match ExtendedChannel::new_for_pool( - channel_id as u32, - user_identity.to_string(), - extranonce_prefix, - requested_max_target, - nominal_hash_rate, - true, // version rolling always allowed - requested_min_rollable_extranonce_size, - self.share_batch_size, - self.shares_per_minute, - job_store, - self.pool_tag_string.clone(), - ) { - Ok(channel) => channel, - Err(e) => match e { - ExtendedChannelError::InvalidNominalHashrate => { - error!("OpenMiningChannelError: invalid-nominal-hashrate"); - let open_extended_mining_channel_error = - OpenMiningChannelError { - request_id, - error_code: "invalid-nominal-hashrate" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - return Ok(vec![( - downstream_id, - Mining::OpenMiningChannelError( - open_extended_mining_channel_error, - ), - ) - .into()]); - } - 
ExtendedChannelError::RequestedMaxTargetOutOfRange => { - error!("OpenMiningChannelError: max-target-out-of-range"); - let open_extended_mining_channel_error = - OpenMiningChannelError { - request_id, - error_code: "max-target-out-of-range" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - return Ok(vec![( - downstream_id, - Mining::OpenMiningChannelError( - open_extended_mining_channel_error, - ), - ) - .into()]); - } - ExtendedChannelError::RequestedMinExtranonceSizeTooLarge => { - error!("OpenMiningChannelError: min-extranonce-size-too-large"); - let open_extended_mining_channel_error = - OpenMiningChannelError { - request_id, - error_code: "min-extranonce-size-too-large" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - return Ok(vec![( - downstream_id, - Mining::OpenMiningChannelError( - open_extended_mining_channel_error, - ), - ) - .into()]); - } - e => { - error!("error in handle_open_extended_mining_channel: {:?}", e); - return Err(e)?; - } - }, - }; - - let open_extended_mining_channel_success = - OpenExtendedMiningChannelSuccess { - request_id, - channel_id: channel_id as u32, - target: extended_channel.get_target().to_le_bytes().into(), - extranonce_prefix: extended_channel - .get_extranonce_prefix() - .clone() - .try_into()?, - extranonce_size: extended_channel.get_rollable_extranonce_size(), - } - .into_static(); - info!("Sending OpenExtendedMiningChannel.Success (downstream_id: {downstream_id}): {open_extended_mining_channel_success}"); - - messages.push( - ( - downstream_id, - Mining::OpenExtendedMiningChannelSuccess( - open_extended_mining_channel_success, - ), - ) - .into(), - ); - - let Some(last_set_new_prev_hash_tdp) = - channel_manager_data.last_new_prev_hash.clone() - else { - return Err(PoolError::LastNewPrevhashNotFound); - }; - - let Some(last_future_template) = - channel_manager_data.last_future_template.clone() - else { - return Err(PoolError::FutureTemplateNotPresent); - }; 
- - // if the client requires custom work, we don't need to send any extended - // jobs so we just process the SetNewPrevHash - // message - if downstream.requires_custom_work.load(Ordering::SeqCst) { - extended_channel.on_set_new_prev_hash(last_set_new_prev_hash_tdp)?; - // if the client does not require custom work, we need to send the - // future extended job - // and the SetNewPrevHash message - } else { - let pool_coinbase_output = TxOut { - value: Amount::from_sat( - last_future_template.coinbase_tx_value_remaining, - ), - script_pubkey: self.coinbase_reward_script.script_pubkey(), - }; - - extended_channel.on_new_template( - last_future_template.clone(), - vec![pool_coinbase_output], - )?; - - let future_extended_job_id = extended_channel - .get_future_template_to_job_id() - .get(&last_future_template.template_id) - .expect("future job id must exist"); - let future_extended_job = extended_channel - .get_future_jobs() - .get(future_extended_job_id) - .expect("future job must exist"); - - let future_extended_job_message = - future_extended_job.get_job_message().clone().into_static(); - - // send this future job as new job message - // to be immediately activated with the subsequent SetNewPrevHash - // message - messages.push( - ( - downstream_id, - Mining::NewExtendedMiningJob(future_extended_job_message), - ) - .into(), - ); - - // SetNewPrevHash message activates the future job - let prev_hash = last_set_new_prev_hash_tdp.prev_hash.clone(); - let header_timestamp = last_set_new_prev_hash_tdp.header_timestamp; - let n_bits = last_set_new_prev_hash_tdp.n_bits; - let set_new_prev_hash_mining = SetNewPrevHash { - channel_id: channel_id as u32, - job_id: *future_extended_job_id, - prev_hash, - min_ntime: header_timestamp, - nbits: n_bits, - }; - - extended_channel.on_set_new_prev_hash(last_set_new_prev_hash_tdp)?; - - messages.push( - ( - downstream_id, - Mining::SetNewPrevHash(set_new_prev_hash_mining), - ) - .into(), - ); - } - - downstream_data - 
.extended_channels - .insert(channel_id as u32, extended_channel); - let vardiff = VardiffState::new()?; - channel_manager_data - .vardiff - .insert((downstream_id, channel_id as u32).into(), vardiff); - - Ok(messages) - }) - })?; - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - async fn handle_submit_shares_standard( - &mut self, - client_id: Option, - msg: SubmitSharesStandard, - ) -> Result<(), Self::Error> { - info!("Received SubmitSharesStandard: {msg}"); - let downstream_id = - client_id.expect("client_id must be present for downstream_id extraction"); - - let messages = self.channel_manager_data.super_safe_lock(|channel_manager_data| { - let channel_id = msg.channel_id; - - let Some(downstream) = channel_manager_data.downstream.get(&downstream_id) else { - return Err(PoolError::DownstreamNotFound(downstream_id)); - }; - - downstream.downstream_data.super_safe_lock(|downstream_data| { - let mut messages: Vec = Vec::new(); - let Some(standard_channel) = downstream_data.standard_channels.get_mut(&channel_id) else { - let submit_shares_error = SubmitSharesError { - channel_id, - sequence_number: msg.sequence_number, - error_code: "invalid-channel-id" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: invalid-channel-id ❌", downstream_id, channel_id, msg.sequence_number); - return Ok(vec![(downstream_id, Mining::SubmitSharesError(submit_shares_error)).into()]); - }; - - let Some(vardiff) = channel_manager_data.vardiff.get_mut(&(downstream_id, channel_id).into()) else { - return Err(PoolError::VardiffNotFound(channel_id)); - }; - - let res = standard_channel.validate_share(msg.clone()); - vardiff.increment_shares_since_last_update(); - - - match res { - Ok(ShareValidationResult::Valid(share_hash)) => { - let share_accounting = standard_channel.get_share_accounting(); - if 
share_accounting.should_acknowledge() { - let success = SubmitSharesSuccess { - channel_id, - last_sequence_number: share_accounting.get_last_share_sequence_number(), - new_submits_accepted_count: share_accounting.get_last_batch_accepted(), - new_shares_sum: share_accounting.get_last_batch_work_sum() as u64, - }; - info!("SubmitSharesStandard: {} βœ…", success); - messages.push((downstream_id, Mining::SubmitSharesSuccess(success)).into()); - } else { - let share_work = standard_channel.get_target().difficulty_float(); - info!( - "SubmitSharesStandard: valid share | downstream_id: {}, channel_id: {}, sequence_number: {}, share_hash: {}, share_work: {} βœ…", - downstream_id, channel_id, msg.sequence_number, share_hash, share_work - ); - } - - } - Ok(ShareValidationResult::BlockFound(share_hash, template_id, coinbase)) => { - info!("SubmitSharesStandard: πŸ’° Block Found!!! πŸ’°{share_hash}"); - // if we have a template id (i.e.: this was not a custom job) - // we can propagate the solution to the TP - if let Some(template_id) = template_id { - info!("SubmitSharesStandard: Propagating solution to the Template Provider."); - let solution = SubmitSolution { - template_id, - version: msg.version, - header_timestamp: msg.ntime, - header_nonce: msg.nonce, - coinbase_tx: coinbase.try_into()?, - }; - messages.push(TemplateDistribution::SubmitSolution(solution).into()); - } - let share_accounting = standard_channel.get_share_accounting(); - let success = SubmitSharesSuccess { - channel_id, - last_sequence_number: share_accounting.get_last_share_sequence_number(), - new_submits_accepted_count: share_accounting.get_last_batch_accepted(), - new_shares_sum: share_accounting.get_last_batch_work_sum() as u64, - }; - messages.push((downstream_id, Mining::SubmitSharesSuccess(success)).into()); - } - Err(ShareValidationError::Invalid) => { - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: invalid-share ❌", downstream_id, channel_id, 
msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "invalid-share" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(ShareValidationError::Stale) => { - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: stale-share ❌", downstream_id, channel_id, msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "stale-share" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(ShareValidationError::InvalidJobId) => { - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: invalid-job-id ❌", downstream_id, channel_id, msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "invalid-job-id" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(ShareValidationError::DoesNotMeetTarget) => { - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: difficulty-too-low ❌", downstream_id, channel_id, msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "difficulty-too-low" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(ShareValidationError::DuplicateShare) => { - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: duplicate-share 
❌", downstream_id, channel_id, msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "duplicate-share" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(e) => { - return Err(e)?; - } - } - - Ok(messages) - }) - })?; - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - async fn handle_submit_shares_extended( - &mut self, - client_id: Option, - msg: SubmitSharesExtended<'_>, - ) -> Result<(), Self::Error> { - info!("Received SubmitSharesExtended: {msg}"); - let downstream_id = - client_id.expect("client_id must be present for downstream_id extraction"); - let messages = self.channel_manager_data.super_safe_lock(|channel_manager_data| { - let channel_id = msg.channel_id; - let Some(downstream) = channel_manager_data.downstream.get(&downstream_id) else { - return Err(PoolError::DownstreamNotFound(downstream_id)); - }; - - downstream.downstream_data.super_safe_lock(|downstream_data| { - let mut messages: Vec = Vec::new(); - let Some(extended_channel) = downstream_data.extended_channels.get_mut(&channel_id) else { - let error = SubmitSharesError { - channel_id, - sequence_number: msg.sequence_number, - error_code: "invalid-channel-id" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: invalid-channel-id ❌", downstream_id, channel_id, msg.sequence_number); - return Ok(vec![(downstream_id, Mining::SubmitSharesError(error)).into()]); - }; - - let Some(vardiff) = channel_manager_data.vardiff.get_mut(&(downstream_id, channel_id).into()) else { - return Err(PoolError::VardiffNotFound(channel_id)); - }; - - let res = extended_channel.validate_share(msg.clone()); - 
vardiff.increment_shares_since_last_update(); - - match res { - Ok(ShareValidationResult::Valid(share_hash)) => { - let share_accounting = extended_channel.get_share_accounting(); - if share_accounting.should_acknowledge() { - let success = SubmitSharesSuccess { - channel_id, - last_sequence_number: share_accounting.get_last_share_sequence_number(), - new_submits_accepted_count: share_accounting.get_last_batch_accepted(), - new_shares_sum: share_accounting.get_last_batch_work_sum() as u64, - }; - info!("SubmitSharesExtended: {} βœ…", success); - messages.push((downstream_id, Mining::SubmitSharesSuccess(success)).into()); - } else { - let share_work = extended_channel.get_target().difficulty_float(); - info!( - "SubmitSharesExtended: valid share | downstream_id: {}, channel_id: {}, sequence_number: {}, share_hash: {}, share_work: {} βœ…", - downstream_id, channel_id, msg.sequence_number, share_hash, share_work - ); - } - } - Ok(ShareValidationResult::BlockFound(share_hash, template_id, coinbase)) => { - info!("SubmitSharesExtended: πŸ’° Block Found!!! 
πŸ’°{share_hash}"); - // if we have a template id (i.e.: this was not a custom job) - // we can propagate the solution to the TP - if let Some(template_id) = template_id { - info!("SubmitSharesExtended: Propagating solution to the Template Provider."); - let solution = SubmitSolution { - template_id, - version: msg.version, - header_timestamp: msg.ntime, - header_nonce: msg.nonce, - coinbase_tx: coinbase.try_into()?, - }; - messages.push(TemplateDistribution::SubmitSolution(solution).into()); - } - let share_accounting = extended_channel.get_share_accounting(); - let success = SubmitSharesSuccess { - channel_id, - last_sequence_number: share_accounting.get_last_share_sequence_number(), - new_submits_accepted_count: share_accounting.get_last_batch_accepted(), - new_shares_sum: share_accounting.get_last_batch_work_sum() as u64, - }; - messages.push((downstream_id, Mining::SubmitSharesSuccess(success)).into()); - } - Err(ShareValidationError::Invalid) => { - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: invalid-share ❌", downstream_id, channel_id, msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "invalid-share" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(ShareValidationError::Stale) => { - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: stale-share ❌", downstream_id, channel_id, msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "stale-share" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(ShareValidationError::InvalidJobId) => { - error!("SubmitSharesError: 
downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: invalid-job-id ❌", downstream_id, channel_id, msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "invalid-job-id" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(ShareValidationError::DoesNotMeetTarget) => { - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: difficulty-too-low ❌", downstream_id, channel_id, msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "difficulty-too-low" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(ShareValidationError::DuplicateShare) => { - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: duplicate-share ❌", downstream_id, channel_id, msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "duplicate-share" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(ShareValidationError::BadExtranonceSize) => { - error!("SubmitSharesError: downstream_id: {}, channel_id: {}, sequence_number: {}, error_code: bad-extranonce-size ❌", downstream_id, channel_id, msg.sequence_number); - let error = SubmitSharesError { - channel_id: msg.channel_id, - sequence_number: msg.sequence_number, - error_code: "bad-extranonce-size" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::SubmitSharesError(error)).into()); - } - Err(e) => { - 
return Err(e)?; - } - } - - Ok(messages) - }) - })?; - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - async fn handle_update_channel( - &mut self, - client_id: Option, - msg: UpdateChannel<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - let downstream_id = - client_id.expect("client_id must be present for downstream_id extraction"); - - let messages: Vec = self.channel_manager_data.super_safe_lock(|channel_manager_data| { - let Some(downstream) = channel_manager_data.downstream.get(&downstream_id) else { - return Err(PoolError::DownstreamNotFound(downstream_id)); - }; - - downstream.downstream_data.super_safe_lock(|downstream_data| { - let mut messages = Vec::new(); - let channel_id = msg.channel_id; - let new_nominal_hash_rate = msg.nominal_hash_rate; - let requested_maximum_target = Target::from_le_bytes(msg.maximum_target.inner_as_ref().try_into().unwrap()); - - if let Some(standard_channel) = downstream_data.standard_channels.get_mut(&channel_id) { - let res = standard_channel - .update_channel(new_nominal_hash_rate, Some(requested_maximum_target)); - match res { - Ok(_) => {} - Err(e) => { - error!("UpdateChannelError: {:?}", e); - match e { - StandardChannelError::InvalidNominalHashrate => { - error!("UpdateChannelError: invalid-nominal-hashrate"); - let update_channel_error = UpdateChannelError { - channel_id, - error_code: "invalid-nominal-hashrate" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::UpdateChannelError(update_channel_error)).into()); - } - StandardChannelError::RequestedMaxTargetOutOfRange => { - error!("UpdateChannelError: requested-max-target-out-of-range"); - let update_channel_error = UpdateChannelError { - channel_id, - error_code: "requested-max-target-out-of-range" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, 
Mining::UpdateChannelError(update_channel_error)).into()); - } - standard_channel_error => { - return Err(standard_channel_error)?; - } - } - } - } - let new_target = standard_channel.get_target(); - let set_target = SetTarget { - channel_id, - maximum_target: new_target.to_le_bytes().into(), - }; - messages.push((downstream_id, Mining::SetTarget(set_target)).into()); - } else if let Some(extended_channel) = downstream_data.extended_channels.get_mut(&channel_id) { - let res = extended_channel - .update_channel(new_nominal_hash_rate, Some(requested_maximum_target)); - match res { - Ok(_) => {} - Err(e) => { - error!("UpdateChannelError: {:?}", e); - match e { - ExtendedChannelError::InvalidNominalHashrate => { - error!("UpdateChannelError: invalid-nominal-hashrate"); - let update_channel_error = UpdateChannelError { - channel_id, - error_code: "invalid-nominal-hashrate" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::UpdateChannelError(update_channel_error)).into()); - } - ExtendedChannelError::RequestedMaxTargetOutOfRange => { - error!("UpdateChannelError: max-target-out-of-range"); - let update_channel_error = UpdateChannelError { - channel_id, - error_code: "max-target-out-of-range" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - messages.push((downstream_id, Mining::UpdateChannelError(update_channel_error)).into()); - } - extended_channel_error => { - return Err(extended_channel_error)?; - } - } - } - } - let new_target = extended_channel.get_target(); - let set_target = SetTarget { - channel_id, - maximum_target: new_target.to_le_bytes().into(), - }; - messages.push((downstream_id, Mining::SetTarget(set_target)).into()); - } else { - error!("UpdateChannelError: invalid-channel-id"); - let update_channel_error = UpdateChannelError { - channel_id, - error_code: "invalid-channel-id" - .to_string() - .try_into() - .expect("error code must be valid string"), - 
}; - messages.push((downstream_id, Mining::UpdateChannelError(update_channel_error)).into()); - } - - Ok(messages) - }) - })?; - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - async fn handle_set_custom_mining_job( - &mut self, - client_id: Option, - msg: SetCustomMiningJob<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - let downstream_id = - client_id.expect("client_id must be present for downstream_id extraction"); - - // this is a naive implementation, but ideally we should check the SetCustomMiningJob - // message parameters, especially: - // - the mining_job_token - // - the amount of the pool payout output - let custom_job_coinbase_outputs = Vec::::consensus_decode( - &mut msg.coinbase_tx_outputs.inner_as_ref().to_vec().as_slice(), - )?; - - let message: RouteMessageTo = - self.channel_manager_data - .super_safe_lock(|channel_manager_data| { - // check that the script_pubkey from self.coinbase_reward_script - // is present in the custom job coinbase outputs - let missing_script = !custom_job_coinbase_outputs.iter().any(|pool_output| { - *pool_output.script_pubkey == *self.coinbase_reward_script.script_pubkey() - }); - - if missing_script { - error!("SetCustomMiningJobError: pool-payout-script-missing"); - - let error = SetCustomMiningJobError { - request_id: msg.request_id, - channel_id: msg.channel_id, - error_code: "pool-payout-script-missing" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - - return Ok((downstream_id, Mining::SetCustomMiningJobError(error)).into()); - } - - let Some(downstream) = channel_manager_data.downstream.get_mut(&downstream_id) - else { - return Err(PoolError::DownstreamNotFound(downstream_id)); - }; - - downstream - .downstream_data - .super_safe_lock(|downstream_data| { - let Some(extended_channel) = - downstream_data.extended_channels.get_mut(&msg.channel_id) - else { - error!("SetCustomMiningJobError: 
invalid-channel-id"); - let error = SetCustomMiningJobError { - request_id: msg.request_id, - channel_id: msg.channel_id, - error_code: "invalid-channel-id" - .to_string() - .try_into() - .expect("error code must be valid string"), - }; - return Ok( - (downstream_id, Mining::SetCustomMiningJobError(error)).into() - ); - }; - - let job_id = extended_channel - .on_set_custom_mining_job(msg.clone().into_static())?; - - let success = SetCustomMiningJobSuccess { - channel_id: msg.channel_id, - request_id: msg.request_id, - job_id, - }; - Ok((downstream_id, Mining::SetCustomMiningJobSuccess(success)).into()) - }) - })?; - - message.forward(&self.channel_manager_channel).await; - Ok(()) - } -} diff --git a/roles/pool/src/lib/channel_manager/mod.rs b/roles/pool/src/lib/channel_manager/mod.rs deleted file mode 100644 index ea876f3c5b..0000000000 --- a/roles/pool/src/lib/channel_manager/mod.rs +++ /dev/null @@ -1,564 +0,0 @@ -use std::{ - collections::HashMap, - net::SocketAddr, - sync::{atomic::AtomicUsize, Arc}, -}; - -use async_channel::{Receiver, Sender}; -use core::sync::atomic::Ordering; -use stratum_apps::{ - config_helpers::CoinbaseRewardScript, - custom_mutex::Mutex, - key_utils::{Secp256k1PublicKey, Secp256k1SecretKey}, - network_helpers::noise_stream::NoiseTcpStream, - stratum_core::{ - channels_sv2::{ - server::{ - extended::ExtendedChannel, - jobs::{extended::ExtendedJob, job_store::DefaultJobStore, standard::StandardJob}, - standard::StandardChannel, - }, - Vardiff, VardiffState, - }, - codec_sv2::HandshakeRole, - handlers_sv2::{ - HandleMiningMessagesFromClientAsync, HandleTemplateDistributionMessagesFromServerAsync, - }, - mining_sv2::{ExtendedExtranonce, SetTarget}, - noise_sv2::Responder, - parsers_sv2::{Mining, TemplateDistribution}, - template_distribution_sv2::{NewTemplate, SetNewPrevHash}, - }, -}; -use tokio::{net::TcpListener, select, sync::broadcast}; -use tracing::{debug, error, info, warn}; - -use crate::{ - config::PoolConfig, - 
downstream::Downstream, - error::PoolResult, - status::{handle_error, Status, StatusSender}, - task_manager::TaskManager, - utils::{Message, ShutdownMessage, VardiffKey}, -}; - -mod mining_message_handler; -mod template_distribution_message_handler; - -const POOL_ALLOCATION_BYTES: usize = 4; -const CLIENT_SEARCH_SPACE_BYTES: usize = 8; -pub const FULL_EXTRANONCE_SIZE: usize = POOL_ALLOCATION_BYTES + CLIENT_SEARCH_SPACE_BYTES; - -pub struct ChannelManagerData { - // Mapping of `downstream_id` β†’ `Downstream` object, - // used by the channel manager to locate and interact with downstream clients. - downstream: HashMap, - // Extranonce prefix factory for **extended downstream channels**. - // Each new extended downstream receives a unique extranonce prefix. - extranonce_prefix_factory_extended: ExtendedExtranonce, - // Extranonce prefix factory for **standard downstream channels**. - // Each new standard downstream receives a unique extranonce prefix. - extranonce_prefix_factory_standard: ExtendedExtranonce, - // Factory that assigns a unique ID to each new **downstream connection**. - downstream_id_factory: AtomicUsize, - // Mapping of `(downstream_id, channel_id)` β†’ vardiff controller. - // Each entry manages variable difficulty for a specific downstream channel. - vardiff: HashMap, - // Coinbase outputs - coinbase_outputs: Vec, - // Last new prevhash - last_new_prev_hash: Option>, - // Last future template - last_future_template: Option>, -} - -#[derive(Clone)] -pub struct ChannelManagerChannel { - tp_sender: Sender>, - tp_receiver: Receiver>, - downstream_sender: broadcast::Sender<(usize, Mining<'static>)>, - downstream_receiver: Receiver<(usize, Mining<'static>)>, -} - -/// Contains all the state of mutable and immutable data required -/// by channel manager to process its task along with channels -/// to perform message traversal. 
-#[derive(Clone)] -pub struct ChannelManager { - channel_manager_data: Arc>, - channel_manager_channel: ChannelManagerChannel, - pool_tag_string: String, - share_batch_size: usize, - shares_per_minute: f32, - coinbase_reward_script: CoinbaseRewardScript, -} - -impl ChannelManager { - /// Constructor method used to instantiate the ChannelManager - #[allow(clippy::too_many_arguments)] - pub async fn new( - config: PoolConfig, - tp_sender: Sender>, - tp_receiver: Receiver>, - downstream_sender: broadcast::Sender<(usize, Mining<'static>)>, - downstream_receiver: Receiver<(usize, Mining<'static>)>, - coinbase_outputs: Vec, - ) -> PoolResult { - let range_0 = 0..0; - let range_1 = 0..POOL_ALLOCATION_BYTES; - let range_2 = POOL_ALLOCATION_BYTES..POOL_ALLOCATION_BYTES + CLIENT_SEARCH_SPACE_BYTES; - - let make_extranonce_factory = || { - // simulating a scenario where there are multiple mining servers - // this static prefix allows unique extranonce_prefix allocation - // for this mining server - let static_prefix = config.server_id().to_be_bytes().to_vec(); - - ExtendedExtranonce::new( - range_0.clone(), - range_1.clone(), - range_2.clone(), - Some(static_prefix), - ) - .expect("Failed to create ExtendedExtranonce with valid ranges") - }; - - let extranonce_prefix_factory_extended = make_extranonce_factory(); - let extranonce_prefix_factory_standard = make_extranonce_factory(); - - let channel_manager_data = Arc::new(Mutex::new(ChannelManagerData { - downstream: HashMap::new(), - extranonce_prefix_factory_extended, - extranonce_prefix_factory_standard, - downstream_id_factory: AtomicUsize::new(1), - vardiff: HashMap::new(), - coinbase_outputs, - last_future_template: None, - last_new_prev_hash: None, - })); - - let channel_manager_channel = ChannelManagerChannel { - tp_sender, - tp_receiver, - downstream_sender, - downstream_receiver, - }; - - let channel_manager = ChannelManager { - channel_manager_data, - channel_manager_channel, - share_batch_size: 
config.share_batch_size(), - shares_per_minute: config.shares_per_minute(), - pool_tag_string: config.pool_signature().to_string(), - coinbase_reward_script: config.coinbase_reward_script().clone(), - }; - - Ok(channel_manager) - } - - /// Starts the downstream server, and accepts new connection request. - #[allow(clippy::too_many_arguments)] - pub async fn start_downstream_server( - self, - authority_public_key: Secp256k1PublicKey, - authority_secret_key: Secp256k1SecretKey, - cert_validity_sec: u64, - listening_address: SocketAddr, - task_manager: Arc, - notify_shutdown: broadcast::Sender, - status_sender: Sender, - channel_manager_sender: Sender<(usize, Mining<'static>)>, - channel_manager_receiver: broadcast::Sender<(usize, Mining<'static>)>, - ) -> PoolResult<()> { - info!("Starting downstream server at {listening_address}"); - let server = TcpListener::bind(listening_address).await.map_err(|e| { - error!(error = ?e, "Failed to bind downstream server at {listening_address}"); - e - })?; - - let mut shutdown_rx = notify_shutdown.subscribe(); - - let task_manager_clone = task_manager.clone(); - task_manager.spawn(async move { - - loop { - select! 
{ - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Channel Manager: received shutdown signal"); - break; - } - Err(e) => { - warn!(error = ?e, "shutdown channel closed unexpectedly"); - break; - } - _ => {} - } - } - res = server.accept() => { - match res { - Ok((stream, socket_address)) => { - info!(%socket_address, "New downstream connection"); - let responder = match Responder::from_authority_kp( - &authority_public_key.into_bytes(), - &authority_secret_key.into_bytes(), - std::time::Duration::from_secs(cert_validity_sec), - ) { - Ok(r) => r, - Err(e) => { - error!(error = ?e, "Failed to create responder"); - continue; - } - }; - let noise_stream = match NoiseTcpStream::::new( - stream, - HandshakeRole::Responder(responder), - ) - .await - { - Ok(ns) => ns, - Err(e) => { - error!(error = ?e, "Noise handshake failed"); - continue; - } - }; - - let downstream_id = self - .channel_manager_data - .super_safe_lock(|data| data.downstream_id_factory.fetch_add(1, Ordering::SeqCst)); - - - let downstream = Downstream::new( - downstream_id, - channel_manager_sender.clone(), - channel_manager_receiver.clone(), - noise_stream, - notify_shutdown.clone(), - task_manager_clone.clone(), - status_sender.clone(), - ); - - - self.channel_manager_data.super_safe_lock(|data| { - data.downstream.insert(downstream_id, downstream.clone()); - }); - - downstream - .start( - notify_shutdown.clone(), - status_sender.clone(), - task_manager_clone.clone(), - ) - .await; - } - - Err(e) => { - error!(error = ?e, "Failed to accept new downstream connection"); - } - } - } - } - } - info!("Downstream server: Unified loop break"); - }); - Ok(()) - } - - /// The central orchestrator of the Channel Manager. - /// - /// Responsible for receiving messages from all subsystems, processing them, - /// and either forwarding them to the appropriate subsystem or updating - /// the internal state of the Channel Manager as needed. 
- pub async fn start( - self, - notify_shutdown: broadcast::Sender, - status_sender: Sender, - task_manager: Arc, - ) -> PoolResult<()> { - let status_sender = StatusSender::ChannelManager(status_sender); - let mut shutdown_rx = notify_shutdown.subscribe(); - - task_manager.spawn(async move { - let cm = self.clone(); - let vardiff_future = self.run_vardiff_loop(); - tokio::pin!(vardiff_future); - loop { - let mut cm_template = cm.clone(); - let mut cm_downstreams = cm.clone(); - tokio::select! { - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Channel Manager: received shutdown signal"); - break; - } - Ok(ShutdownMessage::DownstreamShutdown(downstream_id)) => { - info!(%downstream_id, "Channel Manager: removing downstream after shutdown"); - if let Err(e) = self.remove_downstream(downstream_id) { - tracing::error!(%downstream_id, error = ?e, "Failed to remove downstream"); - } - } - Err(e) => { - warn!(error = ?e, "shutdown channel closed unexpectedly"); - break; - } - _ => {} - } - } - res = &mut vardiff_future => { - info!("Vardiff loop completed with: {res:?}"); - } - res = cm_template.handle_template_provider_message() => { - if let Err(e) = res { - error!(error = ?e, "Error handling Template Receiver message"); - handle_error(&status_sender, e).await; - break; - } - } - res = cm_downstreams.handle_downstream_mining_message() => { - if let Err(e) = res { - error!(error = ?e, "Error handling Downstreams message"); - handle_error(&status_sender, e).await; - break; - } - } - } - } - }); - Ok(()) - } - - // Removes a Downstream entry from the ChannelManager’s state. - // - // Given a `downstream_id`, this method: - // 1. Removes the corresponding Downstream from the `downstream` map. 
- #[allow(clippy::result_large_err)] - fn remove_downstream(&self, downstream_id: usize) -> PoolResult<()> { - self.channel_manager_data.super_safe_lock(|cm_data| { - cm_data.downstream.remove(&downstream_id); - }); - Ok(()) - } - - // Handles messages received from the TP subsystem. - // - // This method listens for incoming frames on the `tp_receiver` channel. - // - If the frame contains a TemplateDistribution message, it forwards it to the template - // distribution message handler. - // - If the frame contains any unsupported message type, an error is returned. - async fn handle_template_provider_message(&mut self) -> PoolResult<()> { - if let Ok(message) = self.channel_manager_channel.tp_receiver.recv().await { - self.handle_template_distribution_message_from_server(None, message) - .await?; - } - Ok(()) - } - - async fn handle_downstream_mining_message(&mut self) -> PoolResult<()> { - if let Ok((downstream_id, message)) = self - .channel_manager_channel - .downstream_receiver - .recv() - .await - { - self.handle_mining_message_from_client(Some(downstream_id), message) - .await?; - } - - Ok(()) - } - - // Runs the vardiff on extended channel. 
- fn run_vardiff_on_extended_channel( - downstream_id: usize, - channel_id: u32, - channel_state: &mut ExtendedChannel<'static, DefaultJobStore>>, - vardiff_state: &mut VardiffState, - updates: &mut Vec, - ) { - let (hashrate, target, shares_per_minute) = ( - channel_state.get_nominal_hashrate(), - channel_state.get_target(), - channel_state.get_shares_per_minute(), - ); - - let Ok(new_hashrate_opt) = vardiff_state.try_vardiff(hashrate, target, shares_per_minute) - else { - debug!("Vardiff computation failed for extended channel {channel_id}"); - return; - }; - - let Some(new_hashrate) = new_hashrate_opt else { - return; - }; - - match channel_state.update_channel(new_hashrate, None) { - Ok(()) => { - let updated_target = channel_state.get_target(); - updates.push( - ( - downstream_id, - Mining::SetTarget(SetTarget { - channel_id, - maximum_target: updated_target.to_le_bytes().into(), - }), - ) - .into(), - ); - debug!("Updated target for extended channel_id={channel_id} to {updated_target:?}",); - } - Err(e) => warn!( - "Failed to update extended channel channel_id={channel_id} during vardiff {e:?}" - ), - } - } - - // Runs the vardiff on the standard channel. 
- fn run_vardiff_on_standard_channel( - downstream_id: usize, - channel_id: u32, - channel: &mut StandardChannel<'static, DefaultJobStore>>, - vardiff_state: &mut VardiffState, - updates: &mut Vec, - ) { - let hashrate = channel.get_nominal_hashrate(); - let target = channel.get_target(); - let shares_per_minute = channel.get_shares_per_minute(); - - let Ok(new_hashrate_opt) = vardiff_state.try_vardiff(hashrate, target, shares_per_minute) - else { - debug!("Vardiff computation failed for standard channel {channel_id}"); - return; - }; - - if let Some(new_hashrate) = new_hashrate_opt { - match channel.update_channel(new_hashrate, None) { - Ok(()) => { - let updated_target = channel.get_target(); - updates.push( - ( - downstream_id, - Mining::SetTarget(SetTarget { - channel_id, - maximum_target: updated_target.to_le_bytes().into(), - }), - ) - .into(), - ); - debug!( - "Updated target for standard channel channel_id={channel_id} to {updated_target:?}" - ); - } - Err(e) => warn!( - "Failed to update standard channel channel_id={channel_id} during vardiff {e:?}" - ), - } - } - } - - // Periodic vardiff task loop. - // - // # Purpose - // - Executes the vardiff cycle every 60 seconds for all downstreams. - // - Delegates to [`Self::run_vardiff`] on each tick. - async fn run_vardiff_loop(&self) -> PoolResult<()> { - let mut ticker = tokio::time::interval(std::time::Duration::from_secs(60)); - loop { - ticker.tick().await; - info!("Starting vardiff loop for downstreams"); - - if let Err(e) = self.run_vardiff().await { - error!(error = ?e, "Vardiff iteration failed"); - } - } - } - - // Runs vardiff across **all channels** and generates updates. - // - // # Purpose - // - Iterates through all downstream channels (both standard and extended). - // - Runs vardiff for each channel and collects the resulting updates. - // - Propagates difficulty changes to downstreams and also sends an `UpdateChannel` message - // upstream if applicable. 
- async fn run_vardiff(&self) -> PoolResult<()> { - let mut messages: Vec = vec![]; - self.channel_manager_data - .super_safe_lock(|channel_manager_data| { - for (vardiff_key, vardiff_state) in channel_manager_data.vardiff.iter_mut() { - let downstream_id = &vardiff_key.downstream_id; - let channel_id = &vardiff_key.channel_id; - - let Some(downstream) = channel_manager_data.downstream.get_mut(downstream_id) - else { - continue; - }; - downstream.downstream_data.super_safe_lock(|data| { - if let Some(standard_channel) = data.standard_channels.get_mut(channel_id) { - Self::run_vardiff_on_standard_channel( - *downstream_id, - *channel_id, - standard_channel, - vardiff_state, - &mut messages, - ); - } - if let Some(extended_channel) = data.extended_channels.get_mut(channel_id) { - Self::run_vardiff_on_extended_channel( - *downstream_id, - *channel_id, - extended_channel, - vardiff_state, - &mut messages, - ); - } - }); - } - }); - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - info!("Vardiff update cycle complete"); - Ok(()) - } -} - -#[derive(Clone)] -pub enum RouteMessageTo<'a> { - /// Route to the template provider subsystem. - TemplateProvider(TemplateDistribution<'a>), - /// Route to a specific downstream client by ID, along with its mining message. 
- Downstream((usize, Mining<'a>)), -} - -impl<'a> From> for RouteMessageTo<'a> { - fn from(value: TemplateDistribution<'a>) -> Self { - Self::TemplateProvider(value) - } -} - -impl<'a> From<(usize, Mining<'a>)> for RouteMessageTo<'a> { - fn from(value: (usize, Mining<'a>)) -> Self { - Self::Downstream(value) - } -} - -impl RouteMessageTo<'_> { - pub async fn forward(self, channel_manager_channel: &ChannelManagerChannel) { - match self { - RouteMessageTo::Downstream((downstream_id, message)) => { - _ = channel_manager_channel - .downstream_sender - .send((downstream_id, message.into_static())); - } - RouteMessageTo::TemplateProvider(message) => { - _ = channel_manager_channel - .tp_sender - .send(message.into_static()) - .await; - } - } - } -} diff --git a/roles/pool/src/lib/channel_manager/template_distribution_message_handler.rs b/roles/pool/src/lib/channel_manager/template_distribution_message_handler.rs deleted file mode 100644 index 9327e5da6d..0000000000 --- a/roles/pool/src/lib/channel_manager/template_distribution_message_handler.rs +++ /dev/null @@ -1,306 +0,0 @@ -use std::sync::atomic::Ordering; - -use stratum_apps::stratum_core::{ - bitcoin::Amount, channels_sv2::outputs::deserialize_outputs, - handlers_sv2::HandleTemplateDistributionMessagesFromServerAsync, - mining_sv2::SetNewPrevHash as SetNewPrevHashMp, parsers_sv2::Mining, - template_distribution_sv2::*, -}; -use tracing::{info, warn}; - -use crate::{ - channel_manager::{ChannelManager, RouteMessageTo}, - error::PoolError, -}; - -impl HandleTemplateDistributionMessagesFromServerAsync for ChannelManager { - type Error = PoolError; - - async fn handle_new_template( - &mut self, - _server_id: Option, - msg: NewTemplate<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - let messages = self.channel_manager_data.super_safe_lock(|channel_manager_data| { - if msg.future_template { - channel_manager_data.last_future_template = Some(msg.clone().into_static()); - } - - let mut messages: Vec 
= Vec::new(); - let mut coinbase_output = deserialize_outputs(channel_manager_data.coinbase_outputs.clone()).expect("deserialization failed"); - coinbase_output[0].value = Amount::from_sat(msg.coinbase_tx_value_remaining); - - for (downstream_id, downstream) in channel_manager_data.downstream.iter_mut() { - - let messages_ = downstream.downstream_data.super_safe_lock(|data| { - - let mut messages: Vec = vec![]; - - let group_channel_job = if let Some(ref mut group_channel) = data.group_channels { - if group_channel.on_new_template(msg.clone().into_static(), coinbase_output.clone()).is_ok() { - match msg.future_template { - true => { - let future_job_id = group_channel - .get_future_template_to_job_id() - .get(&msg.template_id) - .expect("job_id must exist"); - Some(group_channel - .get_future_jobs() - .get(future_job_id) - .expect("future job must exist")).cloned() - }, - false => { - Some(group_channel - .get_active_job() - .expect("active job must exist")).cloned() - } - } - } else { - tracing::error!("Some issue with downstream: {downstream_id}, group channel"); - None - } - } else { - None - }; - - match msg.future_template { - true => { - for (channel_id, standard_channel) in data.standard_channels.iter_mut() { - if data.group_channels.is_none() { - if let Err(e) = standard_channel.on_new_template(msg.clone().into_static(), coinbase_output.clone()) { - tracing::error!("Error while adding template to standard channel: {channel_id:?} {e:?}"); - continue; - } - let standard_job_id = standard_channel.get_future_template_to_job_id().get(&msg.template_id).expect("job_id must exist"); - let standard_job = standard_channel.get_future_jobs().get(standard_job_id).expect("standard job must exist"); - let standard_job_message = standard_job.get_job_message(); - messages.push((*downstream_id, Mining::NewMiningJob(standard_job_message.clone())).into()); - } - if let Some(ref group_channel_job) = group_channel_job { - if let Err(e) = 
standard_channel.on_new_template(msg.clone().into_static(), coinbase_output.clone()) { - tracing::error!("Error while adding template to standard channel: {channel_id:?} {e:?}"); - continue; - } - _ = standard_channel - .on_group_channel_job(group_channel_job.clone()); - } - } - if let Some(group_channel_job) = group_channel_job { - let job_message = group_channel_job.get_job_message(); - messages.push((*downstream_id, Mining::NewExtendedMiningJob(job_message.clone())).into()); - } - - for (channel_id, extended_channel) in data.extended_channels.iter_mut() { - if let Err(e) = extended_channel.on_new_template(msg.clone().into_static(), coinbase_output.clone()) { - tracing::error!("Error while adding template to standard channel: {channel_id:?} {e:?}"); - continue; - } - let extended_job_id = extended_channel - .get_future_template_to_job_id() - .get(&msg.template_id) - .expect("job_id must exist"); - - let extended_job = extended_channel - .get_future_jobs() - .get(extended_job_id) - .expect("extended job must exist"); - - let extended_job_message = extended_job.get_job_message(); - - messages.push((*downstream_id,Mining::NewExtendedMiningJob(extended_job_message.clone())).into()); - } - } - false => { - for (channel_id, standard_channel) in data.standard_channels.iter_mut() { - if data.group_channels.is_none() { - if let Err(e) = standard_channel.on_new_template(msg.clone().into_static(), coinbase_output.clone()) { - tracing::error!("Error while adding template to standard channel: {channel_id:?} {e:?}"); - continue; - } - let standard_job = standard_channel.get_active_job().expect("standard job must exist"); - let standard_job_message = standard_job.get_job_message(); - messages.push((*downstream_id, Mining::NewMiningJob(standard_job_message.clone())).into()); - } - if let Some(ref group_channel_job) = group_channel_job { - if let Err(e) = standard_channel.on_new_template(msg.clone().into_static(), coinbase_output.clone()) { - tracing::error!("Error while adding 
template to standard channel: {channel_id:?} {e:?}"); - continue; - } - _ = standard_channel - .on_group_channel_job(group_channel_job.clone()); - } - } - if let Some(group_channel_job) = group_channel_job { - let job_message = group_channel_job.get_job_message(); - messages.push((*downstream_id, Mining::NewExtendedMiningJob(job_message.clone())).into()); - } - - for (channel_id, extended_channel) in data.extended_channels.iter_mut() { - if let Err(e) = extended_channel.on_new_template(msg.clone().into_static(), coinbase_output.clone()) { - tracing::error!("Error while adding template to standard channel: {channel_id:?} {e:?}"); - continue; - } - let extended_job = extended_channel - .get_active_job() - .expect("extended job must exist"); - - let extended_job_message = extended_job.get_job_message(); - - messages.push((*downstream_id,Mining::NewExtendedMiningJob(extended_job_message.clone())).into()); - } - } - } - - messages - - }); - messages.extend(messages_); - } - messages - }); - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } - - async fn handle_request_tx_data_error( - &mut self, - _server_id: Option, - msg: RequestTransactionDataError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", msg); - Ok(()) - } - - async fn handle_request_tx_data_success( - &mut self, - _server_id: Option, - msg: RequestTransactionDataSuccess<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - Ok(()) - } - - async fn handle_set_new_prev_hash( - &mut self, - _server_id: Option, - msg: SetNewPrevHash<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - - let messages = self.channel_manager_data.super_safe_lock(|data| { - data.last_new_prev_hash = Some(msg.clone().into_static()); - - let mut messages: Vec = vec![]; - - for (downstream_id, downstream) in data.downstream.iter_mut() { - let downstream_messages = downstream.downstream_data.super_safe_lock(|data| { - let mut messages: Vec 
= vec![]; - if let Some(ref mut group_channel) = data.group_channels { - _ = group_channel.on_set_new_prev_hash(msg.clone().into_static()); - let group_channel_id = group_channel.get_group_channel_id(); - let activated_group_job_id = group_channel - .get_active_job() - .expect("active job must exist") - .get_job_id(); - - let set_new_prev_hash_message = SetNewPrevHashMp { - channel_id: group_channel_id, - job_id: activated_group_job_id, - prev_hash: msg.prev_hash.clone(), - min_ntime: msg.header_timestamp, - nbits: msg.n_bits, - }; - messages.push( - ( - *downstream_id, - Mining::SetNewPrevHash(set_new_prev_hash_message), - ) - .into(), - ); - } - - for (channel_id, standard_channel) in data.standard_channels.iter_mut() { - if let Err(e) = standard_channel.on_set_new_prev_hash(msg.clone().into_static()) { - tracing::error!("Error while adding new prev hash to standard channel: {channel_id:?} {e:?}"); - continue; - }; - - // did SetupConnection have the REQUIRES_STANDARD_JOBS flag set? - // if yes, there's no group channel, so we need to send the SetNewPrevHashMp - // to each standard channel - if data.group_channels.is_none() { - let activated_standard_job_id = standard_channel - .get_active_job() - .expect("active job must exist") - .get_job_id(); - let set_new_prev_hash_message = SetNewPrevHashMp { - channel_id: *channel_id, - job_id: activated_standard_job_id, - prev_hash: msg.prev_hash.clone(), - min_ntime: msg.header_timestamp, - nbits: msg.n_bits, - }; - messages.push( - ( - *downstream_id, - Mining::SetNewPrevHash(set_new_prev_hash_message), - ) - .into(), - ); - } - } - - for (channel_id, extended_channel) in data.extended_channels.iter_mut() { - if let Err(e) = extended_channel.on_set_new_prev_hash(msg.clone().into_static()) { - tracing::error!("Error while adding new prev hash to extended channel: {channel_id:?} {e:?}"); - continue; - }; - - // don't send any SetNewPrevHash messages to Extended Channels - // if the downstream requires custom work - if 
downstream.requires_custom_work.load(Ordering::SeqCst) { - continue; - } - - let activated_extended_job_id = extended_channel - .get_active_job() - .expect("active job must exist") - .get_job_id(); - let set_new_prev_hash_message = SetNewPrevHashMp { - channel_id: *channel_id, - job_id: activated_extended_job_id, - prev_hash: msg.prev_hash.clone(), - min_ntime: msg.header_timestamp, - nbits: msg.n_bits, - }; - messages.push( - ( - *downstream_id, - Mining::SetNewPrevHash(set_new_prev_hash_message), - ) - .into(), - ); - } - - messages - }); - - messages.extend(downstream_messages); - } - - messages - }); - - for message in messages { - message.forward(&self.channel_manager_channel).await; - } - - Ok(()) - } -} diff --git a/roles/pool/src/lib/config.rs b/roles/pool/src/lib/config.rs deleted file mode 100644 index 51e74317b4..0000000000 --- a/roles/pool/src/lib/config.rs +++ /dev/null @@ -1,199 +0,0 @@ -//! ## Configuration Module -//! -//! Defines [`PoolConfig`], the configuration structure for the Pool, along with its supporting -//! types. -//! -//! This module handles: -//! - Initializing [`PoolConfig`] -//! - Managing [`TemplateProviderConfig`], [`AuthorityConfig`], [`CoinbaseOutput`], and -//! [`ConnectionConfig`] -//! - Validating and converting coinbase outputs -use std::{ - net::SocketAddr, - path::{Path, PathBuf}, -}; - -use stratum_apps::{ - config_helpers::CoinbaseRewardScript, - key_utils::{Secp256k1PublicKey, Secp256k1SecretKey}, - stratum_core::bitcoin::{Amount, TxOut}, -}; - -/// Configuration for the Pool, including connection, authority, and coinbase settings. 
-#[derive(Clone, Debug, serde::Deserialize)] -pub struct PoolConfig { - listen_address: SocketAddr, - tp_address: String, - tp_authority_public_key: Option, - authority_public_key: Secp256k1PublicKey, - authority_secret_key: Secp256k1SecretKey, - cert_validity_sec: u64, - coinbase_reward_script: CoinbaseRewardScript, - pool_signature: String, - shares_per_minute: f32, - share_batch_size: usize, - log_file: Option, - server_id: u16, -} - -impl PoolConfig { - /// Creates a new instance of the [`PoolConfig`]. - /// - /// # Panics - /// - /// Panics if `coinbase_reward_script` is empty. - pub fn new( - pool_connection: ConnectionConfig, - template_provider: TemplateProviderConfig, - authority_config: AuthorityConfig, - coinbase_reward_script: CoinbaseRewardScript, - shares_per_minute: f32, - share_batch_size: usize, - server_id: u16, - ) -> Self { - Self { - listen_address: pool_connection.listen_address, - tp_address: template_provider.address, - tp_authority_public_key: template_provider.authority_public_key, - authority_public_key: authority_config.public_key, - authority_secret_key: authority_config.secret_key, - cert_validity_sec: pool_connection.cert_validity_sec, - coinbase_reward_script, - pool_signature: pool_connection.signature, - shares_per_minute, - share_batch_size, - log_file: None, - server_id, - } - } - - /// Returns the coinbase output. - pub fn coinbase_reward_script(&self) -> &CoinbaseRewardScript { - &self.coinbase_reward_script - } - - /// Returns Pool listenining address. - pub fn listen_address(&self) -> &SocketAddr { - &self.listen_address - } - - /// Returns the authority public key. - pub fn authority_public_key(&self) -> &Secp256k1PublicKey { - &self.authority_public_key - } - - /// Returns the authority secret key. - pub fn authority_secret_key(&self) -> &Secp256k1SecretKey { - &self.authority_secret_key - } - - /// Returns the certificate validity in seconds. 
- pub fn cert_validity_sec(&self) -> u64 { - self.cert_validity_sec - } - - /// Returns the Pool signature. - pub fn pool_signature(&self) -> &String { - &self.pool_signature - } - - /// Return the Template Provider authority public key. - pub fn tp_authority_public_key(&self) -> Option<&Secp256k1PublicKey> { - self.tp_authority_public_key.as_ref() - } - - /// Returns the Template Provider address. - pub fn tp_address(&self) -> &String { - &self.tp_address - } - - /// Returns the share batch size. - pub fn share_batch_size(&self) -> usize { - self.share_batch_size - } - - /// Sets the coinbase output. - pub fn set_coinbase_reward_script(&mut self, coinbase_output: CoinbaseRewardScript) { - self.coinbase_reward_script = coinbase_output; - } - - /// Returns the shares per minute. - pub fn shares_per_minute(&self) -> f32 { - self.shares_per_minute - } - - /// Change TP address. - pub fn set_tp_address(&mut self, tp_address: String) { - self.tp_address = tp_address; - } - - /// Sets the log directory. - pub fn set_log_dir(&mut self, log_dir: Option) { - if let Some(dir) = log_dir { - self.log_file = Some(dir); - } - } - /// Returns the log directory. - pub fn log_dir(&self) -> Option<&Path> { - self.log_file.as_deref() - } - - /// Returns the server id. - pub fn server_id(&self) -> u16 { - self.server_id - } - - pub fn get_txout(&self) -> TxOut { - TxOut { - value: Amount::from_sat(0), - script_pubkey: self.coinbase_reward_script.script_pubkey().to_owned(), - } - } -} - -/// Configuration for connecting to a Template Provider. -pub struct TemplateProviderConfig { - address: String, - authority_public_key: Option, -} - -impl TemplateProviderConfig { - pub fn new(address: String, authority_public_key: Option) -> Self { - Self { - address, - authority_public_key, - } - } -} - -/// Pool's authority public and secret keys. 
-pub struct AuthorityConfig { - pub public_key: Secp256k1PublicKey, - pub secret_key: Secp256k1SecretKey, -} - -impl AuthorityConfig { - pub fn new(public_key: Secp256k1PublicKey, secret_key: Secp256k1SecretKey) -> Self { - Self { - public_key, - secret_key, - } - } -} - -/// Connection settings for the Pool listener. -pub struct ConnectionConfig { - listen_address: SocketAddr, - cert_validity_sec: u64, - signature: String, -} - -impl ConnectionConfig { - pub fn new(listen_address: SocketAddr, cert_validity_sec: u64, signature: String) -> Self { - Self { - listen_address, - cert_validity_sec, - signature, - } - } -} diff --git a/roles/pool/src/lib/downstream/common_message_handler.rs b/roles/pool/src/lib/downstream/common_message_handler.rs deleted file mode 100644 index 028ef8bef8..0000000000 --- a/roles/pool/src/lib/downstream/common_message_handler.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::{downstream::Downstream, error::PoolError, utils::StdFrame}; -use std::{convert::TryInto, sync::atomic::Ordering}; -use stratum_apps::stratum_core::{ - common_messages_sv2::{ - has_requires_std_job, has_work_selection, SetupConnection, SetupConnectionSuccess, - }, - handlers_sv2::HandleCommonMessagesFromClientAsync, - parsers_sv2::AnyMessage, -}; -use tracing::info; - -impl HandleCommonMessagesFromClientAsync for Downstream { - type Error = PoolError; - - async fn handle_setup_connection( - &mut self, - _client_id: Option, - msg: SetupConnection<'_>, - ) -> Result<(), Self::Error> { - info!( - "Received `SetupConnection`: version={}, flags={:b}", - msg.min_version, msg.flags - ); - - self.requires_custom_work - .store(has_work_selection(msg.flags), Ordering::SeqCst); - self.requires_standard_jobs - .store(has_requires_std_job(msg.flags), Ordering::SeqCst); - - let response = SetupConnectionSuccess { - used_version: 2, - flags: msg.flags, - }; - let frame: StdFrame = AnyMessage::Common(response.into_static().into()).try_into()?; - self.downstream_channel - 
.downstream_sender - .send(frame) - .await?; - - Ok(()) - } -} diff --git a/roles/pool/src/lib/downstream/mod.rs b/roles/pool/src/lib/downstream/mod.rs deleted file mode 100644 index c16d06f87e..0000000000 --- a/roles/pool/src/lib/downstream/mod.rs +++ /dev/null @@ -1,283 +0,0 @@ -use std::{ - collections::HashMap, - sync::{ - atomic::{AtomicBool, AtomicUsize}, - Arc, - }, -}; - -use async_channel::{unbounded, Receiver, Sender}; -use stratum_apps::{ - custom_mutex::Mutex, - network_helpers::noise_stream::NoiseTcpStream, - stratum_core::{ - channels_sv2::server::{ - extended::ExtendedChannel, - group::GroupChannel, - jobs::{extended::ExtendedJob, job_store::DefaultJobStore, standard::StandardJob}, - standard::StandardChannel, - }, - common_messages_sv2::MESSAGE_TYPE_SETUP_CONNECTION, - handlers_sv2::HandleCommonMessagesFromClientAsync, - noise_sv2::Error, - parsers_sv2::{AnyMessage, Mining}, - }, -}; -use tokio::sync::broadcast; -use tracing::{debug, error, warn}; - -use crate::{ - error::{PoolError, PoolResult}, - status::{handle_error, Status, StatusSender}, - task_manager::TaskManager, - utils::{ - protocol_message_type, spawn_io_tasks, Message, MessageType, SV2Frame, ShutdownMessage, - StdFrame, - }, -}; - -mod common_message_handler; - -/// Holds state related to a downstream connection's mining channels. -/// -/// This includes: -/// - Whether the downstream requires a standard job (`require_std_job`). -/// - An optional [`GroupChannel`] if group channeling is used. -/// - Active [`ExtendedChannel`]s keyed by channel ID. -/// - Active [`StandardChannel`]s keyed by channel ID. -pub struct DownstreamData { - pub group_channels: Option>>>, - pub extended_channels: - HashMap>>>, - pub standard_channels: - HashMap>>>, - pub channel_id_factory: AtomicUsize, -} - -/// Communication layer for a downstream connection. 
-/// -/// Provides the messaging primitives for interacting with the -/// channel manager and the downstream peer: -/// - `channel_manager_sender`: sends frames to the channel manager. -/// - `channel_manager_receiver`: receives messages from the channel manager. -/// - `downstream_sender`: sends frames to the downstream. -/// - `downstream_receiver`: receives frames from the downstream. -#[derive(Clone)] -pub struct DownstreamChannel { - channel_manager_sender: Sender<(usize, Mining<'static>)>, - channel_manager_receiver: broadcast::Sender<(usize, Mining<'static>)>, - downstream_sender: Sender, - downstream_receiver: Receiver, -} - -/// Represents a downstream client connected to this node. -#[derive(Clone)] -pub struct Downstream { - pub downstream_data: Arc>, - downstream_channel: DownstreamChannel, - pub downstream_id: usize, - pub requires_standard_jobs: Arc, - pub requires_custom_work: Arc, -} - -impl Downstream { - /// Creates a new [`Downstream`] instance and spawns the necessary I/O tasks. 
- pub fn new( - downstream_id: usize, - channel_manager_sender: Sender<(usize, Mining<'static>)>, - channel_manager_receiver: broadcast::Sender<(usize, Mining<'static>)>, - noise_stream: NoiseTcpStream, - notify_shutdown: broadcast::Sender, - task_manager: Arc, - status_sender: Sender, - ) -> Self { - let (noise_stream_reader, noise_stream_writer) = noise_stream.into_split(); - let status_sender = StatusSender::Downstream { - downstream_id, - tx: status_sender, - }; - let (inbound_tx, inbound_rx) = unbounded::(); - let (outbound_tx, outbound_rx) = unbounded::(); - spawn_io_tasks( - task_manager, - noise_stream_reader, - noise_stream_writer, - outbound_rx, - inbound_tx, - notify_shutdown, - status_sender, - ); - - let downstream_channel = DownstreamChannel { - channel_manager_receiver, - channel_manager_sender, - downstream_sender: outbound_tx, - downstream_receiver: inbound_rx, - }; - let downstream_data = Arc::new(Mutex::new(DownstreamData { - extended_channels: HashMap::new(), - standard_channels: HashMap::new(), - group_channels: None, - channel_id_factory: AtomicUsize::new(1), - })); - Downstream { - downstream_channel, - downstream_data, - downstream_id, - requires_standard_jobs: Arc::new(AtomicBool::new(false)), - requires_custom_work: Arc::new(AtomicBool::new(false)), - } - } - - /// Starts the downstream loop. - /// - /// Responsibilities: - /// - Performs the initial `SetupConnection` handshake with the downstream. - /// - Forwards mining-related messages to the channel manager. - /// - Forwards channel manager messages back to the downstream peer. 
- pub async fn start( - mut self, - notify_shutdown: broadcast::Sender, - status_sender: Sender, - task_manager: Arc, - ) { - let status_sender = StatusSender::Downstream { - downstream_id: self.downstream_id, - tx: status_sender, - }; - - let mut shutdown_rx = notify_shutdown.subscribe(); - - // Setup initial connection - if let Err(e) = self.setup_connection_with_downstream().await { - error!(?e, "Failed to set up downstream connection"); - handle_error(&status_sender, e).await; - return; - } - - let mut receiver = self.downstream_channel.channel_manager_receiver.subscribe(); - task_manager.spawn(async move { - loop { - let self_clone_1 = self.clone(); - let downstream_id = self_clone_1.downstream_id; - let self_clone_2 = self.clone(); - tokio::select! { - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - debug!("Downstream {downstream_id}: Received global shutdown"); - break; - } - Ok(ShutdownMessage::DownstreamShutdown(id)) if downstream_id == id => { - debug!("Downstream {downstream_id}: Received downstream {id} shutdown"); - break; - } - _ => {} - } - } - res = self_clone_1.handle_downstream_mining_message() => { - if let Err(e) = res { - error!(?e, "Error handling downstream message for {downstream_id}"); - handle_error(&status_sender, e).await; - break; - } - } - res = self_clone_2.handle_channel_manager_message(&mut receiver) => { - if let Err(e) = res { - error!(?e, "Error handling channel manager message for {downstream_id}"); - handle_error(&status_sender, e).await; - break; - } - } - - } - } - warn!("Downstream: unified message loop exited."); - }); - } - - // Performs the initial handshake with a downstream peer. 
- async fn setup_connection_with_downstream(&mut self) -> PoolResult<()> { - let mut frame = self.downstream_channel.downstream_receiver.recv().await?; - - let Some(message_type) = frame.get_header().map(|m| m.msg_type()) else { - return Err(PoolError::UnexpectedMessage(0)); - }; - - // The first ever message received on a new downstream connection - // should always be a setup connection message. - if message_type == MESSAGE_TYPE_SETUP_CONNECTION { - self.handle_common_message_frame_from_client(None, message_type, frame.payload()) - .await?; - return Ok(()); - } - Err(PoolError::UnexpectedMessage(message_type)) - } - - // Handles messages sent from the channel manager to this downstream. - async fn handle_channel_manager_message( - self, - receiver: &mut broadcast::Receiver<(usize, Mining<'static>)>, - ) -> PoolResult<()> { - let (downstream_id, msg) = match receiver.recv().await { - Ok(msg) => msg, - Err(e) => { - warn!(?e, "Broadcast receive failed"); - return Ok(()); - } - }; - - if downstream_id != self.downstream_id { - debug!( - ?downstream_id, - "Message ignored for non-matching downstream" - ); - return Ok(()); - } - - let message = AnyMessage::Mining(msg); - let std_frame: StdFrame = message.try_into()?; - - self.downstream_channel - .downstream_sender - .send(std_frame) - .await - .map_err(|e| { - error!(?e, "Downstream send failed"); - PoolError::Noise(Error::ExpectedIncomingHandshakeMessage) - })?; - - Ok(()) - } - - // Handles incoming messages from the downstream peer. - async fn handle_downstream_mining_message(self) -> PoolResult<()> { - let mut sv2_frame = self.downstream_channel.downstream_receiver.recv().await?; - - let Some(message_type) = sv2_frame.get_header().map(|h| h.msg_type()) else { - return Ok(()); - }; - - if protocol_message_type(message_type) != MessageType::Mining { - warn!( - ?message_type, - "Received unsupported message type from downstream." 
- ); - return Ok(()); - } - - let mining = Mining::try_from((message_type, sv2_frame.payload()))?.into_static(); - - debug!("Received mining SV2 frame from downstream."); - self.downstream_channel - .channel_manager_sender - .send((self.downstream_id, mining)) - .await - .map_err(|e| { - error!(error=?e, "Failed to send mining message to channel manager."); - PoolError::ChannelErrorSender - })?; - - Ok(()) - } -} diff --git a/roles/pool/src/lib/error.rs b/roles/pool/src/lib/error.rs deleted file mode 100644 index 4c2b171e3c..0000000000 --- a/roles/pool/src/lib/error.rs +++ /dev/null @@ -1,267 +0,0 @@ -use std::{ - convert::From, - fmt::Debug, - sync::{MutexGuard, PoisonError}, -}; - -use stratum_apps::stratum_core::{ - binary_sv2, bitcoin, - channels_sv2::{ - server::{ - error::{ExtendedChannelError, GroupChannelError, StandardChannelError}, - share_accounting::ShareValidationError, - }, - vardiff::error::VardiffError, - }, - codec_sv2, framing_sv2, - handlers_sv2::HandlerErrorType, - mining_sv2::ExtendedExtranonceError, - noise_sv2, - parsers_sv2::{Mining, ParserError}, -}; - -pub type PoolResult = Result; - -#[derive(Debug)] -pub enum ChannelSv2Error { - ExtendedChannelServerSide(ExtendedChannelError), - StandardChannelServerSide(StandardChannelError), - GroupChannelServerSide(GroupChannelError), - ExtranonceError(ExtendedExtranonceError), - ShareValidationError(ShareValidationError), -} - -/// Represents various errors that can occur in the pool implementation. -#[derive(std::fmt::Debug)] -pub enum PoolError { - /// I/O-related error. - Io(std::io::Error), - ChannelSv2(ChannelSv2Error), - /// Error when sending a message through a channel. - ChannelSend(Box), - /// Error when receiving a message from an asynchronous channel. - ChannelRecv(async_channel::RecvError), - /// Error from the `binary_sv2` crate. - BinarySv2(binary_sv2::Error), - /// Error from the `codec_sv2` crate. 
- Codec(codec_sv2::Error), - /// Error related to parsing a coinbase output specification. - CoinbaseOutput(stratum_apps::config_helpers::CoinbaseOutputError), - /// Error from the `noise_sv2` crate. - Noise(noise_sv2::Error), - /// Error related to SV2 message framing. - Framing(framing_sv2::Error), - /// Error due to a poisoned lock, typically from a failed mutex operation. - PoisonLock(String), - /// Error indicating that a component has shut down unexpectedly. - ComponentShutdown(String), - /// Custom error message. - Custom(String), - /// Error related to the SV2 protocol, including an error code and a `Mining` message. - Sv2ProtocolError((u32, Mining<'static>)), - /// Vardiff Error - Vardiff(VardiffError), - /// Parser Error - Parser(ParserError), - /// Shutdown - Shutdown, - /// Unexpected message - UnexpectedMessage(u8), - /// Channel error sender - ChannelErrorSender, - /// Invalid socket address - InvalidSocketAddress(String), - /// Bitcoin Encode Error - BitcoinEncodeError(bitcoin::consensus::encode::Error), - /// Downstream not found for the channel - DownstreamNotFoundWithChannelId(u32), - /// Downstream not found - DownstreamNotFound(usize), - /// Downstream Id not found - DownstreamIdNotFound, - /// Future template not present - FutureTemplateNotPresent, - /// Last new prevhash not found - LastNewPrevhashNotFound, - /// Vardiff associated to channel not found - VardiffNotFound(u32), - /// Errors on bad `String` to `int` conversion. 
- ParseInt(std::num::ParseIntError), - /// Failed to create group channel - FailedToCreateGroupChannel(GroupChannelError), -} - -impl std::fmt::Display for PoolError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - use PoolError::*; - match self { - Io(e) => write!(f, "I/O error: `{e:?}"), - ChannelSend(e) => write!(f, "Channel send failed: `{e:?}`"), - ChannelRecv(e) => write!(f, "Channel recv failed: `{e:?}`"), - BinarySv2(e) => write!(f, "Binary SV2 error: `{e:?}`"), - Codec(e) => write!(f, "Codec SV2 error: `{e:?}"), - CoinbaseOutput(e) => write!(f, "Coinbase output error: `{e:?}"), - Framing(e) => write!(f, "Framing SV2 error: `{e:?}`"), - Noise(e) => write!(f, "Noise SV2 error: `{e:?}"), - PoisonLock(e) => write!(f, "Poison lock: {e:?}"), - ComponentShutdown(e) => write!(f, "Component shutdown: {e:?}"), - Custom(e) => write!(f, "Custom SV2 error: `{e:?}`"), - Sv2ProtocolError(e) => { - write!(f, "Received Sv2 Protocol Error from upstream: `{e:?}`") - } - PoolError::Vardiff(e) => { - write!(f, "Received Vardiff Error : {e:?}") - } - Parser(e) => write!(f, "Parser error: `{e:?}`"), - Shutdown => write!(f, "Shutdown"), - UnexpectedMessage(message_type) => write!(f, "message type: {message_type:?}"), - ChannelErrorSender => write!(f, "Channel sender error"), - InvalidSocketAddress(address) => write!(f, "Invalid socket address: {address:?}"), - BitcoinEncodeError(_) => write!(f, "Error generated during encoding"), - DownstreamNotFoundWithChannelId(channel_id) => { - write!(f, "Downstream not found for channel id: {channel_id}") - } - DownstreamNotFound(downstream_id) => write!( - f, - "Downstream not found with downstream id: {downstream_id}" - ), - DownstreamIdNotFound => write!(f, "Downstream id not found"), - FutureTemplateNotPresent => write!(f, "future template not present"), - LastNewPrevhashNotFound => write!(f, "last prev hash not present"), - VardiffNotFound(downstream_id) => write!( - f, - "Vardiff not found available for 
downstream id: {downstream_id}" - ), - ParseInt(e) => write!(f, "Conversion error: {e:?}"), - ChannelSv2(channel_error) => { - write!(f, "Channel error: {channel_error:?}") - } - FailedToCreateGroupChannel(ref e) => { - write!(f, "Failed to create group channel: {e:?}") - } - } - } -} - -impl From for PoolError { - fn from(e: std::io::Error) -> PoolError { - PoolError::Io(e) - } -} - -impl From for PoolError { - fn from(e: async_channel::RecvError) -> PoolError { - PoolError::ChannelRecv(e) - } -} - -impl From for PoolError { - fn from(e: binary_sv2::Error) -> PoolError { - PoolError::BinarySv2(e) - } -} - -impl From for PoolError { - fn from(e: codec_sv2::Error) -> PoolError { - PoolError::Codec(e) - } -} - -impl From for PoolError { - fn from(e: stratum_apps::config_helpers::CoinbaseOutputError) -> PoolError { - PoolError::CoinbaseOutput(e) - } -} - -impl From for PoolError { - fn from(e: noise_sv2::Error) -> PoolError { - PoolError::Noise(e) - } -} - -impl From> for PoolError { - fn from(e: async_channel::SendError) -> PoolError { - PoolError::ChannelSend(Box::new(e)) - } -} - -impl From for PoolError { - fn from(e: String) -> PoolError { - PoolError::Custom(e) - } -} -impl From for PoolError { - fn from(e: framing_sv2::Error) -> PoolError { - PoolError::Framing(e) - } -} - -impl From>> for PoolError { - fn from(e: PoisonError>) -> PoolError { - PoolError::PoisonLock(e.to_string()) - } -} - -impl From<(u32, Mining<'static>)> for PoolError { - fn from(e: (u32, Mining<'static>)) -> Self { - PoolError::Sv2ProtocolError(e) - } -} - -impl HandlerErrorType for PoolError { - fn parse_error(error: ParserError) -> Self { - PoolError::Parser(error) - } - - fn unexpected_message(message_type: u8) -> Self { - PoolError::UnexpectedMessage(message_type) - } -} - -impl From for PoolError { - fn from(value: stratum_apps::stratum_core::bitcoin::consensus::encode::Error) -> Self { - PoolError::BitcoinEncodeError(value) - } -} - -impl From for PoolError { - fn from(value: 
ExtendedChannelError) -> Self { - PoolError::ChannelSv2(ChannelSv2Error::ExtendedChannelServerSide(value)) - } -} - -impl From for PoolError { - fn from(value: StandardChannelError) -> Self { - PoolError::ChannelSv2(ChannelSv2Error::StandardChannelServerSide(value)) - } -} - -impl From for PoolError { - fn from(value: GroupChannelError) -> Self { - PoolError::ChannelSv2(ChannelSv2Error::GroupChannelServerSide(value)) - } -} - -impl From for PoolError { - fn from(value: ExtendedExtranonceError) -> Self { - PoolError::ChannelSv2(ChannelSv2Error::ExtranonceError(value)) - } -} - -impl From for PoolError { - fn from(value: VardiffError) -> Self { - PoolError::Vardiff(value) - } -} - -impl From for PoolError { - fn from(value: ParserError) -> Self { - PoolError::Parser(value) - } -} - -impl From for PoolError { - fn from(value: ShareValidationError) -> Self { - PoolError::ChannelSv2(ChannelSv2Error::ShareValidationError(value)) - } -} diff --git a/roles/pool/src/lib/mod.rs b/roles/pool/src/lib/mod.rs deleted file mode 100644 index df2381d661..0000000000 --- a/roles/pool/src/lib/mod.rs +++ /dev/null @@ -1,177 +0,0 @@ -use std::sync::Arc; - -use async_channel::unbounded; -use stratum_apps::stratum_core::{ - bitcoin::consensus::Encodable, parsers_sv2::TemplateDistribution, -}; -use tokio::sync::broadcast; -use tracing::{debug, info, warn}; - -use crate::{ - channel_manager::ChannelManager, - config::PoolConfig, - error::PoolResult, - status::{State, Status}, - task_manager::TaskManager, - template_receiver::TemplateReceiver, - utils::ShutdownMessage, -}; - -pub mod channel_manager; -pub mod config; -pub mod downstream; -pub mod error; -pub mod status; -pub mod task_manager; -pub mod template_receiver; -pub mod utils; - -#[derive(Debug, Clone)] -pub struct PoolSv2 { - config: PoolConfig, - notify_shutdown: broadcast::Sender, -} - -impl PoolSv2 { - pub fn new(config: PoolConfig) -> Self { - let (notify_shutdown, _) = tokio::sync::broadcast::channel::(100); - Self { - config, 
- notify_shutdown, - } - } - - /// Starts the Pool main loop. - pub async fn start(&self) -> PoolResult<()> { - let coinbase_outputs = vec![self.config.get_txout()]; - let mut encoded_outputs = vec![]; - - coinbase_outputs - .consensus_encode(&mut encoded_outputs) - .expect("Invalid coinbase output in config"); - - let notify_shutdown = self.notify_shutdown.clone(); - - let task_manager = Arc::new(TaskManager::new()); - - let (status_sender, status_receiver) = async_channel::unbounded::(); - - let (channel_manager_to_downstream_sender, _channel_manager_to_downstream_receiver) = - broadcast::channel(10); - let (downstream_to_channel_manager_sender, downstream_to_channel_manager_receiver) = - unbounded(); - - let (channel_manager_to_tp_sender, channel_manager_to_tp_receiver) = - unbounded::>(); - let (tp_to_channel_manager_sender, tp_to_channel_manager_receiver) = - unbounded::>(); - - debug!("Channels initialized."); - - let channel_manager = ChannelManager::new( - self.config.clone(), - channel_manager_to_tp_sender, - tp_to_channel_manager_receiver, - channel_manager_to_downstream_sender.clone(), - downstream_to_channel_manager_receiver, - encoded_outputs.clone(), - ) - .await?; - - let channel_manager_clone = channel_manager.clone(); - - // Initialize the template Receiver - let tp_address = self.config.tp_address().to_string(); - let tp_pubkey = self.config.tp_authority_public_key().copied(); - - let template_receiver = TemplateReceiver::new( - tp_address.clone(), - tp_pubkey, - channel_manager_to_tp_receiver, - tp_to_channel_manager_sender, - notify_shutdown.clone(), - task_manager.clone(), - status_sender.clone(), - ) - .await?; - - info!("Template provider setup done"); - - template_receiver - .start( - tp_address, - notify_shutdown.clone(), - status_sender.clone(), - task_manager.clone(), - encoded_outputs, - ) - .await?; - - channel_manager - .start( - notify_shutdown.clone(), - status_sender.clone(), - task_manager.clone(), - ) - .await?; - - 
channel_manager_clone - .start_downstream_server( - *self.config.authority_public_key(), - *self.config.authority_secret_key(), - self.config.cert_validity_sec(), - *self.config.listen_address(), - task_manager.clone(), - notify_shutdown.clone(), - status_sender, - downstream_to_channel_manager_sender, - channel_manager_to_downstream_sender, - ) - .await?; - - info!("Spawning status listener task..."); - loop { - tokio::select! { - _ = tokio::signal::ctrl_c() => { - info!("Ctrl+C received β€” initiating graceful shutdown..."); - let _ = notify_shutdown.send(ShutdownMessage::ShutdownAll); - break; - } - message = status_receiver.recv() => { - if let Ok(status) = message { - match status.state { - State::DownstreamShutdown{downstream_id,..} => { - warn!("Downstream {downstream_id:?} disconnected β€” Channel manager."); - let _ = notify_shutdown.send(ShutdownMessage::DownstreamShutdown(downstream_id)); - } - State::TemplateReceiverShutdown(_) => { - warn!("Template Receiver shutdown requested β€” initiating full shutdown."); - let _ = notify_shutdown.send(ShutdownMessage::ShutdownAll); - break; - } - State::ChannelManagerShutdown(_) => { - warn!("Channel Manager shutdown requested β€” initiating full shutdown."); - let _ = notify_shutdown.send(ShutdownMessage::ShutdownAll); - break; - } - } - } - } - } - } - - warn!("Graceful shutdown"); - task_manager.abort_all().await; - info!("Joining remaining tasks..."); - task_manager.join_all().await; - info!("Pool shutdown complete."); - Ok(()) - } -} - -impl Drop for PoolSv2 { - fn drop(&mut self) { - info!("PoolSv2 dropped"); - let _ = self.notify_shutdown.send(ShutdownMessage::ShutdownAll); - } -} diff --git a/roles/pool/src/lib/status.rs b/roles/pool/src/lib/status.rs deleted file mode 100644 index 95765e3ac3..0000000000 --- a/roles/pool/src/lib/status.rs +++ /dev/null @@ -1,124 +0,0 @@ -//! Status reporting and error propagation Utility. -//! -//! This module provides mechanisms for communicating shutdown events and -//! 
component state changes across the system. Each component (downstream, -//! upstream, job declarator, template receiver, channel manager) can send -//! and receive status updates via typed channels. Errors are automatically -//! converted into shutdown signals, allowing coordinated teardown of tasks. - -use tracing::{debug, error, warn}; - -use crate::error::PoolError; - -/// Sender type for propagating status updates from different system components. -#[derive(Debug, Clone)] -pub enum StatusSender { - /// Status updates from a specific downstream connection. - Downstream { - downstream_id: usize, - tx: async_channel::Sender, - }, - /// Status updates from the template receiver. - TemplateReceiver(async_channel::Sender), - /// Status updates from the channel manager. - ChannelManager(async_channel::Sender), -} - -/// High-level identifier of a component type that can send status updates. -#[derive(Debug, PartialEq, Eq)] -pub enum StatusType { - /// A downstream connection identified by its ID. - Downstream(usize), - /// The template receiver component. - TemplateReceiver, - /// The channel manager component. - ChannelManager, -} - -impl From<&StatusSender> for StatusType { - fn from(value: &StatusSender) -> Self { - match value { - StatusSender::ChannelManager(_) => StatusType::ChannelManager, - StatusSender::Downstream { - downstream_id, - tx: _, - } => StatusType::Downstream(*downstream_id), - StatusSender::TemplateReceiver(_) => StatusType::TemplateReceiver, - } - } -} - -impl StatusSender { - /// Sends a status update for the associated component. 
- pub async fn send(&self, status: Status) -> Result<(), async_channel::SendError> { - match self { - Self::Downstream { downstream_id, tx } => { - debug!( - "Sending status from Downstream [{}]: {:?}", - downstream_id, status.state - ); - tx.send(status).await - } - Self::TemplateReceiver(tx) => { - debug!("Sending status from TemplateReceiver: {:?}", status.state); - tx.send(status).await - } - Self::ChannelManager(tx) => { - debug!("Sending status from ChannelManager: {:?}", status.state); - tx.send(status).await - } - } - } -} - -/// Represents the state of a component, typically triggered by an error or shutdown event. -#[derive(Debug)] -pub enum State { - /// A downstream connection has shut down with a reason. - DownstreamShutdown { - downstream_id: usize, - reason: PoolError, - }, - /// Template receiver has shut down with a reason. - TemplateReceiverShutdown(PoolError), - /// Channel manager has shut down with a reason. - ChannelManagerShutdown(PoolError), -} - -/// Wrapper around a component’s state, sent as status updates across the system. -#[derive(Debug)] -pub struct Status { - /// The current state being reported. - pub state: State, -} - -/// Sends a shutdown status for the given component, logging the error cause. -async fn send_status(sender: &StatusSender, error: PoolError) { - let state = match sender { - StatusSender::Downstream { downstream_id, .. 
} => { - warn!("Downstream [{downstream_id}] shutting down due to error: {error:?}"); - State::DownstreamShutdown { - downstream_id: *downstream_id, - reason: error, - } - } - StatusSender::TemplateReceiver(_) => { - warn!("Template Receiver shutting down due to error: {error:?}"); - State::TemplateReceiverShutdown(error) - } - StatusSender::ChannelManager(_) => { - warn!("ChannelManager shutting down due to error: {error:?}"); - State::ChannelManagerShutdown(error) - } - }; - - if let Err(e) = sender.send(Status { state }).await { - tracing::error!("Failed to send status update from {sender:?}: {e:?}"); - } -} - -/// Logs an error and propagates a corresponding shutdown status for the component. -pub async fn handle_error(sender: &StatusSender, e: PoolError) { - error!("Error in {:?}: {:?}", sender, e); - send_status(sender, e).await; -} diff --git a/roles/pool/src/lib/task_manager.rs b/roles/pool/src/lib/task_manager.rs deleted file mode 100644 index 95435a020c..0000000000 --- a/roles/pool/src/lib/task_manager.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::sync::Mutex as StdMutex; -use tokio::task::JoinHandle; - -/// Manages a collection of spawned tokio tasks. -/// -/// This struct provides a centralized way to spawn, track, and manage the lifecycle -/// of async tasks. It maintains a list of join handles that can -/// be used to wait for all tasks to complete or abort them during shutdown. -pub struct TaskManager { - tasks: StdMutex>>, -} - -impl Default for TaskManager { - fn default() -> Self { - Self::new() - } -} - -impl TaskManager { - /// Creates a new TaskManager instance. - /// - /// Initializes an empty task manager ready to spawn and track tasks. - pub fn new() -> Self { - Self { - tasks: StdMutex::new(Vec::new()), - } - } - - /// Spawns a new async task and adds it to the managed collection. - /// - /// The task will be tracked by this manager and can be waited for or aborted - /// using the other methods. 
- /// - /// # Arguments - /// * `fut` - The future to spawn as a task - pub fn spawn(&self, fut: F) - where - F: std::future::Future + Send + 'static, - { - let handle = tokio::spawn(async move { - fut.await; - }); - self.tasks.lock().unwrap().push(handle); - } - - /// Waits for all managed tasks to complete. - /// - /// This method will block until all tasks that were spawned through this - /// manager have finished executing. Tasks are joined in reverse order - /// (most recently spawned first). - pub async fn join_all(&self) { - let handles = { - let mut tasks = self.tasks.lock().unwrap(); - std::mem::take(&mut *tasks) - }; - - for handle in handles { - let _ = handle.await; - } - } - - /// Aborts all managed tasks. - /// - /// This method immediately cancels all tasks that were spawned through this - /// manager. The tasks will be terminated without waiting for them to complete. - pub async fn abort_all(&self) { - let mut tasks = self.tasks.lock().unwrap(); - for handle in tasks.drain(..) 
{ - handle.abort(); - } - } -} diff --git a/roles/pool/src/lib/template_receiver/common_message_handler.rs b/roles/pool/src/lib/template_receiver/common_message_handler.rs deleted file mode 100644 index c23e06422b..0000000000 --- a/roles/pool/src/lib/template_receiver/common_message_handler.rs +++ /dev/null @@ -1,58 +0,0 @@ -use stratum_apps::stratum_core::{ - common_messages_sv2::{ - ChannelEndpointChanged, Reconnect, SetupConnectionError, SetupConnectionSuccess, - }, - handlers_sv2::HandleCommonMessagesFromServerAsync, -}; -use tracing::{error, info}; - -use crate::{error::PoolError, template_receiver::TemplateReceiver}; - -impl HandleCommonMessagesFromServerAsync for TemplateReceiver { - type Error = PoolError; - - async fn handle_setup_connection_success( - &mut self, - _server_id: Option, - msg: SetupConnectionSuccess, - ) -> Result<(), Self::Error> { - info!( - "Received `SetupConnectionSuccess` from TP: version={}, flags={:b}", - msg.used_version, msg.flags - ); - Ok(()) - } - - async fn handle_channel_endpoint_changed( - &mut self, - _server_id: Option, - msg: ChannelEndpointChanged, - ) -> Result<(), Self::Error> { - info!( - "Received ChannelEndpointChanged with channel id: {}", - msg.channel_id - ); - Err(PoolError::Shutdown) - } - - async fn handle_reconnect( - &mut self, - _server_id: Option, - msg: Reconnect<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - Ok(()) - } - - async fn handle_setup_connection_error( - &mut self, - _server_id: Option, - msg: SetupConnectionError<'_>, - ) -> Result<(), Self::Error> { - error!( - "Received `SetupConnectionError` from TP with error code {}", - std::str::from_utf8(msg.error_code.as_ref()).unwrap_or("unknown error code") - ); - Err(PoolError::Shutdown) - } -} diff --git a/roles/pool/src/lib/template_receiver/mod.rs b/roles/pool/src/lib/template_receiver/mod.rs deleted file mode 100644 index 31697e7538..0000000000 --- a/roles/pool/src/lib/template_receiver/mod.rs +++ /dev/null @@ -1,377 +0,0 
@@ -use std::{net::SocketAddr, sync::Arc}; -mod common_message_handler; -use async_channel::{unbounded, Receiver, Sender}; -use stratum_apps::{ - key_utils::Secp256k1PublicKey, - network_helpers::noise_stream::NoiseTcpStream, - stratum_core::{ - bitcoin::{ - self, absolute::LockTime, transaction::Version, OutPoint, ScriptBuf, Sequence, - Transaction, TxIn, TxOut, Witness, - }, - codec_sv2::HandshakeRole, - framing_sv2, - handlers_sv2::HandleCommonMessagesFromServerAsync, - noise_sv2::{Error, Initiator}, - parsers_sv2::{AnyMessage, TemplateDistribution}, - template_distribution_sv2::CoinbaseOutputConstraints, - }, -}; -use tokio::{net::TcpStream, sync::broadcast}; -use tracing::{debug, error, info, warn}; - -use crate::{ - error::{PoolError, PoolResult}, - status::{handle_error, Status, StatusSender}, - task_manager::TaskManager, - utils::{ - get_setup_connection_message_tp, protocol_message_type, spawn_io_tasks, Message, - MessageType, SV2Frame, ShutdownMessage, StdFrame, - }, -}; - -#[derive(Clone)] -pub struct TemplateReceiverChannel { - channel_manager_sender: Sender>, - channel_manager_receiver: Receiver>, - tp_sender: Sender, - tp_receiver: Receiver, -} - -#[derive(Clone)] -pub struct TemplateReceiver { - template_receiver_channel: TemplateReceiverChannel, -} - -impl TemplateReceiver { - /// Establish a new connection to a Template Provider. - /// - /// - Opens a TCP connection - /// - Performs Noise handshake - /// - Spawns IO tasks for inbound/outbound frames - /// - /// Retries up to 3 times before returning [`PoolError::Shutdown`]. 
- pub async fn new( - tp_address: String, - public_key: Option, - channel_manager_receiver: Receiver>, - channel_manager_sender: Sender>, - notify_shutdown: broadcast::Sender, - task_manager: Arc, - status_sender: Sender, - ) -> PoolResult { - const MAX_RETRIES: usize = 3; - - for attempt in 1..=MAX_RETRIES { - info!(attempt, MAX_RETRIES, "Connecting to template provider"); - - let initiator = match public_key { - Some(pub_key) => { - debug!(attempt, "Using public key for initiator handshake"); - Initiator::from_raw_k(pub_key.into_bytes()) - } - None => { - debug!(attempt, "Using anonymous initiator (no public key)"); - Initiator::without_pk() - } - }?; - - match TcpStream::connect(tp_address.as_str()).await { - Ok(stream) => { - info!( - attempt, - "TCP connection established, starting Noise handshake" - ); - - match NoiseTcpStream::::new( - stream, - HandshakeRole::Initiator(initiator), - ) - .await - { - Ok(noise_stream) => { - info!(attempt, "Noise handshake completed successfully"); - - let (noise_stream_reader, noise_stream_writer) = - noise_stream.into_split(); - - let status_sender = StatusSender::TemplateReceiver(status_sender); - let (inbound_tx, inbound_rx) = unbounded::(); - let (outbound_tx, outbound_rx) = unbounded::(); - - info!(attempt, "Spawning IO tasks for template receiver"); - spawn_io_tasks( - task_manager.clone(), - noise_stream_reader, - noise_stream_writer, - outbound_rx, - inbound_tx, - notify_shutdown, - status_sender, - ); - - let template_receiver_channel = TemplateReceiverChannel { - channel_manager_receiver, - channel_manager_sender, - tp_receiver: inbound_rx, - tp_sender: outbound_tx, - }; - - info!(attempt, "TemplateReceiver initialized successfully"); - return Ok(TemplateReceiver { - template_receiver_channel, - }); - } - Err(e) => { - error!(attempt, error = ?e, "Noise handshake failed"); - } - } - } - Err(e) => { - warn!(attempt, MAX_RETRIES, error = ?e, "Failed to connect to template provider"); - } - } - - if attempt < 
MAX_RETRIES { - debug!(attempt, "Retrying connection after backoff"); - tokio::time::sleep(std::time::Duration::from_secs(2)).await; - } - } - - error!("Exhausted all connection attempts, shutting down TemplateReceiver"); - Err(PoolError::Shutdown) - } - - /// Start unified message loop for TemplateReceiver. - /// - /// Responsibilities: - /// - Run handshake (`setup_connection`) - /// - Send [`CoinbaseOutputConstraints`] - /// - Handle: - /// - Messages from Template Provider - /// - Messages from ChannelManager - /// - Shutdown signals (upstream/job-declarator fallback) - pub async fn start( - mut self, - socket_address: String, - notify_shutdown: broadcast::Sender, - status_sender: Sender, - task_manager: Arc, - coinbase_outputs: Vec, - ) -> PoolResult<()> { - let status_sender = StatusSender::TemplateReceiver(status_sender); - let mut shutdown_rx = notify_shutdown.subscribe(); - - info!("Initialized state for starting template receiver"); - self.setup_connection(socket_address).await?; - - self.coinbase_constraints(coinbase_outputs).await?; - - info!("Setup Connection done. connection with template receiver is now done"); - task_manager.spawn( - async move { - loop { - let mut self_clone_1 = self.clone(); - let self_clone_2 = self.clone(); - tokio::select! 
{ - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Template Receiver: received shutdown signal"); - break; - }, - Err(e) => { - warn!(error = ?e, "Template Receiver: shutdown channel closed unexpectedly"); - break; - } - _ => {} - } - } - res = self_clone_1.handle_template_provider_message() => { - if let Err(e) = res { - error!("TemplateReceiver template provider handler failed: {e:?}"); - handle_error(&status_sender, e).await; - break; - } - } - res = self_clone_2.handle_channel_manager_message() => { - if let Err(e) = res { - error!("TemplateReceiver channel manager handler failed: {e:?}"); - handle_error(&status_sender, e).await; - break; - } - }, - } - } - warn!("TemplateReceiver: unified message loop exited."); - }, - ); - Ok(()) - } - - /// Handle inbound messages from the template provider. - /// - /// Routes: - /// - `Common` messages β†’ handled locally - /// - `TemplateDistribution` messages β†’ forwarded to ChannelManager - /// - Unsupported messages β†’ logged and ignored - pub async fn handle_template_provider_message(&mut self) -> PoolResult<()> { - let mut sv2_frame = self.template_receiver_channel.tp_receiver.recv().await?; - debug!("Received SV2 frame from Template provider."); - let Some(message_type) = sv2_frame.get_header().map(|m| m.msg_type()) else { - return Ok(()); - }; - - match protocol_message_type(message_type) { - MessageType::Common => { - info!( - ?message_type, - "Handling common message from Template provider." - ); - - self.handle_common_message_frame_from_server( - None, - message_type, - sv2_frame.payload(), - ) - .await?; - } - MessageType::TemplateDistribution => { - let message = TemplateDistribution::try_from((message_type, sv2_frame.payload()))? 
- .into_static(); - - self.template_receiver_channel - .channel_manager_sender - .send(message) - .await - .map_err(|e| { - error!(error=?e, "Failed to send template distribution message to channel manager."); - PoolError::ChannelErrorSender - })?; - } - _ => { - warn!("Received unsupported message type from template provider: {message_type}"); - } - } - Ok(()) - } - - /// Handle messages from channel manager β†’ template provider. - /// - /// Forwards outbound frames upstream - pub async fn handle_channel_manager_message(&self) -> PoolResult<()> { - let msg = self - .template_receiver_channel - .channel_manager_receiver - .recv() - .await?; - let message = AnyMessage::TemplateDistribution(msg).into_static(); - let frame: StdFrame = message.try_into()?; - - debug!("Forwarding message from channel manager to outbound_tx"); - self.template_receiver_channel - .tp_sender - .send(frame) - .await - .map_err(|_| PoolError::ChannelErrorSender)?; - - Ok(()) - } - - /// Build and send [`CoinbaseOutputConstraints`] to the TP. 
- pub async fn coinbase_constraints(&mut self, coinbase_outputs: Vec) -> PoolResult<()> { - debug!( - "Deserializing coinbase outputs ({} bytes)", - coinbase_outputs.len() - ); - let outputs: Vec = bitcoin::consensus::deserialize(&coinbase_outputs)?; - - let max_size: u32 = outputs.iter().map(|o| o.size() as u32).sum(); - debug!( - max_size, - outputs_count = outputs.len(), - "Calculated max coinbase output size" - ); - - let dummy_coinbase = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { - previous_output: OutPoint::null(), - script_sig: ScriptBuf::new(), - sequence: Sequence::MAX, - witness: Witness::from(vec![vec![0; 32]]), - }], - output: outputs, - }; - - let max_sigops = dummy_coinbase.total_sigop_cost(|_| None) as u16; - debug!(max_sigops, "Calculated max sigops for coinbase"); - - let constraints = CoinbaseOutputConstraints { - coinbase_output_max_additional_size: max_size, - coinbase_output_max_additional_sigops: max_sigops, - }; - - let msg = AnyMessage::TemplateDistribution( - TemplateDistribution::CoinbaseOutputConstraints(constraints), - ); - - let frame: StdFrame = msg.try_into()?; - info!("Sending CoinbaseOutputConstraints message upstream"); - self.template_receiver_channel - .tp_sender - .send(frame) - .await - .map_err(|_| { - error!("Failed to send CoinbaseOutputConstraints message upstream"); - PoolError::ChannelErrorSender - })?; - - Ok(()) - } - - // Performs the initial handshake with Template Provider. 
- pub async fn setup_connection(&mut self, addr: String) -> PoolResult<()> { - let socket: SocketAddr = addr.parse().map_err(|_| { - error!(%addr, "Invalid socket address"); - PoolError::InvalidSocketAddress(addr.clone()) - })?; - - debug!(%socket, "Building SetupConnection message to the Template Provider"); - let setup_msg = get_setup_connection_message_tp(socket); - let frame: StdFrame = Message::Common(setup_msg.into()).try_into()?; - - info!("Sending SetupConnection message to the Template Provider"); - self.template_receiver_channel - .tp_sender - .send(frame) - .await - .map_err(|_| { - error!("Failed to send setup connection message upstream"); - PoolError::ChannelErrorSender - })?; - - info!("Waiting for upstream handshake response"); - let mut incoming: StdFrame = self - .template_receiver_channel - .tp_receiver - .recv() - .await - .map_err(|e| { - error!(?e, "Upstream connection closed during handshake"); - PoolError::Noise(Error::ExpectedIncomingHandshakeMessage) - })?; - - let msg_type = incoming - .get_header() - .ok_or(framing_sv2::Error::ExpectedHandshakeFrame)? 
- .msg_type(); - debug!(?msg_type, "Received upstream handshake response"); - - self.handle_common_message_frame_from_server(None, msg_type, incoming.payload()) - .await?; - info!("Handshake with upstream completed successfully"); - Ok(()) - } -} diff --git a/roles/pool/src/lib/utils.rs b/roles/pool/src/lib/utils.rs deleted file mode 100644 index f5136fbcec..0000000000 --- a/roles/pool/src/lib/utils.rs +++ /dev/null @@ -1,355 +0,0 @@ -use std::{net::SocketAddr, sync::Arc}; - -use async_channel::{Receiver, Sender}; -use stratum_apps::{ - network_helpers::noise_stream::{NoiseTcpReadHalf, NoiseTcpWriteHalf}, - stratum_core::{ - buffer_sv2, - codec_sv2::{StandardEitherFrame, StandardSv2Frame}, - common_messages_sv2::{ - Protocol, SetupConnection, MESSAGE_TYPE_CHANNEL_ENDPOINT_CHANGED, - MESSAGE_TYPE_RECONNECT, MESSAGE_TYPE_SETUP_CONNECTION, - MESSAGE_TYPE_SETUP_CONNECTION_ERROR, MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - }, - framing_sv2::framing::{Frame, Sv2Frame}, - job_declaration_sv2::{ - MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN, MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN_SUCCESS, - MESSAGE_TYPE_DECLARE_MINING_JOB, MESSAGE_TYPE_DECLARE_MINING_JOB_ERROR, - MESSAGE_TYPE_DECLARE_MINING_JOB_SUCCESS, MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS, - MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS, MESSAGE_TYPE_PUSH_SOLUTION, - }, - mining_sv2::{ - MESSAGE_TYPE_CLOSE_CHANNEL, MESSAGE_TYPE_MINING_SET_NEW_PREV_HASH, - MESSAGE_TYPE_NEW_EXTENDED_MINING_JOB, MESSAGE_TYPE_NEW_MINING_JOB, - MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL, - MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL_SUCCESS, - MESSAGE_TYPE_OPEN_MINING_CHANNEL_ERROR, MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL, - MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL_SUCCESS, MESSAGE_TYPE_SET_CUSTOM_MINING_JOB, - MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_ERROR, MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_SUCCESS, - MESSAGE_TYPE_SET_EXTRANONCE_PREFIX, MESSAGE_TYPE_SET_GROUP_CHANNEL, - MESSAGE_TYPE_SET_TARGET, MESSAGE_TYPE_SUBMIT_SHARES_ERROR, - 
MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, MESSAGE_TYPE_SUBMIT_SHARES_STANDARD, - MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS, MESSAGE_TYPE_UPDATE_CHANNEL, - MESSAGE_TYPE_UPDATE_CHANNEL_ERROR, - }, - parsers_sv2::AnyMessage, - template_distribution_sv2::{ - MESSAGE_TYPE_COINBASE_OUTPUT_CONSTRAINTS, MESSAGE_TYPE_NEW_TEMPLATE, - MESSAGE_TYPE_REQUEST_TRANSACTION_DATA, MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_ERROR, - MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_SUCCESS, MESSAGE_TYPE_SET_NEW_PREV_HASH, - MESSAGE_TYPE_SUBMIT_SOLUTION, - }, - }, -}; -use tokio::sync::broadcast; -use tracing::{error, trace, warn, Instrument}; - -use crate::{ - error::PoolResult, - status::{StatusSender, StatusType}, - task_manager::TaskManager, -}; - -pub type Message = AnyMessage<'static>; -pub type StdFrame = StandardSv2Frame; -pub type EitherFrame = StandardEitherFrame; -pub type SV2Frame = Sv2Frame; - -/// Represents a message that can trigger shutdown of various system components. -#[derive(Debug, Clone)] -pub enum ShutdownMessage { - /// Shutdown all components immediately - ShutdownAll, - /// Shutdown all downstream connections - DownstreamShutdownAll, - /// Shutdown a specific downstream connection by ID - DownstreamShutdown(usize), -} - -/// Constructs a `SetupConnection` message for the mining protocol. 
-#[allow(clippy::result_large_err)] -pub fn get_setup_connection_message( - min_version: u16, - max_version: u16, -) -> PoolResult> { - let endpoint_host = "0.0.0.0".to_string().into_bytes().try_into()?; - let vendor = String::new().try_into()?; - let hardware_version = String::new().try_into()?; - let firmware = String::new().try_into()?; - let device_id = String::new().try_into()?; - let flags = 0b0000_0000_0000_0000_0000_0000_0000_0110; - Ok(SetupConnection { - protocol: Protocol::MiningProtocol, - min_version, - max_version, - flags, - endpoint_host, - endpoint_port: 50, - vendor, - hardware_version, - firmware, - device_id, - }) -} - -/// Constructs a `SetupConnection` message for the Template Provider (TP). -pub fn get_setup_connection_message_tp(address: SocketAddr) -> SetupConnection<'static> { - let endpoint_host = address.ip().to_string().into_bytes().try_into().unwrap(); - let vendor = String::new().try_into().unwrap(); - let hardware_version = String::new().try_into().unwrap(); - let firmware = String::new().try_into().unwrap(); - let device_id = String::new().try_into().unwrap(); - SetupConnection { - protocol: Protocol::TemplateDistributionProtocol, - min_version: 2, - max_version: 2, - flags: 0b0000_0000_0000_0000_0000_0000_0000_0000, - endpoint_host, - endpoint_port: address.port(), - vendor, - hardware_version, - firmware, - device_id, - } -} - -/// Spawns async reader and writer tasks for handling framed I/O with shutdown support. 
-#[track_caller] -#[allow(clippy::too_many_arguments)] -pub fn spawn_io_tasks( - task_manager: Arc, - mut reader: NoiseTcpReadHalf, - mut writer: NoiseTcpWriteHalf, - outbound_rx: Receiver, - inbound_tx: Sender, - notify_shutdown: broadcast::Sender, - status_sender: StatusSender, -) { - let caller = std::panic::Location::caller(); - let inbound_tx_clone = inbound_tx.clone(); - let outbound_rx_clone = outbound_rx.clone(); - { - let mut shutdown_rx = notify_shutdown.subscribe(); - let status_sender = status_sender.clone(); - let status_type: StatusType = StatusType::from(&status_sender); - - task_manager.spawn(async move { - trace!("Reader task started"); - loop { - tokio::select! { - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - trace!("Received global shutdown"); - inbound_tx.close(); - break; - } - Ok(ShutdownMessage::DownstreamShutdown(down_id)) if matches!(status_type, StatusType::Downstream(id) if id == down_id) => { - trace!(down_id, "Received downstream shutdown"); - if status_type != StatusType::TemplateReceiver { - inbound_tx.close(); - break; - } - } - _ => {} - } - } - res = reader.read_frame() => { - match res { - Ok(frame) => { - match frame { - Frame::HandShake(frame) => { - error!(?frame, "Received handshake frame"); - drop(frame); - break; - }, - Frame::Sv2(sv2_frame) => { - trace!("Received inbound frame"); - if let Err(e) = inbound_tx.send(sv2_frame).await { - inbound_tx.close(); - error!(error=?e, "Failed to forward inbound frame"); - break; - } - }, - } - } - Err(e) => { - error!(error=?e, "Reader error"); - inbound_tx.close(); - break; - } - } - } - } - } - inbound_tx.close(); - outbound_rx_clone.close(); - drop(inbound_tx); - drop(outbound_rx_clone); - warn!("Reader task exited."); - }.instrument(tracing::trace_span!( - "reader_task", - spawned_at = %format!("{}:{}", caller.file(), caller.line()) - ))); - } - - { - let mut shutdown_rx = notify_shutdown.subscribe(); - let status_type: StatusType = 
StatusType::from(&status_sender); - - task_manager.spawn(async move { - trace!("Writer task started"); - loop { - tokio::select! { - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - trace!("Received global shutdown"); - outbound_rx.close(); - break; - } - Ok(ShutdownMessage::DownstreamShutdown(down_id)) if matches!(status_type, StatusType::Downstream(id) if id == down_id) => { - trace!(down_id, "Received downstream shutdown"); - if status_type != StatusType::TemplateReceiver { - outbound_rx.close(); - break; - } - } - _ => {} - } - } - res = outbound_rx.recv() => { - match res { - Ok(frame) => { - trace!("Sending outbound frame"); - if let Err(e) = writer.write_frame(frame.into()).await { - error!(error=?e, "Writer error"); - outbound_rx.close(); - break; - } - } - Err(_) => { - outbound_rx.close(); - warn!("Outbound channel closed"); - break; - } - } - } - } - } - outbound_rx.close(); - inbound_tx_clone.close(); - drop(outbound_rx); - drop(inbound_tx_clone); - warn!("Writer task exited."); - }.instrument(tracing::trace_span!( - "writer_task", - spawned_at = %format!("{}:{}", caller.file(), caller.line()) - ))); - } -} - -pub fn is_common_message(message_type: u8) -> bool { - matches!( - message_type, - MESSAGE_TYPE_SETUP_CONNECTION - | MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS - | MESSAGE_TYPE_SETUP_CONNECTION_ERROR - | MESSAGE_TYPE_CHANNEL_ENDPOINT_CHANGED - | MESSAGE_TYPE_RECONNECT - ) -} - -pub fn is_mining_message(message_type: u8) -> bool { - matches!( - message_type, - MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL - | MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL_SUCCESS - | MESSAGE_TYPE_OPEN_MINING_CHANNEL_ERROR - | MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL - | MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL_SUCCESS - | MESSAGE_TYPE_NEW_MINING_JOB - | MESSAGE_TYPE_UPDATE_CHANNEL - | MESSAGE_TYPE_UPDATE_CHANNEL_ERROR - | MESSAGE_TYPE_CLOSE_CHANNEL - | MESSAGE_TYPE_SET_EXTRANONCE_PREFIX - | MESSAGE_TYPE_SUBMIT_SHARES_STANDARD - | 
MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED - | MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS - | MESSAGE_TYPE_SUBMIT_SHARES_ERROR - // | MESSAGE_TYPE_RESERVED - | 0x1e - | MESSAGE_TYPE_NEW_EXTENDED_MINING_JOB - | MESSAGE_TYPE_MINING_SET_NEW_PREV_HASH - | MESSAGE_TYPE_SET_TARGET - | MESSAGE_TYPE_SET_CUSTOM_MINING_JOB - | MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_SUCCESS - | MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_ERROR - | MESSAGE_TYPE_SET_GROUP_CHANNEL - ) -} - -pub fn is_job_declaration_message(message_type: u8) -> bool { - matches!( - message_type, - MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN - | MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN_SUCCESS - | MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS - | MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS - | MESSAGE_TYPE_DECLARE_MINING_JOB - | MESSAGE_TYPE_DECLARE_MINING_JOB_SUCCESS - | MESSAGE_TYPE_DECLARE_MINING_JOB_ERROR - | MESSAGE_TYPE_PUSH_SOLUTION - ) -} - -pub fn is_template_distribution_message(message_type: u8) -> bool { - matches!( - message_type, - MESSAGE_TYPE_COINBASE_OUTPUT_CONSTRAINTS - | MESSAGE_TYPE_NEW_TEMPLATE - | MESSAGE_TYPE_SET_NEW_PREV_HASH - | MESSAGE_TYPE_REQUEST_TRANSACTION_DATA - | MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_SUCCESS - | MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_ERROR - | MESSAGE_TYPE_SUBMIT_SOLUTION - ) -} - -#[derive(Debug, PartialEq, Eq)] -pub enum MessageType { - Common, - Mining, - JobDeclaration, - TemplateDistribution, - Unknown, -} - -pub fn protocol_message_type(message_type: u8) -> MessageType { - if is_common_message(message_type) { - MessageType::Common - } else if is_mining_message(message_type) { - MessageType::Mining - } else if is_job_declaration_message(message_type) { - MessageType::JobDeclaration - } else if is_template_distribution_message(message_type) { - MessageType::TemplateDistribution - } else { - MessageType::Unknown - } -} - -#[derive(Debug, PartialEq, Eq, Hash)] -pub struct VardiffKey { - pub downstream_id: usize, - pub channel_id: u32, -} - -impl From<(usize, u32)> for VardiffKey { - fn 
from(value: (usize, u32)) -> Self { - VardiffKey { - downstream_id: value.0, - channel_id: value.1, - } - } -} diff --git a/roles/pool/src/main.rs b/roles/pool/src/main.rs deleted file mode 100644 index 7020ef206a..0000000000 --- a/roles/pool/src/main.rs +++ /dev/null @@ -1,15 +0,0 @@ -use pool_sv2::PoolSv2; -use stratum_apps::config_helpers::logging::init_logging; - -use crate::args::process_cli_args; - -mod args; - -#[tokio::main] -async fn main() { - let config = process_cli_args(); - init_logging(config.log_dir()); - if let Err(e) = PoolSv2::new(config).start().await { - tracing::error!("Pool Error'ed out: {e}"); - }; -} diff --git a/roles/stratum-apps/Cargo.toml b/roles/stratum-apps/Cargo.toml deleted file mode 100644 index 8b6331991e..0000000000 --- a/roles/stratum-apps/Cargo.toml +++ /dev/null @@ -1,74 +0,0 @@ -[package] -name = "stratum-apps" -version = "0.1.0" -authors = ["The Stratum V2 Developers"] -edition = "2021" -readme = "README.md" -description = "Complete Stratum V2 application development kit - all utilities in one crate" -documentation = "https://docs.rs/stratum-apps" -license = "MIT OR Apache-2.0" -repository = "https://github.com/stratum-mining/stratum" -homepage = "https://stratumprotocol.org" -keywords = ["stratum", "mining", "bitcoin", "protocol", "sv2"] - -[dependencies] -# Core protocol layer -stratum-core = { path = "../../stratum-core", version = "0.1.0", optional = true} - -# External dependencies needed by the modules -# Network helpers dependencies -async-channel = { version = "1.8.0" } -tokio = { version = "1.44.1", features = ["full"] } -futures = { version = "0.3.28" } -tokio-util = { version = "0.7.10", default-features = false, features = ["codec"], optional = true } - -# Config helpers dependencies -serde = { version = "1.0.89", features = ["derive", "alloc"], default-features = false } -miniscript = { version = "12.3.4", default-features = false, features = ["no-std"] } -tracing-subscriber = { version = "0.3", features = 
["env-filter"] } -tracing = { version = "0.1" } - -# Key utils dependencies -bs58 = { version = "0.4.0", default-features = false, features = ["check", "alloc"] } -secp256k1 = { version = "0.28.2", default-features = false, features = ["alloc", "rand"] } -rand = { version = "0.8.5", default-features = false } -rustversion = "1.0" -generic-array = "=0.14.7" - -# RPC optional dependencies -serde_json = { version = "1.0", default-features = false, features = ["alloc", "raw_value"], optional = true } -hex = { version = "0.4.3", optional = true } -base64 = { version = "0.21.5", optional = true } -hyper = { version = "1.1.0", features = ["full"], optional = true } -hyper-util = { version = "0.1", features = ["full"], optional = true } -http-body-util = { version = "0.1", optional = true } - -# Common external dependencies that roles always need -clap = { version = "4.5.39", features = ["derive"] } -ext-config = { version = "0.14.0", features = ["toml"], package = "config" } - -[features] -default = ["network", "config", "std"] - -# Core module features -network = ["tokio-util", "core"] -config = [] -rpc = ["serde_json", "hex", "base64", "hyper", "hyper-util", "http-body-util"] -std = ["bs58/std", "secp256k1/rand-std", "rand/std", "rand/std_rng"] -core = ["stratum-core"] - -# Protocol features passed through to stratum-core -sv1 = ["stratum-core/sv1", "stratum-core/translation", "tokio-util", "serde_json"] -with_buffer_pool = ["stratum-core/with_buffer_pool"] - -# Convenience feature bundles for different role types -pool = ["network", "config", "with_buffer_pool", "core"] -jd_client = ["network", "config", "with_buffer_pool", "core"] -# Note: jd_server intentionally excludes 'core', 'network', and 'rpc' - it uses crates.io crates directly -jd_server = ["config"] -translator = ["network", "config", "sv1", "with_buffer_pool", "core"] -# Note: mining_device intentionally excludes 'core', 'network', and 'rpc' - it uses crates.io crates directly -mining_device = ["config"] - 
-[package.metadata.docs.rs] -features = ["pool", "jd_client", "jd_server", "translator", "sv1", "rpc"] diff --git a/roles/stratum-apps/README.md b/roles/stratum-apps/README.md deleted file mode 100644 index 82b56c5399..0000000000 --- a/roles/stratum-apps/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# Stratum Apps - -Complete Stratum V2 application development kit - all utilities in one crate. - -## Overview - -`stratum-apps` is a unified crate that provides all the utilities needed for building Stratum V2 applications. - -## Architecture - -This crate is organized into three main modules: - -- **`network_helpers`** - High-level networking utilities (from `network_helpers_sv2`) -- **`config_helpers`** - Configuration management helpers (from `config_helpers_sv2`) -- **`rpc`** - RPC utilities with custom serializable types (from `rpc_sv2`) - *feature-gated* - -The crate also re-exports `stratum-core`, the central hub for the Stratum V2 ecosystem that provides a cohesive API for all low-level protocol functionality. 
- -## Quick Start - -Add to your `Cargo.toml`: - -```toml -[dependencies] -stratum-apps = { version = "0.1.0", features = ["pool"] } -``` - -Basic usage: - -```rust -use stratum_apps::{network_helpers, config_helpers}; - -// For RPC functionality (when rpc feature is enabled) -#[cfg(feature = "rpc")] -use stratum_apps::rpc::{BlockHash, MiniRpcClient}; -``` - -## Features - -### Core Features -- `network` - Networking utilities (enabled by default) -- `config` - Configuration helpers (enabled by default) -- `rpc` - RPC utilities with custom serializable types (optional) - - Provides `Hash`, `BlockHash`, `Amount` types with proper JSON serialization - - `MiniRpcClient` for Bitcoin RPC communication - -### Protocol Features -- `sv1` - Enable SV1 protocol support (includes translation utilities) -- `with_buffer_pool` - Enable buffer pooling for better performance - -### Role-Specific Bundles -- `pool` - Everything needed for pool applications -- `jd_client` - Everything needed for JD client applications -- `jd_server` - Everything needed for JD server applications (includes RPC) -- `translator` - Everything needed for translator applications (includes SV1 + translation) -- `mining_device` - Everything needed for mining device applications - -## Usage Examples - -### Pool Application - -```toml -[dependencies] -stratum-apps = { version = "1.0", features = ["pool"] } -``` - -```rust -use stratum_apps::{network_helpers, config_helpers}; - -// Use networking -let connection = network_helpers::Connection::new(stream, HandshakeRole::Responder).await?; - -// Use configuration -let config: PoolConfig = config_helpers::parse_config("pool.toml")?; -``` - -### JD Server Application - -```toml -[dependencies] -stratum-apps = { version = "1.0", features = ["jd_server"] } -``` - -```rust -use stratum_apps::{network_helpers, config_helpers, rpc}; - -// RPC functionality with custom types -use stratum_apps::rpc::{BlockHash, MiniRpcClient}; - -// All networking and configuration 
utilities available -// Plus RPC server utilities with proper serialization -``` \ No newline at end of file diff --git a/roles/stratum-apps/src/config_helpers/coinbase_output/errors.rs b/roles/stratum-apps/src/config_helpers/coinbase_output/errors.rs deleted file mode 100644 index e3c1492b30..0000000000 --- a/roles/stratum-apps/src/config_helpers/coinbase_output/errors.rs +++ /dev/null @@ -1,65 +0,0 @@ -use core::fmt; - -use miniscript::bitcoin::{address, hex}; - -/// Error enum -#[derive(Debug)] -pub enum Error { - /// Error parsing a Bitcoin address - Address(address::ParseError), - // TODO rust-miniscript 13 will have functions to do these checks for us so we don't - // need to pollute our own error enum with this fiddly stuff - /// addr() descriptor did not have exactly 1 child - AddrDescriptorNChildren(usize), - /// raw() descriptor child did not have 0 children - AddrDescriptorGrandchild, - /// raw() descriptor did not have exactly 1 child - RawDescriptorNChildren(usize), - /// addr() descriptor child did not have 0 children - RawDescriptorGrandchild, - /// Error parsing a raw descriptor as hex. - Hex(hex::HexToBytesError), - /// Invalid `output_script_value` for script type. It must be a valid public key/script - InvalidOutputScript, - /// Unknown script type in config - UnknownOutputScriptType, - /// Error from the `miniscript` crate. 
- Miniscript(miniscript::Error), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use Error::*; - match self { - Address(ref e) => write!(f, "Bitcoin address: {e}"), - AddrDescriptorNChildren(0) => write!(f, "Found addr() descriptor with no address"), - AddrDescriptorNChildren(n) => write!(f, "Found addr() descriptor with {n} children; must be exactly one valid address"), - AddrDescriptorGrandchild => write!(f, "Found descriptor of the form addr(X(y)); X must be a valid address and have no subexpression"), - RawDescriptorNChildren(0) => write!(f, "Found raw() descriptor with no hex-encoded script"), - RawDescriptorNChildren(n) => write!(f, "Found raw() descriptor with {n} children; must be exactly one hex-encoded script"), - RawDescriptorGrandchild => write!(f, "Found descriptor of the form raw(X(y)); X must be a hex-encoded script and have no subexpression"), - Hex(ref e) => write!(f, "Decoding hex-formatted script: {e}"), - UnknownOutputScriptType => write!(f, "Unknown script type in config"), - InvalidOutputScript => write!(f, "Invalid output_script_value for your script type. 
It must be a valid public key/script"), - Miniscript(ref e) => write!(f, "Miniscript: {e}"), - } - } -} - -impl From for Error { - fn from(e: address::ParseError) -> Self { - Error::Address(e) - } -} - -impl From for Error { - fn from(e: hex::HexToBytesError) -> Self { - Error::Hex(e) - } -} - -impl From for Error { - fn from(e: miniscript::Error) -> Self { - Error::Miniscript(e) - } -} diff --git a/roles/stratum-apps/src/config_helpers/coinbase_output/mod.rs b/roles/stratum-apps/src/config_helpers/coinbase_output/mod.rs deleted file mode 100644 index d4657a3530..0000000000 --- a/roles/stratum-apps/src/config_helpers/coinbase_output/mod.rs +++ /dev/null @@ -1,377 +0,0 @@ -mod errors; -mod serde_types; - -use miniscript::{ - bitcoin::{address::NetworkUnchecked, hex::FromHex as _, Address, Network, ScriptBuf}, - DefiniteDescriptorKey, Descriptor, -}; - -pub use errors::Error; - -/// Coinbase output transaction. -/// -/// Typically used for parsing coinbase outputs defined in SRI role configuration files. -#[derive(Debug, serde::Deserialize, Clone)] -#[serde(try_from = "serde_types::SerdeCoinbaseOutput")] -pub struct CoinbaseRewardScript { - script_pubkey: ScriptBuf, - ok_for_mainnet: bool, -} - -impl CoinbaseRewardScript { - /// Creates a new [`CoinbaseRewardScript`] from a descriptor string. - pub fn from_descriptor(mut s: &str) -> Result { - // Taproot descriptors cannot be parsed with `expression::Tree::from_str` and - // need special handling. So we special-case them early and just pass to - // rust-miniscript. In Miniscript 13 we will not need to do this. - if s.starts_with("tr") { - let desc = s.parse::>()?; - return Ok(Self { - script_pubkey: desc.script_pubkey(), - // Descriptors don't have a way to specify a network, so we assume - // they are OK to be used on mainnet. - ok_for_mainnet: true, - }); - } - - // Manually verify the checksum. 
FIXME in Miniscript 13 we will not need - // to do this, since `expression::Tree::from_str` will do the checksum - // validation for us. (And yield a much less horrible error type.) - if let Some((desc_str, checksum_str)) = s.rsplit_once('#') { - let expected_sum = miniscript::descriptor::checksum::desc_checksum(desc_str)?; - if checksum_str != expected_sum { - return Err(miniscript::Error::BadDescriptor(format!( - "Invalid checksum '{checksum_str}', expected '{expected_sum}'" - )) - .into()); - } - s = desc_str; - } - - let tree = miniscript::expression::Tree::from_str(s)?; - match tree.name { - "addr" => { - // In rust-miniscript 13 these can be replaced with a call to - // TreeIterItem::verify_toplevel which will these checks for us - // in a uniform way. - if tree.args.len() != 1 { - return Err(Error::AddrDescriptorNChildren(tree.args.len())); - } - if !tree.args[0].args.is_empty() { - return Err(Error::AddrDescriptorGrandchild); - } - - let addr = tree.args[0].name.parse::>()?; - Ok(Self { - script_pubkey: addr.assume_checked_ref().script_pubkey(), - ok_for_mainnet: addr.is_valid_for_network(Network::Bitcoin), - }) - } - "raw" => { - // In rust-miniscript 13 these can be replaced with a call to - // TreeIterItem::verify_toplevel which will these checks for us - // in a uniform way. - if tree.args.len() != 1 { - return Err(Error::RawDescriptorNChildren(tree.args.len())); - } - if !tree.args[0].args.is_empty() { - return Err(Error::RawDescriptorGrandchild); - } - - let bytes = Vec::::from_hex(tree.args[0].name)?; - Ok(Self { - script_pubkey: ScriptBuf::from(bytes), - // Users of hex scriptpubkeys are on their own. - ok_for_mainnet: true, - }) - } - _ => { - let desc = s.parse::>()?; - Ok(Self { - script_pubkey: desc.script_pubkey(), - // Descriptors don't have a way to specify a network, so we assume - // they are OK to be used on mainnet. - ok_for_mainnet: true, - }) - } - } - } - - /// Whether this coinbase output is okay for use on mainnet. 
- /// - /// This is a "best effort" check and currently only returns false if the user - /// provides an addr() descriptor in which they specified a testnet or regtest - /// address. - pub fn ok_for_mainnet(&self) -> bool { - self.ok_for_mainnet - } - - /// The `scriptPubKey` associated with the coinbase output - pub fn script_pubkey(&self) -> ScriptBuf { - self.script_pubkey.clone() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn fixed_vector_addr() { - // Valid - assert_eq!( - CoinbaseRewardScript::from_descriptor( - "addr(1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)#wdnlkpe8" - ) - .unwrap() - .script_pubkey() - .to_hex_string(), - "76a91477bff20c60e522dfaa3350c39b030a5d004e839a88ac", - ); - assert_eq!( - CoinbaseRewardScript::from_descriptor( - "addr(3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy)#rsjl0crt" - ) - .unwrap() - .script_pubkey() - .to_hex_string(), - "a914b472a266d0bd89c13706a4132ccfb16f7c3b9fcb87", - ); - assert_eq!( - CoinbaseRewardScript::from_descriptor( - "addr(bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4)#uyjndxcw" - ) - .unwrap() - .script_pubkey() - .to_hex_string(), - "0014751e76e8199196d454941c45d1b3a323f1433bd6", - ); - assert_eq!( - CoinbaseRewardScript::from_descriptor( - "addr(bc1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qccfmv3)#8kzm8txf" - ) - .unwrap() - .script_pubkey() - .to_hex_string(), - "00201863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262", - ); - // no checksum is ok - assert_eq!( - CoinbaseRewardScript::from_descriptor("addr(1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)") - .unwrap() - .script_pubkey() - .to_hex_string(), - "76a91477bff20c60e522dfaa3350c39b030a5d004e839a88ac", - ); - assert_eq!( - CoinbaseRewardScript::from_descriptor("addr(1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2,)") - .unwrap_err() - .to_string(), - "Found addr() descriptor with 2 children; must be exactly one valid address", - ); - - // Invalid - // But empty checksum is not (in Miniscript 13 these error messages will be cleaner) - assert_eq!( - 
CoinbaseRewardScript::from_descriptor("addr(1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)#") - .unwrap_err() - .to_string(), - "Miniscript: Invalid descriptor: Invalid checksum '', expected 'wdnlkpe8'", - ); - assert_eq!( - CoinbaseRewardScript::from_descriptor( - "addr(1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)#wdnlkpe7" - ) - .unwrap_err() - .to_string(), - "Miniscript: Invalid descriptor: Invalid checksum 'wdnlkpe7', expected 'wdnlkpe8'", - ); - // Bad base58ck checksum even though the descriptor checksum is OK. Note that rust-bitcoin - // 0.32 interprets bad bech32 checksums as "base58 errors" because it doesn't know - // what encoding an invalid string is supposed to have. See https://github.com/rust-bitcoin/rust-bitcoin/issues/3044 - // Expected error: "Bitcoin address: base58 error: incorrect checksum: base58 checksum - // 0x6c7615f4 does not match expected 0x6b7615f4" (hex-conservative v0.3.0) - // or "Bitcoin address: base58 error" (hex-conservative v0.2.1) - assert!(CoinbaseRewardScript::from_descriptor( - "addr(1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN3)#5v55uzec" - ) - .is_err()); - // Expected error: "Bitcoin address: base58 error: decode: invalid base58 character 0x30" - // (hex-conservative v0.3.0) or "Bitcoin address: base58 error" (hex-conservative - // v0.2.1) - assert!(CoinbaseRewardScript::from_descriptor( - "addr(bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t3)#wfr7lfxf" - ) - .is_err()); - // Flagrantly bad stuff -- should probably PR these upstream to rust-miniscript. - // Expected error: "Bitcoin address: base58 error: too short: base58 decoded data was not - // long enough, must be at least 4 byte: 0" (hex-conservative v0.3.0) or "Bitcoin - // address: base58 error" (hex-conservative v0.2.1) - assert!(CoinbaseRewardScript::from_descriptor("addr()").is_err()); - assert_eq!( - CoinbaseRewardScript::from_descriptor("addr(It's a mad mad world!?! πŸ™ƒ)") - .unwrap_err() - .to_string(), - "Miniscript: unprintable character 0xf0", - ); - // This error is just wrong lol. 
Fixed in Miniscript 13. - assert_eq!( - CoinbaseRewardScript::from_descriptor("addr(It's a mad mad world!?! πŸ™ƒ)#abcdefg") - .unwrap_err() - .to_string(), - "Miniscript: Invalid descriptor: Invalid character in checksum: 'πŸ™ƒ'", - ); - // Expected error: "Bitcoin address: base58 error: decode: invalid base58 character 0x49" - // (hex-conservative v0.3.0) or "Bitcoin address: base58 error" (hex-conservative - // v0.2.1) - assert!( - CoinbaseRewardScript::from_descriptor("addr(It's a mad mad world!?!)#hmeprl29") - .is_err() - ); - assert_eq!( - CoinbaseRewardScript::from_descriptor("addr(It's a mad mad world!?!)#πŸ™ƒπŸ™ƒπŸ™ƒπŸ™ƒπŸ™ƒπŸ™ƒ") - .unwrap_err() - .to_string(), - "Miniscript: Invalid descriptor: Invalid checksum 'πŸ™ƒπŸ™ƒπŸ™ƒπŸ™ƒπŸ™ƒπŸ™ƒ', expected 'hmeprl29'", - ); - } - - #[test] - fn fixed_vector_combo() { - // We do not support combo descriptors. Nobody should. - assert_eq!( - CoinbaseRewardScript::from_descriptor( - "combo(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)" - ) - .unwrap_err() - .to_string(), - "Miniscript: unexpected Β«combo(1 args) while parsing MiniscriptΒ»" - ); - } - - #[test] - fn fixed_vector_musig() { - // We do not support musig descriptors. One day. - assert_eq!( - CoinbaseRewardScript::from_descriptor("musig(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798,03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556)").unwrap_err().to_string(), - "Miniscript: unexpected Β«musig(2 args) while parsing MiniscriptΒ»" - ); - assert_eq!( - CoinbaseRewardScript::from_descriptor("tr(musig(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798,03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556))").unwrap_err().to_string(), - "Miniscript: expected )", - ); - } - - #[test] - fn fixed_vector_raw() { - // Empty raw descriptors are OK; correspond to the empty script. 
- assert_eq!( - CoinbaseRewardScript::from_descriptor("raw()") - .unwrap() - .script_pubkey() - .to_hex_string(), - "", - ); - assert_eq!( - CoinbaseRewardScript::from_descriptor("raw(deadbeef)") - .unwrap() - .script_pubkey() - .to_hex_string(), - "deadbeef", - ); - assert_eq!( - CoinbaseRewardScript::from_descriptor("raw(DEADBEEF)") - .unwrap() - .script_pubkey() - .to_hex_string(), - "deadbeef", - ); - // Should we allow this? We do, so I guess we should test it and make sure we don't stop.. - assert_eq!( - CoinbaseRewardScript::from_descriptor("raw(DEADbeef)") - .unwrap() - .script_pubkey() - .to_hex_string(), - "deadbeef", - ); - // Expected error: "Decoding hex-formatted script: odd length, failed to create bytes from - // hex: odd hex string length 1" (hex-conservative v0.3.0) or "Decoding - // hex-formatted script: odd length, failed to create bytes from hex" (hex-conservative - // v0.2.1) - assert!(CoinbaseRewardScript::from_descriptor("raw(0)").is_err()); - assert_eq!( - CoinbaseRewardScript::from_descriptor("raw(0,1)") - .unwrap_err() - .to_string(), - "Found raw() descriptor with 2 children; must be exactly one hex-encoded script", - ); - } - - #[test] - fn fixed_vector_miniscript() { - assert_eq!( - CoinbaseRewardScript::from_descriptor("sh(wsh(multi(2,0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798,03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556)))#qpcmf2lu").unwrap().script_pubkey().to_hex_string(), - "a9141cb55de50b72c67709ab16307d69557e6bb1a98787", - ); - assert_eq!( - CoinbaseRewardScript::from_descriptor( - "tr(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)" - ) - .unwrap() - .script_pubkey() - .to_hex_string(), - "5120da4710964f7852695de2da025290e24af6d8c281de5a0b902b7135fd9fd74d21", - ); - assert_eq!( - 
CoinbaseRewardScript::from_descriptor("tr(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798,{pk(03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556),{multi_a(2,026a245bf6dc698504c89a20cfded60853152b695336c28063b61c65cbd269e6b4,0231ecbfac95d972f0b8f81ec6e01e9c621d91a4b48d5f9d12d7e95febe9f34d64),multi_a(2,026a245bf6dc698504c89a20cfded60853152b695336c28063b61c65cbd269e6b4,0231ecbfac95d972f0b8f81ec6e01e9c621d91a4b48d5f9d12d7e95febe9f34d64)}})") - .unwrap() - .script_pubkey() - .to_hex_string(), - "5120493bdae0d225af5cb88c4cb2a1e1e89e391153ba7699c91ebee2fd082ed1636c", - ); - } - - #[test] - fn fixed_vector_keys() { - // xpub - assert_eq!( - CoinbaseRewardScript::from_descriptor("pkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8)").unwrap().script_pubkey().to_hex_string(), - "76a9143442193e1bb70916e914552172cd4e2dbc9df81188ac", - ); - // xpub with non-hardened path - assert_eq!( - CoinbaseRewardScript::from_descriptor("pkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/1/2/3)").unwrap().script_pubkey().to_hex_string(), - "76a914f2d2e1401c88353c2298d1a928d4ed827ff46ff688ac", - ); - // xpub with hardened path (not allowed) - assert_eq!( - CoinbaseRewardScript::from_descriptor("pkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/1'/2/3)").unwrap_err().to_string(), - "Miniscript: unexpected Β«cannot parse multi-path keys, keys with a wildcard or keys with hardened derivation steps as a DerivedDescriptorKeyΒ»", - ); - // no wildcards allowed (at least for now; gmax thinks it would be cool if we would - // instantiate it with the blockheight or something, but need to work out UX) - assert_eq!( - 
CoinbaseRewardScript::from_descriptor("pkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/*)").unwrap_err().to_string(), - "Miniscript: unexpected Β«cannot parse multi-path keys, keys with a wildcard or keys with hardened derivation steps as a DerivedDescriptorKeyΒ»", - ); - // No multipath descriptors allowed; this is not a wallet with change - assert_eq!( - CoinbaseRewardScript::from_descriptor("pkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/<0;1>)").unwrap_err().to_string(), - "Miniscript: unexpected Β«cannot parse multi-path keys, keys with a wildcard or keys with hardened derivation steps as a DerivedDescriptorKeyΒ»", - ); - // Private keys are not allowed, or xprvs. - assert_eq!( - CoinbaseRewardScript::from_descriptor( - "pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)" - ) - .unwrap_err() - .to_string(), - "Miniscript: unexpected Β«Key too short (<66 char), doesn't match any formatΒ»", - ); - // This is a confusing error message which should be fixed in Miniscript 13. 
- assert_eq!( - CoinbaseRewardScript::from_descriptor("pkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi)").unwrap_err().to_string(), - "Miniscript: unexpected Β«Public keys must be 64/66/130 characters in sizeΒ»", - ); - } -} diff --git a/roles/stratum-apps/src/config_helpers/coinbase_output/serde_types.rs b/roles/stratum-apps/src/config_helpers/coinbase_output/serde_types.rs deleted file mode 100644 index d0399fef92..0000000000 --- a/roles/stratum-apps/src/config_helpers/coinbase_output/serde_types.rs +++ /dev/null @@ -1,136 +0,0 @@ -use core::convert::TryFrom; -use miniscript::bitcoin::{ - secp256k1::{All, Secp256k1}, - PublicKey, ScriptBuf, ScriptHash, WScriptHash, XOnlyPublicKey, -}; - -use super::Error; - -#[derive(serde::Deserialize)] -pub(super) struct LegacyCoinbaseOutput { - /// Specifies type of the script used in the output. - /// - /// Supported values include: - /// - `"P2PK"`: Pay-to-Public-Key - /// - `"P2PKH"`: Pay-to-Public-Key-Hash - /// - `"P2SH"`: Pay-to-Script-Hash - /// - `"P2WPKH"`: Pay-to-Witness-Public-Key-Hash:w - - /// - `"P2WSH"`: Pay-to-Witness-Script-Hash - /// - `"P2TR"`: Pay-to-Taproot - pub(super) output_script_type: String, - - /// Value associated with the script, typically a public key or script hash. - /// - /// This field's interpretation depends on the `output_script_type`: - /// - For `"P2PK"`: The raw public key. - /// - For `"P2PKH"`: A public key hash. - /// - For `"P2WPKH"`: A witness public key hash. - /// - For `"P2SH"`: A script hash. - /// - For `"P2WSH"`: A witness script hash. - /// - For `"P2TR"`: An x-only public key. 
- pub(super) output_script_value: String, -} - -impl TryFrom for super::CoinbaseRewardScript { - type Error = super::Error; - fn try_from(value: LegacyCoinbaseOutput) -> Result { - let script_pubkey = match value.output_script_type.as_str() { - "TEST" => { - let pub_key_hash = value - .output_script_value - .parse::() - .map_err(|_| Error::InvalidOutputScript)? - .pubkey_hash(); - ScriptBuf::new_p2pkh(&pub_key_hash) - } - "P2PK" => { - let pub_key = value - .output_script_value - .parse::() - .map_err(|_| Error::InvalidOutputScript)?; - ScriptBuf::new_p2pk(&pub_key) - } - "P2PKH" => { - let pub_key_hash = value - .output_script_value - .parse::() - .map_err(|_| Error::InvalidOutputScript)? - .pubkey_hash(); - ScriptBuf::new_p2pkh(&pub_key_hash) - } - "P2WPKH" => { - let w_pub_key_hash = value - .output_script_value - .parse::() - .map_err(|_| Error::InvalidOutputScript)? - .wpubkey_hash() - .unwrap(); - ScriptBuf::new_p2wpkh(&w_pub_key_hash) - } - "P2SH" => { - let script_hashed = value - .output_script_value - .parse::() - .map_err(|_| Error::InvalidOutputScript)?; - ScriptBuf::new_p2sh(&script_hashed) - } - "P2WSH" => { - let w_script_hashed = value - .output_script_value - .parse::() - .map_err(|_| Error::InvalidOutputScript)?; - ScriptBuf::new_p2wsh(&w_script_hashed) - } - "P2TR" => { - // From the bip - // - // Conceptually, every Taproot output corresponds to a combination of - // a single public key condition (the internal key), - // and zero or more general conditions encoded in scripts organized in a tree. - let pub_key = value - .output_script_value - .parse::() - .map_err(|_| Error::InvalidOutputScript)?; - ScriptBuf::new_p2tr::(&Secp256k1::::new(), pub_key, None) - } - _ => return Err(Error::UnknownOutputScriptType), - }; - Ok(Self { - script_pubkey, - // legacy encoding gives no way to specify testnet or mainnet - ok_for_mainnet: true, - }) - } -} - -/// A coinbase output script as it appears in a configuration file. 
-/// -/// Private to avoid exposing the enum constructors. -#[derive(serde::Deserialize)] -#[serde(untagged)] // decode as whichever variant makes sense for the input -enum SerdeCoinbaseOutputInner { - Legacy(LegacyCoinbaseOutput), - Descriptor(String), -} - -/// A structure representing a coinbase output script as it appears in a -/// configuration file. -/// -/// Can only be constructed via serde, and supports no operations except conversion -/// to a [`super::CoinbaseOutput`] via [`TryFrom`]. -#[derive(serde::Deserialize)] -#[serde(transparent)] -pub struct SerdeCoinbaseOutput { - inner: SerdeCoinbaseOutputInner, -} - -impl TryFrom for super::CoinbaseRewardScript { - type Error = super::Error; - fn try_from(value: SerdeCoinbaseOutput) -> Result { - match value.inner { - SerdeCoinbaseOutputInner::Legacy(legacy) => Self::try_from(legacy), - SerdeCoinbaseOutputInner::Descriptor(ref s) => Self::from_descriptor(s), - } - } -} diff --git a/roles/stratum-apps/src/config_helpers/logging.rs b/roles/stratum-apps/src/config_helpers/logging.rs deleted file mode 100644 index 4273c8c602..0000000000 --- a/roles/stratum-apps/src/config_helpers/logging.rs +++ /dev/null @@ -1,49 +0,0 @@ -use std::{ - fs::OpenOptions, - io::{self, IsTerminal}, - path::Path, - str::FromStr, -}; -use tracing::level_filters::LevelFilter; -use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry}; - -/// Initialize logging to stdout and optionally to a file. -/// -/// If `log_file` is Some, logs will be written to both stdout and the file. -/// If `log_level` is not provided or is invalid, it defaults to "info". 
-pub fn init_logging(log_file: Option<&Path>) { - let rust_log = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string()); - let log_level_filter = LevelFilter::from_str(&rust_log).unwrap_or(LevelFilter::INFO); - let env_filter = EnvFilter::new(log_level_filter.to_string()); - let stdout_layer = fmt::layer() - .with_writer(io::stdout) - .with_ansi(io::stdout().is_terminal()); - - let subscriber: Box = match log_file { - Some(path) => { - // Log to both file and stdout - let path = path.to_owned(); - let file_layer = fmt::layer() - .with_writer(move || { - OpenOptions::new() - .create(true) - .append(true) - .open(&path) - .expect("Failed to open log file") - }) - .with_ansi(false); - Box::new( - Registry::default() - .with(env_filter) - .with(stdout_layer) - .with(file_layer), - ) - } - None => { - // Log only to stdout - Box::new(Registry::default().with(env_filter).with(stdout_layer)) - } - }; - - tracing::subscriber::set_global_default(subscriber).expect("Failed to set global subscriber"); -} diff --git a/roles/stratum-apps/src/config_helpers/mod.rs b/roles/stratum-apps/src/config_helpers/mod.rs deleted file mode 100644 index 1f051d9460..0000000000 --- a/roles/stratum-apps/src/config_helpers/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! Configuration management helpers for SV2 applications -//! -//! This module provides utilities for: -//! - Parsing configuration files (TOML, etc.) -//! - Handling coinbase output specifications -//! - Setting up logging and tracing -//! -//! Originally from the `config_helpers_sv2` crate. 
- -mod coinbase_output; -pub use coinbase_output::{CoinbaseRewardScript, Error as CoinbaseOutputError}; - -pub mod logging; - -mod toml; -pub use toml::duration_from_toml; diff --git a/roles/stratum-apps/src/config_helpers/toml.rs b/roles/stratum-apps/src/config_helpers/toml.rs deleted file mode 100644 index 9e11497aa7..0000000000 --- a/roles/stratum-apps/src/config_helpers/toml.rs +++ /dev/null @@ -1,33 +0,0 @@ -use std::time::Duration; - -/// Deserialize a duration from a TOML string. -pub fn duration_from_toml<'de, D>(deserializer: D) -> Result -where - D: serde::Deserializer<'de>, -{ - use serde::Deserialize; - - #[derive(serde::Deserialize)] - struct Helper { - unit: String, - value: u64, - } - - let helper = Helper::deserialize(deserializer)?; - match helper.unit.as_str() { - "seconds" => Ok(Duration::from_secs(helper.value)), - "secs" => Ok(Duration::from_secs(helper.value)), - "s" => Ok(Duration::from_secs(helper.value)), - "milliseconds" => Ok(Duration::from_millis(helper.value)), - "millis" => Ok(Duration::from_millis(helper.value)), - "ms" => Ok(Duration::from_millis(helper.value)), - "microseconds" => Ok(Duration::from_micros(helper.value)), - "micros" => Ok(Duration::from_micros(helper.value)), - "us" => Ok(Duration::from_micros(helper.value)), - "nanoseconds" => Ok(Duration::from_nanos(helper.value)), - "nanos" => Ok(Duration::from_nanos(helper.value)), - "ns" => Ok(Duration::from_nanos(helper.value)), - // ... add other units as needed - _ => Err(serde::de::Error::custom("Unsupported duration unit")), - } -} diff --git a/roles/stratum-apps/src/custom_mutex.rs b/roles/stratum-apps/src/custom_mutex.rs deleted file mode 100644 index 8b23742da9..0000000000 --- a/roles/stratum-apps/src/custom_mutex.rs +++ /dev/null @@ -1,115 +0,0 @@ -//! # Collection of Helper Primitives -//! -//! Provides a collection of utilities and helper structures used throughout the Stratum V2 -//! protocol implementation. 
These utilities simplify common tasks, such as ID generation and -//! management, mutex management, difficulty target calculations, merkle root calculations, and -//! more. - -use std::sync::{Mutex as Mutex_, MutexGuard, PoisonError}; - -/// Custom synchronization primitive for managing shared mutable state. -/// -/// This custom mutex implementation builds on [`std::sync::Mutex`] to enhance usability and safety -/// in concurrent environments. It provides ergonomic methods to safely access and modify inner -/// values while reducing the risk of deadlocks and panics. It is used throughout SRI applications -/// to managed shared state across multiple threads, such as tracking active mining sessions, -/// routing jobs, and managing connections safely and efficiently. -/// -/// ## Advantages -/// - **Closure-Based Locking:** The `safe_lock` method encapsulates the locking process, ensuring -/// the lock is automatically released after the closure completes. -/// - **Error Handling:** `safe_lock` enforces explicit handling of potential [`PoisonError`] -/// conditions, reducing the risk of panics caused by poisoned locks. -/// - **Panic-Safe Option:** The `super_safe_lock` method provides an alternative that unwraps the -/// result of `safe_lock`, with optional runtime safeguards against panics. -/// - **Extensibility:** Includes feature-gated functionality to customize behavior, such as -/// stricter runtime checks using external tools like -/// [`no-panic`](https://github.com/dtolnay/no-panic). -#[derive(Debug)] -pub struct Mutex(Mutex_); - -impl Mutex { - /// Mutex safe lock. - /// - /// Safely locks the `Mutex` and executes a closer (`thunk`) with a mutable reference to the - /// inner value. This ensures that the lock is automatically released after the closure - /// completes, preventing deadlocks. It explicitly returns a [`PoisonError`] containing a - /// [`MutexGuard`] to the inner value in cases where the lock is poisoned. 
- /// - /// To prevent poison lock errors, unwraps should never be used within the closure. The result - /// should always be returned and handled outside of the sage lock. - pub fn safe_lock(&self, thunk: F) -> Result>> - where - F: FnOnce(&mut T) -> Ret, - { - let mut lock = self.0.lock()?; - let return_value = thunk(&mut *lock); - drop(lock); - Ok(return_value) - } - - /// Mutex super safe lock. - /// - /// Locks the `Mutex` and executes a closure (`thunk`) with a mutable reference to the inner - /// value, panicking if the lock is poisoned. - /// - /// This is a convenience wrapper around `safe_lock` for cases where explicit error handling is - /// unnecessary or undesirable. Use with caution in production code. - pub fn super_safe_lock(&self, thunk: F) -> Ret - where - F: FnOnce(&mut T) -> Ret, - { - //#[cfg(feature = "disable_nopanic")] - { - self.safe_lock(thunk).unwrap() - } - //#[cfg(not(feature = "disable_nopanic"))] - //{ - // // based on https://github.com/dtolnay/no-panic - // struct __NoPanic; - // extern "C" { - // #[link_name = "super_safe_lock called on a function that may panic"] - // fn trigger() -> !; - // } - // impl core::ops::Drop for __NoPanic { - // fn drop(&mut self) { - // unsafe { - // trigger(); - // } - // } - // } - // let mut lock = self.0.lock().expect("threads to never panic"); - // let __guard = __NoPanic; - // let return_value = thunk(&mut *lock); - // core::mem::forget(__guard); - // drop(lock); - // return_value - //} - } - - /// Creates a new [`Mutex`] instance, storing the initial value inside. - pub fn new(v: T) -> Self { - Mutex(Mutex_::new(v)) - } - - /// Removes lock for direct access. - /// - /// Acquires a lock on the [`Mutex`] and returns a [`MutexGuard`] for direct access to the - /// inner value. Allows for manual lock handling and is useful in scenarios where closures are - /// not convenient. 
- pub fn to_remove(&self) -> Result, PoisonError>> { - self.0.lock() - } -} - -#[cfg(test)] -mod tests { - - #[test] - fn test_super_safe_lock() { - let m = super::Mutex::new(1u32); - m.safe_lock(|i| *i += 1).unwrap(); - // m.super_safe_lock(|i| *i = (*i).checked_add(1).unwrap()); // will not compile - m.super_safe_lock(|i| *i = (*i).checked_add(1).unwrap_or_default()); // compiles - } -} diff --git a/roles/stratum-apps/src/key_utils/mod.rs b/roles/stratum-apps/src/key_utils/mod.rs deleted file mode 100644 index bc02fe4b3b..0000000000 --- a/roles/stratum-apps/src/key_utils/mod.rs +++ /dev/null @@ -1,265 +0,0 @@ -#![cfg_attr(not(feature = "std"), no_std)] - -extern crate alloc; - -use alloc::{ - string::{String, ToString}, - vec::Vec, -}; -use bs58::{decode, decode::Error as Bs58DecodeError}; -use core::{convert::TryFrom, fmt::Display, str::FromStr}; -use secp256k1::{ - schnorr::Signature, Keypair, Message as SecpMessage, Secp256k1, SecretKey, SignOnly, - VerifyOnly, XOnlyPublicKey, -}; -use serde::{Deserialize, Serialize}; - -#[derive(Debug)] -pub enum Error { - Bs58Decode(Bs58DecodeError), - Secp256k1(secp256k1::Error), - KeyVersion(u16), - KeyLength, - Custom(String), -} - -impl Display for Error { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - Self::Bs58Decode(error) => write!(f, "Base58 code error: {error}"), - Self::Secp256k1(error) => write!(f, "Secp256k1 error: {error}"), - Self::KeyVersion(obtained) => { - write!(f, "Unknown public key version. 
version found: {obtained}") - } - Self::KeyLength => write!(f, "Bad key length"), - Self::Custom(error) => write!(f, "Custom error: {error}"), - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for Error {} -#[cfg(not(feature = "std"))] -#[rustversion::since(1.81)] -impl core::error::Error for Error {} - -impl From for Error { - fn from(e: Bs58DecodeError) -> Self { - Error::Bs58Decode(e) - } -} - -impl From for Error { - fn from(e: secp256k1::Error) -> Self { - Error::Secp256k1(e) - } -} - -#[derive(Debug, Copy, Clone, Serialize, Deserialize)] -#[serde(into = "String", try_from = "String")] -pub struct Secp256k1SecretKey(pub SecretKey); - -impl TryFrom for Secp256k1SecretKey { - type Error = Error; - - fn try_from(value: String) -> Result { - value.parse() - } -} - -impl FromStr for Secp256k1SecretKey { - type Err = Error; - - fn from_str(value: &str) -> Result { - let decoded = decode(value).with_check(None).into_vec()?; - let secret = SecretKey::from_slice(&decoded)?; - Ok(Secp256k1SecretKey(secret)) - } -} - -impl From for String { - fn from(secret: Secp256k1SecretKey) -> Self { - secret.to_string() - } -} - -impl Display for Secp256k1SecretKey { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let bytes = self.0.secret_bytes(); - f.write_str(&bs58::encode(bytes).with_check().into_string()) - } -} - -#[derive(Debug, Copy, Clone, Serialize, Deserialize)] -#[serde(into = "String", try_from = "String")] -pub struct Secp256k1PublicKey(pub XOnlyPublicKey); - -impl TryFrom for Secp256k1PublicKey { - type Error = Error; - - fn try_from(value: String) -> Result { - value.parse() - } -} - -impl FromStr for Secp256k1PublicKey { - type Err = Error; - - fn from_str(value: &str) -> Result { - let decoded = decode(value).with_check(None).into_vec()?; - if decoded.len() < 34 { - return Err(Error::KeyLength); - } - let key_version = - u16::from_le_bytes(decoded[..2].try_into().expect("Invalid array length")); - if key_version != 1 { - return 
Err(Error::KeyVersion(key_version)); - } - let public = XOnlyPublicKey::from_slice(&decoded[2..]).map_err(Error::Secp256k1)?; - Ok(Secp256k1PublicKey(public)) - } -} - -impl From for String { - fn from(public: Secp256k1PublicKey) -> Self { - public.to_string() - } -} - -impl Display for Secp256k1PublicKey { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let mut output = [0_u8; 34]; - output[0] = 1; - let bytes = self.0.serialize(); - output[2..].copy_from_slice(&bytes); - f.write_str(&bs58::encode(&output).with_check().into_string()) - } -} - -impl Secp256k1PublicKey { - pub fn into_bytes(self) -> [u8; 32] { - self.0.serialize() - } -} -impl Secp256k1SecretKey { - pub fn into_bytes(self) -> [u8; 32] { - self.0.secret_bytes() - } -} - -impl From for Secp256k1PublicKey { - fn from(value: Secp256k1SecretKey) -> Self { - let context = secp256k1::Secp256k1::new(); - let (x_coordinate, _) = value.0.public_key(&context).x_only_public_key(); - Self(x_coordinate) - } -} - -pub struct SignatureService { - secp_sign: Secp256k1, - secp_verify: Secp256k1, -} - -impl SignatureService { - pub fn new() -> Self { - SignatureService { - secp_sign: Secp256k1::signing_only(), - secp_verify: Secp256k1::verification_only(), - } - } - - #[cfg(feature = "std")] - pub fn sign(&self, message: Vec, private_key: SecretKey) -> Signature { - self.sign_with_rng(message, private_key, &mut rand::thread_rng()) - } - - #[inline] - pub fn sign_with_rng( - &self, - message: Vec, - private_key: SecretKey, - rng: &mut R, - ) -> Signature { - let secret_key = private_key; - let kp = Keypair::from_secret_key(&self.secp_sign, &secret_key); - - self.secp_sign.sign_schnorr_with_rng( - &SecpMessage::from_digest_slice(&message).unwrap(), - &kp, - rng, - ) - } - - pub fn verify( - &self, - message: Vec, - signature: secp256k1::schnorr::Signature, - public_key: XOnlyPublicKey, - ) -> Result<(), secp256k1::Error> { - let x_only_public_key = public_key; - - // Verify signature - 
self.secp_verify.verify_schnorr( - &signature, - &secp256k1::Message::from_digest_slice(&message)?, - &x_only_public_key, - ) - } -} - -impl Default for SignatureService { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn key_conversions() { - let secret_key = "zmBEmPhqo3A92FkiLVvyCz6htc3e53ph3ZbD4ASqGaLjwnFLi"; - let public_key = "9bDuixKmZqAJnrmP746n8zU1wyAQRrus7th9dxnkPg6RzQvCnan"; - let bad_public_key1 = "9bDuixKmZqAJnrmP746n8zU1wyAQRrus7th9dxnkPg6RzQvCnam"; // invalid checksum (swapped char) - let bad_public_key2 = "2myPhc5vkPzuC5FXNK5tee79WmP7uoLh55SxezoF8iqwF3E3rnPY"; // invalid version (version 12) - let bad_public_key3 = "2wmHTKZkLg2QzXyEXGMBXzKP7JXDUt8yy9SA5hoQwERc92qR6c"; // invalid length (1 B missing) - - let error = bad_public_key1 - .parse::() - .expect_err("Bad bud public key failed to raise error"); - assert!( - matches!(error, Error::Bs58Decode(_)), - "expected failed checksum error, got {}", - error - ); - let error = bad_public_key2 - .parse::() - .expect_err("Bad bud public key failed to raise error"); - assert!( - matches!(error, Error::KeyVersion(_)), - "expected invalid key version error, got {}", - error - ); - let error = bad_public_key3 - .parse::() - .expect_err("Bad bud public key failed to raise error"); - assert!( - matches!(error, Error::KeyLength), - "expected invalid key length error, got {}", - error - ); - - let parsed_key = secret_key - .parse::() - .expect("Invalid test key"); - - let calculated_public_key = Secp256k1PublicKey::from(parsed_key); - assert_eq!(calculated_public_key.to_string(), public_key); - - let parsed_public_key = public_key - .parse::() - .expect("Invalid test pubkey"); - assert_eq!(calculated_public_key.0, parsed_public_key.0); - } -} diff --git a/roles/stratum-apps/src/lib.rs b/roles/stratum-apps/src/lib.rs deleted file mode 100644 index 37a88b8cdd..0000000000 --- a/roles/stratum-apps/src/lib.rs +++ /dev/null @@ -1,60 +0,0 @@ -//! 
# Stratum Apps - SV2 Application Utilities -//! -//! This crate consolidates the essential utilities needed for building Stratum V2 applications. -//! It combines the functionality from the original separate utility crates into a single, -//! well-organized library with feature-based compilation. -//! -//! ## Features -//! -//! ### Core Features -//! - `network` - High-level networking utilities (enabled by default) -//! - `config` - Configuration management helpers (enabled by default) -//! - `rpc` - RPC utilities with custom types for JSON-RPC communication (optional) -//! -//! ### Role-Specific Feature Bundles -//! - `pool` - Everything needed for pool applications -//! - `jd_client` - Everything needed for JD client applications -//! - `jd_server` - Everything needed for JD server applications (includes RPC) -//! - `translator` - Everything needed for translator applications (includes SV1) -//! - `mining_device` - Everything needed for mining device applications -//! -//! ## Modules -//! -//! - [`network_helpers`] - High-level networking utilities for SV2 connections -//! - [`config_helpers`] - Configuration management and parsing utilities -//! - [`rpc`] - RPC utilities with custom serializable types (`Hash`, `BlockHash`, `Amount`) - -/// Re-export all the modules from `stratum_core` -#[cfg(feature = "core")] -pub use stratum_core; - -/// High-level networking utilities for SV2 connections -/// -/// Provides connection management, encrypted streams, and protocol handling. -/// Originally from the `network_helpers_sv2` crate. -#[cfg(feature = "network")] -pub mod network_helpers; - -/// Configuration management helpers -/// -/// Utilities for parsing configuration files, handling coinbase outputs, -/// and setting up logging. Originally from the `config_helpers_sv2` crate. 
-#[cfg(feature = "config")] -pub mod config_helpers; - -/// Custom Mutex -/// -/// A wrapper around std::sync::Mutex -pub mod custom_mutex; -/// RPC utilities for Job Declaration Server -/// -/// HTTP-based RPC server implementation for JD Server functionality. -/// Originally from the `rpc_sv2` crate. -#[cfg(feature = "rpc")] -pub mod rpc; - -/// Key utilities for cryptographic operations -/// -/// Provides Secp256k1 key management, serialization/deserialization, and signature services. -/// Supports both standard and no_std environments. -pub mod key_utils; diff --git a/roles/stratum-apps/src/network_helpers/mod.rs b/roles/stratum-apps/src/network_helpers/mod.rs deleted file mode 100644 index 93ee6a0242..0000000000 --- a/roles/stratum-apps/src/network_helpers/mod.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! High-level networking utilities for SV2 connections -//! -//! This module provides connection management, encrypted streams, and protocol handling -//! for Stratum V2 applications. It includes support for: -//! -//! - Noise-encrypted connections ([`noise_connection`], [`noise_stream`]) -//! - Plain TCP connections ([`plain_connection`]) -//! - SV1 protocol connections ([`sv1_connection`]) - when `sv1` feature is enabled -//! -//! Originally from the `network_helpers_sv2` crate. 
- -pub mod noise_connection; -pub mod noise_stream; -pub mod plain_connection; - -#[cfg(feature = "sv1")] -pub mod sv1_connection; - -use async_channel::{RecvError, SendError}; -use stratum_core::codec_sv2::Error as CodecError; - -/// Networking errors that can occur in SV2 connections -#[derive(Debug)] -pub enum Error { - /// Invalid handshake message received from remote peer - HandshakeRemoteInvalidMessage, - /// Error from the codec layer - CodecError(CodecError), - /// Error receiving from async channel - RecvError, - /// Error sending to async channel - SendError, - /// Socket was closed, likely by the peer - SocketClosed, -} - -impl From for Error { - fn from(e: CodecError) -> Self { - Error::CodecError(e) - } -} - -impl From for Error { - fn from(_: RecvError) -> Self { - Error::RecvError - } -} - -impl From> for Error { - fn from(_: SendError) -> Self { - Error::SendError - } -} diff --git a/roles/stratum-apps/src/network_helpers/noise_connection.rs b/roles/stratum-apps/src/network_helpers/noise_connection.rs deleted file mode 100644 index 4e82b609e9..0000000000 --- a/roles/stratum-apps/src/network_helpers/noise_connection.rs +++ /dev/null @@ -1,139 +0,0 @@ -#![allow(clippy::new_ret_no_self)] -use crate::network_helpers::{ - noise_stream::{NoiseTcpReadHalf, NoiseTcpStream, NoiseTcpWriteHalf}, - Error, -}; -use async_channel::{unbounded, Receiver, Sender}; -use std::sync::Arc; -use stratum_core::{ - binary_sv2::{Deserialize, GetSize, Serialize}, - codec_sv2::{HandshakeRole, StandardEitherFrame}, -}; -use tokio::{net::TcpStream, task}; -use tracing::{debug, error}; - -pub struct Connection; - -struct ConnectionState { - sender_incoming: Sender>, - receiver_incoming: Receiver>, - sender_outgoing: Sender>, - receiver_outgoing: Receiver>, -} - -impl ConnectionState { - fn close_all(&self) { - self.sender_incoming.close(); - self.receiver_incoming.close(); - self.sender_outgoing.close(); - self.receiver_outgoing.close(); - } -} - -impl Connection { - pub async 
fn new( - stream: TcpStream, - role: HandshakeRole, - ) -> Result< - ( - Receiver>, - Sender>, - ), - Error, - > - where - Message: Serialize + Deserialize<'static> + GetSize + Send + 'static, - { - let (sender_incoming, receiver_incoming) = unbounded(); - let (sender_outgoing, receiver_outgoing) = unbounded(); - - let conn_state = Arc::new(ConnectionState { - sender_incoming, - receiver_incoming: receiver_incoming.clone(), - sender_outgoing: sender_outgoing.clone(), - receiver_outgoing, - }); - - let (read_half, write_half) = NoiseTcpStream::::new(stream, role) - .await? - .into_split(); - - Self::spawn_reader(read_half, Arc::clone(&conn_state)); - Self::spawn_writer(write_half, conn_state); - - Ok((receiver_incoming, sender_outgoing)) - } - fn spawn_reader( - mut read_half: NoiseTcpReadHalf, - conn_state: Arc>, - ) -> task::JoinHandle<()> - where - Message: Serialize + Deserialize<'static> + GetSize + Send + 'static, - { - let sender_incoming = conn_state.sender_incoming.clone(); - - task::spawn(async move { - loop { - tokio::select! { - _ = tokio::signal::ctrl_c() => { - debug!("Reader received shutdown signal."); - break; - } - res = read_half.read_frame() => match res { - Ok(frame) => { - if sender_incoming.send(frame).await.is_err() { - error!("Reader: channel closed, shutting down."); - break; - } - } - Err(e) => { - error!("Reader: error while reading frame: {e:?}"); - break; - } - } - } - } - - conn_state.close_all(); - }) - } - - fn spawn_writer( - mut write_half: NoiseTcpWriteHalf, - conn_state: Arc>, - ) -> task::JoinHandle<()> - where - Message: Serialize + Deserialize<'static> + GetSize + Send + 'static, - { - let receiver_outgoing = conn_state.receiver_outgoing.clone(); - - task::spawn(async move { - loop { - tokio::select! 
{ - _ = tokio::signal::ctrl_c() => { - debug!("Writer received shutdown signal."); - break; - } - res = receiver_outgoing.recv() => match res { - Ok(frame) => { - if let Err(e) = write_half.write_frame(frame).await { - error!("Writer: error while writing frame: {e:?}"); - break; - } - } - Err(_) => { - debug!("Writer: channel closed, shutting down."); - break; - } - } - } - } - - if let Err(e) = write_half.shutdown().await { - error!("Writer: error during shutdown: {e:?}"); - } - - conn_state.close_all(); - }) - } -} diff --git a/roles/stratum-apps/src/network_helpers/noise_stream.rs b/roles/stratum-apps/src/network_helpers/noise_stream.rs deleted file mode 100644 index a7078059b5..0000000000 --- a/roles/stratum-apps/src/network_helpers/noise_stream.rs +++ /dev/null @@ -1,332 +0,0 @@ -//! A Noise-encrypted wrapper around a `TcpStream`, providing framed read/write I/O using the SV2 -//! protocol and a stateful Noise handshake. -//! -//! This module provides `NoiseTcpStream`, which wraps a `TcpStream` and performs a Noise-based -//! authenticated key exchange based on the provided [`HandshakeRole`]. -//! -//! After a successful handshake, the stream can be split into a `NoiseTcpReadHalf` and -//! `NoiseTcpWriteHalf`, which support frame-based encoding/decoding of SV2 messages with optional -//! non-blocking behavior. 
- -use crate::network_helpers::Error; -use stratum_core::{ - binary_sv2::{Deserialize, GetSize, Serialize}, - codec_sv2::{HandshakeRole, NoiseEncoder, StandardNoiseDecoder, State}, - noise_sv2::INITIATOR_EXPECTED_HANDSHAKE_MESSAGE_SIZE, -}; -use tokio::net::{ - tcp::{OwnedReadHalf, OwnedWriteHalf}, - TcpStream, -}; - -use stratum_core::{ - codec_sv2::StandardEitherFrame, framing_sv2::framing::HandShakeFrame, - noise_sv2::ELLSWIFT_ENCODING_SIZE, -}; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tracing::{debug, error}; - -/// A Noise-secured duplex stream over TCP that wraps a `TcpStream` -/// and provides secure read/write capabilities using the Noise protocol. -/// -/// This stream performs the full Noise handshake during construction -/// and returns a bidirectional encrypted stream split into read and write halves. -/// -/// **Note:** This struct is **not cancellation-safe**. -/// If `read_frame()` or `write_frame()` is canceled mid-way, -/// internal state may be left in an inconsistent state, which can lead to -/// protocol errors or dropped frames. -pub struct NoiseTcpStream + GetSize + Send + 'static> { - reader: NoiseTcpReadHalf, - writer: NoiseTcpWriteHalf, -} - -/// The reading half of a `NoiseTcpStream`. -/// -/// It buffers incoming encrypted bytes, attempts to decode full Noise frames, -/// and exposes a method to retrieve structured messages of type `Message`. -pub struct NoiseTcpReadHalf + GetSize + Send + 'static> { - reader: OwnedReadHalf, - decoder: StandardNoiseDecoder, - state: State, - current_frame_buf: Vec, - bytes_read: usize, -} - -/// The writing half of a `NoiseTcpStream`. -/// -/// It accepts structured messages, encodes them via the Noise protocol, -/// and writes the result to the socket. 
-pub struct NoiseTcpWriteHalf + GetSize + Send + 'static> { - writer: OwnedWriteHalf, - encoder: NoiseEncoder, - state: State, -} - -impl NoiseTcpStream -where - Message: Serialize + Deserialize<'static> + GetSize + Send + 'static, -{ - /// Constructs a new `NoiseTcpStream` over the given TCP stream, - /// performing the Noise handshake in the given `role`. - /// - /// On success, returns a stream with encrypted communication channels. - pub async fn new(stream: TcpStream, role: HandshakeRole) -> Result { - let (mut reader, mut writer) = stream.into_split(); - - let mut decoder = StandardNoiseDecoder::::new(); - let mut encoder = NoiseEncoder::::new(); - let mut state = State::initialized(role.clone()); - - match role { - HandshakeRole::Initiator(_) => { - let mut responder_state = State::not_initialized(&role); - let first_msg = state.step_0()?; - send_message(&mut writer, first_msg.into(), &mut state, &mut encoder).await?; - debug!("First handshake message sent"); - - loop { - match receive_message(&mut reader, &mut responder_state, &mut decoder).await { - Ok(second_msg) => { - debug!("Second handshake message received"); - let handshake_frame: HandShakeFrame = second_msg - .try_into() - .map_err(|_| Error::HandshakeRemoteInvalidMessage)?; - let payload: [u8; INITIATOR_EXPECTED_HANDSHAKE_MESSAGE_SIZE] = - handshake_frame - .get_payload_when_handshaking() - .try_into() - .map_err(|_| Error::HandshakeRemoteInvalidMessage)?; - let transport_state = state.step_2(payload)?; - state = transport_state; - break; - } - Err(Error::CodecError(stratum_core::codec_sv2::Error::MissingBytes(_))) => { - debug!("Waiting for more bytes during handshake"); - } - Err(e) => { - error!("Handshake failed with upstream: {:?}", e); - return Err(e); - } - } - } - } - HandshakeRole::Responder(_) => { - let mut initiator_state = State::not_initialized(&role); - - loop { - match receive_message(&mut reader, &mut initiator_state, &mut decoder).await { - Ok(first_msg) => { - debug!("First 
handshake message received"); - let handshake_frame: HandShakeFrame = first_msg - .try_into() - .map_err(|_| Error::HandshakeRemoteInvalidMessage)?; - let payload: [u8; ELLSWIFT_ENCODING_SIZE] = handshake_frame - .get_payload_when_handshaking() - .try_into() - .map_err(|_| Error::HandshakeRemoteInvalidMessage)?; - let (second_msg, transport_state) = state.step_1(payload)?; - send_message(&mut writer, second_msg.into(), &mut state, &mut encoder) - .await?; - debug!("Second handshake message sent"); - state = transport_state; - break; - } - Err(Error::CodecError(stratum_core::codec_sv2::Error::MissingBytes(_))) => { - debug!("Waiting for more bytes during handshake"); - } - Err(e) => { - error!("Handshake failed with downstream: {:?}", e); - return Err(e); - } - } - } - } - }; - Ok(Self { - reader: NoiseTcpReadHalf { - reader, - decoder, - state: state.clone(), - current_frame_buf: vec![], - bytes_read: 0, - }, - writer: NoiseTcpWriteHalf { - writer, - encoder, - state, - }, - }) - } - - /// Consumes the stream and returns its reader and writer halves. - pub fn into_split(self) -> (NoiseTcpReadHalf, NoiseTcpWriteHalf) { - (self.reader, self.writer) - } -} - -impl NoiseTcpWriteHalf -where - Message: Serialize + Deserialize<'static> + GetSize + Send + 'static, -{ - /// Encrypts and writes a full message frame to the socket. - /// - /// Returns an error if the socket is closed or the message cannot be encoded. - /// - /// Not cancellation-safe: A canceled write may cause partial writes or state corruption. - pub async fn write_frame(&mut self, frame: StandardEitherFrame) -> Result<(), Error> { - let buf = self.encoder.encode(frame, &mut self.state)?; - self.writer - .write_all(buf.as_ref()) - .await - .map_err(|_| Error::SocketClosed)?; - Ok(()) - } - - /// Attempts to write a message without blocking. - /// - /// Returns: - /// - `Ok(true)` if the entire frame was written successfully. - /// - `Ok(false)` if the socket is not ready (would block). 
- /// - `Err(_)` on socket or encoding errors. - pub fn try_write_frame(&mut self, frame: StandardEitherFrame) -> Result { - let buf = self.encoder.encode(frame, &mut self.state)?; - - match self.writer.try_write(buf.as_ref()) { - Ok(n) if n == buf.len() => Ok(true), - Ok(_) => Err(Error::SocketClosed), - Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => Ok(false), - Err(_) => Err(Error::SocketClosed), - } - } - - /// Gracefully shuts down the writing half of the stream. - /// - /// Returns an error if the shutdown fails. - pub async fn shutdown(&mut self) -> Result<(), Error> { - self.writer - .shutdown() - .await - .map_err(|_| Error::SocketClosed) - } -} - -impl NoiseTcpReadHalf -where - Message: Serialize + Deserialize<'static> + GetSize + Send + 'static, -{ - /// Reads and decodes a complete frame from the socket. - /// - /// This method blocks until a full frame is read and decoded, - /// handling `MissingBytes` errors from the codec automatically. - /// - /// Not cancellation-safe: Cancellation may leave partially-read state behind. - pub async fn read_frame(&mut self) -> Result, Error> { - loop { - let expected = self.decoder.writable_len(); - - if self.current_frame_buf.len() != expected { - self.current_frame_buf.resize(expected, 0); - self.bytes_read = 0; - } - - while self.bytes_read < expected { - let n = self - .reader - .read(&mut self.current_frame_buf[self.bytes_read..]) - .await - .map_err(|_| Error::SocketClosed)?; - - if n == 0 { - return Err(Error::SocketClosed); - } - - self.bytes_read += n; - } - - self.decoder - .writable() - .copy_from_slice(&self.current_frame_buf[..]); - - self.bytes_read = 0; - - match self.decoder.next_frame(&mut self.state) { - Ok(frame) => return Ok(frame), - Err(stratum_core::codec_sv2::Error::MissingBytes(_)) => { - tokio::task::yield_now().await; - continue; - } - Err(e) => return Err(Error::CodecError(e)), - } - } - } - - /// Attempts to read and decode a frame without blocking. 
- /// - /// Returns: - /// - `Ok(Some(frame))` if a full frame is successfully decoded. - /// - `Ok(None)` if not enough data is available yet. - /// - `Err(_)` on socket or decoding errors. - pub fn try_read_frame(&mut self) -> Result>, Error> { - let expected = self.decoder.writable_len(); - - if self.current_frame_buf.len() != expected { - self.current_frame_buf.resize(expected, 0); - self.bytes_read = 0; - } - - match self - .reader - .try_read(&mut self.current_frame_buf[self.bytes_read..]) - { - Ok(0) => return Err(Error::SocketClosed), - Ok(n) => self.bytes_read += n, - Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => return Ok(None), - Err(_) => return Err(Error::SocketClosed), - } - - if self.bytes_read < expected { - return Ok(None); - } - - self.decoder - .writable() - .copy_from_slice(&self.current_frame_buf[..]); - - self.bytes_read = 0; - - match self.decoder.next_frame(&mut self.state) { - Ok(frame) => Ok(Some(frame)), - Err(stratum_core::codec_sv2::Error::MissingBytes(_)) => Ok(None), - Err(e) => Err(Error::CodecError(e)), - } - } -} - -async fn send_message + GetSize + Send + 'static>( - writer: &mut OwnedWriteHalf, - msg: StandardEitherFrame, - state: &mut State, - encoder: &mut NoiseEncoder, -) -> Result<(), Error> { - let buffer = encoder.encode(msg, state)?; - writer - .write_all(buffer.as_ref()) - .await - .map_err(|_| Error::SocketClosed)?; - Ok(()) -} - -async fn receive_message + GetSize + Send + 'static>( - reader: &mut OwnedReadHalf, - state: &mut State, - decoder: &mut StandardNoiseDecoder, -) -> Result, Error> { - let mut buffer = vec![0u8; decoder.writable_len()]; - reader - .read_exact(&mut buffer) - .await - .map_err(|_| Error::SocketClosed)?; - decoder.writable().copy_from_slice(&buffer); - decoder.next_frame(state).map_err(Error::CodecError) -} diff --git a/roles/stratum-apps/src/network_helpers/plain_connection.rs b/roles/stratum-apps/src/network_helpers/plain_connection.rs deleted file mode 100644 index 
291eca626c..0000000000 --- a/roles/stratum-apps/src/network_helpers/plain_connection.rs +++ /dev/null @@ -1,135 +0,0 @@ -use async_channel::{bounded, Receiver, Sender}; -use core::convert::TryInto; -use stratum_core::binary_sv2::{Deserialize, Serialize}; -use tokio::{ - io::{AsyncReadExt, AsyncWriteExt}, - net::{TcpListener, TcpStream}, - task, -}; - -use stratum_core::{ - binary_sv2::GetSize, - codec_sv2::{Error::MissingBytes, StandardDecoder, StandardEitherFrame}, -}; -use tracing::{error, trace}; - -#[derive(Debug)] -pub struct PlainConnection {} - -impl PlainConnection { - /// - /// - /// # Arguments - /// - /// * `strict` - true - will disconnect a connection that sends a message that can't be - /// translated, false - will ignore messages that can't be translated - #[allow(clippy::new_ret_no_self)] - pub async fn new<'a, Message: Serialize + Deserialize<'a> + GetSize + Send + 'static>( - stream: TcpStream, - ) -> ( - Receiver>, - Sender>, - ) { - const NOISE_HANDSHAKE_SIZE_HINT: usize = 3363412; - - let (mut reader, mut writer) = stream.into_split(); - - let (sender_incoming, receiver_incoming): ( - Sender>, - Receiver>, - ) = bounded(10); // TODO caller should provide this param - let (sender_outgoing, receiver_outgoing): ( - Sender>, - Receiver>, - ) = bounded(10); // TODO caller should provide this param - - // RECEIVE AND PARSE INCOMING MESSAGES FROM TCP STREAM - task::spawn(async move { - let mut decoder = StandardDecoder::::new(); - - loop { - let writable = decoder.writable(); - match reader.read_exact(writable).await { - Ok(_) => { - match decoder.next_frame() { - Ok(frame) => { - if let Err(e) = sender_incoming.send(frame.into()).await { - error!("Failed to send incoming message: {}", e); - task::yield_now().await; - break; - } - } - Err(MissingBytes(size)) => { - // Only disconnect if we get noise handshake message - this - // shouldn't - // happen in plain_connection - if size == NOISE_HANDSHAKE_SIZE_HINT { - error!("Got noise message on 
unencrypted connection - disconnecting"); - break; - } else { - trace!("MissingBytes({}) on incoming message - ignoring", size); - } - } - Err(e) => { - error!("Failed to read from stream: {}", e); - sender_incoming.close(); - task::yield_now().await; - break; - } - } - } - Err(e) => { - // Just fail and force to reinitialize everything - error!("Failed to read from stream: {}", e); - sender_incoming.close(); - task::yield_now().await; - break; - } - } - } - }); - - // ENCODE AND SEND INCOMING MESSAGES TO TCP STREAM - task::spawn(async move { - let mut encoder = stratum_core::codec_sv2::Encoder::::new(); - - loop { - let received = receiver_outgoing.recv().await; - match received { - Ok(frame) => { - let b = encoder.encode(frame.try_into().unwrap()).unwrap(); - - match (writer).write_all(b).await { - Ok(_) => (), - Err(_) => { - let _ = writer.shutdown().await; - } - } - } - Err(_) => { - // Just fail and force to reinitilize everything - let _ = writer.shutdown().await; - error!("Failed to read from stream - terminating connection"); - task::yield_now().await; - break; - } - }; - } - }); - - (receiver_incoming, sender_outgoing) - } -} - -pub async fn plain_listen(address: &str, sender: Sender) { - let listener = TcpListener::bind(address).await.unwrap(); - loop { - if let Ok((stream, _)) = listener.accept().await { - let _ = sender.send(stream).await; - } - } -} -pub async fn plain_connect(address: &str) -> Result { - let stream = TcpStream::connect(address).await.map_err(|_| ())?; - Ok(stream) -} diff --git a/roles/stratum-apps/src/network_helpers/sv1_connection.rs b/roles/stratum-apps/src/network_helpers/sv1_connection.rs deleted file mode 100644 index 29f00e41b7..0000000000 --- a/roles/stratum-apps/src/network_helpers/sv1_connection.rs +++ /dev/null @@ -1,214 +0,0 @@ -use async_channel::{unbounded, Receiver, Sender}; -use futures::StreamExt; -use stratum_core::sv1_api::json_rpc; -use tokio::{ - io::{AsyncWriteExt, BufReader, BufWriter}, - net::TcpStream, -}; 
-use tokio_util::codec::{FramedRead, LinesCodec}; -use tracing::{error, trace, warn}; - -/// Represents a connection between two roles communicating using SV1 protocol. -/// -/// This struct can be used to read and write messages to the other side of the connection. The -/// channel is unidirectional, i.e., each [`ConnectionSV1`] instance handles the connection either -/// from the upstream perspective or the downstream perspective. In order to communicate in both -/// directions, you will need two instances of this struct. -#[derive(Debug)] -pub struct ConnectionSV1 { - receiver: Receiver, - sender: Sender, -} - -struct ConnectionState { - receiver_outgoing: Receiver, - sender_outgoing: Sender, - receiver_incoming: Receiver, - sender_incoming: Sender, -} - -impl ConnectionState { - fn new( - receiver_outgoing: Receiver, - sender_outgoing: Sender, - receiver_incoming: Receiver, - sender_incoming: Sender, - ) -> Self { - Self { - receiver_incoming, - receiver_outgoing, - sender_incoming, - sender_outgoing, - } - } - - fn close(&self) { - self.receiver_incoming.close(); - self.receiver_outgoing.close(); - self.sender_incoming.close(); - self.sender_outgoing.close(); - } -} - -const MAX_LINE_LENGTH: usize = 1 << 16; - -impl ConnectionSV1 { - pub async fn new(stream: TcpStream) -> Self { - let (read_half, write_half) = stream.into_split(); - let (sender_incoming, receiver_incoming) = unbounded(); - let (sender_outgoing, receiver_outgoing) = unbounded(); - - let buffer_read_half = BufReader::new(read_half); - let buffer_write_half = BufWriter::new(write_half); - - let connection_state = ConnectionState::new( - receiver_outgoing.clone(), - sender_outgoing.clone(), - receiver_incoming.clone(), - sender_incoming.clone(), - ); - - tokio::spawn(async move { - tokio::select! { - _ = Self::run_reader(buffer_read_half, sender_incoming.clone()) => { - trace!("Reader task exited. 
Closing writer sender."); - connection_state.close(); - } - _ = Self::run_writer(buffer_write_half, receiver_outgoing.clone()) => { - trace!("Writer task exited. Closing reader sender."); - connection_state.close(); - } - } - }); - - Self { - receiver: receiver_incoming, - sender: sender_outgoing, - } - } - - async fn run_reader( - reader: BufReader, - sender: Sender, - ) { - let mut lines = FramedRead::new(reader, LinesCodec::new_with_max_length(MAX_LINE_LENGTH)); - while let Some(result) = lines.next().await { - match result { - Ok(line) => match serde_json::from_str::(&line) { - Ok(msg) => { - if sender.send(msg).await.is_err() { - warn!("Receiver dropped, stopping reader"); - break; - } - } - Err(e) => { - error!("Failed to deserialize message: {e:?}"); - } - }, - Err(e) => { - error!("Error reading from stream: {e:?}"); - break; - } - } - } - } - - async fn run_writer( - mut writer: BufWriter, - receiver: Receiver, - ) { - while let Ok(msg) = receiver.recv().await { - match serde_json::to_string(&msg) { - Ok(line) => { - let data = format!("{line}\n"); - if writer.write_all(data.as_bytes()).await.is_err() { - error!("Failed to write to stream"); - break; - } - if writer.flush().await.is_err() { - error!("Failed to flush writer."); - break; - } - } - Err(e) => { - error!("Failed to serialize message: {e:?}"); - break; - } - } - } - } - - /// Send a message to the other side of the connection. - pub async fn send(&self, msg: json_rpc::Message) -> bool { - self.sender.send(msg).await.is_ok() - } - - /// Receive a message from the other side of the connection. - pub async fn receive(&self) -> Option { - self.receiver.recv().await.ok() - } - - /// Get a clone of the receiver channel. - pub fn receiver(&self) -> Receiver { - self.receiver.clone() - } - - /// Get a clone of the sender channel. 
- pub fn sender(&self) -> Sender { - self.sender.clone() - } -} - -#[cfg(test)] -mod tests { - use tokio::net::TcpListener; - - use super::*; - - #[tokio::test] - async fn test_sv1_connection() { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - let addr = listener.local_addr().unwrap(); - let downstream_stream = TcpStream::connect(addr).await.unwrap(); - let (upstream_stream, _) = listener.accept().await.unwrap(); - - let upstream_connection = ConnectionSV1::new(upstream_stream).await; - let downstream_connection = ConnectionSV1::new(downstream_stream).await; - let message = json_rpc::Message::StandardRequest(json_rpc::StandardRequest { - id: 1, - method: "test".to_string(), - params: serde_json::Value::Null, - }); - assert!(downstream_connection.send(message).await); - let received_on_upstream = upstream_connection.receive().await.unwrap(); - match received_on_upstream { - json_rpc::Message::StandardRequest(received) => { - assert_eq!(received.id, 1); - assert_eq!(received.method, "test".to_string()); - assert_eq!(received.params, serde_json::Value::Null); - } - _ => { - panic!("Unexpected message type"); - } - } - let upstream_response = json_rpc::Message::OkResponse(json_rpc::Response { - id: 1, - result: serde_json::Value::String("response".to_string()), - error: None, - }); - assert!(upstream_connection.send(upstream_response).await); - let received_upstream = downstream_connection.receive().await.unwrap(); - match received_upstream { - json_rpc::Message::OkResponse(received) => { - assert_eq!(received.id, 1); - assert_eq!( - received.result, - serde_json::Value::String("response".to_string()) - ); - } - _ => { - panic!("Unexpected message type"); - } - } - } -} diff --git a/roles/stratum-apps/src/rpc/mini_rpc_client.rs b/roles/stratum-apps/src/rpc/mini_rpc_client.rs deleted file mode 100644 index 1b98ca2f63..0000000000 --- a/roles/stratum-apps/src/rpc/mini_rpc_client.rs +++ /dev/null @@ -1,215 +0,0 @@ -// TODO -// - manage id in RpcResult 
messages -use base64::Engine; -use hex::decode; -use http_body_util::{BodyExt, Full}; -use hyper::{ - body::Bytes, - header::{AUTHORIZATION, CONTENT_TYPE}, - Request, -}; -use hyper_util::{ - client::legacy::{connect::HttpConnector, Client}, - rt::TokioExecutor, -}; -use serde::{Deserialize, Serialize}; -use serde_json::json; -use stratum_core::bitcoin::{consensus::encode::deserialize as consensus_decode, Transaction}; - -use super::BlockHash; - -#[derive(Clone, Debug)] -pub struct MiniRpcClient { - client: Client>, - url: hyper::Uri, - auth: Auth, -} - -impl MiniRpcClient { - pub fn new(url: hyper::Uri, auth: Auth) -> MiniRpcClient { - let client: Client<_, Full> = Client::builder(TokioExecutor::new()).build_http(); - MiniRpcClient { client, url, auth } - } - - pub async fn get_raw_transaction( - &self, - txid: &String, - block_hash: Option<&BlockHash>, - ) -> Result { - let response = match block_hash { - Some(hash) => { - self.send_json_rpc_request("getrawtransaction", json!([txid, false, hash])) - } - None => self.send_json_rpc_request("getrawtransaction", json!([txid, false])), - } - .await; - match response { - Ok(result_hex) => { - let result_deserialized: JsonRpcResult = serde_json::from_str(&result_hex) - .map_err(|e| { - RpcError::Deserialization(e.to_string()) // TODO manage message ids - })?; - let transaction_hex: String = result_deserialized - .result - .ok_or_else(|| RpcError::Other("Result not found".to_string()))?; - let transaction_bytes = decode(transaction_hex).expect("Decoding failed"); - Ok(consensus_decode(&transaction_bytes).expect("Deserialization failed")) - } - Err(error) => Err(error), - } - } - - pub async fn get_raw_mempool(&self) -> Result, RpcError> { - let response = self.send_json_rpc_request("getrawmempool", json!([])).await; - match response { - Ok(result_hex) => { - let result_deserialized: JsonRpcResult> = - serde_json::from_str(&result_hex).map_err(|e| { - RpcError::Deserialization(e.to_string()) // TODO manage message ids - 
})?; - let mempool: Vec = result_deserialized - .result - .ok_or_else(|| RpcError::Other("Result not found".to_string()))?; - Ok(mempool) - } - Err(error) => Err(error), - } - } - - pub async fn submit_block(&self, block_hex: String) -> Result<(), RpcError> { - let response = self - .send_json_rpc_request("submitblock", json!([block_hex])) - .await; - - match response { - Ok(_) => Ok(()), - Err(error) => Err(error), - } - } - - /// Checks the health of the RPC connection by sending a request to the blockchain info - /// endpoint - pub async fn health(&self) -> Result<(), RpcError> { - let response = self - .send_json_rpc_request("getblockchaininfo", json!([])) - .await; - match response { - Ok(_) => Ok(()), - Err(error) => Err(error), - } - } - - async fn send_json_rpc_request( - &self, - method: &str, - params: serde_json::Value, - ) -> Result { - let client = &self.client; - let (username, password) = self.auth.clone().get_user_pass(); - let request = JsonRpcRequest { - jsonrpc: "2.0".to_string(), - method: method.to_string(), - params, - id: 1, //TODO manage message ids - }; - - let request_body = match serde_json::to_string(&request) { - Ok(body) => body, - Err(e) => return Err(RpcError::Serialization(e.to_string())), - }; - - let req = Request::builder() - .method("POST") - .uri(self.url.clone()) - .header(CONTENT_TYPE, "application/json") - .header( - AUTHORIZATION, - format!( - "Basic {}", - base64::engine::general_purpose::STANDARD - .encode(format!("{username}:{password}")) - ), - ) - .body(Full::::from(request_body)) - .map_err(|e| RpcError::Http(e.to_string()))?; - - let response = client - .request(req) - .await - .map_err(|e| RpcError::Http(e.to_string()))?; - - let status = response.status(); - let body = response - .into_body() - .collect() - .await - .map_err(|e| RpcError::Http(e.to_string()))? 
- .to_bytes() - .to_vec(); - - if status.is_success() { - String::from_utf8(body).map_err(|e| { - RpcError::Deserialization(e.to_string()) // TODO manage message ids - }) - } else { - let error_result: Result, _> = serde_json::from_slice(&body); - match error_result { - Ok(error_response) => Err(error_response.into()), - Err(e) => Err(RpcError::Deserialization(e.to_string())), - } - } - } -} - -#[derive(Clone, Debug)] -pub struct Auth { - username: String, - password: String, -} - -impl Auth { - pub fn get_user_pass(self) -> (String, String) { - (self.username, self.password) - } - pub fn new(username: String, password: String) -> Auth { - Auth { username, password } - } -} - -#[derive(Debug, Serialize)] -struct JsonRpcRequest { - jsonrpc: String, - method: String, - params: serde_json::Value, - id: u64, -} - -#[derive(Debug, Deserialize)] -pub struct JsonRpcResult { - result: Option, - pub error: Option, - pub id: u64, -} - -#[derive(Debug, Deserialize, Clone)] -pub struct JsonRpcError { - pub code: i32, - pub message: String, -} - -#[derive(Debug, Deserialize)] -pub enum RpcError { - // TODO this type is slightly incorrect, as the JsonRpcError evaluates a generic that is meant - // for the result field of JsonRpcResult struct. This should be corrected - JsonRpc(JsonRpcResult), - Deserialization(String), - Serialization(String), - Http(String), - Other(String), -} - -impl From> for RpcError { - fn from(error: JsonRpcResult) -> Self { - Self::JsonRpc(error) - } -} diff --git a/roles/stratum-apps/src/rpc/mod.rs b/roles/stratum-apps/src/rpc/mod.rs deleted file mode 100644 index fd7eb9432e..0000000000 --- a/roles/stratum-apps/src/rpc/mod.rs +++ /dev/null @@ -1,24 +0,0 @@ -//! RPC utilities for Job Declaration Server -//! -//! This module provides HTTP-based RPC server implementation for JD Server functionality. -//! It includes utilities for handling RPC requests and responses. -//! -//! Originally from the `rpc_sv2` crate. -//! -//! 
This module is only available when the `rpc` feature is enabled. - -pub mod mini_rpc_client; - -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct Hash([u8; 32]); - -#[allow(dead_code)] -#[derive(Clone, Deserialize)] -pub struct Amount(f64); - -#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct BlockHash(Hash); - -pub use hyper::Uri; diff --git a/roles/tarpaulin.toml b/roles/tarpaulin.toml deleted file mode 100644 index 96f8f04b3d..0000000000 --- a/roles/tarpaulin.toml +++ /dev/null @@ -1,8 +0,0 @@ -[default] -features = "with_buffer_pool default" -run-types = [ "Lib" ] -timeout = "120s" -fail-under = 0 - -[report] -out = ["Xml"] diff --git a/roles/test-utils/mining-device/Cargo.toml b/roles/test-utils/mining-device/Cargo.toml deleted file mode 100644 index 7e5234e4e0..0000000000 --- a/roles/test-utils/mining-device/Cargo.toml +++ /dev/null @@ -1,61 +0,0 @@ -[package] -name = "mining_device" -version = "0.1.3" -authors = ["The Stratum V2 Developers"] -edition = "2021" -publish = false -documentation = "https://github.com/stratum-mining/stratum" -readme = "README.md" -homepage = "https://stratumprotocol.org" -repository = "https://github.com/stratum-mining/stratum" -license = "MIT OR Apache-2.0" -keywords = ["stratum", "mining", "bitcoin", "protocol"] - - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[lib] -name = "mining_device" -path = "src/lib/mod.rs" - - -[dependencies] -stratum-apps = { path = "../../stratum-apps", features = ["mining_device"] } -roles_logic_sv2 = "5.0.0" -binary_sv2 = "4.0.0" -codec_sv2 = "3.0.1" -framing_sv2 = "5.0.1" -noise_sv2 = "1.4.0" -parsers_sv2 = "0.1.1" -common_messages_sv2 = "6.0.1" -mining_sv2 = "5.0.1" -network_helpers_sv2 = "4.0.1" -async-channel = "1.5.1" -async-recursion = "0.3.2" -rand = "0.8.4" -futures = "0.3.5" -clap = { version = "^4.5.4", features = ["derive"] } -tracing = { 
version = "0.1" } -tracing-subscriber = "0.3" -sha2 = { version = "0.10.6", features = ["compress", "asm"] } -tokio = "1.44.1" -primitive-types = "0.13.1" -num-format = "0.4" - -[dev-dependencies] -# Criterion 0.5 without default features; combined with a dev pin of `half = 2.3.1` to stay Rust 1.75-compatible. -criterion = { version = "0.5", default-features = false, features = ["stable"] } -half = "=2.3.1" -num_cpus = "1" - -[[bench]] -name = "hasher_bench" -harness = false - -[[bench]] -name = "microbatch_bench" -harness = false - -[[bench]] -name = "scaling_bench" -harness = false diff --git a/roles/test-utils/mining-device/README.md b/roles/test-utils/mining-device/README.md deleted file mode 100644 index dc9db27d7e..0000000000 --- a/roles/test-utils/mining-device/README.md +++ /dev/null @@ -1,137 +0,0 @@ -# CPU Sv2 mining device - -Header only sv2 cpu miner. - -``` -Usage: mining_device [OPTIONS] --address-pool - -Options: - -p, --pubkey-pool - Pool pub key, when left empty the pool certificate is not checked - -i, --id-device - Sometimes used by the pool to identify the device - -a, --address-pool - Address of the pool in this format ip:port or domain:port - --handicap - This value is used to slow down the cpu miner, it represents the number of micro-seconds that are awaited between hashes [default: 0] - --id-user - User id, used when a new channel is opened, it can be used by the pool to identify the miner - --nominal-hashrate-multiplier - This floating point number is used to modify the advertised nominal hashrate when opening a channel with the upstream. - If 0.0 < nominal_hashrate_multiplier < 1.0, the CPU miner will advertise a nominal hashrate that is smaller than its real capacity. - If nominal_hashrate_multiplier > 1.0, the CPU miner will advertise a nominal hashrate that is bigger than its real capacity. - If empty, the CPU miner will simply advertise its real capacity. 
- --nonces-per-call - Number of nonces to try per mining loop iteration when fast hashing is available (micro-batching). [default: 32] - --cores - Number of worker threads to use for mining. Defaults to logical CPUs minus one (leaves one core free). - -h, --help - Print help - -V, --version - Print version -``` - -Usage example: -``` -cargo run --release -- --address-pool 127.0.0.1:20000 --id-device device_id::SOLO::bc1qxy2kgdygjrsqtzq2n0yrf2493p83kkfjhx0wlh -``` - -To adjust micro-batching (see below), you can pass for example `--nonces-per-call 64`: - -``` -cargo run --release -- --address-pool 127.0.0.1:20000 \ - --id-device device_id::SOLO::bc1qxy2kgdygjrsqtzq2n0yrf2493p83kkfjhx0wlh \ - --nonces-per-call 64 -``` - -## handicap - -CPU mining could damage the system due to excessive heat. - -The `--handicap` parameter should be used as a safety mechanism to slow down the hashrate in order to preserve hardware. - -## nominal hashrate multiplier - -Let's imagine that: -- the upstream wants to receive shares every ~100s (on average) -- the CPU miner nominal hashrate is 1k H/s - -Maybe we want to do a test where we don't want to wait ~100s before a share is submitted by the CPU miner. - -In that case, we need the CPU miner to advertise a smaller hashrate, which will force the upstream to set a lower -difficulty target. - -The `--nominal-hashrate-multiplier` can be used to advertise a custom nominal hashrate. - -In the scenario described above, we could launch the CPU miner with `--nominal-hashrate-multiplier 0.01`. - -The CPU miner would advertise 0.01k H/s, which would cause the upstream to set the difficulty target such that the CPU miner would find a share within ~1s. - -This feature can also be used to advertise a bigger nominal hashrate by using values above `1.0`. - -That can also be useful for testing difficulty adjustment algorithms on Sv2 upstreams. 
- -## Micro-batching (nonces per call) - -The miner supports hashing multiple consecutive nonces per loop iteration when the fast hashing path is available. This reduces outer-loop overhead and can slightly increase throughput on some CPUs. - -- Flag: `--nonces-per-call ` -- Default: `32` -- Trade-off: larger batches can increase latency to detecting a found share because the loop advances in steps of `N`. Choose smaller values (e.g., `4`–`16`) if you care more about latency; larger values (e.g., `32`–`128`) may squeeze a bit more throughput. - -This setting only affects the CPU loop structure; it does not change the hash function or correctness. - -## Worker threads - -By default, the miner uses one worker thread per logical CPU minus one (N-1). This leaves a core available for the operating system and scheduling overhead. - -You can override this with `--cores `, clamped between `1` and the number of logical CPUs. - -Examples: - -```zsh -# Pin to a small fixed number of workers -cargo run --release -- --address-pool 127.0.0.1:20000 --cores 2 -``` - -If `--cores` is omitted, auto mode (N-1) is used. - -## Benchmarks - -You can measure performance with Criterion. From this directory: - -```zsh -cargo bench --bench hasher_bench -- --quiet -``` - -- `hasher_bench` compares baseline `block_hash()` against the optimized midstate+compress256 path. - -To analyze the effect of micro-batching on an end-of-loop iteration, run: - -```zsh -cargo bench --bench microbatch_bench -- --quiet -``` - -- `microbatch_bench` sweeps several batch sizes and sets Criterion throughput to `Elements = N` where each element is one nonce. This means: - - The reported time per iteration divides roughly by `N` to get per-nonce time. - - Criterion also prints throughput in elements/s (hashes/s). For convenience, the bench additionally prints a concise `MH/s` per configuration. - -By default the bench runs a concise subset of batch sizes: `1,8,32,128`. 
You can override the list via an environment variable: - -```zsh -MINING_DEVICE_BATCH_SIZES=1,4,8,16,32,64,128 cargo bench --bench microbatch_bench -- --quiet -``` - -Tip: pick the smallest `N` that gives you near-peak throughput to keep share-finding latency low. - -### Total scaling (multi-core) - -Total throughput doesn’t always scale linearly with more workers (due to CPU topology, turbo, thermal limits, etc.). Use the scaling bench to measure aggregate MH/s while ramping worker counts from 1 up to your number of logical CPUs: - -```zsh -cargo bench --bench scaling_bench -- --quiet -``` - -- The bench automatically detects the number of logical CPUs and iterates workers from `1..=N` (no environment variable needed). - -The bench prints one concise summary line per configuration and shows incremental improvements versus the previous worker count, including the approximate MH/s gained per additional worker. It also sets Criterion throughput to Elements equal to total nonces hashed, so Elements/s equals total hashes/s. 
diff --git a/roles/test-utils/mining-device/benches/hasher_bench.rs b/roles/test-utils/mining-device/benches/hasher_bench.rs deleted file mode 100644 index 7f4284eec8..0000000000 --- a/roles/test-utils/mining-device/benches/hasher_bench.rs +++ /dev/null @@ -1,54 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; -use mining_device::FastSha256d; -use rand::{thread_rng, Rng}; -use stratum_apps::stratum_core::bitcoin::{ - block::Version, blockdata::block::Header, hash_types::BlockHash, hashes::Hash, CompactTarget, -}; - -fn random_header() -> Header { - let mut rng = thread_rng(); - let prev_hash: [u8; 32] = rng.gen(); - let prev_hash = Hash::from_byte_array(prev_hash); - let merkle_root: [u8; 32] = rng.gen(); - let merkle_root = Hash::from_byte_array(merkle_root); - Header { - version: Version::from_consensus(rng.gen::()), - prev_blockhash: BlockHash::from_raw_hash(prev_hash), - merkle_root, - time: std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH - std::time::Duration::from_secs(60)) - .unwrap() - .as_secs() as u32, - bits: CompactTarget::from_consensus(rng.gen()), - nonce: 0, - } -} - -fn bench_hasher(c: &mut Criterion) { - let mut group = c.benchmark_group("mining_device_hasher"); - let header = random_header(); - - // Baseline using rust-bitcoin block_hash() - group.bench_function(BenchmarkId::new("baseline_block_hash", "full"), |b| { - let mut h = header; - b.iter(|| { - h.nonce = h.nonce.wrapping_add(1); - let _ = black_box(h.block_hash()); - }); - }); - - // Optimized midstate+compress256 - group.bench_function(BenchmarkId::new("fast_midstate", "compress256"), |b| { - let mut h = header; - let mut fast = FastSha256d::from_header_static(&h); - b.iter(|| { - h.nonce = h.nonce.wrapping_add(1); - let _ = black_box(fast.hash_with_nonce_time(h.nonce, h.time)); - }); - }); - - group.finish(); -} - -criterion_group!(benches, bench_hasher); -criterion_main!(benches); diff --git 
a/roles/test-utils/mining-device/benches/microbatch_bench.rs b/roles/test-utils/mining-device/benches/microbatch_bench.rs deleted file mode 100644 index f5b4e88cd0..0000000000 --- a/roles/test-utils/mining-device/benches/microbatch_bench.rs +++ /dev/null @@ -1,105 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use mining_device::{set_nonces_per_call, FastSha256d}; -use rand::{thread_rng, Rng}; -use std::time::Duration; -use stratum_apps::stratum_core::bitcoin::{ - block::Version, blockdata::block::Header, hash_types::BlockHash, hashes::Hash, CompactTarget, -}; - -fn random_header() -> Header { - let mut rng = thread_rng(); - let prev_hash: [u8; 32] = rng.gen(); - let prev_hash = Hash::from_byte_array(prev_hash); - let merkle_root: [u8; 32] = rng.gen(); - let merkle_root = Hash::from_byte_array(merkle_root); - Header { - version: Version::from_consensus(rng.gen::()), - prev_blockhash: BlockHash::from_raw_hash(prev_hash), - merkle_root, - time: std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH - std::time::Duration::from_secs(60)) - .unwrap() - .as_secs() as u32, - bits: CompactTarget::from_consensus(rng.gen()), - nonce: 0, - } -} - -fn bench_microbatch(c: &mut Criterion) { - // Report hardware SHA availability once at start - #[cfg(target_arch = "x86_64")] - println!( - "Hardware SHA available (x86 SHA-NI): {}", - std::is_x86_feature_detected!("sha") - ); - #[cfg(target_arch = "aarch64")] - println!( - "Hardware SHA available (ARMv8 SHA2): {}", - std::arch::is_aarch64_feature_detected!("sha2") - ); - #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] - println!("Hardware SHA detection: not applicable for this arch"); - - let mut group = c.benchmark_group("mining_device_microbatch"); - // Keep output and run-time concise - group.sample_size(10); - group.warm_up_time(Duration::from_millis(100)); - group.measurement_time(Duration::from_secs(1)); - let header = 
random_header(); - let mut fast = FastSha256d::from_header_static(&header); - // Fewer defaults for less verbose output; allow override via env var - let batches: Vec = std::env::var("MINING_DEVICE_BATCH_SIZES") - .ok() - .and_then(|s| { - s.split(',') - .map(|p| p.trim().parse::().ok()) - .collect::>>() - }) - .filter(|v| !v.is_empty()) - .unwrap_or_else(|| vec![1, 8, 32, 128]); - - for &b in &batches { - group.throughput(Throughput::Elements(b as u64)); - group.bench_function(BenchmarkId::from_parameter(b), |bencher| { - set_nonces_per_call(b); - let mut h = header; - bencher.iter(|| { - // Simulate one mining-loop iteration: hash "b" nonces - let start = h.nonce; - let time = h.time; - for i in 0..b { - let hsh = fast.hash_with_nonce_time(start.wrapping_add(i), time); - black_box(hsh); - } - h.nonce = start.wrapping_add(b); - }); - }); - - // Print a concise MH/s estimate per configuration (outside Criterion's stats) - // Do a quick one-shot timing over a small fixed workload to avoid noisy output. - // Note: This is a convenience display; for rigorous numbers, rely on Criterion results. 
- let mut h = header; - set_nonces_per_call(b); - let reps: u32 = 200_000 / b.max(1); // ~200k hashes in total; fast and stable - let total_hashes: u64 = reps as u64 * b as u64; - let start_inst = std::time::Instant::now(); - for _ in 0..reps { - let start = h.nonce; - let time = h.time; - for i in 0..b { - let _ = black_box(fast.hash_with_nonce_time(start.wrapping_add(i), time)); - } - h.nonce = start.wrapping_add(b); - } - let dur = start_inst.elapsed(); - let secs = dur.as_secs_f64().max(1e-9); - let hps = (total_hashes as f64) / secs; // hashes per second - let mhps = hps / 1_000_000.0; - println!("batch={b}: ~{mhps:.3} MH/s"); - } - - group.finish(); -} - -criterion_group!(benches, bench_microbatch); -criterion_main!(benches); diff --git a/roles/test-utils/mining-device/benches/scaling_bench.rs b/roles/test-utils/mining-device/benches/scaling_bench.rs deleted file mode 100644 index 1efda8ea99..0000000000 --- a/roles/test-utils/mining-device/benches/scaling_bench.rs +++ /dev/null @@ -1,169 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use mining_device::FastSha256d; -use rand::{thread_rng, Rng}; -use std::{ - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, Barrier, - }, - thread, - time::Instant, -}; -use stratum_apps::stratum_core::bitcoin::{ - block::Version, blockdata::block::Header, hash_types::BlockHash, hashes::Hash, CompactTarget, -}; - -fn random_header() -> Header { - let mut rng = thread_rng(); - let prev_hash: [u8; 32] = rng.gen(); - let prev_hash = Hash::from_byte_array(prev_hash); - let merkle_root: [u8; 32] = rng.gen(); - let merkle_root = Hash::from_byte_array(merkle_root); - Header { - version: Version::from_consensus(rng.gen::()), - prev_blockhash: BlockHash::from_raw_hash(prev_hash), - merkle_root, - time: std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH - std::time::Duration::from_secs(60)) - .unwrap() - .as_secs() as u32, - bits: 
CompactTarget::from_consensus(rng.gen()), - nonce: 0, - } -} - -fn bench_scaling(c: &mut Criterion) { - let mut group = c.benchmark_group("mining_device_scaling"); - // Measure logical CPUs and test scaling from 1..=N - let logical_cpus = num_cpus::get().max(1); - let workers: Vec = (1..=logical_cpus).collect(); - - // Keep runs short but representative - group.sample_size(10); - group.warm_up_time(std::time::Duration::from_millis(100)); - group.measurement_time(std::time::Duration::from_secs(1)); - - let header = random_header(); - - // Helper: quick one-shot timing used only for concise logging (outside Criterion loop) - let quick_measure_total_mhps = |n: usize| -> f64 { - // Each measurement hashes this many nonces total across n threads - let per_thread: u32 = 200_000 / (n as u32).max(1); - let total_hashes = per_thread as u64 * n as u64; - let stop = Arc::new(AtomicBool::new(false)); - let barrier = Arc::new(Barrier::new(n)); - let mut handles = Vec::with_capacity(n); - for i in 0..n { - let stop = stop.clone(); - let barrier = barrier.clone(); - let mut h = header; - h.nonce = i as u32; - let mut fast = FastSha256d::from_header_static(&h); - let per = per_thread; - handles.push(thread::spawn(move || { - barrier.wait(); - let start = Instant::now(); - let time = h.time; - let mut nonce = h.nonce; - for _ in 0..per { - let _ = black_box(fast.hash_with_nonce_time(nonce, time)); - nonce = nonce.wrapping_add(n as u32); // stride to avoid overlap - if stop.load(Ordering::Relaxed) { - break; - } - } - start.elapsed() - })); - } - let mut max_elapsed = std::time::Duration::ZERO; - for h in handles { - let d = h.join().unwrap(); - if d > max_elapsed { - max_elapsed = d; - } - } - let secs = max_elapsed.as_secs_f64().max(1e-9); - let hps = (total_hashes as f64) / secs; - hps / 1_000_000.0 - }; - - // Print one concise line per worker count, including incremental gain vs previous - let mut prev_workers: Option = None; - let mut prev_mhps: Option = None; - - for &n in 
&workers { - // Each iteration hashes this many nonces total across n threads - let per_thread: u32 = 200_000 / (n as u32).max(1); - let total_hashes = per_thread as u64 * n as u64; - group.throughput(Throughput::Elements(total_hashes)); - - // One-shot concise summary print (not part of Criterion timing) - let mhps = quick_measure_total_mhps(n); - if let (Some(pn), Some(prev)) = (prev_workers, prev_mhps) { - let added = n.saturating_sub(pn).max(1); - let delta = mhps - prev; - let pct = if prev > 0.0 { - (delta / prev) * 100.0 - } else { - 0.0 - }; - let per_cpu = delta / (added as f64); - println!( - "workers={n}: ~{mhps:.3} MH/s (total) | +{delta:.3} vs prev (+{pct:.1}%), ~{per_cpu:.3} MH/s per added worker" - ); - } else { - println!("workers={n}: ~{mhps:.3} MH/s (total)"); - } - prev_workers = Some(n); - prev_mhps = Some(mhps); - - group.bench_function(BenchmarkId::from_parameter(n), |b| { - b.iter(|| { - let stop = Arc::new(AtomicBool::new(false)); - let barrier = Arc::new(Barrier::new(n)); - let mut handles = Vec::with_capacity(n); - for i in 0..n { - let stop = stop.clone(); - let barrier = barrier.clone(); - let mut h = header; - h.nonce = i as u32; - let mut fast = FastSha256d::from_header_static(&h); - let per = per_thread; - handles.push(thread::spawn(move || { - // start together - barrier.wait(); - let start = Instant::now(); - let time = h.time; - let mut nonce = h.nonce; - for _ in 0..per { - // One hash per step; inner batching isn't necessary here - let _ = black_box(fast.hash_with_nonce_time(nonce, time)); - nonce = nonce.wrapping_add(n as u32); // stride to avoid overlap - if stop.load(Ordering::Relaxed) { - break; - } - } - start.elapsed() - })); - } - // Collect times and compute MH/s - let mut max_elapsed = std::time::Duration::ZERO; - for h in handles { - let d = h.join().unwrap(); - if d > max_elapsed { - max_elapsed = d; - } - } - let _secs = max_elapsed.as_secs_f64().max(1e-9); - let _hps = (total_hashes as f64) / _secs; - let _mhps = 
_hps / 1_000_000.0; - // Intentionally no println! inside Criterion iteration to keep output concise - }); - }); - } - - group.finish(); -} - -criterion_group!(benches, bench_scaling); -criterion_main!(benches); diff --git a/roles/test-utils/mining-device/src/lib/mod.rs b/roles/test-utils/mining-device/src/lib/mod.rs deleted file mode 100644 index 96cec9a7bf..0000000000 --- a/roles/test-utils/mining-device/src/lib/mod.rs +++ /dev/null @@ -1,1079 +0,0 @@ -#![allow(clippy::option_map_unit_fn)] -use async_channel::{Receiver, Sender}; -use codec_sv2::{self, StandardEitherFrame, StandardSv2Frame}; -use common_messages_sv2::{Protocol, SetupConnection, SetupConnectionSuccess}; -use mining_sv2::*; -use network_helpers_sv2::noise_connection::Connection; -use noise_sv2::Initiator; -use num_format::{Locale, ToFormattedString}; -use parsers_sv2::{Mining, MiningDeviceMessages}; -use primitive_types::U256; -use rand::{thread_rng, Rng}; -use roles_logic_sv2::{ - errors::Error, - handlers::{ - common::ParseCommonMessagesFromUpstream, - mining::{ParseMiningMessagesFromUpstream, SendTo, SupportedChannelTypes}, - }, - utils::Mutex, -}; -use std::{ - net::{SocketAddr, ToSocketAddrs}, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - thread::available_parallelism, - time::{Duration, Instant}, -}; -use stratum_apps::{ - key_utils::Secp256k1PublicKey, - stratum_core::bitcoin::{ - blockdata::block::Header, hash_types::BlockHash, hashes::Hash, CompactTarget, - }, -}; -use tokio::net::TcpStream; -use tracing::{debug, error, info}; - -use stratum_apps::stratum_core::bitcoin::consensus::encode::serialize as btc_serialize; - -// Fast SHA256d midstate hasher -use sha2::{ - compress256, - digest::generic_array::{typenum::U64, GenericArray}, -}; - -// Tuneable: how many nonces to try per mining loop iteration when fast hasher is available. -// Runtime-configurable so the binary and benches can adjust it without changing code. 
-use std::sync::atomic::AtomicU32; -static NONCES_PER_CALL_RUNTIME: AtomicU32 = AtomicU32::new(32); -// Runtime-configurable number of worker threads; 0 means "auto" (N-1) -static WORKER_OVERRIDE: AtomicU32 = AtomicU32::new(0); - -#[inline] -pub fn set_nonces_per_call(n: u32) { - // Avoid zero (would stall the loop); clamp to at least 1 - let n = n.max(1); - NONCES_PER_CALL_RUNTIME.store(n, Ordering::Relaxed); -} - -#[inline] -fn nonces_per_call() -> u32 { - NONCES_PER_CALL_RUNTIME.load(Ordering::Relaxed).max(1) -} - -/// Override the number of mining worker threads. If set to 0, auto mode (N-1) is used. -#[inline] -pub fn set_cores(n: u32) { - WORKER_OVERRIDE.store(n, Ordering::Relaxed); -} - -/// Resolve effective worker count: if override is 0, use max(1, logical_cpus-1). -#[inline] -fn worker_count() -> u32 { - let total_cpus = available_parallelism().map(|p| p.get()).unwrap_or(1) as u32; - let auto = total_cpus.saturating_sub(1).max(1); - let override_n = WORKER_OVERRIDE.load(Ordering::Relaxed); - if override_n == 0 { - auto - } else { - // Clamp to [1, total_cpus] to avoid oversubscription or zero - override_n.clamp(1, total_cpus) - } -} - -/// Public helper: current effective worker threads (after considering override and auto mode) -#[inline] -pub fn effective_worker_count() -> u32 { - worker_count() -} - -/// Public helper: total logical CPUs detected -#[inline] -pub fn total_logical_cpus() -> u32 { - available_parallelism().map(|p| p.get()).unwrap_or(1) as u32 -} - -pub async fn connect( - address: String, - pub_key: Option, - device_id: Option, - user_id: Option, - handicap: u32, - nominal_hashrate_multiplier: Option, - single_submit: bool, -) { - let address = address - .clone() - .to_socket_addrs() - .expect("Invalid pool address, use one of this formats: ip:port, domain:port") - .next() - .expect("Invalid pool address, use one of this formats: ip:port, domain:port"); - info!("Connecting to pool at {}", address); - let socket = loop { - let pool = 
tokio::time::timeout(Duration::from_secs(5), TcpStream::connect(address)).await; - match pool { - Ok(result) => match result { - Ok(socket) => break socket, - Err(e) => { - error!( - "Failed to connect to Upstream role at {}, retrying in 5s: {}", - address, e - ); - tokio::time::sleep(Duration::from_secs(5)).await; - } - }, - Err(_) => { - error!("Pool is unresponsive, terminating"); - std::process::exit(1); - } - } - }; - info!("Pool tcp connection established at {}", address); - let address = socket.peer_addr().unwrap(); - let initiator = Initiator::new(pub_key.map(|e| e.0)); - let (receiver, sender) = - Connection::new(socket, codec_sv2::HandshakeRole::Initiator(initiator)) - .await - .unwrap(); - info!("Pool noise connection established at {}", address); - Device::start( - receiver, - sender, - address, - device_id, - user_id, - handicap, - nominal_hashrate_multiplier, - single_submit, - ) - .await -} - -pub type Message = MiningDeviceMessages<'static>; -pub type StdFrame = StandardSv2Frame; -pub type EitherFrame = StandardEitherFrame; - -struct SetupConnectionHandler {} -use common_messages_sv2::Reconnect; -use std::convert::TryInto; -use stratum_apps::stratum_core::bitcoin::block::Version; - -impl SetupConnectionHandler { - pub fn new() -> Self { - SetupConnectionHandler {} - } - fn get_setup_connection_message( - address: SocketAddr, - device_id: Option, - ) -> SetupConnection<'static> { - let endpoint_host = address.ip().to_string().into_bytes().try_into().unwrap(); - let vendor = String::new().try_into().unwrap(); - let hardware_version = String::new().try_into().unwrap(); - let firmware = String::new().try_into().unwrap(); - let device_id = device_id.unwrap_or_default(); - info!( - "Creating SetupConnection message with device id: {:?}", - device_id - ); - SetupConnection { - protocol: Protocol::MiningProtocol, - min_version: 2, - max_version: 2, - flags: 0b0000_0000_0000_0000_0000_0000_0000_0001, - endpoint_host, - endpoint_port: address.port(), - 
vendor, - hardware_version, - firmware, - device_id: device_id.try_into().unwrap(), - } - } - pub async fn setup( - self_: Arc>, - receiver: &mut Receiver, - sender: &mut Sender, - device_id: Option, - address: SocketAddr, - ) { - let setup_connection = Self::get_setup_connection_message(address, device_id); - - let sv2_frame: StdFrame = MiningDeviceMessages::Common(setup_connection.into()) - .try_into() - .unwrap(); - let sv2_frame = sv2_frame.into(); - sender.send(sv2_frame).await.unwrap(); - info!("Setup connection sent to {}", address); - - let mut incoming: StdFrame = receiver.recv().await.unwrap().try_into().unwrap(); - let message_type = incoming.get_header().unwrap().msg_type(); - let payload = incoming.payload(); - ParseCommonMessagesFromUpstream::handle_message_common(self_, message_type, payload) - .unwrap(); - } -} - -impl ParseCommonMessagesFromUpstream for SetupConnectionHandler { - fn handle_setup_connection_success( - &mut self, - m: SetupConnectionSuccess, - ) -> Result { - use roles_logic_sv2::handlers::common::SendTo; - info!( - "Received `SetupConnectionSuccess`: version={}, flags={:b}", - m.used_version, m.flags - ); - Ok(SendTo::None(None)) - } - - fn handle_setup_connection_error( - &mut self, - _: common_messages_sv2::SetupConnectionError, - ) -> Result { - error!("Setup connection error"); - todo!() - } - - fn handle_channel_endpoint_changed( - &mut self, - _: common_messages_sv2::ChannelEndpointChanged, - ) -> Result { - todo!() - } - - fn handle_reconnect( - &mut self, - _m: Reconnect, - ) -> Result { - todo!() - } -} - -#[derive(Debug, Clone)] -struct NewWorkNotifier { - should_send: bool, - sender: Sender<()>, -} - -#[derive(Debug)] -pub struct Device { - #[allow(dead_code)] - receiver: Receiver, - sender: Sender, - #[allow(dead_code)] - channel_opened: bool, - channel_id: Option, - miner: Arc>, - jobs: Vec>, - prev_hash: Option>, - sequence_numbers: AtomicU32, - notify_changes_to_mining_thread: NewWorkNotifier, -} - -fn open_channel( - 
device_id: Option, - nominal_hashrate_multiplier: Option, - handicap: u32, -) -> OpenStandardMiningChannel<'static> { - let user_identity = device_id.unwrap_or_default().try_into().unwrap(); - let id: u32 = 10; - info!("Measuring CPU hashrate"); - let measured_total_hs = measure_hashrate(5, handicap); - let measured_total_mhs = measured_total_hs / 1_000_000.0; - info!( - "Measured CPU hashrate β‰ˆ {} MH/s", - format_mhs(measured_total_mhs) - ); - let measured_hashrate = measured_total_hs as f32; - let nominal_hash_rate = match nominal_hashrate_multiplier { - Some(m) => measured_hashrate * m, - None => measured_hashrate, - }; - - info!("MINING DEVICE: send open channel with request id {}", id); - - OpenStandardMiningChannel { - request_id: id.into(), - user_identity, - nominal_hash_rate, - max_target: vec![0xFF_u8; 32].try_into().unwrap(), - } -} - -impl Device { - #[allow(clippy::too_many_arguments)] - async fn start( - mut receiver: Receiver, - mut sender: Sender, - addr: SocketAddr, - device_id: Option, - user_id: Option, - handicap: u32, - nominal_hashrate_multiplier: Option, - single_submit: bool, - ) { - let setup_connection_handler = Arc::new(Mutex::new(SetupConnectionHandler::new())); - SetupConnectionHandler::setup( - setup_connection_handler, - &mut receiver, - &mut sender, - device_id, - addr, - ) - .await; - info!("Pool sv2 connection established at {}", addr); - let miner = Arc::new(Mutex::new(Miner::new(handicap))); - let (notify_changes_to_mining_thread, update_miners) = async_channel::unbounded(); - let self_ = Self { - channel_opened: false, - receiver: receiver.clone(), - sender: sender.clone(), - miner: miner.clone(), - jobs: Vec::new(), - prev_hash: None, - channel_id: None, - sequence_numbers: AtomicU32::new(0), - notify_changes_to_mining_thread: NewWorkNotifier { - should_send: true, - sender: notify_changes_to_mining_thread, - }, - }; - let open_channel = MiningDeviceMessages::Mining(Mining::OpenStandardMiningChannel( - open_channel(user_id, 
nominal_hashrate_multiplier, handicap), - )); - let frame: StdFrame = open_channel.try_into().unwrap(); - self_.sender.send(frame.into()).await.unwrap(); - let self_mutex = std::sync::Arc::new(Mutex::new(self_)); - let cloned = self_mutex.clone(); - - let (share_send, share_recv) = async_channel::unbounded(); - - start_mining_threads(update_miners, miner, share_send); - tokio::task::spawn(async move { - let recv = share_recv.clone(); - loop { - let (nonce, job_id, version, ntime) = recv.recv().await.unwrap(); - Self::send_share(cloned.clone(), nonce, job_id, version, ntime).await; - if single_submit { - break; - } - } - }); - - loop { - let mut incoming: StdFrame = receiver.recv().await.unwrap().try_into().unwrap(); - let message_type = incoming.get_header().unwrap().msg_type(); - let payload = incoming.payload(); - let next = - Device::handle_message_mining(self_mutex.clone(), message_type, payload).unwrap(); - let mut notify_changes_to_mining_thread = self_mutex - .safe_lock(|s| s.notify_changes_to_mining_thread.clone()) - .unwrap(); - if notify_changes_to_mining_thread.should_send - && (message_type == stratum_apps::stratum_core::mining_sv2::MESSAGE_TYPE_NEW_MINING_JOB - || message_type - == stratum_apps::stratum_core::mining_sv2::MESSAGE_TYPE_MINING_SET_NEW_PREV_HASH - || message_type == stratum_apps::stratum_core::mining_sv2::MESSAGE_TYPE_SET_TARGET) - { - notify_changes_to_mining_thread - .sender - .send(()) - .await - .unwrap(); - notify_changes_to_mining_thread.should_send = false; - }; - match next { - SendTo::RelayNewMessageToRemote(_, m) => { - let sv2_frame: StdFrame = MiningDeviceMessages::Mining(m).try_into().unwrap(); - let either_frame: EitherFrame = sv2_frame.into(); - sender.send(either_frame).await.unwrap(); - } - SendTo::None(_) => (), - _ => panic!(), - } - } - } - - async fn send_share( - self_mutex: Arc>, - nonce: u32, - job_id: u32, - version: u32, - ntime: u32, - ) { - let share = - 
MiningDeviceMessages::Mining(Mining::SubmitSharesStandard(SubmitSharesStandard { - channel_id: self_mutex.safe_lock(|s| s.channel_id.unwrap()).unwrap(), - sequence_number: self_mutex - .safe_lock(|s| s.sequence_numbers.fetch_add(1, Ordering::Relaxed)) - .unwrap(), - job_id, - nonce, - ntime, - version, - })); - let frame: StdFrame = share.try_into().unwrap(); - let sender = self_mutex.safe_lock(|s| s.sender.clone()).unwrap(); - sender.send(frame.into()).await.unwrap(); - } -} - -impl ParseMiningMessagesFromUpstream<()> for Device { - fn get_channel_type(&self) -> SupportedChannelTypes { - SupportedChannelTypes::Standard - } - - fn is_work_selection_enabled(&self) -> bool { - false - } - - fn handle_open_standard_mining_channel_success( - &mut self, - m: OpenStandardMiningChannelSuccess, - ) -> Result, Error> { - self.channel_opened = true; - self.channel_id = Some(m.channel_id); - let req_id = m.get_request_id_as_u32(); - info!( - "MINING DEVICE: channel opened with: group id {}, channel id {}, request id {}", - m.group_channel_id, m.channel_id, req_id - ); - self.miner - .safe_lock(|miner| miner.new_target(m.target.to_vec())) - .unwrap(); - self.notify_changes_to_mining_thread.should_send = true; - Ok(SendTo::None(None)) - } - - fn handle_open_extended_mining_channel_success( - &mut self, - _: OpenExtendedMiningChannelSuccess, - ) -> Result, Error> { - unreachable!() - } - - fn handle_open_mining_channel_error( - &mut self, - _: OpenMiningChannelError, - ) -> Result, Error> { - todo!() - } - - fn handle_update_channel_error(&mut self, _: UpdateChannelError) -> Result, Error> { - todo!() - } - - fn handle_close_channel(&mut self, _: CloseChannel) -> Result, Error> { - todo!() - } - - fn handle_set_extranonce_prefix( - &mut self, - _: SetExtranoncePrefix, - ) -> Result, Error> { - todo!() - } - - fn handle_submit_shares_success( - &mut self, - m: SubmitSharesSuccess, - ) -> Result, Error> { - info!("Received SubmitSharesSuccess"); - debug!("SubmitSharesSuccess: {}", 
m); - Ok(SendTo::None(None)) - } - - fn handle_submit_shares_error(&mut self, m: SubmitSharesError) -> Result, Error> { - error!( - "Received SubmitSharesError with error code {}", - std::str::from_utf8(m.error_code.as_ref()).unwrap_or("unknown error code") - ); - Ok(SendTo::None(None)) - } - - fn handle_new_mining_job(&mut self, m: NewMiningJob) -> Result, Error> { - info!( - "Received new mining job for channel id: {} with job id: {} is future: {}", - m.channel_id, - m.job_id, - m.is_future() - ); - debug!("NewMiningJob: {}", m); - match (m.is_future(), self.prev_hash.as_ref()) { - (false, Some(p_h)) => { - self.miner - .safe_lock(|miner| miner.new_header(p_h, &m)) - .unwrap(); - self.jobs = vec![m.as_static()]; - self.notify_changes_to_mining_thread.should_send = true; - } - (true, _) => self.jobs.push(m.as_static()), - (false, None) => { - panic!() - } - } - Ok(SendTo::None(None)) - } - - fn handle_new_extended_mining_job( - &mut self, - _: NewExtendedMiningJob, - ) -> Result, Error> { - todo!() - } - - fn handle_set_new_prev_hash(&mut self, m: SetNewPrevHash) -> Result, Error> { - info!( - "Received SetNewPrevHash channel id: {}, job id: {}", - m.channel_id, m.job_id - ); - debug!("SetNewPrevHash: {}", m); - let jobs: Vec<&NewMiningJob<'static>> = self - .jobs - .iter() - .filter(|j| j.job_id == m.job_id && j.is_future()) - .collect(); - match jobs.len() { - 0 => { - self.prev_hash = Some(m.as_static()); - } - 1 => { - self.miner - .safe_lock(|miner| miner.new_header(&m, jobs[0])) - .unwrap(); - self.jobs = vec![jobs[0].clone()]; - self.prev_hash = Some(m.as_static()); - self.notify_changes_to_mining_thread.should_send = true; - } - _ => panic!(), - } - Ok(SendTo::None(None)) - } - - fn handle_set_custom_mining_job_success( - &mut self, - _: SetCustomMiningJobSuccess, - ) -> Result, Error> { - todo!() - } - - fn handle_set_custom_mining_job_error( - &mut self, - _: SetCustomMiningJobError, - ) -> Result, Error> { - todo!() - } - - fn handle_set_target(&mut 
self, m: SetTarget) -> Result, Error> { - info!("Received SetTarget for channel id: {}", m.channel_id); - debug!("SetTarget: {}", m); - self.miner - .safe_lock(|miner| miner.new_target(m.maximum_target.to_vec())) - .unwrap(); - self.notify_changes_to_mining_thread.should_send = true; - Ok(SendTo::None(None)) - } - - fn handle_set_group_channel(&mut self, _m: SetGroupChannel) -> Result, Error> { - todo!() - } -} - -#[derive(Debug, Clone)] -struct Miner { - header: Option
, - target: Option, - job_id: Option, - version: Option, - handicap: u32, - // Optimized hashing state - fast_hasher: Option, -} - -impl Miner { - fn new(handicap: u32) -> Self { - Self { - target: None, - header: None, - job_id: None, - version: None, - handicap, - fast_hasher: None, - } - } - - fn new_target(&mut self, target: Vec) { - // target is sent in LE format, we'll keep it that way - let hex_string = target - .iter() - .fold("".to_string(), |acc, b| acc + format!("{b:02x}").as_str()); - info!("Set target to {}", hex_string); - // Store the target as U256 in little-endian format - self.target = Some(U256::from_little_endian(target.as_slice())); - } - - // Same as new_target but without logging (useful for internal probes) - fn new_target_silent(&mut self, target: Vec) { - self.target = Some(U256::from_little_endian(target.as_slice())); - } - - fn new_header(&mut self, set_new_prev_hash: &SetNewPrevHash, new_job: &NewMiningJob) { - self.job_id = Some(new_job.job_id); - self.version = Some(new_job.version); - let prev_hash: [u8; 32] = set_new_prev_hash.prev_hash.to_vec().try_into().unwrap(); - let prev_hash = Hash::from_byte_array(prev_hash); - let merkle_root: [u8; 32] = new_job.merkle_root.to_vec().try_into().unwrap(); - let merkle_root = Hash::from_byte_array(merkle_root); - // fields need to be added as BE and the are converted to LE in the background before - // hashing - let header = Header { - version: Version::from_consensus(new_job.version as i32), - prev_blockhash: BlockHash::from_raw_hash(prev_hash), - merkle_root, - time: std::time::SystemTime::now() - .duration_since( - std::time::SystemTime::UNIX_EPOCH - std::time::Duration::from_secs(60), - ) - .unwrap() - .as_secs() as u32, - bits: CompactTarget::from_consensus(set_new_prev_hash.nbits), - nonce: 0, - }; - self.header = Some(header); - // Build a fast hasher with midstate prepared for the static parts of the header - if let Some(h) = &self.header { - self.fast_hasher = 
Some(FastSha256d::from_header_static(h)); - } else { - self.fast_hasher = None; - } - } - pub fn next_share(&mut self) -> NextShareOutcome { - if let Some(header) = self.header.as_ref() { - // Use optimized path if available - let hash: [u8; 32] = if let Some(fast) = &mut self.fast_hasher { - fast.hash_with_nonce_time(header.nonce, header.time) - } else { - let hash_ = header.block_hash(); - *hash_.to_raw_hash().as_ref() - }; - - // Compare hash against target quickly in little-endian u32 words (most significant at - // index 7) - if let Some(target) = self.target { - let tgt_le = target.to_little_endian(); - // Interpret as 8 little-endian u32 words - let mut is_below = false; - let mut is_equal = true; - // Compare from most significant word (index 7) to least (index 0) - for i in (0..8).rev() { - let off = i * 4; - let hw = u32::from_le_bytes([ - hash[off], - hash[off + 1], - hash[off + 2], - hash[off + 3], - ]); - let tw = u32::from_le_bytes([ - tgt_le[off], - tgt_le[off + 1], - tgt_le[off + 2], - tgt_le[off + 3], - ]); - match hw.cmp(&tw) { - core::cmp::Ordering::Less => { - is_below = true; - is_equal = false; - break; - } - core::cmp::Ordering::Greater => { - is_below = false; - is_equal = false; - break; - } - core::cmp::Ordering::Equal => {} - } - } - - if is_below || is_equal { - info!( - "Found share with nonce: {}, for target: {:?}, with hash: {:?}", - header.nonce, self.target, hash, - ); - NextShareOutcome::ValidShare - } else { - NextShareOutcome::InvalidShare - } - } else { - std::thread::yield_now(); - NextShareOutcome::NoTarget - } - } else { - std::thread::yield_now(); - NextShareOutcome::NoHeader - } - } -} - -// A fast double-SHA256 hasher specialized for Bitcoin block headers. -// It precomputes the midstate of the first 64 bytes (version, prev_blockhash, merkle_root[0..28]) -// and allows quickly hashing varying (time, nonce) fields. 
-#[derive(Clone, Debug)] -pub struct FastSha256d { - // Midstate after processing the first 64 bytes of the header (chunk 0) - state0: [u32; 8], - // Second block for the first SHA256 (contains merkle tail, time, bits, nonce, padding, length) - // We mutate only the time (bytes 4..8) and nonce (bytes 12..16) per attempt. - block1: GenericArray, - // Reusable buffer for the second SHA256 block. Bytes 32 and 56..64 are constant; we only - // overwrite the first 32 bytes with the first digest each attempt. - second_block: GenericArray, -} - -impl FastSha256d { - pub fn from_header_static(h: &Header) -> Self { - // Use consensus serialization to get correct 80-byte header (proper endianness). - let header_ser = btc_serialize(h); - debug_assert_eq!(header_ser.len(), 80, "Serialized header must be 80 bytes"); - let mut header_bytes = [0u8; 80]; - header_bytes.copy_from_slice(&header_ser); - - // First SHA256 pass: split into two 64-byte chunks - let chunk0 = &header_bytes[0..64]; - let chunk1_last16 = &header_bytes[64..80]; // 16 bytes: merkle_tail(4), time(4), bits(4), nonce(4) - - // Compute midstate after chunk0 using compress256 on an initial state - let mut state0 = sha256_initial_state(); - let mut block = [0u8; 64]; - block.copy_from_slice(chunk0); - let ga0 = GenericArray::::clone_from_slice(&block); - compress256(&mut state0, std::slice::from_ref(&ga0)); - - // Prepare block1 template (64 bytes) which will be: - // bytes 0..16: last 16 bytes of header (time, bits, nonce) - // bytes 16: 0x80 padding - // bytes 17..56: zeros - // bytes 56..64: length in bits of the message (80 bytes -> 640 bits) in big-endian - let mut block1 = GenericArray::::default(); - block1[0..16].copy_from_slice(chunk1_last16); - block1[16] = 0x80; - block1[56..64].copy_from_slice(&640u64.to_be_bytes()); - - // Prepare reusable second block: set constants once - let mut second_block = GenericArray::::default(); - second_block[32] = 0x80; - // 33..56 are already zero via default - 
second_block[56..64].copy_from_slice(&256u64.to_be_bytes()); - - Self { - state0, - block1, - second_block, - } - } - - // Hashes header where only time and nonce vary, returns double-SHA256 as [u8;32] (little-endian - // like rust-bitcoin output) - pub fn hash_with_nonce_time(&mut self, nonce: u32, time: u32) -> [u8; 32] { - // First SHA256 second chunk: update time and nonce at offsets 68..72 and 76..80 within - // 80-byte header In our block1_template (offset 0..16 == 64..80 of header): - // time at 0..4, bits at 4..8, nonce at 12..16 - // Update time and nonce in place - self.block1[4..8].copy_from_slice(&time.to_le_bytes()); - self.block1[12..16].copy_from_slice(&nonce.to_le_bytes()); - - // Compute first SHA256 digest using midstate + block1 - let mut state1 = self.state0; - compress256(&mut state1, std::slice::from_ref(&self.block1)); - - // Now perform the second SHA256 over the 32-byte first digest Build 64-byte block: - // [digest(32)] + [0x80] + [zeros] + [length=256 bits] - // state1 words -> big-endian bytes per SHA-256 spec (fill first 32 bytes) - for (i, word) in state1.iter().enumerate() { - self.second_block[i * 4..i * 4 + 4].copy_from_slice(&word.to_be_bytes()); - } - - let mut state2 = sha256_initial_state(); - compress256(&mut state2, std::slice::from_ref(&self.second_block)); - - // Convert state2 words to bytes (big-endian), then reverse for Bitcoin-style - // little-endian - let mut out = [0u8; 32]; - for (i, word) in state2.iter().enumerate() { - out[i * 4..i * 4 + 4].copy_from_slice(&word.to_be_bytes()); - } - out - } -} - -fn sha256_initial_state() -> [u32; 8] { - [ - 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, - 0x5be0cd19, - ] -} - -enum NextShareOutcome { - ValidShare, - InvalidShare, - NoTarget, - NoHeader, -} - -impl NextShareOutcome { - pub fn is_valid(&self) -> bool { - matches!(self, NextShareOutcome::ValidShare) - } -} - -#[inline] -fn hash_meets_target_le(hash: &[u8; 32], tgt_le: &[u8; 32]) 
-> bool { - // Compare from most significant u32 word (index 7) to least (index 0) - let mut is_below = false; - let mut is_equal = true; - for i in (0..8).rev() { - let off = i * 4; - let hw = u32::from_le_bytes([hash[off], hash[off + 1], hash[off + 2], hash[off + 3]]); - let tw = u32::from_le_bytes([ - tgt_le[off], - tgt_le[off + 1], - tgt_le[off + 2], - tgt_le[off + 3], - ]); - match hw.cmp(&tw) { - core::cmp::Ordering::Less => { - is_below = true; - is_equal = false; - break; - } - core::cmp::Ordering::Greater => { - is_below = false; - is_equal = false; - break; - } - core::cmp::Ordering::Equal => {} - } - } - is_below || is_equal -} - -// Format MH/s with thousands separators and 2 decimal places using en locale separators -fn format_mhs(val_mhs: f64) -> String { - let rounded = val_mhs.round() as i64; - rounded.to_formatted_string(&Locale::en) -} - -// returns hashrate by running all worker threads in parallel for the given duration -fn measure_hashrate(duration_secs: u64, handicap: u32) -> f64 { - use std::sync::Barrier; - - // Prepare a random header template to hash - let mut rng = thread_rng(); - let prev_hash: [u8; 32] = generate_random_32_byte_array().to_vec().try_into().unwrap(); - let prev_hash = Hash::from_byte_array(prev_hash); - let merkle_root: [u8; 32] = generate_random_32_byte_array().to_vec().try_into().unwrap(); - let merkle_root = Hash::from_byte_array(merkle_root); - let header_template = Header { - version: Version::from_consensus(rng.gen()), - prev_blockhash: BlockHash::from_raw_hash(prev_hash), - merkle_root, - time: std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH - std::time::Duration::from_secs(60)) - .unwrap() - .as_secs() as u32, - bits: CompactTarget::from_consensus(rng.gen()), - nonce: 0, - }; - - let duration = Duration::from_secs(duration_secs); - let p = worker_count() as usize; - let barrier = Arc::new(Barrier::new(p + 1)); // +1 for coordinator - - let mut handles = Vec::with_capacity(p); - // 
Log a single consolidated target-setting message for the probe - info!("Set target to {}", "0".repeat(64)); - for _ in 0..p { - let barrier = barrier.clone(); - // Each thread gets its own miner and header copy - let mut miner = Miner::new(handicap); - // Set target to zero (silently) so we never trigger share submits; we're only counting - // hashes - miner.new_target_silent(vec![0_u8; 32]); - miner.header = Some(header_template); - if let Some(h) = miner.header.as_ref() { - miner.fast_hasher = Some(FastSha256d::from_header_static(h)); - } - handles.push(std::thread::spawn(move || { - // Synchronize start across threads - barrier.wait(); - let start = Instant::now(); - let mut hashes: u64 = 0; - while start.elapsed() < duration { - miner.next_share(); - hashes += 1; - } - hashes - })); - } - - // Release all workers simultaneously - barrier.wait(); - let mut total_hashes: u64 = 0; - for h in handles { - total_hashes += h.join().unwrap_or(0); - } - // Each thread ran for approximately `duration`, so total hashes per second is total/duration - (total_hashes as f64) / (duration_secs as f64) -} -fn generate_random_32_byte_array() -> [u8; 32] { - let mut rng = thread_rng(); - let mut arr = [0u8; 32]; - rng.fill(&mut arr[..]); - arr -} - -fn start_mining_threads( - have_new_job: Receiver<()>, - miner: Arc>, - share_send: Sender<(u32, u32, u32, u32)>, -) { - tokio::task::spawn(async move { - let mut killers: Vec> = vec![]; - loop { - // Determine number of workers based on override or auto (N-1) - let p = worker_count(); - let unit = u32::MAX / p; - while have_new_job.recv().await.is_ok() { - while let Some(killer) = killers.pop() { - killer.store(true, Ordering::Relaxed); - } - let miner = miner.safe_lock(|m| m.clone()).unwrap(); - for i in 0..p { - let mut miner = miner.clone(); - let share_send = share_send.clone(); - let killer = Arc::new(AtomicBool::new(false)); - miner.header.as_mut().map(|h| h.nonce = i * unit); - killers.push(killer.clone()); - 
std::thread::spawn(move || { - mine(miner, share_send, killer); - }); - } - } - } - }); -} - -fn mine(mut miner: Miner, share_send: Sender<(u32, u32, u32, u32)>, kill: Arc) { - if miner.handicap != 0 { - loop { - if kill.load(Ordering::Relaxed) { - break; - } - std::thread::sleep(std::time::Duration::from_micros(miner.handicap.into())); - // Prefer fast path with micro-batching when possible - let can_fast = - miner.fast_hasher.is_some() && miner.target.is_some() && miner.header.is_some(); - if can_fast { - let header = miner.header.as_mut().unwrap(); - let time = header.time; - let start = header.nonce; - let tgt_le = miner.target.unwrap().to_little_endian(); - let fast = miner.fast_hasher.as_mut().unwrap(); - let mut found = None; - let batch = nonces_per_call(); - for i in 0..batch { - let nonce = start.wrapping_add(i); - let hash = fast.hash_with_nonce_time(nonce, time); - if hash_meets_target_le(&hash, &tgt_le) { - found = Some((nonce, hash)); - break; - } - } - if let Some((nonce, hash)) = found { - header.nonce = nonce; - info!( - "Found share with nonce: {}, for target: {:?}, with hash: {:?}", - header.nonce, miner.target, hash, - ); - let job_id = miner.job_id.unwrap(); - let version = miner.version; - share_send - .try_send((nonce, job_id, version.unwrap(), time)) - .unwrap(); - } - // Advance nonce window - header.nonce = start.wrapping_add(batch); - } else { - if miner.next_share().is_valid() { - let nonce = miner.header.unwrap().nonce; - let time = miner.header.unwrap().time; - let job_id = miner.job_id.unwrap(); - let version = miner.version; - share_send - .try_send((nonce, job_id, version.unwrap(), time)) - .unwrap(); - } - miner - .header - .as_mut() - .map(|h| h.nonce = h.nonce.wrapping_add(1)); - } - } - } else { - loop { - // Prefer fast path with micro-batching when possible - if kill.load(Ordering::Relaxed) { - break; - } - let can_fast = - miner.fast_hasher.is_some() && miner.target.is_some() && miner.header.is_some(); - if can_fast { - let 
header = miner.header.as_mut().unwrap(); - let time = header.time; - let start = header.nonce; - let tgt_le = miner.target.unwrap().to_little_endian(); - let fast = miner.fast_hasher.as_mut().unwrap(); - let mut found = None; - let batch = nonces_per_call(); - for i in 0..batch { - let nonce = start.wrapping_add(i); - let hash = fast.hash_with_nonce_time(nonce, time); - if hash_meets_target_le(&hash, &tgt_le) { - found = Some((nonce, hash)); - break; - } - } - if let Some((nonce, hash)) = found { - header.nonce = nonce; - info!( - "Found share with nonce: {}, for target: {:?}, with hash: {:?}", - header.nonce, miner.target, hash, - ); - let job_id = miner.job_id.unwrap(); - let version = miner.version; - share_send - .try_send((nonce, job_id, version.unwrap(), time)) - .unwrap(); - } - // Advance nonce window - header.nonce = start.wrapping_add(batch); - } else { - if miner.next_share().is_valid() { - if kill.load(Ordering::Relaxed) { - break; - } - let nonce = miner.header.unwrap().nonce; - let time = miner.header.unwrap().time; - let job_id = miner.job_id.unwrap(); - let version = miner.version; - share_send - .try_send((nonce, job_id, version.unwrap(), time)) - .unwrap(); - } - miner - .header - .as_mut() - .map(|h| h.nonce = h.nonce.wrapping_add(1)); - } - } - } -} diff --git a/roles/test-utils/mining-device/src/main.rs b/roles/test-utils/mining-device/src/main.rs deleted file mode 100644 index be70942157..0000000000 --- a/roles/test-utils/mining-device/src/main.rs +++ /dev/null @@ -1,89 +0,0 @@ -#![allow(special_module_name)] -#![allow(clippy::option_map_unit_fn)] -use stratum_apps::key_utils::Secp256k1PublicKey; - -use clap::Parser; -use tracing::info; - -#[derive(Parser, Debug)] -#[command(version, about, long_about = None)] -struct Args { - #[arg( - short, - long, - help = "Pool pub key, when left empty the pool certificate is not checked" - )] - pubkey_pool: Option, - #[arg( - short, - long, - help = "Sometimes used by the pool to identify the device" - )] 
- id_device: Option, - #[arg( - short, - long, - help = "Address of the pool in this format ip:port or domain:port" - )] - address_pool: String, - #[arg( - long, - help = "This value is used to slow down the cpu miner, it represents the number of micro-seconds that are awaited between hashes", - default_value = "0" - )] - handicap: u32, - #[arg( - long, - help = "User id, used when a new channel is opened, it can be used by the pool to identify the miner" - )] - id_user: Option, - #[arg( - long, - help = "This floating point number is used to modify the advertised nominal hashrate when opening a channel with the upstream.\ - \nIf 0.0 < nominal_hashrate_multiplier < 1.0, the CPU miner will advertise a nominal hashrate that is smaller than its real capacity.\ - \nIf nominal_hashrate_multiplier > 1.0, the CPU miner will advertise a nominal hashrate that is bigger than its real capacity.\ - \nIf empty, the CPU miner will simply advertise its real capacity." - )] - nominal_hashrate_multiplier: Option, - #[arg( - long, - help = "Number of nonces to try per mining loop iteration when fast hashing is available (micro-batching)", - default_value = "32" - )] - nonces_per_call: u32, - #[arg( - long, - help = "Number of worker threads to use for mining. Defaults to logical CPUs minus one (leaves one core free)." 
- )] - cores: Option, -} - -#[tokio::main(flavor = "current_thread")] -async fn main() { - let args = Args::parse(); - tracing_subscriber::fmt::init(); - info!("start"); - // Configure micro-batch size - mining_device::set_nonces_per_call(args.nonces_per_call); - // Optional override of worker threads - if let Some(n) = args.cores { - mining_device::set_cores(n); - } - // Log worker usage (after applying overrides) - let used = mining_device::effective_worker_count(); - let total = mining_device::total_logical_cpus(); - info!( - "Using {} worker threads out of {} logical CPUs", - used, total - ); - let _ = mining_device::connect( - args.address_pool, - args.pubkey_pool, - args.id_device, - args.id_user, - args.handicap, - args.nominal_hashrate_multiplier, - false, - ) - .await; -} diff --git a/roles/test-utils/mining-device/tests/fast_hasher_equivalence.rs b/roles/test-utils/mining-device/tests/fast_hasher_equivalence.rs deleted file mode 100644 index 01e5225cf3..0000000000 --- a/roles/test-utils/mining-device/tests/fast_hasher_equivalence.rs +++ /dev/null @@ -1,41 +0,0 @@ -use mining_device::FastSha256d; -use rand::{thread_rng, Rng}; -use stratum_apps::stratum_core::bitcoin::{ - block::Version, blockdata::block::Header, hash_types::BlockHash, hashes::Hash, CompactTarget, -}; - -fn random_header() -> Header { - let mut rng = thread_rng(); - let prev_hash: [u8; 32] = rng.gen(); - let prev_hash = Hash::from_byte_array(prev_hash); - let merkle_root: [u8; 32] = rng.gen(); - let merkle_root = Hash::from_byte_array(merkle_root); - Header { - version: Version::from_consensus(rng.gen::()), - prev_blockhash: BlockHash::from_raw_hash(prev_hash), - merkle_root, - time: std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH - std::time::Duration::from_secs(60)) - .unwrap() - .as_secs() as u32, - bits: CompactTarget::from_consensus(rng.gen()), - nonce: 0, - } -} - -#[test] -fn fast_hasher_matches_baseline() { - let mut h = random_header(); - let mut 
fast = FastSha256d::from_header_static(&h); - - for _ in 0..1000 { - // Advance nonce, occasionally tweak time - h.nonce = h.nonce.wrapping_add(1); - if h.nonce % 128 == 0 { - h.time = h.time.wrapping_add(1); - } - let fast_hash = fast.hash_with_nonce_time(h.nonce, h.time); - let baseline: [u8; 32] = *h.block_hash().to_raw_hash().as_ref(); - assert_eq!(fast_hash, baseline, "Fast hasher must match baseline"); - } -} diff --git a/roles/translator/Cargo.toml b/roles/translator/Cargo.toml deleted file mode 100644 index 529f8ceae1..0000000000 --- a/roles/translator/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "translator_sv2" -version = "2.0.0" -authors = ["The Stratum V2 Developers"] -edition = "2021" -description = "SV1 to SV2 translation proxy" -documentation = "https://docs.rs/translator_sv2" -readme = "README.md" -homepage = "https://stratumprotocol.org" -repository = "https://github.com/stratum-mining/stratum" -license = "MIT OR Apache-2.0" -keywords = ["stratum", "mining", "bitcoin", "protocol", "translator", "proxy"] - -[lib] -name = "translator_sv2" -path = "src/lib/mod.rs" - -[[bin]] -name = "translator_sv2" -path = "src/main.rs" - -[dependencies] -stratum-apps = { path = "../stratum-apps", features = ["translator"] } -async-channel = "1.5.1" -serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] } -serde_json = { version = "1.0.64", default-features = false, features = ["alloc"] } -tokio = { version = "1.44.1", features = ["full"] } -ext-config = { version = "0.14.0", features = ["toml"], package = "config" } -tracing = { version = "0.1" } -clap = { version = "4.5.39", features = ["derive"] } - - -[dev-dependencies] -sha2 = "0.10.6" - diff --git a/roles/translator/README.md b/roles/translator/README.md deleted file mode 100644 index ee14bf3aa1..0000000000 --- a/roles/translator/README.md +++ /dev/null @@ -1,204 +0,0 @@ -# SV1 to SV2 Translator Proxy - -A proxy that translates between Stratum V1 (SV1) and 
Stratum V2 (SV2) mining protocols. This translator enables SV1 mining devices to connect to SV2 pools and infrastructure, bridging the gap between legacy mining hardware and modern mining protocols. - -## Architecture Overview - -The translator sits between SV1 downstream roles (mining devices) and SV2 upstream roles (pool servers or proxies), providing seamless protocol translation and advanced features like channel aggregation and failover. - -``` -<--- Most Downstream ----------------------------------------- Most Upstream ---> - -+---------------------------------------------------+ +------------------------+ -| Mining Farm | | Remote Pool | -| | | | -| +-------------------+ +------------------+ | | +-----------------+ | -| | SV1 Mining Device | <-> | Translator Proxy | <------> | SV2 Pool Server | | -| +-------------------+ +------------------+ | | +-----------------+ | -| | | | -+---------------------------------------------------+ +------------------------+ -``` - -## Configuration - -### Configuration File Structure - -The translator uses TOML configuration files with the following structure: - -```toml -# Downstream SV1 Connection (where miners connect) -downstream_address = "0.0.0.0" -downstream_port = 34255 - -# Protocol Version Support -max_supported_version = 2 -min_supported_version = 2 - -# Extranonce Configuration -downstream_extranonce2_size = 4 # Min: 2, Max: 16 (CGminer max: 8) - -# User Identity (appended with counter for each miner) -user_identity = "your_username_here" - -# Channel Configuration -aggregate_channels = true # true: shared channel, false: individual channels - -# Downstream Difficulty Configuration -[downstream_difficulty_config] -min_individual_miner_hashrate = 10_000_000_000_000.0 # 10 TH/s -shares_per_minute = 6.0 -enable_vardiff = true # Set to false when using with Job Declarator Client (JDC) - -# Upstream SV2 Connections (supports multiple with failover) -[[upstreams]] -address = "127.0.0.1" -port = 34254 -authority_pubkey 
= "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" - -[[upstreams]] -address = "backup.pool.com" -port = 34254 -authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -``` - -### Configuration Parameters - -#### **Downstream Configuration** -- `downstream_address`: IP address for SV1 miners to connect to -- `downstream_port`: Port for SV1 miners to connect to - -#### **Protocol Configuration** -- `max_supported_version`/`min_supported_version`: SV2 protocol version support -- `min_extranonce2_size`: Minimum extranonce2 size (affects mining efficiency) - -#### **Channel Configuration** -- `aggregate_channels`: - - `true`: All miners share one upstream extended channel (more efficient) - - `false`: Each miner gets its own upstream extended channel (more isolated) -- `user_identity`: Username for pool authentication (auto-suffixed per miner) - -#### **Difficulty Configuration** -- `min_individual_miner_hashrate`: Expected hashrate of weakest miner (in H/s) -- `shares_per_minute`: Target share submission rate -- `enable_vardiff`: Enable/disable variable difficulty adjustment (set to false when using with JDC) - - When `true`: Translator manages difficulty adjustments based on share submission rates - - When `false`: Upstream manages difficulty, translator forwards SetTarget messages to miners - -#### **Upstream Configuration** -- `address`/`port`: SV2 upstream server connection details -- `authority_pubkey`: Public key for SV2 connection authentication - -## Usage - -### Installation & Build - -```bash -# Clone the repository -git clone https://github.com/stratum-mining/stratum.git -cd stratum - -# Build the translator -cargo build --release -p translator_sv2 -``` - -### Running the Translator - -#### **With Local Pool** -```bash -cd roles/translator -cargo run -- -c config-examples/tproxy-config-local-pool-example.toml -``` - -#### **With Job Declaration Client** -```bash -cd roles/translator -cargo run -- -c 
config-examples/tproxy-config-local-jdc-example.toml -``` - -#### **With Hosted Pool** -```bash -cd roles/translator -cargo run -- -c config-examples/tproxy-config-hosted-pool-example.toml -``` - -### Command Line Options - -```bash -# Use specific config file -translator_sv2 -c /path/to/config.toml -translator_sv2 --config /path/to/config.toml - -# Show help -translator_sv2 -h -translator_sv2 --help -``` - -## Configuration Examples - -### Example 1: Local Pool Setup -For connecting to a local SV2 pool server: - -```toml -downstream_address = "0.0.0.0" -downstream_port = 34255 -user_identity = "miner_farm_1" -aggregate_channels = true - -[downstream_difficulty_config] -min_individual_miner_hashrate = 10_000_000_000_000.0 -shares_per_minute = 6.0 -enable_vardiff = true - -[[upstreams]] -address = "127.0.0.1" -port = 34254 -authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" -``` - -### Example 2: High-Availability Setup -For production environments with failover: - -```toml -downstream_address = "0.0.0.0" -downstream_port = 34255 -user_identity = "production_farm" -aggregate_channels = true - -[downstream_difficulty_config] -min_individual_miner_hashrate = 50_000_000_000_000.0 # 50 TH/s -shares_per_minute = 10.0 -enable_vardiff = true - -# Primary upstream -[[upstreams]] -address = "primary.pool.com" -port = 34254 -authority_pubkey = "primary_pool_pubkey" - -# Backup upstream -[[upstreams]] -address = "backup.pool.com" -port = 34254 -authority_pubkey = "backup_pool_pubkey" -``` - -## Architecture Details - -### **Component Overview** - -1. **SV1 Server**: Handles incoming SV1 connections from mining devices -2. **SV2 Upstream**: Manages connections to SV2 pool servers with failover -3. **Channel Manager**: Orchestrates message routing and protocol translation -4. **Task Manager**: Manages async task lifecycle and coordination -5. 
**Status System**: Provides real-time monitoring and health reporting - -### **Channel Modes** - -- **Aggregated Mode**: All miners share one extended channel - - More efficient for large farms - - Reduced upstream connection overhead - - Shared work distribution - -- **Non-Aggregated Mode**: Each miner gets individual upstream channel - - Better isolation between miners - - Individual difficulty adjustment by the upstream Pool \ No newline at end of file diff --git a/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml b/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml deleted file mode 100644 index 020c66a404..0000000000 --- a/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml +++ /dev/null @@ -1,46 +0,0 @@ -# Local Mining Device Downstream Connection -downstream_address = "0.0.0.0" -downstream_port = 34255 - -# Version support -max_supported_version = 2 -min_supported_version = 2 - -# Extranonce2 size for downstream connections -# This controls the rollable part of the extranonce for downstream SV1 miners -# Max value for CGminer: 8 -# Min value: 2 -downstream_extranonce2_size = 4 - -# User identity/username for pool connection -# This will be appended with a counter for each mining client (e.g., username.miner1, username.miner2) -user_identity = "your_username_here" - -# Aggregate channels: if true, all miners share one upstream channel; if false, each miner gets its own channel -aggregate_channels = true - -# Enable this option to set a predefined log file path. -# When enabled, logs will always be written to this file. -# The CLI option --log-file (or -f) will override this setting if provided. 
-# log_file = "./tproxy.log" - -# Difficulty params -[downstream_difficulty_config] -# hashes/s of the weakest miner that will be connecting (e.g.: 10 Th/s = 10_000_000_000_000.0) -min_individual_miner_hashrate=10_000_000_000_000.0 -# target number of shares per minute the miner should be sending -shares_per_minute = 6.0 -# enable variable difficulty adjustment (true by default, set to false when using with JDC) -enable_vardiff = true - -[[upstreams]] -# SRI Pool Primary Pool -address = "75.119.150.111" -port = 34254 -authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" - -# Braiins Pool Backup Pool -[[upstreams]] -address = "107.170.42.64" -port = 3333 -authority_pubkey = "9awtMD5KQgvRUh2yFbjVeT7b6hjipWcAsQHd6wEhgtDT9soosna" \ No newline at end of file diff --git a/roles/translator/config-examples/tproxy-config-local-jdc-example.toml b/roles/translator/config-examples/tproxy-config-local-jdc-example.toml deleted file mode 100644 index 28645afa8d..0000000000 --- a/roles/translator/config-examples/tproxy-config-local-jdc-example.toml +++ /dev/null @@ -1,40 +0,0 @@ -# Local Mining Device Downstream Connection -downstream_address = "0.0.0.0" -downstream_port = 34255 - -# Version support -max_supported_version = 2 -min_supported_version = 2 - -# Extranonce2 size for downstream connections -# This controls the rollable part of the extranonce for downstream miners -# Max value for CGminer: 8 -# Min value: 2 -downstream_extranonce2_size = 4 - -# User identity/username for pool connection -# This will be appended with a counter for each mining client (e.g., username.miner1, username.miner2) -user_identity = "your_username_here" - -# Aggregate channels: if true, all miners share one upstream channel; if false, each miner gets its own channel -aggregate_channels = false - -# Enable this option to set a predefined log file path. -# When enabled, logs will always be written to this file. 
-# The CLI option --log-file (or -f) will override this setting if provided. -# log_file = "./tproxy.log" - -# Difficulty params -[downstream_difficulty_config] -# hashes/s of the weakest miner that will be connecting (e.g.: 10 Th/s = 10_000_000_000_000.0) -min_individual_miner_hashrate=10_000_000_000_000.0 -# target number of shares per minute the miner should be sending -shares_per_minute = 6.0 -# disable variable difficulty adjustment when using with JDC (JDC handles vardiff) -enable_vardiff = false - - -[[upstreams]] -address = "127.0.0.1" -port = 34265 -authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" \ No newline at end of file diff --git a/roles/translator/config-examples/tproxy-config-local-pool-example.toml b/roles/translator/config-examples/tproxy-config-local-pool-example.toml deleted file mode 100644 index 65ee173af8..0000000000 --- a/roles/translator/config-examples/tproxy-config-local-pool-example.toml +++ /dev/null @@ -1,39 +0,0 @@ -# Local Mining Device Downstream Connection -downstream_address = "0.0.0.0" -downstream_port = 34255 - -# Version support -max_supported_version = 2 -min_supported_version = 2 - -# Extranonce2 size for downstream connections -# This controls the rollable part of the extranonce for downstream miners -# Max value for CGminer: 8 -# Min value: 2 -downstream_extranonce2_size = 4 - -# User identity/username for pool connection -# This will be appended with a counter for each mining client (e.g., username.miner1, username.miner2) -user_identity = "your_username_here" - -# Aggregate channels: if true, all miners share one upstream channel; if false, each miner gets its own channel -aggregate_channels = true - -# Enable this option to set a predefined log file path. -# When enabled, logs will always be written to this file. -# The CLI option --log-file (or -f) will override this setting if provided. 
-# log_file = "./tproxy.log" - -# Difficulty params -[downstream_difficulty_config] -# hashes/s of the weakest miner that will be connecting (e.g.: 10 Th/s = 10_000_000_000_000.0) -min_individual_miner_hashrate=10_000_000_000_000.0 -# target number of shares per minute the miner should be sending -shares_per_minute = 6.0 -# enable variable difficulty adjustment (true by default, set to false when using with JDC) -enable_vardiff = true - -[[upstreams]] -address = "127.0.0.1" -port = 34254 -authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" \ No newline at end of file diff --git a/roles/translator/src/args.rs b/roles/translator/src/args.rs deleted file mode 100644 index e43746ccaa..0000000000 --- a/roles/translator/src/args.rs +++ /dev/null @@ -1,52 +0,0 @@ -//! Defines the structure and parsing logic for command-line arguments. -//! -//! It provides the `Args` struct to hold parsed arguments, -//! and the `from_args` function to parse them from the command line. -use clap::Parser; -use ext_config::{Config, File, FileFormat}; -use std::path::PathBuf; -use tracing::error; -use translator_sv2::{config::TranslatorConfig, error::TproxyError}; - -/// Holds the parsed CLI arguments. -#[derive(Parser, Debug)] -#[command(author, version, about = "Translator Proxy", long_about = None)] -pub struct Args { - #[arg( - short = 'c', - long = "config", - help = "Path to the TOML configuration file", - default_value = "proxy-config.toml" - )] - pub config_path: PathBuf, - #[arg( - short = 'f', - long = "log-file", - help = "Path to the log file. If not set, logs will only be written to stdout." - )] - pub log_file: Option, -} - -/// Process CLI args, if any. 
-#[allow(clippy::result_large_err)] -pub fn process_cli_args() -> Result { - // Parse CLI arguments - let args = Args::parse(); - - // Build configuration from the provided file path - let config_path = args.config_path.to_str().ok_or_else(|| { - error!("Invalid configuration path."); - TproxyError::BadCliArgs - })?; - - let settings = Config::builder() - .add_source(File::new(config_path, FileFormat::Toml)) - .build()?; - - // Deserialize settings into TranslatorConfig - let mut config = settings.try_deserialize::()?; - - config.set_log_dir(args.log_file); - - Ok(config) -} diff --git a/roles/translator/src/lib/config.rs b/roles/translator/src/lib/config.rs deleted file mode 100644 index 8094147f93..0000000000 --- a/roles/translator/src/lib/config.rs +++ /dev/null @@ -1,268 +0,0 @@ -//! ## Translator Configuration Module -//! -//! Defines [`TranslatorConfig`], the primary configuration structure for the Translator. -//! -//! This module provides the necessary structures to configure the Translator, -//! managing connections and settings for both upstream and downstream interfaces. -//! -//! This module handles: -//! - Upstream server address, port, and authentication key ([`UpstreamConfig`]) -//! - Downstream interface address and port ([`DownstreamConfig`]) -//! - Supported protocol versions -//! - Downstream difficulty adjustment parameters ([`DownstreamDifficultyConfig`]) -use std::path::{Path, PathBuf}; - -use serde::Deserialize; -use stratum_apps::key_utils::Secp256k1PublicKey; - -/// Configuration for the Translator. -#[derive(Debug, Deserialize, Clone)] -pub struct TranslatorConfig { - pub upstreams: Vec, - /// The address for the downstream interface. - pub downstream_address: String, - /// The port for the downstream interface. - pub downstream_port: u16, - /// The maximum supported protocol version for communication. - pub max_supported_version: u16, - /// The minimum supported protocol version for communication. 
- pub min_supported_version: u16, - /// The size of the extranonce2 field for downstream mining connections. - pub downstream_extranonce2_size: u16, - /// The user identity/username to use when connecting to the pool. - /// This will be appended with a counter for each mining channel (e.g., username.miner1, - /// username.miner2). - pub user_identity: String, - /// Configuration settings for managing difficulty on the downstream connection. - pub downstream_difficulty_config: DownstreamDifficultyConfig, - /// Whether to aggregate all downstream connections into a single upstream channel. - /// If true, all miners share one channel. If false, each miner gets its own channel. - pub aggregate_channels: bool, - /// The path to the log file for the Translator. - log_file: Option, -} - -#[derive(Debug, Deserialize, Clone)] -pub struct Upstream { - /// The address of the upstream server. - pub address: String, - /// The port of the upstream server. - pub port: u16, - /// The Secp256k1 public key used to authenticate the upstream authority. - pub authority_pubkey: Secp256k1PublicKey, -} - -impl Upstream { - /// Creates a new `UpstreamConfig` instance. - pub fn new(address: String, port: u16, authority_pubkey: Secp256k1PublicKey) -> Self { - Self { - address, - port, - authority_pubkey, - } - } -} - -impl TranslatorConfig { - /// Creates a new `TranslatorConfig` instance with the specified upstream and downstream - /// configurations and version constraints. 
- #[allow(clippy::too_many_arguments)] - pub fn new( - upstreams: Vec, - downstream_address: String, - downstream_port: u16, - downstream_difficulty_config: DownstreamDifficultyConfig, - max_supported_version: u16, - min_supported_version: u16, - downstream_extranonce2_size: u16, - user_identity: String, - aggregate_channels: bool, - ) -> Self { - Self { - upstreams, - downstream_address, - downstream_port, - max_supported_version, - min_supported_version, - downstream_extranonce2_size, - user_identity, - downstream_difficulty_config, - aggregate_channels, - log_file: None, - } - } - - pub fn set_log_dir(&mut self, log_dir: Option) { - if let Some(dir) = log_dir { - self.log_file = Some(dir); - } - } - pub fn log_dir(&self) -> Option<&Path> { - self.log_file.as_deref() - } -} - -/// Configuration settings for managing difficulty adjustments on the downstream connection. -#[derive(Debug, Deserialize, Clone)] -pub struct DownstreamDifficultyConfig { - /// The minimum hashrate expected from an individual miner on the downstream connection. - pub min_individual_miner_hashrate: f32, - /// The target number of shares per minute for difficulty adjustment. - pub shares_per_minute: f32, - /// Whether to enable variable difficulty adjustment mechanism. - /// If false, difficulty will be managed by upstream (useful with JDC). - pub enable_vardiff: bool, -} - -impl DownstreamDifficultyConfig { - /// Creates a new `DownstreamDifficultyConfig` instance. 
- pub fn new( - min_individual_miner_hashrate: f32, - shares_per_minute: f32, - enable_vardiff: bool, - ) -> Self { - Self { - min_individual_miner_hashrate, - shares_per_minute, - enable_vardiff, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::str::FromStr; - - fn create_test_upstream() -> Upstream { - // Use a valid base58-encoded public key from the key-utils test cases - let pubkey_str = "9bDuixKmZqAJnrmP746n8zU1wyAQRrus7th9dxnkPg6RzQvCnan"; - let pubkey = Secp256k1PublicKey::from_str(pubkey_str).unwrap(); - Upstream::new("127.0.0.1".to_string(), 4444, pubkey) - } - - fn create_test_difficulty_config() -> DownstreamDifficultyConfig { - DownstreamDifficultyConfig::new(100.0, 5.0, true) - } - - #[test] - fn test_upstream_creation() { - let upstream = create_test_upstream(); - assert_eq!(upstream.address, "127.0.0.1"); - assert_eq!(upstream.port, 4444); - } - - #[test] - fn test_downstream_difficulty_config_creation() { - let config = create_test_difficulty_config(); - assert_eq!(config.min_individual_miner_hashrate, 100.0); - assert_eq!(config.shares_per_minute, 5.0); - assert!(config.enable_vardiff); - } - - #[test] - fn test_translator_config_creation() { - let upstreams = vec![create_test_upstream()]; - let difficulty_config = create_test_difficulty_config(); - - let config = TranslatorConfig::new( - upstreams, - "0.0.0.0".to_string(), - 3333, - difficulty_config, - 2, - 1, - 4, - "test_user".to_string(), - true, - ); - - assert_eq!(config.upstreams.len(), 1); - assert_eq!(config.downstream_address, "0.0.0.0"); - assert_eq!(config.downstream_port, 3333); - assert_eq!(config.max_supported_version, 2); - assert_eq!(config.min_supported_version, 1); - assert_eq!(config.downstream_extranonce2_size, 4); - assert_eq!(config.user_identity, "test_user"); - assert!(config.aggregate_channels); - assert!(config.log_file.is_none()); - } - - #[test] - fn test_translator_config_log_dir() { - let upstreams = vec![create_test_upstream()]; - let 
difficulty_config = create_test_difficulty_config(); - - let mut config = TranslatorConfig::new( - upstreams, - "0.0.0.0".to_string(), - 3333, - difficulty_config, - 2, - 1, - 4, - "test_user".to_string(), - false, - ); - - assert!(config.log_dir().is_none()); - - let log_path = PathBuf::from("/tmp/logs"); - config.set_log_dir(Some(log_path.clone())); - assert_eq!(config.log_dir(), Some(log_path.as_path())); - - config.set_log_dir(None); - assert_eq!(config.log_dir(), Some(log_path.as_path())); // Should remain unchanged - } - - #[test] - fn test_multiple_upstreams() { - let upstream1 = create_test_upstream(); - let mut upstream2 = create_test_upstream(); - upstream2.address = "192.168.1.1".to_string(); - upstream2.port = 5555; - - let upstreams = vec![upstream1, upstream2]; - let difficulty_config = create_test_difficulty_config(); - - let config = TranslatorConfig::new( - upstreams, - "0.0.0.0".to_string(), - 3333, - difficulty_config, - 2, - 1, - 4, - "test_user".to_string(), - true, - ); - - assert_eq!(config.upstreams.len(), 2); - assert_eq!(config.upstreams[0].address, "127.0.0.1"); - assert_eq!(config.upstreams[0].port, 4444); - assert_eq!(config.upstreams[1].address, "192.168.1.1"); - assert_eq!(config.upstreams[1].port, 5555); - } - - #[test] - fn test_vardiff_disabled_config() { - let mut difficulty_config = create_test_difficulty_config(); - difficulty_config.enable_vardiff = false; - - let upstreams = vec![create_test_upstream()]; - let config = TranslatorConfig::new( - upstreams, - "0.0.0.0".to_string(), - 3333, - difficulty_config, - 2, - 1, - 4, - "test_user".to_string(), - false, - ); - - assert!(!config.downstream_difficulty_config.enable_vardiff); - assert!(!config.aggregate_channels); - } -} diff --git a/roles/translator/src/lib/error.rs b/roles/translator/src/lib/error.rs deleted file mode 100644 index 655e2d9626..0000000000 --- a/roles/translator/src/lib/error.rs +++ /dev/null @@ -1,213 +0,0 @@ -//! ## Translator Error Module -//! -//! 
Defines the custom error types used throughout the translator proxy. -//! -//! This module centralizes error handling by providing: -//! - A primary `Error` enum encompassing various error kinds from different sources (I/O, parsing, -//! protocol logic, channels, configuration, etc.). -//! - A specific `ChannelSendError` enum for errors occurring during message sending over -//! asynchronous channels. - -use ext_config::ConfigError; -use std::{fmt, sync::PoisonError}; -use stratum_apps::stratum_core::{ - binary_sv2, framing_sv2, handlers_sv2::HandlerErrorType, noise_sv2, - parsers_sv2::ParserError as RolesParserError, sv1_api::server_to_client::SetDifficulty, -}; -use tokio::sync::broadcast; - -#[derive(Debug)] -pub enum TproxyError { - /// Generic SV1 protocol error - SV1Error, - /// Error from the network helpers library - NetworkHelpersError(stratum_apps::network_helpers::Error), - /// Error from roles logic parser library - ParserError(RolesParserError), - /// Errors on bad CLI argument input. - BadCliArgs, - /// Errors on bad `serde_json` serialize/deserialize. - BadSerdeJson(serde_json::Error), - /// Errors on bad `config` TOML deserialize. - BadConfigDeserialize(ConfigError), - /// Errors from `binary_sv2` crate. - BinarySv2(binary_sv2::Error), - /// Errors on bad noise handshake. - CodecNoise(noise_sv2::Error), - /// Errors from `framing_sv2` crate. - FramingSv2(framing_sv2::Error), - /// Errors on bad `TcpStream` connection. - Io(std::io::Error), - /// Errors on bad `String` to `int` conversion. 
- ParseInt(std::num::ParseIntError), - /// Mutex poison lock error - PoisonLock, - /// Channel receiver error - ChannelErrorReceiver(async_channel::RecvError), - /// Channel sender error - ChannelErrorSender, - /// Broadcast channel receiver error - BroadcastChannelErrorReceiver(broadcast::error::RecvError), - /// Tokio channel receiver error - TokioChannelErrorRecv(tokio::sync::broadcast::error::RecvError), - /// Error converting SetDifficulty to Message - SetDifficultyToMessage(SetDifficulty), - /// Received an unexpected message type - UnexpectedMessage(u8), - /// Job not found during share validation - JobNotFound, - /// Invalid merkle root during share validation - InvalidMerkleRoot, - /// Shutdown signal received - Shutdown, - /// Pending channel not found for the given request ID - PendingChannelNotFound(u32), - /// Represents a generic channel send failure, described by a string. - General(String), - /// Error bubbling up from translator-core library - TranslatorCore(stratum_apps::stratum_core::stratum_translation::error::StratumTranslationError), -} - -impl std::error::Error for TproxyError {} - -impl fmt::Display for TproxyError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use TproxyError::*; - match self { - General(e) => write!(f, "{e}"), - BadCliArgs => write!(f, "Bad CLI arg input"), - BadSerdeJson(ref e) => write!(f, "Bad serde json: `{e:?}`"), - BadConfigDeserialize(ref e) => write!(f, "Bad `config` TOML deserialize: `{e:?}`"), - BinarySv2(ref e) => write!(f, "Binary SV2 error: `{e:?}`"), - CodecNoise(ref e) => write!(f, "Noise error: `{e:?}"), - FramingSv2(ref e) => write!(f, "Framing SV2 error: `{e:?}`"), - Io(ref e) => write!(f, "I/O error: `{e:?}"), - ParseInt(ref e) => write!(f, "Bad convert from `String` to `int`: `{e:?}`"), - PoisonLock => write!(f, "Poison Lock error"), - ChannelErrorReceiver(ref e) => write!(f, "Channel receive error: `{e:?}`"), - BroadcastChannelErrorReceiver(ref e) => { - write!(f, "Broadcast channel 
receive error: {e:?}") - } - ChannelErrorSender => write!(f, "Sender error"), - TokioChannelErrorRecv(ref e) => write!(f, "Channel receive error: `{e:?}`"), - SetDifficultyToMessage(ref e) => { - write!(f, "Error converting SetDifficulty to Message: `{e:?}`") - } - UnexpectedMessage(message_type) => { - write!( - f, - "Received a message type that was not expected: {message_type}" - ) - } - JobNotFound => write!(f, "Job not found during share validation"), - InvalidMerkleRoot => write!(f, "Invalid merkle root during share validation"), - Shutdown => write!(f, "Shutdown signal"), - PendingChannelNotFound(request_id) => { - write!(f, "No pending channel found for request_id: {}", request_id) - } - SV1Error => write!(f, "Sv1 error"), - TranslatorCore(ref e) => write!(f, "Translator core error: {e:?}"), - NetworkHelpersError(ref e) => write!(f, "Network helpers error: {e:?}"), - ParserError(ref e) => write!(f, "Roles logic parser error: {e:?}"), - } - } -} - -impl From for TproxyError { - fn from(e: binary_sv2::Error) -> Self { - TproxyError::BinarySv2(e) - } -} - -impl From for TproxyError { - fn from(e: noise_sv2::Error) -> Self { - TproxyError::CodecNoise(e) - } -} - -impl From for TproxyError { - fn from(e: framing_sv2::Error) -> Self { - TproxyError::FramingSv2(e) - } -} - -impl From for TproxyError { - fn from(e: std::io::Error) -> Self { - TproxyError::Io(e) - } -} - -impl From for TproxyError { - fn from(e: std::num::ParseIntError) -> Self { - TproxyError::ParseInt(e) - } -} - -impl From for TproxyError { - fn from(e: serde_json::Error) -> Self { - TproxyError::BadSerdeJson(e) - } -} - -impl From for TproxyError { - fn from(e: ConfigError) -> Self { - TproxyError::BadConfigDeserialize(e) - } -} - -impl From for TproxyError { - fn from(e: async_channel::RecvError) -> Self { - TproxyError::ChannelErrorReceiver(e) - } -} - -impl From for TproxyError { - fn from(e: tokio::sync::broadcast::error::RecvError) -> Self { - TproxyError::TokioChannelErrorRecv(e) - } -} - 
-//*** LOCK ERRORS *** -impl From> for TproxyError { - fn from(_e: PoisonError) -> Self { - TproxyError::PoisonLock - } -} - -impl From for TproxyError { - fn from(e: SetDifficulty) -> Self { - TproxyError::SetDifficultyToMessage(e) - } -} - -impl<'a> From> for TproxyError { - fn from(_: stratum_apps::stratum_core::sv1_api::error::Error<'a>) -> Self { - TproxyError::SV1Error - } -} - -impl From for TproxyError { - fn from(value: stratum_apps::network_helpers::Error) -> Self { - TproxyError::NetworkHelpersError(value) - } -} - -impl From - for TproxyError -{ - fn from( - e: stratum_apps::stratum_core::stratum_translation::error::StratumTranslationError, - ) -> Self { - TproxyError::TranslatorCore(e) - } -} - -impl HandlerErrorType for TproxyError { - fn parse_error(error: RolesParserError) -> Self { - TproxyError::ParserError(error) - } - - fn unexpected_message(message_type: u8) -> Self { - TproxyError::UnexpectedMessage(message_type) - } -} diff --git a/roles/translator/src/lib/mod.rs b/roles/translator/src/lib/mod.rs deleted file mode 100644 index 298d12a0c1..0000000000 --- a/roles/translator/src/lib/mod.rs +++ /dev/null @@ -1,255 +0,0 @@ -//! ## Translator Sv2 -//! -//! Provides the core logic and main struct (`TranslatorSv2`) for running a -//! Stratum V1 to Stratum V2 translation proxy. -//! -//! This module orchestrates the interaction between downstream SV1 miners and upstream SV2 -//! applications (proxies or pool servers). -//! -//! The central component is the `TranslatorSv2` struct, which encapsulates the state and -//! provides the `start` method as the main entry point for running the translator service. -//! It relies on several sub-modules (`config`, `downstream_sv1`, `upstream_sv2`, `proxy`, `status`, -//! etc.) for specialized functionalities. 
-#![allow(clippy::module_inception)] -use async_channel::unbounded; -use std::{net::SocketAddr, sync::Arc}; -use tokio::sync::mpsc; -use tracing::{debug, error, info, warn}; - -pub use stratum_apps::stratum_core::sv1_api::server_to_client; - -use config::TranslatorConfig; - -use crate::{ - status::{State, Status}, - sv1::sv1_server::sv1_server::Sv1Server, - sv2::{channel_manager::ChannelMode, ChannelManager, Upstream}, - task_manager::TaskManager, - utils::ShutdownMessage, -}; - -pub mod config; -pub mod error; -pub mod status; -pub mod sv1; -pub mod sv2; -mod task_manager; -pub mod utils; - -/// The main struct that manages the SV1/SV2 translator. -#[derive(Clone, Debug)] -pub struct TranslatorSv2 { - config: TranslatorConfig, -} - -impl TranslatorSv2 { - /// Creates a new `TranslatorSv2`. - /// - /// Initializes the translator with the given configuration and sets up - /// the reconnect wait time. - pub fn new(config: TranslatorConfig) -> Self { - Self { config } - } - - /// Starts the translator. - /// - /// This method starts the main event loop, which handles connections, - /// protocol translation, job management, and status reporting. 
- pub async fn start(self) { - info!("Starting Translator Proxy..."); - - let (notify_shutdown, _) = tokio::sync::broadcast::channel::(1); - let (shutdown_complete_tx, mut shutdown_complete_rx) = mpsc::channel::<()>(1); - let task_manager = Arc::new(TaskManager::new()); - - let (status_sender, status_receiver) = async_channel::unbounded::(); - - let (channel_manager_to_upstream_sender, channel_manager_to_upstream_receiver) = - unbounded(); - let (upstream_to_channel_manager_sender, upstream_to_channel_manager_receiver) = - unbounded(); - let (channel_manager_to_sv1_server_sender, channel_manager_to_sv1_server_receiver) = - unbounded(); - let (sv1_server_to_channel_manager_sender, sv1_server_to_channel_manager_receiver) = - unbounded(); - - debug!("Channels initialized."); - - let upstream_addresses = self - .config - .upstreams - .iter() - .map(|upstream| { - let upstream_addr = - SocketAddr::new(upstream.address.parse().unwrap(), upstream.port); - (upstream_addr, upstream.authority_pubkey) - }) - .collect::>(); - - let upstream = match Upstream::new( - &upstream_addresses, - upstream_to_channel_manager_sender.clone(), - channel_manager_to_upstream_receiver.clone(), - notify_shutdown.clone(), - shutdown_complete_tx.clone(), - ) - .await - { - Ok(upstream) => { - debug!("Upstream initialized successfully."); - upstream - } - Err(e) => { - error!("Failed to initialize upstream connection: {e:?}"); - return; - } - }; - - let channel_manager = Arc::new(ChannelManager::new( - channel_manager_to_upstream_sender, - upstream_to_channel_manager_receiver, - channel_manager_to_sv1_server_sender.clone(), - sv1_server_to_channel_manager_receiver, - if self.config.aggregate_channels { - ChannelMode::Aggregated - } else { - ChannelMode::NonAggregated - }, - )); - - let downstream_addr = SocketAddr::new( - self.config.downstream_address.parse().unwrap(), - self.config.downstream_port, - ); - - let sv1_server = Arc::new(Sv1Server::new( - downstream_addr, - 
channel_manager_to_sv1_server_receiver, - sv1_server_to_channel_manager_sender, - self.config.clone(), - )); - - ChannelManager::run_channel_manager_tasks( - channel_manager.clone(), - notify_shutdown.clone(), - shutdown_complete_tx.clone(), - status_sender.clone(), - task_manager.clone(), - ) - .await; - - if let Err(e) = upstream - .start( - notify_shutdown.clone(), - shutdown_complete_tx.clone(), - status_sender.clone(), - task_manager.clone(), - ) - .await - { - error!("Failed to start upstream listener: {e:?}"); - return; - } - - let notify_shutdown_clone = notify_shutdown.clone(); - let shutdown_complete_tx_clone = shutdown_complete_tx.clone(); - let status_sender_clone = status_sender.clone(); - let task_manager_clone = task_manager.clone(); - task_manager.spawn(async move { - loop { - tokio::select! { - _ = tokio::signal::ctrl_c() => { - info!("Ctrl+C received β€” initiating graceful shutdown..."); - let _ = notify_shutdown_clone.send(ShutdownMessage::ShutdownAll); - break; - } - message = status_receiver.recv() => { - if let Ok(status) = message { - match status.state { - State::DownstreamShutdown{downstream_id,..} => { - warn!("Downstream {downstream_id:?} disconnected β€” notifying SV1 server."); - let _ = notify_shutdown_clone.send(ShutdownMessage::DownstreamShutdown(downstream_id)); - } - State::Sv1ServerShutdown(_) => { - warn!("SV1 Server shutdown requested β€” initiating full shutdown."); - let _ = notify_shutdown_clone.send(ShutdownMessage::ShutdownAll); - break; - } - State::ChannelManagerShutdown(_) => { - warn!("Channel Manager shutdown requested β€” initiating full shutdown."); - let _ = notify_shutdown_clone.send(ShutdownMessage::ShutdownAll); - break; - } - State::UpstreamShutdown(msg) => { - warn!("Upstream connection dropped: {msg:?} β€” attempting reconnection..."); - - match Upstream::new( - &upstream_addresses, - upstream_to_channel_manager_sender.clone(), - channel_manager_to_upstream_receiver.clone(), - notify_shutdown_clone.clone(), - 
shutdown_complete_tx_clone.clone(), - ).await { - Ok(upstream) => { - if let Err(e) = upstream - .start( - notify_shutdown_clone.clone(), - shutdown_complete_tx_clone.clone(), - status_sender_clone.clone(), - task_manager_clone.clone() - ) - .await - { - error!("Restarted upstream failed to start: {e:?}"); - let _ = notify_shutdown_clone.send(ShutdownMessage::ShutdownAll); - break; - } else { - info!("Upstream restarted successfully."); - // Reset channel manager state and shutdown downstreams in one message - let _ = notify_shutdown_clone.send(ShutdownMessage::UpstreamReconnectedResetAndShutdownDownstreams); - } - } - Err(e) => { - error!("Failed to reinitialize upstream after disconnect: {e:?}"); - let _ = notify_shutdown_clone.send(ShutdownMessage::ShutdownAll); - break; - } - } - } - } - } - } - } - } - }); - - if let Err(e) = Sv1Server::start( - sv1_server, - notify_shutdown.clone(), - shutdown_complete_tx.clone(), - status_sender.clone(), - task_manager.clone(), - ) - .await - { - error!("SV1 server startup failed: {e:?}"); - notify_shutdown.send(ShutdownMessage::ShutdownAll).unwrap(); - } - - drop(shutdown_complete_tx); - info!("Waiting for shutdown completion signals from subsystems..."); - let shutdown_timeout = tokio::time::Duration::from_secs(5); - tokio::select! { - _ = shutdown_complete_rx.recv() => { - info!("All subsystems reported shutdown complete."); - } - _ = tokio::time::sleep(shutdown_timeout) => { - warn!("Graceful shutdown timed out after {shutdown_timeout:?} β€” forcing shutdown."); - task_manager.abort_all().await; - } - } - info!("Joining remaining tasks..."); - task_manager.join_all().await; - info!("TranslatorSv2 shutdown complete."); - } -} diff --git a/roles/translator/src/lib/status.rs b/roles/translator/src/lib/status.rs deleted file mode 100644 index 896cff9a93..0000000000 --- a/roles/translator/src/lib/status.rs +++ /dev/null @@ -1,117 +0,0 @@ -//! ## Status Reporting System -//! -//! 
This module provides a centralized way for components of the Translator to report -//! health updates, shutdown reasons, or fatal errors to the main runtime loop. -//! -//! Each task wraps its report in a [`Status`] and sends it over an async channel, -//! tagged with a [`Sender`] variant that identifies the source subsystem. - -use tracing::{debug, error, warn}; - -use crate::error::TproxyError; - -/// Identifies the component that originated a [`Status`] update. -/// -/// Each variant contains a channel to the main coordinator, and optionally a component ID -/// (e.g. a downstream connection ID). -#[derive(Debug, Clone)] -pub enum StatusSender { - /// A specific downstream connection. - Downstream { - downstream_id: u32, - tx: async_channel::Sender, - }, - /// The SV1 server listener. - Sv1Server(async_channel::Sender), - /// The SV2 <-> SV1 bridge manager. - ChannelManager(async_channel::Sender), - /// The upstream SV2 connection handler. - Upstream(async_channel::Sender), -} - -impl StatusSender { - /// Sends a [`Status`] update. - pub async fn send(&self, status: Status) -> Result<(), async_channel::SendError> { - match self { - Self::Downstream { downstream_id, tx } => { - debug!( - "Sending status from Downstream [{}]: {:?}", - downstream_id, status.state - ); - tx.send(status).await - } - Self::Sv1Server(tx) => { - debug!("Sending status from Sv1Server: {:?}", status.state); - tx.send(status).await - } - Self::ChannelManager(tx) => { - debug!("Sending status from ChannelManager: {:?}", status.state); - tx.send(status).await - } - Self::Upstream(tx) => { - debug!("Sending status from Upstream: {:?}", status.state); - tx.send(status).await - } - } - } -} - -/// The type of event or error being reported by a component. -#[derive(Debug)] -pub enum State { - /// Downstream task exited or encountered an unrecoverable error. - DownstreamShutdown { - downstream_id: u32, - reason: TproxyError, - }, - /// SV1 server listener exited unexpectedly. 
- Sv1ServerShutdown(TproxyError), - /// Channel manager shut down (SV2 bridge manager). - ChannelManagerShutdown(TproxyError), - /// Upstream SV2 connection closed or failed. - UpstreamShutdown(TproxyError), -} - -/// A message reporting the current [`State`] of a component. -#[derive(Debug)] -pub struct Status { - pub state: State, -} - -/// Constructs and sends a [`Status`] update based on the [`Sender`] and error context. -async fn send_status(sender: &StatusSender, error: TproxyError) { - let state = match sender { - StatusSender::Downstream { downstream_id, .. } => { - warn!("Downstream [{downstream_id}] shutting down due to error: {error:?}"); - State::DownstreamShutdown { - downstream_id: *downstream_id, - reason: error, - } - } - StatusSender::Sv1Server(_) => { - warn!("Sv1Server shutting down due to error: {error:?}"); - State::Sv1ServerShutdown(error) - } - StatusSender::ChannelManager(_) => { - warn!("ChannelManager shutting down due to error: {error:?}"); - State::ChannelManagerShutdown(error) - } - StatusSender::Upstream(_) => { - warn!("Upstream shutting down due to error: {error:?}"); - State::UpstreamShutdown(error) - } - }; - - if let Err(e) = sender.send(Status { state }).await { - error!("Failed to send status update from {sender:?}: {e:?}"); - } -} - -/// Centralized error dispatcher for the Translator. -/// -/// Used by the `handle_result!` macro across the codebase. -/// Decides whether the task should `Continue` or `Break` based on the error type and source. 
-pub async fn handle_error(sender: &StatusSender, e: TproxyError) { - error!("Error in {:?}: {:?}", sender, e); - send_status(sender, e).await; -} diff --git a/roles/translator/src/lib/sv1/downstream/channel.rs b/roles/translator/src/lib/sv1/downstream/channel.rs deleted file mode 100644 index cc53d49024..0000000000 --- a/roles/translator/src/lib/sv1/downstream/channel.rs +++ /dev/null @@ -1,35 +0,0 @@ -use super::DownstreamMessages; -use async_channel::{Receiver, Sender}; -use stratum_apps::stratum_core::sv1_api::json_rpc; -use tokio::sync::broadcast; -use tracing::debug; - -#[derive(Debug)] -pub struct DownstreamChannelState { - pub downstream_sv1_sender: Sender, - pub downstream_sv1_receiver: Receiver, - pub sv1_server_sender: Sender, - pub sv1_server_receiver: broadcast::Receiver<(u32, Option, json_rpc::Message)>, /* channel_id, optional downstream_id, message */ -} - -impl DownstreamChannelState { - pub fn new( - downstream_sv1_sender: Sender, - downstream_sv1_receiver: Receiver, - sv1_server_sender: Sender, - sv1_server_receiver: broadcast::Receiver<(u32, Option, json_rpc::Message)>, - ) -> Self { - Self { - downstream_sv1_receiver, - downstream_sv1_sender, - sv1_server_receiver, - sv1_server_sender, - } - } - - pub fn drop(&self) { - debug!("Dropping downstream channel state"); - self.downstream_sv1_receiver.close(); - self.downstream_sv1_sender.close(); - } -} diff --git a/roles/translator/src/lib/sv1/downstream/data.rs b/roles/translator/src/lib/sv1/downstream/data.rs deleted file mode 100644 index 4dae61e83a..0000000000 --- a/roles/translator/src/lib/sv1/downstream/data.rs +++ /dev/null @@ -1,98 +0,0 @@ -use std::{ - cell::RefCell, - sync::{atomic::AtomicBool, Arc}, -}; -use stratum_apps::{ - custom_mutex::Mutex, - stratum_core::{ - bitcoin::Target, - sv1_api::{json_rpc, utils::HexU32Be}, - }, -}; -use tracing::debug; - -use super::SubmitShareWithChannelId; -use crate::sv1::sv1_server::data::Sv1ServerData; - -#[derive(Debug)] -pub struct DownstreamData { 
- pub channel_id: Option, - pub downstream_id: u32, - pub extranonce1: Vec, - pub extranonce2_len: usize, - pub version_rolling_mask: Option, - pub version_rolling_min_bit: Option, - pub last_job_version_field: Option, - pub authorized_worker_name: String, - pub user_identity: String, - pub target: Target, - pub hashrate: Option, - pub cached_set_difficulty: Option, - pub cached_notify: Option, - pub pending_target: Option, - pub pending_hashrate: Option, - // Flag to track if SV1 handshake is complete (subscribe + authorize) - pub sv1_handshake_complete: AtomicBool, - // Queue of Sv1 handshake messages received while waiting for SV2 channel to open - pub queued_sv1_handshake_messages: Vec, - // Flag to indicate we're processing queued Sv1 handshake message responses - pub processing_queued_sv1_handshake_responses: AtomicBool, - // Stores pending shares to be sent to the sv1_server - pub pending_share: RefCell>, - // Reference to shared sv1_server data for accessing valid_jobs during downstream sv1 - // validation - pub sv1_server_data: Arc>, - // Tracks the upstream target for this downstream, used for vardiff target comparison - pub upstream_target: Option, -} - -impl DownstreamData { - pub fn new( - downstream_id: u32, - target: Target, - hashrate: Option, - sv1_server_data: Arc>, - ) -> Self { - DownstreamData { - channel_id: None, - downstream_id, - extranonce1: vec![0; 8], - extranonce2_len: 4, - version_rolling_mask: None, - version_rolling_min_bit: None, - last_job_version_field: None, - authorized_worker_name: String::new(), - user_identity: String::new(), - target, - hashrate, - cached_set_difficulty: None, - cached_notify: None, - pending_target: None, - pending_hashrate: None, - sv1_handshake_complete: AtomicBool::new(false), - queued_sv1_handshake_messages: Vec::new(), - processing_queued_sv1_handshake_responses: AtomicBool::new(false), - pending_share: RefCell::new(None), - sv1_server_data, - upstream_target: None, - } - } - - pub fn 
set_pending_target(&mut self, new_target: Target) { - self.pending_target = Some(new_target); - debug!("Downstream {}: Set pending target", self.downstream_id); - } - - pub fn set_pending_hashrate(&mut self, new_hashrate: Option) { - self.pending_hashrate = new_hashrate; - debug!("Downstream {}: Set pending hashrate", self.downstream_id); - } - - pub fn set_upstream_target(&mut self, upstream_target: Target) { - self.upstream_target = Some(upstream_target); - debug!( - "Downstream {}: Set upstream target to {:?}", - self.downstream_id, upstream_target - ); - } -} diff --git a/roles/translator/src/lib/sv1/downstream/downstream.rs b/roles/translator/src/lib/sv1/downstream/downstream.rs deleted file mode 100644 index 4e8923e743..0000000000 --- a/roles/translator/src/lib/sv1/downstream/downstream.rs +++ /dev/null @@ -1,531 +0,0 @@ -use super::DownstreamMessages; -use crate::{ - error::TproxyError, - status::{handle_error, StatusSender}, - sv1::{ - downstream::{channel::DownstreamChannelState, data::DownstreamData}, - sv1_server::data::Sv1ServerData, - }, - task_manager::TaskManager, - utils::ShutdownMessage, -}; -use async_channel::{Receiver, Sender}; -use std::sync::Arc; -use stratum_apps::{ - custom_mutex::Mutex, - stratum_core::{ - bitcoin::Target, - sv1_api::{ - json_rpc::{self, Message}, - server_to_client, IsServer, - }, - }, -}; -use tokio::sync::{broadcast, mpsc}; -use tracing::{debug, error, info, warn}; - -/// Represents a downstream SV1 miner connection. -/// -/// This struct manages the state and communication for a single SV1 miner connected -/// to the translator. 
It handles: -/// - SV1 protocol message processing (subscribe, authorize, submit) -/// - Bidirectional message routing between miner and SV1 server -/// - Mining job tracking and share validation -/// - Difficulty adjustment coordination -/// - Connection lifecycle management -/// -/// Each downstream connection runs in its own async task that processes messages -/// from both the miner and the server, ensuring proper message ordering and -/// handling connection-specific state. -#[derive(Debug)] -pub struct Downstream { - pub downstream_data: Arc>, - downstream_channel_state: DownstreamChannelState, -} - -impl Downstream { - /// Creates a new downstream connection instance. - #[allow(clippy::too_many_arguments)] - pub fn new( - downstream_id: u32, - downstream_sv1_sender: Sender, - downstream_sv1_receiver: Receiver, - sv1_server_sender: Sender, - sv1_server_receiver: broadcast::Receiver<(u32, Option, json_rpc::Message)>, - target: Target, - hashrate: Option, - sv1_server_data: Arc>, - ) -> Self { - let downstream_data = Arc::new(Mutex::new(DownstreamData::new( - downstream_id, - target, - hashrate, - sv1_server_data, - ))); - let downstream_channel_state = DownstreamChannelState::new( - downstream_sv1_sender, - downstream_sv1_receiver, - sv1_server_sender, - sv1_server_receiver, - ); - Self { - downstream_data, - downstream_channel_state, - } - } - - /// Spawns and runs the main task loop for this downstream connection. - /// - /// This method creates an async task that handles all communication for this - /// downstream connection. The task runs a select loop that processes: - /// - Shutdown signals (global, targeted, or all-downstream) - /// - Messages from the miner (subscribe, authorize, submit) - /// - Messages from the SV1 server (notify, set_difficulty, etc.) - /// - /// The task will continue running until a shutdown signal is received or - /// an unrecoverable error occurs. It ensures graceful cleanup of resources - /// and proper error reporting. 
- pub fn run_downstream_tasks( - self: Arc, - notify_shutdown: broadcast::Sender, - shutdown_complete_tx: mpsc::Sender<()>, - status_sender: StatusSender, - task_manager: Arc, - ) { - let mut sv1_server_receiver = self - .downstream_channel_state - .sv1_server_receiver - .resubscribe(); - let mut shutdown_rx = notify_shutdown.subscribe(); - let downstream_id = self.downstream_data.super_safe_lock(|d| d.downstream_id); - task_manager.spawn(async move { - loop { - tokio::select! { - msg = shutdown_rx.recv() => { - match msg { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Downstream {downstream_id}: received global shutdown"); - break; - } - Ok(ShutdownMessage::DownstreamShutdown(id)) if id == downstream_id => { - info!("Downstream {downstream_id}: received targeted shutdown"); - break; - } - Ok(ShutdownMessage::DownstreamShutdownAll) => { - info!("All downstream shutdown message received"); - break; - } - Ok(ShutdownMessage::UpstreamReconnectedResetAndShutdownDownstreams) => { - info!("All downstream shutdown message received (upstream reconnected)"); - break; - } - Ok(_) => { - // shutdown for other downstream - } - Err(e) => { - warn!("Downstream {downstream_id}: shutdown channel closed: {e}"); - break; - } - } - } - - // Handle downstream -> server message - res = Self::handle_downstream_message(self.clone()) => { - if let Err(e) = res { - error!("Downstream {downstream_id}: error in downstream message handler: {e:?}"); - handle_error(&status_sender, e).await; - break; - } - } - - // Handle server -> downstream message - res = Self::handle_sv1_server_message(self.clone(),&mut sv1_server_receiver) => { - if let Err(e) = res { - error!("Downstream {downstream_id}: error in server message handler: {e:?}"); - handle_error(&status_sender, e).await; - break; - } - } - - else => { - warn!("Downstream {downstream_id}: all channels closed; exiting task"); - break; - } - } - } - - warn!("Downstream {downstream_id}: unified task shutting down"); - 
self.downstream_channel_state.drop(); - drop(shutdown_complete_tx); - }); - } - - /// Handles messages received from the SV1 server. - /// - /// This method processes messages broadcast from the SV1 server to downstream - /// connections. Since `mining.notify` messages are guaranteed to never arrive - /// before their corresponding `mining.set_difficulty` message, the logic is - /// simplified to handle only handshake completion timing. - /// - /// Key behaviors: - /// - Filters messages by channel ID and downstream ID - /// - For `mining.set_difficulty`: Always caches the message (never sent immediately) - /// - For `mining.notify`: Sends any pending set_difficulty first, then forwards the notify - /// - For other messages: Forwards directly to the miner - /// - Caches both `mining.set_difficulty` and `mining.notify` messages if handshake is not yet - /// complete - /// - On handshake completion: sends cached messages in correct order (set_difficulty first, - /// then notify) - pub async fn handle_sv1_server_message( - self: Arc, - sv1_server_receiver: &mut broadcast::Receiver<(u32, Option, json_rpc::Message)>, - ) -> Result<(), TproxyError> { - match sv1_server_receiver.recv().await { - Ok((channel_id, downstream_id, message)) => { - let (my_channel_id, my_downstream_id, handshake_complete) = - self.downstream_data.super_safe_lock(|d| { - ( - d.channel_id, - d.downstream_id, - d.sv1_handshake_complete - .load(std::sync::atomic::Ordering::SeqCst), - ) - }); - let id_matches = (my_channel_id == Some(channel_id) || channel_id == 0) - && (downstream_id.is_none() || downstream_id == Some(my_downstream_id)); - if !id_matches { - return Ok(()); // Message not intended for this downstream - } - - // Check if this is a queued message response - let is_queued_sv1_handshake_response = self.downstream_data.super_safe_lock(|d| { - d.processing_queued_sv1_handshake_responses - .load(std::sync::atomic::Ordering::SeqCst) - }); - - // Handle messages based on message type and 
handshake state - if let Message::Notification(notification) = &message { - // For notifications (mining.set_difficulty, mining.notify), only send if - // handshake is complete - if handshake_complete { - match notification.method.as_str() { - "mining.set_difficulty" => { - // Cache the Sv1 set_difficulty message to be sent before the next - // notify - debug!("Down: Caching mining.set_difficulty to send before next mining.notify"); - self.downstream_data.super_safe_lock(|d| { - d.cached_set_difficulty = Some(message); - }); - return Ok(()); - } - "mining.notify" => { - let (pending_set_difficulty, notify_opt) = - self.downstream_data.super_safe_lock(|d| { - let cached_set_difficulty = d.cached_set_difficulty.take(); - - // Prepare the notify message and update state - let notify_result = server_to_client::Notify::try_from( - notification.clone(), - ); - if let Ok(mut notify) = notify_result { - if cached_set_difficulty.is_some() { - notify.clean_jobs = true; - } - d.last_job_version_field = Some(notify.version.0); - - // Update target and hashrate if we're sending - // set_difficulty - if cached_set_difficulty.is_some() { - if let Some(new_target) = d.pending_target.take() { - d.target = new_target; - } - if let Some(new_hashrate) = - d.pending_hashrate.take() - { - d.hashrate = Some(new_hashrate); - } - } - - (cached_set_difficulty, Some(notify)) - } else { - (cached_set_difficulty, None) - } - }); - - if let Some(set_difficulty_msg) = &pending_set_difficulty { - debug!("Down: Sending pending mining.set_difficulty before mining.notify"); - self.downstream_channel_state - .downstream_sv1_sender - .send(set_difficulty_msg.clone()) - .await - .map_err(|e| { - error!( - "Down: Failed to send mining.set_difficulty to downstream: {:?}", - e - ); - TproxyError::ChannelErrorSender - })?; - } - - if let Some(notify) = notify_opt { - debug!("Down: Sending mining.notify"); - self.downstream_channel_state - .downstream_sv1_sender - .send(notify.into()) - .await - 
.map_err(|e| { - error!("Down: Failed to send mining.notify to downstream: {:?}", e); - TproxyError::ChannelErrorSender - })?; - } - return Ok(()); - } - _ => { - // Other notifications - forward if handshake complete - self.downstream_channel_state - .downstream_sv1_sender - .send(message.clone()) - .await - .map_err(|e| { - error!( - "Down: Failed to send notification to downstream: {:?}", - e - ); - TproxyError::ChannelErrorSender - })?; - } - } - } else { - // Handshake not complete - cache mining notifications but skip others - match notification.method.as_str() { - "mining.set_difficulty" => { - debug!("Down: SV1 handshake not complete, caching mining.set_difficulty"); - self.downstream_data.super_safe_lock(|d| { - d.cached_set_difficulty = Some(message); - }); - } - "mining.notify" => { - debug!("Down: SV1 handshake not complete, caching mining.notify"); - self.downstream_data.super_safe_lock(|d| { - d.cached_notify = Some(message.clone()); - let notify = - server_to_client::Notify::try_from(notification.clone()) - .expect("this must be a mining.notify"); - d.last_job_version_field = Some(notify.version.0); - }); - } - _ => { - debug!( - "Down: SV1 handshake not complete, skipping other notification" - ); - } - } - } - } else if is_queued_sv1_handshake_response { - // For non-notification messages, send if processing queued handshake responses - self.downstream_channel_state - .downstream_sv1_sender - .send(message.clone()) - .await - .map_err(|e| { - error!("Down: Failed to send queued message to downstream: {:?}", e); - TproxyError::ChannelErrorSender - })?; - } else { - // Neither handshake complete nor queued response - skip non-notification - // messages - debug!("Down: SV1 handshake not complete, skipping non-notification message"); - } - } - Err(e) => { - let downstream_id = self.downstream_data.super_safe_lock(|d| d.downstream_id); - error!( - "Sv1 message handler error for downstream {}: {:?}", - downstream_id, e - ); - return 
Err(TproxyError::BroadcastChannelErrorReceiver(e)); - } - } - - Ok(()) - } - - /// Handles messages received from the downstream SV1 miner. - /// - /// This method processes SV1 protocol messages sent by the miner, including: - /// - `mining.subscribe` - Subscription requests - /// - `mining.authorize` - Authorization requests - /// - `mining.submit` - Share submissions - /// - Other SV1 protocol messages - /// - /// The method delegates message processing to the downstream data handler, - /// which implements the SV1 protocol logic and generates appropriate responses. - /// Responses are sent back to the miner, while share submissions are forwarded - /// to the SV1 server for upstream processing. - pub async fn handle_downstream_message(self: Arc) -> Result<(), TproxyError> { - let message = match self - .downstream_channel_state - .downstream_sv1_receiver - .recv() - .await - { - Ok(msg) => msg, - Err(e) => { - error!("Error receiving downstream message: {:?}", e); - return Err(TproxyError::ChannelErrorReceiver(e)); - } - }; - - // Check if channel is established - let channel_established = self - .downstream_data - .super_safe_lock(|d| d.channel_id.is_some()); - - if !channel_established { - // Check if this is the first message (queue is empty) and send OpenChannel request - let is_first_message = self - .downstream_data - .super_safe_lock(|d| d.queued_sv1_handshake_messages.is_empty()); - - if is_first_message { - let downstream_id = self.downstream_data.super_safe_lock(|d| d.downstream_id); - self.downstream_channel_state - .sv1_server_sender - .send(DownstreamMessages::OpenChannel(downstream_id)) - .await - .map_err(|e| { - error!("Down: Failed to send OpenChannel request: {:?}", e); - TproxyError::ChannelErrorSender - })?; - debug!( - "Down: Sent OpenChannel request for downstream {}", - downstream_id - ); - } - - // Queue all messages until channel is established - debug!("Down: Queuing Sv1 message until channel is established"); - 
self.downstream_data.safe_lock(|d| { - d.queued_sv1_handshake_messages.push(message.clone()); - })?; - return Ok(()); - } - - // Channel is established, process message normally - let response = self - .downstream_data - .super_safe_lock(|data| data.handle_message(message.clone())); - - match response { - Ok(Some(response_msg)) => { - debug!( - "Down: Sending Sv1 message to downstream: {:?}", - response_msg - ); - self.downstream_channel_state - .downstream_sv1_sender - .send(response_msg.into()) - .await - .map_err(|e| { - error!("Down: Failed to send message to downstream: {:?}", e); - TproxyError::ChannelErrorSender - })?; - - // Check if this was an authorize message and handle sv1 handshake completion - if let stratum_apps::stratum_core::sv1_api::json_rpc::Message::StandardRequest( - request, - ) = &message - { - if request.method == "mining.authorize" { - info!("Down: Handling mining.authorize after handshake completion"); - if let Err(e) = self.handle_sv1_handshake_completion().await { - error!("Down: Failed to handle handshake completion: {:?}", e); - return Err(e); - } - } - } - } - Ok(None) => { - // Message was handled but no response needed - } - Err(e) => { - error!("Down: Error handling downstream message: {:?}", e); - return Err(e.into()); - } - } - - // Check if there's a pending share to send to the Sv1Server - let pending_share = self - .downstream_data - .super_safe_lock(|d| d.pending_share.take()); - if let Some(share) = pending_share { - self.downstream_channel_state - .sv1_server_sender - .send(DownstreamMessages::SubmitShares(share)) - .await - .map_err(|e| { - error!("Down: Failed to send share to SV1 server: {:?}", e); - TproxyError::ChannelErrorSender - })?; - } - - Ok(()) - } - - /// Handles SV1 handshake completion after mining.authorize. - /// - /// This method is called when the downstream completes the SV1 handshake - /// (subscribe + authorize). 
It sends any cached messages in the correct order: - /// set_difficulty first, then notify. - async fn handle_sv1_handshake_completion(self: &Arc) -> Result<(), TproxyError> { - let (cached_set_difficulty, cached_notify) = self.downstream_data.super_safe_lock(|d| { - d.sv1_handshake_complete - .store(true, std::sync::atomic::Ordering::SeqCst); - (d.cached_set_difficulty.take(), d.cached_notify.take()) - }); - debug!("Down: SV1 handshake completed for downstream"); - - // Send cached messages in correct order: set_difficulty first, then notify - if let Some(set_difficulty_msg) = cached_set_difficulty { - debug!("Down: Sending cached mining.set_difficulty after handshake completion"); - self.downstream_channel_state - .downstream_sv1_sender - .send(set_difficulty_msg) - .await - .map_err(|e| { - error!( - "Down: Failed to send cached mining.set_difficulty to downstream: {:?}", - e - ); - TproxyError::ChannelErrorSender - })?; - - // Update target and hashrate after sending set_difficulty - self.downstream_data.super_safe_lock(|d| { - if let Some(new_target) = d.pending_target.take() { - d.target = new_target; - } - if let Some(new_hashrate) = d.pending_hashrate.take() { - d.hashrate = Some(new_hashrate); - } - }); - } - - if let Some(notify_msg) = cached_notify { - debug!("Down: Sending cached mining.notify after handshake completion"); - self.downstream_channel_state - .downstream_sv1_sender - .send(notify_msg) - .await - .map_err(|e| { - error!( - "Down: Failed to send cached mining.notify to downstream: {:?}", - e - ); - TproxyError::ChannelErrorSender - })?; - } - - Ok(()) - } -} diff --git a/roles/translator/src/lib/sv1/downstream/message_handler.rs b/roles/translator/src/lib/sv1/downstream/message_handler.rs deleted file mode 100644 index 6ab75c619e..0000000000 --- a/roles/translator/src/lib/sv1/downstream/message_handler.rs +++ /dev/null @@ -1,165 +0,0 @@ -use stratum_apps::stratum_core::sv1_api::{ - client_to_server, json_rpc, server_to_client, - 
utils::{Extranonce, HexU32Be}, - IsServer, -}; -use tracing::{debug, error, info, warn}; - -use crate::{ - sv1::downstream::{data::DownstreamData, SubmitShareWithChannelId}, - utils::validate_sv1_share, -}; - -// Implements `IsServer` for `Downstream` to handle the Sv1 messages. -impl IsServer<'static> for DownstreamData { - fn handle_configure( - &mut self, - request: &client_to_server::Configure, - ) -> (Option, Option) { - info!("Received mining.configure from Sv1 downstream"); - debug!("Down: Handling mining.configure: {:?}", request); - self.version_rolling_mask = request - .version_rolling_mask() - .map(|mask| HexU32Be(mask & 0x1FFFE000)); - self.version_rolling_min_bit = request.version_rolling_min_bit_count(); - - debug!( - "Negotiated version_rolling_mask is {:?}", - self.version_rolling_mask - ); - ( - Some(server_to_client::VersionRollingParams::new( - self.version_rolling_mask.clone().unwrap_or(HexU32Be(0)), - self.version_rolling_min_bit.clone().unwrap_or(HexU32Be(0)), - ).expect("Version mask invalid, automatic version mask selection not supported, please change it in crate::downstream::mod.rs")), - Some(false), - ) - } - - fn handle_subscribe(&self, request: &client_to_server::Subscribe) -> Vec<(String, String)> { - info!("Received mining.subscribe from Sv1 downstream"); - debug!("Down: Handling mining.subscribe: {:?}", request); - - let set_difficulty_sub = ( - "mining.set_difficulty".to_string(), - self.downstream_id.to_string(), - ); - - let notify_sub = ( - "mining.notify".to_string(), - "ae6812eb4cd7735a302a8a9dd95cf71f".to_string(), - ); - - vec![set_difficulty_sub, notify_sub] - } - - fn handle_authorize(&self, request: &client_to_server::Authorize) -> bool { - info!("Received mining.authorize from Sv1 downstream"); - debug!("Down: Handling mining.authorize: {:?}", request); - true - } - - fn handle_submit(&self, request: &client_to_server::Submit<'static>) -> bool { - if let Some(channel_id) = self.channel_id { - info!( - "Received 
mining.submit from SV1 downstream for channel id: {}", - channel_id - ); - let is_valid_share = validate_sv1_share( - request, - self.target, - self.extranonce1.clone(), - self.version_rolling_mask.clone(), - self.sv1_server_data.clone(), - channel_id, - ) - .unwrap_or(false); - if !is_valid_share { - error!("Invalid share for channel id: {}", channel_id); - return false; - } - let to_send: SubmitShareWithChannelId = SubmitShareWithChannelId { - channel_id, - downstream_id: self.downstream_id, - share: request.clone(), - extranonce: self.extranonce1.clone(), - extranonce2_len: self.extranonce2_len, - version_rolling_mask: self.version_rolling_mask.clone(), - job_version: self.last_job_version_field, - }; - // Store the share to be sent to the Sv1Server - self.pending_share.replace(Some(to_send)); - true - } else { - error!("Cannot submit share: channel_id is None (waiting for OpenExtendedMiningChannelSuccess)"); - false - } - } - - /// Indicates to the server that the client supports the mining.set_extranonce method. - fn handle_extranonce_subscribe(&self) {} - - /// Checks if a Downstream role is authorized. - fn is_authorized(&self, name: &str) -> bool { - self.authorized_worker_name == *name - } - - /// Authorizes a Downstream role. - fn authorize(&mut self, name: &str) { - let name: String = name.into(); - if !self.is_authorized(&name) { - self.authorized_worker_name = name.to_string(); - } - } - - /// Sets the `extranonce1` field sent in the SV1 `mining.notify` message to the value specified - /// by the SV2 `OpenExtendedMiningChannelSuccess` message sent from the Upstream role. - fn set_extranonce1( - &mut self, - _extranonce1: Option>, - ) -> Extranonce<'static> { - self.extranonce1.clone().try_into().unwrap() - } - - /// Returns the `Downstream`'s `extranonce1` value. 
- fn extranonce1(&self) -> Extranonce<'static> { - self.extranonce1.clone().try_into().unwrap() - } - - /// Sets the `extranonce2_size` field sent in the SV1 `mining.notify` message to the value - /// specified by the SV2 `OpenExtendedMiningChannelSuccess` message sent from the Upstream role. - fn set_extranonce2_size(&mut self, _extra_nonce2_size: Option) -> usize { - self.extranonce2_len - } - - /// Returns the `Downstream`'s `extranonce2_size` value. - fn extranonce2_size(&self) -> usize { - self.extranonce2_len - } - - /// Returns the version rolling mask. - fn version_rolling_mask(&self) -> Option { - self.version_rolling_mask.clone() - } - - /// Sets the version rolling mask. - fn set_version_rolling_mask(&mut self, mask: Option) { - self.version_rolling_mask = mask; - } - - /// Sets the minimum version rolling bit. - fn set_version_rolling_min_bit(&mut self, mask: Option) { - self.version_rolling_min_bit = mask - } - - fn notify( - &'_ mut self, - ) -> Result> { - warn!("notify() called on DownstreamData - this method is not implemented for the translator proxy"); - Err( - stratum_apps::stratum_core::sv1_api::error::Error::UnexpectedMessage( - "notify".to_string(), - ), - ) - } -} diff --git a/roles/translator/src/lib/sv1/downstream/mod.rs b/roles/translator/src/lib/sv1/downstream/mod.rs deleted file mode 100644 index 9cd602e24c..0000000000 --- a/roles/translator/src/lib/sv1/downstream/mod.rs +++ /dev/null @@ -1,43 +0,0 @@ -pub(super) mod channel; -pub(super) mod data; -pub mod downstream; -mod message_handler; - -use stratum_apps::stratum_core::sv1_api::{client_to_server::Submit, utils::HexU32Be}; - -/// Messages sent from downstream handling logic to the SV1 server. -/// -/// This enum defines the types of messages that downstream connections can send -/// to the central SV1 server for processing and forwarding to upstream. 
-#[derive(Debug)] -pub enum DownstreamMessages { - /// Represents a submitted share from a downstream miner, - /// wrapped with the relevant channel ID. - SubmitShares(SubmitShareWithChannelId), - /// Request to open an extended mining channel for a downstream that just sent its first - /// message. - OpenChannel(u32), // downstream_id -} - -/// A wrapper around a `mining.submit` message with additional channel information. -/// -/// This struct contains all the necessary information to process a share submission -/// from an SV1 miner, including the share data itself and metadata needed for -/// proper routing and validation. -#[derive(Debug, Clone)] -pub struct SubmitShareWithChannelId { - /// The SV2 channel ID this share belongs to - pub channel_id: u32, - /// The downstream connection ID that submitted this share - pub downstream_id: u32, - /// The actual SV1 share submission data - pub share: Submit<'static>, - /// The complete extranonce used for this share - pub extranonce: Vec, - /// The length of the extranonce2 field - pub extranonce2_len: usize, - /// Optional version rolling mask for the share - pub version_rolling_mask: Option, - /// The version field from the job, used for validation - pub job_version: Option, -} diff --git a/roles/translator/src/lib/sv1/mod.rs b/roles/translator/src/lib/sv1/mod.rs deleted file mode 100644 index 0b62d78494..0000000000 --- a/roles/translator/src/lib/sv1/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! ## Downstream SV1 Module -//! -//! This module defines the structures, messages, and utility functions -//! used for handling the downstream connection with SV1 mining clients. -//! -//! It includes definitions for messages exchanged with a Bridge component, -//! structures for submitting shares and updating targets, and constants -//! and functions for managing client interactions. -//! -//! The module is organized into the following sub-modules: -//! 
- [`diff_management`]: (Declared here, likely contains downstream difficulty logic) -//! - [`downstream`]: Defines the core [`Downstream`] struct and its functionalities. - -pub mod downstream; -pub mod sv1_server; -pub use sv1_server::sv1_server::Sv1Server; diff --git a/roles/translator/src/lib/sv1/sv1_server/channel.rs b/roles/translator/src/lib/sv1/sv1_server/channel.rs deleted file mode 100644 index 1a306337d6..0000000000 --- a/roles/translator/src/lib/sv1/sv1_server/channel.rs +++ /dev/null @@ -1,41 +0,0 @@ -use crate::sv1::downstream::DownstreamMessages; -use async_channel::{unbounded, Receiver, Sender}; -use stratum_apps::stratum_core::parsers_sv2::Mining; - -use stratum_apps::stratum_core::sv1_api::json_rpc; -use tokio::sync::broadcast; - -pub struct Sv1ServerChannelState { - pub sv1_server_to_downstream_sender: broadcast::Sender<(u32, Option, json_rpc::Message)>, - pub downstream_to_sv1_server_sender: Sender, - pub downstream_to_sv1_server_receiver: Receiver, - pub channel_manager_receiver: Receiver>, - pub channel_manager_sender: Sender>, -} - -impl Sv1ServerChannelState { - pub fn new( - channel_manager_receiver: Receiver>, - channel_manager_sender: Sender>, - ) -> Self { - let (sv1_server_to_downstream_sender, _) = broadcast::channel(100); - let (downstream_to_sv1_server_sender, downstream_to_sv1_server_receiver) = unbounded(); - - Self { - sv1_server_to_downstream_sender, - downstream_to_sv1_server_receiver, - downstream_to_sv1_server_sender, - channel_manager_receiver, - channel_manager_sender, - } - } - - pub fn drop(&self) { - self.channel_manager_receiver.close(); - self.channel_manager_sender.close(); - self.downstream_to_sv1_server_receiver.close(); - self.downstream_to_sv1_server_sender.close(); - self.channel_manager_receiver.close(); - self.channel_manager_sender.close(); - } -} diff --git a/roles/translator/src/lib/sv1/sv1_server/data.rs b/roles/translator/src/lib/sv1/sv1_server/data.rs deleted file mode 100644 index 043cca44ac..0000000000 
--- a/roles/translator/src/lib/sv1/sv1_server/data.rs +++ /dev/null @@ -1,47 +0,0 @@ -use crate::sv1::downstream::downstream::Downstream; -use std::{ - collections::HashMap, - sync::{atomic::AtomicU32, Arc, RwLock}, -}; -use stratum_apps::stratum_core::{ - bitcoin::Target, channels_sv2::vardiff::classic::VardiffState, mining_sv2::SetNewPrevHash, - sv1_api::server_to_client, -}; - -#[derive(Debug, Clone)] -pub struct PendingTargetUpdate { - pub downstream_id: u32, - pub new_target: Target, - pub new_hashrate: f32, -} - -#[derive(Debug)] -pub struct Sv1ServerData { - pub downstreams: HashMap>, - pub vardiff: HashMap>>, - pub prevhash: Option>, - pub downstream_id_factory: AtomicU32, - /// Job storage for aggregated mode - all Sv1 downstreams share the same jobs - pub aggregated_valid_jobs: Option>>, - /// Job storage for non-aggregated mode - each Sv1 downstream has its own jobs - pub non_aggregated_valid_jobs: Option>>>, - /// Tracks pending target updates that are waiting for SetTarget response from upstream - pub pending_target_updates: Vec, - /// The initial target used when opening channels - used when no downstreams remain - pub initial_target: Option, -} - -impl Sv1ServerData { - pub fn new(aggregate_channels: bool) -> Self { - Self { - downstreams: HashMap::new(), - vardiff: HashMap::new(), - prevhash: None, - downstream_id_factory: AtomicU32::new(0), - aggregated_valid_jobs: aggregate_channels.then(Vec::new), - non_aggregated_valid_jobs: (!aggregate_channels).then(HashMap::new), - pending_target_updates: Vec::new(), - initial_target: None, - } - } -} diff --git a/roles/translator/src/lib/sv1/sv1_server/difficulty_manager.rs b/roles/translator/src/lib/sv1/sv1_server/difficulty_manager.rs deleted file mode 100644 index f4e5ce9ecf..0000000000 --- a/roles/translator/src/lib/sv1/sv1_server/difficulty_manager.rs +++ /dev/null @@ -1,751 +0,0 @@ -use crate::{ - sv1::sv1_server::data::{PendingTargetUpdate, Sv1ServerData}, - utils::ShutdownMessage, -}; -use 
async_channel::Sender; -use std::{collections::HashMap, sync::Arc, time::Duration}; -use stratum_apps::{ - custom_mutex::Mutex, - stratum_core::{ - bitcoin::Target, - channels_sv2::{target::hash_rate_to_target, Vardiff}, - mining_sv2::{SetTarget, UpdateChannel}, - parsers_sv2::Mining, - stratum_translation::sv2_to_sv1::build_sv1_set_difficulty_from_sv2_target, - sv1_api::json_rpc, - }, -}; -use tokio::{sync::broadcast, time}; -use tracing::{debug, error, info, trace, warn}; - -/// Handles all variable difficulty adjustment logic for the SV1 server. -/// -/// This module contains the core vardiff implementation that: -/// - Periodically adjusts difficulty targets based on share submission rates -/// - Manages the relationship between upstream and downstream targets -/// - Handles both aggregated and non-aggregated channel modes -/// - Coordinates with the channel manager for target updates -pub struct DifficultyManager { - shares_per_minute: f32, - is_aggregated: bool, -} - -impl DifficultyManager { - /// Creates a new difficulty manager instance. - /// - /// # Arguments - /// * `shares_per_minute` - Target shares per minute for difficulty adjustment - /// * `is_aggregated` - Whether channels are operating in aggregated mode - pub fn new(shares_per_minute: f32, is_aggregated: bool) -> Self { - Self { - shares_per_minute, - is_aggregated, - } - } - - /// Spawns the variable difficulty adjustment loop. - /// - /// This method implements the SV1 server's variable difficulty logic for all downstreams. - /// Every 60 seconds, this method updates the difficulty state for each downstream. 
- pub async fn spawn_vardiff_loop( - sv1_server_data: Arc>, - channel_manager_sender: Sender>, - sv1_server_to_downstream_sender: broadcast::Sender<(u32, Option, json_rpc::Message)>, - shares_per_minute: f32, - is_aggregated: bool, - mut notify_shutdown: broadcast::Receiver, - shutdown_complete_tx: tokio::sync::mpsc::Sender<()>, - ) { - let difficulty_manager = DifficultyManager::new(shares_per_minute, is_aggregated); - - 'vardiff_loop: loop { - tokio::select! { - message = notify_shutdown.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - debug!("SV1 Server: Vardiff loop received shutdown signal. Exiting."); - break 'vardiff_loop; - } - Ok(ShutdownMessage::DownstreamShutdown(downstream_id)) => { - sv1_server_data.super_safe_lock(|d| { - d.vardiff.remove(&downstream_id); - }); - } - Ok(ShutdownMessage::DownstreamShutdownAll) => { - sv1_server_data.super_safe_lock(|d|{ - d.vardiff = HashMap::new(); - d.downstreams = HashMap::new(); - }); - info!("πŸ”Œ All downstreams removed from sv1 server as upstream changed"); - - // In aggregated mode, send UpdateChannel to reflect the new state (no downstreams) - Self::send_update_channel_on_downstream_state_change( - &sv1_server_data, - &channel_manager_sender, - is_aggregated, - ).await; - } - Ok(ShutdownMessage::UpstreamReconnectedResetAndShutdownDownstreams) => { - sv1_server_data.super_safe_lock(|d|{ - d.vardiff = HashMap::new(); - d.downstreams = HashMap::new(); - }); - info!("πŸ”Œ All downstreams removed from sv1 server as upstream reconnected"); - - // In aggregated mode, send UpdateChannel to reflect the new state (no downstreams) - Self::send_update_channel_on_downstream_state_change( - &sv1_server_data, - &channel_manager_sender, - is_aggregated, - ).await; - } - _ => {} - } - } - _ = time::sleep(Duration::from_secs(60)) => { - difficulty_manager.handle_vardiff_updates( - &sv1_server_data, - &channel_manager_sender, - &sv1_server_to_downstream_sender, - ).await; - } - } - } - 
drop(shutdown_complete_tx); - debug!("SV1 Server: Vardiff loop exited."); - } - - /// Handles variable difficulty adjustments for all connected downstreams. - /// - /// This method implements the core vardiff logic: - /// 1. For each downstream, calculate if a target update is needed - /// 2. Always send UpdateChannel to keep upstream informed - /// 3. Compare new target with upstream target to decide when to send set_difficulty: - /// - If new_target >= upstream_target: send set_difficulty immediately - /// - If new_target < upstream_target: wait for SetTarget response before sending - /// set_difficulty - /// 4. Handle aggregated vs non-aggregated modes for UpdateChannel messages - async fn handle_vardiff_updates( - &self, - sv1_server_data: &Arc>, - channel_manager_sender: &Sender>, - sv1_server_to_downstream_sender: &broadcast::Sender<(u32, Option, json_rpc::Message)>, - ) { - let vardiff_map = sv1_server_data.super_safe_lock(|v| v.vardiff.clone()); - let mut immediate_updates = Vec::new(); - let mut all_updates = Vec::new(); // All updates will generate UpdateChannel messages - - // Process each downstream and determine update strategy - for (downstream_id, vardiff_state) in vardiff_map.iter() { - debug!("Updating vardiff for downstream_id: {}", downstream_id); - let mut vardiff = vardiff_state.write().unwrap(); - - // Get current state from downstream - let Some((channel_id, hashrate, target, upstream_target)) = sv1_server_data - .super_safe_lock(|data| { - data.downstreams.get(downstream_id).and_then(|ds| { - ds.downstream_data.super_safe_lock(|d| { - Some(( - d.channel_id, - d.hashrate.unwrap(), /* It's safe to unwrap because we know that - * the downstream has a hashrate (we are - * doing vardiff) */ - d.target, - d.upstream_target, - )) - }) - }) - }) - else { - continue; - }; - - let Some(channel_id) = channel_id else { - error!("Channel id is none for downstream_id: {}", downstream_id); - continue; - }; - - let new_hashrate_opt = 
vardiff.try_vardiff(hashrate, &target, self.shares_per_minute); - - if let Ok(Some(new_hashrate)) = new_hashrate_opt { - // Calculate new target based on new hashrate - let new_target: Target = - match hash_rate_to_target(new_hashrate as f64, self.shares_per_minute as f64) { - Ok(target) => target, - Err(e) => { - error!( - "Failed to calculate target for hashrate {}: {:?}", - new_hashrate, e - ); - continue; - } - }; - - // Always update the downstream's pending target and hashrate - _ = sv1_server_data.safe_lock(|dmap| { - if let Some(d) = dmap.downstreams.get(downstream_id) { - _ = d.downstream_data.safe_lock(|d| { - d.set_pending_target(new_target); - d.set_pending_hashrate(Some(new_hashrate)); - }); - } - }); - - // All updates will be sent as UpdateChannel messages - all_updates.push((*downstream_id, channel_id, new_target, new_hashrate)); - - // Determine if we should send set_difficulty immediately or wait - match upstream_target { - Some(upstream_target) => { - if new_target >= upstream_target { - // Case 1: new_target >= upstream_target, send set_difficulty - // immediately - trace!( - "βœ… Target comparison: new_target ({:?}) >= upstream_target ({:?}) for downstream {}, will send set_difficulty immediately", - new_target, upstream_target, downstream_id - ); - immediate_updates.push((channel_id, Some(*downstream_id), new_target)); - } else { - // Case 2: new_target < upstream_target, delay set_difficulty until - // SetTarget - trace!( - "⏳ Target comparison: new_target ({:?}) < upstream_target ({:?}) for downstream {}, will delay set_difficulty until SetTarget", - new_target, upstream_target, downstream_id - ); - // Store as pending update for when SetTarget arrives - sv1_server_data.super_safe_lock(|data| { - data.pending_target_updates.push(PendingTargetUpdate { - downstream_id: *downstream_id, - new_target, - new_hashrate, - }); - }); - } - } - None => { - // No upstream target set yet, send set_difficulty immediately as fallback - trace!( - "No 
upstream target set for downstream {}, will send set_difficulty immediately", - downstream_id - ); - immediate_updates.push((channel_id, Some(*downstream_id), new_target)); - } - } - } - } - - // Send UpdateChannel messages for ALL updates (both immediate and delayed) - if !all_updates.is_empty() { - self.send_update_channel_messages(all_updates, sv1_server_data, channel_manager_sender) - .await; - } - - // Process immediate set_difficulty updates (for new_target >= upstream_target) - for (channel_id, downstream_id, target) in immediate_updates { - // Send set_difficulty message immediately - if let Ok(set_difficulty_msg) = build_sv1_set_difficulty_from_sv2_target(target) { - if let Err(e) = sv1_server_to_downstream_sender.send(( - channel_id, - downstream_id, - set_difficulty_msg, - )) { - error!( - "Failed to send immediate SetDifficulty message to downstream {}: {:?}", - downstream_id.unwrap_or(0), - e - ); - } else { - trace!( - "Sent immediate SetDifficulty to downstream {} (new_target >= upstream_target)", - downstream_id.unwrap_or(0) - ); - } - } - } - } - - /// Sends UpdateChannel messages for all target updates. - /// - /// Always sends UpdateChannel to keep upstream informed about target changes. 
- /// Handles both aggregated and non-aggregated modes: - /// - Aggregated: Send single UpdateChannel with minimum target and sum of hashrates - /// - Non-aggregated: Send individual UpdateChannel for each downstream - async fn send_update_channel_messages( - &self, - all_updates: Vec<(u32, u32, Target, f32)>, /* (downstream_id, channel_id, new_target, - * new_hashrate) */ - sv1_server_data: &Arc>, - channel_manager_sender: &Sender>, - ) { - if self.is_aggregated { - // Aggregated mode: Send single UpdateChannel with minimum target and total hashrate of - // ALL downstreams - if let Some((_, channel_id, _, _)) = all_updates.first() { - // Get minimum target among ALL downstreams, not just the ones with updates - let min_target = sv1_server_data.super_safe_lock(|data| { - data.downstreams - .values() - .map(|downstream| { - downstream.downstream_data.super_safe_lock(|d| { - // Use pending_target if available, otherwise current target - *d.pending_target.as_ref().unwrap_or(&d.target) - }) - }) - .min() - .expect("At least one downstream should exist") - }); - - // Get total hashrate of ALL downstreams, not just the ones with updates - let total_hashrate: f32 = sv1_server_data.super_safe_lock(|data| { - data.downstreams - .values() - .map(|downstream| { - downstream.downstream_data.super_safe_lock(|d| { - // Use pending_hashrate if available, otherwise current hashrate - // It's safe to unwrap because we know that the downstream has a - // hashrate (we are doing vardiff) - d.pending_hashrate.unwrap_or(d.hashrate.unwrap()) - }) - }) - .sum() - }); - - let update_channel = UpdateChannel { - channel_id: *channel_id, - nominal_hash_rate: total_hashrate, - maximum_target: min_target.to_le_bytes().into(), - }; - - debug!( - "Sending UpdateChannel for aggregated mode: channel_id={}, total_hashrate={} (all {} downstreams), min_target={:?}, vardiff_updates={}", - channel_id, total_hashrate, - sv1_server_data.super_safe_lock(|data| data.downstreams.len()), - &min_target, 
all_updates.len() - ); - - if let Err(e) = channel_manager_sender - .send(Mining::UpdateChannel(update_channel)) - .await - { - error!("Failed to send UpdateChannel message: {:?}", e); - } - } - } else { - // Non-aggregated mode: Send individual UpdateChannel for each downstream - for (downstream_id, channel_id, new_target, new_hashrate) in &all_updates { - let update_channel = UpdateChannel { - channel_id: *channel_id, - nominal_hash_rate: *new_hashrate, - maximum_target: new_target.to_le_bytes().into(), - }; - - debug!( - "Sending UpdateChannel for downstream {}: channel_id={}, hashrate={}, target={:?}", - downstream_id, channel_id, new_hashrate, new_target - ); - - if let Err(e) = channel_manager_sender - .send(Mining::UpdateChannel(update_channel)) - .await - { - error!( - "Failed to send UpdateChannel message for downstream {}: {:?}", - downstream_id, e - ); - } - } - } - } - - /// Handles SetTarget messages from the ChannelManager. - /// - /// Aggregated mode: Single SetTarget updates all downstreams and processes all pending updates - /// Non-aggregated mode: Each SetTarget updates one specific downstream and processes its - /// pending update - pub async fn handle_set_target_message( - set_target: SetTarget<'_>, - sv1_server_data: &Arc>, - channel_manager_sender: &Sender>, - sv1_server_to_downstream_sender: &broadcast::Sender<(u32, Option, json_rpc::Message)>, - is_aggregated: bool, - ) { - let new_upstream_target = - Target::from_le_bytes(set_target.maximum_target.inner_as_ref().try_into().unwrap()); - debug!( - "Received SetTarget for channel {}: new_upstream_target = {:?}", - set_target.channel_id, new_upstream_target - ); - - if is_aggregated { - Self::handle_aggregated_set_target( - new_upstream_target, - set_target.channel_id, - sv1_server_data, - channel_manager_sender, - sv1_server_to_downstream_sender, - ) - .await; - } else { - Self::handle_non_aggregated_set_target( - set_target.channel_id, - new_upstream_target, - sv1_server_data, - 
channel_manager_sender, - sv1_server_to_downstream_sender, - ) - .await; - } - } - - /// Handles SetTarget in aggregated mode. - /// Updates all downstreams and processes all pending set_difficulty messages. - async fn handle_aggregated_set_target( - new_upstream_target: Target, - channel_id: u32, - sv1_server_data: &Arc>, - _channel_manager_sender: &Sender>, - sv1_server_to_downstream_sender: &broadcast::Sender<(u32, Option, json_rpc::Message)>, - ) { - debug!("Aggregated mode: Updating upstream target for all downstreams"); - - // Update upstream target for ALL downstreams - let downstream_ids: Vec = - sv1_server_data.super_safe_lock(|data| data.downstreams.keys().cloned().collect()); - - for downstream_id in downstream_ids { - _ = sv1_server_data.safe_lock(|data| { - if let Some(downstream) = data.downstreams.get(&downstream_id) { - _ = downstream.downstream_data.safe_lock(|d| { - d.set_upstream_target(new_upstream_target); - }); - } - }); - } - - // Process ALL pending difficulty updates that can now be sent downstream - let applicable_updates = Self::get_pending_difficulty_updates( - new_upstream_target, - None, - channel_id, - sv1_server_data, - ); - Self::send_pending_set_difficulty_messages_to_downstream( - applicable_updates, - sv1_server_data, - sv1_server_to_downstream_sender, - ) - .await; - } - - /// Handles SetTarget in non-aggregated mode. - /// Updates the specific downstream and processes its pending set_difficulty message. 
- async fn handle_non_aggregated_set_target( - channel_id: u32, - new_upstream_target: Target, - sv1_server_data: &Arc>, - _channel_manager_sender: &Sender>, - sv1_server_to_downstream_sender: &broadcast::Sender<(u32, Option, json_rpc::Message)>, - ) { - debug!( - "Non-aggregated mode: Processing SetTarget for channel {}", - channel_id - ); - - let affected_downstream = sv1_server_data.super_safe_lock(|data| { - data.downstreams - .iter() - .find_map(|(downstream_id, downstream)| { - downstream.downstream_data.super_safe_lock(|d| { - if d.channel_id == Some(channel_id) { - Some(*downstream_id) - } else { - None - } - }) - }) - }); - - if let Some(downstream_id) = affected_downstream { - // Update upstream target for this specific downstream - _ = sv1_server_data.safe_lock(|data| { - if let Some(downstream) = data.downstreams.get(&downstream_id) { - _ = downstream.downstream_data.safe_lock(|d| { - d.set_upstream_target(new_upstream_target); - }); - } - }); - trace!("Updated upstream target for downstream {}", downstream_id); - - // Process pending difficulty updates for this specific downstream only - let applicable_updates = Self::get_pending_difficulty_updates( - new_upstream_target, - Some(downstream_id), - channel_id, - sv1_server_data, - ); - Self::send_pending_set_difficulty_messages_to_downstream( - applicable_updates, - sv1_server_data, - sv1_server_to_downstream_sender, - ) - .await; - } else { - warn!("No downstream found for channel {}", channel_id); - } - } - - /// Gets pending updates that can now be applied based on the new upstream target. - /// If downstream_id is provided, only returns updates for that specific downstream. - /// Logs a warning if the upstream target is higher than any requested target. 
- fn get_pending_difficulty_updates( - new_upstream_target: Target, - downstream_id: Option, - channel_id: u32, - sv1_server_data: &Arc>, - ) -> Vec { - let mut applicable_updates = Vec::new(); - - sv1_server_data.super_safe_lock(|data| { - data.pending_target_updates.retain(|pending_update| { - // Check if we should process this update - let should_process = match downstream_id { - Some(downstream_id) => pending_update.downstream_id == downstream_id, - None => true, // Process all in aggregated mode - }; - - if should_process { - if pending_update.new_target >= new_upstream_target { - // Target is acceptable, can apply immediately - applicable_updates.push(pending_update.clone()); - false // remove from pending list - } else { - // WARNING: Upstream gave us a target higher than what we requested - error!( - "❌ Protocol issue: SetTarget response has target ({:?}) which is higher than requested target ({:?}) in UpdateChannel for channel {:?}. Ignoring this pending update for downstream {:?}.", - new_upstream_target, pending_update.new_target, channel_id, pending_update.downstream_id - ); - false // remove from pending list (don't keep invalid requests) - } - } else { - true // keep in pending list (not relevant for this SetTarget) - } - }); - }); - applicable_updates - } - - /// Sends set_difficulty messages for all applicable pending updates. 
- async fn send_pending_set_difficulty_messages_to_downstream( - difficulty_updates: Vec, - sv1_server_data: &Arc>, - sv1_server_to_downstream_sender: &broadcast::Sender<(u32, Option, json_rpc::Message)>, - ) { - for pending_update in &difficulty_updates { - // Get channel_id for this downstream - let channel_id = sv1_server_data.super_safe_lock(|data| { - data.downstreams - .get(&pending_update.downstream_id) - .and_then(|ds| ds.downstream_data.super_safe_lock(|d| d.channel_id)) - }); - - if let Some(channel_id) = channel_id { - // Send set_difficulty message - if let Ok(set_difficulty_msg) = - build_sv1_set_difficulty_from_sv2_target(pending_update.new_target) - { - if let Err(e) = sv1_server_to_downstream_sender.send(( - channel_id, - Some(pending_update.downstream_id), - set_difficulty_msg, - )) { - error!( - "Failed to send SetDifficulty to downstream {}: {:?}", - pending_update.downstream_id, e - ); - } else { - trace!( - "Sent SetDifficulty to downstream {}", - pending_update.downstream_id - ); - } - } - } - } - } - - /// Sends an UpdateChannel message for aggregated mode when downstream state changes - /// (e.g., disconnect). Calculates total hashrate and minimum target among all remaining - /// downstreams. 
- pub async fn send_update_channel_on_downstream_state_change( - sv1_server_data: &Arc>, - channel_manager_sender: &Sender>, - is_aggregated: bool, - ) { - if !is_aggregated { - return; // Only applies to aggregated mode - } - - let (total_hashrate, min_target, channel_id, downstream_count) = sv1_server_data - .super_safe_lock(|data| { - // Hardcoded channel_id 0 (the ChannelManager will set this channel_id to the - // upstream extended channel id) - let channel_id = 0; - - let total_hashrate: f32 = data - .downstreams - .values() - .map(|downstream| { - downstream.downstream_data.super_safe_lock(|d| { - // Use pending_hashrate if available, otherwise current hashrate - // It's safe to unwrap because we know that the downstream has a - // hashrate (we are doing vardiff) - d.pending_hashrate.unwrap_or(d.hashrate.unwrap()) - }) - }) - .sum(); - - let min_target = data - .downstreams - .values() - .map(|downstream| { - downstream.downstream_data.super_safe_lock(|d| { - // Use pending_target if available, otherwise current target - *d.pending_target.as_ref().unwrap_or(&d.target) - }) - }) - .min(); - - ( - total_hashrate, - min_target, - Some(channel_id), - data.downstreams.len(), - ) - }); - - if let (Some(min_target), Some(channel_id)) = (min_target, channel_id) { - let update_channel = UpdateChannel { - channel_id, - nominal_hash_rate: total_hashrate, - maximum_target: min_target.to_le_bytes().into(), - }; - - if let Err(e) = channel_manager_sender - .send(Mining::UpdateChannel(update_channel)) - .await - { - error!( - "Failed to send UpdateChannel message after downstream state change: {:?}", - e - ); - } - } else if downstream_count == 0 { - // No downstreams remaining, send UpdateChannel with maximum possible target - let update_channel = UpdateChannel { - channel_id: 0, - nominal_hash_rate: 0.0, // No hashrate when no downstreams - maximum_target: [0xFF; 32].into(), - }; - - if let Err(e) = channel_manager_sender - .send(Mining::UpdateChannel(update_channel)) - 
.await - { - error!( - "Failed to send UpdateChannel message with maximum target: {:?}", - e - ); - } - } else { - warn!("Cannot send UpdateChannel after downstream state change: no downstreams remaining or no channel_id"); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::sv1::sv1_server::data::Sv1ServerData; - use async_channel::unbounded; - use std::sync::Arc; - - fn create_test_difficulty_manager() -> DifficultyManager { - DifficultyManager::new(5.0, true) // 5 shares per minute, aggregated mode - } - - fn create_test_sv1_server_data() -> Arc> { - let data = Sv1ServerData::new(true); // aggregated mode - Arc::new(Mutex::new(data)) - } - - #[test] - fn test_difficulty_manager_creation() { - let manager = create_test_difficulty_manager(); - assert_eq!(manager.shares_per_minute, 5.0); - assert!(manager.is_aggregated); - - let non_agg_manager = DifficultyManager::new(10.0, false); - assert_eq!(non_agg_manager.shares_per_minute, 10.0); - assert!(!non_agg_manager.is_aggregated); - } - - #[tokio::test] - async fn test_send_update_channel_on_downstream_state_change_aggregated() { - let sv1_server_data = create_test_sv1_server_data(); - let (sender, receiver) = unbounded(); - - // Test with no downstreams - DifficultyManager::send_update_channel_on_downstream_state_change( - &sv1_server_data, - &sender, - true, // aggregated - ) - .await; - - // Should send UpdateChannel with maximum target when no downstreams - let received_message = receiver - .try_recv() - .expect("Should receive UpdateChannel message"); - if let Mining::UpdateChannel(update_channel) = received_message { - assert_eq!(update_channel.channel_id, 0); - assert_eq!(update_channel.nominal_hash_rate, 0.0); - assert_eq!(update_channel.maximum_target, [0xFF; 32].into()); - } else { - panic!( - "Expected UpdateChannel message, got: {:?}", - received_message - ); - } - } - - #[tokio::test] - async fn test_send_update_channel_on_downstream_state_change_non_aggregated() { - let sv1_server_data 
= create_test_sv1_server_data(); - let (sender, _receiver) = unbounded(); - - DifficultyManager::send_update_channel_on_downstream_state_change( - &sv1_server_data, - &sender, - false, // non-aggregated - ) - .await; - - // Non-aggregated mode should return early and not crash - } - - #[test] - fn test_get_pending_difficulty_updates_basic() { - let sv1_server_data = create_test_sv1_server_data(); - let upstream_target: Target = hash_rate_to_target(150.0, 5.0).unwrap(); - - // Test with empty pending updates - let applicable_updates = DifficultyManager::get_pending_difficulty_updates( - upstream_target, - None, // All downstreams - 1, // channel_id - &sv1_server_data, - ); - - assert_eq!(applicable_updates.len(), 0); - } -} diff --git a/roles/translator/src/lib/sv1/sv1_server/mod.rs b/roles/translator/src/lib/sv1/sv1_server/mod.rs deleted file mode 100644 index 4491b592cc..0000000000 --- a/roles/translator/src/lib/sv1/sv1_server/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub(super) mod channel; -pub mod data; -pub mod difficulty_manager; -pub mod sv1_server; diff --git a/roles/translator/src/lib/sv1/sv1_server/sv1_server.rs b/roles/translator/src/lib/sv1/sv1_server/sv1_server.rs deleted file mode 100644 index 1d81310a28..0000000000 --- a/roles/translator/src/lib/sv1/sv1_server/sv1_server.rs +++ /dev/null @@ -1,1025 +0,0 @@ -use crate::{ - config::TranslatorConfig, - error::TproxyError, - status::{handle_error, Status, StatusSender}, - sv1::{ - downstream::{downstream::Downstream, DownstreamMessages}, - sv1_server::{ - channel::Sv1ServerChannelState, data::Sv1ServerData, - difficulty_manager::DifficultyManager, - }, - }, - task_manager::TaskManager, - utils::ShutdownMessage, -}; -use async_channel::{Receiver, Sender}; -use std::{ - collections::HashMap, - net::SocketAddr, - sync::{ - atomic::{AtomicBool, AtomicU32, Ordering}, - Arc, RwLock, - }, -}; -use stratum_apps::{ - custom_mutex::Mutex, - network_helpers::sv1_connection::ConnectionSV1, - stratum_core::{ - 
binary_sv2::Str0255, - bitcoin::Target, - channels_sv2::{target::hash_rate_to_target, Vardiff, VardiffState}, - mining_sv2::{CloseChannel, SetTarget}, - parsers_sv2::Mining, - stratum_translation::{ - sv1_to_sv2::{ - build_sv2_open_extended_mining_channel, - build_sv2_submit_shares_extended_from_sv1_submit, - }, - sv2_to_sv1::{build_sv1_notify_from_sv2, build_sv1_set_difficulty_from_sv2_target}, - }, - sv1_api::IsServer, - }, -}; -use tokio::{ - net::TcpListener, - sync::{broadcast, mpsc}, -}; -use tracing::{debug, error, info, warn}; - -/// SV1 server that handles connections from SV1 miners. -/// -/// This struct manages the SV1 server component of the translator, which: -/// - Accepts connections from SV1 miners -/// - Manages difficulty adjustment for connected miners -/// - Coordinates with the SV2 channel manager for upstream communication -/// - Tracks mining jobs and share submissions -/// -/// The server maintains state for multiple downstream connections and implements -/// variable difficulty adjustment based on share submission rates. -pub struct Sv1Server { - sv1_server_channel_state: Sv1ServerChannelState, - sv1_server_data: Arc>, - shares_per_minute: f32, - listener_addr: SocketAddr, - config: TranslatorConfig, - clean_job: AtomicBool, - sequence_counter: AtomicU32, - miner_counter: AtomicU32, -} - -impl Sv1Server { - /// Drops the server's channel state, cleaning up resources. - pub fn drop(&self) { - self.sv1_server_channel_state.drop(); - } - - /// Creates a new SV1 server instance. 
- /// - /// # Arguments - /// * `listener_addr` - The socket address to bind the server to - /// * `channel_manager_receiver` - Channel to receive messages from the channel manager - /// * `channel_manager_sender` - Channel to send messages to the channel manager - /// * `config` - Configuration settings for the translator - /// - /// # Returns - /// A new Sv1Server instance ready to accept connections - pub fn new( - listener_addr: SocketAddr, - channel_manager_receiver: Receiver>, - channel_manager_sender: Sender>, - config: TranslatorConfig, - ) -> Self { - let shares_per_minute = config.downstream_difficulty_config.shares_per_minute; - let sv1_server_channel_state = - Sv1ServerChannelState::new(channel_manager_receiver, channel_manager_sender); - let sv1_server_data = Arc::new(Mutex::new(Sv1ServerData::new(config.aggregate_channels))); - Self { - sv1_server_channel_state, - sv1_server_data, - config, - listener_addr, - shares_per_minute, - clean_job: AtomicBool::new(true), - miner_counter: AtomicU32::new(0), - sequence_counter: AtomicU32::new(0), - } - } - - /// Starts the SV1 server and begins accepting connections. - /// - /// This method: - /// - Binds to the configured listening address - /// - Spawns the variable difficulty adjustment loop - /// - Enters the main event loop to handle: - /// - New miner connections - /// - Shutdown signals - /// - Messages from downstream miners (submit shares) - /// - Messages from upstream SV2 channel manager - /// - /// The server will continue running until a shutdown signal is received. 
- /// - /// # Arguments - /// * `notify_shutdown` - Broadcast channel for shutdown coordination - /// * `shutdown_complete_tx` - Channel to signal shutdown completion - /// * `status_sender` - Channel for sending status updates - /// * `task_manager` - Manager for spawned async tasks - /// - /// # Returns - /// * `Ok(())` - Server shut down gracefully - /// * `Err(TproxyError)` - Server encountered an error - pub async fn start( - self: Arc, - notify_shutdown: broadcast::Sender, - shutdown_complete_tx: mpsc::Sender<()>, - status_sender: Sender, - task_manager: Arc, - ) -> Result<(), TproxyError> { - info!("Starting SV1 server on {}", self.listener_addr); - let mut shutdown_rx_main = notify_shutdown.subscribe(); - let shutdown_complete_tx_main_clone = shutdown_complete_tx.clone(); - - // get the first target for the first set difficulty message - let first_target: Target = hash_rate_to_target( - self.config - .downstream_difficulty_config - .min_individual_miner_hashrate as f64, - self.config.downstream_difficulty_config.shares_per_minute as f64, - ) - .unwrap(); - - // Spawn vardiff loop only if enabled - if self.config.downstream_difficulty_config.enable_vardiff { - info!("Variable difficulty adjustment enabled - starting vardiff loop"); - task_manager.spawn(DifficultyManager::spawn_vardiff_loop( - self.sv1_server_data.clone(), - self.sv1_server_channel_state.channel_manager_sender.clone(), - self.sv1_server_channel_state - .sv1_server_to_downstream_sender - .clone(), - self.shares_per_minute, - self.config.aggregate_channels, - notify_shutdown.subscribe(), - shutdown_complete_tx_main_clone.clone(), - )); - } else { - info!("Variable difficulty adjustment disabled - upstream will manage difficulty, SV1 server will forward SetTarget messages to downstreams"); - } - - let listener = TcpListener::bind(self.listener_addr).await.map_err(|e| { - error!("Failed to bind to {}: {}", self.listener_addr, e); - e - })?; - - info!("Translator Proxy: listening on {}", 
self.listener_addr); - - let sv1_status_sender = StatusSender::Sv1Server(status_sender.clone()); - - loop { - tokio::select! { - message = shutdown_rx_main.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - debug!("SV1 Server: Vardiff loop received shutdown signal. Exiting."); - break; - } - Ok(ShutdownMessage::DownstreamShutdown(downstream_id)) => { - let current_downstream = self.sv1_server_data.super_safe_lock(|d| { - // Only remove from vardiff map if vardiff is enabled - if self.config.downstream_difficulty_config.enable_vardiff { - d.vardiff.remove(&downstream_id); - } - d.downstreams.remove(&downstream_id) - }); - if let Some(downstream) = current_downstream { - info!("πŸ”Œ Downstream: {downstream_id} disconnected and removed from sv1 server downstreams"); - - // In aggregated mode, send UpdateChannel to reflect the new state (only if vardiff enabled) - if self.config.downstream_difficulty_config.enable_vardiff { - DifficultyManager::send_update_channel_on_downstream_state_change( - &self.sv1_server_data, - &self.sv1_server_channel_state.channel_manager_sender, - self.config.aggregate_channels, - ).await; - } - - let channel_id = downstream.downstream_data.super_safe_lock(|d| d.channel_id); - - if let Some(channel_id) = channel_id { - if !self.config.aggregate_channels { - info!("Sending CloseChannel message: {channel_id} for downstream: {downstream_id}"); - let reason_code = Str0255::try_from("downstream disconnected".to_string()).unwrap(); - _ = self.sv1_server_channel_state - .channel_manager_sender - .send(Mining::CloseChannel(CloseChannel { - channel_id, - reason_code, - })) - .await; - } - } - } - } - Ok(ShutdownMessage::DownstreamShutdownAll) => { - self.sv1_server_data.super_safe_lock(|d|{ - if self.config.downstream_difficulty_config.enable_vardiff { - d.vardiff = HashMap::new(); - } - d.downstreams = HashMap::new(); - }); - info!("πŸ”Œ All downstreams removed from sv1 server as upstream changed"); - - // In aggregated mode, 
send UpdateChannel to reflect the new state (no downstreams) - if self.config.downstream_difficulty_config.enable_vardiff { - DifficultyManager::send_update_channel_on_downstream_state_change( - &self.sv1_server_data, - &self.sv1_server_channel_state.channel_manager_sender, - self.config.aggregate_channels, - ).await; - } - } - Ok(ShutdownMessage::UpstreamReconnectedResetAndShutdownDownstreams) => { - self.sv1_server_data.super_safe_lock(|d|{ - if self.config.downstream_difficulty_config.enable_vardiff { - d.vardiff = HashMap::new(); - } - d.downstreams = HashMap::new(); - }); - info!("πŸ”Œ All downstreams removed from sv1 server as upstream reconnected"); - - // In aggregated mode, send UpdateChannel to reflect the new state (no downstreams) - if self.config.downstream_difficulty_config.enable_vardiff { - DifficultyManager::send_update_channel_on_downstream_state_change( - &self.sv1_server_data, - &self.sv1_server_channel_state.channel_manager_sender, - self.config.aggregate_channels, - ).await; - } - } - _ => {} - } - } - result = listener.accept() => { - match result { - Ok((stream, addr)) => { - info!("New SV1 downstream connection from {}", addr); - - let connection = ConnectionSV1::new(stream).await; - let downstream_id = self.sv1_server_data.super_safe_lock(|v| v.downstream_id_factory.fetch_add(1, Ordering::Relaxed)); - let downstream = Arc::new(Downstream::new( - downstream_id, - connection.sender().clone(), - connection.receiver().clone(), - self.sv1_server_channel_state.downstream_to_sv1_server_sender.clone(), - self.sv1_server_channel_state.sv1_server_to_downstream_sender.clone().subscribe(), - first_target, - Some(self.config.downstream_difficulty_config.min_individual_miner_hashrate), - self.sv1_server_data.clone(), - )); - // vardiff initialization (only if enabled) - _ = self.sv1_server_data - .safe_lock(|d| { - d.downstreams.insert(downstream_id, downstream.clone()); - // Insert vardiff state for this downstream only if vardiff is enabled - if 
self.config.downstream_difficulty_config.enable_vardiff { - let vardiff = Arc::new(RwLock::new(VardiffState::new().expect("Failed to create vardiffstate"))); - d.vardiff.insert(downstream_id, vardiff); - } - }); - info!("Downstream {} registered successfully (channel will be opened after first message)", downstream_id); - - // Start downstream tasks immediately, but defer channel opening until first message - let status_sender = StatusSender::Downstream { - downstream_id, - tx: status_sender.clone(), - }; - - Downstream::run_downstream_tasks( - downstream, - notify_shutdown.clone(), - shutdown_complete_tx.clone(), - status_sender, - task_manager.clone(), - ); - } - Err(e) => { - warn!("Failed to accept new connection: {:?}", e); - } - } - } - res = Self::handle_downstream_message( - Arc::clone(&self) - ) => { - if let Err(e) = res { - handle_error(&sv1_status_sender, e).await; - break; - } - } - res = Self::handle_upstream_message( - Arc::clone(&self), - first_target, - ) => { - if let Err(e) = res { - handle_error(&sv1_status_sender, e).await; - break; - } - } - } - } - self.sv1_server_channel_state.drop(); - drop(shutdown_complete_tx); - debug!("SV1 Server main listener loop exited."); - Ok(()) - } - - /// Handles messages received from downstream SV1 miners. 
- /// - /// This method processes share submissions from miners by: - /// - Updating variable difficulty counters - /// - Extracting and validating share data - /// - Converting SV1 share format to SV2 SubmitSharesExtended - /// - Forwarding the share to the channel manager for upstream submission - /// - /// # Returns - /// * `Ok(())` - Message processed successfully - /// * `Err(TproxyError)` - Error processing the message - pub async fn handle_downstream_message(self: Arc) -> Result<(), TproxyError> { - let downstream_message = self - .sv1_server_channel_state - .downstream_to_sv1_server_receiver - .recv() - .await - .map_err(TproxyError::ChannelErrorReceiver)?; - - match downstream_message { - DownstreamMessages::SubmitShares(message) => { - return self.handle_submit_shares(message).await; - } - DownstreamMessages::OpenChannel(downstream_id) => { - return self.handle_open_channel_request(downstream_id).await; - } - } - } - - /// Handles share submission messages from downstream. - async fn handle_submit_shares( - self: &Arc, - message: crate::sv1::downstream::SubmitShareWithChannelId, - ) -> Result<(), TproxyError> { - // Increment vardiff counter for this downstream (only if vardiff is enabled) - if self.config.downstream_difficulty_config.enable_vardiff { - self.sv1_server_data.safe_lock(|v| { - if let Some(vardiff_state) = v.vardiff.get(&message.downstream_id) { - vardiff_state - .write() - .unwrap() - .increment_shares_since_last_update(); - } - })?; - } - - let job_version = match message.job_version { - Some(version) => version, - None => { - warn!("Received share submission without valid job version, skipping"); - return Ok(()); - } - }; - - let submit_share_extended = build_sv2_submit_shares_extended_from_sv1_submit( - &message.share, - message.channel_id, - self.sequence_counter.load(Ordering::SeqCst), - job_version, - message.version_rolling_mask, - ) - .map_err(|_| TproxyError::SV1Error)?; - - self.sv1_server_channel_state - .channel_manager_sender - 
.send(Mining::SubmitSharesExtended(submit_share_extended)) - .await - .map_err(|_| TproxyError::ChannelErrorSender)?; - - self.sequence_counter.fetch_add(1, Ordering::SeqCst); - - Ok(()) - } - - /// Handles channel opening requests from downstream when they send their first message. - async fn handle_open_channel_request( - self: &Arc, - downstream_id: u32, - ) -> Result<(), TproxyError> { - info!("SV1 Server: Opening extended mining channel for downstream {} after receiving first message", downstream_id); - - let downstreams = self - .sv1_server_data - .super_safe_lock(|v| v.downstreams.clone()); - if let Some(downstream) = Self::get_downstream(downstream_id, downstreams) { - self.open_extended_mining_channel(downstream).await?; - } else { - error!( - "Downstream {} not found when trying to open channel", - downstream_id - ); - } - - Ok(()) - } - - /// Handles messages received from the upstream SV2 server via the channel manager. - /// - /// This method processes various SV2 messages including: - /// - OpenExtendedMiningChannelSuccess: Sets up downstream connections - /// - NewExtendedMiningJob: Converts to SV1 notify messages - /// - SetNewPrevHash: Updates block template information - /// - Channel error messages (TODO: implement proper handling) - /// - /// # Arguments - /// * `first_target` - Initial difficulty target for new connections - /// * `notify_shutdown` - Broadcast channel for shutdown coordination - /// * `shutdown_complete_tx` - Channel to signal shutdown completion - /// * `status_sender` - Channel for sending status updates - /// * `task_manager` - Manager for spawned async tasks - /// - /// # Returns - /// * `Ok(())` - Message processed successfully - /// * `Err(TproxyError)` - Error processing the message - pub async fn handle_upstream_message( - self: Arc, - first_target: Target, - ) -> Result<(), TproxyError> { - let message = self - .sv1_server_channel_state - .channel_manager_receiver - .recv() - .await - 
.map_err(TproxyError::ChannelErrorReceiver)?; - - match message { - Mining::OpenExtendedMiningChannelSuccess(m) => { - debug!( - "Received OpenExtendedMiningChannelSuccess for channel id: {}", - m.channel_id - ); - let downstream_id = m.request_id; - let downstreams = self - .sv1_server_data - .super_safe_lock(|v| v.downstreams.clone()); - if let Some(downstream) = Self::get_downstream(downstream_id, downstreams) { - let initial_target = - Target::from_le_bytes(m.target.inner_as_ref().try_into().unwrap()); - downstream.downstream_data.safe_lock(|d| { - d.extranonce1 = m.extranonce_prefix.to_vec(); - d.extranonce2_len = m.extranonce_size.into(); - d.channel_id = Some(m.channel_id); - // Set the initial upstream target from OpenExtendedMiningChannelSuccess - d.set_upstream_target(initial_target); - })?; - - // Process all queued messages now that channel is established - if let Ok(queued_messages) = downstream.downstream_data.safe_lock(|d| { - let messages = d.queued_sv1_handshake_messages.clone(); - d.queued_sv1_handshake_messages.clear(); - messages - }) { - if !queued_messages.is_empty() { - info!( - "Processing {} queued Sv1 messages for downstream {}", - queued_messages.len(), - downstream_id - ); - - // Set flag to indicate we're processing queued responses - downstream.downstream_data.super_safe_lock(|data| { - data.processing_queued_sv1_handshake_responses - .store(true, std::sync::atomic::Ordering::SeqCst); - }); - - for message in queued_messages { - if let Ok(Some(response_msg)) = downstream - .downstream_data - .super_safe_lock(|data| data.handle_message(message)) - { - self.sv1_server_channel_state - .sv1_server_to_downstream_sender - .send(( - m.channel_id, - Some(downstream_id), - response_msg.into(), - )) - .map_err(|_| TproxyError::ChannelErrorSender)?; - } - } - } - } - - let set_difficulty = build_sv1_set_difficulty_from_sv2_target(first_target) - .map_err(|_| { - TproxyError::General("Failed to generate set_difficulty".into()) - })?; - // send the 
set_difficulty message to the downstream - self.sv1_server_channel_state - .sv1_server_to_downstream_sender - .send((m.channel_id, None, set_difficulty)) - .map_err(|_| TproxyError::ChannelErrorSender)?; - } else { - error!("Downstream not found for downstream_id: {}", downstream_id); - } - } - - Mining::NewExtendedMiningJob(m) => { - debug!( - "Received NewExtendedMiningJob for channel id: {}", - m.channel_id - ); - if let Some(prevhash) = self.sv1_server_data.super_safe_lock(|v| v.prevhash.clone()) - { - let notify = build_sv1_notify_from_sv2( - prevhash, - m.clone().into_static(), - self.clean_job.load(Ordering::SeqCst), - )?; - let clean_jobs = self.clean_job.load(Ordering::SeqCst); - self.clean_job.store(false, Ordering::SeqCst); - - // Update job storage based on the configured mode - let notify_parsed = notify.clone(); - self.sv1_server_data.super_safe_lock(|server_data| { - if let Some(ref mut aggregated_jobs) = server_data.aggregated_valid_jobs { - // Aggregated mode: all downstreams share the same jobs - if clean_jobs { - aggregated_jobs.clear(); - } - aggregated_jobs.push(notify_parsed); - } else if let Some(ref mut non_aggregated_jobs) = - server_data.non_aggregated_valid_jobs - { - // Non-aggregated mode: per-downstream jobs - let channel_jobs = non_aggregated_jobs - .entry(m.channel_id) - .or_insert_with(Vec::new); - if clean_jobs { - channel_jobs.clear(); - } - channel_jobs.push(notify_parsed); - } - }); - - let _ = self - .sv1_server_channel_state - .sv1_server_to_downstream_sender - .send((m.channel_id, None, notify.into())); - } - } - - Mining::SetNewPrevHash(m) => { - debug!("Received SetNewPrevHash for channel id: {}", m.channel_id); - self.clean_job.store(true, Ordering::SeqCst); - self.sv1_server_data - .super_safe_lock(|v| v.prevhash = Some(m.clone().into_static())); - } - - Mining::SetTarget(m) => { - debug!("Received SetTarget for channel id: {}", m.channel_id); - if self.config.downstream_difficulty_config.enable_vardiff { - // Vardiff 
enabled - use full difficulty management - DifficultyManager::handle_set_target_message( - m, - &self.sv1_server_data, - &self.sv1_server_channel_state.channel_manager_sender, - &self - .sv1_server_channel_state - .sv1_server_to_downstream_sender, - self.config.aggregate_channels, - ) - .await; - } else { - // Vardiff disabled - just forward the difficulty to downstreams - debug!("Vardiff disabled - forwarding SetTarget to downstreams"); - self.handle_set_target_without_vardiff(m).await; - } - } - - Mining::CloseChannel(_) => { - todo!("Handle CloseChannel message from upstream"); - } - - Mining::OpenMiningChannelError(_) => { - todo!("Handle OpenMiningChannelError message from upstream"); - } - - Mining::UpdateChannelError(_) => { - todo!("Handle UpdateChannelError message from upstream"); - } - - _ => unreachable!("Unexpected message type received from upstream"), - } - - Ok(()) - } - - /// Opens an extended mining channel for a downstream connection. - /// - /// This method initiates the SV2 channel setup process by: - /// - Calculating the initial target based on configuration - /// - Generating a unique user identity for the miner - /// - Creating an OpenExtendedMiningChannel message - /// - Sending the request to the channel manager - /// - /// # Arguments - /// * `downstream` - The downstream connection to set up a channel for - /// - /// # Returns - /// * `Ok(())` - Channel setup request sent successfully - /// * `Err(TproxyError)` - Error setting up the channel - pub async fn open_extended_mining_channel( - &self, - downstream: Arc, - ) -> Result<(), TproxyError> { - let config = &self.config.downstream_difficulty_config; - - let hashrate = config.min_individual_miner_hashrate as f64; - let shares_per_min = config.shares_per_minute as f64; - let min_extranonce_size = self.config.downstream_extranonce2_size; - let vardiff_enabled = config.enable_vardiff; - - let max_target = if vardiff_enabled { - hash_rate_to_target(hashrate, shares_per_min).unwrap() - } 
else { - // If translator doesn't manage vardiff, we rely on upstream to do that, - // so we give it more freedom by setting max_target to maximum possible value - Target::from_le_bytes([0xff; 32]) - }; - - // Store the initial target for use when no downstreams remain - self.sv1_server_data.super_safe_lock(|data| { - if data.initial_target.is_none() { - data.initial_target = Some(max_target); - } - }); - - let miner_id = self.miner_counter.fetch_add(1, Ordering::SeqCst) + 1; - let user_identity = format!("{}.miner{}", self.config.user_identity, miner_id); - - downstream - .downstream_data - .safe_lock(|d| d.user_identity = user_identity.clone())?; - - if let Ok(open_channel_msg) = build_sv2_open_extended_mining_channel( - downstream - .downstream_data - .super_safe_lock(|d| d.downstream_id), - user_identity.clone(), - hashrate as f32, - max_target, - min_extranonce_size, - ) { - self.sv1_server_channel_state - .channel_manager_sender - .send(Mining::OpenExtendedMiningChannel(open_channel_msg)) - .await - .map_err(|_| TproxyError::ChannelErrorSender)?; - } else { - error!("Failed to build OpenExtendedMiningChannel message"); - } - - Ok(()) - } - - /// Retrieves a downstream connection by ID from the provided map. - /// - /// # Arguments - /// * `downstream_id` - The ID of the downstream connection to find - /// * `downstream` - HashMap containing downstream connections - /// - /// # Returns - /// * `Some(Downstream)` - If a downstream with the given ID exists - /// * `None` - If no downstream with the given ID is found - pub fn get_downstream( - downstream_id: u32, - downstream: HashMap>, - ) -> Option> { - downstream.get(&downstream_id).cloned() - } - - /// Extracts the downstream ID from a Downstream instance. 
- /// - /// # Arguments - /// * `downstream` - The downstream connection to get the ID from - /// - /// # Returns - /// The downstream ID as a u32 - pub fn get_downstream_id(downstream: Downstream) -> u32 { - downstream - .downstream_data - .super_safe_lock(|s| s.downstream_id) - } - - /// Handles SetTarget messages when vardiff is disabled. - /// - /// This method forwards difficulty changes from upstream directly to downstream miners - /// without any variable difficulty logic. It respects the aggregated/non-aggregated - /// channel configuration. - async fn handle_set_target_without_vardiff(&self, set_target: SetTarget<'_>) { - let new_target = - Target::from_le_bytes(set_target.maximum_target.inner_as_ref().try_into().unwrap()); - debug!( - "Forwarding SetTarget to downstreams: channel_id={}, target={:?}", - set_target.channel_id, new_target - ); - - if self.config.aggregate_channels { - // Aggregated mode: send set_difficulty to ALL downstreams - self.send_set_difficulty_to_all_downstreams(new_target) - .await; - } else { - // Non-aggregated mode: send set_difficulty to specific downstream for this channel - self.send_set_difficulty_to_specific_downstream(set_target.channel_id, new_target) - .await; - } - } - - /// Sends set_difficulty to all downstreams (aggregated mode). - /// Used only when vardiff is disabled. 
- async fn send_set_difficulty_to_all_downstreams(&self, target: Target) { - let downstreams = self - .sv1_server_data - .super_safe_lock(|data| data.downstreams.clone()); - - for (downstream_id, downstream) in downstreams { - let channel_id = downstream.downstream_data.super_safe_lock(|d| d.channel_id); - - if let Some(channel_id) = channel_id { - // Update the downstream's target - _ = downstream.downstream_data.safe_lock(|d| { - d.set_upstream_target(target); - d.set_pending_target(target); - }); - - // Send set_difficulty message - if let Ok(set_difficulty_msg) = build_sv1_set_difficulty_from_sv2_target(target) { - if let Err(e) = self - .sv1_server_channel_state - .sv1_server_to_downstream_sender - .send((channel_id, Some(downstream_id), set_difficulty_msg)) - { - error!( - "Failed to send SetDifficulty to downstream {}: {:?}", - downstream_id, e - ); - } else { - debug!( - "Sent SetDifficulty to downstream {} (vardiff disabled)", - downstream_id - ); - } - } - } - } - } - - /// Sends set_difficulty to the specific downstream associated with a channel (non-aggregated - /// mode). - /// Used only when vardiff is disabled. 
- async fn send_set_difficulty_to_specific_downstream(&self, channel_id: u32, target: Target) { - let affected_downstream = self.sv1_server_data.super_safe_lock(|data| { - data.downstreams - .iter() - .find_map(|(downstream_id, downstream)| { - downstream.downstream_data.super_safe_lock(|d| { - if d.channel_id == Some(channel_id) { - Some((*downstream_id, downstream.clone())) - } else { - None - } - }) - }) - }); - - if let Some((downstream_id, downstream)) = affected_downstream { - // Update the downstream's target - _ = downstream.downstream_data.safe_lock(|d| { - d.set_upstream_target(target); - d.set_pending_target(target); - }); - - // Send set_difficulty message - if let Ok(set_difficulty_msg) = build_sv1_set_difficulty_from_sv2_target(target) { - if let Err(e) = self - .sv1_server_channel_state - .sv1_server_to_downstream_sender - .send((channel_id, Some(downstream_id), set_difficulty_msg)) - { - error!( - "Failed to send SetDifficulty to downstream {}: {:?}", - downstream_id, e - ); - } else { - debug!( - "Sent SetDifficulty to downstream {} for channel {} (vardiff disabled)", - downstream_id, channel_id - ); - } - } - } else { - warn!( - "No downstream found for channel {} when vardiff disabled", - channel_id - ); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::config::{DownstreamDifficultyConfig, TranslatorConfig, Upstream}; - use async_channel::unbounded; - use std::{collections::HashMap, str::FromStr}; - use stratum_apps::key_utils::Secp256k1PublicKey; - - fn create_test_config() -> TranslatorConfig { - let pubkey_str = "9bDuixKmZqAJnrmP746n8zU1wyAQRrus7th9dxnkPg6RzQvCnan"; - let pubkey = Secp256k1PublicKey::from_str(pubkey_str).unwrap(); - - let upstream = Upstream::new("127.0.0.1".to_string(), 4444, pubkey); - let difficulty_config = DownstreamDifficultyConfig::new(100.0, 5.0, true); - - TranslatorConfig::new( - vec![upstream], - "0.0.0.0".to_string(), // downstream_address - 3333, // downstream_port - difficulty_config, // 
downstream_difficulty_config - 2, // max_supported_version - 1, // min_supported_version - 4, // downstream_extranonce2_size - "test_user".to_string(), - true, // aggregate_channels - ) - } - - fn create_test_sv1_server() -> Sv1Server { - let (cm_sender, _cm_receiver) = unbounded(); - let (_downstream_sender, cm_receiver) = unbounded(); - let config = create_test_config(); - let addr = "127.0.0.1:3333".parse().unwrap(); - - Sv1Server::new(addr, cm_receiver, cm_sender, config) - } - - #[test] - fn test_sv1_server_creation() { - let server = create_test_sv1_server(); - - assert_eq!(server.shares_per_minute, 5.0); - assert_eq!(server.listener_addr.ip().to_string(), "127.0.0.1"); - assert_eq!(server.listener_addr.port(), 3333); - assert_eq!(server.config.user_identity, "test_user"); - assert!(server.config.aggregate_channels); - } - - #[test] - fn test_sv1_server_aggregated_config() { - let mut config = create_test_config(); - config.aggregate_channels = true; - config.downstream_difficulty_config.enable_vardiff = true; - - let (cm_sender, _cm_receiver) = unbounded(); - let (_downstream_sender, cm_receiver) = unbounded(); - let addr = "127.0.0.1:3333".parse().unwrap(); - - let server = Sv1Server::new(addr, cm_receiver, cm_sender, config); - - assert!(server.config.aggregate_channels); - assert!(server.config.downstream_difficulty_config.enable_vardiff); - } - - #[test] - fn test_sv1_server_non_aggregated_config() { - let mut config = create_test_config(); - config.aggregate_channels = false; - config.downstream_difficulty_config.enable_vardiff = false; - - let (cm_sender, _cm_receiver) = unbounded(); - let (_downstream_sender, cm_receiver) = unbounded(); - let addr = "127.0.0.1:3333".parse().unwrap(); - - let server = Sv1Server::new(addr, cm_receiver, cm_sender, config); - - assert!(!server.config.aggregate_channels); - assert!(!server.config.downstream_difficulty_config.enable_vardiff); - } - - #[test] - fn test_get_downstream_basic() { - let downstreams = 
HashMap::new(); - - // Test non-existing downstream - let not_found = Sv1Server::get_downstream(999, downstreams); - assert!(not_found.is_none()); - } - - #[tokio::test] - async fn test_send_set_difficulty_to_all_downstreams_empty() { - let server = create_test_sv1_server(); - let target: Target = hash_rate_to_target(200.0, 5.0).unwrap(); - - // Test with empty downstreams - server.send_set_difficulty_to_all_downstreams(target).await; - - // Should not crash with empty downstreams - } - - #[tokio::test] - async fn test_send_set_difficulty_to_specific_downstream_not_found() { - let server = create_test_sv1_server(); - let target: Target = hash_rate_to_target(200.0, 5.0).unwrap(); - let channel_id = 1u32; - - // Test with no downstreams - server - .send_set_difficulty_to_specific_downstream(channel_id, target) - .await; - - // Should not crash when no downstreams are found - } - - #[tokio::test] - async fn test_handle_set_target_without_vardiff_aggregated() { - let mut config = create_test_config(); - config.downstream_difficulty_config.enable_vardiff = false; - config.aggregate_channels = true; - - let (cm_sender, _cm_receiver) = unbounded(); - let (_downstream_sender, cm_receiver) = unbounded(); - let addr = "127.0.0.1:3333".parse().unwrap(); - - let server = Sv1Server::new(addr, cm_receiver, cm_sender, config); - let target: Target = hash_rate_to_target(200.0, 5.0).unwrap(); - - let set_target = SetTarget { - channel_id: 1, - maximum_target: target.to_le_bytes().into(), - }; - - // Test should not panic and should handle the message - server.handle_set_target_without_vardiff(set_target).await; - } - - #[tokio::test] - async fn test_handle_set_target_without_vardiff_non_aggregated() { - let mut config = create_test_config(); - config.downstream_difficulty_config.enable_vardiff = false; - config.aggregate_channels = false; - - let (cm_sender, _cm_receiver) = unbounded(); - let (_downstream_sender, cm_receiver) = unbounded(); - let addr = 
"127.0.0.1:3333".parse().unwrap(); - - let server = Sv1Server::new(addr, cm_receiver, cm_sender, config); - let target: Target = hash_rate_to_target(200.0, 5.0).unwrap(); - - let set_target = SetTarget { - channel_id: 1, - maximum_target: target.to_le_bytes().into(), - }; - - // Test should not panic and should handle the message - server.handle_set_target_without_vardiff(set_target).await; - } - - #[test] - fn test_sv1_server_counters() { - let server = create_test_sv1_server(); - - // Test initial values - assert_eq!(server.miner_counter.load(Ordering::SeqCst), 0); - assert_eq!(server.sequence_counter.load(Ordering::SeqCst), 0); - - // Test incrementing - let miner_id = server.miner_counter.fetch_add(1, Ordering::SeqCst); - assert_eq!(miner_id, 0); - assert_eq!(server.miner_counter.load(Ordering::SeqCst), 1); - - let seq_id = server.sequence_counter.fetch_add(1, Ordering::SeqCst); - assert_eq!(seq_id, 0); - assert_eq!(server.sequence_counter.load(Ordering::SeqCst), 1); - } - - #[test] - fn test_sv1_server_clean_job_flag() { - let server = create_test_sv1_server(); - - // Test initial value - assert!(server.clean_job.load(Ordering::SeqCst)); - - // Test setting to false - server.clean_job.store(false, Ordering::SeqCst); - assert!(!server.clean_job.load(Ordering::SeqCst)); - - // Test setting back to true - server.clean_job.store(true, Ordering::SeqCst); - assert!(server.clean_job.load(Ordering::SeqCst)); - } -} diff --git a/roles/translator/src/lib/sv2/channel_manager/channel.rs b/roles/translator/src/lib/sv2/channel_manager/channel.rs deleted file mode 100644 index fc95a2c59a..0000000000 --- a/roles/translator/src/lib/sv2/channel_manager/channel.rs +++ /dev/null @@ -1,36 +0,0 @@ -use crate::sv2::upstream::upstream::EitherFrame; -use async_channel::{Receiver, Sender}; -use stratum_apps::stratum_core::parsers_sv2::Mining; -use tracing::debug; - -#[derive(Clone, Debug)] -pub struct ChannelState { - pub upstream_sender: Sender, - pub upstream_receiver: Receiver, - 
pub sv1_server_sender: Sender>, - pub sv1_server_receiver: Receiver>, -} - -impl ChannelState { - pub fn new( - upstream_sender: Sender, - upstream_receiver: Receiver, - sv1_server_sender: Sender>, - sv1_server_receiver: Receiver>, - ) -> Self { - Self { - upstream_sender, - upstream_receiver, - sv1_server_sender, - sv1_server_receiver, - } - } - - pub fn drop(&self) { - debug!("Dropping channel manager channels"); - self.upstream_receiver.close(); - self.upstream_sender.close(); - self.sv1_server_receiver.close(); - self.sv1_server_sender.close(); - } -} diff --git a/roles/translator/src/lib/sv2/channel_manager/channel_manager.rs b/roles/translator/src/lib/sv2/channel_manager/channel_manager.rs deleted file mode 100644 index e242b32a4e..0000000000 --- a/roles/translator/src/lib/sv2/channel_manager/channel_manager.rs +++ /dev/null @@ -1,864 +0,0 @@ -use crate::{ - error::TproxyError, - status::{handle_error, Status, StatusSender}, - sv2::{ - channel_manager::{ - channel::ChannelState, - data::{ChannelManagerData, ChannelMode}, - }, - upstream::upstream::{EitherFrame, Message, StdFrame}, - }, - task_manager::TaskManager, - utils::{into_static, ShutdownMessage}, -}; -use async_channel::{Receiver, Sender}; -use std::sync::{Arc, RwLock}; -use stratum_apps::{ - custom_mutex::Mutex, - stratum_core::{ - channels_sv2::client::extended::ExtendedChannel, - framing_sv2::framing::Frame, - handlers_sv2::HandleMiningMessagesFromServerAsync, - mining_sv2::OpenExtendedMiningChannelSuccess, - parsers_sv2::{AnyMessage, Mining}, - }, -}; -use tokio::sync::{broadcast, mpsc}; -use tracing::{debug, error, info, warn}; - -/// Extra bytes allocated for translator search space in aggregated mode. -/// This allows the translator to manage multiple downstream connections -/// by allocating unique extranonce prefixes to each downstream. 
-const AGGREGATED_MODE_TRANSLATOR_SEARCH_SPACE_BYTES: usize = 4; - -/// Type alias for SV2 mining messages with static lifetime -pub type Sv2Message = Mining<'static>; - -/// Manages SV2 channels and message routing between upstream and downstream. -/// -/// The ChannelManager serves as the central component that bridges SV2 upstream -/// connections with SV1 downstream connections. It handles: -/// - SV2 channel lifecycle management (open, close, error handling) -/// - Message translation and routing between protocols -/// - Extranonce management for aggregated vs non-aggregated modes -/// - Share submission processing and validation -/// - Job distribution to downstream connections -/// -/// The manager supports two operational modes: -/// - Aggregated: All downstream connections share a single extended channel -/// - Non-aggregated: Each downstream connection gets its own extended channel -/// -/// This design allows the translator to efficiently manage multiple mining -/// connections while maintaining proper isolation and state management. -#[derive(Debug, Clone)] -pub struct ChannelManager { - pub channel_state: ChannelState, - pub channel_manager_data: Arc>, -} - -impl ChannelManager { - /// Creates a new ChannelManager instance. 
- /// - /// # Arguments - /// * `upstream_sender` - Channel to send messages to upstream - /// * `upstream_receiver` - Channel to receive messages from upstream - /// * `sv1_server_sender` - Channel to send messages to SV1 server - /// * `sv1_server_receiver` - Channel to receive messages from SV1 server - /// * `mode` - Operating mode (Aggregated or NonAggregated) - /// - /// # Returns - /// A new ChannelManager instance ready to handle message routing - pub fn new( - upstream_sender: Sender, - upstream_receiver: Receiver, - sv1_server_sender: Sender>, - sv1_server_receiver: Receiver>, - mode: ChannelMode, - ) -> Self { - let channel_state = ChannelState::new( - upstream_sender, - upstream_receiver, - sv1_server_sender, - sv1_server_receiver, - ); - let channel_manager_data = Arc::new(Mutex::new(ChannelManagerData::new(mode))); - Self { - channel_state, - channel_manager_data, - } - } - - /// Spawns and runs the main channel manager task loop. - /// - /// This method creates an async task that handles all message routing for the - /// channel manager. The task runs a select loop that processes: - /// - Shutdown signals for graceful termination - /// - Messages from upstream SV2 server - /// - Messages from downstream SV1 server - /// - /// The task continues running until a shutdown signal is received or an - /// unrecoverable error occurs. It ensures proper cleanup of resources - /// and error reporting. 
- /// - /// # Arguments - /// * `notify_shutdown` - Broadcast channel for receiving shutdown signals - /// * `shutdown_complete_tx` - Channel to signal when shutdown is complete - /// * `status_sender` - Channel for sending status updates and errors - /// * `task_manager` - Manager for tracking spawned tasks - pub async fn run_channel_manager_tasks( - self: Arc, - notify_shutdown: broadcast::Sender, - shutdown_complete_tx: mpsc::Sender<()>, - status_sender: Sender, - task_manager: Arc, - ) { - let mut shutdown_rx = notify_shutdown.subscribe(); - let status_sender = StatusSender::ChannelManager(status_sender); - task_manager.spawn(async move { - loop { - tokio::select! { - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - info!("ChannelManager: received shutdown signal."); - break; - } - Ok(ShutdownMessage::UpstreamReconnectedResetAndShutdownDownstreams) => { - info!("ChannelManager: upstream reconnected, resetting channel state."); - self.channel_manager_data.super_safe_lock(|data| { - data.reset_for_upstream_reconnection(); - }); - // Note: DownstreamShutdownAll handling is done by SV1Server separately - } - Ok(_) => { - // Ignore other shutdown message types - } - Err(e) => { - // Handle channel lag gracefully - don't shutdown on lag errors - if let tokio::sync::broadcast::error::RecvError::Lagged(_) = e { - warn!("ChannelManager: broadcast channel lagged, continuing: {e}"); - } else { - error!("ChannelManager: failed to receive shutdown signal: {e}"); - break; - } - } - } - } - res = Self::handle_upstream_message(self.clone()) => { - if let Err(e) = res { - handle_error(&status_sender, e).await; - break; - } - }, - res = Self::handle_downstream_message(self.clone()) => { - if let Err(e) = res { - handle_error(&status_sender, e).await; - break; - } - }, - else => { - warn!("All channel manager message streams closed. 
Exiting..."); - break; - } - } - } - - self.channel_state.drop(); - drop(shutdown_complete_tx); - warn!("ChannelManager: unified message loop exited."); - }); - } - - /// Handles messages received from the upstream SV2 server. - /// - /// This method processes SV2 messages from upstream and routes them appropriately: - /// - Mining messages: Processed through the roles logic and forwarded to SV1 server - /// - Channel responses: Handled to manage channel lifecycle - /// - Job notifications: Converted and distributed to downstream connections - /// - Error messages: Logged and handled appropriately - /// - /// The method implements the core SV2 protocol logic for channel management, - /// including handling both aggregated and non-aggregated channel modes. - /// - /// # Returns - /// * `Ok(())` - Message processed successfully - /// * `Err(TproxyError)` - Error processing the message - pub async fn handle_upstream_message(self: Arc) -> Result<(), TproxyError> { - let mut channel_manager = self.get_channel_manager(); - let message = self - .channel_state - .upstream_receiver - .recv() - .await - .map_err(TproxyError::ChannelErrorReceiver)?; - - let Frame::Sv2(mut frame) = message else { - warn!("Received non-SV2 frame from upstream"); - return Ok(()); - }; - - let header = frame.get_header().ok_or_else(|| { - error!("Missing header in SV2 frame"); - TproxyError::General("Missing frame header".into()) - })?; - - let message_type = header.msg_type(); - let mut payload = frame.payload().to_vec(); - - let message: AnyMessage<'_> = into_static( - (message_type, payload.as_mut_slice()) - .try_into() - .map_err(|e| { - error!("Failed to parse upstream frame into AnyMessage: {:?}", e); - TproxyError::General("Failed to parse AnyMessage".into()) - })?, - )?; - - match message { - Message::Mining(_) => { - channel_manager - .handle_mining_message_frame_from_server(None, message_type, &mut payload) - .await?; - } - _ => { - warn!("Unhandled upstream message type: {:?}", 
message); - } - } - - Ok(()) - } - - /// Handles messages received from the downstream SV1 server. - /// - /// This method processes requests from the SV1 server, primarily: - /// - OpenExtendedMiningChannel: Sets up new SV2 channels for downstream connections - /// - SubmitSharesExtended: Processes share submissions from miners - /// - /// For channel opening, the method handles both aggregated and non-aggregated modes: - /// - Aggregated: Creates extended channels using extranonce prefixes - /// - Non-aggregated: Opens individual extended channels with the upstream for each downstream - /// - /// Share submissions are validated, processed through the channel logic, - /// and forwarded to the upstream server with appropriate extranonce handling. - /// - /// # Returns - /// * `Ok(())` - Message processed successfully - /// * `Err(TproxyError)` - Error processing the message - pub async fn handle_downstream_message(self: Arc) -> Result<(), TproxyError> { - let message = self - .channel_state - .sv1_server_receiver - .recv() - .await - .map_err(TproxyError::ChannelErrorReceiver)?; - match message { - Mining::OpenExtendedMiningChannel(m) => { - let mut open_channel_msg = m.clone(); - let mut user_identity = std::str::from_utf8(m.user_identity.as_ref()) - .map(|s| s.to_string()) - .unwrap_or_else(|_| "unknown".to_string()); - let hashrate = m.nominal_hash_rate; - let min_extranonce_size = m.min_extranonce_size as usize; - let mode = self - .channel_manager_data - .super_safe_lock(|c| c.mode.clone()); - - if mode == ChannelMode::Aggregated { - if self - .channel_manager_data - .super_safe_lock(|c| c.upstream_extended_channel.is_some()) - { - // We already have the unique channel open and so we create a new - // extranonce prefix and we send the - // OpenExtendedMiningChannelSuccess message directly to the sv1 - // server - let target = self.channel_manager_data.super_safe_lock(|c| { - *c.upstream_extended_channel - .as_ref() - .unwrap() - .read() - .unwrap() - 
.get_target() - }); - let new_extranonce_prefix = - self.channel_manager_data.super_safe_lock(|c| { - c.extranonce_prefix_factory - .as_ref() - .unwrap() - .safe_lock(|e| { - e.next_prefix_extended( - open_channel_msg.min_extranonce_size.into(), - ) - }) - .ok() - .and_then(|r| r.ok()) - }); - let new_extranonce_size = self.channel_manager_data.super_safe_lock(|c| { - c.extranonce_prefix_factory - .as_ref() - .unwrap() - .safe_lock(|e| e.get_range2_len()) - .unwrap() - }); - if let Some(new_extranonce_prefix) = new_extranonce_prefix { - if new_extranonce_size >= open_channel_msg.min_extranonce_size as usize - { - let next_channel_id = - self.channel_manager_data.super_safe_lock(|c| { - c.extended_channels.keys().max().unwrap_or(&0) + 1 - }); - let new_downstream_extended_channel = ExtendedChannel::new( - next_channel_id, - user_identity.clone(), - new_extranonce_prefix - .clone() - .into_b032() - .into_static() - .to_vec(), - target, - hashrate, - true, - new_extranonce_size as u16, - ); - self.channel_manager_data.super_safe_lock(|c| { - c.extended_channels.insert( - next_channel_id, - Arc::new(RwLock::new(new_downstream_extended_channel)), - ); - }); - let success_message = Mining::OpenExtendedMiningChannelSuccess( - OpenExtendedMiningChannelSuccess { - request_id: open_channel_msg.request_id, - channel_id: next_channel_id, - target: target.to_le_bytes().into(), - extranonce_size: new_extranonce_size as u16, - extranonce_prefix: new_extranonce_prefix.clone().into(), - }, - ); - - self.channel_state - .sv1_server_sender - .send(success_message) - .await - .map_err(|e| { - error!( - "Failed to send open channel message to upstream: {:?}", - e - ); - TproxyError::ChannelErrorSender - })?; - // get the last active job from the upstream extended channel - let last_active_job = - self.channel_manager_data.super_safe_lock(|c| { - c.upstream_extended_channel - .as_ref() - .and_then(|ch| ch.read().ok()) - .and_then(|ch| ch.get_active_job().map(|j| j.0.clone())) - }); - - 
// get the last chain tip from the upstream extended channel - let last_chain_tip = - self.channel_manager_data.super_safe_lock(|c| { - c.upstream_extended_channel - .as_ref() - .and_then(|ch| ch.read().ok()) - .and_then(|ch| ch.get_chain_tip().cloned()) - }); - // update the downstream channel with the active job and the chain - // tip - if let Some(mut job) = last_active_job { - if let Some(last_chain_tip) = last_chain_tip { - // update the downstream channel with the active chain tip - self.channel_manager_data.super_safe_lock(|c| { - if let Some(ch) = - c.extended_channels.get(&next_channel_id) - { - ch.write() - .unwrap() - .set_chain_tip(last_chain_tip.clone()); - } - }); - } - job.channel_id = next_channel_id; - // update the downstream channel with the active job - self.channel_manager_data.super_safe_lock(|c| { - if let Some(ch) = c.extended_channels.get(&next_channel_id) - { - let _ = ch - .write() - .unwrap() - .on_new_extended_mining_job(job.clone()); - } - }); - - self.channel_state - .sv1_server_sender - .send(Mining::NewExtendedMiningJob(job.clone())) - .await - .map_err(|e| { - error!("Failed to send last new extended mining job to upstream: {:?}", e); - TproxyError::ChannelErrorSender - })?; - } - } - } - return Ok(()); - } else { - // We don't have the unique channel open yet and so we send the - // OpenExtendedMiningChannel message to the upstream - // Before doing that we need to truncate the user identity at the - // first dot and append .translator-proxy - // Truncate at the first dot and append .translator-proxy - let translator_identity = if let Some(dot_index) = user_identity.find('.') { - format!("{}.translator-proxy", &user_identity[..dot_index]) - } else { - format!("{user_identity}.translator-proxy") - }; - user_identity = translator_identity; - open_channel_msg.user_identity = - user_identity.as_bytes().to_vec().try_into().unwrap(); - } - } - // In aggregated mode, add extra bytes for translator search space allocation - let 
upstream_min_extranonce_size = self.channel_manager_data.super_safe_lock(|c| { - if c.mode == ChannelMode::Aggregated { - min_extranonce_size + AGGREGATED_MODE_TRANSLATOR_SEARCH_SPACE_BYTES - } else { - min_extranonce_size - } - }); - - // Update the message with the adjusted extranonce size for upstream - open_channel_msg.min_extranonce_size = upstream_min_extranonce_size as u16; - - // Store the user identity, hashrate, and original downstream extranonce size - self.channel_manager_data.super_safe_lock(|c| { - c.pending_channels.insert( - open_channel_msg.request_id, - (user_identity, hashrate, min_extranonce_size), - ); - }); - - info!( - "Sending OpenExtendedMiningChannel message to upstream: {:?}", - open_channel_msg - ); - - let frame = StdFrame::try_from(Message::Mining(Mining::OpenExtendedMiningChannel( - open_channel_msg, - ))) - .map_err(TproxyError::ParserError)?; - self.channel_state - .upstream_sender - .send(frame.into()) - .await - .map_err(|e| { - error!("Failed to send open channel message to upstream: {:?}", e); - TproxyError::ChannelErrorSender - })?; - } - Mining::SubmitSharesExtended(mut m) => { - let value = self.channel_manager_data.super_safe_lock(|c| { - let extended_channel = c.extended_channels.get(&m.channel_id); - if let Some(extended_channel) = extended_channel { - let channel = extended_channel.write(); - if let Ok(mut channel) = channel { - return Some(( - channel.validate_share(m.clone()), - channel.get_share_accounting().clone(), - )); - } - } - None - }); - if let Some((Ok(_result), _share_accounting)) = value { - let mode = self - .channel_manager_data - .super_safe_lock(|c| c.mode.clone()); - - if mode == ChannelMode::Aggregated - && self - .channel_manager_data - .super_safe_lock(|c| c.upstream_extended_channel.is_some()) - { - let upstream_extended_channel_id = - self.channel_manager_data.super_safe_lock(|c| { - let upstream_extended_channel = c - .upstream_extended_channel - .as_ref() - .unwrap() - .read() - .unwrap(); - 
upstream_extended_channel.get_channel_id() - }); - - // In aggregated mode, use a single sequence counter for all valid shares - m.sequence_number = self.channel_manager_data.super_safe_lock(|c| { - c.next_share_sequence_number(upstream_extended_channel_id) - }); - // Get the downstream channel's extranonce prefix (contains - // upstream prefix + translator proxy prefix) - let downstream_extranonce_prefix = - self.channel_manager_data.super_safe_lock(|c| { - c.extended_channels.get(&m.channel_id).map(|channel| { - channel.read().unwrap().get_extranonce_prefix().clone() - }) - }); - // Get the length of the upstream prefix (range0) - let range0_len = self.channel_manager_data.super_safe_lock(|c| { - c.extranonce_prefix_factory - .as_ref() - .unwrap() - .safe_lock(|e| e.get_range0_len()) - .unwrap() - }); - if let Some(downstream_extranonce_prefix) = downstream_extranonce_prefix { - // Skip the upstream prefix (range0) and take the remaining - // bytes (translator proxy prefix) - let translator_prefix = &downstream_extranonce_prefix[range0_len..]; - // Create new extranonce: translator proxy prefix + miner's - // extranonce - let mut new_extranonce = translator_prefix.to_vec(); - new_extranonce.extend_from_slice(m.extranonce.as_ref()); - // Replace the original extranonce with the modified one for - // upstream submission - m.extranonce = new_extranonce.try_into()?; - } - // We need to set the channel id to the upstream extended - // channel id - m.channel_id = upstream_extended_channel_id; - } else { - // In non-aggregated mode, each downstream channel has its own sequence - // counter - m.sequence_number = self - .channel_manager_data - .super_safe_lock(|c| c.next_share_sequence_number(m.channel_id)); - - // Check if we have a per-channel factory for extranonce adjustment - let channel_factory = self.channel_manager_data.super_safe_lock(|c| { - c.extranonce_factories - .as_ref() - .and_then(|factories| factories.get(&m.channel_id).cloned()) - }); - - if let 
Some(factory) = channel_factory { - // We need to adjust the extranonce for this channel - let downstream_extranonce_prefix = - self.channel_manager_data.super_safe_lock(|c| { - c.extended_channels.get(&m.channel_id).map(|channel| { - channel.read().unwrap().get_extranonce_prefix().clone() - }) - }); - let range0_len = factory - .safe_lock(|e| e.get_range0_len()) - .expect("Failed to access extranonce factory range - this should not happen"); - if let Some(downstream_extranonce_prefix) = downstream_extranonce_prefix - { - // Skip the upstream prefix (range0) and take the remaining - // bytes (translator proxy prefix) - let translator_prefix = &downstream_extranonce_prefix[range0_len..]; - // Create new extranonce: translator proxy prefix + miner's - // extranonce - let mut new_extranonce = translator_prefix.to_vec(); - new_extranonce.extend_from_slice(m.extranonce.as_ref()); - // Replace the original extranonce with the modified one for - // upstream submission - m.extranonce = new_extranonce.try_into()?; - } - } - } - - info!( - "SubmitSharesExtended: valid share, forwarding it to upstream | channel_id: {}, sequence_number: {} β˜‘οΈ", - m.channel_id, m.sequence_number - ); - let frame: StdFrame = Message::Mining(Mining::SubmitSharesExtended(m)) - .try_into() - .map_err(TproxyError::ParserError)?; - let frame: EitherFrame = frame.into(); - self.channel_state - .upstream_sender - .send(frame) - .await - .map_err(|e| { - error!("Error while sending message to upstream: {e:?}"); - TproxyError::ChannelErrorSender - })?; - } - } - Mining::UpdateChannel(mut m) => { - debug!("Received UpdateChannel from SV1Server: {:?}", m); - let mode = self - .channel_manager_data - .super_safe_lock(|c| c.mode.clone()); - - if mode == ChannelMode::Aggregated { - let upstream_extended_channel_id = - self.channel_manager_data.super_safe_lock(|c| { - c.upstream_extended_channel - .as_ref() - .unwrap() - .read() - .unwrap() - .get_channel_id() - }); - // We need to set the channel id to 
the upstream extended - // channel id - m.channel_id = upstream_extended_channel_id; - } - info!( - "Sending UpdateChannel message to upstream for channel_id: {:?}", - m.channel_id - ); - // Forward UpdateChannel message to upstream - let frame = StdFrame::try_from(Message::Mining(Mining::UpdateChannel(m))) - .map_err(TproxyError::ParserError)?; - - self.channel_state - .upstream_sender - .send(frame.into()) - .await - .map_err(|e| { - error!("Failed to send UpdateChannel message to upstream: {:?}", e); - TproxyError::ChannelErrorSender - })?; - } - Mining::CloseChannel(m) => { - debug!("Received CloseChannel from SV1Server: {m}"); - let frame = StdFrame::try_from(Message::Mining(Mining::CloseChannel(m))) - .map_err(TproxyError::ParserError)?; - - self.channel_state - .upstream_sender - .send(frame.into()) - .await - .map_err(|e| { - error!("Failed to send UpdateChannel message to upstream: {:?}", e); - TproxyError::ChannelErrorSender - })?; - } - _ => { - warn!("Unhandled downstream message: {:?}", message); - } - } - - Ok(()) - } - - pub fn get_channel_manager(&self) -> ChannelManager { - ChannelManager { - channel_manager_data: self.channel_manager_data.clone(), - channel_state: self.channel_state.clone(), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::sv2::channel_manager::data::ChannelMode; - use async_channel::unbounded; - use stratum_apps::stratum_core::mining_sv2::{ - OpenExtendedMiningChannel, SubmitSharesExtended, UpdateChannel, - }; - - fn create_test_channel_manager(mode: ChannelMode) -> ChannelManager { - let (upstream_sender, _upstream_receiver) = unbounded(); - let (_upstream_sender2, upstream_receiver) = unbounded(); - let (sv1_server_sender, _sv1_server_receiver) = unbounded(); - let (_sv1_server_sender2, sv1_server_receiver) = unbounded(); - - ChannelManager::new( - upstream_sender, - upstream_receiver, - sv1_server_sender, - sv1_server_receiver, - mode, - ) - } - - #[test] - fn test_channel_manager_creation_aggregated() { - 
let manager = create_test_channel_manager(ChannelMode::Aggregated); - - let mode = manager - .channel_manager_data - .super_safe_lock(|data| data.mode.clone()); - assert_eq!(mode, ChannelMode::Aggregated); - } - - #[test] - fn test_channel_manager_creation_non_aggregated() { - let manager = create_test_channel_manager(ChannelMode::NonAggregated); - - let mode = manager - .channel_manager_data - .super_safe_lock(|data| data.mode.clone()); - assert_eq!(mode, ChannelMode::NonAggregated); - } - - #[test] - fn test_get_channel_manager() { - let manager = create_test_channel_manager(ChannelMode::Aggregated); - let cloned_manager = manager.get_channel_manager(); - - // Should be a different instance but share the same data - let original_mode = manager - .channel_manager_data - .super_safe_lock(|data| data.mode.clone()); - let cloned_mode = cloned_manager - .channel_manager_data - .super_safe_lock(|data| data.mode.clone()); - - assert_eq!(original_mode, cloned_mode); - } - - #[tokio::test] - async fn test_handle_downstream_open_channel_message() { - let manager = create_test_channel_manager(ChannelMode::NonAggregated); - - // Create an OpenExtendedMiningChannel message - let open_channel = OpenExtendedMiningChannel { - request_id: 1, - user_identity: "test_user".as_bytes().to_vec().try_into().unwrap(), - nominal_hash_rate: 1000.0, - max_target: vec![0xFFu8; 32].try_into().unwrap(), - min_extranonce_size: 4, - }; - - // Store the pending channel information - manager.channel_manager_data.super_safe_lock(|data| { - data.pending_channels - .insert(1, ("test_user".to_string(), 1000.0, 4)); - }); - - // Test that the message can be handled without panicking - // In a real test environment, we would need to mock the upstream sender - // For now, we just verify the channel manager can process the message type - let mining_message = Mining::OpenExtendedMiningChannel(open_channel); - - // Verify the message can be processed (would normally be sent to upstream) - match 
mining_message { - Mining::OpenExtendedMiningChannel(msg) => { - assert_eq!(msg.request_id, 1); - assert_eq!(msg.nominal_hash_rate, 1000.0); - assert_eq!(msg.min_extranonce_size, 4); - } - _ => panic!("Expected OpenExtendedMiningChannel"), - } - } - - #[tokio::test] - async fn test_handle_downstream_submit_shares_message() { - let _manager = create_test_channel_manager(ChannelMode::NonAggregated); - - // Create a SubmitSharesExtended message - let submit_shares = SubmitSharesExtended { - channel_id: 1, - sequence_number: 100, - job_id: 42, - nonce: 0x12345678, - ntime: 1234567890, - version: 0x20000000, - extranonce: vec![0x01, 0x02, 0x03, 0x04].try_into().unwrap(), - }; - - // Test that the message can be handled - let mining_message = Mining::SubmitSharesExtended(submit_shares); - - // Verify the message structure - match mining_message { - Mining::SubmitSharesExtended(msg) => { - assert_eq!(msg.channel_id, 1); - assert_eq!(msg.sequence_number, 100); - assert_eq!(msg.job_id, 42); - assert_eq!(msg.nonce, 0x12345678); - } - _ => panic!("Expected SubmitSharesExtended"), - } - } - - #[tokio::test] - async fn test_handle_downstream_update_channel_message() { - let _manager = create_test_channel_manager(ChannelMode::Aggregated); - - // Create an UpdateChannel message - let update_channel = UpdateChannel { - channel_id: 1, - nominal_hash_rate: 2000.0, - maximum_target: [0xFFu8; 32].try_into().unwrap(), - }; - - // Test that the message can be handled - let mining_message = Mining::UpdateChannel(update_channel); - - // Verify the message structure - match mining_message { - Mining::UpdateChannel(msg) => { - assert_eq!(msg.channel_id, 1); - assert_eq!(msg.nominal_hash_rate, 2000.0); - } - _ => panic!("Expected UpdateChannel"), - } - } - - #[test] - fn test_channel_manager_debug() { - let manager = create_test_channel_manager(ChannelMode::Aggregated); - - // Test that Debug trait is implemented - let debug_str = format!("{:?}", manager); - 
assert!(debug_str.contains("ChannelManager")); - } - - #[test] - fn test_channel_manager_clone() { - let manager = create_test_channel_manager(ChannelMode::Aggregated); - let cloned = manager.clone(); - - // Verify that both managers share the same underlying data - let original_mode = manager - .channel_manager_data - .super_safe_lock(|data| data.mode.clone()); - let cloned_mode = cloned - .channel_manager_data - .super_safe_lock(|data| data.mode.clone()); - - assert_eq!(original_mode, cloned_mode); - } - - #[test] - fn test_channel_manager_data_access() { - let manager = create_test_channel_manager(ChannelMode::NonAggregated); - - // Test that we can access and modify channel manager data - manager.channel_manager_data.super_safe_lock(|data| { - // Add a pending channel - data.pending_channels - .insert(1, ("test".to_string(), 100.0, 4)); - }); - - let has_pending = manager - .channel_manager_data - .super_safe_lock(|data| data.pending_channels.contains_key(&1)); - - assert!(has_pending); - } - - #[test] - fn test_channel_manager_mode_consistency() { - let aggregated_manager = create_test_channel_manager(ChannelMode::Aggregated); - let non_aggregated_manager = create_test_channel_manager(ChannelMode::NonAggregated); - - let agg_mode = aggregated_manager - .channel_manager_data - .super_safe_lock(|data| data.mode.clone()); - let non_agg_mode = non_aggregated_manager - .channel_manager_data - .super_safe_lock(|data| data.mode.clone()); - - assert_eq!(agg_mode, ChannelMode::Aggregated); - assert_eq!(non_agg_mode, ChannelMode::NonAggregated); - assert_ne!(agg_mode, non_agg_mode); - } -} diff --git a/roles/translator/src/lib/sv2/channel_manager/data.rs b/roles/translator/src/lib/sv2/channel_manager/data.rs deleted file mode 100644 index c79bae825b..0000000000 --- a/roles/translator/src/lib/sv2/channel_manager/data.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; -use stratum_apps::{ - custom_mutex::Mutex, - 
stratum_core::{ - channels_sv2::client::extended::ExtendedChannel, mining_sv2::ExtendedExtranonce, - }, -}; - -/// Defines the operational mode for channel management. -/// -/// The channel manager can operate in two different modes that affect how -/// downstream connections are mapped to upstream SV2 channels: -#[derive(Debug, Clone, PartialEq, serde::Deserialize)] -pub enum ChannelMode { - /// All downstream connections share a single extended SV2 channel. - /// This mode uses extranonce prefix allocation to distinguish between - /// different downstream miners while presenting them as a single entity - /// to the upstream server. This is more efficient for pools with many - /// miners. - Aggregated, - /// Each downstream connection gets its own dedicated extended SV2 channel. - /// This mode provides complete isolation between downstream connections - /// but may be less efficient for large numbers of miners. - NonAggregated, -} - -/// Internal data structure for the ChannelManager. -/// -/// This struct maintains all the state needed for SV2 channel management, -/// including pending channel requests, active channels, and mode-specific -/// data structures like extranonce factories for aggregated mode. -#[derive(Debug, Clone)] -pub struct ChannelManagerData { - /// Store pending channel info by downstream_id: (user_identity, hashrate, - /// downstream_extranonce_len) - pub pending_channels: HashMap, - /// Map of active extended channels by channel ID - pub extended_channels: HashMap>>>, - /// The upstream extended channel used in aggregated mode - pub upstream_extended_channel: Option>>>, - /// Extranonce prefix factory for allocating unique prefixes in aggregated mode - pub extranonce_prefix_factory: Option>>, - /// Current operational mode - pub mode: ChannelMode, - /// Share sequence number counter for tracking valid shares forwarded upstream. - /// In aggregated mode: single counter for all shares going to the upstream channel. 
- /// In non-aggregated mode: one counter per downstream channel. - pub share_sequence_counters: HashMap, - /// Per-channel extranonce factories for non-aggregated mode when extranonce adjustment is - /// needed - pub extranonce_factories: Option>>>, -} - -impl ChannelManagerData { - /// Creates a new ChannelManagerData instance. - /// - /// # Arguments - /// * `mode` - The operational mode (Aggregated or NonAggregated) - /// - /// # Returns - /// A new ChannelManagerData instance with empty state - pub fn new(mode: ChannelMode) -> Self { - Self { - pending_channels: HashMap::new(), - extended_channels: HashMap::new(), - upstream_extended_channel: None, - extranonce_prefix_factory: None, - mode, - share_sequence_counters: HashMap::new(), - extranonce_factories: None, - } - } - - /// Resets all channel state for upstream reconnection. - /// - /// This method clears all existing channel state that becomes invalid - /// when the upstream connection is lost and reestablished. It preserves - /// the operational mode but clears: - /// - All pending channel requests - /// - All active extended channels - /// - The upstream extended channel - /// - The extranonce prefix factory - /// - /// This ensures that new channels will be properly opened with the - /// newly connected upstream server. - pub fn reset_for_upstream_reconnection(&mut self) { - self.pending_channels.clear(); - self.extended_channels.clear(); - self.upstream_extended_channel = None; - self.extranonce_prefix_factory = None; - self.share_sequence_counters.clear(); - self.extranonce_factories = None; - // Note: we intentionally preserve `mode` as it's a configuration setting - } - - /// Gets the next sequence number for a valid share and increments the counter. 
- /// - /// The counter_key determines which counter to use: - /// - In aggregated mode: use upstream channel ID (single counter for all shares) - /// - In non-aggregated mode: use downstream channel ID (one counter per channel) - pub fn next_share_sequence_number(&mut self, counter_key: u32) -> u32 { - let counter = self.share_sequence_counters.entry(counter_key).or_insert(1); - let current = *counter; - *counter += 1; - current - } -} diff --git a/roles/translator/src/lib/sv2/channel_manager/message_handler.rs b/roles/translator/src/lib/sv2/channel_manager/message_handler.rs deleted file mode 100644 index bfa714e7cd..0000000000 --- a/roles/translator/src/lib/sv2/channel_manager/message_handler.rs +++ /dev/null @@ -1,528 +0,0 @@ -use std::sync::{Arc, RwLock}; - -use crate::{ - error::TproxyError, - sv2::{channel_manager::ChannelMode, ChannelManager}, - utils::proxy_extranonce_prefix_len, -}; -use stratum_apps::{ - custom_mutex::Mutex, - stratum_core::{ - bitcoin::Target, - channels_sv2::client::extended::ExtendedChannel, - handlers_sv2::{HandleMiningMessagesFromServerAsync, SupportedChannelTypes}, - mining_sv2::{ - CloseChannel, ExtendedExtranonce, Extranonce, NewExtendedMiningJob, NewMiningJob, - OpenExtendedMiningChannelSuccess, OpenMiningChannelError, - OpenStandardMiningChannelSuccess, SetCustomMiningJobError, SetCustomMiningJobSuccess, - SetExtranoncePrefix, SetGroupChannel, SetNewPrevHash, SetTarget, SubmitSharesError, - SubmitSharesSuccess, UpdateChannelError, - MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL_SUCCESS, - MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_ERROR, MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_SUCCESS, - MESSAGE_TYPE_SET_GROUP_CHANNEL, - }, - parsers_sv2::Mining, - }, -}; -use tracing::{debug, error, info, warn}; - -impl HandleMiningMessagesFromServerAsync for ChannelManager { - type Error = TproxyError; - - fn get_channel_type_for_server(&self, _server_id: Option) -> SupportedChannelTypes { - SupportedChannelTypes::Extended - } - - fn 
is_work_selection_enabled_for_server(&self, _server_id: Option) -> bool { - false - } - - async fn handle_open_standard_mining_channel_success( - &mut self, - _server_id: Option, - m: OpenStandardMiningChannelSuccess<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", m); - Err(Self::Error::UnexpectedMessage( - MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL_SUCCESS, - )) - } - - async fn handle_open_extended_mining_channel_success( - &mut self, - _server_id: Option, - m: OpenExtendedMiningChannelSuccess<'_>, - ) -> Result<(), Self::Error> { - // Check if we have the pending channel data, return error if not - let (user_identity, nominal_hashrate, downstream_extranonce_len) = self - .channel_manager_data - .safe_lock(|channel_manager_data| { - channel_manager_data.pending_channels.remove(&m.request_id) - }) - .map_err(|e| { - error!("Failed to lock channel manager data: {:?}", e); - TproxyError::PoisonLock - })? - .ok_or_else(|| { - error!("No pending channel found for request_id: {}", m.request_id); - TproxyError::PendingChannelNotFound(m.request_id) - })?; - - let success = self - .channel_manager_data - .safe_lock(|channel_manager_data| { - info!( - "Received: {}, user_identity: {}, nominal_hashrate: {}", - m, user_identity, nominal_hashrate - ); - let extranonce_prefix = m.extranonce_prefix.clone().into_static().to_vec(); - let target = Target::from_le_bytes(m.target.clone().inner_as_ref().try_into().unwrap()); - let version_rolling = true; // we assume this is always true on extended channels - let extended_channel = ExtendedChannel::new( - m.channel_id, - user_identity.clone(), - extranonce_prefix.clone(), - target, - nominal_hashrate, - version_rolling, - m.extranonce_size, - ); - - // If we are in aggregated mode, we need to create a new extranonce prefix and - // insert the extended channel into the map - if channel_manager_data.mode == ChannelMode::Aggregated { - channel_manager_data.upstream_extended_channel = - 
Some(Arc::new(RwLock::new(extended_channel.clone()))); - - let upstream_extranonce_prefix: Extranonce = m.extranonce_prefix.clone().into(); - let translator_proxy_extranonce_prefix_len = proxy_extranonce_prefix_len( - m.extranonce_size.into(), - downstream_extranonce_len, - ); - - // range 0 is the extranonce1 from upstream - // range 1 is the extranonce1 added by the tproxy - // range 2 is the extranonce2 used by the miner for rolling (this is the one - // that is used for rolling) - let range_0 = 0..extranonce_prefix.len(); - let range1 = range_0.end..range_0.end + translator_proxy_extranonce_prefix_len; - let range2 = range1.end..range1.end + downstream_extranonce_len; - debug!("\n\nrange_0: {:?}, range1: {:?}, range2: {:?}\n\n", range_0, range1, range2); - let extended_extranonce_factory = ExtendedExtranonce::from_upstream_extranonce( - upstream_extranonce_prefix, - range_0, - range1, - range2, - ) - .expect("Failed to create ExtendedExtranonce from upstream extranonce"); - channel_manager_data.extranonce_prefix_factory = - Some(Arc::new(Mutex::new(extended_extranonce_factory))); - - let factory = channel_manager_data - .extranonce_prefix_factory - .as_ref() - .expect("extranonce_prefix_factory should be set after creation"); - let new_extranonce_size = factory - .safe_lock(|f| f.get_range2_len()) - .expect("extranonce_prefix_factory mutex should not be poisoned") - as u16; - let new_extranonce_prefix = factory - .safe_lock(|f| f.next_prefix_extended(new_extranonce_size as usize)) - .expect("extranonce_prefix_factory mutex should not be poisoned") - .expect("next_prefix_extended should return a value for valid input") - .into_b032(); - let new_downstream_extended_channel = ExtendedChannel::new( - m.channel_id, - user_identity.clone(), - new_extranonce_prefix.clone().into_static().to_vec(), - target, - nominal_hashrate, - true, - new_extranonce_size, - ); - channel_manager_data.extended_channels.insert( - m.channel_id, - 
Arc::new(RwLock::new(new_downstream_extended_channel)), - ); - let new_open_extended_mining_channel_success = - OpenExtendedMiningChannelSuccess { - request_id: m.request_id, - channel_id: m.channel_id, - extranonce_prefix: new_extranonce_prefix, - extranonce_size: new_extranonce_size, - target: m.target.clone(), - }; - new_open_extended_mining_channel_success.into_static() - } else { - // Non-aggregated mode: check if we need to adjust extranonce size - if m.extranonce_size as usize != downstream_extranonce_len { - // We need to create an extranonce factory to ensure proper extranonce2_size - let upstream_extranonce_prefix: Extranonce = m.extranonce_prefix.clone().into(); - let translator_proxy_extranonce_prefix_len = proxy_extranonce_prefix_len( - m.extranonce_size.into(), - downstream_extranonce_len, - ); - - // range 0 is the extranonce1 from upstream - // range 1 is the extranonce1 added by the tproxy - // range 2 is the extranonce2 used by the miner for rolling - let range_0 = 0..extranonce_prefix.len(); - let range1 = range_0.end..range_0.end + translator_proxy_extranonce_prefix_len; - let range2 = range1.end..range1.end + downstream_extranonce_len; - debug!("\n\nrange_0: {:?}, range1: {:?}, range2: {:?}\n\n", range_0, range1, range2); - // Create the factory - this should succeed if configuration is valid - let extended_extranonce_factory = ExtendedExtranonce::from_upstream_extranonce( - upstream_extranonce_prefix, - range_0, - range1, - range2, - ) - .expect("Failed to create ExtendedExtranonce factory - likely extranonce size configuration issue"); - // Store the factory for this specific channel - let factory = Arc::new(Mutex::new(extended_extranonce_factory)); - let new_extranonce_prefix = factory - .safe_lock(|f| f.next_prefix_extended(downstream_extranonce_len)) - .expect("Failed to access extranonce factory") - .expect("Failed to generate extranonce prefix") - .into_b032(); - // Create channel with the configured extranonce size - let 
new_downstream_extended_channel = ExtendedChannel::new( - m.channel_id, - user_identity.clone(), - new_extranonce_prefix.clone().into_static().to_vec(), - target, - nominal_hashrate, - true, - downstream_extranonce_len as u16, - ); - channel_manager_data.extended_channels.insert( - m.channel_id, - Arc::new(RwLock::new(new_downstream_extended_channel)), - ); - // Store factory for this channel (we'll need it for share processing) - if channel_manager_data.extranonce_factories.is_none() { - channel_manager_data.extranonce_factories = Some(std::collections::HashMap::new()); - } - if let Some(ref mut factories) = channel_manager_data.extranonce_factories { - factories.insert(m.channel_id, factory); - } - let new_open_extended_mining_channel_success = OpenExtendedMiningChannelSuccess { - request_id: m.request_id, - channel_id: m.channel_id, - extranonce_prefix: new_extranonce_prefix, - extranonce_size: downstream_extranonce_len as u16, - target: m.target.clone(), - }; - new_open_extended_mining_channel_success.into_static() - } else { - // Extranonce size matches, use as-is - channel_manager_data - .extended_channels - .insert(m.channel_id, Arc::new(RwLock::new(extended_channel))); - m.into_static() - } - } - }) - .map_err(|e| { - error!("Failed to lock channel manager data: {:?}", e); - TproxyError::PoisonLock - })?; - - self.channel_state - .sv1_server_sender - .send(Mining::OpenExtendedMiningChannelSuccess(success.clone())) - .await - .map_err(|e| { - error!("Failed to send OpenExtendedMiningChannelSuccess: {:?}", e); - TproxyError::ChannelErrorSender - })?; - - Ok(()) - } - - async fn handle_open_mining_channel_error( - &mut self, - _server_id: Option, - m: OpenMiningChannelError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", m); - todo!("OpenMiningChannelError not handled yet"); - } - - async fn handle_update_channel_error( - &mut self, - _server_id: Option, - m: UpdateChannelError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", m); - 
Ok(()) - } - - async fn handle_close_channel( - &mut self, - _server_id: Option, - m: CloseChannel<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", m); - _ = self.channel_manager_data.safe_lock(|channel_data_manager| { - if channel_data_manager.mode == ChannelMode::Aggregated { - if channel_data_manager.upstream_extended_channel.is_some() { - channel_data_manager.upstream_extended_channel = None; - } - } else { - channel_data_manager.extended_channels.remove(&m.channel_id); - } - }); - Ok(()) - } - - async fn handle_set_extranonce_prefix( - &mut self, - _server_id: Option, - m: SetExtranoncePrefix<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", m); - warn!("⚠️ Cannot process SetExtranoncePrefix since set_extranonce is not supported for majority of sv1 clients. Ignoring."); - Ok(()) - } - - async fn handle_submit_shares_success( - &mut self, - _server_id: Option, - m: SubmitSharesSuccess, - ) -> Result<(), Self::Error> { - info!("Received: {} βœ…", m); - Ok(()) - } - - async fn handle_submit_shares_error( - &mut self, - _server_id: Option, - m: SubmitSharesError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {} ❌", m); - Ok(()) - } - - async fn handle_new_mining_job( - &mut self, - _server_id: Option, - m: NewMiningJob<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", m); - warn!("⚠️ Cannot process NewMiningJob since Translator Proxy supports only extended mining jobs. 
Ignoring."); - Ok(()) - } - - async fn handle_new_extended_mining_job( - &mut self, - _server_id: Option, - m: NewExtendedMiningJob<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", m); - let mut m_static = m.clone().into_static(); - _ = self.channel_manager_data.safe_lock(|channel_manage_data| { - if channel_manage_data.mode == ChannelMode::Aggregated { - if let Some(upstream_channel) = &channel_manage_data.upstream_extended_channel { - if let Ok(mut upstream_extended_channel) = upstream_channel.write() { - let _ = - upstream_extended_channel.on_new_extended_mining_job(m_static.clone()); - m_static.channel_id = 0; // this is done so that every aggregated downstream - // will - // receive the NewExtendedMiningJob message - } - } - channel_manage_data - .extended_channels - .iter() - .for_each(|(_, channel)| { - if let Ok(mut channel) = channel.write() { - let _ = channel.on_new_extended_mining_job(m_static.clone()); - } - }); - } else if let Some(channel) = channel_manage_data - .extended_channels - .get(&m_static.channel_id) - { - if let Ok(mut channel) = channel.write() { - let _ = channel.on_new_extended_mining_job(m_static.clone()); - } - } - }); - let job = m_static; - if !job.is_future() { - self.channel_state - .sv1_server_sender - .send(Mining::NewExtendedMiningJob(job)) - .await - .map_err(|e| { - error!("Failed to send immediate NewExtendedMiningJob: {:?}", e); - TproxyError::ChannelErrorSender - })?; - } - Ok(()) - } - - async fn handle_set_new_prev_hash( - &mut self, - _server_id: Option, - m: SetNewPrevHash<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", m); - let m_static = m.clone().into_static(); - _ = self.channel_manager_data.safe_lock(|channel_manager_data| { - if channel_manager_data.mode == ChannelMode::Aggregated { - if let Some(upstream_channel) = &channel_manager_data.upstream_extended_channel { - if let Ok(mut upstream_extended_channel) = upstream_channel.write() { - _ = 
upstream_extended_channel.on_set_new_prev_hash(m_static.clone()); - } - } - channel_manager_data - .extended_channels - .iter() - .for_each(|(_, channel)| { - if let Ok(mut channel) = channel.write() { - _ = channel.on_set_new_prev_hash(m_static.clone()); - } - }); - } else if let Some(channel) = channel_manager_data - .extended_channels - .get(&m_static.channel_id) - { - if let Ok(mut channel) = channel.write() { - _ = channel.on_set_new_prev_hash(m_static.clone()); - } - } - }); - - self.channel_state - .sv1_server_sender - .send(Mining::SetNewPrevHash(m_static.clone())) - .await - .map_err(|e| { - error!("Failed to send SetNewPrevHash: {:?}", e); - TproxyError::ChannelErrorSender - })?; - - let mode = self - .channel_manager_data - .super_safe_lock(|c| c.mode.clone()); - - let active_job = if mode == ChannelMode::Aggregated { - self.channel_manager_data.super_safe_lock(|c| { - c.upstream_extended_channel - .as_ref() - .and_then(|ch| ch.read().ok()) - .and_then(|ch| ch.get_active_job().map(|j| j.0.clone())) - }) - } else { - self.channel_manager_data.super_safe_lock(|c| { - c.extended_channels - .get(&m.channel_id) - .and_then(|ch| ch.read().ok()) - .and_then(|ch| ch.get_active_job().map(|j| j.0.clone())) - }) - }; - - if let Some(mut job) = active_job { - if mode == ChannelMode::Aggregated { - job.channel_id = 0; - } - self.channel_state - .sv1_server_sender - .send(Mining::NewExtendedMiningJob(job)) - .await - .map_err(|e| { - error!("Failed to send NewExtendedMiningJob: {:?}", e); - TproxyError::ChannelErrorSender - })?; - } - Ok(()) - } - - async fn handle_set_custom_mining_job_success( - &mut self, - _server_id: Option, - m: SetCustomMiningJobSuccess, - ) -> Result<(), Self::Error> { - warn!("Received: {}", m); - warn!("⚠️ Cannot process SetCustomMiningJobSuccess since Translator Proxy does not support custom mining jobs. 
Ignoring."); - Err(Self::Error::UnexpectedMessage( - MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_SUCCESS, - )) - } - - async fn handle_set_custom_mining_job_error( - &mut self, - _server_id: Option, - m: SetCustomMiningJobError<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", m); - warn!("⚠️ Cannot process SetCustomMiningJobError since Translator Proxy does not support custom mining jobs. Ignoring."); - Err(Self::Error::UnexpectedMessage( - MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_ERROR, - )) - } - - async fn handle_set_target( - &mut self, - _server_id: Option, - m: SetTarget<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", m); - - // Update the channel targets in the channel manager - _ = self.channel_manager_data.safe_lock(|channel_manager_data| { - if channel_manager_data.mode == ChannelMode::Aggregated { - if let Some(upstream_channel) = &channel_manager_data.upstream_extended_channel { - if let Ok(mut upstream_extended_channel) = upstream_channel.write() { - upstream_extended_channel.set_target(Target::from_le_bytes( - m.maximum_target.inner_as_ref().try_into().unwrap(), - )); - } - } - channel_manager_data - .extended_channels - .iter() - .for_each(|(_, channel)| { - if let Ok(mut channel) = channel.write() { - channel.set_target(Target::from_le_bytes( - m.maximum_target.inner_as_ref().try_into().unwrap(), - )); - } - }); - } else if let Some(channel) = channel_manager_data.extended_channels.get(&m.channel_id) - { - if let Ok(mut channel) = channel.write() { - channel.set_target(Target::from_le_bytes( - m.maximum_target.inner_as_ref().try_into().unwrap(), - )); - } - } - }); - - // Forward SetTarget message to SV1Server for vardiff processing - self.channel_state - .sv1_server_sender - .send(Mining::SetTarget(m.clone().into_static())) - .await - .map_err(|e| { - error!("Failed to forward SetTarget message to SV1Server: {:?}", e); - TproxyError::ChannelErrorSender - })?; - - Ok(()) - } - - async fn handle_set_group_channel( - &mut self, - 
_server_id: Option, - m: SetGroupChannel<'_>, - ) -> Result<(), Self::Error> { - warn!("Received: {}", m); - warn!("⚠️ Cannot process SetGroupChannel since Translator Proxy does not support group channels. Ignoring."); - Err(Self::Error::UnexpectedMessage( - MESSAGE_TYPE_SET_GROUP_CHANNEL, - )) - } -} diff --git a/roles/translator/src/lib/sv2/channel_manager/mod.rs b/roles/translator/src/lib/sv2/channel_manager/mod.rs deleted file mode 100644 index 689a6efc7f..0000000000 --- a/roles/translator/src/lib/sv2/channel_manager/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub mod channel_manager; -pub mod message_handler; -pub use channel_manager::ChannelManager; -pub(super) mod channel; -pub(crate) mod data; -pub use data::ChannelMode; diff --git a/roles/translator/src/lib/sv2/mod.rs b/roles/translator/src/lib/sv2/mod.rs deleted file mode 100644 index d8cb5e360c..0000000000 --- a/roles/translator/src/lib/sv2/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod channel_manager; -pub mod upstream; - -pub use channel_manager::channel_manager::ChannelManager; -pub use upstream::upstream::Upstream; diff --git a/roles/translator/src/lib/sv2/upstream/channel.rs b/roles/translator/src/lib/sv2/upstream/channel.rs deleted file mode 100644 index 25fb324a58..0000000000 --- a/roles/translator/src/lib/sv2/upstream/channel.rs +++ /dev/null @@ -1,40 +0,0 @@ -use async_channel::{Receiver, Sender}; -use stratum_apps::stratum_core::{codec_sv2::StandardEitherFrame, parsers_sv2::AnyMessage}; -use tracing::debug; - -pub type Message = AnyMessage<'static>; -pub type EitherFrame = StandardEitherFrame; - -#[derive(Debug, Clone)] -pub struct UpstreamChannelState { - /// Receiver for the SV2 Upstream role - pub upstream_receiver: Receiver, - /// Sender for the SV2 Upstream role - pub upstream_sender: Sender, - /// Sender for the ChannelManager thread - pub channel_manager_sender: Sender, - /// Receiver for the ChannelManager thread - pub channel_manager_receiver: Receiver, -} - -impl UpstreamChannelState { - 
pub fn new( - channel_manager_sender: Sender, - channel_manager_receiver: Receiver, - upstream_receiver: Receiver, - upstream_sender: Sender, - ) -> Self { - Self { - channel_manager_sender, - channel_manager_receiver, - upstream_receiver, - upstream_sender, - } - } - - pub fn drop(&self) { - debug!("Closing all upstream channels"); - self.upstream_receiver.close(); - self.upstream_receiver.close(); - } -} diff --git a/roles/translator/src/lib/sv2/upstream/message_handler.rs b/roles/translator/src/lib/sv2/upstream/message_handler.rs deleted file mode 100644 index 6ca38fcd08..0000000000 --- a/roles/translator/src/lib/sv2/upstream/message_handler.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::{error::TproxyError, sv2::Upstream}; -use stratum_apps::stratum_core::{ - common_messages_sv2::{ - ChannelEndpointChanged, Reconnect, SetupConnectionError, SetupConnectionSuccess, - }, - handlers_sv2::HandleCommonMessagesFromServerAsync, -}; -use tracing::{error, info}; - -impl HandleCommonMessagesFromServerAsync for Upstream { - type Error = TproxyError; - - async fn handle_setup_connection_error( - &mut self, - _server_id: Option, - msg: SetupConnectionError<'_>, - ) -> Result<(), Self::Error> { - error!("Received: {}", msg); - todo!() - } - - async fn handle_setup_connection_success( - &mut self, - _server_id: Option, - msg: SetupConnectionSuccess, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - Ok(()) - } - - async fn handle_channel_endpoint_changed( - &mut self, - _server_id: Option, - msg: ChannelEndpointChanged, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - todo!() - } - - async fn handle_reconnect( - &mut self, - _server_id: Option, - msg: Reconnect<'_>, - ) -> Result<(), Self::Error> { - info!("Received: {}", msg); - todo!() - } -} diff --git a/roles/translator/src/lib/sv2/upstream/mod.rs b/roles/translator/src/lib/sv2/upstream/mod.rs deleted file mode 100644 index 52cef24ca7..0000000000 --- 
a/roles/translator/src/lib/sv2/upstream/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod message_handler; -pub mod upstream; -pub use upstream::Upstream; -pub(super) mod channel; diff --git a/roles/translator/src/lib/sv2/upstream/upstream.rs b/roles/translator/src/lib/sv2/upstream/upstream.rs deleted file mode 100644 index 8fa2bf0d25..0000000000 --- a/roles/translator/src/lib/sv2/upstream/upstream.rs +++ /dev/null @@ -1,457 +0,0 @@ -use crate::{ - error::TproxyError, - status::{handle_error, Status, StatusSender}, - sv2::upstream::channel::UpstreamChannelState, - task_manager::TaskManager, - utils::{message_from_frame, ShutdownMessage}, -}; -use async_channel::{Receiver, Sender}; -use std::{net::SocketAddr, sync::Arc}; -use stratum_apps::{ - key_utils::Secp256k1PublicKey, - network_helpers::noise_connection::Connection, - stratum_core::{ - buffer_sv2, - codec_sv2::{HandshakeRole, StandardEitherFrame, StandardSv2Frame}, - common_messages_sv2::{Protocol, SetupConnection}, - framing_sv2, - handlers_sv2::HandleCommonMessagesFromServerAsync, - noise_sv2::Initiator, - parsers_sv2::AnyMessage, - }, -}; -use tokio::{ - net::TcpStream, - sync::{broadcast, mpsc}, - time::{sleep, Duration}, -}; -use tracing::{debug, error, info, warn}; - -/// Type alias for SV2 messages with static lifetime -pub type Message = AnyMessage<'static>; -/// Type alias for standard SV2 frames -pub type StdFrame = StandardSv2Frame; -/// Type alias for either handshake or SV2 frames -pub type EitherFrame = StandardEitherFrame; - -/// Manages the upstream SV2 connection to a mining pool or proxy. 
-/// -/// This struct handles the SV2 protocol communication with upstream servers, -/// including: -/// - Connection establishment with multiple upstream fallbacks -/// - SV2 handshake and setup procedures -/// - Message routing between channel manager and upstream -/// - Connection monitoring and error handling -/// - Graceful shutdown coordination -/// -/// The upstream connection supports automatic failover between multiple -/// configured upstream servers and implements retry logic for connection -/// establishment. -#[derive(Debug, Clone)] -pub struct Upstream { - upstream_channel_state: UpstreamChannelState, -} - -impl Upstream { - /// Creates a new upstream connection by attempting to connect to configured servers. - /// - /// This method tries to establish a connection to one of the provided upstream - /// servers, implementing retry logic and fallback behavior. It will attempt - /// to connect to each server multiple times before giving up. - /// - /// # Arguments - /// * `upstreams` - List of (address, public_key) pairs for upstream servers - /// * `channel_manager_sender` - Channel to send messages to the channel manager - /// * `channel_manager_receiver` - Channel to receive messages from the channel manager - /// * `notify_shutdown` - Broadcast channel for shutdown coordination - /// * `shutdown_complete_tx` - Channel to signal shutdown completion - /// - /// # Returns - /// * `Ok(Upstream)` - Successfully connected to an upstream server - /// * `Err(TproxyError)` - Failed to connect to any upstream server - pub async fn new( - upstreams: &[(SocketAddr, Secp256k1PublicKey)], - channel_manager_sender: Sender, - channel_manager_receiver: Receiver, - notify_shutdown: broadcast::Sender, - shutdown_complete_tx: mpsc::Sender<()>, - ) -> Result { - let mut shutdown_rx = notify_shutdown.subscribe(); - const RETRIES_PER_UPSTREAM: u8 = 3; - - for (index, (addr, pubkey)) in upstreams.iter().enumerate() { - info!("Trying to connect to upstream {} at {}", index, 
addr); - - for attempt in 1..=RETRIES_PER_UPSTREAM { - if shutdown_rx.try_recv().is_ok() { - info!("Shutdown signal received during upstream connection attempt. Aborting."); - drop(shutdown_complete_tx); - return Err(TproxyError::Shutdown); - } - - match TcpStream::connect(addr).await { - Ok(socket) => { - info!( - "Connected to upstream at {addr} (attempt {attempt}/{RETRIES_PER_UPSTREAM})" - ); - let initiator = Initiator::from_raw_k(pubkey.into_bytes())?; - match Connection::new(socket, HandshakeRole::Initiator(initiator)).await { - Ok((receiver, sender)) => { - let upstream_channel_state = UpstreamChannelState::new( - channel_manager_sender, - channel_manager_receiver, - receiver, - sender, - ); - debug!("Successfully initialized upstream channel with {addr}"); - - return Ok(Self { - upstream_channel_state, - }); - } - Err(e) => { - error!("Failed Noise handshake with {addr}: {e:?}. Retrying..."); - } - } - } - Err(e) => { - error!( - "Failed to connect to {addr}: {e}. Retry {attempt}/{RETRIES_PER_UPSTREAM}..." - ); - } - } - - sleep(Duration::from_secs(5)).await; - } - - warn!("Exhausted retries for upstream {index} at {addr}"); - } - - error!("Failed to connect to any configured upstream."); - drop(shutdown_complete_tx); - Err(TproxyError::Shutdown) - } - - /// Starts the upstream connection and begins message processing. - /// - /// This method: - /// - Completes the SV2 handshake with the upstream server - /// - Spawns the main message processing task - /// - Handles graceful shutdown coordination - /// - /// The method will first attempt to complete the SV2 setup connection - /// handshake. If successful, it spawns a task to handle bidirectional - /// message flow between the channel manager and upstream server. 
- pub async fn start( - mut self, - notify_shutdown: broadcast::Sender, - shutdown_complete_tx: mpsc::Sender<()>, - status_sender: Sender, - task_manager: Arc, - ) -> Result<(), TproxyError> { - let mut shutdown_rx = notify_shutdown.subscribe(); - // Wait for connection setup or shutdown signal - tokio::select! { - result = self.setup_connection() => { - if let Err(e) = result { - error!("Upstream: failed to set up SV2 connection: {e:?}"); - drop(shutdown_complete_tx); - return Err(e); - } - } - message = shutdown_rx.recv() => { - match message { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Upstream: shutdown signal received during connection setup."); - drop(shutdown_complete_tx); - return Ok(()); - } - Ok(_) => {} - - Err(e) => { - error!("Upstream: failed to receive shutdown signal: {e}"); - drop(shutdown_complete_tx); - return Ok(()); - } - } - } - } - - // Wrap status sender and start upstream task - let wrapped_status_sender = StatusSender::Upstream(status_sender); - - self.run_upstream_task( - notify_shutdown, - shutdown_complete_tx, - wrapped_status_sender, - task_manager, - )?; - - Ok(()) - } - - /// Performs the SV2 handshake setup with the upstream server. - /// - /// This method handles the initial SV2 protocol handshake by: - /// - Creating and sending a SetupConnection message - /// - Waiting for the handshake response - /// - Validating and processing the response - /// - /// The handshake establishes the protocol version, capabilities, and - /// other connection parameters needed for SV2 communication. 
- pub async fn setup_connection(&mut self) -> Result<(), TproxyError> { - debug!("Upstream: initiating SV2 handshake..."); - // Build SetupConnection message - let setup_conn_msg = Self::get_setup_connection_message(2, 2, false)?; - let sv2_frame: StdFrame = - Message::Common(setup_conn_msg.into()) - .try_into() - .map_err(|e| { - error!("Failed to serialize SetupConnection message: {:?}", e); - TproxyError::ParserError(e) - })?; - - // Send SetupConnection message to upstream - self.upstream_channel_state - .upstream_sender - .send(sv2_frame.into()) - .await - .map_err(|e| { - error!("Failed to send SetupConnection to upstream: {:?}", e); - TproxyError::ChannelErrorSender - })?; - - let mut incoming: StdFrame = - match self.upstream_channel_state.upstream_receiver.recv().await { - Ok(frame) => { - debug!("Received handshake response from upstream."); - frame.try_into()? - } - Err(e) => { - error!("Failed to receive handshake response from upstream: {}", e); - return Err(TproxyError::CodecNoise( - stratum_apps::stratum_core::noise_sv2::Error::ExpectedIncomingHandshakeMessage, - )); - } - }; - - let message_type = incoming - .get_header() - .ok_or_else(|| { - error!("Expected handshake frame but no header found."); - framing_sv2::Error::ExpectedHandshakeFrame - })? - .msg_type(); - - let payload = incoming.payload(); - - self.handle_common_message_frame_from_server(None, message_type, payload) - .await?; - debug!("Upstream: handshake completed successfully."); - Ok(()) - } - - /// Processes incoming messages from the upstream SV2 server. - /// - /// This method handles different types of frames received from upstream: - /// - SV2 frames: Parses and routes mining/common messages appropriately - /// - Handshake frames: Logs for debugging (shouldn't occur during normal operation) - /// - /// Common messages are handled directly, while mining messages are forwarded - /// to the channel manager for processing and distribution to downstream connections. 
- pub async fn on_upstream_message(&self, message: EitherFrame) -> Result<(), TproxyError> { - let mut upstream = self.get_upstream(); - match message { - EitherFrame::Sv2(sv2_frame) => { - // Convert to standard frame - let std_frame: StdFrame = sv2_frame; - - // Parse message from frame - let mut frame: stratum_apps::stratum_core::framing_sv2::framing::Frame< - AnyMessage<'static>, - buffer_sv2::Slice, - > = std_frame.clone().into(); - - let (messsage_type, mut payload, parsed_message) = message_from_frame(&mut frame)?; - - match parsed_message { - AnyMessage::Common(_) => { - // Handle common upstream messages - upstream - .handle_common_message_frame_from_server( - None, - messsage_type, - &mut payload, - ) - .await?; - } - - AnyMessage::Mining(_) => { - // Forward mining message to channel manager - let frame_to_forward = EitherFrame::Sv2(std_frame.clone()); - self.upstream_channel_state - .channel_manager_sender - .send(frame_to_forward) - .await - .map_err(|e| { - error!("Failed to send mining message to channel manager: {:?}", e); - TproxyError::ChannelErrorSender - })?; - } - - _ => { - error!("Received unsupported message type from upstream."); - return Err(TproxyError::UnexpectedMessage(0)); - } - } - } - - EitherFrame::HandShake(handshake_frame) => { - debug!("Received handshake frame: {:?}", handshake_frame); - } - } - Ok(()) - } - - /// Spawns a unified task to handle upstream message I/O and shutdown logic. - fn run_upstream_task( - self, - notify_shutdown: broadcast::Sender, - shutdown_complete_tx: mpsc::Sender<()>, - status_sender: StatusSender, - task_manager: Arc, - ) -> Result<(), TproxyError> { - let mut shutdown_rx = notify_shutdown.subscribe(); - let shutdown_complete_tx = shutdown_complete_tx.clone(); - - task_manager.spawn(async move { - loop { - tokio::select! { - // Handle shutdown signals - shutdown = shutdown_rx.recv() => { - match shutdown { - Ok(ShutdownMessage::ShutdownAll) => { - info!("Upstream: received ShutdownAll signal. 
Exiting loop."); - break; - } - Ok(_) => { - // Ignore other shutdown variants for upstream - } - Err(e) => { - error!("Upstream: failed to receive shutdown signal: {e}"); - break; - } - } - } - - // Handle incoming SV2 messages from upstream - result = self.upstream_channel_state.upstream_receiver.recv() => { - match result { - Ok(frame) => { - debug!("Upstream: received frame."); - if let Err(e) = self.on_upstream_message(frame).await { - error!("Upstream: error while processing message: {e:?}"); - handle_error(&status_sender, TproxyError::ChannelErrorSender).await; - } - } - Err(e) => { - error!("Upstream: receiver channel closed unexpectedly: {e}"); - handle_error(&status_sender, TproxyError::ChannelErrorReceiver(e)).await; - break; - } - } - } - - // Handle messages from channel manager to send upstream - result = self.upstream_channel_state.channel_manager_receiver.recv() => { - match result { - Ok(msg) => { - debug!("Upstream: sending message from channel manager: {:?}", msg); - if let Err(e) = self.send_upstream(msg).await { - error!("Upstream: failed to send message: {e:?}"); - handle_error(&status_sender, TproxyError::ChannelErrorSender).await; - } - } - Err(e) => { - error!("Upstream: channel manager receiver closed: {e}"); - handle_error(&status_sender, TproxyError::ChannelErrorReceiver(e)).await; - break; - } - } - } - } - } - - self.upstream_channel_state.drop(); - warn!("Upstream: task shutting down cleanly."); - drop(shutdown_complete_tx); - }); - - Ok(()) - } - - /// Sends a message to the upstream SV2 server. - /// - /// This method forwards messages from the channel manager to the upstream - /// server. Messages are typically mining-related (share submissions, channel - /// requests, etc.) that need to be sent upstream. 
- /// - /// # Arguments - /// * `sv2_frame` - The SV2 frame to send to the upstream server - /// - /// # Returns - /// * `Ok(())` - Message sent successfully - /// * `Err(TproxyError)` - Error sending the message - pub async fn send_upstream(&self, sv2_frame: EitherFrame) -> Result<(), TproxyError> { - debug!("Sending message to upstream."); - - self.upstream_channel_state - .upstream_sender - .send(sv2_frame) - .await - .map_err(|e| { - error!("Failed to send message to upstream: {:?}", e); - TproxyError::ChannelErrorSender - })?; - - Ok(()) - } - - /// Constructs the `SetupConnection` message. - #[allow(clippy::result_large_err)] - fn get_setup_connection_message( - min_version: u16, - max_version: u16, - is_work_selection_enabled: bool, - ) -> Result, TproxyError> { - let endpoint_host = "0.0.0.0".to_string().into_bytes().try_into()?; - let vendor = "SRI".to_string().try_into()?; - let hardware_version = "Translator Proxy".to_string().try_into()?; - let firmware = String::new().try_into()?; - let device_id = String::new().try_into()?; - let flags = if is_work_selection_enabled { - 0b110 - } else { - 0b100 - }; - - Ok(SetupConnection { - protocol: Protocol::MiningProtocol, - min_version, - max_version, - flags, - endpoint_host, - endpoint_port: 50, - vendor, - hardware_version, - firmware, - device_id, - }) - } - - fn get_upstream(&self) -> Upstream { - Upstream { - upstream_channel_state: self.upstream_channel_state.clone(), - } - } -} diff --git a/roles/translator/src/lib/task_manager.rs b/roles/translator/src/lib/task_manager.rs deleted file mode 100644 index ac615a3a7b..0000000000 --- a/roles/translator/src/lib/task_manager.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::sync::Mutex as StdMutex; -use tokio::task::JoinHandle; - -/// Manages a collection of spawned tokio tasks. -/// -/// This struct provides a centralized way to spawn, track, and manage the lifecycle -/// of async tasks in the translator. 
It maintains a list of join handles that can -/// be used to wait for all tasks to complete or abort them during shutdown. -pub struct TaskManager { - tasks: StdMutex>>, -} - -impl Default for TaskManager { - fn default() -> Self { - Self::new() - } -} - -impl TaskManager { - /// Creates a new TaskManager instance. - /// - /// Initializes an empty task manager ready to spawn and track tasks. - pub fn new() -> Self { - Self { - tasks: StdMutex::new(Vec::new()), - } - } - - /// Spawns a new async task and adds it to the managed collection. - /// - /// The task will be tracked by this manager and can be waited for or aborted - /// using the other methods. - /// - /// # Arguments - /// * `fut` - The future to spawn as a task - #[track_caller] - pub fn spawn(&self, fut: F) - where - F: std::future::Future + Send + 'static, - { - use tracing::Instrument; - let location = std::panic::Location::caller(); - let span = tracing::trace_span!( - "task", - file = location.file(), - line = location.line(), - column = location.column(), - ); - - let handle = tokio::spawn(fut.instrument(span)); - self.tasks.lock().unwrap().push(handle); - } - - /// Waits for all managed tasks to complete. - /// - /// This method will block until all tasks that were spawned through this - /// manager have finished executing. Tasks are joined in reverse order - /// (most recently spawned first). - pub async fn join_all(&self) { - let handles = { - let mut tasks = self.tasks.lock().unwrap(); - std::mem::take(&mut *tasks) - }; - - for handle in handles { - let _ = handle.await; - } - } - - /// Aborts all managed tasks. - /// - /// This method immediately cancels all tasks that were spawned through this - /// manager. The tasks will be terminated without waiting for them to complete. - pub async fn abort_all(&self) { - let mut tasks = self.tasks.lock().unwrap(); - for handle in tasks.drain(..) 
{ - handle.abort(); - } - } -} diff --git a/roles/translator/src/lib/utils.rs b/roles/translator/src/lib/utils.rs deleted file mode 100644 index 944e70471e..0000000000 --- a/roles/translator/src/lib/utils.rs +++ /dev/null @@ -1,297 +0,0 @@ -use stratum_apps::{ - custom_mutex::Mutex, - stratum_core::{ - binary_sv2::{Sv2DataType, U256}, - bitcoin::{ - block::{Header, Version}, - hashes::Hash, - CompactTarget, Target, TxMerkleNode, - }, - buffer_sv2::Slice, - channels_sv2::{ - merkle_root::merkle_root_from_path, - target::{bytes_to_hex, u256_to_block_hash}, - }, - framing_sv2::framing::Frame, - parsers_sv2::{AnyMessage, CommonMessages}, - sv1_api::{client_to_server, utils::HexU32Be}, - }, -}; -use tracing::{debug, error}; - -use crate::error::TproxyError; - -/// Validates an SV1 share against the target difficulty and job parameters. -/// -/// This function performs complete share validation by: -/// 1. Finding the corresponding job from the valid jobs storage -/// 2. Constructing the full extranonce from extranonce1 and extranonce2 -/// 3. Calculating the merkle root from the coinbase transaction and merkle path -/// 4. Building the block header with the share's nonce and timestamp -/// 5. 
Hashing the header and comparing against the target difficulty -/// -/// # Arguments -/// * `share` - The SV1 submit message containing the share data -/// * `target` - The target difficulty for this share -/// * `extranonce1` - The first part of the extranonce (from server) -/// * `version_rolling_mask` - Optional mask for version rolling -/// * `sv1_server_data` - Reference to shared SV1 server data for accessing valid jobs -/// * `channel_id` - Channel ID for job lookup -/// -/// # Returns -/// * `Ok(true)` if the share is valid and meets the target -/// * `Ok(false)` if the share is valid but doesn't meet the target -/// * `Err(TproxyError)` if validation fails due to missing job or invalid data -pub fn validate_sv1_share( - share: &client_to_server::Submit<'static>, - target: Target, - extranonce1: Vec, - version_rolling_mask: Option, - sv1_server_data: std::sync::Arc>, - channel_id: u32, -) -> Result { - let job_id = share.job_id.clone(); - - // Access valid jobs based on the configured mode - let job = sv1_server_data - .super_safe_lock(|server_data| { - if let Some(ref aggregated_jobs) = server_data.aggregated_valid_jobs { - // Aggregated mode: search in shared jobs - aggregated_jobs - .iter() - .find(|job| job.job_id == job_id) - .cloned() - } else if let Some(ref non_aggregated_jobs) = server_data.non_aggregated_valid_jobs { - // Non-aggregated mode: search in channel-specific jobs - non_aggregated_jobs - .get(&channel_id) - .and_then(|channel_jobs| channel_jobs.iter().find(|job| job.job_id == job_id)) - .cloned() - } else { - None - } - }) - .ok_or(TproxyError::JobNotFound)?; - - let mut full_extranonce = vec![]; - full_extranonce.extend_from_slice(extranonce1.as_slice()); - full_extranonce.extend_from_slice(share.extra_nonce2.0.as_ref()); - - let share_version = share - .version_bits - .clone() - .map(|vb| vb.0) - .unwrap_or(job.version.0); - let mask = version_rolling_mask.unwrap_or(HexU32Be(0x1FFFE000_u32)).0; - let version = (job.version.0 & !mask) | 
(share_version & mask); - - let prev_hash_vec: Vec = job.prev_hash.clone().into(); - let prev_hash = U256::from_vec_(prev_hash_vec).map_err(TproxyError::BinarySv2)?; - - // calculate the merkle root from: - // - job coinbase_tx_prefix - // - full extranonce - // - job coinbase_tx_suffix - // - job merkle_path - let merkle_root: [u8; 32] = merkle_root_from_path( - job.coin_base1.as_ref(), - job.coin_base2.as_ref(), - full_extranonce.as_ref(), - job.merkle_branch.as_ref(), - ) - .ok_or(TproxyError::InvalidMerkleRoot)? - .try_into() - .map_err(|_| TproxyError::InvalidMerkleRoot)?; - - // create the header for validation - let header = Header { - version: Version::from_consensus(version as i32), - prev_blockhash: u256_to_block_hash(prev_hash), - merkle_root: TxMerkleNode::from_byte_array(merkle_root), - time: share.time.0, - bits: CompactTarget::from_consensus(job.bits.0), - nonce: share.nonce.0, - }; - - // convert the header hash to a target type for easy comparison - let hash = header.block_hash(); - let raw_hash: [u8; 32] = *hash.to_raw_hash().as_ref(); - let hash_as_target = Target::from_le_bytes(raw_hash); - - // print hash_as_target and self.target as human readable hex - let hash_bytes = hash_as_target.to_be_bytes(); - let target_bytes = target.to_be_bytes(); - - debug!( - "share validation \nshare:\t\t{}\ndownstream target:\t{}\n", - bytes_to_hex(&hash_bytes), - bytes_to_hex(&target_bytes), - ); - // check if the share hash meets the downstream target - if hash_as_target < target { - /*if self.share_accounting.is_share_seen(hash.to_raw_hash()) { - return Err(ShareValidationError::DuplicateShare); - }*/ - - return Ok(true); - } - - Ok(false) -} - -/// Calculates the required length of the proxy's extranonce prefix. -/// -/// This function determines how many bytes the proxy needs to reserve for its own -/// extranonce prefix, based on the difference between the channel's rollable extranonce -/// size and the downstream miner's rollable extranonce size. 
-/// -/// # Arguments -/// * `channel_rollable_extranonce_size` - Size of the rollable extranonce from the channel -/// * `downstream_rollable_extranonce_size` - Size of the rollable extranonce for downstream -/// -/// # Returns -/// The number of bytes needed for the proxy's extranonce prefix -pub fn proxy_extranonce_prefix_len( - channel_rollable_extranonce_size: usize, - downstream_rollable_extranonce_size: usize, -) -> usize { - channel_rollable_extranonce_size - downstream_rollable_extranonce_size -} - -/// Extracts message type, payload, and parsed message from an SV2 frame. -/// -/// This function processes an SV2 frame and extracts the essential components: -/// - Message type identifier -/// - Raw payload bytes -/// - Parsed message structure -/// -/// # Arguments -/// * `frame` - The SV2 frame to process -/// -/// # Returns -/// A tuple containing (message_type, payload, parsed_message) on success, -/// or a TproxyError if the frame is invalid or cannot be parsed -pub fn message_from_frame( - frame: &mut Frame, Slice>, -) -> Result<(u8, Vec, AnyMessage<'static>), TproxyError> { - match frame { - Frame::Sv2(frame) => { - let header = frame - .get_header() - .ok_or(TproxyError::UnexpectedMessage(0))?; - let message_type = header.msg_type(); - let mut payload = frame.payload().to_vec(); - let message: Result, _> = - (message_type, payload.as_mut_slice()).try_into(); - match message { - Ok(message) => { - let message = into_static(message)?; - Ok((message_type, payload.to_vec(), message)) - } - Err(_) => { - error!("Received frame with invalid payload or message type: {frame:?}"); - Err(TproxyError::UnexpectedMessage(message_type)) - } - } - } - Frame::HandShake(f) => { - error!("Received unexpected handshake frame: {f:?}"); - Err(TproxyError::UnexpectedMessage(0)) - } - } -} - -/// Converts a borrowed AnyMessage to a static lifetime version. 
-/// -/// This function takes an AnyMessage with a borrowed lifetime and converts it to -/// a static lifetime version, which is necessary for storing messages across -/// async boundaries and in data structures. -/// -/// # Arguments -/// * `m` - The AnyMessage to convert to static lifetime -/// -/// # Returns -/// A static lifetime version of the message, or TproxyError if the message -/// type is not supported for static conversion -pub fn into_static(m: AnyMessage<'_>) -> Result, TproxyError> { - match m { - AnyMessage::Mining(m) => Ok(AnyMessage::Mining(m.into_static())), - AnyMessage::Common(m) => match m { - CommonMessages::ChannelEndpointChanged(m) => Ok(AnyMessage::Common( - CommonMessages::ChannelEndpointChanged(m.into_static()), - )), - CommonMessages::SetupConnection(m) => Ok(AnyMessage::Common( - CommonMessages::SetupConnection(m.into_static()), - )), - CommonMessages::SetupConnectionError(m) => Ok(AnyMessage::Common( - CommonMessages::SetupConnectionError(m.into_static()), - )), - CommonMessages::SetupConnectionSuccess(m) => Ok(AnyMessage::Common( - CommonMessages::SetupConnectionSuccess(m.into_static()), - )), - CommonMessages::Reconnect(m) => Ok(AnyMessage::Common(CommonMessages::Reconnect( - m.into_static(), - ))), - }, - _ => Err(TproxyError::UnexpectedMessage(0)), - } -} - -/// Messages used for coordinating shutdown across different components. -/// -/// This enum defines the different types of shutdown signals that can be sent -/// through the broadcast channel to coordinate graceful shutdown of the translator. 
-#[derive(Debug, Clone)] -pub enum ShutdownMessage { - /// Shutdown all components immediately - ShutdownAll, - /// Shutdown all downstream connections - DownstreamShutdownAll, - /// Shutdown a specific downstream connection by ID - DownstreamShutdown(u32), - /// Reset channel manager state and shutdown downstreams due to upstream reconnection - UpstreamReconnectedResetAndShutdownDownstreams, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_proxy_extranonce_prefix_len() { - assert_eq!(proxy_extranonce_prefix_len(8, 4), 4); - assert_eq!(proxy_extranonce_prefix_len(10, 6), 4); - assert_eq!(proxy_extranonce_prefix_len(4, 4), 0); - } - - #[test] - fn test_shutdown_message_debug() { - let msg1 = ShutdownMessage::ShutdownAll; - let msg2 = ShutdownMessage::DownstreamShutdown(123); - let msg3 = ShutdownMessage::DownstreamShutdownAll; - let msg4 = ShutdownMessage::UpstreamReconnectedResetAndShutdownDownstreams; - - // Test Debug implementation - assert!(format!("{:?}", msg1).contains("ShutdownAll")); - assert!(format!("{:?}", msg2).contains("DownstreamShutdown")); - assert!(format!("{:?}", msg2).contains("123")); - assert!(format!("{:?}", msg3).contains("DownstreamShutdownAll")); - assert!(format!("{:?}", msg4).contains("UpstreamReconnected")); - } - - #[test] - fn test_shutdown_message_clone() { - let msg = ShutdownMessage::DownstreamShutdown(456); - let cloned = msg.clone(); - - match (msg, cloned) { - ( - ShutdownMessage::DownstreamShutdown(id1), - ShutdownMessage::DownstreamShutdown(id2), - ) => { - assert_eq!(id1, id2); - } - _ => panic!("Clone failed"), - } - } -} diff --git a/roles/translator/src/main.rs b/roles/translator/src/main.rs deleted file mode 100644 index 4b09cecea4..0000000000 --- a/roles/translator/src/main.rs +++ /dev/null @@ -1,25 +0,0 @@ -mod args; -use std::process; - -use stratum_apps::config_helpers::logging::init_logging; -pub use translator_sv2::{config, error, status, sv1, sv2, TranslatorSv2}; - -use 
crate::args::process_cli_args; - -/// Entrypoint for the Translator binary. -/// -/// Loads the configuration from TOML and initializes the main runtime -/// defined in `translator_sv2::TranslatorSv2`. Errors during startup are logged. -#[tokio::main] -async fn main() { - let proxy_config = process_cli_args().unwrap_or_else(|e| { - eprintln!("Translator proxy config error: {e}"); - std::process::exit(1); - }); - - init_logging(proxy_config.log_dir()); - - TranslatorSv2::new(proxy_config).start().await; - - process::exit(1); -} diff --git a/rustfmt.toml b/rustfmt.toml deleted file mode 100644 index 36fbd1dc63..0000000000 --- a/rustfmt.toml +++ /dev/null @@ -1,10 +0,0 @@ -edition = "2018" -imports_indent = "Block" -imports_layout = "Mixed" -imports_granularity = "Crate" -wrap_comments = true -format_code_in_doc_comments = true -comment_width = 100 # Default 80 -normalize_comments = false -normalize_doc_attributes = false -format_strings = false \ No newline at end of file diff --git a/scripts/build-on-all-workspaces.sh b/scripts/build-on-all-workspaces.sh deleted file mode 100755 index 16b6109719..0000000000 --- a/scripts/build-on-all-workspaces.sh +++ /dev/null @@ -1,24 +0,0 @@ - -#!/bin/sh - -WORKSPACES="stratum-core protocols roles utils" - -for workspace in $WORKSPACES; do - echo "Executing build on: $workspace" - cargo +1.75.0 build --manifest-path="$workspace/Cargo.toml" -- - if [ $? -ne 0 ]; then - echo "Build found some errors in: $workspace" - exit 1 - fi - - echo "Running fmt on: $workspace" - (cd $workspace && cargo +nightly fmt) - if [ $? -ne 0 ]; then - echo "Fmt failed in: $workspace" - exit 1 - fi -done - -echo "build success!" 
- - diff --git a/scripts/clippy-fmt-and-test.sh b/scripts/clippy-fmt-and-test.sh deleted file mode 100755 index fc44c8489b..0000000000 --- a/scripts/clippy-fmt-and-test.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh - -WORKSPACES="stratum-core protocols roles utils" - -for workspace in $WORKSPACES; do - echo "Executing clippy on: $workspace" - cargo +1.75.0 clippy --manifest-path="$workspace/Cargo.toml" -- -D warnings -A dead-code - if [ $? -ne 0 ]; then - echo "Clippy found some errors in: $workspace" - exit 1 - fi - - echo "Running tests on: $workspace" - cargo +1.75 test --manifest-path="$workspace/Cargo.toml" - if [ $? -ne 0 ]; then - echo "Tests failed in: $workspace" - exit 1 - fi - - echo "Running fmt on: $workspace" - (cd $workspace && cargo +nightly fmt) - if [ $? -ne 0 ]; then - echo "Fmt failed in: $workspace" - exit 1 - fi -done - -echo "Clippy success, all tests passed!" diff --git a/scripts/coverage-protocols.sh b/scripts/coverage-protocols.sh index 79c601a622..6724efa6b5 100755 --- a/scripts/coverage-protocols.sh +++ b/scripts/coverage-protocols.sh @@ -10,24 +10,26 @@ cd protocols tarpaulin crates=( - "v1" - "v2/binary-sv2/derive_codec" - "v2/binary-sv2" - "v2/channels-sv2" - "v2/noise-sv2" - "v2/framing-sv2" - "v2/codec-sv2" - "v2/subprotocols/common-messages" - "v2/subprotocols/template-distribution" - "v2/subprotocols/mining" - "v2/subprotocols/job-declaration" - "v2/parsers-sv2" + "sv1" + "sv2/binary-sv2/derive_codec" + "sv2/binary-sv2" + "sv2/channels-sv2" + "sv2/noise-sv2" + "sv2/framing-sv2" + "sv2/codec-sv2" + "sv2/subprotocols/common-messages" + "sv2/subprotocols/template-distribution" + "sv2/subprotocols/mining" + "sv2/subprotocols/job-declaration" + "sv2/roles-logic-sv2" + "sv2/parsers-sv2" + "sv2/handlers-sv2" ) for crate in "${crates[@]}"; do echo "Running Tarpaulin for $crate..." 
crate_name=$(basename "$crate") - cd "$crate" || exit 1 + cd "$crate" || exit 1 tarpaulin "$crate_name-coverage" cd - || exit 1 done diff --git a/scripts/coverage-roles.sh b/scripts/coverage-roles.sh deleted file mode 100755 index 95b38fae7a..0000000000 --- a/scripts/coverage-roles.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -tarpaulin() { - crate_name=$1 - output_dir="target/tarpaulin-reports/$crate_name" - mkdir -p "$output_dir" - cargo +nightly tarpaulin --verbose --out Xml --output-dir "$output_dir" --all-features -} - -cd roles -tarpaulin - -crates=( - "pool" - "test-utils/mining-device" - "translator" - "jd-client" - "jd-server" - "stratum-apps" -) - -for crate in "${crates[@]}"; do - echo "Running Tarpaulin for $crate..." - crate_name=$(basename "$crate") - cd "$crate" || exit 1 - tarpaulin "$crate_name-coverage" - cd - || exit 1 -done diff --git a/scripts/coverage-utils.sh b/scripts/coverage-utils.sh deleted file mode 100755 index 4991d2dfe2..0000000000 --- a/scripts/coverage-utils.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -tarpaulin() { - crate_name=$1 - output_dir="target/tarpaulin-reports/$crate_name" - mkdir -p "$output_dir" - cargo +nightly tarpaulin --verbose --out Xml --output-dir "$output_dir" --all-features -} - -cd utils -tarpaulin - -crates=( - "buffer" -) - -for crate in "${crates[@]}"; do - echo "Running Tarpaulin for $crate..." 
- crate_name=$(basename "$crate") - cd "$crate" || exit 1 - tarpaulin "$crate_name-coverage" - cd - || exit 1 -done diff --git a/scripts/release-libs.sh b/scripts/release-libs.sh index 86c0e29eaf..ff113e5ca2 100755 --- a/scripts/release-libs.sh +++ b/scripts/release-libs.sh @@ -32,4 +32,4 @@ fi echo "Publish command failed for $CRATE_DIR" echo "$OUTPUT" -exit 1 \ No newline at end of file +exit 1 diff --git a/scripts/run-integration-tests.sh b/scripts/run-integration-tests.sh new file mode 100755 index 0000000000..59709a96ab --- /dev/null +++ b/scripts/run-integration-tests.sh @@ -0,0 +1,67 @@ +#!/bin/bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +SV2_APPS_DIR="$REPO_ROOT/integration-test-framework/sv2-apps" +INTEGRATION_TESTS_DIR="$REPO_ROOT/integration-test-framework/sv2-apps/integration-tests" +SV2_APPS_REPO_URL=https://github.com/stratum-mining/sv2-apps.git + +echo "πŸ§ͺ Running integration tests for sv2-miner-apps changes..." +echo "πŸ“ Repository root: $REPO_ROOT" +echo "πŸ“ Integration test dir: $INTEGRATION_TESTS_DIR" +mkdir -p "$REPO_ROOT/integration-test-framework" + +# Clone/update integration test framework +if [ ! -d "$SV2_APPS_DIR" ]; then + echo "πŸ“₯ Cloning integration test framework..." + cd "$(dirname "$SV2_APPS_DIR")" + git clone $SV2_APPS_REPO_URL +else + echo "πŸ”„ Updating integration test framework..." + cd "$SV2_APPS_DIR" + git fetch origin + git reset --hard origin/main +fi + +if cargo nextest --version &>/dev/null; then + echo "βœ… cargo-nextest is already installed." +else + echo "πŸ”§ Configuring cargo nextest..." + curl -L --proto '=https' --tlsv1.2 -sSf \ + https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash + + cargo binstall cargo-nextest --secure --no-confirm + echo "βœ… cargo-nextest installed successfully." 
+fi + +cd "$INTEGRATION_TESTS_DIR" + +# # Add patch section to override all git dependencies with local paths +echo "πŸ”§ Adding patch section to override git dependencies..." + +# Remove any existing patch section first +if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' '/^# Override git dependencies with local paths/,/^$/d' Cargo.toml +else + sed -i '/^# Override git dependencies with local paths/,/^$/d' Cargo.toml +fi + + +# Add the patch section at the end of the file +cat >> Cargo.toml << 'EOF' + +# Override git dependencies with local paths to avoid version conflicts +# TODO: will need to replace to patch.crates-io as soons as they are available and updated on the sv2-apps repo +[patch."https://github.com/stratum-mining/stratum"] +stratum-core = {path = "../../../stratum-core"} +EOF + +echo "βœ… Updated Cargo.toml to use local dependencies" +echo "πŸƒ Running integration tests..." + +# Run the integration tests +RUST_BACKTRACE=1 RUST_LOG=debug cargo nextest run --nocapture --verbose + +cd "$REPO_ROOT" +echo "βœ… Integration tests completed!" diff --git a/scripts/rust/clippy.sh b/scripts/rust/clippy.sh deleted file mode 100755 index f91ba2c1e4..0000000000 --- a/scripts/rust/clippy.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -workspaces=("stratum-core" "roles" "protocols" "utils" "test/integration-tests") - -# print current rust version -echo "Rust version: $(rustc --version)" - -for workspace in "${workspaces[@]}"; do - echo "Running clippy for workspace: $workspace" - cargo clippy --manifest-path="$workspace/Cargo.toml" -- -D warnings - if [[ $? -ne 0 ]]; then - echo "Clippy failed for workspace: $workspace" - exit 1 - else - echo "Clippy passed for workspace: $workspace" - fi -done - -echo "Clippy success!" 
-exit 0 diff --git a/stratum-core/Cargo.toml b/stratum-core/Cargo.toml index aeea186af3..52d4ce02c4 100644 --- a/stratum-core/Cargo.toml +++ b/stratum-core/Cargo.toml @@ -12,21 +12,21 @@ homepage = "https://stratumprotocol.org" keywords = ["stratum", "mining", "bitcoin", "protocol"] [dependencies] -buffer_sv2 = { path = "../utils/buffer", version = "^2.0.0" } +buffer_sv2 = { path = "../sv2/buffer-sv2", version = "^2.0.0" } bitcoin = "0.32.5" -binary_sv2 = { path = "../protocols/v2/binary-sv2", version = "^5.0.0" } -codec_sv2 = { path = "../protocols/v2/codec-sv2", version = "^4.0.0", features = ["noise_sv2"]} -framing_sv2 = { path = "../protocols/v2/framing-sv2", version = "^5.0.0" } -noise_sv2 = { path = "../protocols/v2/noise-sv2", version = "^1.0.0" } -parsers_sv2 = { path = "../protocols/v2/parsers-sv2", version = "^0.1.0" } -handlers_sv2 = { path = "../protocols/v2/handlers-sv2", version = "^0.2.0" } -channels_sv2 = { path = "../protocols/v2/channels-sv2", version = "^2.0.0" } -common_messages_sv2 = { path = "../protocols/v2/subprotocols/common-messages", version = "^6.0.0" } -mining_sv2 = { path = "../protocols/v2/subprotocols/mining", version = "^6.0.0" } -template_distribution_sv2 = { path = "../protocols/v2/subprotocols/template-distribution", version = "^4.0.0" } -job_declaration_sv2 = { path = "../protocols/v2/subprotocols/job-declaration", version = "^5.0.0" } -sv1_api = { path = "../protocols/v1", version = "^2.1.0", optional = true } -stratum_translation = { path = "../protocols/stratum-translation", version = "^0.1.0", optional = true } +binary_sv2 = { path = "../sv2/binary-sv2", version = "^5.0.0" } +codec_sv2 = { path = "../sv2/codec-sv2", version = "^4.0.0", features = ["noise_sv2"]} +framing_sv2 = { path = "../sv2/framing-sv2", version = "^5.0.0" } +noise_sv2 = { path = "../sv2/noise-sv2", version = "^1.0.0" } +parsers_sv2 = { path = "../sv2/parsers-sv2", version = "^0.1.0" } +handlers_sv2 = { path = "../sv2/handlers-sv2", version = "^0.2.0" } 
+channels_sv2 = { path = "../sv2/channels-sv2", version = "^2.0.0" } +common_messages_sv2 = { path = "../sv2/subprotocols/common-messages", version = "^6.0.0" } +mining_sv2 = { path = "../sv2/subprotocols/mining", version = "^6.0.0" } +template_distribution_sv2 = { path = "../sv2/subprotocols/template-distribution", version = "^4.0.0" } +job_declaration_sv2 = { path = "../sv2/subprotocols/job-declaration", version = "^5.0.0" } +sv1_api = { path = "../sv1", version = "^2.1.0", optional = true } +stratum_translation = { path = "stratum-translation", version = "^0.1.0", optional = true } [features] with_buffer_pool = [ diff --git a/protocols/stratum-translation/Cargo.toml b/stratum-core/stratum-translation/Cargo.toml similarity index 54% rename from protocols/stratum-translation/Cargo.toml rename to stratum-core/stratum-translation/Cargo.toml index 9454524dd5..b410f998e2 100644 --- a/protocols/stratum-translation/Cargo.toml +++ b/stratum-core/stratum-translation/Cargo.toml @@ -11,8 +11,8 @@ path = "src/lib.rs" [dependencies] bitcoin = { version = "0.32.5" } -binary_sv2 = { path = "../v2/binary-sv2", version = "^5.0.0" } -mining_sv2 = { path = "../v2/subprotocols/mining", version = "^6.0.0" } -channels_sv2 = { path = "../v2/channels-sv2", version = "^2.0.0" } -v1 = { path = "../v1", package = "sv1_api", version = "^2.0.0" } +binary_sv2 = { path = "../../sv2/binary-sv2", version = "^5.0.0" } +mining_sv2 = { path = "../../sv2/subprotocols/mining", version = "^6.0.0" } +channels_sv2 = { path = "../../sv2/channels-sv2", version = "^2.0.0" } +v1 = { path = "../../sv1", package = "sv1_api", version = "^2.0.0" } tracing = "0.1" diff --git a/protocols/stratum-translation/src/error.rs b/stratum-core/stratum-translation/src/error.rs similarity index 100% rename from protocols/stratum-translation/src/error.rs rename to stratum-core/stratum-translation/src/error.rs diff --git a/protocols/stratum-translation/src/lib.rs b/stratum-core/stratum-translation/src/lib.rs similarity index 
100% rename from protocols/stratum-translation/src/lib.rs rename to stratum-core/stratum-translation/src/lib.rs diff --git a/protocols/stratum-translation/src/sv1_to_sv2.rs b/stratum-core/stratum-translation/src/sv1_to_sv2.rs similarity index 100% rename from protocols/stratum-translation/src/sv1_to_sv2.rs rename to stratum-core/stratum-translation/src/sv1_to_sv2.rs diff --git a/protocols/stratum-translation/src/sv2_to_sv1.rs b/stratum-core/stratum-translation/src/sv2_to_sv1.rs similarity index 100% rename from protocols/stratum-translation/src/sv2_to_sv1.rs rename to stratum-core/stratum-translation/src/sv2_to_sv1.rs diff --git a/protocols/v1/Cargo.toml b/sv1/Cargo.toml similarity index 92% rename from protocols/v1/Cargo.toml rename to sv1/Cargo.toml index 5ef1b8df26..6e19ed8946 100644 --- a/protocols/v1/Cargo.toml +++ b/sv1/Cargo.toml @@ -20,7 +20,7 @@ hex = "0.4.3" serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] } serde_json = { version = "1.0.64", default-features = false, features = ["alloc"] } tracing = {version = "0.1"} -binary_sv2 = { path = "../v2/binary-sv2", version = "^5.0.0" } +binary_sv2 = { path = "../sv2/binary-sv2", version = "^5.0.0" } [dev-dependencies] quickcheck = "1" diff --git a/protocols/v1/README.md b/sv1/README.md similarity index 100% rename from protocols/v1/README.md rename to sv1/README.md diff --git a/protocols/v1/examples/client_and_server.rs b/sv1/examples/client_and_server.rs similarity index 100% rename from protocols/v1/examples/client_and_server.rs rename to sv1/examples/client_and_server.rs diff --git a/protocols/v1/src/error.rs b/sv1/src/error.rs similarity index 100% rename from protocols/v1/src/error.rs rename to sv1/src/error.rs diff --git a/protocols/v1/src/json_rpc.rs b/sv1/src/json_rpc.rs similarity index 100% rename from protocols/v1/src/json_rpc.rs rename to sv1/src/json_rpc.rs diff --git a/protocols/v1/src/lib.rs b/sv1/src/lib.rs similarity index 100% rename from 
protocols/v1/src/lib.rs rename to sv1/src/lib.rs diff --git a/protocols/v1/src/methods/client_to_server.rs b/sv1/src/methods/client_to_server.rs similarity index 100% rename from protocols/v1/src/methods/client_to_server.rs rename to sv1/src/methods/client_to_server.rs diff --git a/protocols/v1/src/methods/mod.rs b/sv1/src/methods/mod.rs similarity index 100% rename from protocols/v1/src/methods/mod.rs rename to sv1/src/methods/mod.rs diff --git a/protocols/v1/src/methods/server_to_client.rs b/sv1/src/methods/server_to_client.rs similarity index 100% rename from protocols/v1/src/methods/server_to_client.rs rename to sv1/src/methods/server_to_client.rs diff --git a/protocols/v1/src/utils.rs b/sv1/src/utils.rs similarity index 100% rename from protocols/v1/src/utils.rs rename to sv1/src/utils.rs diff --git a/protocols/v2/binary-sv2/Cargo.toml b/sv2/binary-sv2/Cargo.toml similarity index 90% rename from protocols/v2/binary-sv2/Cargo.toml rename to sv2/binary-sv2/Cargo.toml index f3b6eb32d9..9e7585962e 100644 --- a/protocols/v2/binary-sv2/Cargo.toml +++ b/sv2/binary-sv2/Cargo.toml @@ -16,7 +16,7 @@ keywords = ["stratum", "mining", "bitcoin", "protocol"] [dependencies] derive_codec_sv2 = { path = "derive_codec", version = "^1.0.0" } quickcheck = { version = "1.0.0", optional = true } -buffer_sv2 = { path = "../../../utils/buffer", optional=true, version = "^2.0.0" } +buffer_sv2 = { path = "../buffer-sv2", optional=true, version = "^2.0.0" } [features] no_std = [] diff --git a/protocols/v2/binary-sv2/README.md b/sv2/binary-sv2/README.md similarity index 100% rename from protocols/v2/binary-sv2/README.md rename to sv2/binary-sv2/README.md diff --git a/protocols/fuzz-tests/.gitignore b/sv2/binary-sv2/derive_codec/.gitignore similarity index 100% rename from protocols/fuzz-tests/.gitignore rename to sv2/binary-sv2/derive_codec/.gitignore diff --git a/protocols/v2/binary-sv2/derive_codec/Cargo.toml b/sv2/binary-sv2/derive_codec/Cargo.toml similarity index 100% rename from 
protocols/v2/binary-sv2/derive_codec/Cargo.toml rename to sv2/binary-sv2/derive_codec/Cargo.toml diff --git a/protocols/v2/binary-sv2/derive_codec/README.md b/sv2/binary-sv2/derive_codec/README.md similarity index 100% rename from protocols/v2/binary-sv2/derive_codec/README.md rename to sv2/binary-sv2/derive_codec/README.md diff --git a/protocols/v2/binary-sv2/derive_codec/src/lib.rs b/sv2/binary-sv2/derive_codec/src/lib.rs similarity index 100% rename from protocols/v2/binary-sv2/derive_codec/src/lib.rs rename to sv2/binary-sv2/derive_codec/src/lib.rs diff --git a/protocols/v2/binary-sv2/examples/encode_decode.rs b/sv2/binary-sv2/examples/encode_decode.rs similarity index 100% rename from protocols/v2/binary-sv2/examples/encode_decode.rs rename to sv2/binary-sv2/examples/encode_decode.rs diff --git a/protocols/v2/binary-sv2/src/codec/decodable.rs b/sv2/binary-sv2/src/codec/decodable.rs similarity index 100% rename from protocols/v2/binary-sv2/src/codec/decodable.rs rename to sv2/binary-sv2/src/codec/decodable.rs diff --git a/protocols/v2/binary-sv2/src/codec/encodable.rs b/sv2/binary-sv2/src/codec/encodable.rs similarity index 100% rename from protocols/v2/binary-sv2/src/codec/encodable.rs rename to sv2/binary-sv2/src/codec/encodable.rs diff --git a/protocols/v2/binary-sv2/src/codec/impls.rs b/sv2/binary-sv2/src/codec/impls.rs similarity index 100% rename from protocols/v2/binary-sv2/src/codec/impls.rs rename to sv2/binary-sv2/src/codec/impls.rs diff --git a/protocols/v2/binary-sv2/src/codec/mod.rs b/sv2/binary-sv2/src/codec/mod.rs similarity index 100% rename from protocols/v2/binary-sv2/src/codec/mod.rs rename to sv2/binary-sv2/src/codec/mod.rs diff --git a/protocols/v2/binary-sv2/src/datatypes/copy_data_types.rs b/sv2/binary-sv2/src/datatypes/copy_data_types.rs similarity index 100% rename from protocols/v2/binary-sv2/src/datatypes/copy_data_types.rs rename to sv2/binary-sv2/src/datatypes/copy_data_types.rs diff --git 
a/protocols/v2/binary-sv2/src/datatypes/mod.rs b/sv2/binary-sv2/src/datatypes/mod.rs similarity index 100% rename from protocols/v2/binary-sv2/src/datatypes/mod.rs rename to sv2/binary-sv2/src/datatypes/mod.rs diff --git a/protocols/v2/binary-sv2/src/datatypes/non_copy_data_types/inner.rs b/sv2/binary-sv2/src/datatypes/non_copy_data_types/inner.rs similarity index 100% rename from protocols/v2/binary-sv2/src/datatypes/non_copy_data_types/inner.rs rename to sv2/binary-sv2/src/datatypes/non_copy_data_types/inner.rs diff --git a/protocols/v2/binary-sv2/src/datatypes/non_copy_data_types/mod.rs b/sv2/binary-sv2/src/datatypes/non_copy_data_types/mod.rs similarity index 100% rename from protocols/v2/binary-sv2/src/datatypes/non_copy_data_types/mod.rs rename to sv2/binary-sv2/src/datatypes/non_copy_data_types/mod.rs diff --git a/protocols/v2/binary-sv2/src/datatypes/non_copy_data_types/seq_inner.rs b/sv2/binary-sv2/src/datatypes/non_copy_data_types/seq_inner.rs similarity index 100% rename from protocols/v2/binary-sv2/src/datatypes/non_copy_data_types/seq_inner.rs rename to sv2/binary-sv2/src/datatypes/non_copy_data_types/seq_inner.rs diff --git a/protocols/v2/binary-sv2/src/lib.rs b/sv2/binary-sv2/src/lib.rs similarity index 100% rename from protocols/v2/binary-sv2/src/lib.rs rename to sv2/binary-sv2/src/lib.rs diff --git a/protocols/v2/binary-sv2/tests/test.rs b/sv2/binary-sv2/tests/test.rs similarity index 100% rename from protocols/v2/binary-sv2/tests/test.rs rename to sv2/binary-sv2/tests/test.rs diff --git a/utils/buffer/BENCHES.md b/sv2/buffer-sv2/BENCHES.md similarity index 100% rename from utils/buffer/BENCHES.md rename to sv2/buffer-sv2/BENCHES.md diff --git a/utils/buffer/Cargo.toml b/sv2/buffer-sv2/Cargo.toml similarity index 100% rename from utils/buffer/Cargo.toml rename to sv2/buffer-sv2/Cargo.toml diff --git a/utils/buffer/README.md b/sv2/buffer-sv2/README.md similarity index 100% rename from utils/buffer/README.md rename to sv2/buffer-sv2/README.md diff 
--git a/utils/buffer/benches/control_struct.rs b/sv2/buffer-sv2/benches/control_struct.rs similarity index 100% rename from utils/buffer/benches/control_struct.rs rename to sv2/buffer-sv2/benches/control_struct.rs diff --git a/utils/buffer/benches/pool_benchmark.rs b/sv2/buffer-sv2/benches/pool_benchmark.rs similarity index 100% rename from utils/buffer/benches/pool_benchmark.rs rename to sv2/buffer-sv2/benches/pool_benchmark.rs diff --git a/utils/buffer/benches/pool_iai.rs b/sv2/buffer-sv2/benches/pool_iai.rs similarity index 100% rename from utils/buffer/benches/pool_iai.rs rename to sv2/buffer-sv2/benches/pool_iai.rs diff --git a/utils/buffer/examples/basic_buffer_pool.rs b/sv2/buffer-sv2/examples/basic_buffer_pool.rs similarity index 100% rename from utils/buffer/examples/basic_buffer_pool.rs rename to sv2/buffer-sv2/examples/basic_buffer_pool.rs diff --git a/utils/buffer/examples/buffer_pool_exhaustion.rs b/sv2/buffer-sv2/examples/buffer_pool_exhaustion.rs similarity index 100% rename from utils/buffer/examples/buffer_pool_exhaustion.rs rename to sv2/buffer-sv2/examples/buffer_pool_exhaustion.rs diff --git a/utils/buffer/examples/variable_sized_messages.rs b/sv2/buffer-sv2/examples/variable_sized_messages.rs similarity index 100% rename from utils/buffer/examples/variable_sized_messages.rs rename to sv2/buffer-sv2/examples/variable_sized_messages.rs diff --git a/utils/buffer/fuzz/.gitignore b/sv2/buffer-sv2/fuzz/.gitignore similarity index 100% rename from utils/buffer/fuzz/.gitignore rename to sv2/buffer-sv2/fuzz/.gitignore diff --git a/utils/buffer/fuzz/Cargo.toml b/sv2/buffer-sv2/fuzz/Cargo.toml similarity index 100% rename from utils/buffer/fuzz/Cargo.toml rename to sv2/buffer-sv2/fuzz/Cargo.toml diff --git a/utils/buffer/fuzz/fuzz_targets/faster.rs b/sv2/buffer-sv2/fuzz/fuzz_targets/faster.rs similarity index 100% rename from utils/buffer/fuzz/fuzz_targets/faster.rs rename to sv2/buffer-sv2/fuzz/fuzz_targets/faster.rs diff --git 
a/utils/buffer/fuzz/fuzz_targets/slower.rs b/sv2/buffer-sv2/fuzz/fuzz_targets/slower.rs similarity index 100% rename from utils/buffer/fuzz/fuzz_targets/slower.rs rename to sv2/buffer-sv2/fuzz/fuzz_targets/slower.rs diff --git a/utils/buffer/fuzz/random b/sv2/buffer-sv2/fuzz/random similarity index 100% rename from utils/buffer/fuzz/random rename to sv2/buffer-sv2/fuzz/random diff --git a/utils/buffer/fuzz/run.sh b/sv2/buffer-sv2/fuzz/run.sh similarity index 100% rename from utils/buffer/fuzz/run.sh rename to sv2/buffer-sv2/fuzz/run.sh diff --git a/utils/buffer/fuzz/rust-toolchain.toml b/sv2/buffer-sv2/fuzz/rust-toolchain.toml similarity index 100% rename from utils/buffer/fuzz/rust-toolchain.toml rename to sv2/buffer-sv2/fuzz/rust-toolchain.toml diff --git a/utils/buffer/src/buffer.rs b/sv2/buffer-sv2/src/buffer.rs similarity index 100% rename from utils/buffer/src/buffer.rs rename to sv2/buffer-sv2/src/buffer.rs diff --git a/utils/buffer/src/buffer_pool/mod.rs b/sv2/buffer-sv2/src/buffer_pool/mod.rs similarity index 99% rename from utils/buffer/src/buffer_pool/mod.rs rename to sv2/buffer-sv2/src/buffer_pool/mod.rs index 280b128ac1..57e0c17444 100644 --- a/utils/buffer/src/buffer_pool/mod.rs +++ b/sv2/buffer-sv2/src/buffer_pool/mod.rs @@ -260,8 +260,7 @@ impl InnerMemory { fn get_front_capacity(&self, back_start: usize) -> usize { #[cfg(feature = "fuzz")] assert!( - back_start >= 1 - && back_start < POOL_CAPACITY + (1..POOL_CAPACITY).contains(&back_start) && self.slots[back_start].0 != 0_usize && self.slots[back_start].1 != 0_usize && self.slots[back_start].0 + self.slots[back_start].1 <= self.pool.len() diff --git a/utils/buffer/src/buffer_pool/pool_back.rs b/sv2/buffer-sv2/src/buffer_pool/pool_back.rs similarity index 97% rename from utils/buffer/src/buffer_pool/pool_back.rs rename to sv2/buffer-sv2/src/buffer_pool/pool_back.rs index 86e8f746d5..8000086de6 100644 --- a/utils/buffer/src/buffer_pool/pool_back.rs +++ b/sv2/buffer-sv2/src/buffer_pool/pool_back.rs @@ 
-123,10 +123,9 @@ impl PoolBack { #[cfg(feature = "fuzz")] assert!( - self.len + self.back_start <= POOL_CAPACITY - && self.len + element_to_drop + already_dropped + self.back_start - == POOL_CAPACITY - && self.len + self.back_start <= POOL_CAPACITY + !(self.len + self.back_start > POOL_CAPACITY + || self.len + element_to_drop + already_dropped + self.back_start + != POOL_CAPACITY) ); memory.try_change_len(self.len + self.back_start, len) diff --git a/utils/buffer/src/lib.rs b/sv2/buffer-sv2/src/lib.rs similarity index 100% rename from utils/buffer/src/lib.rs rename to sv2/buffer-sv2/src/lib.rs diff --git a/utils/buffer/src/slice.rs b/sv2/buffer-sv2/src/slice.rs similarity index 100% rename from utils/buffer/src/slice.rs rename to sv2/buffer-sv2/src/slice.rs diff --git a/utils/buffer/src/test.rs b/sv2/buffer-sv2/src/test.rs similarity index 100% rename from utils/buffer/src/test.rs rename to sv2/buffer-sv2/src/test.rs diff --git a/protocols/v2/channels-sv2/Cargo.toml b/sv2/channels-sv2/Cargo.toml similarity index 100% rename from protocols/v2/channels-sv2/Cargo.toml rename to sv2/channels-sv2/Cargo.toml diff --git a/protocols/v2/channels-sv2/README.md b/sv2/channels-sv2/README.md similarity index 100% rename from protocols/v2/channels-sv2/README.md rename to sv2/channels-sv2/README.md diff --git a/protocols/v2/channels-sv2/src/bip141.rs b/sv2/channels-sv2/src/bip141.rs similarity index 100% rename from protocols/v2/channels-sv2/src/bip141.rs rename to sv2/channels-sv2/src/bip141.rs diff --git a/protocols/v2/channels-sv2/src/chain_tip.rs b/sv2/channels-sv2/src/chain_tip.rs similarity index 100% rename from protocols/v2/channels-sv2/src/chain_tip.rs rename to sv2/channels-sv2/src/chain_tip.rs diff --git a/protocols/v2/channels-sv2/src/client/error.rs b/sv2/channels-sv2/src/client/error.rs similarity index 100% rename from protocols/v2/channels-sv2/src/client/error.rs rename to sv2/channels-sv2/src/client/error.rs diff --git 
a/protocols/v2/channels-sv2/src/client/extended.rs b/sv2/channels-sv2/src/client/extended.rs similarity index 100% rename from protocols/v2/channels-sv2/src/client/extended.rs rename to sv2/channels-sv2/src/client/extended.rs diff --git a/protocols/v2/channels-sv2/src/client/group.rs b/sv2/channels-sv2/src/client/group.rs similarity index 100% rename from protocols/v2/channels-sv2/src/client/group.rs rename to sv2/channels-sv2/src/client/group.rs diff --git a/protocols/v2/channels-sv2/src/client/mod.rs b/sv2/channels-sv2/src/client/mod.rs similarity index 100% rename from protocols/v2/channels-sv2/src/client/mod.rs rename to sv2/channels-sv2/src/client/mod.rs diff --git a/protocols/v2/channels-sv2/src/client/share_accounting.rs b/sv2/channels-sv2/src/client/share_accounting.rs similarity index 100% rename from protocols/v2/channels-sv2/src/client/share_accounting.rs rename to sv2/channels-sv2/src/client/share_accounting.rs diff --git a/protocols/v2/channels-sv2/src/client/standard.rs b/sv2/channels-sv2/src/client/standard.rs similarity index 100% rename from protocols/v2/channels-sv2/src/client/standard.rs rename to sv2/channels-sv2/src/client/standard.rs diff --git a/protocols/v2/channels-sv2/src/lib.rs b/sv2/channels-sv2/src/lib.rs similarity index 94% rename from protocols/v2/channels-sv2/src/lib.rs rename to sv2/channels-sv2/src/lib.rs index dc8feef62e..33a8868c2b 100644 --- a/protocols/v2/channels-sv2/src/lib.rs +++ b/sv2/channels-sv2/src/lib.rs @@ -29,5 +29,9 @@ pub mod chain_tip; pub mod client; pub mod merkle_root; pub mod target; + +#[cfg(not(feature = "no_std"))] pub mod vardiff; + +#[cfg(not(feature = "no_std"))] pub use vardiff::{classic::VardiffState, Vardiff}; diff --git a/protocols/v2/channels-sv2/src/merkle_root.rs b/sv2/channels-sv2/src/merkle_root.rs similarity index 100% rename from protocols/v2/channels-sv2/src/merkle_root.rs rename to sv2/channels-sv2/src/merkle_root.rs diff --git a/protocols/v2/channels-sv2/src/outputs.rs 
b/sv2/channels-sv2/src/outputs.rs similarity index 100% rename from protocols/v2/channels-sv2/src/outputs.rs rename to sv2/channels-sv2/src/outputs.rs diff --git a/protocols/v2/channels-sv2/src/server/error.rs b/sv2/channels-sv2/src/server/error.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/error.rs rename to sv2/channels-sv2/src/server/error.rs diff --git a/protocols/v2/channels-sv2/src/server/extended.rs b/sv2/channels-sv2/src/server/extended.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/extended.rs rename to sv2/channels-sv2/src/server/extended.rs diff --git a/protocols/v2/channels-sv2/src/server/group.rs b/sv2/channels-sv2/src/server/group.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/group.rs rename to sv2/channels-sv2/src/server/group.rs diff --git a/protocols/v2/channels-sv2/src/server/jobs/error.rs b/sv2/channels-sv2/src/server/jobs/error.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/jobs/error.rs rename to sv2/channels-sv2/src/server/jobs/error.rs diff --git a/protocols/v2/channels-sv2/src/server/jobs/extended.rs b/sv2/channels-sv2/src/server/jobs/extended.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/jobs/extended.rs rename to sv2/channels-sv2/src/server/jobs/extended.rs diff --git a/protocols/v2/channels-sv2/src/server/jobs/factory.rs b/sv2/channels-sv2/src/server/jobs/factory.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/jobs/factory.rs rename to sv2/channels-sv2/src/server/jobs/factory.rs diff --git a/protocols/v2/channels-sv2/src/server/jobs/job_store.rs b/sv2/channels-sv2/src/server/jobs/job_store.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/jobs/job_store.rs rename to sv2/channels-sv2/src/server/jobs/job_store.rs diff --git a/protocols/v2/channels-sv2/src/server/jobs/mod.rs b/sv2/channels-sv2/src/server/jobs/mod.rs similarity index 100% rename from 
protocols/v2/channels-sv2/src/server/jobs/mod.rs rename to sv2/channels-sv2/src/server/jobs/mod.rs diff --git a/protocols/v2/channels-sv2/src/server/jobs/standard.rs b/sv2/channels-sv2/src/server/jobs/standard.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/jobs/standard.rs rename to sv2/channels-sv2/src/server/jobs/standard.rs diff --git a/protocols/v2/channels-sv2/src/server/mod.rs b/sv2/channels-sv2/src/server/mod.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/mod.rs rename to sv2/channels-sv2/src/server/mod.rs diff --git a/protocols/v2/channels-sv2/src/server/share_accounting.rs b/sv2/channels-sv2/src/server/share_accounting.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/share_accounting.rs rename to sv2/channels-sv2/src/server/share_accounting.rs diff --git a/protocols/v2/channels-sv2/src/server/standard.rs b/sv2/channels-sv2/src/server/standard.rs similarity index 100% rename from protocols/v2/channels-sv2/src/server/standard.rs rename to sv2/channels-sv2/src/server/standard.rs diff --git a/protocols/v2/channels-sv2/src/target.rs b/sv2/channels-sv2/src/target.rs similarity index 100% rename from protocols/v2/channels-sv2/src/target.rs rename to sv2/channels-sv2/src/target.rs diff --git a/protocols/v2/channels-sv2/src/vardiff/classic.rs b/sv2/channels-sv2/src/vardiff/classic.rs similarity index 100% rename from protocols/v2/channels-sv2/src/vardiff/classic.rs rename to sv2/channels-sv2/src/vardiff/classic.rs diff --git a/protocols/v2/channels-sv2/src/vardiff/error.rs b/sv2/channels-sv2/src/vardiff/error.rs similarity index 100% rename from protocols/v2/channels-sv2/src/vardiff/error.rs rename to sv2/channels-sv2/src/vardiff/error.rs diff --git a/protocols/v2/channels-sv2/src/vardiff/mod.rs b/sv2/channels-sv2/src/vardiff/mod.rs similarity index 100% rename from protocols/v2/channels-sv2/src/vardiff/mod.rs rename to sv2/channels-sv2/src/vardiff/mod.rs diff --git 
a/protocols/v2/channels-sv2/src/vardiff/test/classic.rs b/sv2/channels-sv2/src/vardiff/test/classic.rs similarity index 100% rename from protocols/v2/channels-sv2/src/vardiff/test/classic.rs rename to sv2/channels-sv2/src/vardiff/test/classic.rs diff --git a/protocols/v2/channels-sv2/src/vardiff/test/mod.rs b/sv2/channels-sv2/src/vardiff/test/mod.rs similarity index 100% rename from protocols/v2/channels-sv2/src/vardiff/test/mod.rs rename to sv2/channels-sv2/src/vardiff/test/mod.rs diff --git a/protocols/v2/codec-sv2/Cargo.toml b/sv2/codec-sv2/Cargo.toml similarity index 70% rename from protocols/v2/codec-sv2/Cargo.toml rename to sv2/codec-sv2/Cargo.toml index 43a825111b..553df291ab 100644 --- a/protocols/v2/codec-sv2/Cargo.toml +++ b/sv2/codec-sv2/Cargo.toml @@ -12,10 +12,10 @@ homepage = "https://stratumprotocol.org" keywords = ["stratum", "mining", "bitcoin", "protocol"] [dependencies] -framing_sv2 = { path = "../../../protocols/v2/framing-sv2", version = "^5.0.0" } -noise_sv2 = { path = "../../../protocols/v2/noise-sv2", default-features = false, optional = true, version = "^1.0.0" } -binary_sv2 = { path = "../../../protocols/v2/binary-sv2", version = "^5.0.0" } -buffer_sv2 = { path = "../../../utils/buffer", version = "^2.0.0" } +framing_sv2 = { path = "../framing-sv2", version = "^5.0.0" } +noise_sv2 = { path = "../noise-sv2", default-features = false, optional = true, version = "^1.0.0" } +binary_sv2 = { path = "../binary-sv2", version = "^5.0.0" } +buffer_sv2 = { path = "../buffer-sv2", version = "^2.0.0" } rand = { version = "0.8.5", default-features = false } tracing = { version = "0.1", optional = true } diff --git a/protocols/v2/codec-sv2/README.md b/sv2/codec-sv2/README.md similarity index 100% rename from protocols/v2/codec-sv2/README.md rename to sv2/codec-sv2/README.md diff --git a/protocols/v2/codec-sv2/examples/encrypted.rs b/sv2/codec-sv2/examples/encrypted.rs similarity index 100% rename from protocols/v2/codec-sv2/examples/encrypted.rs rename 
to sv2/codec-sv2/examples/encrypted.rs diff --git a/protocols/v2/codec-sv2/examples/unencrypted.rs b/sv2/codec-sv2/examples/unencrypted.rs similarity index 100% rename from protocols/v2/codec-sv2/examples/unencrypted.rs rename to sv2/codec-sv2/examples/unencrypted.rs diff --git a/protocols/v2/codec-sv2/src/decoder.rs b/sv2/codec-sv2/src/decoder.rs similarity index 97% rename from protocols/v2/codec-sv2/src/decoder.rs rename to sv2/codec-sv2/src/decoder.rs index c27b48ffcc..7f527066e7 100644 --- a/protocols/v2/codec-sv2/src/decoder.rs +++ b/sv2/codec-sv2/src/decoder.rs @@ -228,9 +228,15 @@ impl<'a, T: Serialize + GetSize + Deserialize<'a>, B: IsBuffer + AeadBuffer> Wit let src = self.noise_buffer.get_data_owned().as_mut().to_vec(); // Since the frame length is already validated during the handshake process, this - // operation is infallible + // operation is infallible. + // Conditionally call `.into()` based on `with_buffer_pool` feature to handle differences + // between Clippy and test builds. 
See: https://github.com/stratum-mining/stratum/pull/1860#discussion_r2457908851 + #[cfg(feature = "with_buffer_pool")] let frame = HandShakeFrame::from_bytes_unchecked(src.into()); + #[cfg(not(feature = "with_buffer_pool"))] + let frame = HandShakeFrame::from_bytes_unchecked(src); + frame.into() } diff --git a/protocols/v2/codec-sv2/src/encoder.rs b/sv2/codec-sv2/src/encoder.rs similarity index 100% rename from protocols/v2/codec-sv2/src/encoder.rs rename to sv2/codec-sv2/src/encoder.rs diff --git a/protocols/v2/codec-sv2/src/error.rs b/sv2/codec-sv2/src/error.rs similarity index 100% rename from protocols/v2/codec-sv2/src/error.rs rename to sv2/codec-sv2/src/error.rs diff --git a/protocols/v2/codec-sv2/src/lib.rs b/sv2/codec-sv2/src/lib.rs similarity index 100% rename from protocols/v2/codec-sv2/src/lib.rs rename to sv2/codec-sv2/src/lib.rs diff --git a/protocols/v2/framing-sv2/Cargo.toml b/sv2/framing-sv2/Cargo.toml similarity index 70% rename from protocols/v2/framing-sv2/Cargo.toml rename to sv2/framing-sv2/Cargo.toml index fb3ebfd996..992bd1c9e5 100644 --- a/protocols/v2/framing-sv2/Cargo.toml +++ b/sv2/framing-sv2/Cargo.toml @@ -14,12 +14,12 @@ keywords = ["stratum", "mining", "bitcoin", "protocol"] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -binary_sv2 = { path = "../../../protocols/v2/binary-sv2", version = "^5.0.0" } -buffer_sv2 = { path = "../../../utils/buffer", optional=true, version = "^2.0.0" } -noise_sv2 = { path = "../../../protocols/v2/noise-sv2", version = "^1.0.0" } +binary_sv2 = { path = "../binary-sv2", version = "^5.0.0" } +buffer_sv2 = { path = "../buffer-sv2", optional=true, version = "^2.0.0" } +noise_sv2 = { path = "../noise-sv2", version = "^1.0.0" } [dev-dependencies] -noise_sv2 = { path = "../../../protocols/v2/noise-sv2", version = "^1.0.0" } +noise_sv2 = { path = "../noise-sv2", version = "^1.0.0" } rand = "0.8.3" secp256k1 = { version = "0.28.2", default-features = 
false, features =["alloc","rand","rand-std"] } diff --git a/protocols/v2/framing-sv2/README.md b/sv2/framing-sv2/README.md similarity index 100% rename from protocols/v2/framing-sv2/README.md rename to sv2/framing-sv2/README.md diff --git a/protocols/v2/framing-sv2/examples/sv2_frame.rs b/sv2/framing-sv2/examples/sv2_frame.rs similarity index 100% rename from protocols/v2/framing-sv2/examples/sv2_frame.rs rename to sv2/framing-sv2/examples/sv2_frame.rs diff --git a/protocols/v2/framing-sv2/src/error.rs b/sv2/framing-sv2/src/error.rs similarity index 100% rename from protocols/v2/framing-sv2/src/error.rs rename to sv2/framing-sv2/src/error.rs diff --git a/protocols/v2/framing-sv2/src/framing.rs b/sv2/framing-sv2/src/framing.rs similarity index 100% rename from protocols/v2/framing-sv2/src/framing.rs rename to sv2/framing-sv2/src/framing.rs diff --git a/protocols/v2/framing-sv2/src/header.rs b/sv2/framing-sv2/src/header.rs similarity index 100% rename from protocols/v2/framing-sv2/src/header.rs rename to sv2/framing-sv2/src/header.rs diff --git a/protocols/v2/framing-sv2/src/lib.rs b/sv2/framing-sv2/src/lib.rs similarity index 100% rename from protocols/v2/framing-sv2/src/lib.rs rename to sv2/framing-sv2/src/lib.rs diff --git a/protocols/v2/handlers-sv2/Cargo.toml b/sv2/handlers-sv2/Cargo.toml similarity index 100% rename from protocols/v2/handlers-sv2/Cargo.toml rename to sv2/handlers-sv2/Cargo.toml diff --git a/protocols/v2/handlers-sv2/README.md b/sv2/handlers-sv2/README.md similarity index 100% rename from protocols/v2/handlers-sv2/README.md rename to sv2/handlers-sv2/README.md diff --git a/protocols/v2/handlers-sv2/src/common.rs b/sv2/handlers-sv2/src/common.rs similarity index 100% rename from protocols/v2/handlers-sv2/src/common.rs rename to sv2/handlers-sv2/src/common.rs diff --git a/protocols/v2/handlers-sv2/src/error.rs b/sv2/handlers-sv2/src/error.rs similarity index 100% rename from protocols/v2/handlers-sv2/src/error.rs rename to 
sv2/handlers-sv2/src/error.rs diff --git a/protocols/v2/handlers-sv2/src/job_declaration.rs b/sv2/handlers-sv2/src/job_declaration.rs similarity index 100% rename from protocols/v2/handlers-sv2/src/job_declaration.rs rename to sv2/handlers-sv2/src/job_declaration.rs diff --git a/protocols/v2/handlers-sv2/src/lib.rs b/sv2/handlers-sv2/src/lib.rs similarity index 100% rename from protocols/v2/handlers-sv2/src/lib.rs rename to sv2/handlers-sv2/src/lib.rs diff --git a/protocols/v2/handlers-sv2/src/mining.rs b/sv2/handlers-sv2/src/mining.rs similarity index 100% rename from protocols/v2/handlers-sv2/src/mining.rs rename to sv2/handlers-sv2/src/mining.rs diff --git a/protocols/v2/handlers-sv2/src/template_distribution.rs b/sv2/handlers-sv2/src/template_distribution.rs similarity index 100% rename from protocols/v2/handlers-sv2/src/template_distribution.rs rename to sv2/handlers-sv2/src/template_distribution.rs diff --git a/protocols/v2/noise-sv2/Cargo.toml b/sv2/noise-sv2/Cargo.toml similarity index 100% rename from protocols/v2/noise-sv2/Cargo.toml rename to sv2/noise-sv2/Cargo.toml diff --git a/protocols/v2/noise-sv2/README.md b/sv2/noise-sv2/README.md similarity index 100% rename from protocols/v2/noise-sv2/README.md rename to sv2/noise-sv2/README.md diff --git a/protocols/v2/noise-sv2/examples/handshake.rs b/sv2/noise-sv2/examples/handshake.rs similarity index 100% rename from protocols/v2/noise-sv2/examples/handshake.rs rename to sv2/noise-sv2/examples/handshake.rs diff --git a/protocols/v2/noise-sv2/src/aed_cipher.rs b/sv2/noise-sv2/src/aed_cipher.rs similarity index 100% rename from protocols/v2/noise-sv2/src/aed_cipher.rs rename to sv2/noise-sv2/src/aed_cipher.rs diff --git a/protocols/v2/noise-sv2/src/cipher_state.rs b/sv2/noise-sv2/src/cipher_state.rs similarity index 100% rename from protocols/v2/noise-sv2/src/cipher_state.rs rename to sv2/noise-sv2/src/cipher_state.rs diff --git a/protocols/v2/noise-sv2/src/error.rs b/sv2/noise-sv2/src/error.rs similarity 
index 100% rename from protocols/v2/noise-sv2/src/error.rs rename to sv2/noise-sv2/src/error.rs diff --git a/protocols/v2/noise-sv2/src/handshake.rs b/sv2/noise-sv2/src/handshake.rs similarity index 100% rename from protocols/v2/noise-sv2/src/handshake.rs rename to sv2/noise-sv2/src/handshake.rs diff --git a/protocols/v2/noise-sv2/src/initiator.rs b/sv2/noise-sv2/src/initiator.rs similarity index 100% rename from protocols/v2/noise-sv2/src/initiator.rs rename to sv2/noise-sv2/src/initiator.rs diff --git a/protocols/v2/noise-sv2/src/lib.rs b/sv2/noise-sv2/src/lib.rs similarity index 100% rename from protocols/v2/noise-sv2/src/lib.rs rename to sv2/noise-sv2/src/lib.rs diff --git a/protocols/v2/noise-sv2/src/responder.rs b/sv2/noise-sv2/src/responder.rs similarity index 100% rename from protocols/v2/noise-sv2/src/responder.rs rename to sv2/noise-sv2/src/responder.rs diff --git a/protocols/v2/noise-sv2/src/signature_message.rs b/sv2/noise-sv2/src/signature_message.rs similarity index 100% rename from protocols/v2/noise-sv2/src/signature_message.rs rename to sv2/noise-sv2/src/signature_message.rs diff --git a/protocols/v2/noise-sv2/src/test.rs b/sv2/noise-sv2/src/test.rs similarity index 100% rename from protocols/v2/noise-sv2/src/test.rs rename to sv2/noise-sv2/src/test.rs diff --git a/protocols/v2/parsers-sv2/Cargo.toml b/sv2/parsers-sv2/Cargo.toml similarity index 100% rename from protocols/v2/parsers-sv2/Cargo.toml rename to sv2/parsers-sv2/Cargo.toml diff --git a/protocols/v2/parsers-sv2/README.md b/sv2/parsers-sv2/README.md similarity index 100% rename from protocols/v2/parsers-sv2/README.md rename to sv2/parsers-sv2/README.md diff --git a/protocols/v2/parsers-sv2/src/error.rs b/sv2/parsers-sv2/src/error.rs similarity index 100% rename from protocols/v2/parsers-sv2/src/error.rs rename to sv2/parsers-sv2/src/error.rs diff --git a/protocols/v2/parsers-sv2/src/lib.rs b/sv2/parsers-sv2/src/lib.rs similarity index 100% rename from protocols/v2/parsers-sv2/src/lib.rs 
rename to sv2/parsers-sv2/src/lib.rs diff --git a/protocols/v2/subprotocols/common-messages/Cargo.toml b/sv2/subprotocols/common-messages/Cargo.toml similarity index 100% rename from protocols/v2/subprotocols/common-messages/Cargo.toml rename to sv2/subprotocols/common-messages/Cargo.toml diff --git a/protocols/v2/subprotocols/common-messages/README.md b/sv2/subprotocols/common-messages/README.md similarity index 100% rename from protocols/v2/subprotocols/common-messages/README.md rename to sv2/subprotocols/common-messages/README.md diff --git a/protocols/v2/subprotocols/common-messages/src/channel_endpoint_changed.rs b/sv2/subprotocols/common-messages/src/channel_endpoint_changed.rs similarity index 100% rename from protocols/v2/subprotocols/common-messages/src/channel_endpoint_changed.rs rename to sv2/subprotocols/common-messages/src/channel_endpoint_changed.rs diff --git a/protocols/v2/subprotocols/common-messages/src/lib.rs b/sv2/subprotocols/common-messages/src/lib.rs similarity index 100% rename from protocols/v2/subprotocols/common-messages/src/lib.rs rename to sv2/subprotocols/common-messages/src/lib.rs diff --git a/protocols/v2/subprotocols/common-messages/src/reconnect.rs b/sv2/subprotocols/common-messages/src/reconnect.rs similarity index 100% rename from protocols/v2/subprotocols/common-messages/src/reconnect.rs rename to sv2/subprotocols/common-messages/src/reconnect.rs diff --git a/protocols/v2/subprotocols/common-messages/src/setup_connection.rs b/sv2/subprotocols/common-messages/src/setup_connection.rs similarity index 100% rename from protocols/v2/subprotocols/common-messages/src/setup_connection.rs rename to sv2/subprotocols/common-messages/src/setup_connection.rs diff --git a/protocols/v2/subprotocols/job-declaration/Cargo.toml b/sv2/subprotocols/job-declaration/Cargo.toml similarity index 100% rename from protocols/v2/subprotocols/job-declaration/Cargo.toml rename to sv2/subprotocols/job-declaration/Cargo.toml diff --git 
a/protocols/v2/subprotocols/job-declaration/README.md b/sv2/subprotocols/job-declaration/README.md similarity index 100% rename from protocols/v2/subprotocols/job-declaration/README.md rename to sv2/subprotocols/job-declaration/README.md diff --git a/protocols/v2/subprotocols/job-declaration/job-negotiation-flow.png b/sv2/subprotocols/job-declaration/job-negotiation-flow.png similarity index 100% rename from protocols/v2/subprotocols/job-declaration/job-negotiation-flow.png rename to sv2/subprotocols/job-declaration/job-negotiation-flow.png diff --git a/protocols/v2/subprotocols/job-declaration/src/allocate_mining_job_token.rs b/sv2/subprotocols/job-declaration/src/allocate_mining_job_token.rs similarity index 100% rename from protocols/v2/subprotocols/job-declaration/src/allocate_mining_job_token.rs rename to sv2/subprotocols/job-declaration/src/allocate_mining_job_token.rs diff --git a/protocols/v2/subprotocols/job-declaration/src/declare_mining_job.rs b/sv2/subprotocols/job-declaration/src/declare_mining_job.rs similarity index 100% rename from protocols/v2/subprotocols/job-declaration/src/declare_mining_job.rs rename to sv2/subprotocols/job-declaration/src/declare_mining_job.rs diff --git a/protocols/v2/subprotocols/job-declaration/src/lib.rs b/sv2/subprotocols/job-declaration/src/lib.rs similarity index 100% rename from protocols/v2/subprotocols/job-declaration/src/lib.rs rename to sv2/subprotocols/job-declaration/src/lib.rs diff --git a/protocols/v2/subprotocols/job-declaration/src/provide_missing_transactions.rs b/sv2/subprotocols/job-declaration/src/provide_missing_transactions.rs similarity index 100% rename from protocols/v2/subprotocols/job-declaration/src/provide_missing_transactions.rs rename to sv2/subprotocols/job-declaration/src/provide_missing_transactions.rs diff --git a/protocols/v2/subprotocols/job-declaration/src/push_solution.rs b/sv2/subprotocols/job-declaration/src/push_solution.rs similarity index 100% rename from 
protocols/v2/subprotocols/job-declaration/src/push_solution.rs rename to sv2/subprotocols/job-declaration/src/push_solution.rs diff --git a/protocols/v2/subprotocols/mining/Cargo.toml b/sv2/subprotocols/mining/Cargo.toml similarity index 100% rename from protocols/v2/subprotocols/mining/Cargo.toml rename to sv2/subprotocols/mining/Cargo.toml diff --git a/protocols/v2/subprotocols/mining/README.md b/sv2/subprotocols/mining/README.md similarity index 100% rename from protocols/v2/subprotocols/mining/README.md rename to sv2/subprotocols/mining/README.md diff --git a/protocols/v2/subprotocols/mining/src/close_channel.rs b/sv2/subprotocols/mining/src/close_channel.rs similarity index 100% rename from protocols/v2/subprotocols/mining/src/close_channel.rs rename to sv2/subprotocols/mining/src/close_channel.rs diff --git a/protocols/v2/subprotocols/mining/src/lib.rs b/sv2/subprotocols/mining/src/lib.rs similarity index 100% rename from protocols/v2/subprotocols/mining/src/lib.rs rename to sv2/subprotocols/mining/src/lib.rs diff --git a/protocols/v2/subprotocols/mining/src/new_mining_job.rs b/sv2/subprotocols/mining/src/new_mining_job.rs similarity index 100% rename from protocols/v2/subprotocols/mining/src/new_mining_job.rs rename to sv2/subprotocols/mining/src/new_mining_job.rs diff --git a/protocols/v2/subprotocols/mining/src/open_channel.rs b/sv2/subprotocols/mining/src/open_channel.rs similarity index 100% rename from protocols/v2/subprotocols/mining/src/open_channel.rs rename to sv2/subprotocols/mining/src/open_channel.rs diff --git a/protocols/v2/subprotocols/mining/src/set_custom_mining_job.rs b/sv2/subprotocols/mining/src/set_custom_mining_job.rs similarity index 100% rename from protocols/v2/subprotocols/mining/src/set_custom_mining_job.rs rename to sv2/subprotocols/mining/src/set_custom_mining_job.rs diff --git a/protocols/v2/subprotocols/mining/src/set_extranonce_prefix.rs b/sv2/subprotocols/mining/src/set_extranonce_prefix.rs similarity index 100% rename from 
protocols/v2/subprotocols/mining/src/set_extranonce_prefix.rs rename to sv2/subprotocols/mining/src/set_extranonce_prefix.rs diff --git a/protocols/v2/subprotocols/mining/src/set_group_channel.rs b/sv2/subprotocols/mining/src/set_group_channel.rs similarity index 100% rename from protocols/v2/subprotocols/mining/src/set_group_channel.rs rename to sv2/subprotocols/mining/src/set_group_channel.rs diff --git a/protocols/v2/subprotocols/mining/src/set_new_prev_hash.rs b/sv2/subprotocols/mining/src/set_new_prev_hash.rs similarity index 100% rename from protocols/v2/subprotocols/mining/src/set_new_prev_hash.rs rename to sv2/subprotocols/mining/src/set_new_prev_hash.rs diff --git a/protocols/v2/subprotocols/mining/src/set_target.rs b/sv2/subprotocols/mining/src/set_target.rs similarity index 100% rename from protocols/v2/subprotocols/mining/src/set_target.rs rename to sv2/subprotocols/mining/src/set_target.rs diff --git a/protocols/v2/subprotocols/mining/src/submit_shares.rs b/sv2/subprotocols/mining/src/submit_shares.rs similarity index 100% rename from protocols/v2/subprotocols/mining/src/submit_shares.rs rename to sv2/subprotocols/mining/src/submit_shares.rs diff --git a/protocols/v2/subprotocols/mining/src/update_channel.rs b/sv2/subprotocols/mining/src/update_channel.rs similarity index 100% rename from protocols/v2/subprotocols/mining/src/update_channel.rs rename to sv2/subprotocols/mining/src/update_channel.rs diff --git a/protocols/v2/subprotocols/template-distribution/Cargo.toml b/sv2/subprotocols/template-distribution/Cargo.toml similarity index 100% rename from protocols/v2/subprotocols/template-distribution/Cargo.toml rename to sv2/subprotocols/template-distribution/Cargo.toml diff --git a/protocols/v2/subprotocols/template-distribution/README.md b/sv2/subprotocols/template-distribution/README.md similarity index 100% rename from protocols/v2/subprotocols/template-distribution/README.md rename to sv2/subprotocols/template-distribution/README.md diff --git 
a/protocols/v2/subprotocols/template-distribution/src/coinbase_output_constraints.rs b/sv2/subprotocols/template-distribution/src/coinbase_output_constraints.rs similarity index 100% rename from protocols/v2/subprotocols/template-distribution/src/coinbase_output_constraints.rs rename to sv2/subprotocols/template-distribution/src/coinbase_output_constraints.rs diff --git a/protocols/v2/subprotocols/template-distribution/src/lib.rs b/sv2/subprotocols/template-distribution/src/lib.rs similarity index 89% rename from protocols/v2/subprotocols/template-distribution/src/lib.rs rename to sv2/subprotocols/template-distribution/src/lib.rs index e9a37e1e60..4def9ce5ac 100644 --- a/protocols/v2/subprotocols/template-distribution/src/lib.rs +++ b/sv2/subprotocols/template-distribution/src/lib.rs @@ -94,8 +94,8 @@ impl NewTemplate<'static> { impl CoinbaseOutputConstraints { pub fn from_gen(g: &mut Gen) -> Self { CoinbaseOutputConstraints { - coinbase_output_max_additional_size: u32::arbitrary(g).try_into().unwrap(), - coinbase_output_max_additional_sigops: u16::arbitrary(g).try_into().unwrap(), + coinbase_output_max_additional_size: u32::arbitrary(g), + coinbase_output_max_additional_sigops: u16::arbitrary(g), } } } @@ -104,7 +104,7 @@ impl CoinbaseOutputConstraints { impl RequestTransactionData { pub fn from_gen(g: &mut Gen) -> Self { RequestTransactionData { - template_id: u64::arbitrary(g).try_into().unwrap(), + template_id: u64::arbitrary(g), } } } @@ -118,7 +118,7 @@ impl RequestTransactionDataError<'static> { let error_code: binary_sv2::Str0255 = error_code.try_into().unwrap(); RequestTransactionDataError { - template_id: u64::arbitrary(g).try_into().unwrap(), + template_id: u64::arbitrary(g), error_code, } } @@ -133,7 +133,7 @@ impl RequestTransactionDataSuccess<'static> { vec![transaction_list_inner].into(); RequestTransactionDataSuccess { - template_id: u64::arbitrary(g).try_into().unwrap(), + template_id: u64::arbitrary(g), excess_data, transaction_list, } @@ -146,10 
+146,10 @@ impl SetNewPrevHash<'static> { let prev_hash = binary_sv2::U256::from_gen(g); let target = binary_sv2::U256::from_gen(g); SetNewPrevHash { - template_id: u64::arbitrary(g).try_into().unwrap(), + template_id: u64::arbitrary(g), prev_hash, - header_timestamp: u32::arbitrary(g).try_into().unwrap(), - n_bits: u32::arbitrary(g).try_into().unwrap(), + header_timestamp: u32::arbitrary(g), + n_bits: u32::arbitrary(g), target, } } @@ -160,10 +160,10 @@ impl SubmitSolution<'static> { pub fn from_gen(g: &mut Gen) -> Self { let coinbase_tx: binary_sv2::B064K = vec::Vec::::arbitrary(g).try_into().unwrap(); SubmitSolution { - template_id: u64::arbitrary(g).try_into().unwrap(), - version: u32::arbitrary(g).try_into().unwrap(), - header_timestamp: u32::arbitrary(g).try_into().unwrap(), - header_nonce: u32::arbitrary(g).try_into().unwrap(), + template_id: u64::arbitrary(g), + version: u32::arbitrary(g), + header_timestamp: u32::arbitrary(g), + header_nonce: u32::arbitrary(g), coinbase_tx, } } diff --git a/protocols/v2/subprotocols/template-distribution/src/new_template.rs b/sv2/subprotocols/template-distribution/src/new_template.rs similarity index 100% rename from protocols/v2/subprotocols/template-distribution/src/new_template.rs rename to sv2/subprotocols/template-distribution/src/new_template.rs diff --git a/protocols/v2/subprotocols/template-distribution/src/request_transaction_data.rs b/sv2/subprotocols/template-distribution/src/request_transaction_data.rs similarity index 100% rename from protocols/v2/subprotocols/template-distribution/src/request_transaction_data.rs rename to sv2/subprotocols/template-distribution/src/request_transaction_data.rs diff --git a/protocols/v2/subprotocols/template-distribution/src/set_new_prev_hash.rs b/sv2/subprotocols/template-distribution/src/set_new_prev_hash.rs similarity index 100% rename from protocols/v2/subprotocols/template-distribution/src/set_new_prev_hash.rs rename to 
sv2/subprotocols/template-distribution/src/set_new_prev_hash.rs diff --git a/protocols/v2/subprotocols/template-distribution/src/submit_solution.rs b/sv2/subprotocols/template-distribution/src/submit_solution.rs similarity index 100% rename from protocols/v2/subprotocols/template-distribution/src/submit_solution.rs rename to sv2/subprotocols/template-distribution/src/submit_solution.rs diff --git a/protocols/tarpaulin.toml b/tarpaulin.toml similarity index 100% rename from protocols/tarpaulin.toml rename to tarpaulin.toml diff --git a/test/integration-tests/.config/nextest.toml b/test/integration-tests/.config/nextest.toml deleted file mode 100644 index 5f5d26a09c..0000000000 --- a/test/integration-tests/.config/nextest.toml +++ /dev/null @@ -1,17 +0,0 @@ -[profile.default] - -# SRI has flaky integration tests, which we are ok to live with for now -# but if a test fails more than 3 times, it's safe to assume it's failing deterministically -# and that's a reliable indication that we shouldn't merge this PR -retries = { backoff = "fixed", count = 3, delay = "2s" } - -# only run one test at a time, which allows a human-friendly experience for inspecting logs -test-threads = 1 - -# label as slow if a test runs for more than 60s -# kill it after 120s -slow-timeout = { period = "60s", terminate-after = 2 } - -# display status for all levels (pass, fail, flaky, slow, etc) -status-level = "all" -final-status-level = "all" \ No newline at end of file diff --git a/test/integration-tests/Cargo.lock b/test/integration-tests/Cargo.lock deleted file mode 100644 index d9135ea85d..0000000000 --- a/test/integration-tests/Cargo.lock +++ /dev/null @@ -1,3249 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "addr2line" -version = "0.25.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array", -] - -[[package]] -name = "aes" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" -dependencies = [ - "cfg-if", - "cipher", - "cpufeatures", -] - -[[package]] -name = "aes-gcm" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" -dependencies = [ - "aead", - "aes", - "cipher", - "ctr", - "ghash", - "subtle", -] - -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.16", - "once_cell", - "version_check", -] - -[[package]] -name = "ahash" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", -] - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ 
- "memchr", -] - -[[package]] -name = "allocator-api2" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" - -[[package]] -name = "anstream" -version = "0.6.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" - -[[package]] -name = "anstyle-parse" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" -dependencies = [ - "windows-sys 0.60.2", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" -dependencies = [ - "anstyle", - "once_cell_polyfill", - "windows-sys 0.60.2", -] - -[[package]] -name = "anyhow" -version = "1.0.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" - -[[package]] -name = "arraydeque" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" - -[[package]] -name = "arrayvec" -version = "0.7.6" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" - -[[package]] -name = "async-channel" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener", - "futures-core", -] - -[[package]] -name = "async-recursion" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7d78656ba01f1b93024b7c3a0467f1608e4be67d725749fdcd7d2c7678fd7a2" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "async-trait" -version = "0.1.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - -[[package]] -name = "backtrace" -version = "0.3.76" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-link 0.2.1", -] - -[[package]] -name = "base58ck" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c8d66485a3a2ea485c1913c4572ce0256067a5377ac8c75c4960e1cda98605f" -dependencies = [ - "bitcoin-internals 0.3.0", - "bitcoin_hashes 0.14.0", -] - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" 
-version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - -[[package]] -name = "base64" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" - -[[package]] -name = "bech32" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" - -[[package]] -name = "binary_codec_sv2" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad24342e0abdcc463ad6ad4ac7b0ec606122c11eddf92de186a657df0114eb7" - -[[package]] -name = "binary_codec_sv2" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16415a0a9ccee2f71820da352c1f2a7f16d9f8e3ae6fb5e97834c6d732e98cd" -dependencies = [ - "buffer_sv2 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "binary_sv2" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba8295945d51b707f3a49e17810dddef858549e2b52383c7f2c4dd036f6bc1e6" -dependencies = [ - "binary_codec_sv2 3.0.0", - "derive_codec_sv2 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "binary_sv2" -version = "5.0.0" -dependencies = [ - "buffer_sv2 2.0.0", - "derive_codec_sv2 1.1.1", -] - -[[package]] -name = "bitcoin" -version = "0.32.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda569d741b895131a88ee5589a467e73e9c4718e958ac9308e4f7dc44b6945" -dependencies = [ - "base58ck", - "base64 0.21.7", - "bech32", - "bitcoin-internals 0.3.0", - "bitcoin-io", - "bitcoin-units", - "bitcoin_hashes 0.14.0", - "hex-conservative 0.2.1", - "hex_lit", - "secp256k1 0.29.1", - "serde", -] - -[[package]] -name = 
"bitcoin-internals" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" - -[[package]] -name = "bitcoin-internals" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bdbe14aa07b06e6cfeffc529a1f099e5fbe249524f8125358604df99a4bed2" -dependencies = [ - "serde", -] - -[[package]] -name = "bitcoin-io" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" - -[[package]] -name = "bitcoin-units" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5285c8bcaa25876d07f37e3d30c303f2609179716e11d688f51e8f1fe70063e2" -dependencies = [ - "bitcoin-internals 0.3.0", - "serde", -] - -[[package]] -name = "bitcoin_hashes" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b7a2e9773ee7ae7f2560f0426c938f57902dcb9e39321b0cbd608f47ed579a4" -dependencies = [ - "byteorder", -] - -[[package]] -name = "bitcoin_hashes" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" -dependencies = [ - "bitcoin-internals 0.2.0", - "hex-conservative 0.1.2", -] - -[[package]] -name = "bitcoin_hashes" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" -dependencies = [ - "bitcoin-io", - "hex-conservative 0.2.1", - "serde", -] - -[[package]] -name = "bitflags" -version = "2.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" -dependencies = [ - "serde", -] - -[[package]] -name = "bitvec" -version = "1.0.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "bs58" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" -dependencies = [ - "sha2 0.9.9", -] - -[[package]] -name = "buffer_sv2" -version = "2.0.0" -dependencies = [ - "aes-gcm", - "generic-array", -] - -[[package]] -name = "buffer_sv2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19781425841d2e217eb7ded68089b693b47c8f756eb02231c92122dbf505bcf0" -dependencies = [ - "aes-gcm", -] - -[[package]] -name = "byte-slice-cast" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" - -[[package]] -name = "cc" -version = "1.2.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ac9fe6cdbb24b6ade63616c0a0688e45bb56732262c158df3c0c4bea4ca47cb7" -dependencies = [ - "find-msvc-tools", - "shlex", -] - -[[package]] -name = "cfg-if" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" - -[[package]] -name = "chacha20" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" -dependencies = [ - "cfg-if", - "cipher", - "cpufeatures", -] - -[[package]] -name = "chacha20poly1305" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" -dependencies = [ - "aead", - "chacha20", - "cipher", - "poly1305", - "zeroize", -] - -[[package]] -name = "channels_sv2" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11ac02b93b5bd92a7dda2bc4b8c9d1f087e1fffc8b1018b532109135629051fc" -dependencies = [ - "binary_sv2 4.0.0", - "bitcoin", - "common_messages_sv2 6.0.1", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "primitive-types", - "template_distribution_sv2 4.0.1", - "tracing", -] - -[[package]] -name = "channels_sv2" -version = "2.0.0" -dependencies = [ - "binary_sv2 5.0.0", - "bitcoin", - "common_messages_sv2 6.0.2", - "job_declaration_sv2 5.0.2", - "mining_sv2 6.0.0", - "primitive-types", - "template_distribution_sv2 4.0.2", - "tracing", -] - -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", - "zeroize", -] - -[[package]] -name = "clap" -version = "4.5.49" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f4512b90fa68d3a9932cea5184017c5d200f5921df706d45e853537dea51508f" -dependencies = [ - "clap_builder", - "clap_derive", -] - -[[package]] -name = "clap_builder" -version = "4.5.49" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0025e98baa12e766c67ba13ff4695a887a1eba19569aad00a472546795bd6730" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim", -] - -[[package]] -name = "clap_derive" -version = "4.5.49" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "clap_lex" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" - -[[package]] -name = "codec_sv2" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e6d43e79e66d0f98038922157db8b6101594921be87ac2cca3754d669f2a05" -dependencies = [ - "binary_sv2 4.0.0", - "buffer_sv2 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "framing_sv2 5.0.1", - "noise_sv2 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.8.5", - "tracing", -] - -[[package]] -name = "codec_sv2" -version = "4.0.0" -dependencies = [ - "binary_sv2 5.0.0", - "buffer_sv2 2.0.0", - "framing_sv2 5.0.2", - "noise_sv2 1.4.0", - "rand 0.8.5", - "tracing", -] - -[[package]] -name = "colorchoice" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" - -[[package]] -name = "common_messages_sv2" -version = "6.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e6ec6ab527aeebf8ead273d6ab712ff181c050ee5e1082f3f6a2c65c0a10bf6" -dependencies = [ - "binary_sv2 4.0.0", -] - -[[package]] -name = 
"common_messages_sv2" -version = "6.0.2" -dependencies = [ - "binary_sv2 5.0.0", -] - -[[package]] -name = "concurrent-queue" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "config" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68578f196d2a33ff61b27fae256c3164f65e36382648e30666dde05b8cc9dfdf" -dependencies = [ - "async-trait", - "convert_case", - "json5", - "nom", - "pathdiff", - "ron", - "rust-ini", - "serde", - "serde_json", - "toml", - "yaml-rust2", -] - -[[package]] -name = "const-random" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" -dependencies = [ - "const-random-macro", -] - -[[package]] -name = "const-random-macro" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" -dependencies = [ - "getrandom 0.2.16", - "once_cell", - "tiny-keccak", -] - -[[package]] -name = "const_format" -version = "0.2.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" -dependencies = [ - "const_format_proc_macros", -] - -[[package]] -name = "const_format_proc_macros" -version = "0.2.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "convert_case" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" -dependencies = [ - 
"unicode-segmentation", -] - -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" - -[[package]] -name = "corepc-client" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c6b8eaeccd3df6c1f264cd6ff8cf5df7d056029473ce9551b2a6257832d38e0" -dependencies = [ - "bitcoin", - "corepc-types", - "jsonrpc", - "log", - "serde", - "serde_json", -] - -[[package]] -name = "corepc-node" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2bcc6e09458f052024ec36e4728bd5619e248643da6175876eb3b10ca6d4d86" -dependencies = [ - "anyhow", - "corepc-client", - "log", - "serde_json", - "tempfile", - "which", -] - -[[package]] -name = "corepc-types" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4bc664fdaeeae1eaf459edb88650af3009b758010fc69b423c5b38142446cfb" -dependencies = [ - "bitcoin", - "serde", - "serde_json", -] - -[[package]] -name = "cpufeatures" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" -dependencies = [ - "libc", -] - -[[package]] -name = "crc32fast" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" - -[[package]] -name = "crunchy" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "typenum", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher", -] - -[[package]] -name = "derive_codec_sv2" -version = "1.1.1" - -[[package]] -name = "derive_codec_sv2" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "924f288d967a5cd37956b195269ee7f710999169895cf670a736e1b2267d6137" -dependencies = [ - "binary_codec_sv2 1.2.0", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer 0.10.4", - "crypto-common", -] - -[[package]] -name = "dlv-list" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" -dependencies = [ - "const-random", -] - -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "errno" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" -dependencies = [ - "libc", - "windows-sys 0.61.2", -] - -[[package]] -name = "error_handling" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdf3be9049288001eb8a37f21b0f4e922598a6fa0098630fd3a6a14459ef217" - -[[package]] -name = "event-listener" -version = "2.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - -[[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" - -[[package]] -name = "filetime" -version = "0.2.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" -dependencies = [ - "cfg-if", - "libc", - "libredox", - "windows-sys 0.60.2", -] - -[[package]] -name = "find-msvc-tools" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" - -[[package]] -name = "fixed-hash" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "flate2" -version = "1.1.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "framing_sv2" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc6543955264144174b93780e0e76623ee4293037c9e180cfde3e2c155b59fa9" -dependencies = [ - "binary_sv2 4.0.0", - "buffer_sv2 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "noise_sv2 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "framing_sv2" -version = "5.0.2" -dependencies = [ - "binary_sv2 5.0.0", - "buffer_sv2 2.0.0", - "noise_sv2 1.4.0", -] - -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - -[[package]] -name = "futures" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" - -[[package]] -name = "futures-executor" -version = "0.3.31" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" - -[[package]] -name = "futures-macro" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "futures-sink" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" - -[[package]] -name = "futures-task" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" - -[[package]] -name = "futures-util" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" -dependencies = [ - "cfg-if", - "libc", - "wasi 
0.11.1+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" -dependencies = [ - "cfg-if", - "libc", - "r-efi", - "wasi 0.14.7+wasi-0.2.4", -] - -[[package]] -name = "ghash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" -dependencies = [ - "opaque-debug", - "polyval", -] - -[[package]] -name = "gimli" -version = "0.32.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" - -[[package]] -name = "h2" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" -dependencies = [ - "atomic-waker", - "bytes", - "fnv", - "futures-core", - "futures-sink", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "handlers_sv2" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "472824f98b68a963dbf4c77625a8b5525c322abe49afa9403dfb816e35dd4d93" -dependencies = [ - "binary_sv2 4.0.0", - "common_messages_sv2 6.0.1", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "parsers_sv2 0.1.1", - "template_distribution_sv2 4.0.1", - "trait-variant", -] - -[[package]] -name = "handlers_sv2" -version = "0.2.0" -dependencies = [ - "binary_sv2 5.0.0", - "common_messages_sv2 6.0.2", - "job_declaration_sv2 5.0.2", - "mining_sv2 6.0.0", - "parsers_sv2 0.1.2", - "template_distribution_sv2 4.0.2", - "trait-variant", -] - -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" 
-dependencies = [ - "ahash 0.7.8", - "serde", -] - -[[package]] -name = "hashbrown" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -dependencies = [ - "ahash 0.8.12", - "allocator-api2", -] - -[[package]] -name = "hashbrown" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" - -[[package]] -name = "hashlink" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" -dependencies = [ - "hashbrown 0.14.5", -] - -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hex-conservative" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" - -[[package]] -name = "hex-conservative" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" -dependencies = [ - "arrayvec", -] - -[[package]] -name = "hex-conservative" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afe881d0527571892c4034822e59bb10c6c991cce6abe8199b6f5cf10766f55" -dependencies = [ - "arrayvec", -] - -[[package]] -name = "hex_lit" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" - -[[package]] -name = "http" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" -dependencies = [ - "bytes", - "http", -] - -[[package]] -name = "http-body-util" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" -dependencies = [ - "bytes", - "futures-core", - "http", - "http-body", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" - -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - -[[package]] -name = "hyper" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" -dependencies = [ - "atomic-waker", - "bytes", - "futures-channel", - "futures-core", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "pin-utils", - "smallvec", - "tokio", - "want", -] - -[[package]] -name = "hyper-util" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" -dependencies = [ - "base64 0.22.1", - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "http", - 
"http-body", - "hyper", - "ipnet", - "libc", - "percent-encoding", - "pin-project-lite", - "socket2", - "system-configuration", - "tokio", - "tower-service", - "tracing", - "windows-registry", -] - -[[package]] -name = "impl-codec" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d40b9d5e17727407e55028eafc22b2dc68781786e6d7eb8a21103f5058e3a14" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "indexmap" -version = "2.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" -dependencies = [ - "equivalent", - "hashbrown 0.16.0", -] - -[[package]] -name = "inout" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" -dependencies = [ - "generic-array", -] - -[[package]] -name = "integration_tests_sv2" -version = "0.1.0" -dependencies = [ - "async-channel", - "corepc-node", - "flate2", - "jd_client_sv2", - "jd_server", - "mining_device", - "minreq", - "once_cell", - "pool_sv2", - "rand 0.9.2", - "stratum-apps", - "tar", - "tokio", - "tokio-util", - "tracing", - "tracing-subscriber", - "translator_sv2", -] - -[[package]] -name = "io-uring" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" -dependencies = [ - "bitflags", - "cfg-if", - "libc", -] - -[[package]] -name = "ipnet" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" - -[[package]] -name = "is_terminal_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name = "jd_client_sv2" -version = "0.1.0" -dependencies = [ - "async-channel", - "clap", - "config", - "serde", - "stratum-apps", - "tokio", - "tracing", -] - -[[package]] -name = "jd_server" -version = "0.1.3" -dependencies = [ - "async-channel", - "binary_sv2 4.0.0", - "bitcoin", - "clap", - "codec_sv2 3.0.1", - "common_messages_sv2 6.0.1", - "config", - "error_handling", - "framing_sv2 5.0.1", - "hashbrown 0.11.2", - "hex", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "network_helpers_sv2", - "nohash-hasher", - "noise_sv2 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parsers_sv2 0.1.1", - "rand 0.8.5", - "roles_logic_sv2 5.0.0", - "rpc_sv2", - "serde", - "serde_json", - "stratum-apps", - "tokio", - "tracing", -] - -[[package]] -name = "job_declaration_sv2" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d4edc436d29e8dcac178539222de2b3681d629f9884191bd7db8831e49dd24" -dependencies = [ - "binary_sv2 4.0.0", -] - -[[package]] -name = "job_declaration_sv2" -version = "5.0.2" -dependencies = [ - "binary_sv2 5.0.0", -] - -[[package]] -name = "json5" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" -dependencies = [ - "pest", - "pest_derive", - "serde", -] - -[[package]] -name = "jsonrpc" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3662a38d341d77efecb73caf01420cfa5aa63c0253fd7bc05289ef9f6616e1bf" -dependencies = [ - "base64 0.13.1", - "minreq", - "serde", - "serde_json", -] - -[[package]] -name = "lazy_static" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" - -[[package]] -name = "libc" -version = "0.2.177" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" - -[[package]] -name = "libredox" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" -dependencies = [ - "bitflags", - "libc", - "redox_syscall", -] - -[[package]] -name = "linux-raw-sys" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" - -[[package]] -name = "lock_api" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" - -[[package]] -name = "matchers" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" -dependencies = [ - "regex-automata", -] - -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "mining_device" -version = "0.1.3" -dependencies = [ - "async-channel", - "async-recursion", - "binary_sv2 4.0.0", - "clap", - "codec_sv2 3.0.1", - "common_messages_sv2 6.0.1", - "framing_sv2 5.0.1", - "futures", - "mining_sv2 5.0.1", - "network_helpers_sv2", - "noise_sv2 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num-format", - "parsers_sv2 0.1.1", - "primitive-types", - "rand 0.8.5", - "roles_logic_sv2 5.0.0", - "sha2 0.10.9", - "stratum-apps", - "tokio", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "mining_sv2" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1eb3c055232f64d36e3eee4296adcaa584fb3185a57e0de11ad5807766c45edc" -dependencies = [ - "binary_sv2 4.0.0", -] - -[[package]] -name = "mining_sv2" -version = "6.0.0" -dependencies = [ - "binary_sv2 5.0.0", -] - -[[package]] -name = "miniscript" -version = "12.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487906208f38448e186e3deb02f2b8ef046a9078b0de00bdb28bf4fb9b76951c" -dependencies = [ - "bech32", - "bitcoin", -] - -[[package]] -name = "miniz_oxide" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", - "simd-adler32", -] - -[[package]] -name = "minreq" -version = "2.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0c420feb01b9fb5061f8c8f452534361dd783756dcf38ec45191ce55e7a161" -dependencies = [ - "log", - "once_cell", - "rustls", - "rustls-webpki", - "serde", - "serde_json", - "webpki-roots", -] - -[[package]] -name = "mio" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" -dependencies = [ - "libc", - "wasi 0.11.1+wasi-snapshot-preview1", - "windows-sys 0.59.0", -] - -[[package]] -name = "network_helpers_sv2" -version = "4.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d720d6a31532fb4f08e59b71669084d06462f42e9d2c2aede7368d221d36db" -dependencies = [ - "async-channel", - "codec_sv2 3.0.1", - "futures", - "tokio", - "tracing", -] - -[[package]] -name = "nohash-hasher" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" - -[[package]] -name = "noise_sv2" -version = "1.4.0" -dependencies = [ - "aes-gcm", - "chacha20poly1305", - "generic-array", - "rand 0.8.5", - "rand_chacha 0.3.1", - "secp256k1 0.28.2", -] - -[[package]] -name = "noise_sv2" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30964f9fbc4572bb5a1b0046176331d20e9ce6de0ca18afc3cfd42c6e91a94aa" -dependencies = [ - "aes-gcm", - "chacha20poly1305", - "rand 0.8.5", - "rand_chacha 0.3.1", - "secp256k1 0.28.2", -] - -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "nu-ansi-term" -version = "0.50.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" -dependencies = [ - "windows-sys 0.61.2", -] - -[[package]] -name = "num-format" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" -dependencies = [ - "arrayvec", - "itoa", -] - -[[package]] -name = "object" -version = "0.37.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" - -[[package]] -name = "once_cell_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" - -[[package]] -name = "opaque-debug" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" - -[[package]] -name = "ordered-multimap" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79" -dependencies = [ - "dlv-list", - "hashbrown 0.14.5", -] - -[[package]] -name = "parity-scale-codec" -version = "3.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" -dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - "const_format", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "rustversion", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "parking_lot" -version = "0.12.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] 
-name = "parking_lot_core" -version = "0.9.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-link 0.2.1", -] - -[[package]] -name = "parsers_sv2" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109e80bc77241a729f61cad15f3f246f3de12e1b741b31e419fc7e02f20c2ccb" -dependencies = [ - "binary_sv2 4.0.0", - "common_messages_sv2 6.0.1", - "framing_sv2 5.0.1", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "template_distribution_sv2 4.0.1", -] - -[[package]] -name = "parsers_sv2" -version = "0.1.2" -dependencies = [ - "binary_sv2 5.0.0", - "common_messages_sv2 6.0.2", - "framing_sv2 5.0.2", - "job_declaration_sv2 5.0.2", - "mining_sv2 6.0.0", - "template_distribution_sv2 4.0.2", -] - -[[package]] -name = "pathdiff" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" - -[[package]] -name = "percent-encoding" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" - -[[package]] -name = "pest" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6" -dependencies = [ - "memchr", - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d725d9cfd79e87dccc9341a2ef39d1b6f6353d68c4b33c177febbe1a402c97c5" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"db7d01726be8ab66ab32f9df467ae8b1148906685bbe75c82d1e65d7f5b3f841" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "pest_meta" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9f832470494906d1fca5329f8ab5791cc60beb230c74815dff541cbd2b5ca0" -dependencies = [ - "once_cell", - "pest", - "sha2 0.10.9", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "poly1305" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" -dependencies = [ - "cpufeatures", - "opaque-debug", - "universal-hash", -] - -[[package]] -name = "polyval" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug", - "universal-hash", -] - -[[package]] -name = "pool_sv2" -version = "0.2.0" -dependencies = [ - "async-channel", - "clap", - "config", - "rand 0.8.5", - "secp256k1 0.28.2", - "serde", - "stratum-apps", - "tokio", - "tracing", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -dependencies = [ - "zerocopy", -] - -[[package]] -name = "primitive-types" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d15600a7d856470b7d278b3fe0e311fe28c2526348549f8ef2ff7db3299c87f5" -dependencies = [ - "fixed-hash", - "impl-codec", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" -dependencies = [ - "toml_edit 0.23.7", -] - -[[package]] -name = "proc-macro2" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "r-efi" -version = "5.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" - -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" -dependencies = [ - "rand_chacha 0.9.0", - "rand_core 0.9.3", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - 
"ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" -dependencies = [ - "ppv-lite86", - "rand_core 0.9.3", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.16", -] - -[[package]] -name = "rand_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" -dependencies = [ - "getrandom 0.3.3", -] - -[[package]] -name = "redox_syscall" -version = "0.5.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" -dependencies = [ - "bitflags", -] - -[[package]] -name = "regex-automata" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" - -[[package]] -name = "ring" -version = "0.17.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" -dependencies = [ - "cc", - "cfg-if", - "getrandom 0.2.16", - "libc", - "untrusted", - "windows-sys 0.52.0", -] - -[[package]] -name = "roles_logic_sv2" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7241840512841396df29ede2094619ad06cbbd1a0dc342553c7a5901506d096b" -dependencies = [ - "bitcoin", - "chacha20poly1305", - "channels_sv2 1.0.2", - "codec_sv2 3.0.1", - "common_messages_sv2 6.0.1", - "handlers_sv2 0.1.0", - "hex-conservative 0.3.0", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "nohash-hasher", - "parsers_sv2 0.1.1", - "primitive-types", - "template_distribution_sv2 4.0.1", - "tracing", -] - -[[package]] -name = "roles_logic_sv2" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88850ead16993f86cb4616d154ddd37b9c0d739ea23711b1cc51f40484e0e39a" -dependencies = [ - "binary_sv2 4.0.0", - "bitcoin", - "chacha20poly1305", - "channels_sv2 1.0.2", - "codec_sv2 3.0.1", - "common_messages_sv2 6.0.1", - "handlers_sv2 0.1.0", - "hex-conservative 0.3.0", - "job_declaration_sv2 5.0.1", - "mining_sv2 5.0.1", - "nohash-hasher", - "parsers_sv2 0.1.1", - "primitive-types", - "template_distribution_sv2 4.0.1", - "tracing", -] - -[[package]] -name = "ron" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" -dependencies = [ - "base64 0.21.7", - "bitflags", - "serde", - "serde_derive", -] - -[[package]] -name = "rpc_sv2" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af9b3a6c43d03c5cc6ca9f40797cbf17a9a30b8db236be6c87f5243bd404d6af" -dependencies = [ - "base64 0.21.7", - "hex", - "http-body-util", - "hyper", - "hyper-util", - "serde", - "serde_json", - "stratum-common", -] - -[[package]] -name = "rust-ini" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a" -dependencies = [ - "cfg-if", - "ordered-multimap", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustix" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" -dependencies = [ - "bitflags", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.61.2", -] - -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki", - "sct", -] - -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustversion" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "secp256k1" -version = "0.28.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" -dependencies = [ - "bitcoin_hashes 0.13.0", - "rand 0.8.5", - "secp256k1-sys 0.9.2", -] - -[[package]] -name = "secp256k1" -version = "0.29.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" -dependencies = [ - "bitcoin_hashes 0.14.0", - "secp256k1-sys 0.10.1", - "serde", -] - -[[package]] -name = "secp256k1-sys" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" -dependencies = [ - "cc", -] - -[[package]] -name = "secp256k1-sys" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" -dependencies = [ - "cc", -] - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -dependencies = [ - "serde_core", - "serde_derive", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "serde_json" -version = "1.0.145" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", 
- "serde_core", -] - -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha2" -version = "0.10.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.7", - "sha2-asm", -] - -[[package]] -name = "sha2-asm" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" -dependencies = [ - "cc", -] - -[[package]] -name = "sharded-slab" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "signal-hook-registry" -version = "1.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" -dependencies = [ - "libc", -] - -[[package]] -name = "simd-adler32" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" - -[[package]] -name = "slab" -version = "0.4.11" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" - -[[package]] -name = "smallvec" -version = "1.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" - -[[package]] -name = "socket2" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" -dependencies = [ - "libc", - "windows-sys 0.60.2", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "stratum-apps" -version = "0.1.0" -dependencies = [ - "async-channel", - "bs58", - "clap", - "config", - "futures", - "generic-array", - "miniscript", - "rand 0.8.5", - "rustversion", - "secp256k1 0.28.2", - "serde", - "serde_json", - "stratum-core", - "tokio", - "tokio-util", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "stratum-common" -version = "4.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77b7dc7a762d19aba6f355599a61440b29603ceece5a158914888691b9867ebe" -dependencies = [ - "roles_logic_sv2 4.0.0", -] - -[[package]] -name = "stratum-core" -version = "0.1.0" -dependencies = [ - "binary_sv2 5.0.0", - "bitcoin", - "buffer_sv2 2.0.0", - "channels_sv2 2.0.0", - "codec_sv2 4.0.0", - "common_messages_sv2 6.0.2", - "framing_sv2 5.0.2", - "handlers_sv2 0.2.0", - "job_declaration_sv2 5.0.2", - "mining_sv2 6.0.0", - "noise_sv2 1.4.0", - "parsers_sv2 0.1.2", - "stratum_translation", - "sv1_api", - "template_distribution_sv2 4.0.2", -] - -[[package]] -name = "stratum_translation" -version = "0.1.1" -dependencies = [ - "binary_sv2 5.0.0", - "bitcoin", - "channels_sv2 2.0.0", - "mining_sv2 6.0.0", - "sv1_api", - 
"tracing", -] - -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "subtle" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" - -[[package]] -name = "sv1_api" -version = "2.1.2" -dependencies = [ - "binary_sv2 5.0.0", - "bitcoin_hashes 0.3.2", - "byteorder", - "hex", - "serde", - "serde_json", - "tracing", -] - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "system-configuration" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" -dependencies = [ - "bitflags", - "core-foundation", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tar" -version = "0.4.44" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" -dependencies = [ - "filetime", - "libc", -] - -[[package]] -name = "tempfile" -version = "3.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" -dependencies = [ - "fastrand", - "once_cell", - "rustix", - "windows-sys 0.61.2", -] - -[[package]] -name = "template_distribution_sv2" -version = "4.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6298fc9f339b1c3b654ef3590857d5d3eff6d709891f003b7f7a701b8a64a3a4" -dependencies = [ - "binary_sv2 4.0.0", -] - -[[package]] -name = "template_distribution_sv2" -version = "4.0.2" -dependencies = [ - "binary_sv2 5.0.0", -] - -[[package]] -name = "thiserror" -version = "2.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "2.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "thread_local" -version = "1.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tokio" -version = "1.47.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" -dependencies = [ - "backtrace", - 
"bytes", - "io-uring", - "libc", - "mio", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "slab", - "socket2", - "tokio-macros", - "tracing", - "windows-sys 0.59.0", -] - -[[package]] -name = "tokio-macros" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "tokio-util" -version = "0.7.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "toml" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime 0.6.11", - "toml_edit 0.22.27", -] - -[[package]] -name = "toml_datetime" -version = "0.6.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_datetime" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" -dependencies = [ - "serde_core", -] - -[[package]] -name = "toml_edit" -version = "0.22.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime 0.6.11", - "toml_write", - "winnow", -] - -[[package]] -name = "toml_edit" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" -dependencies = [ - "indexmap", - "toml_datetime 0.7.3", - "toml_parser", - "winnow", -] - -[[package]] -name = "toml_parser" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" -dependencies = [ - "winnow", -] - -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - -[[package]] -name = "tower-service" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" - -[[package]] -name = "tracing" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" -dependencies = [ - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "tracing-core" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" -dependencies = [ - "once_cell", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" -dependencies = [ - "matchers", - "nu-ansi-term", - "once_cell", - "regex-automata", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", -] - -[[package]] -name = "trait-variant" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "translator_sv2" -version = "2.0.0" -dependencies = [ - "async-channel", - "clap", - "config", - "serde", - "serde_json", - "stratum-apps", - "tokio", - "tracing", -] - -[[package]] -name = "try-lock" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" - -[[package]] -name = "typenum" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" - -[[package]] -name = "ucd-trie" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" - -[[package]] -name = "uint" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unicode-ident" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" - -[[package]] -name = "unicode-segmentation" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" - -[[package]] -name = "unicode-xid" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" - -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - -[[package]] -name = "untrusted" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" - -[[package]] -name = "utf8parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - -[[package]] -name = "valuable" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" - -[[package]] -name = "version_check" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - -[[package]] -name = "wasi" -version = "0.14.7+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" -dependencies = [ - 
"wasip2", -] - -[[package]] -name = "wasip2" -version = "1.0.1+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" -dependencies = [ - "wit-bindgen", -] - -[[package]] -name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - -[[package]] -name = "which" -version = "3.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" -dependencies = [ - "libc", -] - -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" - -[[package]] -name = "windows-registry" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" -dependencies = [ - "windows-link 0.1.3", - "windows-result", - "windows-strings", -] - -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link 0.1.3", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link 0.1.3", -] - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.5", -] - -[[package]] -name = "windows-sys" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -dependencies = [ - "windows-link 0.2.1", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.53.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" -dependencies = [ - "windows-link 0.2.1", - "windows_aarch64_gnullvm 0.53.1", - "windows_aarch64_msvc 0.53.1", - "windows_i686_gnu 0.53.1", - "windows_i686_gnullvm 0.53.1", - "windows_i686_msvc 0.53.1", - "windows_x86_64_gnu 0.53.1", - "windows_x86_64_gnullvm 0.53.1", - "windows_x86_64_msvc 0.53.1", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_i686_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" - -[[package]] -name = "winnow" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" -dependencies = [ - "memchr", -] - -[[package]] -name = "wit-bindgen" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" - -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - 
-[[package]] -name = "yaml-rust2" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8902160c4e6f2fb145dbe9d6760a75e3c9522d8bf796ed7047c85919ac7115f8" -dependencies = [ - "arraydeque", - "encoding_rs", - "hashlink", -] - -[[package]] -name = "zerocopy" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" -dependencies = [ - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "zeroize" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" diff --git a/test/integration-tests/Cargo.toml b/test/integration-tests/Cargo.toml deleted file mode 100644 index 884ec26136..0000000000 --- a/test/integration-tests/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "integration_tests_sv2" -version = "0.1.0" -authors = ["The Stratum V2 Developers"] -edition = "2021" -documentation = "https://github.com/stratum-mining/stratum" -readme = "README.md" -homepage = "https://stratumprotocol.org" -repository = "https://github.com/stratum-mining/stratum" -license = "MIT OR Apache-2.0" -keywords = ["stratum", "mining", "bitcoin", "protocol"] - -[dependencies] -stratum-apps = { path = "../../roles/stratum-apps", features = ["network", "config"] } -jd_client_sv2 = { path = "../../roles/jd-client" } -jd_server = { path = "../../roles/jd-server" } -mining_device = { path = "../../roles/test-utils/mining-device" } -pool_sv2 = { path = "../../roles/pool" } -translator_sv2 = { path = "../../roles/translator" } -async-channel = { version = "1.5.1", default-features 
= false } -corepc-node = { version = "0.7.0", default-features = false, features = ["28_0"] } -flate2 = { version = "1.1.0", default-features = false, features = ["rust_backend"] } -minreq = { version = "2.12.0", default-features = false, features = ["https"] } -once_cell = { version = "1.19.0", default-features = false } -rand = { version = "0.9.0", default-features = false, features = ["thread_rng"] } -tar = { version = "0.4.41", default-features = false } -tokio = { version="1.44.1", default-features = false, features = ["tracing"] } -tokio-util = { version = "0.7", default-features = false } -tracing = { version = "0.1.41", default-features = false } -tracing-subscriber = { version = "0.3.19", default-features = false } - -[lib] -path = "lib/mod.rs" diff --git a/test/integration-tests/README.md b/test/integration-tests/README.md deleted file mode 100644 index 6078038501..0000000000 --- a/test/integration-tests/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# SV2 Integration Tests - -This is a test crate and it can be used in order to test the behavior of different roles when -working together. Each role should have a `start_[role_name]` function under `common` folder that -can be called in order to run the role. In order to assert the behavior of the role or the messages -it exchanges with other roles, you can use the `Sniffer` helper in order to listen to the messages -exchanged between the roles, and assert those messages using the `assert_message_[message_type]` -function. For examples on how to use the `Sniffer` helper, you can check the -`sniffer_integration.rs` module or other tests in the `tests` folder. - -All of our tests run in regtest network. We download the Template Provider node from -https://github.com/Sjors/bitcoin/releases/download. This is a pre-built binary that we use to run an -Stratum V2 compatible bitcoin node. Note that this is the only external dependency(and Role) that we -have in our tests. 
- -## Running Instructions - -In order to run the integration tests, you can use the following command: - -```bash -$ git clone git@github.com:stratum-mining/stratum.git -$ cargo test --manifest-path=test/integration-tests/Cargo.toml --verbose --test '*' -- --nocapture -``` - -Note: during the execution of the tests, a new directory called `template-provider` is created. -This directory holds the executable for Template Provider node, as well as the different data -directories created for each execution. - -## License -MIT OR Apache-2.0 diff --git a/test/integration-tests/high_diff_chain/blocks/.lock b/test/integration-tests/high_diff_chain/blocks/.lock deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/integration-tests/high_diff_chain/blocks/blk00000.dat b/test/integration-tests/high_diff_chain/blocks/blk00000.dat deleted file mode 100644 index 63f072fe0e..0000000000 Binary files a/test/integration-tests/high_diff_chain/blocks/blk00000.dat and /dev/null differ diff --git a/test/integration-tests/high_diff_chain/blocks/index/000005.ldb b/test/integration-tests/high_diff_chain/blocks/index/000005.ldb deleted file mode 100644 index 9f1ad6e971..0000000000 Binary files a/test/integration-tests/high_diff_chain/blocks/index/000005.ldb and /dev/null differ diff --git a/test/integration-tests/high_diff_chain/blocks/index/000255.ldb b/test/integration-tests/high_diff_chain/blocks/index/000255.ldb deleted file mode 100644 index c2bd3c61fb..0000000000 Binary files a/test/integration-tests/high_diff_chain/blocks/index/000255.ldb and /dev/null differ diff --git a/test/integration-tests/high_diff_chain/blocks/index/000257.ldb b/test/integration-tests/high_diff_chain/blocks/index/000257.ldb deleted file mode 100644 index 40bea491ac..0000000000 Binary files a/test/integration-tests/high_diff_chain/blocks/index/000257.ldb and /dev/null differ diff --git a/test/integration-tests/high_diff_chain/blocks/index/000260.ldb 
b/test/integration-tests/high_diff_chain/blocks/index/000260.ldb deleted file mode 100644 index 691ab9ea0e..0000000000 Binary files a/test/integration-tests/high_diff_chain/blocks/index/000260.ldb and /dev/null differ diff --git a/test/integration-tests/high_diff_chain/blocks/index/000261.log b/test/integration-tests/high_diff_chain/blocks/index/000261.log deleted file mode 100644 index b8a88c3cdf..0000000000 Binary files a/test/integration-tests/high_diff_chain/blocks/index/000261.log and /dev/null differ diff --git a/test/integration-tests/high_diff_chain/blocks/index/CURRENT b/test/integration-tests/high_diff_chain/blocks/index/CURRENT deleted file mode 100644 index 4ee1a673cd..0000000000 --- a/test/integration-tests/high_diff_chain/blocks/index/CURRENT +++ /dev/null @@ -1 +0,0 @@ -MANIFEST-000259 diff --git a/test/integration-tests/high_diff_chain/blocks/index/LOCK b/test/integration-tests/high_diff_chain/blocks/index/LOCK deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/integration-tests/high_diff_chain/blocks/index/MANIFEST-000259 b/test/integration-tests/high_diff_chain/blocks/index/MANIFEST-000259 deleted file mode 100644 index 512f6c489c..0000000000 Binary files a/test/integration-tests/high_diff_chain/blocks/index/MANIFEST-000259 and /dev/null differ diff --git a/test/integration-tests/high_diff_chain/blocks/rev00000.dat b/test/integration-tests/high_diff_chain/blocks/rev00000.dat deleted file mode 100644 index fe1a71b6c4..0000000000 Binary files a/test/integration-tests/high_diff_chain/blocks/rev00000.dat and /dev/null differ diff --git a/test/integration-tests/high_diff_chain/blocks/xor.dat b/test/integration-tests/high_diff_chain/blocks/xor.dat deleted file mode 100644 index 1aff61dd9f..0000000000 --- a/test/integration-tests/high_diff_chain/blocks/xor.dat +++ /dev/null @@ -1 +0,0 @@ -ˆTl3Υ" \ No newline at end of file diff --git a/test/integration-tests/high_diff_chain/chainstate/000289.log 
b/test/integration-tests/high_diff_chain/chainstate/000289.log deleted file mode 100644 index 4e33a9fd12..0000000000 Binary files a/test/integration-tests/high_diff_chain/chainstate/000289.log and /dev/null differ diff --git a/test/integration-tests/high_diff_chain/chainstate/000290.ldb b/test/integration-tests/high_diff_chain/chainstate/000290.ldb deleted file mode 100644 index c8e87db2e1..0000000000 Binary files a/test/integration-tests/high_diff_chain/chainstate/000290.ldb and /dev/null differ diff --git a/test/integration-tests/high_diff_chain/chainstate/CURRENT b/test/integration-tests/high_diff_chain/chainstate/CURRENT deleted file mode 100644 index 3b48cae827..0000000000 --- a/test/integration-tests/high_diff_chain/chainstate/CURRENT +++ /dev/null @@ -1 +0,0 @@ -MANIFEST-000287 diff --git a/test/integration-tests/high_diff_chain/chainstate/LOCK b/test/integration-tests/high_diff_chain/chainstate/LOCK deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/integration-tests/high_diff_chain/chainstate/MANIFEST-000287 b/test/integration-tests/high_diff_chain/chainstate/MANIFEST-000287 deleted file mode 100644 index be1d2c41ca..0000000000 Binary files a/test/integration-tests/high_diff_chain/chainstate/MANIFEST-000287 and /dev/null differ diff --git a/test/integration-tests/lib/interceptor.rs b/test/integration-tests/lib/interceptor.rs deleted file mode 100644 index 6ac0bdd955..0000000000 --- a/test/integration-tests/lib/interceptor.rs +++ /dev/null @@ -1,116 +0,0 @@ -use std::fmt; - -use crate::types::MsgType; -use stratum_apps::stratum_core::parsers_sv2::AnyMessage; - -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum MessageDirection { - ToDownstream, - ToUpstream, -} - -impl fmt::Display for MessageDirection { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - MessageDirection::ToDownstream => write!(f, "downstream"), - MessageDirection::ToUpstream => write!(f, "upstream"), - } - } -} - -/// Represents an action that 
[`Sniffer`] can take on intercepted messages. -#[derive(Debug, Clone)] -pub enum InterceptAction { - /// Prevents a message from being forwarded and stored into the message aggregator. - IgnoreMessage(IgnoreMessage), - /// Intercepts and modifies a message before forwarding it. - ReplaceMessage(Box), -} - -impl InterceptAction { - /// Returns the action if it is `IgnoreMessage` or `ReplaceMessage` - /// with the specified message type. - pub fn find_matching_action( - &self, - msg_type: MsgType, - direction: MessageDirection, - ) -> Option<&Self> { - match self { - InterceptAction::IgnoreMessage(bm) - if bm.direction == direction && bm.expected_message_type == msg_type => - { - Some(self) - } - - InterceptAction::ReplaceMessage(im) - if im.direction == direction && im.expected_message_type == msg_type => - { - Some(self) - } - - _ => None, - } - } -} -/// Defines an action that prevents a message from being forwarded. -/// -/// When a message matching the specified type and direction is intercepted, -/// it will not be added to the message aggregator for inspection and will not be -/// forwarded to the destination. All other messages will continue to be forwarded normally. -#[derive(Debug, Clone)] -pub struct IgnoreMessage { - direction: MessageDirection, - expected_message_type: MsgType, -} - -impl IgnoreMessage { - /// Creates a new [`IgnoreMessage`] action. - /// - /// - `direction`: The direction of the message to be ignored. - /// - `expected_message_type`: The type of message to be ignored. - pub fn new(direction: MessageDirection, expected_message_type: MsgType) -> Self { - IgnoreMessage { - direction, - expected_message_type, - } - } -} - -impl From for InterceptAction { - fn from(value: IgnoreMessage) -> Self { - InterceptAction::IgnoreMessage(value) - } -} - -/// Allows [`Sniffer`] to replace some intercepted message before forwarding it. 
-#[derive(Debug, Clone)] -pub struct ReplaceMessage { - direction: MessageDirection, - expected_message_type: MsgType, - pub(crate) replacement_message: AnyMessage<'static>, -} - -impl ReplaceMessage { - /// Constructor of `ReplaceMessage` - /// - `direction`: direction of message to be intercepted and replaced - /// - `expected_message_type`: type of message to be intercepted and replaced - /// - `replacement_message`: message to replace the intercepted one - /// - `replacement_message_type`: type of message to replace the intercepted one - pub fn new( - direction: MessageDirection, - expected_message_type: MsgType, - replacement_message: AnyMessage<'static>, - ) -> Self { - Self { - direction, - expected_message_type, - replacement_message, - } - } -} - -impl From for InterceptAction { - fn from(value: ReplaceMessage) -> Self { - InterceptAction::ReplaceMessage(Box::new(value)) - } -} diff --git a/test/integration-tests/lib/message_aggregator.rs b/test/integration-tests/lib/message_aggregator.rs deleted file mode 100644 index 56d8a3cfa0..0000000000 --- a/test/integration-tests/lib/message_aggregator.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::{collections::VecDeque, sync::Arc}; -use stratum_apps::{custom_mutex::Mutex, stratum_core::parsers_sv2::AnyMessage}; - -use crate::types::MsgType; - -#[derive(Debug, Clone)] -pub struct MessagesAggregator { - messages: Arc)>>>, -} - -impl Default for MessagesAggregator { - fn default() -> Self { - Self::new() - } -} - -impl MessagesAggregator { - /// Creates a new [`MessagesAggregator`]. - pub fn new() -> Self { - Self { - messages: Arc::new(Mutex::new(VecDeque::new())), - } - } - - /// Adds a message to the end of the queue. - pub fn add_message(&self, msg_type: MsgType, message: AnyMessage<'static>) { - self.messages - .safe_lock(|messages| messages.push_back((msg_type, message))) - .unwrap(); - } - - /// Returns false if the queue is empty, true otherwise. 
- pub fn is_empty(&self) -> bool { - self.messages - .safe_lock(|messages| messages.is_empty()) - .unwrap() - } - - /// returns true if contains message_type - pub fn has_message_type(&self, message_type: u8) -> bool { - let has_message: bool = self - .messages - .safe_lock(|messages| { - for (t, _) in messages.iter() { - if *t == message_type { - return true; // Exit early with `true` - } - } - false // Default value if no match is found - }) - .unwrap(); - has_message - } - - /// returns true if contains message_type and removes messages from the queue - /// until the first message of type message_type. - pub fn has_message_type_with_remove(&self, message_type: u8) -> bool { - self.messages - .safe_lock(|messages| { - let mut cloned_messages = messages.clone(); - for (pos, (t, _)) in cloned_messages.iter().enumerate() { - if *t == message_type { - let drained = cloned_messages.drain(pos + 1..).collect(); - *messages = drained; - return true; - } - } - false - }) - .unwrap() - } - - /// The aggregator queues messages in FIFO order, so this function returns the oldest message in - /// the queue. - /// - /// The returned message is removed from the queue. 
- pub fn next_message(&self) -> Option<(MsgType, AnyMessage<'static>)> { - let is_state = self - .messages - .safe_lock(|messages| { - let mut cloned = messages.clone(); - if let Some((msg_type, msg)) = cloned.pop_front() { - *messages = cloned; - Some((msg_type, msg)) - } else { - None - } - }) - .unwrap(); - is_state - } -} diff --git a/test/integration-tests/lib/mock_roles.rs b/test/integration-tests/lib/mock_roles.rs deleted file mode 100644 index 461217be47..0000000000 --- a/test/integration-tests/lib/mock_roles.rs +++ /dev/null @@ -1,191 +0,0 @@ -use crate::{ - message_aggregator::MessagesAggregator, - types::{MessageFrame, MsgType}, - utils::{create_downstream, create_upstream, message_from_frame, wait_for_client}, -}; -use async_channel::Sender; -use std::net::SocketAddr; -use stratum_apps::stratum_core::{ - codec_sv2::StandardEitherFrame, framing_sv2::framing::Sv2Frame, parsers_sv2::AnyMessage, -}; -use tokio::net::TcpStream; - -pub struct MockDownstream { - upstream_address: SocketAddr, - messages_from_upstream: MessagesAggregator, -} - -impl MockDownstream { - pub fn new(upstream_address: SocketAddr) -> Self { - Self { - upstream_address, - messages_from_upstream: MessagesAggregator::new(), - } - } - - pub async fn start(&self) -> Sender { - let upstream_address = self.upstream_address; - let (upstream_receiver, upstream_sender) = create_upstream(loop { - match TcpStream::connect(upstream_address).await { - Ok(stream) => break stream, - Err(_) => { - println!("MockDownstream: unable to connect to upstream, retrying"); - } - } - }) - .await - .expect("Failed to create upstream"); - let messages_from_upstream = self.messages_from_upstream.clone(); - tokio::spawn(async move { - while let Ok(mut frame) = upstream_receiver.recv().await { - let (msg_type, msg) = message_from_frame(&mut frame); - messages_from_upstream.add_message(msg_type, msg); - } - }); - upstream_sender - } - - pub fn next_message_from_upstream(&self) -> Option<(MsgType, 
AnyMessage<'static>)> { - self.messages_from_upstream.next_message() - } -} - -pub struct MockUpstream { - listening_address: SocketAddr, - messages_from_dowsntream: MessagesAggregator, - // First item in tuple refer to the message(MsgType) received and second to what - // response(AnyMessage) should the upstream send back. - response_messages: Vec<(MsgType, AnyMessage<'static>)>, -} - -impl MockUpstream { - pub fn new( - listening_address: SocketAddr, - response_messages: Vec<(MsgType, AnyMessage<'static>)>, - ) -> Self { - Self { - listening_address, - messages_from_dowsntream: MessagesAggregator::new(), - response_messages, - } - } - - pub async fn start(&self) { - let listening_address = self.listening_address; - let messages_from_dowsntream = self.messages_from_dowsntream.clone(); - let response_messages = self.response_messages.clone(); - tokio::spawn(async move { - let (downstream_receiver, downstream_sender) = - create_downstream(wait_for_client(listening_address).await) - .await - .expect("Failed to connect to downstream"); - while let Ok(mut frame) = downstream_receiver.recv().await { - let (msg_type, msg) = message_from_frame(&mut frame); - // save messages received from downstream - messages_from_dowsntream.add_message(msg_type, msg); - // find a response if the user provided one - let response = response_messages - .iter() - .find(|(m_type, _)| m_type == &msg_type); - // send response back to the downstream if found - if let Some((_, response_msg)) = response { - let message = StandardEitherFrame::>::Sv2( - Sv2Frame::from_message(response_msg.clone(), msg_type, 0, false) - .expect("Failed to create the frame"), - ); - downstream_sender.send(message).await.unwrap(); - } - } - }); - } - - pub fn next_message_from_downstream(&self) -> Option<(MsgType, AnyMessage<'static>)> { - self.messages_from_dowsntream.next_message() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{start_template_provider, template_provider::DifficultyLevel}; - use 
std::{convert::TryInto, net::TcpListener}; - use stratum_apps::stratum_core::{ - codec_sv2::StandardEitherFrame, - common_messages_sv2::{Protocol, SetupConnection, SetupConnectionSuccess, *}, - framing_sv2::framing::Sv2Frame, - parsers_sv2::CommonMessages, - }; - - #[tokio::test] - async fn test_mock_downstream() { - let (_tp, socket) = start_template_provider(None, DifficultyLevel::Low); - let mock_downstream = MockDownstream::new(socket); - let send_to_upstream = mock_downstream.start().await; - let setup_connection = - AnyMessage::Common(CommonMessages::SetupConnection(SetupConnection { - protocol: Protocol::TemplateDistributionProtocol, - min_version: 2, - max_version: 2, - flags: 0, - endpoint_host: b"0.0.0.0".to_vec().try_into().unwrap(), - endpoint_port: 8081, - vendor: b"Bitmain".to_vec().try_into().unwrap(), - hardware_version: b"901".to_vec().try_into().unwrap(), - firmware: b"abcX".to_vec().try_into().unwrap(), - device_id: b"89567".to_vec().try_into().unwrap(), - })); - let message = StandardEitherFrame::>::Sv2( - Sv2Frame::from_message(setup_connection, MESSAGE_TYPE_SETUP_CONNECTION, 0, false) - .expect("Failed to create the frame"), - ); - send_to_upstream.send(message).await.unwrap(); - mock_downstream - .messages_from_upstream - .has_message_type(MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS); - } - - #[tokio::test] - async fn test_mock_upstream() { - let port = TcpListener::bind("127.0.0.1:0") - .unwrap() - .local_addr() - .unwrap() - .port(); - let upstream_socket_addr = SocketAddr::from(([127, 0, 0, 1], port)); - let mock_downstream = MockDownstream::new(upstream_socket_addr); - let upon_receiving_setup_connection = MESSAGE_TYPE_SETUP_CONNECTION; - let respond_with_success = AnyMessage::Common(CommonMessages::SetupConnectionSuccess( - SetupConnectionSuccess { - used_version: 2, - flags: 0, - }, - )); - let mock_upstream = MockUpstream::new( - upstream_socket_addr, - vec![(upon_receiving_setup_connection, respond_with_success)], - ); - 
mock_upstream.start().await; - let send_to_upstream = mock_downstream.start().await; - let setup_connection = - AnyMessage::Common(CommonMessages::SetupConnection(SetupConnection { - protocol: Protocol::TemplateDistributionProtocol, - min_version: 2, - max_version: 2, - flags: 0, - endpoint_host: b"0.0.0.0".to_vec().try_into().unwrap(), - endpoint_port: 8081, - vendor: b"Bitmain".to_vec().try_into().unwrap(), - hardware_version: b"901".to_vec().try_into().unwrap(), - firmware: b"abcX".to_vec().try_into().unwrap(), - device_id: b"89567".to_vec().try_into().unwrap(), - })); - let message = StandardEitherFrame::>::Sv2( - Sv2Frame::from_message(setup_connection, MESSAGE_TYPE_SETUP_CONNECTION, 0, false) - .expect("Failed to create the frame"), - ); - send_to_upstream.send(message).await.unwrap(); - mock_downstream - .messages_from_upstream - .has_message_type(MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS); - } -} diff --git a/test/integration-tests/lib/mod.rs b/test/integration-tests/lib/mod.rs deleted file mode 100644 index a93b836a47..0000000000 --- a/test/integration-tests/lib/mod.rs +++ /dev/null @@ -1,334 +0,0 @@ -use crate::{sniffer::*, sv1_minerd::MinerdProcess, template_provider::*}; -use corepc_node::{ConnectParams, CookieValues}; -use interceptor::InterceptAction; -use jd_client_sv2::JobDeclaratorClient; -use jd_server::JobDeclaratorServer; -use once_cell::sync::OnceCell; -use pool_sv2::PoolSv2; -use std::{ - convert::TryFrom, - net::{Ipv4Addr, SocketAddr}, - time::Duration, -}; -use stratum_apps::{ - config_helpers::CoinbaseRewardScript, - key_utils::{Secp256k1PublicKey, Secp256k1SecretKey}, -}; -use tracing::Level; -use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; -use translator_sv2::TranslatorSv2; -use utils::get_available_address; - -pub mod interceptor; -pub mod message_aggregator; -pub mod mock_roles; -pub mod sniffer; -pub mod sniffer_error; -pub mod sv1_minerd; -pub mod sv1_sniffer; -pub mod template_provider; -pub mod 
types; -pub(crate) mod utils; - -const SHARES_PER_MINUTE: f32 = 120.0; - -static LOGGER: OnceCell<()> = OnceCell::new(); - -/// Each test function should call `start_tracing()` to enable logging. -pub fn start_tracing() { - LOGGER.get_or_init(|| { - let env_filter = EnvFilter::try_from_default_env() - .unwrap_or_else(|_| EnvFilter::new(Level::INFO.to_string())); - - tracing_subscriber::registry() - .with(env_filter) - .with(fmt::layer()) - .init(); - }); -} - -pub fn start_sniffer( - identifier: &str, - upstream: SocketAddr, - check_on_drop: bool, - action: Vec, - timeout: Option, -) -> (Sniffer<'_>, SocketAddr) { - let listening_address = get_available_address(); - let sniffer = Sniffer::new( - identifier, - listening_address, - upstream, - check_on_drop, - action, - timeout, - ); - sniffer.start(); - (sniffer, listening_address) -} - -pub async fn start_pool(template_provider_address: Option) -> (PoolSv2, SocketAddr) { - use pool_sv2::config::PoolConfig; - let listening_address = get_available_address(); - let authority_public_key = Secp256k1PublicKey::try_from( - "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72".to_string(), - ) - .expect("failed"); - let authority_secret_key = Secp256k1SecretKey::try_from( - "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n".to_string(), - ) - .expect("failed"); - let cert_validity_sec = 3600; - let coinbase_reward_script = CoinbaseRewardScript::from_descriptor( - "wpkh(036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075)", - ) - .unwrap(); - let pool_signature = "Stratum V2 SRI Pool".to_string(); - let tp_address = if let Some(tp_add) = template_provider_address { - tp_add.to_string() - } else { - "127.0.0.1:8442".to_string() - }; - let connection_config = pool_sv2::config::ConnectionConfig::new( - listening_address, - cert_validity_sec, - pool_signature, - ); - let template_provider_config = pool_sv2::config::TemplateProviderConfig::new(tp_address, None); - let authority_config = - 
pool_sv2::config::AuthorityConfig::new(authority_public_key, authority_secret_key); - let share_batch_size = 1; - let config = PoolConfig::new( - connection_config, - template_provider_config, - authority_config, - coinbase_reward_script, - SHARES_PER_MINUTE, - share_batch_size, - 1, - ); - let pool = PoolSv2::new(config); - let pool_clone = pool.clone(); - tokio::spawn(async move { - _ = pool_clone.start().await; - }); - tokio::time::sleep(Duration::from_secs(1)).await; - (pool, listening_address) -} - -pub fn start_template_provider( - sv2_interval: Option, - difficulty_level: DifficultyLevel, -) -> (TemplateProvider, SocketAddr) { - let address = get_available_address(); - let sv2_interval = sv2_interval.unwrap_or(20); - let template_provider = TemplateProvider::start(address.port(), sv2_interval, difficulty_level); - template_provider.generate_blocks(1); - (template_provider, address) -} - -pub fn start_jdc( - pool: &[(SocketAddr, SocketAddr)], // (pool_address, jds_address) - tp_address: SocketAddr, -) -> (JobDeclaratorClient, SocketAddr) { - use jd_client_sv2::config::{ - JobDeclaratorClientConfig, PoolConfig, ProtocolConfig, TPConfig, Upstream, - }; - let jdc_address = get_available_address(); - let max_supported_version = 2; - let min_supported_version = 2; - let authority_public_key = Secp256k1PublicKey::try_from( - "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72".to_string(), - ) - .unwrap(); - let authority_secret_key = Secp256k1SecretKey::try_from( - "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n".to_string(), - ) - .unwrap(); - let coinbase_reward_script = CoinbaseRewardScript::from_descriptor( - "wpkh(036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075)", - ) - .unwrap(); - let authority_pubkey = Secp256k1PublicKey::try_from( - "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72".to_string(), - ) - .unwrap(); - let upstreams = pool - .iter() - .map(|(pool_addr, jds_addr)| { - Upstream::new( - authority_pubkey, - 
pool_addr.ip().to_string(), - pool_addr.port(), - jds_addr.ip().to_string(), - jds_addr.port(), - ) - }) - .collect(); - let pool_config = PoolConfig::new(authority_public_key, authority_secret_key); - let tp_config = TPConfig::new(1000, tp_address.to_string(), None); - let protocol_config = ProtocolConfig::new( - max_supported_version, - min_supported_version, - coinbase_reward_script, - ); - let shares_per_minute = 10.0; - let shares_batch_size = 1; - let user_identity = "IT-test".to_string(); - let jdc_signature = "JDC".to_string(); - let jd_client_proxy = JobDeclaratorClientConfig::new( - jdc_address, - protocol_config, - user_identity, - shares_per_minute, - shares_batch_size, - pool_config, - tp_config, - upstreams, - jdc_signature, - None, - ); - let ret = jd_client_sv2::JobDeclaratorClient::new(jd_client_proxy); - let ret_clone = ret.clone(); - tokio::spawn(async move { ret_clone.start().await }); - (ret, jdc_address) -} - -pub fn start_jds(tp_rpc_connection: &ConnectParams) -> (JobDeclaratorServer, SocketAddr) { - use jd_server::config::{CoreRpc, JobDeclaratorServerConfig}; - let authority_public_key = Secp256k1PublicKey::try_from( - "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72".to_string(), - ) - .unwrap(); - let authority_secret_key = Secp256k1SecretKey::try_from( - "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n".to_string(), - ) - .unwrap(); - let listen_jd_address = get_available_address(); - let cert_validity_sec = 3600; - let coinbase_reward_script = CoinbaseRewardScript::from_descriptor( - "wpkh(036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075)", - ) - .unwrap(); - if let Ok(Some(CookieValues { user, password })) = tp_rpc_connection.get_cookie_values() { - let ip = tp_rpc_connection.rpc_socket.ip().to_string(); - let url = jd_server::Uri::builder() - .scheme("http") - .authority(ip) - .path_and_query("") - .build() - .unwrap(); - let core_rpc = CoreRpc::new( - url.to_string(), - tp_rpc_connection.rpc_socket.port(), - 
user, - password, - ); - let config = JobDeclaratorServerConfig::new( - listen_jd_address.to_string(), - authority_public_key, - authority_secret_key, - cert_validity_sec, - coinbase_reward_script, - core_rpc, - std::time::Duration::from_secs(1), - ); - let job_declarator_server = JobDeclaratorServer::new(config); - let job_declarator_server_clone = job_declarator_server.clone(); - tokio::spawn(async move { - job_declarator_server_clone.start().await.unwrap(); - }); - (job_declarator_server, listen_jd_address) - } else { - panic!("Failed to get TP cookie values"); - } -} - -pub async fn start_sv2_translator(upstream: SocketAddr) -> (TranslatorSv2, SocketAddr) { - let upstream_address = upstream.ip().to_string(); - let upstream_port = upstream.port(); - let upstream_authority_pubkey = Secp256k1PublicKey::try_from( - "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72".to_string(), - ) - .expect("failed"); - let listening_address = get_available_address(); - let listening_port = listening_address.port(); - - let minerd_process = MinerdProcess::new(SocketAddr::from((Ipv4Addr::LOCALHOST, 0)), false) - .await - .unwrap(); - let min_individual_miner_hashrate = minerd_process.measure_hashrate().await.unwrap() as f32; - - let downstream_difficulty_config = translator_sv2::config::DownstreamDifficultyConfig::new( - min_individual_miner_hashrate, - SHARES_PER_MINUTE, - true, - ); - let upstream_conf = translator_sv2::config::Upstream::new( - upstream_address, - upstream_port, - upstream_authority_pubkey, - ); - let downstream_extranonce2_size = 4; - - let config = translator_sv2::config::TranslatorConfig::new( - vec![upstream_conf], - listening_address.ip().to_string(), - listening_port, - downstream_difficulty_config, - 2, - 2, - downstream_extranonce2_size, - "user_identity".to_string(), - false, - ); - let translator_v2 = translator_sv2::TranslatorSv2::new(config); - let clone_translator_v2 = translator_v2.clone(); - tokio::spawn(async move { - 
clone_translator_v2.start().await; - }); - (translator_v2, listening_address) -} - -pub async fn start_minerd( - upstream_addr: SocketAddr, - username: Option, - password: Option, - single_submit: bool, -) -> (sv1_minerd::MinerdProcess, SocketAddr) { - let (process, local_addr) = - sv1_minerd::start_minerd(upstream_addr, username, password, single_submit) - .await - .expect("Failed to start minerd process"); - (process, local_addr) -} - -pub fn start_mining_device_sv2( - upstream: SocketAddr, - pub_key: Option, - device_id: Option, - user_id: Option, - handicap: u32, - nominal_hashrate_multiplier: Option, - single_submit: bool, -) { - tokio::spawn(async move { - mining_device::connect( - upstream.to_string(), - pub_key, - device_id, - user_id, - handicap, - nominal_hashrate_multiplier, - single_submit, - ) - .await; - }); -} - -pub fn start_sv1_sniffer(upstream_address: SocketAddr) -> (sv1_sniffer::SnifferSV1, SocketAddr) { - let listening_address = get_available_address(); - let sniffer_sv1 = sv1_sniffer::SnifferSV1::new(listening_address, upstream_address); - sniffer_sv1.start(); - (sniffer_sv1, listening_address) -} diff --git a/test/integration-tests/lib/sniffer.rs b/test/integration-tests/lib/sniffer.rs deleted file mode 100644 index d77447b614..0000000000 --- a/test/integration-tests/lib/sniffer.rs +++ /dev/null @@ -1,390 +0,0 @@ -use crate::{ - interceptor::{InterceptAction, MessageDirection}, - message_aggregator::MessagesAggregator, - types::MsgType, - utils::{ - create_downstream, create_upstream, recv_from_down_send_to_up, recv_from_up_send_to_down, - wait_for_client, - }, -}; -use std::net::SocketAddr; -use stratum_apps::stratum_core::parsers_sv2::{message_type_to_name, AnyMessage}; -use tokio::{net::TcpStream, select}; - -const DEFAULT_TIMEOUT: u64 = 60; - -/// Allows to intercept messages sent between two roles. 
-/// -/// Can be useful for testing purposes, as it allows to assert that the roles have sent specific -/// messages in a specific order and to inspect the messages details. -/// -/// The downstream (or client) role connects to the [`Sniffer`] `listening_address` and the -/// [`Sniffer`] connects to the `upstream` server. This way, the Sniffer can intercept messages sent -/// between the downstream and upstream roles. -/// -/// Messages received from downstream are stored in the `messages_from_downstream` aggregator and -/// forwarded to the upstream role. Alternatively, messages received from upstream are stored in -/// the `messages_from_upstream` and forwarded to the downstream role. Both -/// `messages_from_downstream` and `messages_from_upstream` aggregators can be accessed as FIFO -/// queues via [`Sniffer::next_message_from_downstream`] and -/// [`Sniffer::next_message_from_upstream`], respectively. -/// -/// The `timeout` parameter can be used to configure the timeout for the sniffer. If not provided, -/// the default timeout is 1 minute. -/// -/// In order to replace or ignore the messages sent between the roles, [`InterceptAction`] can be -/// used in [`Sniffer::new`]. -#[derive(Debug, Clone)] -pub struct Sniffer<'a> { - identifier: &'a str, - listening_address: SocketAddr, - upstream_address: SocketAddr, - messages_from_downstream: MessagesAggregator, - messages_from_upstream: MessagesAggregator, - check_on_drop: bool, - action: Vec, - timeout: Option, -} - -impl<'a> Sniffer<'a> { - /// Creates a new sniffer that listens on the given listening address and connects to the given - /// upstream address. 
- pub fn new( - identifier: &'a str, - listening_address: SocketAddr, - upstream_address: SocketAddr, - check_on_drop: bool, - action: Vec, - timeout: Option, - ) -> Self { - Self { - identifier, - listening_address, - upstream_address, - messages_from_downstream: MessagesAggregator::new(), - messages_from_upstream: MessagesAggregator::new(), - check_on_drop, - action, - timeout, - } - } - - /// Starts the sniffer. - /// - /// The sniffer should be started after the upstream role have been initialized and is ready to - /// accept messages and before the downstream role starts sending messages. - pub fn start(&self) { - let listening_address = self.listening_address; - let upstream_address = self.upstream_address; - let messages_from_downstream = self.messages_from_downstream.clone(); - let messages_from_upstream = self.messages_from_upstream.clone(); - let action = self.action.clone(); - let identifier = self.identifier.to_string(); - tokio::spawn(async move { - let (downstream_receiver, downstream_sender) = - create_downstream(wait_for_client(listening_address).await) - .await - .expect("Failed to create downstream"); - let (upstream_receiver, upstream_sender) = create_upstream( - TcpStream::connect(upstream_address) - .await - .expect("Failed to connect to upstream"), - ) - .await - .expect("Failed to create upstream"); - select! { - _ = tokio::signal::ctrl_c() => { }, - _ = recv_from_down_send_to_up(downstream_receiver, upstream_sender, messages_from_downstream, action.clone(), &identifier) => { }, - _ = recv_from_up_send_to_down(upstream_receiver, downstream_sender, messages_from_upstream, action, &identifier) => { }, - }; - }); - } - - /// Returns the oldest message sent by downstream. - /// - /// The queue is FIFO and once a message is returned it is removed from the queue. 
- /// - /// This can be used to assert that the downstream sent: - /// - specific message types - /// - specific message fields - pub fn next_message_from_downstream(&self) -> Option<(MsgType, AnyMessage<'static>)> { - self.messages_from_downstream.next_message() - } - - /// Returns the oldest message sent by upstream. - /// - /// The queue is FIFO and once a message is returned it is removed from the queue. - /// - /// This can be used to assert that the upstream sent: - /// - specific message types - /// - specific message fields - pub fn next_message_from_upstream(&self) -> Option<(MsgType, AnyMessage<'static>)> { - self.messages_from_upstream.next_message() - } - - /// Waits until a message of the specified type is received into the `message_direction` - /// corresponding queue. - pub async fn wait_for_message_type( - &self, - message_direction: MessageDirection, - message_type: u8, - ) { - let now = std::time::Instant::now(); - loop { - let has_message_type = match message_direction { - MessageDirection::ToDownstream => { - self.messages_from_upstream.has_message_type(message_type) - } - MessageDirection::ToUpstream => { - self.messages_from_downstream.has_message_type(message_type) - } - }; - - // ready to unblock test runtime - if has_message_type { - return; - } - - // configurable timeout, 1 minute default - if now.elapsed().as_secs() > self.timeout.unwrap_or(DEFAULT_TIMEOUT) { - panic!( - "timeout while waiting for message {} to go {}", - message_type_to_name(message_type), - message_direction - ); - } - - // sleep to reduce async lock contention - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - } - } - - /// Assert message is not present in the queue - /// - /// Will return true if the message is not present in the queue, false otherwise. 
- pub async fn assert_message_not_present( - &self, - message_direction: MessageDirection, - message_type: u8, - ) -> bool { - let has_message_type = match message_direction { - MessageDirection::ToDownstream => { - self.messages_from_upstream.has_message_type(message_type) - } - MessageDirection::ToUpstream => { - self.messages_from_downstream.has_message_type(message_type) - } - }; - !has_message_type - } - - /// Similar to `[Sniffer::wait_for_message_type]` but also removes the messages from the queue - /// including the specified message type. - pub async fn wait_for_message_type_and_clean_queue( - &self, - message_direction: MessageDirection, - message_type: u8, - ) -> bool { - let now = std::time::Instant::now(); - loop { - let has_message_type = match message_direction { - MessageDirection::ToDownstream => self - .messages_from_upstream - .has_message_type_with_remove(message_type), - MessageDirection::ToUpstream => self - .messages_from_downstream - .has_message_type_with_remove(message_type), - }; - - // ready to unblock test runtime - if has_message_type { - return true; - } - - // configurable timeout, 1 minute default - if now.elapsed().as_secs() > self.timeout.unwrap_or(DEFAULT_TIMEOUT) { - panic!( - "timeout while waiting for message {} to go {}", - message_type_to_name(message_type), - message_direction - ); - } - - // sleep to reduce async lock contention - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - } - } - - /// Checks whether the sniffer has received a message of the specified type. 
- pub fn includes_message_type( - &self, - message_direction: MessageDirection, - message_type: u8, - ) -> bool { - match message_direction { - MessageDirection::ToDownstream => { - self.messages_from_upstream.has_message_type(message_type) - } - MessageDirection::ToUpstream => { - self.messages_from_downstream.has_message_type(message_type) - } - } - } -} - -// Utility macro to assert that the downstream and upstream roles have sent specific messages. -// -// This macro can be called in two ways: -// 1. If you want to assert the message without any of its properties, you can invoke the macro -// with the message group, the nested message group, the message, and the expected message: -// `assert_message!(TemplateDistribution, TemplateDistribution, $msg, -// $expected_message_variant);`. -// -// 2. If you want to assert the message with its properties, you can invoke the macro with the -// message group, the nested message group, the message, the expected message, and the expected -// properties and values: -// `assert_message!(TemplateDistribution, TemplateDistribution, $msg, $expected_message_variant, -// $expected_property, $expected_property_value, ...);`. -// Note that you can provide any number of properties and values. -// -// In both cases, the `$message_group` could be any variant of `AnyMessage::$message_group` and -// the `$nested_message_group` could be any variant of -// `AnyMessage::$message_group($nested_message_group)`. -// -// If you dont want to provide the `$message_group` and `$nested_message_group` arguments, you can -// utilize `assert_common_message!`, `assert_tp_message!`, `assert_mining_message!`, and -// `assert_jd_message!` macros. All those macros are just wrappers around `assert_message!` macro -// with predefined `$message_group` and `$nested_message_group` arguments. They also can be called -// in two ways, with or without properties validation. -#[macro_export] -macro_rules! 
assert_message { - ($message_group:ident, $nested_message_group:ident, $msg:expr, $expected_message_variant:ident, - $($expected_property:ident, $expected_property_value:expr),*) => { match $msg { - Some((_, message)) => { - match message { - AnyMessage::$message_group($nested_message_group::$expected_message_variant( - $expected_message_variant { - $($expected_property,)* - .. - }, - )) => { - $( - assert_eq!($expected_property.clone(), $expected_property_value); - )* - } - _ => { - panic!( - "Sent wrong message: {:?}", - message - ); - } - } - } - _ => panic!("No message received"), - } - }; - ($message_group:ident, $nested_message_group:ident, $msg:expr, $expected_message_variant:ident) => { - match $msg { - Some((_, message)) => { - match message { - AnyMessage::$message_group($nested_message_group::$expected_message_variant(_)) => {} - _ => { - panic!( - "Sent wrong message: {:?}", - message - ); - } - } - } - _ => panic!("No message received"), - } - }; -} - -// Assert that the message is a common message and that it has the expected properties and values. -#[macro_export] -macro_rules! assert_common_message { - ($msg:expr, $expected_message_variant:ident, $($expected_property:ident, $expected_property_value:expr),*) => { - assert_message!(Common, CommonMessages, $msg, $expected_message_variant, $($expected_property, $expected_property_value),*); - }; - ($msg:expr, $expected_message_variant:ident) => { - assert_message!(Common, CommonMessages, $msg, $expected_message_variant); - }; -} - -// Assert that the message is a template distribution message and that it has the expected -// properties and values. -#[macro_export] -macro_rules! 
assert_tp_message { - ($msg:expr, $expected_message_variant:ident, $($expected_property:ident, $expected_property_value:expr),*) => { - assert_message!(TemplateDistribution, TemplateDistribution, $msg, $expected_message_variant, $($expected_property, $expected_property_value),*); - }; - ($msg:expr, $expected_message_variant:ident) => { - assert_message!(TemplateDistribution, TemplateDistribution, $msg, $expected_message_variant); - }; -} - -// Assert that the message is a mining message and that it has the expected properties and values. -#[macro_export] -macro_rules! assert_mining_message { - ($msg:expr, $expected_message_variant:ident, $($expected_property:ident, $expected_property_value:expr),*) => { - assert_message!(Mining, Mining, $msg, $expected_message_variant, $($expected_property, $expected_property_value),*); - }; - ($msg:expr, $expected_message_variant:ident) => { - assert_message!(Mining, Mining, $msg, $expected_message_variant); - }; -} - -// Assert that the message is a job declaration message and that it has the expected properties and -// values. -#[macro_export] -macro_rules! assert_jd_message { - ($msg:expr, $expected_message_variant:ident, $($expected_property:ident, $expected_property_value:expr),*) => { - assert_message!(JobDeclaration, JobDeclaration, $msg, $expected_message_variant, $($expected_property, $expected_property_value),*); - }; - ($msg:expr, $expected_message_variant:ident) => { - assert_message!(JobDeclaration, JobDeclaration, $msg, $expected_message_variant); - }; -} - -// This implementation is used in order to check if a test has handled all messages sent by the -// downstream and upstream roles. If not, the test will panic. -// -// This is useful to ensure that the test has checked all exchanged messages between the roles. 
-impl Drop for Sniffer<'_> { - fn drop(&mut self) { - if self.check_on_drop { - match ( - self.messages_from_downstream.is_empty(), - self.messages_from_upstream.is_empty(), - ) { - (true, true) => {} - (true, false) => { - println!( - "Sniffer {}: You didn't handle all upstream messages: {:?}", - self.identifier, self.messages_from_upstream - ); - panic!(); - } - (false, true) => { - println!( - "Sniffer {}: You didn't handle all downstream messages: {:?}", - self.identifier, self.messages_from_downstream - ); - panic!(); - } - (false, false) => { - println!( - "Sniffer {}: You didn't handle all downstream messages: {:?}", - self.identifier, self.messages_from_downstream - ); - println!( - "Sniffer {}: You didn't handle all upstream messages: {:?}", - self.identifier, self.messages_from_upstream - ); - panic!(); - } - } - } - } -} diff --git a/test/integration-tests/lib/sniffer_error.rs b/test/integration-tests/lib/sniffer_error.rs deleted file mode 100644 index ee0fc817c4..0000000000 --- a/test/integration-tests/lib/sniffer_error.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[derive(Debug, PartialEq)] -pub enum SnifferError { - DownstreamClosed, - UpstreamClosed, -} diff --git a/test/integration-tests/lib/sv1_minerd/error.rs b/test/integration-tests/lib/sv1_minerd/error.rs deleted file mode 100644 index e6a408428c..0000000000 --- a/test/integration-tests/lib/sv1_minerd/error.rs +++ /dev/null @@ -1,70 +0,0 @@ -use std::fmt; - -/// Errors that can occur when using MinerdWrapper -#[derive(Debug)] -pub enum MinerdError { - /// IO operation failed - Io(tokio::io::Error), - /// Process spawn failed - ProcessSpawn(tokio::io::Error), - /// Process is already running - ProcessAlreadyRunning, - /// Process is not running when expected - ProcessNotRunning, - /// Network connection failed - NetworkConnection(tokio::io::Error), - /// Proxy setup failed - ProxySetup(tokio::io::Error), - /// Invalid configuration - InvalidConfiguration(String), - /// Failed to parse hashrate from minerd 
benchmark output - HashrateParseError, - /// Mutex was poisoned - MutexPoisoned, - /// OS or Architecture not supported - OsArchNotSupported(String), -} - -impl fmt::Display for MinerdError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - MinerdError::Io(e) => write!(f, "IO error: {}", e), - MinerdError::ProcessSpawn(e) => write!(f, "Failed to spawn minerd process: {}", e), - MinerdError::ProcessAlreadyRunning => write!(f, "Minerd process is already running"), - MinerdError::ProcessNotRunning => write!(f, "Minerd process is not running"), - MinerdError::NetworkConnection(e) => write!(f, "Network connection failed: {}", e), - MinerdError::ProxySetup(e) => write!(f, "Proxy setup failed: {}", e), - MinerdError::InvalidConfiguration(msg) => write!(f, "Invalid configuration: {}", msg), - MinerdError::HashrateParseError => { - write!(f, "Failed to parse hashrate from minerd benchmark output") - } - MinerdError::MutexPoisoned => write!(f, "Mutex was poisoned"), - MinerdError::OsArchNotSupported(msg) => { - write!(f, "OS or architecture not supported: {}", msg) - } - } - } -} - -impl std::error::Error for MinerdError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - MinerdError::Io(e) => Some(e), - MinerdError::ProcessSpawn(e) => Some(e), - MinerdError::NetworkConnection(e) => Some(e), - MinerdError::ProxySetup(e) => Some(e), - MinerdError::ProcessAlreadyRunning - | MinerdError::ProcessNotRunning - | MinerdError::InvalidConfiguration(_) - | MinerdError::HashrateParseError - | MinerdError::MutexPoisoned => None, - MinerdError::OsArchNotSupported(_) => None, - } - } -} - -impl From for MinerdError { - fn from(error: tokio::io::Error) -> Self { - MinerdError::Io(error) - } -} diff --git a/test/integration-tests/lib/sv1_minerd/mod.rs b/test/integration-tests/lib/sv1_minerd/mod.rs deleted file mode 100644 index 8c015aa533..0000000000 --- a/test/integration-tests/lib/sv1_minerd/mod.rs +++ /dev/null @@ -1,5 +0,0 
@@ -pub mod error; -pub mod process; - -pub use error::MinerdError; -pub use process::{start_minerd, MinerdProcess}; diff --git a/test/integration-tests/lib/sv1_minerd/process.rs b/test/integration-tests/lib/sv1_minerd/process.rs deleted file mode 100644 index 094c747cdc..0000000000 --- a/test/integration-tests/lib/sv1_minerd/process.rs +++ /dev/null @@ -1,592 +0,0 @@ -use std::{ - fs, - net::SocketAddr, - path::PathBuf, - sync::{Arc, Mutex}, -}; -use tokio::{ - io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt, BufReader}, - net::{TcpListener, TcpStream}, - process::{Child as TokioChild, Command as TokioCommand}, -}; -use tokio_util::sync::CancellationToken; -use tracing::{debug, error, info}; - -use crate::utils::{http, tarball}; - -use super::error::MinerdError; - -const VERSION_MINERD: &str = "2.5.1"; - -fn get_minerd_filename(os: &str, arch: &str) -> Result { - match (os, arch) { - ("macos", "aarch64") => Ok(format!( - "pooler-cpuminer-{VERSION_MINERD}-arm64-apple-darwin.tar.gz" - )), - ("macos", "x86_64") => Ok(format!( - "pooler-cpuminer-{VERSION_MINERD}-x86_64-apple-darwin.tar.gz" - )), - ("linux", "x86_64") => Ok(format!( - "pooler-cpuminer-{VERSION_MINERD}-linux-x86_64.tar.gz" - )), - ("linux", "aarch64") => Ok(format!( - "pooler-cpuminer-{VERSION_MINERD}-linux-arm64.tar.gz" - )), - _ => Err(MinerdError::OsArchNotSupported(format!( - "OS or architecture not supported: {} {}", - os, arch - ))), - } -} - -/// A wrapper struct for the minerd process that provides: -/// - TCP proxy functionality to intercept communications -/// - Process management for spawning and killing minerd -#[derive(Debug)] -pub struct MinerdProcess { - /// Path to the minerd binary - minerd_binary: PathBuf, - /// Handle to the spawned minerd process - process: Arc>>, - /// Address where the wrapper listens for minerd connections - local_address: SocketAddr, - /// Address of the upstream mining server - upstream_address: SocketAddr, - /// Whether to kill the process after the first 
mining.submit - single_submit: bool, - /// Cancellation token to coordinate shutdown of all tasks - cancellation_token: CancellationToken, -} - -impl MinerdProcess { - /// Creates a new MinerdProcess with the given upstream address - pub async fn new( - upstream_address: SocketAddr, - single_submit: bool, - ) -> Result { - let current_dir: PathBuf = std::env::current_dir().expect("failed to read current dir"); - let minerd_dir = current_dir.join("minerd"); - - let os = std::env::consts::OS; - let arch = std::env::consts::ARCH; - let download_filename = get_minerd_filename(os, arch)?; - - if !minerd_dir.exists() { - fs::create_dir_all(minerd_dir.clone()).expect("failed to create minerd directory"); - let download_endpoint = format!( - "https://github.com/stratum-mining/cpuminer/releases/download/v{VERSION_MINERD}/" - ); - let url = format!("{download_endpoint}{download_filename}"); - let tarball_bytes = http::make_get_request(&url, 5); - tarball::unpack(&tarball_bytes, &minerd_dir); - } - - let minerd_binary = minerd_dir.join("minerd"); - - if os == "macos" { - std::process::Command::new("codesign") - .arg("--sign") - .arg("-") - .arg(&minerd_binary) - .output() - .expect("failed to sign minerd binary"); - } - - // Bind to local address for the proxy - // use 0 to let the OS assign a randomly available port - let listener = TcpListener::bind(("127.0.0.1", 0)) - .await - .map_err(MinerdError::ProxySetup)?; - let local_address = listener.local_addr().map_err(MinerdError::ProxySetup)?; - - Ok(MinerdProcess { - minerd_binary, - process: Arc::new(Mutex::new(None)), - local_address, - upstream_address, - single_submit, - cancellation_token: CancellationToken::new(), - }) - } - - /// Returns the local address where the wrapper is listening - pub fn local_address(&self) -> SocketAddr { - self.local_address - } - - /// Returns the upstream address that minerd will connect to through the proxy - pub fn upstream_address(&self) -> SocketAddr { - self.upstream_address - } - - 
/// Spawns the minerd process with the given parameters - pub async fn spawn_minerd( - &mut self, - username: Option, - password: Option, - ) -> Result<(), MinerdError> { - let mut process_guard = self - .process - .lock() - .map_err(|_| MinerdError::MutexPoisoned)?; - if process_guard.is_some() { - return Err(MinerdError::ProcessAlreadyRunning); - } - - let mut cmd = TokioCommand::new(&self.minerd_binary); - - // Kill the process on drop - cmd.kill_on_drop(true); - - // Set the algorithm to sha256d - cmd.arg("-a").arg("sha256d"); - - // Set the number of threads to use for mining - cmd.arg("--threads").arg("1"); - - // Set the retry pause to 1 second - cmd.arg("--retry-pause").arg("1"); - - // Set the URL to connect to our local proxy instead of upstream directly - cmd.arg("--url").arg(format!( - "stratum+tcp://{}:{}", - self.local_address.ip(), - self.local_address.port() - )); - - // Add username and password if provided - if let Some(ref username) = username { - cmd.arg("--userpass").arg(format!( - "{}:{}", - username, - password.as_deref().unwrap_or("") - )); - } - - info!("Spawning minerd with command: {:?}", cmd); - - let child = cmd.spawn().map_err(MinerdError::ProcessSpawn)?; - *process_guard = Some(child); - info!("minerd process spawned successfully"); - Ok(()) - } - - /// Starts the TCP proxy to intercept communications between minerd and the upstream server - pub async fn start_tcp_proxy(&mut self) -> Result<(), MinerdError> { - let listener = TcpListener::bind(self.local_address) - .await - .map_err(MinerdError::ProxySetup)?; - let upstream_address = self.upstream_address; - let single_submit = self.single_submit; - let process = Arc::clone(&self.process); - let cancellation_token = self.cancellation_token.clone(); - - tokio::spawn(async move { - info!("Proxy server started, waiting for connections..."); - - loop { - tokio::select! 
{ - accept_result = listener.accept() => { - match accept_result { - Ok((downstream_stream, _)) => { - info!("New connection from minerd"); - - // Connect to upstream server - match TcpStream::connect(upstream_address).await { - Ok(upstream_stream) => { - // Split streams for bidirectional communication - let (downstream_read, downstream_write) = downstream_stream.into_split(); - let (upstream_read, upstream_write) = upstream_stream.into_split(); - - let process_clone = Arc::clone(&process); - let token_clone1 = cancellation_token.clone(); - let token_clone2 = cancellation_token.clone(); - - // Task for downstream -> upstream (minerd -> pool) - if single_submit { - tokio::spawn(async move { - let _ = Self::proxy_tcp_data_single_submit( - downstream_read, - upstream_write, - process_clone, - token_clone1, - ).await; - }); - } else { - tokio::spawn(async move { - let _ = Self::proxy_tcp_data( - downstream_read, - upstream_write, - token_clone1, - ).await; - }); - } - - // Task for upstream -> downstream (pool -> minerd) - tokio::spawn(async move { - let _ = Self::proxy_tcp_data( - upstream_read, - downstream_write, - token_clone2, - ).await; - }); - } - Err(e) => { - error!("Failed to connect to upstream server {}: {}", upstream_address, e); - } - } - } - Err(e) => { - error!("Failed to accept connection: {}", e); - break; - } - } - } - _ = cancellation_token.cancelled() => { - info!("Proxy server shutting down due to cancellation"); - break; - } - } - } - }); - - Ok(()) - } - - /// Proxies data between two TCP streams with monitoring for mining.submit - /// This function will automatically kill the process and trigger shutdown when mining.submit is - /// detected - async fn proxy_tcp_data_single_submit( - mut from: tokio::net::tcp::OwnedReadHalf, - mut to: tokio::net::tcp::OwnedWriteHalf, - process: Arc>>, - cancellation_token: CancellationToken, - ) { - let mut buffer = [0; 4096]; - - loop { - tokio::select! 
{ - read_result = from.read(&mut buffer) => { - match read_result { - Ok(0) => { - debug!("Connection closed"); - break; - } - Ok(n) => { - let data = &buffer[..n]; - - // Check for mining.submit and trigger shutdown - if let Ok(data_str) = std::str::from_utf8(data) { - if data_str.contains("\"mining.submit\"") { - info!("Detected mining.submit, killing minerd process and triggering shutdown"); - - // Forward the data first - if let Err(e) = to.write_all(data).await { - error!("Failed to write data: {}", e); - } - - // Kill the process - let child = { - match process.lock() { - Ok(mut process_guard) => process_guard.take(), - Err(_) => { - error!("Mutex poisoned while trying to kill process"); - None - } - } - }; // Lock is released here - - if let Some(mut child) = child { - if let Err(e) = child.kill().await { - error!("Failed to kill minerd process: {}", e); - } else { - info!("minerd process killed successfully after mining.submit"); - } - } - - // Trigger cancellation to stop all tasks - cancellation_token.cancel(); - break; - } - } - - // Forward the data - if let Err(e) = to.write_all(data).await { - error!("Failed to write data: {}", e); - break; - } - } - Err(e) => { - error!("Failed to read data: {}", e); - break; - } - } - } - _ = cancellation_token.cancelled() => { - info!("Proxy task (downstream->upstream) shutting down due to cancellation"); - break; - } - } - } - } - - /// Proxies data between two TCP streams - async fn proxy_tcp_data( - mut from: tokio::net::tcp::OwnedReadHalf, - mut to: tokio::net::tcp::OwnedWriteHalf, - cancellation_token: CancellationToken, - ) -> Result<(), MinerdError> { - let mut buffer = [0; 4096]; - - loop { - tokio::select! 
{ - read_result = from.read(&mut buffer) => { - match read_result { - Ok(0) => { - debug!("Connection closed"); - return Ok(()); - } - Ok(n) => { - let data = &buffer[..n]; - - // Forward the data - if let Err(e) = to.write_all(data).await { - error!("Failed to write data: {}", e); - return Err(MinerdError::Io(e)); - } - } - Err(e) => { - error!("Failed to read data: {}", e); - return Err(MinerdError::Io(e)); - } - } - } - _ = cancellation_token.cancelled() => { - info!("TCP proxy shutting down due to cancellation"); - return Ok(()); - } - } - } - } - - /// Checks if the minerd process is still running - pub fn is_running(&self) -> Result { - let mut process_guard = self - .process - .lock() - .map_err(|_| MinerdError::MutexPoisoned)?; - if let Some(ref mut process) = *process_guard { - match process.try_wait() { - Ok(Some(_)) => Ok(false), // Process has exited - Ok(None) => Ok(true), // Process is still running - Err(_) => Ok(false), // Error checking process status - } - } else { - Ok(false) - } - } - - /// Measures the hashrate of the local minerd binary in benchmark mode - /// Returns the hashrate in hashes per second - pub async fn measure_hashrate(&self) -> Result { - info!("Starting hashrate measurement using minerd benchmark mode"); - - let mut cmd = TokioCommand::new(&self.minerd_binary); - - // Set benchmark mode with specific parameters for consistent measurement - cmd.arg("-a") - .arg("sha256d") // Use sha256d algorithm - .arg("-t") - .arg("1") // Use 1 thread for consistent measurement - .arg("--benchmark") // Enable benchmark mode (no network connection) - .arg("-q"); // Quiet mode to reduce output noise - - // Capture stderr to parse the hashrate output (minerd outputs to stderr) - cmd.stdout(std::process::Stdio::null()); - cmd.stderr(std::process::Stdio::piped()); - - info!("Running minerd benchmark: {:?}", cmd); - - let mut child = cmd.spawn().map_err(MinerdError::ProcessSpawn)?; - - let stderr = child.stderr.take().ok_or_else(|| { - 
MinerdError::ProcessSpawn(std::io::Error::other("Failed to get stderr")) - })?; - - // Give minerd some time to run and produce hashrate output - tokio::time::sleep(tokio::time::Duration::from_secs(3)).await; - - // Kill the benchmark process - if let Err(e) = child.kill().await { - error!("Failed to kill benchmark process: {}", e); - } - - // Read and parse the output from stderr - let mut reader = BufReader::new(stderr); - let mut line = String::new(); - let mut hashrate_hashes_per_sec = None; - - // Read output lines to find hashrate information - let mut all_output = Vec::new(); - while let Ok(bytes_read) = reader.read_line(&mut line).await { - if bytes_read == 0 { - break; - } - - let line_trimmed = line.trim(); - all_output.push(line_trimmed.to_string()); - debug!("Benchmark output: {}", line_trimmed); - - // Parse hashrate from lines like: - // "[2025-08-29 20:10:39] thread 0: 2097152 hashes, 1441 khash/s" - // "[2025-08-29 20:10:39] Total: 1441 khash/s" - if let Some(hashrate_khash) = parse_hashrate_from_benchmark_line(line_trimmed) { - info!("Detected benchmark hashrate: {} khash/s", hashrate_khash); - // Convert khash/s to hashes/s (multiply by 1000) - hashrate_hashes_per_sec = Some(hashrate_khash * 1000.0); - // We can break after finding the first hashrate measurement - break; - } - - line.clear(); - } - - // If we couldn't parse hashrate, log all output for debugging - if hashrate_hashes_per_sec.is_none() { - error!("Failed to parse hashrate from minerd benchmark output. 
Full output:"); - for (i, line) in all_output.iter().enumerate() { - error!(" Line {}: {}", i + 1, line); - } - } - - hashrate_hashes_per_sec.ok_or(MinerdError::HashrateParseError) - } -} - -impl Drop for MinerdProcess { - fn drop(&mut self) { - // Trigger cancellation to signal all tasks to stop - self.cancellation_token.cancel(); - - match self.process.lock() { - Ok(mut process_guard) => { - if let Some(mut process) = process_guard.take() { - if let Err(e) = process.start_kill() { - error!("Error killing minerd process on drop: {}", e); - } else { - info!("minerd process killed on drop"); - } - } - } - Err(_) => { - error!("Mutex poisoned in Drop implementation, cannot kill process cleanly"); - } - } - } -} - -/// Parses hashrate from a minerd benchmark output line -/// Examples: -/// - "[2025-08-29 20:10:39] thread 0: 2097152 hashes, 1441 khash/s" -/// - "[2025-08-29 20:10:39] Total: 1441 khash/s" -fn parse_hashrate_from_benchmark_line(line: &str) -> Option { - // Look for pattern: "X khash/s" or "X.Y khash/s" - if let Some(pos) = line.find(" khash/s") { - // Find the number before " khash/s" - let before_khash = &line[..pos]; - if let Some(space_pos) = before_khash.rfind(' ') { - let number_str = &before_khash[space_pos + 1..]; - if let Ok(hashrate) = number_str.parse::() { - return Some(hashrate); - } - } - } - - // Also try to parse "hash/s" patterns (without the 'k' prefix) - convert to khash/s - if let Some(pos) = line.find(" hash/s") { - let before_hash = &line[..pos]; - if let Some(space_pos) = before_hash.rfind(' ') { - let number_str = &before_hash[space_pos + 1..]; - if let Ok(hashrate) = number_str.parse::() { - // Convert hash/s to khash/s - return Some(hashrate / 1000.0); - } - } - } - - None -} - -pub async fn start_minerd( - upstream_address: SocketAddr, - username: Option, - password: Option, - single_submit: bool, -) -> Result<(MinerdProcess, SocketAddr), MinerdError> { - if username.is_none() && password.is_some() || username.is_some() && 
password.is_none() { - return Err(MinerdError::InvalidConfiguration( - "Username and password must be provided together".to_string(), - )); - } - - let mut minerd_process = MinerdProcess::new(upstream_address, single_submit).await?; - let local_address = minerd_process.local_address(); - - minerd_process.start_tcp_proxy().await?; - minerd_process.spawn_minerd(username, password).await?; - - Ok((minerd_process, local_address)) -} - -#[cfg(test)] -mod tests { - use super::*; - use std::net::{Ipv4Addr, SocketAddr}; - - #[tokio::test] - async fn test_measure_hashrate() { - let minerd_process = MinerdProcess::new(SocketAddr::from((Ipv4Addr::LOCALHOST, 0)), false) - .await - .unwrap(); - let hashrate = minerd_process.measure_hashrate().await.unwrap(); - println!("Hashrate: {} hashes/s", hashrate); - assert!(hashrate > 0.0); - } - - #[test] - fn test_parse_hashrate_from_benchmark_line() { - // Test the parsing logic with known good inputs - assert_eq!( - parse_hashrate_from_benchmark_line( - "[2025-08-29 20:10:39] thread 0: 2097152 hashes, 1441 khash/s" - ), - Some(1441.0) - ); - - assert_eq!( - parse_hashrate_from_benchmark_line("[2025-08-29 20:10:39] Total: 1441 khash/s"), - Some(1441.0) - ); - - assert_eq!( - parse_hashrate_from_benchmark_line( - "[2025-08-29 20:10:39] thread 0: 2097152 hashes, 1441.5 khash/s" - ), - Some(1441.5) - ); - - // Test hash/s conversion - assert_eq!( - parse_hashrate_from_benchmark_line( - "[2025-08-29 20:10:39] thread 0: 2097152 hashes, 1441000 hash/s" - ), - Some(1441.0) - ); - - // Test invalid lines - assert_eq!( - parse_hashrate_from_benchmark_line("random text without hashrate"), - None - ); - } -} diff --git a/test/integration-tests/lib/sv1_sniffer.rs b/test/integration-tests/lib/sv1_sniffer.rs deleted file mode 100644 index bb7619b9a8..0000000000 --- a/test/integration-tests/lib/sv1_sniffer.rs +++ /dev/null @@ -1,193 +0,0 @@ -use crate::interceptor::MessageDirection; -use async_channel::{Receiver, Sender}; -use 
std::{collections::VecDeque, net::SocketAddr, sync::Arc}; -use stratum_apps::{network_helpers::sv1_connection::ConnectionSV1, stratum_core::sv1_api}; -use tokio::{ - net::{TcpListener, TcpStream}, - select, - sync::Mutex, -}; - -#[derive(Debug, PartialEq)] -enum SnifferError { - DownstreamClosed, - UpstreamClosed, -} - -/// Represents an SV1 sniffer. -/// -/// This struct acts as a middleman between two SV1 roles. It forwards messages from one role to -/// the other and vice versa. It also provides methods to wait for specific messages to be received -/// from the downstream or upstream role. -#[derive(Debug, Clone)] -pub struct SnifferSV1 { - listening_address: SocketAddr, - upstream_address: SocketAddr, - messages_from_downstream: MessagesAggregatorSV1, - messages_from_upstream: MessagesAggregatorSV1, -} - -impl SnifferSV1 { - /// Create a new [`SnifferSV1`] instance. - /// - /// The listening address is the address the sniffer will listen on for incoming connections - /// from the downstream role. The upstream address is the address the sniffer will connect to - /// in order to forward messages to the upstream role. - pub fn new(listening_address: SocketAddr, upstream_address: SocketAddr) -> Self { - Self { - listening_address, - upstream_address, - messages_from_downstream: MessagesAggregatorSV1::new(), - messages_from_upstream: MessagesAggregatorSV1::new(), - } - } - - /// Start the sniffer. 
- pub fn start(&self) { - let upstream_address = self.upstream_address; - let listening_address = self.listening_address; - let messages_from_downstream = self.messages_from_downstream.clone(); - let messages_from_upstream = self.messages_from_upstream.clone(); - tokio::spawn(async move { - let listener = TcpListener::bind(listening_address) - .await - .expect("Failed to listen on given address"); - let sniffer_to_upstream_stream = loop { - match TcpStream::connect(upstream_address).await { - Ok(s) => break s, - Err(_) => { - continue; - } - } - }; - let (downstream_stream, _) = listener - .accept() - .await - .expect("Failed to accept downstream connection"); - let sniffer_to_upstream_connection = - ConnectionSV1::new(sniffer_to_upstream_stream).await; - let downstream_to_sniffer_connection = ConnectionSV1::new(downstream_stream).await; - select! { - _ = tokio::signal::ctrl_c() => { }, - _ = Self::recv_from_down_send_to_up_sv1( - downstream_to_sniffer_connection.receiver(), - sniffer_to_upstream_connection.sender(), - messages_from_downstream - ) => { }, - _ = Self::recv_from_up_send_to_down_sv1( - sniffer_to_upstream_connection.receiver(), - downstream_to_sniffer_connection.sender(), - messages_from_upstream - ) => { }, - }; - }); - } - - /// Wait for a specific message to be received from the downstream role. 
- pub async fn wait_for_message(&self, message: &[&str], direction: MessageDirection) { - if message.is_empty() { - panic!("Message cannot be empty"); - } - let now = std::time::Instant::now(); - tokio::select!( - _ = tokio::signal::ctrl_c() => { }, - _ = async { - loop { - match direction { - MessageDirection::ToUpstream => { - if self.messages_from_downstream.has_message(message).await { - break; - } - } - MessageDirection::ToDownstream => { - if self.messages_from_upstream.has_message(message).await { - break; - } - } - } - if now.elapsed().as_secs() > 60 { - panic!( "Timeout: SV1 message {} not found", message.first().unwrap()); - } else { - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - continue; - } - } - } => {} - ); - } - - async fn recv_from_up_send_to_down_sv1( - recv: Receiver, - send: Sender, - upstream_messages: MessagesAggregatorSV1, - ) -> Result<(), SnifferError> { - while let Ok(msg) = recv.recv().await { - send.send(msg.clone()) - .await - .map_err(|_| SnifferError::DownstreamClosed)?; - upstream_messages.add_message(msg.clone()).await; - tracing::info!("πŸ” Sv1 Sniffer | Direction: ⬇ | Forwarded: {}", msg); - } - Err(SnifferError::UpstreamClosed) - } - - async fn recv_from_down_send_to_up_sv1( - recv: Receiver, - send: Sender, - downstream_messages: MessagesAggregatorSV1, - ) -> Result<(), SnifferError> { - while let Ok(msg) = recv.recv().await { - send.send(msg.clone()) - .await - .map_err(|_| SnifferError::UpstreamClosed)?; - downstream_messages.add_message(msg.clone()).await; - tracing::info!("πŸ” Sv1 Sniffer | Direction: ⬆ | Forwarded: {}", msg); - } - Err(SnifferError::DownstreamClosed) - } -} - -/// Represents a SV1 message manager. -/// -/// This struct can be used in order to aggregate and manage SV1 messages. 
-#[derive(Debug, Clone)] -pub(crate) struct MessagesAggregatorSV1 { - messages: Arc>>, -} - -impl MessagesAggregatorSV1 { - fn new() -> Self { - Self { - messages: Arc::new(Mutex::new(VecDeque::new())), - } - } - - async fn add_message(&self, message: sv1_api::Message) { - let mut messages = self.messages.lock().await; - messages.push_back(message); - } - - async fn has_message(&self, expected_msg: &[&str]) -> bool { - let messages = self.messages.lock().await; - let ret = messages.iter().any(|msg| match msg { - sv1_api::Message::StandardRequest(req) => req.method == *expected_msg.first().unwrap(), - sv1_api::Message::Notification(notif) => notif.method == *expected_msg.first().unwrap(), - sv1_api::Message::OkResponse(res) => { - if let Ok(res) = corepc_node::serde_json::to_string(&res) { - for m in expected_msg { - if !res.contains(m) { - return false; - } - } - true - } else { - false - } - } - sv1_api::Message::ErrorResponse(res) => { - res.error.clone().unwrap().message == *expected_msg.first().unwrap() - } - }); - ret - } -} diff --git a/test/integration-tests/lib/template_provider.rs b/test/integration-tests/lib/template_provider.rs deleted file mode 100644 index 023e58bb4b..0000000000 --- a/test/integration-tests/lib/template_provider.rs +++ /dev/null @@ -1,236 +0,0 @@ -use corepc_node::{types::GetBlockchainInfo, Conf, ConnectParams, Node}; -use std::{env, fs::create_dir_all, path::PathBuf}; -use stratum_apps::stratum_core::bitcoin::{Address, Amount, Txid}; -use tracing::warn; - -use crate::utils::{fs_utils, http, tarball}; - -const VERSION_TP: &str = "0.1.19"; - -fn get_bitcoind_filename(os: &str, arch: &str) -> String { - match (os, arch) { - ("macos", "aarch64") => { - format!("bitcoin-sv2-tp-{VERSION_TP}-arm64-apple-darwin-unsigned.tar.gz") - } - ("macos", "x86_64") => { - format!("bitcoin-sv2-tp-{VERSION_TP}-x86_64-apple-darwin-unsigned.tar.gz") - } - ("linux", "x86_64") => format!("bitcoin-sv2-tp-{VERSION_TP}-x86_64-linux-gnu.tar.gz"), - ("linux", 
"aarch64") => format!("bitcoin-sv2-tp-{VERSION_TP}-aarch64-linux-gnu.tar.gz"), - _ => format!("bitcoin-sv2-tp-{VERSION_TP}-x86_64-apple-darwin-unsigned.zip"), - } -} - -/// Represents a template provider node. -/// -/// The template provider is a bitcoin node that implements the Stratum V2 protocol. -#[derive(Debug)] -pub struct TemplateProvider { - bitcoind: Node, -} - -/// Represents the consensus difficulty level of the network. -/// -/// Low: regtest mode (every share is a block) -/// -/// Mid: signet mode with genesis difficulty -/// (most of the time, a CPU should find a block in a minute or less) -/// -/// High: signet mode with premined blocks raising difficulty to 77761.11 -/// (most of the time, a CPU should take a REALLY long time to find a block) -/// -/// Note: signet mode has signetchallenge=51, which means no signature is needed on the coinbase. -pub enum DifficultyLevel { - Low, - Mid, - High, -} - -impl TemplateProvider { - /// Start a new [`TemplateProvider`] instance. 
- pub fn start(port: u16, sv2_interval: u32, difficulty_level: DifficultyLevel) -> Self { - let current_dir: PathBuf = std::env::current_dir().expect("failed to read current dir"); - let tp_dir = current_dir.join("template-provider"); - let mut conf = Conf::default(); - conf.wallet = Some(port.to_string()); - - let staticdir = format!(".bitcoin-{port}"); - conf.staticdir = Some(tp_dir.join(staticdir.clone())); - let port_arg = format!("-sv2port={port}"); - let sv2_interval_arg = format!("-sv2interval={sv2_interval}"); - - match difficulty_level { - DifficultyLevel::Low => { - // use default corepc-node settings, which means regtest mode - // where every share is a block - } - DifficultyLevel::Mid => { - // use signet mode with genesis difficulty - // (signetchallenge=51, no signature needed on the coinbase) - // most of the time, a CPU should find a block in a minute or less - conf.args = vec!["-signet", "-fallbackfee=0.0001", "-signetchallenge=51"]; - conf.network = "signet"; - } - DifficultyLevel::High => { - // use signet mode with premined blocks raising difficulty to 77761.11 - // (signetchallenge=51, no signature needed on the coinbase) - // most of the time, a CPU should take a REALLY long time to find a block - conf.args = vec!["-signet", "-fallbackfee=0.0001", "-signetchallenge=51"]; - conf.network = "signet"; - - // Create signet datadir - let signet_datadir = tp_dir.join(staticdir.clone()).join("signet"); - create_dir_all(signet_datadir.clone()).expect("Failed to create signet directory"); - - // Copy high difficulty signet data into signet datadir - let high_diff_chain_dir = current_dir.join("high_diff_chain"); - fs_utils::copy_dir_contents(&high_diff_chain_dir, &signet_datadir) - .expect("Failed to copy high difficulty chain data"); - } - } - - conf.args.extend(vec![ - "-txindex=1", - "-sv2", - &port_arg, - "-debug=rpc", - "-debug=sv2", - &sv2_interval_arg, - "-sv2feedelta=0", - "-loglevel=sv2:trace", - "-logtimemicros=1", - ]); - let os = 
env::consts::OS; - let arch = env::consts::ARCH; - let download_filename = get_bitcoind_filename(os, arch); - let bitcoin_exe_home = tp_dir - .join(format!("bitcoin-sv2-tp-{VERSION_TP}")) - .join("bin"); - - if !bitcoin_exe_home.exists() { - let tarball_bytes = match env::var("BITCOIND_TARBALL_FILE") { - Ok(path) => tarball::read_from_file(&path), - Err(_) => { - warn!("Downloading template provider for the testing session. This could take a while..."); - let download_endpoint = - env::var("BITCOIND_DOWNLOAD_ENDPOINT").unwrap_or_else(|_| { - "https://github.com/Sjors/bitcoin/releases/download".to_owned() - }); - let url = - format!("{download_endpoint}/sv2-tp-{VERSION_TP}/{download_filename}"); - http::make_get_request(&url, 5) - } - }; - - if let Some(parent) = bitcoin_exe_home.parent() { - create_dir_all(parent).unwrap(); - } - - tarball::unpack(&tarball_bytes, &tp_dir); - - if os == "macos" { - let bitcoind_binary = bitcoin_exe_home.join("bitcoind"); - std::process::Command::new("codesign") - .arg("--sign") - .arg("-") - .arg(&bitcoind_binary) - .output() - .expect("Failed to sign bitcoind binary"); - } - } - - env::set_var("BITCOIND_EXE", bitcoin_exe_home.join("bitcoind")); - let exe_path = corepc_node::exe_path().expect("Failed to get bitcoind path"); - - // this timeout is used to avoid potential racing conditions - // on the bitcoind executable while executing Integration Tests in parallel - // for more context, see https://github.com/stratum-mining/stratum/issues/1278#issuecomment-2692316174 - let timeout = std::time::Duration::from_secs(10); - let current_time = std::time::Instant::now(); - loop { - match Node::with_conf(&exe_path, &conf) { - Ok(bitcoind) => { - break TemplateProvider { bitcoind }; - } - Err(e) => { - if current_time.elapsed() > timeout { - panic!("Failed to start bitcoind: {e}"); - } - println!("Failed to start bitcoind due to {e}"); - } - } - } - } - - /// Mine `n` blocks. 
- pub fn generate_blocks(&self, n: u64) { - let mining_address = self - .bitcoind - .client - .new_address() - .expect("Failed to get mining address"); - self.bitcoind - .client - .generate_to_address(n as usize, &mining_address) - .expect("Failed to generate blocks"); - } - - /// Retrun the node's RPC info. - pub fn rpc_info(&self) -> &ConnectParams { - &self.bitcoind.params - } - - /// Return the result of `getblockchaininfo` RPC call. - pub fn get_blockchain_info(&self) -> Result { - let client = &self.bitcoind.client; - let blockchain_info = client.get_blockchain_info()?; - Ok(blockchain_info) - } - - /// Create and broadcast a transaction to the mempool. - /// - /// It is recommended to use [`TemplateProvider::fund_wallet`] before calling this method to - /// ensure the wallet has enough funds. - pub fn create_mempool_transaction(&self) -> Result<(Address, Txid), corepc_node::Error> { - let client = &self.bitcoind.client; - const MILLION_SATS: Amount = Amount::from_sat(1_000_000); - let address = client.new_address()?; - let txid = client - .send_to_address(&address, MILLION_SATS)? - .txid() - .expect("Unexpected behavior: txid is None"); - Ok((address, txid)) - } - - /// Fund the node's wallet. - /// - /// This can be useful before using [`TemplateProvider::create_mempool_transaction`]. - pub fn fund_wallet(&self) -> Result<(), corepc_node::Error> { - let client = &self.bitcoind.client; - let address = client.new_address()?; - client.generate_to_address(101, &address)?; - Ok(()) - } - - /// Return the hash of the most recent block. 
- pub fn get_best_block_hash(&self) -> Result { - let client = &self.bitcoind.client; - let block_hash = client.get_best_block_hash()?.0; - Ok(block_hash) - } -} - -#[cfg(test)] -mod tests { - use super::{DifficultyLevel, TemplateProvider}; - use crate::utils::get_available_address; - - #[tokio::test] - async fn test_create_mempool_transaction() { - let address = get_available_address(); - let port = address.port(); - let tp = TemplateProvider::start(port, 1, DifficultyLevel::Low); - assert!(tp.fund_wallet().is_ok()); - assert!(tp.create_mempool_transaction().is_ok()); - } -} diff --git a/test/integration-tests/lib/types.rs b/test/integration-tests/lib/types.rs deleted file mode 100644 index a707d59db6..0000000000 --- a/test/integration-tests/lib/types.rs +++ /dev/null @@ -1,4 +0,0 @@ -use stratum_apps::stratum_core::{codec_sv2::StandardEitherFrame, parsers_sv2::AnyMessage}; - -pub type MessageFrame = StandardEitherFrame>; -pub type MsgType = u8; diff --git a/test/integration-tests/lib/utils.rs b/test/integration-tests/lib/utils.rs deleted file mode 100644 index 0c7c8575b8..0000000000 --- a/test/integration-tests/lib/utils.rs +++ /dev/null @@ -1,448 +0,0 @@ -use crate::{ - interceptor::{InterceptAction, MessageDirection}, - message_aggregator::MessagesAggregator, - sniffer_error::SnifferError, - types::{MessageFrame, MsgType}, -}; -use async_channel::{Receiver, Sender}; -use once_cell::sync::Lazy; -use std::{ - collections::HashSet, - convert::TryInto, - net::{SocketAddr, TcpListener}, - sync::Mutex, -}; -use stratum_apps::{ - key_utils::{Secp256k1PublicKey, Secp256k1SecretKey}, - network_helpers::noise_connection::Connection, - stratum_core::{ - codec_sv2::{HandshakeRole, StandardEitherFrame}, - framing_sv2::framing::{Frame, Sv2Frame}, - noise_sv2::{Initiator, Responder}, - parsers_sv2::{ - message_type_to_name, AnyMessage, CommonMessages, IsSv2Message, - JobDeclaration::{ - AllocateMiningJobToken, AllocateMiningJobTokenSuccess, DeclareMiningJob, - 
DeclareMiningJobError, DeclareMiningJobSuccess, ProvideMissingTransactions, - ProvideMissingTransactionsSuccess, PushSolution, - }, - TemplateDistribution, - TemplateDistribution::CoinbaseOutputConstraints, - }, - }, -}; - -// prevents get_available_port from ever returning the same port twice -static UNIQUE_PORTS: Lazy>> = Lazy::new(|| Mutex::new(HashSet::new())); - -pub fn get_available_address() -> SocketAddr { - let port = get_available_port(); - SocketAddr::from(([127, 0, 0, 1], port)) -} - -fn get_available_port() -> u16 { - let mut unique_ports = UNIQUE_PORTS.lock().unwrap(); - - loop { - let port = TcpListener::bind("127.0.0.1:0") - .unwrap() - .local_addr() - .unwrap() - .port(); - if !unique_ports.contains(&port) { - unique_ports.insert(port); - return port; - } - } -} -pub async fn wait_for_client(listen_socket: SocketAddr) -> tokio::net::TcpStream { - let listener = tokio::net::TcpListener::bind(listen_socket) - .await - .expect("Impossible to listen on given address"); - if let Ok((stream, _)) = listener.accept().await { - stream - } else { - panic!("Impossible to accept dowsntream connection") - } -} - -pub async fn create_downstream( - stream: tokio::net::TcpStream, -) -> Option<(Receiver, Sender)> { - let pub_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" - .to_string() - .parse::() - .unwrap() - .into_bytes(); - let prv_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" - .to_string() - .parse::() - .unwrap() - .into_bytes(); - let responder = - Responder::from_authority_kp(&pub_key, &prv_key, std::time::Duration::from_secs(10000)) - .unwrap(); - if let Ok((receiver_from_client, sender_to_client)) = - Connection::new::>(stream, HandshakeRole::Responder(responder)).await - { - Some((receiver_from_client, sender_to_client)) - } else { - None - } -} - -pub async fn create_upstream( - stream: tokio::net::TcpStream, -) -> Option<(Receiver, Sender)> { - let initiator = Initiator::without_pk().expect("This fn call can not fail"); - if 
let Ok((receiver_from_server, sender_to_server)) = - Connection::new::>(stream, HandshakeRole::Initiator(initiator)).await - { - Some((receiver_from_server, sender_to_server)) - } else { - None - } -} - -pub async fn recv_from_down_send_to_up( - recv: Receiver, - send: Sender, - downstream_messages: MessagesAggregator, - action: Vec, - identifier: &str, -) -> Result<(), SnifferError> { - while let Ok(mut frame) = recv.recv().await { - let (msg_type, msg) = message_from_frame(&mut frame); - let action = action.iter().find(|action| { - action - .find_matching_action(msg_type, MessageDirection::ToUpstream) - .is_some() - }); - if let Some(action) = action { - match action { - InterceptAction::IgnoreMessage(_) => { - tracing::info!( - "πŸ” Sv2 Sniffer {} | Ignored: {} | Direction: ⬆", - identifier, - message_type_to_name(msg_type) - ); - continue; - } - InterceptAction::ReplaceMessage(intercept_message) => { - let intercept_frame = StandardEitherFrame::>::Sv2( - Sv2Frame::from_message( - intercept_message.replacement_message.clone(), - intercept_message.replacement_message.message_type(), - 0, - false, - ) - .expect("Failed to create the frame"), - ); - downstream_messages.add_message( - intercept_message.replacement_message.message_type(), - intercept_message.replacement_message.clone(), - ); - send.send(intercept_frame) - .await - .map_err(|_| SnifferError::UpstreamClosed)?; - tracing::info!( - "πŸ” Sv2 Sniffer {} | Replaced: {} with {} | Direction: ⬆", - identifier, - message_type_to_name(msg_type), - message_type_to_name(intercept_message.replacement_message.message_type()) - ); - } - } - } else { - downstream_messages.add_message(msg_type, msg.clone()); - send.send(frame) - .await - .map_err(|_| SnifferError::UpstreamClosed)?; - tracing::info!( - "πŸ” Sv2 Sniffer {} | Forwarded: {} | Direction: ⬆ | Data: {}", - identifier, - message_type_to_name(msg_type), - msg - ); - } - } - Err(SnifferError::DownstreamClosed) -} - -pub async fn recv_from_up_send_to_down( - 
recv: Receiver, - send: Sender, - upstream_messages: MessagesAggregator, - action: Vec, - identifier: &str, -) -> Result<(), SnifferError> { - while let Ok(mut frame) = recv.recv().await { - let (msg_type, msg) = message_from_frame(&mut frame); - let action = action.iter().find(|action| { - action - .find_matching_action(msg_type, MessageDirection::ToDownstream) - .is_some() - }); - - if let Some(action) = action { - match action { - InterceptAction::IgnoreMessage(_) => { - tracing::info!( - "πŸ” Sv2 Sniffer {} | Ignored: {} | Direction: ⬇", - identifier, - message_type_to_name(msg_type) - ); - continue; - } - InterceptAction::ReplaceMessage(intercept_message) => { - let intercept_frame = StandardEitherFrame::>::Sv2( - Sv2Frame::from_message( - intercept_message.replacement_message.clone(), - intercept_message.replacement_message.message_type(), - 0, - false, - ) - .expect("Failed to create the frame"), - ); - upstream_messages.add_message( - intercept_message.replacement_message.message_type(), - intercept_message.replacement_message.clone(), - ); - send.send(intercept_frame) - .await - .map_err(|_| SnifferError::DownstreamClosed)?; - tracing::info!( - "πŸ” Sv2 Sniffer {} | Replaced: {} with {} | Direction: ⬇", - identifier, - message_type_to_name(msg_type), - message_type_to_name(intercept_message.replacement_message.message_type()) - ); - } - } - } else { - upstream_messages.add_message(msg_type, msg.clone()); - send.send(frame) - .await - .map_err(|_| SnifferError::DownstreamClosed)?; - tracing::info!( - "πŸ” Sv2 Sniffer {} | Forwarded: {} | Direction: ⬇ | Data: {}", - identifier, - message_type_to_name(msg_type), - msg - ); - } - } - Err(SnifferError::UpstreamClosed) -} - -pub fn message_from_frame(frame: &mut MessageFrame) -> (MsgType, AnyMessage<'static>) { - match frame { - Frame::Sv2(frame) => { - if let Some(header) = frame.get_header() { - let message_type = header.msg_type(); - let mut payload = frame.payload().to_vec(); - let message: Result, _> = - 
(message_type, payload.as_mut_slice()).try_into(); - match message { - Ok(message) => { - let message = into_static(message); - (message_type, message) - } - _ => { - println!("Received frame with invalid payload or message type: {frame:?}"); - panic!(); - } - } - } else { - println!("Received frame with invalid header: {frame:?}"); - panic!(); - } - } - Frame::HandShake(f) => { - println!("Received unexpected handshake frame: {f:?}"); - panic!(); - } - } -} - -pub fn into_static(m: AnyMessage<'_>) -> AnyMessage<'static> { - match m { - AnyMessage::Mining(m) => AnyMessage::Mining(m.into_static()), - AnyMessage::Common(m) => match m { - CommonMessages::ChannelEndpointChanged(m) => { - AnyMessage::Common(CommonMessages::ChannelEndpointChanged(m.into_static())) - } - CommonMessages::SetupConnection(m) => { - AnyMessage::Common(CommonMessages::SetupConnection(m.into_static())) - } - CommonMessages::SetupConnectionError(m) => { - AnyMessage::Common(CommonMessages::SetupConnectionError(m.into_static())) - } - CommonMessages::SetupConnectionSuccess(m) => { - AnyMessage::Common(CommonMessages::SetupConnectionSuccess(m.into_static())) - } - CommonMessages::Reconnect(m) => { - AnyMessage::Common(CommonMessages::Reconnect(m.into_static())) - } - }, - AnyMessage::JobDeclaration(m) => match m { - AllocateMiningJobToken(m) => { - AnyMessage::JobDeclaration(AllocateMiningJobToken(m.into_static())) - } - AllocateMiningJobTokenSuccess(m) => { - AnyMessage::JobDeclaration(AllocateMiningJobTokenSuccess(m.into_static())) - } - DeclareMiningJob(m) => AnyMessage::JobDeclaration(DeclareMiningJob(m.into_static())), - DeclareMiningJobError(m) => { - AnyMessage::JobDeclaration(DeclareMiningJobError(m.into_static())) - } - DeclareMiningJobSuccess(m) => { - AnyMessage::JobDeclaration(DeclareMiningJobSuccess(m.into_static())) - } - ProvideMissingTransactions(m) => { - AnyMessage::JobDeclaration(ProvideMissingTransactions(m.into_static())) - } - ProvideMissingTransactionsSuccess(m) => { - 
AnyMessage::JobDeclaration(ProvideMissingTransactionsSuccess(m.into_static())) - } - PushSolution(m) => AnyMessage::JobDeclaration(PushSolution(m.into_static())), - }, - AnyMessage::TemplateDistribution(m) => match m { - CoinbaseOutputConstraints(m) => { - AnyMessage::TemplateDistribution(CoinbaseOutputConstraints(m.into_static())) - } - TemplateDistribution::NewTemplate(m) => { - AnyMessage::TemplateDistribution(TemplateDistribution::NewTemplate(m.into_static())) - } - TemplateDistribution::RequestTransactionData(m) => AnyMessage::TemplateDistribution( - TemplateDistribution::RequestTransactionData(m.into_static()), - ), - TemplateDistribution::RequestTransactionDataError(m) => { - AnyMessage::TemplateDistribution(TemplateDistribution::RequestTransactionDataError( - m.into_static(), - )) - } - TemplateDistribution::RequestTransactionDataSuccess(m) => { - AnyMessage::TemplateDistribution( - TemplateDistribution::RequestTransactionDataSuccess(m.into_static()), - ) - } - TemplateDistribution::SetNewPrevHash(m) => AnyMessage::TemplateDistribution( - TemplateDistribution::SetNewPrevHash(m.into_static()), - ), - TemplateDistribution::SubmitSolution(m) => AnyMessage::TemplateDistribution( - TemplateDistribution::SubmitSolution(m.into_static()), - ), - }, - } -} - -pub mod http { - pub fn make_get_request(download_url: &str, retries: usize) -> Vec { - for attempt in 1..=retries { - let response = minreq::get(download_url).send(); - match response { - Ok(res) => { - let status_code = res.status_code; - if (200..300).contains(&status_code) { - return res.as_bytes().to_vec(); - } else if (500..600).contains(&status_code) { - eprintln!( - "Attempt {attempt}: URL {download_url} returned a server error code {status_code}" - ); - } else { - panic!( - "URL {download_url} returned unexpected status code {status_code}. Aborting." 
- ); - } - } - Err(err) => { - eprintln!( - "Attempt {}: Failed to fetch URL {}: {:?}", - attempt + 1, - download_url, - err - ); - } - } - - if attempt < retries { - let delay = 1u64 << (attempt - 1); - eprintln!("Retrying in {delay} seconds (exponential backoff)..."); - std::thread::sleep(std::time::Duration::from_secs(delay)); - } - } - // If all retries fail, panic with an error message - panic!("Cannot reach URL {download_url} after {retries} attempts"); - } -} - -pub mod tarball { - use std::{ - fs::File, - io::{BufReader, Read}, - path::Path, - }; - - pub fn read_from_file(path: &str) -> Vec { - let file = File::open(path).unwrap_or_else(|_| { - panic!("Cannot find {path:?} specified with env var BITCOIND_TARBALL_FILE") - }); - let mut reader = BufReader::new(file); - let mut buffer = Vec::new(); - reader.read_to_end(&mut buffer).unwrap(); - buffer - } - - pub fn unpack(tarball_bytes: &[u8], destination: &Path) { - use std::{io::Write as IoWrite, process::Command}; - - // Write tarball bytes to a temp file - let temp_tarball = destination.join("temp.tar.gz"); - let mut temp_file = File::create(&temp_tarball).unwrap(); - temp_file.write_all(tarball_bytes).unwrap(); - drop(temp_file); - - // Use system tar command to extract, which properly handles GNU sparse files - let output = Command::new("tar") - .arg("-xzf") - .arg(&temp_tarball) - .arg("-C") - .arg(destination) - .arg("--strip-components=0") - .output() - .expect("Failed to execute tar command"); - - if !output.status.success() { - eprintln!("tar stderr: {}", String::from_utf8_lossy(&output.stderr)); - panic!("tar extraction failed"); - } - - // Clean up temp tarball - std::fs::remove_file(&temp_tarball).ok(); - } -} - -pub mod fs_utils { - use std::{fs, path::Path}; - - /// Recursively copy all contents from source directory to destination directory - pub fn copy_dir_contents(src: &Path, dst: &Path) -> std::io::Result<()> { - if !dst.exists() { - fs::create_dir_all(dst)?; - } - - for entry in 
fs::read_dir(src)? { - let entry = entry?; - let src_path = entry.path(); - let dst_path = dst.join(entry.file_name()); - - if src_path.is_dir() { - copy_dir_contents(&src_path, &dst_path)?; - } else { - fs::copy(&src_path, &dst_path)?; - } - } - Ok(()) - } -} diff --git a/test/integration-tests/tests/jd_integration.rs b/test/integration-tests/tests/jd_integration.rs deleted file mode 100644 index dd98173b22..0000000000 --- a/test/integration-tests/tests/jd_integration.rs +++ /dev/null @@ -1,244 +0,0 @@ -// This file contains integration tests for the `JDC/S` module. -use integration_tests_sv2::{ - interceptor::{MessageDirection, ReplaceMessage}, - template_provider::DifficultyLevel, - *, -}; -use stratum_apps::stratum_core::{ - binary_sv2::{Seq064K, B032, U256}, - common_messages_sv2::*, - job_declaration_sv2::{ProvideMissingTransactionsSuccess, PushSolution, *}, - parsers_sv2::{self, AnyMessage}, -}; - -// This test verifies that jd-server does not exit when a connected jd-client shuts down. -// -// It is performing the verification by shutding down a jd-client connected to a jd-server and then -// starting a new jd-client that connects to the same jd-server successfully. 
-#[tokio::test] -async fn jds_should_not_panic_if_jdc_shutsdown() { - start_tracing(); - let (tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let (_pool, pool_addr) = start_pool(Some(tp_addr)).await; - let (_jds, jds_addr) = start_jds(tp.rpc_info()); - let (sniffer_a, sniffer_addr_a) = start_sniffer("0", jds_addr, false, vec![], None); - let (jdc, jdc_addr) = start_jdc(&[(pool_addr, sniffer_addr_a)], tp_addr); - sniffer_a - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; - sniffer_a - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; - drop(jdc); - tokio::time::sleep(tokio::time::Duration::from_millis(2000)).await; - assert!(tokio::net::TcpListener::bind(jdc_addr).await.is_ok()); - let (sniffer, sniffer_addr) = start_sniffer("0", jds_addr, false, vec![], None); - let (_jdc_1, _jdc_addr_1) = start_jdc(&[(pool_addr, sniffer_addr)], tp_addr); - sniffer - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; -} - -// This test verifies that jd-client exchange SetupConnection messages with a Template Provider. -// -// Note that jd-client starts to exchange messages with the Template Provider after it has accepted -// a downstream connection. -#[tokio::test] -async fn jdc_tp_success_setup() { - start_tracing(); - let (tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let (_pool, pool_addr) = start_pool(Some(tp_addr)).await; - let (_jds, jds_addr) = start_jds(tp.rpc_info()); - let (tp_jdc_sniffer, tp_jdc_sniffer_addr) = start_sniffer("0", tp_addr, false, vec![], None); - let (_jdc, jdc_addr) = start_jdc(&[(pool_addr, jds_addr)], tp_jdc_sniffer_addr); - // This is needed because jd-client waits for a downstream connection before it starts - // exchanging messages with the Template Provider. 
- start_sv2_translator(jdc_addr).await; - tp_jdc_sniffer - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; - tp_jdc_sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; -} - -// This test verifies that JDS does not exit when it receives a `SubmitSolution` -// while still expecting a `ProvideMissingTransactionsSuccess`. -// -// It is performing the verification by connecting to JDS after the message exchange -// to check whether it remains alive. -#[tokio::test] -async fn jds_receive_solution_while_processing_declared_job_test() { - start_tracing(); - let (tp_1, tp_addr_1) = start_template_provider(None, DifficultyLevel::Low); - let (tp_2, tp_addr_2) = start_template_provider(None, DifficultyLevel::Low); - let (_pool, pool_addr) = start_pool(Some(tp_addr_1)).await; - let (_jds, jds_addr) = start_jds(tp_1.rpc_info()); - - let prev_hash = U256::Owned(vec![ - 184, 103, 138, 88, 153, 105, 236, 29, 123, 246, 107, 203, 1, 33, 10, 122, 188, 139, 218, - 141, 62, 177, 158, 101, 125, 92, 214, 150, 199, 220, 29, 8, - ]); - let extranonce = B032::Owned(vec![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, - 0, 0, - ]); - let submit_solution_replace = ReplaceMessage::new( - MessageDirection::ToUpstream, - MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS, - AnyMessage::JobDeclaration(parsers_sv2::JobDeclaration::PushSolution(PushSolution { - ntime: 0, - nbits: 0, - nonce: 0, - version: 0, - prev_hash, - extranonce, - })), - ); - - // This sniffer sits between `jds` and `jdc`, replacing `ProvideMissingTransactionSuccess` - // with `SubmitSolution`. 
- let (sniffer_a, sniffer_a_addr) = start_sniffer( - "A", - jds_addr, - false, - vec![submit_solution_replace.into()], - None, - ); - let (_jdc, jdc_addr) = start_jdc(&[(pool_addr, sniffer_a_addr)], tp_addr_2); - let (_translator, tproxy_addr) = start_sv2_translator(jdc_addr).await; - let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; - assert!(tp_2.fund_wallet().is_ok()); - assert!(tp_2.create_mempool_transaction().is_ok()); - sniffer_a - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; - sniffer_a - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; - sniffer_a - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN, - ) - .await; - sniffer_a - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN_SUCCESS, - ) - .await; - sniffer_a - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_DECLARE_MINING_JOB, - ) - .await; - sniffer_a - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS, - ) - .await; - sniffer_a - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_PUSH_SOLUTION) - .await; - assert!(tokio::net::TcpListener::bind(jds_addr).await.is_err()); -} - -// This test ensures that JDS does not exit upon receiving a `ProvideMissingTransactionsSuccess` -// message containing a transaction set that differs from the `tx_short_hash_list` -// in the Declare Mining Job. 
-// -// It is performing the verification by connecting to JDS after the message exchange -// to check whether it remains alive -#[tokio::test] -async fn jds_wont_exit_upon_receiving_unexpected_txids_in_provide_missing_transaction_success() { - start_tracing(); - let (tp_1, tp_addr_1) = start_template_provider(None, DifficultyLevel::Low); - let (tp_2, tp_addr_2) = start_template_provider(None, DifficultyLevel::Low); - - assert!(tp_2.fund_wallet().is_ok()); - assert!(tp_2.create_mempool_transaction().is_ok()); - - let (_pool, pool_addr) = start_pool(Some(tp_addr_1)).await; - let (_jds, jds_addr) = start_jds(tp_1.rpc_info()); - - let provide_missing_transaction_success_replace = ReplaceMessage::new( - MessageDirection::ToUpstream, - MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS, - AnyMessage::JobDeclaration( - parsers_sv2::JobDeclaration::ProvideMissingTransactionsSuccess( - ProvideMissingTransactionsSuccess { - request_id: 1, - transaction_list: Seq064K::new(Vec::new()).unwrap(), - }, - ), - ), - ); - - // This sniffer sits between `jds` and `jdc`, replacing `ProvideMissingTransactionSuccess` - // with `ProvideMissingTransactionSuccess` with different transaction list. 
- let (sniffer, sniffer_addr) = start_sniffer( - "A", - jds_addr, - false, - vec![provide_missing_transaction_success_replace.into()], - None, - ); - - let (_, jdc_addr_1) = start_jdc(&[(pool_addr, sniffer_addr)], tp_addr_2); - let (_translator, tproxy_addr) = start_sv2_translator(jdc_addr_1).await; - let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; - - sniffer - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN_SUCCESS, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_DECLARE_MINING_JOB, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS, - ) - .await; - - assert!(tokio::net::TcpListener::bind(jds_addr).await.is_err()); -} diff --git a/test/integration-tests/tests/jd_provide_missing_transaction.rs b/test/integration-tests/tests/jd_provide_missing_transaction.rs deleted file mode 100644 index f8201292d3..0000000000 --- a/test/integration-tests/tests/jd_provide_missing_transaction.rs +++ /dev/null @@ -1,41 +0,0 @@ -use integration_tests_sv2::{interceptor::MessageDirection, template_provider::DifficultyLevel, *}; -use stratum_apps::stratum_core::job_declaration_sv2::*; - -#[tokio::test] -async fn jds_ask_for_missing_transactions() { - start_tracing(); - let (tp_1, tp_addr_1) = start_template_provider(None, DifficultyLevel::Low); - let (tp_2, tp_addr_2) = 
start_template_provider(None, DifficultyLevel::Low); - let (_pool, pool_addr) = start_pool(Some(tp_addr_1)).await; - let (_jds, jds_addr) = start_jds(tp_1.rpc_info()); - let (sniffer, sniffer_addr) = start_sniffer("A", jds_addr, false, vec![], None); - let (_jdc, jdc_addr) = start_jdc(&[(pool_addr, sniffer_addr)], tp_addr_2); - let (_translator, tproxy_addr) = start_sv2_translator(jdc_addr).await; - let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; - assert!(tp_2.fund_wallet().is_ok()); - assert!(tp_2.create_mempool_transaction().is_ok()); - sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_DECLARE_MINING_JOB, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_DECLARE_MINING_JOB_SUCCESS, - ) - .await; -} diff --git a/test/integration-tests/tests/jd_tproxy_integration.rs b/test/integration-tests/tests/jd_tproxy_integration.rs deleted file mode 100644 index b3671ecbe1..0000000000 --- a/test/integration-tests/tests/jd_tproxy_integration.rs +++ /dev/null @@ -1,48 +0,0 @@ -use integration_tests_sv2::{interceptor::MessageDirection, template_provider::DifficultyLevel, *}; -use stratum_apps::stratum_core::{common_messages_sv2::*, mining_sv2::*}; - -#[tokio::test] -async fn jd_tproxy_integration() { - start_tracing(); - let (tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let (_pool, pool_addr) = start_pool(Some(tp_addr)).await; - let (jdc_pool_sniffer, jdc_pool_sniffer_addr) = - start_sniffer("0", pool_addr, false, vec![], None); - let (_jds, jds_addr) = start_jds(tp.rpc_info()); - let (_jdc, jdc_addr) = start_jdc(&[(jdc_pool_sniffer_addr, jds_addr)], tp_addr); - let 
(_translator, tproxy_addr) = start_sv2_translator(jdc_addr).await; - let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; - jdc_pool_sniffer - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; - jdc_pool_sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; - jdc_pool_sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL, - ) - .await; - jdc_pool_sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL_SUCCESS, - ) - .await; - jdc_pool_sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, - ) - .await; - jdc_pool_sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS, - ) - .await; -} diff --git a/test/integration-tests/tests/jdc_block_propagation.rs b/test/integration-tests/tests/jdc_block_propagation.rs deleted file mode 100644 index 962116b02e..0000000000 --- a/test/integration-tests/tests/jdc_block_propagation.rs +++ /dev/null @@ -1,38 +0,0 @@ -use integration_tests_sv2::{ - interceptor::{IgnoreMessage, MessageDirection}, - template_provider::DifficultyLevel, - *, -}; -use stratum_apps::stratum_core::{job_declaration_sv2::*, template_distribution_sv2::*}; - -// Block propagated from JDC to TP -#[tokio::test] -async fn propagated_from_jdc_to_tp() { - start_tracing(); - let (tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let current_block_hash = tp.get_best_block_hash().unwrap(); - let (_pool, pool_addr) = start_pool(Some(tp_addr)).await; - let (_jds, jds_addr) = start_jds(tp.rpc_info()); - let ignore_push_solution = - IgnoreMessage::new(MessageDirection::ToUpstream, MESSAGE_TYPE_PUSH_SOLUTION); - let (jdc_jds_sniffer, jdc_jds_sniffer_addr) = start_sniffer( - "0", - jds_addr, - false, 
- vec![ignore_push_solution.into()], - None, - ); - let (jdc_tp_sniffer, jdc_tp_sniffer_addr) = start_sniffer("1", tp_addr, false, vec![], None); - let (_jdc, jdc_addr) = start_jdc(&[(pool_addr, jdc_jds_sniffer_addr)], jdc_tp_sniffer_addr); - let (_translator, tproxy_addr) = start_sv2_translator(jdc_addr).await; - let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; - jdc_tp_sniffer - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SUBMIT_SOLUTION) - .await; - jdc_jds_sniffer - .assert_message_not_present(MessageDirection::ToUpstream, MESSAGE_TYPE_PUSH_SOLUTION) - .await; - let new_block_hash = tp.get_best_block_hash().unwrap(); - tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; - assert_ne!(current_block_hash, new_block_hash); -} diff --git a/test/integration-tests/tests/jdc_receives_submit_shares_success.rs b/test/integration-tests/tests/jdc_receives_submit_shares_success.rs deleted file mode 100644 index 4581415c49..0000000000 --- a/test/integration-tests/tests/jdc_receives_submit_shares_success.rs +++ /dev/null @@ -1,22 +0,0 @@ -use integration_tests_sv2::{interceptor::MessageDirection, template_provider::DifficultyLevel, *}; -use stratum_apps::stratum_core::mining_sv2::*; - -#[tokio::test] -async fn jdc_submit_shares_success() { - start_tracing(); - let (tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let (_pool, pool_addr) = start_pool(Some(tp_addr)).await; - let (sniffer, sniffer_addr) = start_sniffer("0", pool_addr, false, vec![], None); - let (_jds, jds_addr) = start_jds(tp.rpc_info()); - let (_jdc, jdc_addr) = start_jdc(&[(sniffer_addr, jds_addr)], tp_addr); - let (_translator, tproxy_addr) = start_sv2_translator(jdc_addr).await; - let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; - - // make sure sure JDC gets a share acknowledgement - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - 
MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS, - ) - .await; -} diff --git a/test/integration-tests/tests/jds_block_propagation.rs b/test/integration-tests/tests/jds_block_propagation.rs deleted file mode 100644 index 75b0236992..0000000000 --- a/test/integration-tests/tests/jds_block_propagation.rs +++ /dev/null @@ -1,37 +0,0 @@ -use integration_tests_sv2::{ - interceptor::{IgnoreMessage, MessageDirection}, - template_provider::DifficultyLevel, - *, -}; -use stratum_apps::stratum_core::{job_declaration_sv2::*, template_distribution_sv2::*}; - -// Block propagated from JDS to TP -#[tokio::test] -async fn propagated_from_jds_to_tp() { - start_tracing(); - let (tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let current_block_hash = tp.get_best_block_hash().unwrap(); - let (_pool, pool_addr) = start_pool(Some(tp_addr)).await; - let (_jds, jds_addr) = start_jds(tp.rpc_info()); - let (jdc_jds_sniffer, jdc_jds_sniffer_addr) = start_sniffer("0", jds_addr, false, vec![], None); - let ignore_submit_solution = - IgnoreMessage::new(MessageDirection::ToUpstream, MESSAGE_TYPE_SUBMIT_SOLUTION); - let (jdc_tp_sniffer, jdc_tp_sniffer_addr) = start_sniffer( - "1", - tp_addr, - false, - vec![ignore_submit_solution.into()], - None, - ); - let (_jdc, jdc_addr) = start_jdc(&[(pool_addr, jdc_jds_sniffer_addr)], jdc_tp_sniffer_addr); - let (_translator, tproxy_addr) = start_sv2_translator(jdc_addr).await; - let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; - jdc_jds_sniffer - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_PUSH_SOLUTION) - .await; - jdc_tp_sniffer - .assert_message_not_present(MessageDirection::ToUpstream, MESSAGE_TYPE_SUBMIT_SOLUTION) - .await; - let new_block_hash = tp.get_best_block_hash().unwrap(); - assert_ne!(current_block_hash, new_block_hash); -} diff --git a/test/integration-tests/tests/pool_integration.rs b/test/integration-tests/tests/pool_integration.rs deleted file mode 100644 index 
af078f13ac..0000000000 --- a/test/integration-tests/tests/pool_integration.rs +++ /dev/null @@ -1,225 +0,0 @@ -// This file contains integration tests for the `PoolSv2` module. -// -// `PoolSv2` is a module that implements the Pool role in the Stratum V2 protocol. -use integration_tests_sv2::{interceptor::MessageDirection, template_provider::DifficultyLevel, *}; -use stratum_apps::stratum_core::{ - common_messages_sv2::{Protocol, SetupConnection, *}, - mining_sv2::*, - parsers_sv2::{AnyMessage, CommonMessages, Mining, TemplateDistribution}, - template_distribution_sv2::*, -}; - -// This test starts a Template Provider and a Pool, and checks if they exchange the correct -// messages upon connection. -// The Sniffer is used as a proxy between the Upstream(Template Provider) and Downstream(Pool). The -// Pool will connect to the Sniffer, and the Sniffer will connect to the Template Provider. -#[tokio::test] -async fn success_pool_template_provider_connection() { - start_tracing(); - let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let (sniffer, sniffer_addr) = start_sniffer("", tp_addr, true, vec![], None); - let _ = start_pool(Some(sniffer_addr)).await; - // here we assert that the downstream(pool in this case) have sent `SetupConnection` message - // with the correct parameters, protocol, flags, min_version and max_version. Note that the - // macro can take any number of arguments after the message argument, but the order is - // important where a property should be followed by its value. 
- sniffer - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; - assert_common_message!( - &sniffer.next_message_from_downstream(), - SetupConnection, - protocol, - Protocol::TemplateDistributionProtocol, - flags, - 0, - min_version, - 2, - max_version, - 2 - ); - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; - assert_common_message!( - &sniffer.next_message_from_upstream(), - SetupConnectionSuccess - ); - sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_COINBASE_OUTPUT_CONSTRAINTS, - ) - .await; - assert_tp_message!( - &sniffer.next_message_from_downstream(), - CoinbaseOutputConstraints - ); - sniffer - .wait_for_message_type(MessageDirection::ToDownstream, MESSAGE_TYPE_NEW_TEMPLATE) - .await; - assert_tp_message!(&sniffer.next_message_from_upstream(), NewTemplate); - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SET_NEW_PREV_HASH, - ) - .await; - assert_tp_message!(sniffer.next_message_from_upstream(), SetNewPrevHash); -} - -// This test starts a Template Provider, a Pool, and a Translator Proxy, and verifies the -// correctness of the exchanged messages during connection and operation. -// -// Two Sniffers are used: -// - Between the Template Provider and the Pool. -// - Between the Pool and the Translator Proxy. -// -// The test ensures that: -// - The Template Provider sends valid `SetNewPrevHash` and `NewTemplate` messages. -// - The `minntime` field in the second `NewExtendedMiningJob` message sent to the Translator Proxy -// matches the `header_timestamp` from the `SetNewPrevHash` message, addressing a bug that -// occurred with non-future jobs. 
-// -// Related issue: https://github.com/stratum-mining/stratum/issues/1324 -#[tokio::test] -async fn header_timestamp_value_assertion_in_new_extended_mining_job() { - start_tracing(); - let sv2_interval = Some(5); - let (tp, tp_addr) = start_template_provider(sv2_interval, DifficultyLevel::Low); - tp.fund_wallet().unwrap(); - let tp_pool_sniffer_identifier = - "header_timestamp_value_assertion_in_new_extended_mining_job tp_pool sniffer"; - let (tp_pool_sniffer, tp_pool_sniffer_addr) = - start_sniffer(tp_pool_sniffer_identifier, tp_addr, false, vec![], None); - let (_pool, pool_addr) = start_pool(Some(tp_pool_sniffer_addr)).await; - let pool_translator_sniffer_identifier = - "header_timestamp_value_assertion_in_new_extended_mining_job pool_translator sniffer"; - let (pool_translator_sniffer, pool_translator_sniffer_addr) = start_sniffer( - pool_translator_sniffer_identifier, - pool_addr, - false, - vec![ - // Block SubmitSharesExtended messages to prevent regtest blocks from being mined - integration_tests_sv2::interceptor::IgnoreMessage::new( - integration_tests_sv2::interceptor::MessageDirection::ToUpstream, - MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, - ) - .into(), - ], - None, - ); - let (_tproxy, tproxy_addr) = start_sv2_translator(pool_translator_sniffer_addr).await; - let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; - - tp_pool_sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; - assert_common_message!( - &tp_pool_sniffer.next_message_from_upstream(), - SetupConnectionSuccess - ); - // Wait for a NewTemplate message from the Template Provider - tp_pool_sniffer - .wait_for_message_type(MessageDirection::ToDownstream, MESSAGE_TYPE_NEW_TEMPLATE) - .await; - assert_tp_message!(&tp_pool_sniffer.next_message_from_upstream(), NewTemplate); - // Extract header timestamp from SetNewPrevHash message - let header_timestamp_to_check = match 
tp_pool_sniffer.next_message_from_upstream() { - Some((_, AnyMessage::TemplateDistribution(TemplateDistribution::SetNewPrevHash(msg)))) => { - msg.header_timestamp - } - _ => panic!("SetNewPrevHash not found!"), - }; - pool_translator_sniffer - .wait_for_message_type_and_clean_queue( - MessageDirection::ToDownstream, - MESSAGE_TYPE_MINING_SET_NEW_PREV_HASH, - ) - .await; - - // create a mempool transaction to force TP to send a non-future NewTemplate - tp.create_mempool_transaction().unwrap(); - - // Wait for a second NewExtendedMiningJob message - pool_translator_sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_NEW_EXTENDED_MINING_JOB, - ) - .await; - // Extract min_ntime from the second NewExtendedMiningJob message - let second_job_ntime = match pool_translator_sniffer.next_message_from_upstream() { - Some((_, AnyMessage::Mining(Mining::NewExtendedMiningJob(job)))) => { - job.min_ntime.into_inner() - } - _ => panic!("Second NewExtendedMiningJob not found!"), - }; - // Assert that min_ntime matches header_timestamp - assert_eq!( - second_job_ntime, - Some(header_timestamp_to_check), - "The `minntime` field of the second NewExtendedMiningJob does not match the `header_timestamp`!" - ); -} - -// This test starts a Pool, a Sniffer, and a Sv2 Mining Device. It then checks if the Pool receives -// a share from the Sv2 Mining Device. While also checking all the messages exchanged between the -// Pool and the Mining Device in between. 
-#[tokio::test] -async fn pool_standard_channel_receives_share() { - start_tracing(); - let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let (_pool, pool_addr) = start_pool(Some(tp_addr)).await; - let (sniffer, sniffer_addr) = start_sniffer("A", pool_addr, false, vec![], None); - start_mining_device_sv2(sniffer_addr, None, None, None, 1, None, true); - sniffer - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL, - ) - .await; - - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL_SUCCESS, - ) - .await; - sniffer - .wait_for_message_type(MessageDirection::ToDownstream, MESSAGE_TYPE_NEW_MINING_JOB) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_MINING_SET_NEW_PREV_HASH, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_SUBMIT_SHARES_STANDARD, - ) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS, - ) - .await; -} diff --git a/test/integration-tests/tests/sniffer_integration.rs b/test/integration-tests/tests/sniffer_integration.rs deleted file mode 100644 index a840d71535..0000000000 --- a/test/integration-tests/tests/sniffer_integration.rs +++ /dev/null @@ -1,136 +0,0 @@ -// This file contains integration tests for the `Sniffer` module. 
-use integration_tests_sv2::{ - interceptor::{IgnoreMessage, MessageDirection, ReplaceMessage}, - template_provider::DifficultyLevel, - *, -}; -use std::convert::TryInto; -use stratum_apps::stratum_core::{ - common_messages_sv2::{Protocol, SetupConnection, SetupConnectionSuccess, *}, - parsers_sv2::{AnyMessage, CommonMessages}, - template_distribution_sv2::*, -}; - -// This test aims to assert that Sniffer is able to intercept and replace/ignore messages. -// TP -> sniffer_a -> sniffer_b -> Pool -#[tokio::test] -async fn test_sniffer_interception() { - start_tracing(); - let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let ignore_message = - IgnoreMessage::new(MessageDirection::ToDownstream, MESSAGE_TYPE_NEW_TEMPLATE); - let setup_connection_message = - AnyMessage::Common(CommonMessages::SetupConnection(SetupConnection { - protocol: Protocol::TemplateDistributionProtocol, - min_version: 2, - max_version: 2, - flags: 0, - endpoint_host: b"0.0.0.0".to_vec().try_into().unwrap(), - endpoint_port: 8081, - vendor: b"Bitmain".to_vec().try_into().unwrap(), - hardware_version: b"901".to_vec().try_into().unwrap(), - firmware: b"abcX".to_vec().try_into().unwrap(), - device_id: b"89567".to_vec().try_into().unwrap(), - })); - let setup_connection_replacement = ReplaceMessage::new( - MessageDirection::ToUpstream, - MESSAGE_TYPE_SETUP_CONNECTION, - setup_connection_message, - ); - let setup_connection_error_message = AnyMessage::Common( - CommonMessages::SetupConnectionSuccess(SetupConnectionSuccess { - flags: 0, - used_version: 0, - }), - ); - let setup_connection_success_replacement = ReplaceMessage::new( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - setup_connection_error_message, - ); - let (sniffer_a, sniffer_a_addr) = start_sniffer( - "A", - tp_addr, - false, - vec![ - setup_connection_success_replacement.into(), - ignore_message.into(), - ], - None, - ); - let (sniffer_b, sniffer_b_addr) = start_sniffer( - "B", - 
sniffer_a_addr, - false, - vec![setup_connection_replacement.into()], - None, - ); - let _ = start_pool(Some(sniffer_b_addr)).await; - sniffer_a - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; - assert_common_message!( - &sniffer_a.next_message_from_downstream(), - SetupConnection, - protocol, - Protocol::TemplateDistributionProtocol, - flags, - 0, - min_version, - 2, - max_version, - 2, - endpoint_host, - "0.0.0.0".to_string().into_bytes().try_into().unwrap(), - endpoint_port, - 8081, - vendor, - "Bitmain".to_string().into_bytes().try_into().unwrap() - ); - sniffer_b - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; - assert_common_message!( - &sniffer_b.next_message_from_upstream(), - SetupConnectionSuccess, - used_version, - 0, - flags, - 0 - ); - assert!( - !(sniffer_b - .includes_message_type(MessageDirection::ToDownstream, MESSAGE_TYPE_NEW_TEMPLATE)) - ); -} - -#[tokio::test] -async fn test_sniffer_wait_for_message_type_with_remove() { - start_tracing(); - let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let (sniffer, sniffer_addr) = start_sniffer("", tp_addr, false, vec![], None); - let _ = start_pool(Some(sniffer_addr)).await; - assert!( - sniffer - .wait_for_message_type_and_clean_queue( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SET_NEW_PREV_HASH, - ) - .await - ); - assert!( - !(sniffer.includes_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS - )) - ); - assert!( - !(sniffer.includes_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SET_NEW_PREV_HASH - )) - ); -} diff --git a/test/integration-tests/tests/sv1.rs b/test/integration-tests/tests/sv1.rs deleted file mode 100644 index 4529934fd0..0000000000 --- a/test/integration-tests/tests/sv1.rs +++ /dev/null @@ -1,25 +0,0 @@ -#![cfg(feature = "sv1")] -use 
integration_tests_sv2::{template_provider::DifficultyLevel, *}; -use interceptor::MessageDirection; - -#[tokio::test] -async fn test_basic_sv1() { - start_tracing(); - let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let (_pool, pool_addr) = start_pool(Some(tp_addr)).await; - let (_, tproxy_addr) = start_sv2_translator(pool_addr).await; - let (sniffer_sv1, sniffer_sv1_addr) = start_sv1_sniffer(tproxy_addr); - let (_minerd_process, _minerd_addr) = start_minerd(sniffer_sv1_addr, None, None, false).await; - sniffer_sv1 - .wait_for_message(&["mining.subscribe"], MessageDirection::ToUpstream) - .await; - sniffer_sv1 - .wait_for_message(&["mining.authorize"], MessageDirection::ToUpstream) - .await; - sniffer_sv1 - .wait_for_message(&["mining.set_difficulty"], MessageDirection::ToDownstream) - .await; - sniffer_sv1 - .wait_for_message(&["mining.notify"], MessageDirection::ToDownstream) - .await; -} diff --git a/test/integration-tests/tests/sv2_mining_device.rs b/test/integration-tests/tests/sv2_mining_device.rs deleted file mode 100644 index 17da146679..0000000000 --- a/test/integration-tests/tests/sv2_mining_device.rs +++ /dev/null @@ -1,20 +0,0 @@ -use integration_tests_sv2::{interceptor::MessageDirection, template_provider::DifficultyLevel, *}; -use stratum_apps::stratum_core::common_messages_sv2::*; - -#[tokio::test] -async fn sv2_mining_device_and_pool_success() { - start_tracing(); - let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let (_pool, pool_addr) = start_pool(Some(tp_addr)).await; - let (sniffer, sniffer_addr) = start_sniffer("A", pool_addr, false, vec![], None); - start_mining_device_sv2(sniffer_addr, None, None, None, 1, None, true); - sniffer - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; - sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; -} diff --git 
a/test/integration-tests/tests/template_provider_integration.rs b/test/integration-tests/tests/template_provider_integration.rs deleted file mode 100644 index 78615bd2ee..0000000000 --- a/test/integration-tests/tests/template_provider_integration.rs +++ /dev/null @@ -1,25 +0,0 @@ -use integration_tests_sv2::{template_provider::DifficultyLevel, *}; - -#[tokio::test] -async fn tp_low_diff() { - let (tp, _) = start_template_provider(None, DifficultyLevel::Low); - let blockchain_info = tp.get_blockchain_info().unwrap(); - assert_eq!(blockchain_info.difficulty, 4.6565423739069247e-10); - assert_eq!(blockchain_info.chain, "regtest"); -} - -#[tokio::test] -async fn tp_mid_diff() { - let (tp, _) = start_template_provider(None, DifficultyLevel::Mid); - let blockchain_info = tp.get_blockchain_info().unwrap(); - assert_eq!(blockchain_info.difficulty, 0.001126515290698186); - assert_eq!(blockchain_info.chain, "signet"); -} - -#[tokio::test] -async fn tp_high_diff() { - let (tp, _) = start_template_provider(None, DifficultyLevel::High); - let blockchain_info = tp.get_blockchain_info().unwrap(); - assert_eq!(blockchain_info.difficulty, 77761.1123986095); - assert_eq!(blockchain_info.chain, "signet"); -} diff --git a/test/integration-tests/tests/translator_integration.rs b/test/integration-tests/tests/translator_integration.rs deleted file mode 100644 index 55c3542b27..0000000000 --- a/test/integration-tests/tests/translator_integration.rs +++ /dev/null @@ -1,51 +0,0 @@ -// This file contains integration tests for the `TranslatorSv2` module. -use integration_tests_sv2::{interceptor::MessageDirection, template_provider::DifficultyLevel, *}; -use stratum_apps::stratum_core::{common_messages_sv2::*, mining_sv2::*}; - -// This test runs an sv2 translator between an sv1 mining device and a pool. the connection between -// the translator and the pool is intercepted by a sniffer. The test checks if the translator and -// the pool exchange the correct messages upon connection. 
And that the miner is able to submit -// shares. -#[tokio::test] -async fn translate_sv1_to_sv2_successfully() { - start_tracing(); - let (_tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); - let (_pool, pool_addr) = start_pool(Some(tp_addr)).await; - let (pool_translator_sniffer, pool_translator_sniffer_addr) = - start_sniffer("0", pool_addr, false, vec![], None); - let (_, tproxy_addr) = start_sv2_translator(pool_translator_sniffer_addr).await; - let (_minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; - pool_translator_sniffer - .wait_for_message_type(MessageDirection::ToUpstream, MESSAGE_TYPE_SETUP_CONNECTION) - .await; - pool_translator_sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, - ) - .await; - pool_translator_sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL, - ) - .await; - pool_translator_sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL_SUCCESS, - ) - .await; - pool_translator_sniffer - .wait_for_message_type( - MessageDirection::ToDownstream, - MESSAGE_TYPE_NEW_EXTENDED_MINING_JOB, - ) - .await; - pool_translator_sniffer - .wait_for_message_type( - MessageDirection::ToUpstream, - MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, - ) - .await; -} diff --git a/utils/Cargo.lock b/utils/Cargo.lock deleted file mode 100644 index b21d533eb1..0000000000 --- a/utils/Cargo.lock +++ /dev/null @@ -1,813 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array", -] - -[[package]] -name = "aes" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" -dependencies = [ - "cfg-if", - "cipher", - "cpufeatures", -] - -[[package]] -name = "aes-gcm" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" -dependencies = [ - "aead", - "aes", - "cipher", - "ctr", - "ghash", - "subtle", -] - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "memchr", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "buffer_sv2" -version = "2.0.0" -dependencies = [ - "aes-gcm", - "criterion", - "generic-array", - "iai", - "rand", -] - -[[package]] -name = "bumpalo" -version = "3.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" - -[[package]] -name = "cast" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" - -[[package]] -name = "cfg-if" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" - -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "bitflags", - "textwrap", - "unicode-width", -] - -[[package]] -name = "cpufeatures" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" -dependencies = [ - "libc", -] - -[[package]] -name = "criterion" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" -dependencies = [ - "atty", - "cast", - "clap", - "criterion-plot", - "csv", - "itertools", - "lazy_static", - "num-traits", - "oorandom", - "plotters", - "rayon", - "regex", - "serde", - "serde_cbor", - "serde_derive", - "serde_json", - "tinytemplate", - "walkdir", -] - -[[package]] -name = "criterion-plot" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" -dependencies = [ - "cast", - "itertools", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "csv" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" -dependencies = [ - "csv-core", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d" -dependencies = [ - "memchr", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher", -] - -[[package]] -name = "either" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "ghash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" -dependencies = [ - "opaque-debug", - "polyval", -] - -[[package]] -name = "half" -version = "1.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "iai" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" - -[[package]] -name = "inout" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" -dependencies = [ - "generic-array", -] - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name = "js-sys" -version = "0.3.81" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" -dependencies = [ - "once_cell", - "wasm-bindgen", -] - -[[package]] -name = "lazy_static" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" - -[[package]] -name = "libc" -version = "0.2.177" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" - -[[package]] -name = "log" -version = "0.4.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" - -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg", -] - -[[package]] -name = "once_cell" -version = "1.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" - -[[package]] -name = "oorandom" -version = "11.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" - -[[package]] -name = "opaque-debug" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" - -[[package]] -name = "plotters" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" -dependencies = [ - "num-traits", - "plotters-backend", - "plotters-svg", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "plotters-backend" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" - -[[package]] -name = "plotters-svg" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" -dependencies = [ - "plotters-backend", -] - -[[package]] -name = "polyval" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug", - "universal-hash", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -dependencies = [ - "zerocopy", -] - -[[package]] -name = "proc-macro2" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rayon" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "regex" -version = "1.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" - -[[package]] -name = "rustversion" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" - -[[package]] -name = "ryu" -version = "1.0.20" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -dependencies = [ - "serde_core", -] - -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.145" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", - "serde_core", -] - -[[package]] -name = "subtle" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" - -[[package]] -name = "syn" -version = "2.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "typenum" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" - -[[package]] -name = "unicode-ident" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" - -[[package]] -name = "unicode-width" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" - -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - -[[package]] -name = "version_check" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "wasi" -version = 
"0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - -[[package]] -name = "wasm-bindgen" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" -dependencies = [ - "cfg-if", - "once_cell", - "rustversion", - "wasm-bindgen-macro", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "web-sys" -version = "0.3.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" - -[[package]] -name = "windows-sys" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -dependencies = [ - "windows-link", -] - -[[package]] -name = "zerocopy" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" -dependencies = [ - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] diff --git a/utils/Cargo.toml b/utils/Cargo.toml deleted file mode 100644 index 0c439c11eb..0000000000 --- a/utils/Cargo.toml +++ /dev/null @@ -1,10 +0,0 
@@ -[workspace] -resolver="2" - -members = [ - "buffer", -] - -exclude = [ - "stratum-message-generator" -] diff --git a/utils/tarpaulin.toml b/utils/tarpaulin.toml deleted file mode 100644 index 8607047202..0000000000 --- a/utils/tarpaulin.toml +++ /dev/null @@ -1,7 +0,0 @@ -[default] -run-types = [ "Lib" ] -timeout = "120s" -fail-under = 0 - -[report] -out = ["Xml"]