diff --git a/protocols/stratum-translation/Cargo.toml b/protocols/stratum-translation/Cargo.toml index 1b4da3f8f4..9454524dd5 100644 --- a/protocols/stratum-translation/Cargo.toml +++ b/protocols/stratum-translation/Cargo.toml @@ -10,8 +10,9 @@ name = "stratum_translation" path = "src/lib.rs" [dependencies] +bitcoin = { version = "0.32.5" } binary_sv2 = { path = "../v2/binary-sv2", version = "^5.0.0" } -mining_sv2 = { path = "../v2/subprotocols/mining", version = "^5.0.0" } +mining_sv2 = { path = "../v2/subprotocols/mining", version = "^6.0.0" } channels_sv2 = { path = "../v2/channels-sv2", version = "^2.0.0" } v1 = { path = "../v1", package = "sv1_api", version = "^2.0.0" } -tracing = "0.1" \ No newline at end of file +tracing = "0.1" diff --git a/protocols/stratum-translation/src/sv1_to_sv2.rs b/protocols/stratum-translation/src/sv1_to_sv2.rs index 23e52cbd08..767b40cddd 100644 --- a/protocols/stratum-translation/src/sv1_to_sv2.rs +++ b/protocols/stratum-translation/src/sv1_to_sv2.rs @@ -1,5 +1,6 @@ use crate::error::{Result, StratumTranslationError}; -use mining_sv2::{OpenExtendedMiningChannel, SubmitSharesExtended, Target}; +use bitcoin::Target; +use mining_sv2::{OpenExtendedMiningChannel, SubmitSharesExtended}; use v1::{client_to_server, utils::HexU32Be}; /// Builds an SV2 `OpenExtendedMiningChannel` message from the provided inputs. 
@@ -28,7 +29,7 @@ pub fn build_sv2_open_extended_mining_channel( .try_into() .map_err(|_| StratumTranslationError::InvalidUserIdentity(user_identity))?, nominal_hash_rate, - max_target: max_target.into(), + max_target: max_target.to_le_bytes().into(), min_extranonce_size, }) } @@ -163,7 +164,7 @@ mod tests { #[test] fn test_build_sv2_open_extended_mining_channel_happy() { - let max_target: Target = [0xffu8; 32].into(); + let max_target = Target::from_le_bytes([0xffu8; 32]); let res = build_sv2_open_extended_mining_channel( 123, "user.worker1".to_string(), @@ -182,7 +183,7 @@ mod tests { #[test] fn test_build_sv2_open_extended_mining_channel_invalid_user() { - let max_target: Target = [0xffu8; 32].into(); + let max_target = Target::from_le_bytes([0xffu8; 32]); // Create a user identity that's too long (> 255 chars) let long_user = "x".repeat(300); let res = build_sv2_open_extended_mining_channel(1, long_user, 1.0, max_target, 8); diff --git a/protocols/stratum-translation/src/sv2_to_sv1.rs b/protocols/stratum-translation/src/sv2_to_sv1.rs index 40c346e6b1..4dd3812b74 100644 --- a/protocols/stratum-translation/src/sv2_to_sv1.rs +++ b/protocols/stratum-translation/src/sv2_to_sv1.rs @@ -9,8 +9,9 @@ //! 
- SV2 difficulty targets to SV1 set_difficulty messages use crate::error::{Result, StratumTranslationError}; -use channels_sv2::{bip141::try_strip_bip141, target::target_to_difficulty}; -use mining_sv2::{NewExtendedMiningJob, SetNewPrevHash, SetTarget, Target}; +use bitcoin::Target; +use channels_sv2::bip141::try_strip_bip141; +use mining_sv2::{NewExtendedMiningJob, SetNewPrevHash, SetTarget}; use tracing::debug; use v1::{ json_rpc, server_to_client, @@ -100,7 +101,14 @@ pub fn build_sv1_notify_from_sv2( pub fn build_sv1_set_difficulty_from_sv2_set_target( set_target: SetTarget<'_>, ) -> Result { - build_sv1_set_difficulty_from_sv2_target(set_target.maximum_target.into()) + build_sv1_set_difficulty_from_sv2_target(Target::from_le_bytes( + set_target + .maximum_target + .clone() + .as_ref() + .try_into() + .unwrap(), + )) } /// Builds an SV1 `mining.set_difficulty` JSON-RPC message from an SV2 target. @@ -111,7 +119,7 @@ pub fn build_sv1_set_difficulty_from_sv2_set_target( /// # Returns /// * `Ok(json_rpc::Message)` - The constructed SV1 mining.set_difficulty message. 
pub fn build_sv1_set_difficulty_from_sv2_target(target: Target) -> Result { - let value = target_to_difficulty(target); + let value = target.difficulty_float(); let set_target = v1::methods::server_to_client::SetDifficulty { value }; Ok(set_target.into()) } @@ -120,10 +128,11 @@ pub fn build_sv1_set_difficulty_from_sv2_target(target: Target) -> Result Target { - [0xffu8; 32].into() + Target::from_le_bytes([0xffu8; 32]) } #[test] @@ -145,7 +154,7 @@ mod tests { fn test_build_sv1_set_difficulty_from_sv2_set_target() { let set_target = Sv2SetTarget { channel_id: 1, - maximum_target: dummy_target().into(), + maximum_target: dummy_target().to_le_bytes().into(), }; let msg = build_sv1_set_difficulty_from_sv2_set_target(set_target) .expect("Should convert SetTarget to difficulty"); diff --git a/protocols/v2/channels-sv2/Cargo.toml b/protocols/v2/channels-sv2/Cargo.toml index 8d08df2dbc..0cfce04567 100644 --- a/protocols/v2/channels-sv2/Cargo.toml +++ b/protocols/v2/channels-sv2/Cargo.toml @@ -16,7 +16,7 @@ keywords = ["stratum", "mining", "bitcoin", "protocol"] [dependencies] binary_sv2 = { path = "../binary-sv2", version = "^5.0.0" } common_messages_sv2 = { path = "../subprotocols/common-messages", version = "^6.0.0" } -mining_sv2 = { path = "../subprotocols/mining", version = "^5.0.0" } +mining_sv2 = { path = "../subprotocols/mining", version = "^6.0.0" } template_distribution_sv2 = { path = "../subprotocols/template-distribution", version = "^4.0.0" } job_declaration_sv2 = { path = "../subprotocols/job-declaration", version = "^5.0.0" } tracing = { version = "0.1"} diff --git a/protocols/v2/channels-sv2/src/client/extended.rs b/protocols/v2/channels-sv2/src/client/extended.rs index feff3fe5a2..d0b8a076de 100644 --- a/protocols/v2/channels-sv2/src/client/extended.rs +++ b/protocols/v2/channels-sv2/src/client/extended.rs @@ -13,7 +13,7 @@ use crate::{ share_accounting::{ShareAccounting, ShareValidationError, ShareValidationResult}, }, merkle_root::merkle_root_from_path, 
- target::{bytes_to_hex, target_to_difficulty, u256_to_block_hash}, + target::{bytes_to_hex, u256_to_block_hash}, MAX_EXTRANONCE_PREFIX_LEN, }; use alloc::{format, string::String, vec, vec::Vec}; @@ -24,11 +24,11 @@ use bitcoin::{ consensus::{serialize, Decodable}, hashes::sha256d::Hash, transaction::Version, - CompactTarget, OutPoint, Sequence, Target as BitcoinTarget, Transaction, TxIn, TxOut, Witness, + CompactTarget, OutPoint, Sequence, Target, Transaction, TxIn, TxOut, Witness, }; use mining_sv2::{ NewExtendedMiningJob, SetCustomMiningJob, SetCustomMiningJobSuccess, - SetNewPrevHash as SetNewPrevHashMp, SubmitSharesExtended, Target, + SetNewPrevHash as SetNewPrevHashMp, SubmitSharesExtended, }; use tracing::debug; @@ -64,7 +64,7 @@ pub struct ExtendedChannel<'a> { user_identity: String, extranonce_prefix: Vec, rollable_extranonce_size: u16, - target: Target, // todo: try to use Target from rust-bitcoin + target: Target, nominal_hashrate: f32, version_rolling: bool, // future jobs are indexed with job_id (u32) @@ -514,22 +514,18 @@ impl<'a> ExtendedChannel<'a> { // convert the header hash to a target type for easy comparison let hash = header.block_hash(); let raw_hash: [u8; 32] = *hash.to_raw_hash().as_ref(); - let hash_as_target: Target = raw_hash.into(); - let hash_as_diff = target_to_difficulty(hash_as_target.clone()); + let block_hash_target = Target::from_le_bytes(raw_hash); + let hash_as_diff = block_hash_target.difficulty_float(); - let network_target = BitcoinTarget::from_compact(nbits); + let network_target = Target::from_compact(nbits); // print hash_as_target and self.target as human readable hex - let hash_as_u256: binary_sv2::U256 = hash_as_target.clone().into(); - let mut hash_bytes = hash_as_u256.to_vec(); - hash_bytes.reverse(); // Convert to big-endian for display - let target_u256: binary_sv2::U256 = self.target.clone().into(); - let mut target_bytes = target_u256.to_vec(); - target_bytes.reverse(); // Convert to big-endian for display + let 
block_hash_target_bytes = block_hash_target.to_be_bytes(); + let target_bytes = self.target.to_be_bytes(); debug!( "share validation \nshare:\t\t{}\nchannel target:\t{}\nnetwork target:\t{}", - bytes_to_hex(&hash_bytes), + bytes_to_hex(&block_hash_target_bytes), bytes_to_hex(&target_bytes), format!("{:x}", network_target) ); @@ -537,7 +533,7 @@ impl<'a> ExtendedChannel<'a> { // check if a block was found if network_target.is_met_by(hash) { self.share_accounting.update_share_accounting( - target_to_difficulty(self.target.clone()) as u64, + self.target.difficulty_float() as u64, share.sequence_number, hash.to_raw_hash(), ); @@ -545,13 +541,13 @@ impl<'a> ExtendedChannel<'a> { } // check if the share hash meets the channel target - if hash_as_target < self.target { + if block_hash_target < self.target { if self.share_accounting.is_share_seen(hash.to_raw_hash()) { return Err(ShareValidationError::DuplicateShare); } self.share_accounting.update_share_accounting( - target_to_difficulty(self.target.clone()) as u64, + self.target.difficulty_float() as u64, share.sequence_number, hash.to_raw_hash(), ); @@ -573,6 +569,7 @@ mod tests { share_accounting::{ShareValidationError, ShareValidationResult}, }; use binary_sv2::Sv2Option; + use bitcoin::Target; use mining_sv2::{ NewExtendedMiningJob, SetNewPrevHash as SetNewPrevHashMp, SubmitSharesExtended, }; @@ -587,7 +584,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let target = [0xff; 32].into(); + let target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 1.0; let version_rolling = true; let rollable_extranonce_size = 4u16; @@ -669,7 +666,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let target = [0xff; 32].into(); + let target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 1.0; let version_rolling = true; let rollable_extranonce_size = 4u16; @@ -742,7 +739,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let target = [0xff; 32].into(); 
+ let target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 1.0; let version_rolling = true; let rollable_extranonce_size = 8u16; @@ -830,12 +827,11 @@ mod tests { ] .to_vec(); // channel target: 0000ffff00000000000000000000000000000000000000000000000000000000 - let target = [ + let target = Target::from_le_bytes([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, - ] - .into(); + ]); let nominal_hashrate = 1.0; let version_rolling = true; let rollable_extranonce_size = 8u16; @@ -926,12 +922,11 @@ mod tests { ] .to_vec(); // channel target: 0000ffff00000000000000000000000000000000000000000000000000000000 - let target = [ + let target = Target::from_le_bytes([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, - ] - .into(); + ]); let nominal_hashrate = 1.0; let version_rolling = true; let rollable_extranonce_size = 8u16; diff --git a/protocols/v2/channels-sv2/src/client/standard.rs b/protocols/v2/channels-sv2/src/client/standard.rs index d0bc8b492a..df56ec2d9d 100644 --- a/protocols/v2/channels-sv2/src/client/standard.rs +++ b/protocols/v2/channels-sv2/src/client/standard.rs @@ -13,7 +13,7 @@ use crate::{ share_accounting::{ShareAccounting, ShareValidationError, ShareValidationResult}, }, merkle_root::merkle_root_from_path, - target::{bytes_to_hex, target_to_difficulty, u256_to_block_hash}, + target::{bytes_to_hex, u256_to_block_hash}, MAX_EXTRANONCE_PREFIX_LEN, }; use alloc::{format, string::String, vec::Vec}; @@ -21,11 +21,10 @@ use binary_sv2::{self, Sv2Option}; use bitcoin::{ blockdata::block::{Header, Version}, hashes::sha256d::Hash, - CompactTarget, Target as BitcoinTarget, + CompactTarget, Target, }; use mining_sv2::{ NewExtendedMiningJob, NewMiningJob, SetNewPrevHash 
as SetNewPrevHashMp, SubmitSharesStandard, - Target, }; use tracing::debug; @@ -313,21 +312,17 @@ impl<'a> StandardChannel<'a> { // convert the header hash to a target type for easy comparison let hash = header.block_hash(); let raw_hash: [u8; 32] = *hash.to_raw_hash().as_ref(); - let hash_as_target: Target = raw_hash.into(); - let hash_as_diff = target_to_difficulty(hash_as_target.clone()); - let network_target = BitcoinTarget::from_compact(nbits); + let block_hash_target = Target::from_le_bytes(raw_hash); + let hash_as_diff = block_hash_target.difficulty_float(); + let network_target = Target::from_compact(nbits); // print hash_as_target and self.target as human readable hex - let hash_as_u256: binary_sv2::U256 = hash_as_target.clone().into(); - let mut hash_bytes = hash_as_u256.to_vec(); - hash_bytes.reverse(); // Convert to big-endian for display - let target_u256: binary_sv2::U256 = self.target.clone().into(); - let mut target_bytes = target_u256.to_vec(); - target_bytes.reverse(); // Convert to big-endian for display + let block_hash_target_bytes = block_hash_target.to_be_bytes(); + let target_bytes = self.target.to_be_bytes(); debug!( "share validation \nshare:\t\t{}\nchannel target:\t{}\nnetwork target:\t{}", - bytes_to_hex(&hash_bytes), + bytes_to_hex(&block_hash_target_bytes), bytes_to_hex(&target_bytes), format!("{:x}", network_target) ); @@ -335,7 +330,7 @@ impl<'a> StandardChannel<'a> { // check if a block was found if network_target.is_met_by(hash) { self.share_accounting.update_share_accounting( - target_to_difficulty(self.target.clone()) as u64, + self.target.difficulty_float() as u64, share.sequence_number, hash.to_raw_hash(), ); @@ -343,13 +338,13 @@ impl<'a> StandardChannel<'a> { } // check if the share hash meets the channel target - if hash_as_target < self.target { + if block_hash_target < self.target { if self.share_accounting.is_share_seen(hash.to_raw_hash()) { return Err(ShareValidationError::DuplicateShare); } 
self.share_accounting.update_share_accounting( - target_to_difficulty(self.target.clone()) as u64, + self.target.difficulty_float() as u64, share.sequence_number, hash.to_raw_hash(), ); @@ -371,6 +366,7 @@ mod tests { standard::StandardChannel, }; use binary_sv2::Sv2Option; + use bitcoin::Target; use mining_sv2::{NewMiningJob, SetNewPrevHash as SetNewPrevHashMp, SubmitSharesStandard}; #[test] @@ -382,7 +378,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let target = [0xff; 32].into(); + let target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 1.0; let mut channel = StandardChannel::new( @@ -442,7 +438,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let target = [0xff; 32].into(); + let target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 1.0; let mut channel = StandardChannel::new( @@ -490,7 +486,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let target = [0xff; 32].into(); + let target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 1.0; let mut channel = StandardChannel::new( @@ -559,12 +555,11 @@ mod tests { ] .to_vec(); // channel target: 0000ffff00000000000000000000000000000000000000000000000000000000 - let target = [ + let target = Target::from_le_bytes([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, - ] - .into(); + ]); let nominal_hashrate = 1.0; let mut channel = StandardChannel::new( @@ -636,12 +631,11 @@ mod tests { ] .to_vec(); // channel target: 0000ffff00000000000000000000000000000000000000000000000000000000 - let target = [ + let target = Target::from_le_bytes([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, - ] - .into(); + ]); let nominal_hashrate = 1.0; let mut channel = StandardChannel::new( diff --git 
a/protocols/v2/channels-sv2/src/server/extended.rs b/protocols/v2/channels-sv2/src/server/extended.rs index 198b779014..bd20a0d59f 100644 --- a/protocols/v2/channels-sv2/src/server/extended.rs +++ b/protocols/v2/channels-sv2/src/server/extended.rs @@ -47,17 +47,16 @@ use crate::{ jobs::{extended::ExtendedJob, factory::JobFactory, job_store::JobStore, JobOrigin}, share_accounting::{ShareAccounting, ShareValidationError, ShareValidationResult}, }, - target::{bytes_to_hex, hash_rate_to_target, target_to_difficulty, u256_to_block_hash}, + target::{bytes_to_hex, hash_rate_to_target, u256_to_block_hash}, MAX_EXTRANONCE_PREFIX_LEN, }; -use binary_sv2::{self}; use bitcoin::{ blockdata::block::{Header, Version}, hashes::sha256d::Hash, transaction::TxOut, - CompactTarget, Target as BitcoinTarget, + CompactTarget, Target, }; -use mining_sv2::{SetCustomMiningJob, SubmitSharesExtended, Target}; +use mining_sv2::{SetCustomMiningJob, SubmitSharesExtended} ; use std::{collections::HashMap, convert::TryInto, marker::PhantomData}; use template_distribution_sv2::{NewTemplate, SetNewPrevHash as SetNewPrevHashTdp}; use tracing::debug; @@ -204,17 +203,16 @@ where pool_tag: Option, miner_tag: Option, ) -> Result { - let target_u256 = match hash_rate_to_target(nominal_hashrate.into(), expected_share_per_minute.into()) { - Ok(target_u256) => target_u256, + let target = match hash_rate_to_target(nominal_hashrate.into(), expected_share_per_minute.into()) { + Ok(target) => target, Err(_) => { return Err(ExtendedChannelError::InvalidNominalHashrate); } }; - let target: Target = target_u256.clone().into(); - if target > max_target { + if target > max_target { return Err(ExtendedChannelError::RequestedMaxTargetOutOfRange); } @@ -359,35 +358,33 @@ where new_nominal_hashrate: f32, requested_max_target: Option, ) -> Result<(), ExtendedChannelError> { - let target_u256 = match hash_rate_to_target( + let target = match hash_rate_to_target( new_nominal_hashrate.into(), self.expected_share_per_minute.into(), 
) { - Ok(target_u256) => target_u256, + Ok(target) => target, Err(_) => { return Err(ExtendedChannelError::InvalidNominalHashrate); } }; let requested_max_target = match requested_max_target { - Some(ref requested_max_target) => requested_max_target.clone(), - None => self.requested_max_target.clone(), + Some(ref requested_max_target) => requested_max_target, + None => &self.requested_max_target, }; // debug hex of target_u256 and max_Target // just like in share validation - let mut target_bytes = target_u256.to_vec(); - target_bytes.reverse(); // Convert to big-endian for display - let max_target_u256: binary_sv2::U256 = requested_max_target.clone().into(); - let mut max_target_bytes = max_target_u256.to_vec(); - max_target_bytes.reverse(); // Convert to big-endian for display + // big-endian for display + let target_bytes = target.to_be_bytes(); + let max_target = requested_max_target; + let max_target_bytes = max_target.to_be_bytes(); // Get the old target for comparison on the debug log // Not really needed for the actual method functionality // But it's useful to have for debugging purposes - let old_target_u256: binary_sv2::U256 = self.target.clone().into(); - let mut old_target_bytes = old_target_u256.to_vec(); - old_target_bytes.reverse(); // Convert to big-endian for display + let old_target = self.target; + let old_target_bytes = old_target.to_be_bytes(); debug!( "updating channel target \nold target:\t{}\nnew target:\t{}\nmax_target:\t{}", @@ -396,15 +393,15 @@ where bytes_to_hex(&max_target_bytes) ); - let new_target: Target = target_u256.into(); + let new_target: Target = target; - if new_target > requested_max_target { + if new_target > *requested_max_target { return Err(ExtendedChannelError::RequestedMaxTargetOutOfRange); } self.nominal_hashrate = new_nominal_hashrate; self.target = new_target; - self.requested_max_target = requested_max_target; + self.requested_max_target = *requested_max_target; Ok(()) } @@ -657,22 +654,18 @@ where // convert the 
header hash to a target type for easy comparison let hash = header.block_hash(); let raw_hash: [u8; 32] = *hash.to_raw_hash().as_ref(); - let hash_as_target: Target = raw_hash.into(); - let hash_as_diff = target_to_difficulty(hash_as_target.clone()); + let block_hash_target = Target::from_le_bytes(raw_hash); + let hash_as_diff = block_hash_target.difficulty_float(); - let network_target = BitcoinTarget::from_compact(nbits); + let network_target = Target::from_compact(nbits); // print hash_as_target and self.target as human readable hex - let hash_as_u256: binary_sv2::U256 = hash_as_target.clone().into(); - let mut hash_bytes = hash_as_u256.to_vec(); - hash_bytes.reverse(); // Convert to big-endian for display - let target_u256: binary_sv2::U256 = self.target.clone().into(); - let mut target_bytes = target_u256.to_vec(); - target_bytes.reverse(); // Convert to big-endian for display + let block_hash_target_bytes = block_hash_target.to_be_bytes(); + let target_bytes = self.target.to_be_bytes(); debug!( "share validation \nshare:\t\t{}\nchannel target:\t{}\nnetwork target:\t{}", - bytes_to_hex(&hash_bytes), + bytes_to_hex(&block_hash_target_bytes), bytes_to_hex(&target_bytes), format!("{:x}", network_target) ); @@ -680,7 +673,7 @@ where // check if a block was found if network_target.is_met_by(hash) { self.share_accounting.update_share_accounting( - target_to_difficulty(self.target.clone()) as u64, + self.target.difficulty_float() as u64, share.sequence_number, hash.to_raw_hash(), ); @@ -705,13 +698,13 @@ where } // check if the share hash meets the channel target - if hash_as_target <= self.target { + if block_hash_target <= self.target { if self.share_accounting.is_share_seen(hash.to_raw_hash()) { return Err(ShareValidationError::DuplicateShare); } self.share_accounting.update_share_accounting( - target_to_difficulty(self.target.clone()) as u64, + self.target.difficulty_float() as u64, share.sequence_number, hash.to_raw_hash(), ); @@ -752,8 +745,8 @@ mod tests { }, 
}; use binary_sv2::Sv2Option; - use bitcoin::{transaction::TxOut, Amount, ScriptBuf}; - use mining_sv2::{NewExtendedMiningJob, SubmitSharesExtended, Target}; + use bitcoin::{transaction::TxOut, Amount, ScriptBuf, Target}; + use mining_sv2::{NewExtendedMiningJob, SubmitSharesExtended}; use std::convert::TryInto; use template_distribution_sv2::{NewTemplate, SetNewPrevHash}; @@ -771,7 +764,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let max_target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let expected_share_per_minute = 1.0; let nominal_hashrate = 1.0; let version_rolling_allowed = true; @@ -922,7 +915,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let max_target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let expected_share_per_minute = 1.0; let nominal_hashrate = 1.0; let version_rolling_allowed = true; @@ -1042,7 +1035,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let max_target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let expected_share_per_minute = 1.0; let nominal_hashrate = 1.0; let version_rolling_allowed = true; @@ -1120,7 +1113,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let max_target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let expected_share_per_minute = 1.0; let nominal_hashrate = 1.0; let version_rolling_allowed = true; @@ -1229,7 +1222,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let max_target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let expected_share_per_minute = 1.0; let nominal_hashrate = 100.0; // bigger hashrate to get higher difficulty let version_rolling_allowed = true; @@ -1341,7 +1334,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let max_target = [0xff; 32].into(); + let max_target = 
Target::from_le_bytes([0xff; 32]); let expected_share_per_minute = 1.0; let nominal_hashrate = 1_000.0; // bigger hashrate to get higher difficulty let version_rolling_allowed = true; @@ -1473,14 +1466,14 @@ mod tests { let job_store = DefaultJobStore::new(); // this is the most permissive possible max_target - let max_target: Target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); // Create a channel with initial hashrate let mut channel = ExtendedChannel::new( channel_id, user_identity, extranonce_prefix, - max_target.clone(), + max_target, initial_hashrate, version_rolling_allowed, rollable_extranonce_size, @@ -1498,7 +1491,7 @@ mod tests { // Update the channel with a new hashrate (higher) let new_hashrate = 100.0; channel - .update_channel(new_hashrate, Some(max_target.clone())) + .update_channel(new_hashrate, Some(max_target)) .unwrap(); // Get the new target after update @@ -1513,7 +1506,7 @@ mod tests { assert_eq!(channel.get_nominal_hashrate(), new_hashrate); // Test invalid hashrate (negative) - let result = channel.update_channel(-1.0, Some(max_target.clone())); + let result = channel.update_channel(-1.0, Some(max_target)); assert!(result.is_err()); assert!(matches!( result, @@ -1521,21 +1514,18 @@ mod tests { )); // Create a not so permissive max_target so we can test a target that exceeds it - let not_so_permissive_max_target: Target = [ + let not_so_permissive_max_target = Target::from_le_bytes([ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - ] - .into(); + ]); // Try to update with a hashrate that would result in a target exceeding the max_target // new target: 2492492492492492492492492492492492492492492492492492492492492491 // max target: 00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff let very_small_hashrate = 0.1; - let result = channel.update_channel( - 
very_small_hashrate, - Some(not_so_permissive_max_target.clone()), - ); + let result = + channel.update_channel(very_small_hashrate, Some(not_so_permissive_max_target)); assert!(result.is_err()); assert!(matches!( result, @@ -1558,7 +1548,7 @@ mod tests { let channel_id = 1; let user_identity = "user_identity".to_string(); let extranonce_prefix = [0, 0, 0, 0, 0, 0, 0, 1].to_vec(); - let max_target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let expected_share_per_minute = 1.0; let nominal_hashrate = 1_000.0; let version_rolling_allowed = true; diff --git a/protocols/v2/channels-sv2/src/server/standard.rs b/protocols/v2/channels-sv2/src/server/standard.rs index 7a9ffdc5a1..1ffd3a1a8d 100644 --- a/protocols/v2/channels-sv2/src/server/standard.rs +++ b/protocols/v2/channels-sv2/src/server/standard.rs @@ -42,10 +42,9 @@ use crate::{ }, share_accounting::{ShareAccounting, ShareValidationError, ShareValidationResult}, }, - target::{bytes_to_hex, hash_rate_to_target, target_to_difficulty, u256_to_block_hash}, + target::{bytes_to_hex, hash_rate_to_target, u256_to_block_hash}, MAX_EXTRANONCE_PREFIX_LEN, }; -use binary_sv2::{self}; use bitcoin::{ absolute::LockTime, blockdata::{ @@ -55,9 +54,9 @@ use bitcoin::{ consensus::Encodable, hashes::sha256d::Hash, transaction::{OutPoint, Transaction, TxIn, TxOut, Version as TxVersion}, - CompactTarget, Sequence, Target as BitcoinTarget, + CompactTarget, Sequence, Target, }; -use mining_sv2::{SubmitSharesStandard, Target}; +use mining_sv2::SubmitSharesStandard; use std::{collections::HashMap, convert::TryInto, marker::PhantomData}; use template_distribution_sv2::{NewTemplate, SetNewPrevHash}; use tracing::debug; @@ -198,7 +197,7 @@ where } }; - let target: Target = calculated_target.into(); + let target: Target = calculated_target; if target > requested_max_target { return Err(StandardChannelError::RequestedMaxTargetOutOfRange); @@ -303,35 +302,32 @@ where nominal_hashrate: f32, requested_max_target: 
Option, ) -> Result<(), StandardChannelError> { - let target_u256 = match hash_rate_to_target( + let target = match hash_rate_to_target( nominal_hashrate.into(), self.expected_share_per_minute.into(), ) { - Ok(target_u256) => target_u256, + Ok(target) => target, Err(_) => { return Err(StandardChannelError::InvalidNominalHashrate); } }; let requested_max_target = match requested_max_target { - Some(ref requested_max_target) => requested_max_target.clone(), - None => self.requested_max_target.clone(), + Some(ref requested_max_target) => *requested_max_target, + None => self.requested_max_target, }; // debug hex of target_u256 and max_target // just like in share validation - let mut target_bytes = target_u256.to_vec(); - target_bytes.reverse(); // Convert to big-endian for display - let max_target_u256: binary_sv2::U256 = requested_max_target.clone().into(); - let mut max_target_bytes = max_target_u256.to_vec(); - max_target_bytes.reverse(); // Convert to big-endian for display + // to big-endian for display + let target_bytes = target.to_be_bytes(); + let max_target_bytes = requested_max_target.to_be_bytes(); // Get the old target for comparison on the debug log // Not really needed for the actual method functionality // But it's useful to have for debugging purposes - let old_target_u256: binary_sv2::U256 = self.target.clone().into(); - let mut old_target_bytes = old_target_u256.to_vec(); - old_target_bytes.reverse(); // Convert to big-endian for display + let old_target = self.target; + let old_target_bytes = old_target.to_be_bytes(); debug!( "updating channel target \nold target:\t{}\nnew target:\t{}\nmax_target:\t{}", @@ -340,7 +336,7 @@ where bytes_to_hex(&max_target_bytes) ); - let new_target: Target = target_u256.into(); + let new_target: Target = target; if new_target > requested_max_target { return Err(StandardChannelError::RequestedMaxTargetOutOfRange); @@ -580,21 +576,17 @@ where // convert the header hash to a target type for easy comparison let hash = 
header.block_hash(); let raw_hash: [u8; 32] = *hash.to_raw_hash().as_ref(); - let hash_as_target: Target = raw_hash.into(); - let hash_as_diff = target_to_difficulty(hash_as_target.clone()); - let network_target = BitcoinTarget::from_compact(nbits); + let block_hash_target = Target::from_le_bytes(raw_hash); + let hash_as_diff = block_hash_target.difficulty_float(); + let network_target = Target::from_compact(nbits); // print hash_as_target and self.target as human readable hex - let hash_as_u256: binary_sv2::U256 = hash_as_target.clone().into(); - let mut hash_bytes = hash_as_u256.to_vec(); - hash_bytes.reverse(); // Convert to big-endian for display - let target_u256: binary_sv2::U256 = self.target.clone().into(); - let mut target_bytes = target_u256.to_vec(); - target_bytes.reverse(); // Convert to big-endian for display + let block_hash_target_bytes = block_hash_target.to_be_bytes(); + let target_bytes = self.target.to_be_bytes(); debug!( "share validation \nshare:\t\t{}\nchannel target:\t{}\nnetwork target:\t{}", - bytes_to_hex(&hash_bytes), + bytes_to_hex(&block_hash_target_bytes), bytes_to_hex(&target_bytes), format!("{:x}", network_target) ); @@ -602,7 +594,7 @@ where // check if a block was found if network_target.is_met_by(hash) { self.share_accounting.update_share_accounting( - target_to_difficulty(self.target.clone()) as u64, + self.target.difficulty_float() as u64, share.sequence_number, hash.to_raw_hash(), ); @@ -642,13 +634,13 @@ where } // check if the share hash meets the channel target - if hash_as_target <= self.target { + if block_hash_target <= self.target { if self.share_accounting.is_share_seen(hash.to_raw_hash()) { return Err(ShareValidationError::DuplicateShare); } self.share_accounting.update_share_accounting( - target_to_difficulty(self.target.clone()) as u64, + self.target.difficulty_float() as u64, share.sequence_number, hash.to_raw_hash(), ); @@ -689,8 +681,8 @@ mod tests { }, }; use binary_sv2::Sv2Option; - use 
bitcoin::{transaction::TxOut, Amount, ScriptBuf}; - use mining_sv2::{NewMiningJob, SubmitSharesStandard, Target}; + use bitcoin::{transaction::TxOut, Amount, ScriptBuf, Target}; + use mining_sv2::{NewMiningJob, SubmitSharesStandard}; use std::convert::TryInto; use template_distribution_sv2::{NewTemplate, SetNewPrevHash as SetNewPrevHashTdp}; @@ -710,7 +702,7 @@ mod tests { ] .to_vec(); - let max_target: Target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 10.0; let share_batch_size = 100; let expected_share_per_minute = 1.0; @@ -837,7 +829,7 @@ mod tests { ] .to_vec(); - let max_target: Target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 10.0; let share_batch_size = 100; let expected_share_per_minute = 1.0; @@ -941,7 +933,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let max_target: Target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 1.0; let share_batch_size = 100; let expected_share_per_minute = 1.0; @@ -1047,7 +1039,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let max_target: Target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 100.0; // bigger hashrate to get higher difficulty let share_batch_size = 100; let expected_share_per_minute = 1.0; @@ -1156,7 +1148,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let max_target: Target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let nominal_hashrate = 1_000.0; // bigger hashrate to get higher difficulty let share_batch_size = 100; let expected_share_per_minute = 1.0; @@ -1263,14 +1255,14 @@ mod tests { let share_batch_size = 100; let job_store = DefaultJobStore::::new(); // this is the most permissive possible max_target - let max_target: Target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); 
// Create a channel with initial hashrate let mut channel = StandardChannel::new( channel_id, user_identity, extranonce_prefix, - max_target.clone(), + max_target, initial_hashrate, share_batch_size, expected_share_per_minute, @@ -1286,7 +1278,7 @@ mod tests { // Update the channel with a new hashrate (higher) let new_hashrate = 100.0; channel - .update_channel(new_hashrate, Some(max_target.clone())) + .update_channel(new_hashrate, Some(max_target)) .unwrap(); // Get the new target after update @@ -1301,7 +1293,7 @@ mod tests { assert_eq!(channel.get_nominal_hashrate(), new_hashrate); // Test invalid hashrate (negative) - let result = channel.update_channel(-1.0, Some(max_target.clone())); + let result = channel.update_channel(-1.0, Some(max_target)); assert!(result.is_err()); assert!(matches!( result, @@ -1309,21 +1301,18 @@ mod tests { )); // Create a not so permissive max_target so we can test a target that exceeds it - let not_so_permissive_max_target: Target = [ + let not_so_permissive_max_target = Target::from_le_bytes([ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - ] - .into(); + ]); // Try to update with a hashrate that would result in a target exceeding the max_target // new target: 2492492492492492492492492492492492492492492492492492492492492491 // max target: 00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff let very_small_hashrate = 0.1; - let result = channel.update_channel( - very_small_hashrate, - Some(not_so_permissive_max_target.clone()), - ); + let result = + channel.update_channel(very_small_hashrate, Some(not_so_permissive_max_target)); assert!(result.is_err()); assert!(matches!( result, @@ -1350,7 +1339,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ] .to_vec(); - let max_target = [0xff; 32].into(); + let max_target = Target::from_le_bytes([0xff; 32]); let 
expected_share_per_minute = 1.0; let nominal_hashrate = 1_000.0; let share_batch_size = 100; diff --git a/protocols/v2/channels-sv2/src/target.rs b/protocols/v2/channels-sv2/src/target.rs index 305b25c530..abd233df1b 100644 --- a/protocols/v2/channels-sv2/src/target.rs +++ b/protocols/v2/channels-sv2/src/target.rs @@ -3,41 +3,9 @@ extern crate alloc; use alloc::string::String; use binary_sv2::U256; -use bitcoin::{hash_types::BlockHash, hashes::Hash}; +use bitcoin::{hash_types::BlockHash, hashes::Hash, Target}; use core::{cmp::max, fmt::Write, ops::Div}; -use mining_sv2::Target; use primitive_types::U256 as U256Primitive; -/// Converts a `Target` to a `f64` difficulty. -pub fn target_to_difficulty(target: Target) -> f64 { - // Genesis block target: 0x00000000ffff0000000000000000000000000000000000000000000000000000 - // (in little endian) - let max_target_bytes = [ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, - 0x00, 0x00, - ]; - let max_target = U256Primitive::from_little_endian(&max_target_bytes); - - // Convert input target to U256Primitive - let target_u256: U256<'static> = target.into(); - let mut target_bytes = [0u8; 32]; - target_bytes.copy_from_slice(target_u256.inner_as_ref()); - let target = U256Primitive::from_little_endian(&target_bytes); - - // Calculate difficulty = max_target / target - // We need to handle the full 256-bit values properly - // Convert to f64 by taking the ratio of the most significant bits - let max_target_high = (max_target >> 128).low_u128() as f64; - let max_target_low = max_target.low_u128() as f64; - let target_high = (target >> 128).low_u128() as f64; - let target_low = target.low_u128() as f64; - - // Combine high and low parts with appropriate scaling - let max_target_f64 = max_target_high * (2.0f64.powi(128)) + max_target_low; - let target_f64 = target_high * (2.0f64.powi(128)) + 
target_low; - - max_target_f64 / target_f64 -} /// Converts a `u256` to a [`BlockHash`] type. pub fn u256_to_block_hash(v: U256<'static>) -> BlockHash { @@ -109,7 +77,7 @@ pub fn bytes_to_hex(bytes: &[u8]) -> String { pub fn hash_rate_to_target( hashrate: f64, share_per_min: f64, -) -> Result, HashRateToTargetError> { +) -> Result { // checks that we are not dividing by zero if share_per_min == 0.0 { return Err(HashRateToTargetError::DivisionByZero); @@ -146,9 +114,9 @@ pub fn hash_rate_to_target( h_times_s_array[16..].copy_from_slice(&h_times_s.to_be_bytes()); let numerator = two_to_256_minus_one - U256Primitive::from_big_endian(h_times_s_array.as_ref()); - let mut target = numerator.div(denominator).to_big_endian(); - target.reverse(); - Ok(U256::<'static>::from(target)) + let mut target_bytes = numerator.div(denominator).to_big_endian(); + target_bytes.reverse(); + Ok(Target::from_le_bytes(target_bytes)) } /// Converts a `u128` to a [`U256`]. diff --git a/protocols/v2/channels-sv2/src/vardiff/classic.rs b/protocols/v2/channels-sv2/src/vardiff/classic.rs index 587be5cf2c..16d3f080d7 100644 --- a/protocols/v2/channels-sv2/src/vardiff/classic.rs +++ b/protocols/v2/channels-sv2/src/vardiff/classic.rs @@ -1,5 +1,5 @@ use crate::target::hash_rate_from_target; -use mining_sv2::Target; +use bitcoin::Target; use tracing::debug; /// Default minimum hashrate (H/s) if not specified. 
@@ -127,7 +127,7 @@ impl Vardiff for VardiffState { ); let mut new_hashrate = match hash_rate_from_target( - target.clone().into(), + target.to_le_bytes().into(), realized_share_per_min, ) { Ok(hashrate) => hashrate as f32, diff --git a/protocols/v2/channels-sv2/src/vardiff/mod.rs b/protocols/v2/channels-sv2/src/vardiff/mod.rs index ff8a9e6682..0993496b52 100644 --- a/protocols/v2/channels-sv2/src/vardiff/mod.rs +++ b/protocols/v2/channels-sv2/src/vardiff/mod.rs @@ -1,5 +1,5 @@ +use bitcoin::Target; use error::VardiffError; -use mining_sv2::Target; use std::fmt::Debug; pub mod classic; diff --git a/protocols/v2/channels-sv2/src/vardiff/test/mod.rs b/protocols/v2/channels-sv2/src/vardiff/test/mod.rs index eb79b5f918..de2c536731 100644 --- a/protocols/v2/channels-sv2/src/vardiff/test/mod.rs +++ b/protocols/v2/channels-sv2/src/vardiff/test/mod.rs @@ -6,7 +6,7 @@ mod classic; use super::Vardiff; use crate::target::hash_rate_to_target; -use mining_sv2::Target; +use bitcoin::Target; pub const TEST_INITIAL_HASHRATE: f32 = 1000.0; pub const TEST_SHARES_PER_MINUTE: f32 = 10.0; diff --git a/protocols/v2/handlers-sv2/Cargo.toml b/protocols/v2/handlers-sv2/Cargo.toml index be5de4c537..b338a8368e 100644 --- a/protocols/v2/handlers-sv2/Cargo.toml +++ b/protocols/v2/handlers-sv2/Cargo.toml @@ -16,6 +16,6 @@ trait-variant = "0.1.2" parsers_sv2 = { path = "../parsers-sv2", version = "^0.1.0"} binary_sv2 = { path = "../binary-sv2", version = "^5.0.0" } common_messages_sv2 = { path = "../subprotocols/common-messages", version = "^6.0.0" } -mining_sv2 = { path = "../subprotocols/mining", version = "^5.0.0" } +mining_sv2 = { path = "../subprotocols/mining", version = "^6.0.0" } template_distribution_sv2 = { path = "../subprotocols/template-distribution", version = "^4.0.0" } job_declaration_sv2 = { path = "../subprotocols/job-declaration", version = "^5.0.0" } diff --git a/protocols/v2/parsers-sv2/Cargo.toml b/protocols/v2/parsers-sv2/Cargo.toml index 29523032fa..8864e30a0f 100644 --- 
a/protocols/v2/parsers-sv2/Cargo.toml +++ b/protocols/v2/parsers-sv2/Cargo.toml @@ -15,7 +15,7 @@ keywords = ["stratum", "mining", "bitcoin", "protocol"] binary_sv2 = { path = "../binary-sv2", version = "^5.0.0" } framing_sv2 = { path = "../framing-sv2", version = "^5.0.0" } common_messages_sv2 = { path = "../subprotocols/common-messages", version = "^6.0.0" } -mining_sv2 = { path = "../subprotocols/mining", version = "^5.0.0" } +mining_sv2 = { path = "../subprotocols/mining", version = "^6.0.0" } template_distribution_sv2 = { path = "../subprotocols/template-distribution", version = "^4.0.0" } job_declaration_sv2 = { path = "../subprotocols/job-declaration", version = "^5.0.0" } diff --git a/protocols/v2/subprotocols/mining/Cargo.toml b/protocols/v2/subprotocols/mining/Cargo.toml index 090ad7f8bd..4b57534760 100644 --- a/protocols/v2/subprotocols/mining/Cargo.toml +++ b/protocols/v2/subprotocols/mining/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "mining_sv2" -version = "5.0.2" +version = "6.0.0" authors = ["The Stratum V2 Developers"] edition = "2021" readme = "README.md" diff --git a/protocols/v2/subprotocols/mining/src/lib.rs b/protocols/v2/subprotocols/mining/src/lib.rs index 8620967b03..986756289f 100644 --- a/protocols/v2/subprotocols/mining/src/lib.rs +++ b/protocols/v2/subprotocols/mining/src/lib.rs @@ -25,10 +25,7 @@ #![no_std] use binary_sv2::{B032, U256}; -use core::{ - cmp::{Ord, PartialOrd}, - convert::TryInto, -}; +use core::convert::TryInto; #[macro_use] extern crate alloc; @@ -112,71 +109,12 @@ pub const CHANNEL_BIT_UPDATE_CHANNEL_ERROR: bool = true; pub const MAX_EXTRANONCE_LEN: usize = 32; -/// Target is a 256-bit unsigned integer in little-endian -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Target { - head: u128, // least significant bits - tail: u128, // most significant bits -} - -impl Target { - pub fn new(head: u128, tail: u128) -> Self { - Self { head, tail } - } -} - -impl From<[u8; 32]> for Target { - fn from(v: [u8; 32]) -> Self { 
- // below unwraps never panics - let head = u128::from_le_bytes(v[0..16].try_into().unwrap()); - let tail = u128::from_le_bytes(v[16..32].try_into().unwrap()); - Self { head, tail } - } -} - impl From for alloc::vec::Vec { fn from(v: Extranonce) -> Self { v.extranonce } } -impl<'a> From> for Target { - fn from(v: U256<'a>) -> Self { - let inner = v.inner_as_ref(); - // below unwraps never panics - let head = u128::from_le_bytes(inner[0..16].try_into().unwrap()); - let tail = u128::from_le_bytes(inner[16..32].try_into().unwrap()); - Self { head, tail } - } -} - -impl From for U256<'static> { - fn from(v: Target) -> Self { - let mut inner = v.head.to_le_bytes().to_vec(); - inner.extend_from_slice(&v.tail.to_le_bytes()); - // below unwraps never panics - inner.try_into().unwrap() - } -} - -impl PartialOrd for Target { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Target { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - if self.tail == other.tail && self.head == other.head { - core::cmp::Ordering::Equal - } else if self.tail != other.tail { - self.tail.cmp(&other.tail) - } else { - self.head.cmp(&other.head) - } - } -} - // WARNING: do not derive Copy on this type. Some operations performed to a copy of an extranonce // do not affect the original, and this may lead to different extranonce inconsistency /// Extranonce bytes which need to be added to the coinbase to form a fully valid submission. 
@@ -1111,92 +1049,6 @@ pub mod tests { } } - #[quickcheck_macros::quickcheck] - fn test_target_from_u256(input: (u128, u128)) -> bool { - let target_expected = Target { - head: input.0, - tail: input.1, - }; - - let bytes = [&input.0.to_ne_bytes()[..], &input.1.to_ne_bytes()[..]].concat(); - let u256: U256 = bytes.try_into().unwrap(); - let target_final: Target = u256.clone().into(); - - let u256_final: U256 = target_final.clone().into(); - - target_expected == target_final && u256_final == u256 - } - #[quickcheck_macros::quickcheck] - fn test_target_to_u256(input: (u128, u128)) -> bool { - let target_start = Target { - head: input.0, - tail: input.1, - }; - let u256 = U256::<'static>::from(target_start.clone()); - let target_final = Target::from(u256); - target_final == target_final - } - - #[test] - fn test_ord_with_equal_head_tail() { - let target_1 = Target { head: 1, tail: 1 }; - let target_2 = Target { head: 1, tail: 2 }; - assert!(target_1 < target_2); - - //also test with equal tails - let target_3 = Target { head: 2, tail: 2 }; - assert!(target_2 < target_3); - } - - #[quickcheck_macros::quickcheck] - fn test_ord_for_target_positive_increment(input: (u128, u128, u128, u128)) -> bool { - let max = u128::MAX; - // we want input.0 and input.1 >= 0 and < u128::MAX - let input = (input.0 % max, input.1 % max, input.2, input.3); - let target_start = Target { - head: input.0, - tail: input.1, - }; - let positive_increment = ( - input.2 % (max - target_start.head) + 1, - input.3 % (max - target_start.tail) + 1, - ); - let target_final = Target { - head: target_start.head + positive_increment.0, - tail: target_start.tail + positive_increment.1, - }; - target_final > target_start - } - - #[quickcheck_macros::quickcheck] - fn test_ord_for_target_negative_increment(input: (u128, u128, u128, u128)) -> bool { - let max = u128::MAX; - let input = (input.0 % max + 1, input.1 % max + 1, input.2, input.3); - let target_start = Target { - head: input.0, - tail: input.1, - }; 
- let negative_increment = ( - input.2 % target_start.head + 1, - input.3 % target_start.tail + 1, - ); - let target_final = Target { - head: target_start.head - negative_increment.0, - tail: target_start.tail - negative_increment.1, - }; - target_final < target_start - } - - #[quickcheck_macros::quickcheck] - fn test_ord_for_target_zero_increment(input: (u128, u128)) -> bool { - let target_start = Target { - head: input.0, - tail: input.1, - }; - let target_final = target_start.clone(); - target_start == target_final - } - #[quickcheck_macros::quickcheck] fn test_vec_from_extranonce(input: Vec) -> bool { let input_start = from_arbitrary_vec_to_array(input).to_vec(); diff --git a/roles/Cargo.lock b/roles/Cargo.lock index 77c7383646..cbae272379 100644 --- a/roles/Cargo.lock +++ b/roles/Cargo.lock @@ -719,7 +719,7 @@ dependencies = [ "bitcoin", "common_messages_sv2 6.0.2", "job_declaration_sv2 5.0.2", - "mining_sv2 5.0.2", + "mining_sv2 6.0.0", "primitive-types", "template_distribution_sv2 4.0.2", "tracing", @@ -1443,7 +1443,7 @@ dependencies = [ "binary_sv2 5.0.0", "common_messages_sv2 6.0.2", "job_declaration_sv2 5.0.2", - "mining_sv2 5.0.2", + "mining_sv2 6.0.0", "parsers_sv2 0.1.2", "template_distribution_sv2 4.0.2", "trait-variant", @@ -1746,6 +1746,7 @@ name = "jd_client_sv2" version = "0.1.0" dependencies = [ "async-channel 1.9.0", + "bitcoin", "clap", "config", "serde", @@ -1942,7 +1943,7 @@ dependencies = [ [[package]] name = "mining_sv2" -version = "5.0.2" +version = "6.0.0" dependencies = [ "binary_sv2 5.0.0", ] @@ -2191,7 +2192,7 @@ dependencies = [ "common_messages_sv2 6.0.2", "framing_sv2 5.0.2", "job_declaration_sv2 5.0.2", - "mining_sv2 5.0.2", + "mining_sv2 6.0.0", "template_distribution_sv2 4.0.2", ] @@ -2333,6 +2334,7 @@ name = "pool_sv2" version = "0.2.0" dependencies = [ "async-channel 1.9.0", + "bitcoin", "clap", "config", "rand", @@ -2880,7 +2882,7 @@ dependencies = [ "framing_sv2 5.0.2", "handlers_sv2 0.2.1", "job_declaration_sv2 5.0.2", - 
"mining_sv2 5.0.2", + "mining_sv2 6.0.0", "noise_sv2 1.4.0", "parsers_sv2 0.1.2", "stratum_translation", @@ -2893,8 +2895,9 @@ name = "stratum_translation" version = "0.1.1" dependencies = [ "binary_sv2 5.0.0", + "bitcoin", "channels_sv2 2.0.1", - "mining_sv2 5.0.2", + "mining_sv2 6.0.0", "sv1_api", "tracing", ] @@ -3235,6 +3238,7 @@ name = "translator_sv2" version = "2.0.0" dependencies = [ "async-channel 1.9.0", + "bitcoin", "clap", "config", "serde", diff --git a/roles/jd-client/Cargo.toml b/roles/jd-client/Cargo.toml index a161bdce71..167525c8b2 100644 --- a/roles/jd-client/Cargo.toml +++ b/roles/jd-client/Cargo.toml @@ -16,6 +16,7 @@ name = "jd_client_sv2" path = "src/lib/mod.rs" [dependencies] +bitcoin = "0.32.5" stratum-apps = { path = "../stratum-apps", features = ["jd_client"] } async-channel = "1.5.1" serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] } diff --git a/roles/jd-client/src/lib/channel_manager/downstream_message_handler.rs b/roles/jd-client/src/lib/channel_manager/downstream_message_handler.rs index 3e67c3e29b..5c291a7682 100644 --- a/roles/jd-client/src/lib/channel_manager/downstream_message_handler.rs +++ b/roles/jd-client/src/lib/channel_manager/downstream_message_handler.rs @@ -1,3 +1,4 @@ +use bitcoin::Target; use std::sync::atomic::Ordering; use stratum_apps::stratum_core::{ @@ -311,7 +312,9 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { } let nominal_hash_rate = msg.nominal_hash_rate; - let requested_max_target = msg.max_target.into_static(); + let requested_max_target = Target::from_le_bytes( + msg.max_target.inner_as_ref().try_into().unwrap(), + ); let group_channel_id = data .group_channels @@ -339,7 +342,7 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { standard_channel_id, user_identity.to_string(), extranonce_prefix.to_vec(), - requested_max_target.into(), + requested_max_target, nominal_hash_rate, self.share_batch_size, self.shares_per_minute, @@ -374,7 +377,7 
@@ impl HandleMiningMessagesFromClientAsync for ChannelManager { OpenStandardMiningChannelSuccess { request_id: msg.request_id.clone(), channel_id: standard_channel_id, - target: standard_channel.get_target().clone().into(), + target: standard_channel.get_target().to_le_bytes().into(), extranonce_prefix: standard_channel .get_extranonce_prefix() .clone() @@ -524,7 +527,8 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { let request_id = msg.get_request_id_as_u32(); let nominal_hash_rate = msg.nominal_hash_rate; - let requested_max_target = msg.max_target.into_static(); + let requested_max_target = + Target::from_le_bytes(msg.max_target.inner_as_ref().try_into().unwrap()); let requested_min_rollable_extranonce_size = msg.min_extranonce_size; let build_error = |code: &str| { @@ -574,7 +578,7 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { extended_channel_id, user_identity.to_string(), extranonce_prefix.into(), - requested_max_target.into(), + requested_max_target, nominal_hash_rate, true, requested_min_rollable_extranonce_size, @@ -610,7 +614,7 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { OpenExtendedMiningChannelSuccess { request_id, channel_id: extended_channel_id, - target: extended_channel.get_target().clone().into(), + target: extended_channel.get_target().to_le_bytes().into(), extranonce_prefix: extended_channel .get_extranonce_prefix() .clone() @@ -727,7 +731,8 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { info!("Received: {}", msg); let channel_id = msg.channel_id; let new_nominal_hash_rate = msg.nominal_hash_rate; - let requested_maximum_target = msg.maximum_target.into_static(); + let requested_maximum_target = + Target::from_le_bytes(msg.maximum_target.inner_as_ref().try_into().unwrap()); let messages = self .channel_manager_data @@ -769,9 +774,9 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { { let update_channel = standard_channel.update_channel( 
new_nominal_hash_rate, - Some(requested_maximum_target.into()), + Some(requested_maximum_target), ); - let new_target = standard_channel.get_target().clone(); + let new_target = standard_channel.get_target(); if let Err(e) = update_channel { error!(channel_id, ?e, "StandardChannel update failed"); @@ -797,7 +802,7 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { downstream_id, Mining::SetTarget(SetTarget { channel_id, - maximum_target: new_target.into(), + maximum_target: new_target.to_le_bytes().into(), }), ) .into(), @@ -807,9 +812,9 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { { let update_channel = extended_channel.update_channel( new_nominal_hash_rate, - Some(requested_maximum_target.into()), + Some(requested_maximum_target), ); - let new_target = extended_channel.get_target().clone(); + let new_target = extended_channel.get_target(); if let Err(e) = update_channel { error!(channel_id, ?e, "StandardChannel update failed"); @@ -834,7 +839,7 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { downstream_id, Mining::SetTarget(SetTarget { channel_id, - maximum_target: new_target.into(), + maximum_target: new_target.to_le_bytes().into(), }), ) .into(), @@ -852,13 +857,13 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { } let mut downstream_hashrate = 0.0; - let mut min_target: Target = [0xff; 32].into(); + let mut min_target = Target::from_le_bytes([0xff; 32]); for (_, downstream) in channel_manager_data.downstream.iter() { downstream.downstream_data.super_safe_lock(|data| { let mut update_from_channel = |hashrate: f32, target: &Target| { downstream_hashrate += hashrate; - min_target = std::cmp::min(target.clone(), min_target.clone()); + min_target = std::cmp::min(*target, min_target); }; for (_, channel) in data.standard_channels.iter() { @@ -890,7 +895,7 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { Mining::UpdateChannel(UpdateChannel { channel_id: 
upstream_channel.get_channel_id(), nominal_hash_rate: downstream_hashrate, - maximum_target: min_target.into(), + maximum_target: min_target.to_le_bytes().into(), }) .into(), ) diff --git a/roles/jd-client/src/lib/channel_manager/mod.rs b/roles/jd-client/src/lib/channel_manager/mod.rs index bc267356eb..a710b85431 100644 --- a/roles/jd-client/src/lib/channel_manager/mod.rs +++ b/roles/jd-client/src/lib/channel_manager/mod.rs @@ -8,6 +8,7 @@ use std::{ }; use async_channel::{Receiver, Sender}; +use bitcoin::Target; use stratum_apps::{ custom_mutex::Mutex, key_utils::{Secp256k1PublicKey, Secp256k1SecretKey}, @@ -33,7 +34,7 @@ use stratum_apps::{ AllocateMiningJobToken, AllocateMiningJobTokenSuccess, DeclareMiningJob, }, mining_sv2::{ - ExtendedExtranonce, OpenExtendedMiningChannel, SetCustomMiningJob, SetTarget, Target, + ExtendedExtranonce, OpenExtendedMiningChannel, SetCustomMiningJob, SetTarget, UpdateChannel, MAX_EXTRANONCE_LEN, MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL, MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL, }, @@ -949,7 +950,7 @@ impl ChannelManager { downstream_id, Mining::SetTarget(SetTarget { channel_id, - maximum_target: updated_target.clone().into(), + maximum_target: updated_target.to_le_bytes().into(), }), ) .into(), @@ -989,7 +990,7 @@ impl ChannelManager { downstream_id, Mining::SetTarget(SetTarget { channel_id, - maximum_target: updated_target.clone().into(), + maximum_target: updated_target.to_le_bytes().into(), }), ) .into(), @@ -1062,13 +1063,13 @@ impl ChannelManager { if !messages.is_empty() { let mut downstream_hashrate = 0.0; - let mut min_target: Target = [0xff; 32].into(); + let mut min_target = [0xff; 32]; for (_, downstream) in channel_manager_data.downstream.iter() { downstream.downstream_data.super_safe_lock(|data| { let mut update_from_channel = |hashrate: f32, target: &Target| { downstream_hashrate += hashrate; - min_target = std::cmp::min(target.clone(), min_target.clone()); + min_target = std::cmp::min(target.to_le_bytes(), 
min_target); }; for (_, channel) in data.standard_channels.iter() { diff --git a/roles/jd-client/src/lib/channel_manager/upstream_message_handler.rs b/roles/jd-client/src/lib/channel_manager/upstream_message_handler.rs index 0fdf808c71..6e6142f9b6 100644 --- a/roles/jd-client/src/lib/channel_manager/upstream_message_handler.rs +++ b/roles/jd-client/src/lib/channel_manager/upstream_message_handler.rs @@ -1,3 +1,4 @@ +use bitcoin::Target; use std::sync::atomic::Ordering; use stratum_apps::stratum_core::{ @@ -139,7 +140,7 @@ impl HandleMiningMessagesFromServerAsync for ChannelManager { msg.channel_id, self.user_identity.clone(), msg.extranonce_prefix.to_vec(), - msg.target.into(), + Target::from_le_bytes(msg.target.inner_as_ref().try_into().unwrap()), hashrate, true, msg.extranonce_size, @@ -597,7 +598,9 @@ impl HandleMiningMessagesFromServerAsync for ChannelManager { info!("Received: {}", msg); self.channel_manager_data.super_safe_lock(|data| { if let Some(ref mut upstream) = data.upstream_channel { - upstream.set_target(msg.maximum_target.clone().into()); + upstream.set_target(Target::from_le_bytes( + msg.maximum_target.clone().as_ref().try_into().unwrap(), + )); } }); Ok(()) diff --git a/roles/pool/Cargo.toml b/roles/pool/Cargo.toml index dffd724e02..8a1a66ce6d 100644 --- a/roles/pool/Cargo.toml +++ b/roles/pool/Cargo.toml @@ -17,6 +17,7 @@ name = "pool_sv2" path = "src/lib/mod.rs" [dependencies] +bitcoin = "0.32.5" stratum-apps = { path = "../stratum-apps", features = ["pool"] } async-channel = "1.5.1" rand = "0.8.4" diff --git a/roles/pool/src/lib/channel_manager/mining_message_handler.rs b/roles/pool/src/lib/channel_manager/mining_message_handler.rs index 964fbaeb9f..4b8bf714d4 100644 --- a/roles/pool/src/lib/channel_manager/mining_message_handler.rs +++ b/roles/pool/src/lib/channel_manager/mining_message_handler.rs @@ -1,3 +1,4 @@ +use bitcoin::Target; use std::sync::atomic::Ordering; use stratum_apps::stratum_core::{ @@ -131,13 +132,13 @@ impl 
HandleMiningMessagesFromClientAsync for ChannelManager { downstream_data.group_channels = Some(group_channel); } let nominal_hash_rate = msg.nominal_hash_rate; - let requested_max_target = msg.max_target.into_static(); + let requested_max_target = Target::from_le_bytes(msg.max_target.inner_as_ref().try_into().unwrap()); let extranonce_prefix = channel_manager_data.extranonce_prefix_factory_standard.next_prefix_standard()?; let channel_id = downstream_data.channel_id_factory.fetch_add(1, Ordering::SeqCst); let job_store = DefaultJobStore::new(); - let mut standard_channel = match StandardChannel::new_for_pool(channel_id as u32, user_identity.to_string(), extranonce_prefix.to_vec(), requested_max_target.into(), nominal_hash_rate, self.share_batch_size, self.shares_per_minute, job_store, self.pool_tag_string.clone()) { + let mut standard_channel = match StandardChannel::new_for_pool(channel_id as u32, user_identity.to_string(), extranonce_prefix.to_vec(), requested_max_target, nominal_hash_rate, self.share_batch_size, self.shares_per_minute, job_store, self.pool_tag_string.clone()) { Ok(channel) => channel, Err(e) => match e { StandardChannelError::InvalidNominalHashrate => { @@ -174,7 +175,7 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { let open_standard_mining_channel_success = OpenStandardMiningChannelSuccess { request_id: msg.request_id, channel_id: channel_id as u32, - target: standard_channel.get_target().clone().into(), + target: standard_channel.get_target().to_le_bytes().into(), extranonce_prefix: standard_channel.get_extranonce_prefix().clone().try_into().expect("Extranonce_prefix must be valid"), group_channel_id }.into_static(); @@ -246,7 +247,8 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { info!("Received OpenExtendedMiningChannel: {}", msg); let nominal_hash_rate = msg.nominal_hash_rate; - let requested_max_target = msg.max_target.into_static(); + let requested_max_target = + 
Target::from_le_bytes(msg.max_target.inner_as_ref().try_into().unwrap()); let requested_min_rollable_extranonce_size = msg.min_extranonce_size; let messages = self @@ -294,7 +296,7 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { channel_id as u32, user_identity.to_string(), extranonce_prefix, - requested_max_target.into(), + requested_max_target, nominal_hash_rate, true, // version rolling always allowed requested_min_rollable_extranonce_size, @@ -370,7 +372,7 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { OpenExtendedMiningChannelSuccess { request_id, channel_id: channel_id as u32, - target: extended_channel.get_target().clone().into(), + target: extended_channel.get_target().to_le_bytes().into(), extranonce_prefix: extended_channel .get_extranonce_prefix() .clone() @@ -826,11 +828,11 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { let mut messages = Vec::new(); let channel_id = msg.channel_id; let new_nominal_hash_rate = msg.nominal_hash_rate; - let requested_maximum_target = msg.maximum_target.into_static(); + let requested_maximum_target = Target::from_le_bytes(msg.maximum_target.inner_as_ref().try_into().unwrap()); if let Some(standard_channel) = downstream_data.standard_channels.get_mut(&channel_id) { let res = standard_channel - .update_channel(new_nominal_hash_rate, Some(requested_maximum_target.into())); + .update_channel(new_nominal_hash_rate, Some(requested_maximum_target)); match res { Ok(_) => {} Err(e) => { @@ -867,12 +869,12 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { let new_target = standard_channel.get_target(); let set_target = SetTarget { channel_id, - maximum_target: new_target.clone().into(), + maximum_target: new_target.to_le_bytes().into(), }; messages.push((downstream_id, Mining::SetTarget(set_target)).into()); } else if let Some(extended_channel) = downstream_data.extended_channels.get_mut(&channel_id) { let res = extended_channel - 
.update_channel(new_nominal_hash_rate, Some(requested_maximum_target.into())); + .update_channel(new_nominal_hash_rate, Some(requested_maximum_target)); match res { Ok(_) => {} Err(e) => { @@ -909,7 +911,7 @@ impl HandleMiningMessagesFromClientAsync for ChannelManager { let new_target = extended_channel.get_target(); let set_target = SetTarget { channel_id, - maximum_target: new_target.clone().into(), + maximum_target: new_target.to_le_bytes().into(), }; messages.push((downstream_id, Mining::SetTarget(set_target)).into()); } else { diff --git a/roles/pool/src/lib/channel_manager/mod.rs b/roles/pool/src/lib/channel_manager/mod.rs index 733d3eda06..c0096cd30f 100644 --- a/roles/pool/src/lib/channel_manager/mod.rs +++ b/roles/pool/src/lib/channel_manager/mod.rs @@ -400,7 +400,7 @@ impl ChannelManager { downstream_id, Mining::SetTarget(SetTarget { channel_id, - maximum_target: updated_target.clone().into(), + maximum_target: updated_target.to_le_bytes().into(), }), ) .into(), @@ -440,7 +440,7 @@ impl ChannelManager { downstream_id, Mining::SetTarget(SetTarget { channel_id, - maximum_target: updated_target.clone().into(), + maximum_target: updated_target.to_le_bytes().into(), }), ) .into(), diff --git a/roles/stratum-apps/src/channel_logic/channel_factory.rs b/roles/stratum-apps/src/channel_logic/channel_factory.rs new file mode 100644 index 0000000000..069a83b71c --- /dev/null +++ b/roles/stratum-apps/src/channel_logic/channel_factory.rs @@ -0,0 +1,1368 @@ +//! # Channel Factory +//! +//! This module contains logic for creating and managing channels. 
+ +use crate::{ + job_creator::{self, JobsCreators}, + utils::{GroupId, Id, Mutex}, + Error, +}; + +use codec_sv2::binary_sv2; +use mining_sv2::{ + ExtendedExtranonce, NewExtendedMiningJob, OpenExtendedMiningChannelSuccess, + OpenMiningChannelError, SetCustomMiningJob, SetCustomMiningJobSuccess, SetNewPrevHash, + SubmitSharesError, SubmitSharesExtended, SubmitSharesStandard, +}; +use parsers_sv2::Mining; + +use hex::DisplayHex; +use nohash_hasher::BuildNoHashHasher; +use std::{collections::HashMap, convert::TryInto, sync::Arc}; +use template_distribution_sv2::{NewTemplate, SetNewPrevHash as SetNewPrevHashFromTp}; + +use tracing::{debug, error, info, trace, warn}; + +use bitcoin::{ + block::{Header, Version}, + hash_types, + hashes::sha256d::Hash, + CompactTarget, Target, TxOut, +}; + +/// A stripped type of `SetCustomMiningJob` without the (`channel_id, `request_id` and `token`) +/// fields +#[derive(Debug)] +pub struct PartialSetCustomMiningJob { + pub version: u32, + pub prev_hash: binary_sv2::U256<'static>, + pub min_ntime: u32, + pub nbits: u32, + pub coinbase_tx_version: u32, + pub coinbase_prefix: binary_sv2::B0255<'static>, + pub coinbase_tx_input_n_sequence: u32, + pub coinbase_tx_value_remaining: u64, + pub coinbase_tx_outputs: binary_sv2::B064K<'static>, + pub coinbase_tx_locktime: u32, + pub merkle_path: binary_sv2::Seq0255<'static, binary_sv2::U256<'static>>, + pub future_job: bool, +} + +/// Represents the action that needs to be done when a new share is received. +#[derive(Debug, Clone)] +pub enum OnNewShare { + /// Used when the received is malformed, is for an inexistent channel or do not meet downstream + /// target. + SendErrorDownstream(SubmitSharesError<'static>), + /// Used when an extended channel in a proxy receive a share, and the share meet upstream + /// target, in this case a new share must be sent upstream. 
Also an optional template id is + /// returned, when a job declarator want to send a valid share upstream could use the + /// template for get the up job id. + SendSubmitShareUpstream((Share, Option)), + /// Used when a group channel in a proxy receive a share that is not malformed and is for a + /// valid channel in that case we relay the same exact share upstream with a new request id. + RelaySubmitShareUpstream, + /// Indicate that the share meet bitcoin target, when there is an upstream the we should send + /// the share upstream, whenever possible we should also notify the TP about it. + /// When a pool negotiate a job with downstream we do not have the template_id so we set it to + /// None + /// (share, template id, coinbase,complete extranonce) + ShareMeetBitcoinTarget((Share, Option, Vec, Vec)), + /// Indicate that the share meet downstream target, in the case we could send a success + /// response downstream. + ShareMeetDownstreamTarget, +} + +impl OnNewShare { + /// Converts standard share into extended share + pub fn into_extended(&mut self, extranonce: Vec, up_id: u32) { + match self { + OnNewShare::SendErrorDownstream(_) => (), + OnNewShare::SendSubmitShareUpstream((share, template_id)) => match share { + Share::Extended(_) => (), + Share::Standard((share, _)) => { + let share = SubmitSharesExtended { + channel_id: up_id, + sequence_number: share.sequence_number, + job_id: share.job_id, + nonce: share.nonce, + ntime: share.ntime, + version: share.version, + extranonce: extranonce.try_into().unwrap(), + }; + *self = Self::SendSubmitShareUpstream((Share::Extended(share), *template_id)); + } + }, + OnNewShare::RelaySubmitShareUpstream => (), + OnNewShare::ShareMeetBitcoinTarget((share, t_id, coinbase, ext)) => match share { + Share::Extended(_) => (), + Share::Standard((share, _)) => { + let share = SubmitSharesExtended { + channel_id: up_id, + sequence_number: share.sequence_number, + job_id: share.job_id, + nonce: share.nonce, + ntime: share.ntime, + 
version: share.version, + extranonce: extranonce.try_into().unwrap(), + }; + *self = Self::ShareMeetBitcoinTarget(( + Share::Extended(share), + *t_id, + coinbase.clone(), + ext.to_vec(), + )); + } + }, + OnNewShare::ShareMeetDownstreamTarget => todo!(), + } + } +} + +/// A share can be either extended or standard +#[derive(Clone, Debug)] +pub enum Share { + Extended(SubmitSharesExtended<'static>), + // share, group id + Standard((SubmitSharesStandard, u32)), +} + +/// Helper type used before a `SetNewPrevHash` has a channel_id +#[derive(Clone, Debug)] +pub struct StagedPhash { + job_id: u32, + prev_hash: binary_sv2::U256<'static>, + min_ntime: u32, + nbits: u32, +} + +impl StagedPhash { + /// Converts a Staged PrevHash into a SetNewPrevHash message + pub fn into_set_p_hash( + &self, + channel_id: u32, + new_job_id: Option, + ) -> SetNewPrevHash<'static> { + SetNewPrevHash { + channel_id, + job_id: new_job_id.unwrap_or(self.job_id), + prev_hash: self.prev_hash.clone(), + min_ntime: self.min_ntime, + nbits: self.nbits, + } + } +} + +impl Share { + /// Get share sequence number + pub fn get_sequence_number(&self) -> u32 { + match self { + Share::Extended(s) => s.sequence_number, + Share::Standard(s) => s.0.sequence_number, + } + } + + /// Get share channel id + pub fn get_channel_id(&self) -> u32 { + match self { + Share::Extended(s) => s.channel_id, + Share::Standard(s) => s.0.channel_id, + } + } + + /// Get share timestamp + pub fn get_n_time(&self) -> u32 { + match self { + Share::Extended(s) => s.ntime, + Share::Standard(s) => s.0.ntime, + } + } + + /// Get share nonce + pub fn get_nonce(&self) -> u32 { + match self { + Share::Extended(s) => s.nonce, + Share::Standard(s) => s.0.nonce, + } + } + + /// Get share job id + pub fn get_job_id(&self) -> u32 { + match self { + Share::Extended(s) => s.job_id, + Share::Standard(s) => s.0.job_id, + } + } + + /// Get share version + pub fn get_version(&self) -> u32 { + match self { + Share::Extended(s) => s.version, + 
Share::Standard(s) => s.0.version, + } + } +} + +#[derive(Debug)] +/// Basic logic shared between all the channel factories +struct ChannelFactory { + ids: Arc>, + extended_channels: + HashMap, BuildNoHashHasher>, + extranonces: ExtendedExtranonce, + share_per_min: f32, + // (NewExtendedMiningJob,group ids that already received the future job) + future_jobs: Vec<(NewExtendedMiningJob<'static>, Vec)>, + // (SetNewPrevHash,group ids that already received the set prev_hash) + last_prev_hash: Option<(StagedPhash, Vec)>, + last_prev_hash_: Option, + // (NewExtendedMiningJob,group ids that already received the job) + last_valid_job: Option<(NewExtendedMiningJob<'static>, Vec)>, + kind: ExtendedChannelKind, + job_ids: Id, + channel_to_group_id: HashMap>, + future_templates: HashMap, BuildNoHashHasher>, +} + +impl ChannelFactory { + /// Called when a `OpenExtendedMiningChannel` message is received. + /// Here we save the downstream's target (based on hashrate) and the + /// channel's extranonce details before returning the relevant SV2 mining messages + /// to be sent downstream. For the mining messages, we will first return an + /// `OpenExtendedMiningChannelSuccess` if the channel is successfully opened. Then we add + /// the `NewExtendedMiningJob` and `SetNewPrevHash` messages if the relevant data is + /// available. If the channel opening fails, we return `OpenExtendedMiningChannelError`. + pub fn new_extended_channel( + &mut self, + request_id: u32, + hash_rate: f32, + min_extranonce_size: u16, + ) -> Result>, Error> { + let extended_channels_group = 0; + let max_extranonce_size = self.extranonces.get_range2_len() as u16; + if min_extranonce_size <= max_extranonce_size { + // SECURITY is very unlikely to finish the ids btw this unwrap could be used by an + // attacker that want to disrupt the service maybe we should have a method + // to reuse ids that are no longer connected? 
+ let channel_id = self + .ids + .safe_lock(|ids| ids.new_channel_id(extended_channels_group)) + .unwrap(); + self.channel_to_group_id.insert(channel_id, 0); + let target = match crate::utils::hash_rate_to_target( + hash_rate.into(), + self.share_per_min.into(), + ) { + Ok(target) => target, + Err(e) => { + error!( + "Impossible to get target: {:?}. Request id: {:?}", + e, request_id + ); + return Err(e); + } + }; + + let extranonce_prefix = self + .extranonces + .next_prefix_extended(max_extranonce_size as usize) + .unwrap() + .into_b032(); + let success = OpenExtendedMiningChannelSuccess { + request_id, + channel_id, + target: target.to_le_bytes().into(), + extranonce_size: max_extranonce_size, + extranonce_prefix, + }; + self.extended_channels.insert(channel_id, success.clone()); + let mut result = vec![Mining::OpenExtendedMiningChannelSuccess(success)]; + if let Some((job, _)) = &self.last_valid_job { + let mut job = job.clone(); + job.set_future(); + let j_id = job.job_id; + result.push(Mining::NewExtendedMiningJob(job)); + if let Some((new_prev_hash, _)) = &self.last_prev_hash { + let mut new_prev_hash = new_prev_hash.into_set_p_hash(channel_id, None); + new_prev_hash.job_id = j_id; + result.push(Mining::SetNewPrevHash(new_prev_hash.clone())) + }; + } else if let Some((new_prev_hash, _)) = &self.last_prev_hash { + let new_prev_hash = new_prev_hash.into_set_p_hash(channel_id, None); + result.push(Mining::SetNewPrevHash(new_prev_hash.clone())) + }; + for (job, _) in &self.future_jobs { + result.push(Mining::NewExtendedMiningJob(job.clone())) + } + Ok(result) + } else { + Ok(vec![Mining::OpenMiningChannelError( + OpenMiningChannelError::unsupported_extranonce_size(request_id), + )]) + } + } + + /// Called when we want to replicate a channel already opened by another actor. + /// It is used only in the jd client from the template provider module to mock a pool. 
+ /// Anything else should open channel with the new_extended_channel function + pub fn replicate_upstream_extended_channel_only_jd( + &mut self, + target: binary_sv2::U256<'static>, + extranonce: mining_sv2::Extranonce, + channel_id: u32, + extranonce_size: u16, + ) -> Option<()> { + self.channel_to_group_id.insert(channel_id, 0); + let extranonce_prefix = extranonce.into(); + let success = OpenExtendedMiningChannelSuccess { + request_id: 0, + channel_id, + target, + extranonce_size, + extranonce_prefix, + }; + self.extended_channels.insert(channel_id, success.clone()); + Some(()) + } + + /// Called when a new prev hash is received. If the respective job is available in the future + /// job queue, we move the future job into the valid job slot and store the prev hash as the + /// current prev hash to be referenced. + fn on_new_prev_hash(&mut self, m: StagedPhash) -> Result<(), Error> { + while let Some(mut job) = self.future_jobs.pop() { + if job.0.job_id == m.job_id { + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs() as u32; + job.0.set_no_future(now); + self.last_valid_job = Some(job); + break; + } + self.last_valid_job = None; + } + self.future_jobs = vec![]; + self.last_prev_hash_ = Some(crate::utils::u256_to_block_hash(m.prev_hash.clone())); + self.last_prev_hash = Some((m, vec![])); + Ok(()) + } + + /// Called when a `NewExtendedMiningJob` arrives. If the job is future, we add it to the future + /// queue. 
If the job is not future, we pair it with a the most recent prev hash + fn on_new_extended_mining_job( + &mut self, + m: NewExtendedMiningJob<'static>, + ) -> Result, BuildNoHashHasher>, Error> { + match (m.is_future(), &self.last_prev_hash) { + (true, _) => { + let mut result = HashMap::with_hasher(BuildNoHashHasher::default()); + self.prepare_jobs_for_downstream_on_new_extended(&mut result, &m)?; + self.future_jobs.push((m, vec![])); + Ok(result) + } + (false, Some(_)) => { + let mut result = HashMap::with_hasher(BuildNoHashHasher::default()); + self.prepare_jobs_for_downstream_on_new_extended(&mut result, &m)?; + // If job is not future it must always be paired with the last received prev hash + self.last_valid_job = Some((m, vec![])); + if let Some((_p_hash, _)) = &self.last_prev_hash { + Ok(result) + } else { + Err(Error::JobIsNotFutureButPrevHashNotPresent) + } + } + // This should not happen when a non future job is received we always need to have a + // prev hash + (false, None) => Err(Error::JobIsNotFutureButPrevHashNotPresent), + } + } + + // When a new extended job is received we use this function to prepare the jobs to be sent + // downstream (standard for hom and this job for non hom) + fn prepare_jobs_for_downstream_on_new_extended( + &mut self, + result: &mut HashMap>, + m: &NewExtendedMiningJob<'static>, + ) -> Result<(), Error> { + for id in self.extended_channels.keys() { + let mut extended = m.clone(); + extended.channel_id = *id; + let extended_job = Mining::NewExtendedMiningJob(extended); + result.insert(*id, extended_job); + } + Ok(()) + } + + // If there is job creator, bitcoin_target is retrieved from there. If not, it is set to 0. + // If there is a job creator we pass the correct template id. 
If not, we pass `None` + // allow comparison chain because clippy wants to make job management assertion into a match + // clause + #[allow(clippy::comparison_chain)] + #[allow(clippy::too_many_arguments)] + fn check_target>( + &mut self, + mut m: Share, + bitcoin_target: Target, + template_id: Option, + up_id: u32, + merkle_path: Vec, + coinbase_tx_prefix: &[u8], + coinbase_tx_suffix: &[u8], + prev_blockhash: hash_types::BlockHash, + bits: u32, + ) -> Result { + debug!("Checking target for share {:?}", m); + let upstream_target = match &self.kind { + ExtendedChannelKind::Pool => Target::ZERO, + ExtendedChannelKind::Proxy { + upstream_target, .. + } + | ExtendedChannelKind::ProxyJd { + upstream_target, .. + } => upstream_target.clone(), + }; + + let (downstream_target, extranonce) = self + .get_channel_specific_mining_info(&m) + .ok_or(Error::ShareDoNotMatchAnyChannel)?; + let extranonce_1_len = self.extranonces.get_range0_len(); + let extranonce_2 = extranonce[extranonce_1_len..].to_vec(); + match &mut m { + Share::Extended(extended_share) => { + extended_share.extranonce = extranonce_2.try_into()?; + } + Share::Standard(_) => (), + }; + trace!( + "On checking target coinbase prefix is: {:?}", + coinbase_tx_prefix + ); + trace!( + "On checking target coinbase suffix is: {:?}", + coinbase_tx_suffix + ); + // Safe unwrap a sha256 can always be converted into [u8;32] + let merkle_root: [u8; 32] = crate::utils::merkle_root_from_path( + coinbase_tx_prefix, + coinbase_tx_suffix, + &extranonce[..], + &merkle_path[..], + ) + .ok_or(Error::InvalidCoinbase)? 
+ .try_into() + .unwrap(); + let version = match &m { + Share::Extended(share) => share.version as i32, + Share::Standard(share) => share.0.version as i32, + }; + + let header = Header { + version: Version::from_consensus(version), + prev_blockhash, + merkle_root: (*Hash::from_bytes_ref(&merkle_root)).into(), + time: m.get_n_time(), + bits: CompactTarget::from_consensus(bits), + nonce: m.get_nonce(), + }; + + trace!("On checking target header is: {:?}", header); + let hash_ = header.block_hash(); + let hash: [u8; 32] = *hash_.to_raw_hash().as_ref(); + + if tracing::level_enabled!(tracing::Level::DEBUG) + || tracing::level_enabled!(tracing::Level::TRACE) + { + let bitcoin_target_log = bitcoin_target.to_be_bytes(); + debug!("Bitcoin target : {:?}", bitcoin_target_log.as_hex()); + let upstream_target = upstream_target.to_be_bytes(); + debug!("Upstream target: {:?}", upstream_target.to_vec().as_hex()); + let mut hash = hash; + hash.reverse(); + debug!("Hash : {:?}", hash.to_vec().as_hex()); + } + let hash = Target::from_be_bytes(hash); + + if hash <= bitcoin_target { + let mut print_hash: [u8; 32] = *hash_.to_raw_hash().as_ref(); + print_hash.reverse(); + + info!( + "Share hash meet bitcoin target: {:?}", + print_hash.to_vec().as_hex() + ); + + let coinbase = [coinbase_tx_prefix, &extranonce[..], coinbase_tx_suffix] + .concat() + .to_vec(); + match self.kind { + ExtendedChannelKind::Proxy { .. } | ExtendedChannelKind::ProxyJd { .. } => { + let upstream_extranonce_space = self.extranonces.get_range0_len(); + let extranonce_ = extranonce[upstream_extranonce_space..].to_vec(); + let mut res = OnNewShare::ShareMeetBitcoinTarget(( + m, + template_id, + coinbase, + extranonce.to_vec(), + )); + res.into_extended(extranonce_, up_id); + Ok(res) + } + ExtendedChannelKind::Pool => Ok(OnNewShare::ShareMeetBitcoinTarget(( + m, + template_id, + coinbase, + extranonce.to_vec(), + ))), + } + } else if hash <= upstream_target { + match self.kind { + ExtendedChannelKind::Proxy { .. 
} | ExtendedChannelKind::ProxyJd { .. } => { + let upstream_extranonce_space = self.extranonces.get_range0_len(); + let extranonce = extranonce[upstream_extranonce_space..].to_vec(); + let mut res = OnNewShare::SendSubmitShareUpstream((m, template_id)); + res.into_extended(extranonce, up_id); + Ok(res) + } + ExtendedChannelKind::Pool => { + Ok(OnNewShare::SendSubmitShareUpstream((m, template_id))) + } + } + } else if hash <= downstream_target { + Ok(OnNewShare::ShareMeetDownstreamTarget) + } else { + error!("Share does not meet any target: {:?}", m); + let error = SubmitSharesError { + channel_id: m.get_channel_id(), + sequence_number: m.get_sequence_number(), + // Infallible unwrap we already know the len of the error code (is a + // static string) + error_code: SubmitSharesError::difficulty_too_low_error_code() + .to_string() + .try_into() + .unwrap(), + }; + Ok(OnNewShare::SendErrorDownstream(error)) + } + } + + /// Returns the downstream target and extranonce for the channel + fn get_channel_specific_mining_info(&self, m: &Share) -> Option<(Target, Vec)> { + match m { + Share::Extended(share) => { + let channel = self.extended_channels.get(&m.get_channel_id())?; + let extranonce_prefix = channel.extranonce_prefix.to_vec(); + let dowstream_target = + Target::from_le_bytes(channel.target.inner_as_ref().try_into().unwrap()); + let extranonce = [&extranonce_prefix[..], &share.extranonce.to_vec()[..]] + .concat() + .to_vec(); + if extranonce.len() != self.extranonces.get_len() { + error!( + "Extranonce is not of the right len expected {} actual {}", + self.extranonces.get_len(), + extranonce.len() + ); + } + Some((dowstream_target, extranonce)) + } + Share::Standard((_share, _group_id)) => { + unimplemented!() + } + } + } + /// Updates the downstream target for the given channel_id + fn update_target_for_channel(&mut self, channel_id: u32, new_target: Target) -> Option { + let channel = self.extended_channels.get_mut(&channel_id)?; + channel.target = 
new_target.to_le_bytes().into(); + Some(true) + } +} + +/// Used by a pool to in order to manage all downstream channel. It adds job creation capabilities +/// to ChannelFactory. +#[derive(Debug)] +pub struct PoolChannelFactory { + inner: ChannelFactory, + job_creator: JobsCreators, + pool_coinbase_outputs: Vec, + // extended_channel_id -> SetCustomMiningJob + negotiated_jobs: HashMap, BuildNoHashHasher>, +} + +impl PoolChannelFactory { + /// constructor + pub fn new( + ids: Arc>, + extranonces: ExtendedExtranonce, + job_creator: JobsCreators, + share_per_min: f32, + kind: ExtendedChannelKind, + pool_coinbase_outputs: Vec, + ) -> Self { + let inner = ChannelFactory { + ids, + extended_channels: HashMap::with_hasher(BuildNoHashHasher::default()), + extranonces, + share_per_min, + future_jobs: Vec::new(), + last_prev_hash: None, + last_prev_hash_: None, + last_valid_job: None, + kind, + job_ids: Id::new(), + channel_to_group_id: HashMap::with_hasher(BuildNoHashHasher::default()), + future_templates: HashMap::with_hasher(BuildNoHashHasher::default()), + }; + + Self { + inner, + job_creator, + pool_coinbase_outputs, + negotiated_jobs: HashMap::with_hasher(BuildNoHashHasher::default()), + } + } + + /// Calls [`ChannelFactory::new_extended_channel`] + pub fn new_extended_channel( + &mut self, + request_id: u32, + hash_rate: f32, + min_extranonce_size: u16, + ) -> Result>, Error> { + self.inner + .new_extended_channel(request_id, hash_rate, min_extranonce_size) + } + + /// Called when we want to replicate a channel already opened by another actor. + /// is used only in the jd client from the template provider module to mock a pool. 
+ /// Anything else should open channel with the new_extended_channel function + pub fn replicate_upstream_extended_channel_only_jd( + &mut self, + target: binary_sv2::U256<'static>, + extranonce: mining_sv2::Extranonce, + channel_id: u32, + extranonce_size: u16, + ) -> Option<()> { + self.inner.replicate_upstream_extended_channel_only_jd( + target, + extranonce, + channel_id, + extranonce_size, + ) + } + + /// Called only when a new prev hash is received by a Template Provider. It matches the + /// message with a `job_id` and calls [`ChannelFactory::on_new_prev_hash`] + /// it return the job_id + pub fn on_new_prev_hash_from_tp( + &mut self, + m: &SetNewPrevHashFromTp<'static>, + ) -> Result { + let job_id = self.job_creator.on_new_prev_hash(m).unwrap_or(0); + let new_prev_hash = StagedPhash { + job_id, + prev_hash: m.prev_hash.clone(), + min_ntime: m.header_timestamp, + nbits: m.n_bits, + }; + self.inner.on_new_prev_hash(new_prev_hash)?; + Ok(job_id) + } + + /// Called only when a new template is received by a Template Provider + pub fn on_new_template( + &mut self, + m: &mut NewTemplate<'static>, + ) -> Result, BuildNoHashHasher>, Error> { + let new_job = + self.job_creator + .on_new_template(m, true, self.pool_coinbase_outputs.clone())?; + self.inner.on_new_extended_mining_job(new_job) + } + + /// Called when a `SubmitSharesStandard` message is received from the downstream. We check the + /// shares against the channel's respective target and return `OnNewShare` to let us know if + /// and where the shares should be relayed + pub fn on_submit_shares_standard( + &mut self, + m: SubmitSharesStandard, + ) -> Result { + match self.inner.channel_to_group_id.get(&m.channel_id) { + Some(g_id) => { + let referenced_job = self + .inner + .last_valid_job + .clone() + .ok_or(Error::ShareDoNotMatchAnyJob)? 
+ .0; + let merkle_path = referenced_job.merkle_path.to_vec(); + let template_id = self + .job_creator + .get_template_id_from_job(referenced_job.job_id) + .ok_or(Error::NoTemplateForId)?; + let target = self.job_creator.last_target(); + let prev_blockhash = self + .inner + .last_prev_hash_ + .ok_or(Error::ShareDoNotMatchAnyJob)?; + let bits = self + .inner + .last_prev_hash + .as_ref() + .ok_or(Error::ShareDoNotMatchAnyJob)? + .0 + .nbits; + self.inner.check_target( + Share::Standard((m, *g_id)), + target, + Some(template_id), + 0, + merkle_path, + referenced_job.coinbase_tx_prefix.as_ref(), + referenced_job.coinbase_tx_suffix.as_ref(), + prev_blockhash, + bits, + ) + } + None => { + let err = SubmitSharesError { + channel_id: m.channel_id, + sequence_number: m.sequence_number, + error_code: SubmitSharesError::invalid_channel_error_code() + .to_string() + .try_into() + .unwrap(), + }; + Ok(OnNewShare::SendErrorDownstream(err)) + } + } + } + + /// Called when a `SubmitSharesExtended` message is received from the downstream. We check the + /// shares against the channel's respective target and return `OnNewShare` to let us know if + /// and where the shares should be relayed + pub fn on_submit_shares_extended( + &mut self, + m: SubmitSharesExtended, + ) -> Result { + let target = self.job_creator.last_target(); + // When downstream set a custom mining job we add the job to the negotiated job + // hashmap, with the extended channel id as a key. Whenever the pool receive a share must + // first check if the channel have a negotiated job if so we can not retrieve the template + // via the job creator but we create a new one from the set custom job. 
+ if self.negotiated_jobs.contains_key(&m.channel_id) { + let referenced_job = self.negotiated_jobs.get(&m.channel_id).unwrap(); + let merkle_path = referenced_job.merkle_path.to_vec(); + let extended_job = job_creator::extended_job_from_custom_job( + referenced_job, + self.inner.extranonces.get_len() as u8, + ) + .unwrap(); + let prev_blockhash = crate::utils::u256_to_block_hash(referenced_job.prev_hash.clone()); + let bits = referenced_job.nbits; + self.inner.check_target( + Share::Extended(m.into_static()), + target, + None, + 0, + merkle_path, + extended_job.coinbase_tx_prefix.as_ref(), + extended_job.coinbase_tx_suffix.as_ref(), + prev_blockhash, + bits, + ) + } else { + let referenced_job = self + .inner + .last_valid_job + .clone() + .ok_or(Error::ShareDoNotMatchAnyJob)? + .0; + let merkle_path = referenced_job.merkle_path.to_vec(); + let template_id = self + .job_creator + .get_template_id_from_job(referenced_job.job_id) + .ok_or(Error::NoTemplateForId)?; + let prev_blockhash = self + .inner + .last_prev_hash_ + .ok_or(Error::ShareDoNotMatchAnyJob)?; + let bits = self + .inner + .last_prev_hash + .as_ref() + .ok_or(Error::ShareDoNotMatchAnyJob)? 
+ .0 + .nbits; + self.inner.check_target( + Share::Extended(m.into_static()), + target, + Some(template_id), + 0, + merkle_path, + referenced_job.coinbase_tx_prefix.as_ref(), + referenced_job.coinbase_tx_suffix.as_ref(), + prev_blockhash, + bits, + ) + } + } + + /// Utility function to return a new group id + pub fn new_group_id(&mut self) -> u32 { + let new_id = self.inner.ids.safe_lock(|ids| ids.new_group_id()).unwrap(); + new_id + } + + /// Utility function to return a new standard channel id + pub fn new_standard_id_for_hom(&mut self) -> u32 { + let hom_group_id = 0; + let new_id = self + .inner + .ids + .safe_lock(|ids| ids.new_channel_id(hom_group_id)) + .unwrap(); + new_id + } + + /// Returns the full extranonce, extranonce1 (static for channel) + extranonce2 (miner nonce + /// space) + pub fn extranonce_from_downstream_extranonce( + &self, + ext: mining_sv2::Extranonce, + ) -> Option { + self.inner + .extranonces + .extranonce_from_downstream_extranonce(ext) + .ok() + } + + /// Called when a new custom mining job arrives + pub fn on_new_set_custom_mining_job( + &mut self, + set_custom_mining_job: SetCustomMiningJob<'static>, + ) -> SetCustomMiningJobSuccess { + if self.check_set_custom_mining_job(&set_custom_mining_job) { + self.negotiated_jobs.insert( + set_custom_mining_job.channel_id, + set_custom_mining_job.clone(), + ); + SetCustomMiningJobSuccess { + channel_id: set_custom_mining_job.channel_id, + request_id: set_custom_mining_job.request_id, + job_id: self.inner.job_ids.next(), + } + } else { + todo!() + } + } + + fn check_set_custom_mining_job( + &self, + _set_custom_mining_job: &SetCustomMiningJob<'static>, + ) -> bool { + true + } + + /// Get extended channel ids + pub fn get_extended_channels_ids(&self) -> Vec { + self.inner.extended_channels.keys().copied().collect() + } + + pub fn get_shares_per_minute(&self) -> f32 { + self.inner.share_per_min + } + + /// Update coinbase outputs + pub fn update_pool_outputs(&mut self, outs: Vec) { + 
self.pool_coinbase_outputs = outs; + } + + /// Calls [`ChannelFactory::update_target_for_channel`] + /// Set a particular downstream channel target. + pub fn update_target_for_channel( + &mut self, + channel_id: u32, + new_target: Target, + ) -> Option { + self.inner.update_target_for_channel(channel_id, new_target) + } + + /// Set the target for this channel. This is the upstream target. + pub fn set_target(&mut self, new_target: &mut Target) { + self.inner.kind.set_target(new_target); + } +} + +/// Used by proxies that want to open extended channels with upstream. If the proxy has job +/// declaration capabilities, we set the job creator and the coinbase outs. +#[derive(Debug)] +pub struct ProxyExtendedChannelFactory { + inner: ChannelFactory, + job_creator: Option, + pool_coinbase_outputs: Option>, + // Id assigned to the extended channel by upstream + extended_channel_id: u32, +} + +impl ProxyExtendedChannelFactory { + /// Constructor + #[allow(clippy::too_many_arguments)] + pub fn new( + ids: Arc>, + extranonces: ExtendedExtranonce, + job_creator: Option, + share_per_min: f32, + kind: ExtendedChannelKind, + pool_coinbase_outputs: Option>, + extended_channel_id: u32, + ) -> Self { + match &kind { + ExtendedChannelKind::Proxy { .. } => { + if job_creator.is_some() { + panic!("Channel factory of kind Proxy can not be initialized with a JobCreators"); + }; + }, + ExtendedChannelKind::ProxyJd { .. 
} => { + if job_creator.is_none() { + panic!("Channel factory of kind ProxyJd must be initialized with a JobCreators"); + }; + } + ExtendedChannelKind::Pool => panic!("Try to construct an ProxyExtendedChannelFactory with pool kind, kind must be Proxy or ProxyJd"), + }; + let inner = ChannelFactory { + ids, + extended_channels: HashMap::with_hasher(BuildNoHashHasher::default()), + extranonces, + share_per_min, + future_jobs: Vec::new(), + last_prev_hash: None, + last_prev_hash_: None, + last_valid_job: None, + kind, + job_ids: Id::new(), + channel_to_group_id: HashMap::with_hasher(BuildNoHashHasher::default()), + future_templates: HashMap::with_hasher(BuildNoHashHasher::default()), + }; + ProxyExtendedChannelFactory { + inner, + job_creator, + pool_coinbase_outputs, + extended_channel_id, + } + } + + /// Calls [`ChannelFactory::new_extended_channel`] + pub fn new_extended_channel( + &mut self, + request_id: u32, + hash_rate: f32, + min_extranonce_size: u16, + ) -> Result>, Error> { + self.inner + .new_extended_channel(request_id, hash_rate, min_extranonce_size) + } + + /// Called only when a new prev hash is received by a Template Provider when job declaration is + /// used. 
It matches the message with a `job_id`, creates a new custom job, and calls + /// [`ChannelFactory::on_new_prev_hash`] + pub fn on_new_prev_hash_from_tp( + &mut self, + m: &SetNewPrevHashFromTp<'static>, + ) -> Result, Error> { + if let Some(job_creator) = self.job_creator.as_mut() { + let job_id = job_creator.on_new_prev_hash(m).unwrap_or(0); + let new_prev_hash = StagedPhash { + job_id, + prev_hash: m.prev_hash.clone(), + min_ntime: m.header_timestamp, + nbits: m.n_bits, + }; + let mut custom_job = None; + if let Some(template) = self.inner.future_templates.get(&job_id) { + custom_job = Some(( + PartialSetCustomMiningJob { + version: template.version, + prev_hash: new_prev_hash.prev_hash.clone(), + min_ntime: new_prev_hash.min_ntime, + nbits: new_prev_hash.nbits, + coinbase_tx_version: template.coinbase_tx_version, + coinbase_prefix: template.coinbase_prefix.clone(), + coinbase_tx_input_n_sequence: template.coinbase_tx_input_sequence, + coinbase_tx_value_remaining: template.coinbase_tx_value_remaining, + coinbase_tx_outputs: template.coinbase_tx_outputs.clone(), + coinbase_tx_locktime: template.coinbase_tx_locktime, + merkle_path: template.merkle_path.clone(), + future_job: template.future_template, + }, + job_id, + )); + } + self.inner.future_templates = HashMap::with_hasher(BuildNoHashHasher::default()); + self.inner.on_new_prev_hash(new_prev_hash)?; + Ok(custom_job) + } else { + panic!("A channel factory without job creator do not have declaration capabilities") + } + } + + /// Called only when a new template is received by a Template Provider when job declaration is + /// used. 
It creates a new custom job and calls + /// [`ChannelFactory::on_new_extended_mining_job`] + #[allow(clippy::type_complexity)] + pub fn on_new_template( + &mut self, + m: &mut NewTemplate<'static>, + ) -> Result< + ( + // downstream job_id -> downstream message (newextjob or newjob) + HashMap, BuildNoHashHasher>, + // PartialSetCustomMiningJob to send to the pool + Option, + // job_id registered in the channel, the one that SetNewPrevHash refer to (upstsream + // job id) + u32, + ), + Error, + > { + if let (Some(job_creator), Some(pool_coinbase_outputs)) = ( + self.job_creator.as_mut(), + self.pool_coinbase_outputs.as_mut(), + ) { + let new_job = job_creator.on_new_template(m, true, pool_coinbase_outputs.clone())?; + let id = new_job.job_id; + if !new_job.is_future() && self.inner.last_prev_hash.is_some() { + let prev_hash = self.last_prev_hash().unwrap(); + let min_ntime = self.last_min_ntime().unwrap(); + let nbits = self.last_nbits().unwrap(); + let custom_mining_job = PartialSetCustomMiningJob { + version: m.version, + prev_hash, + min_ntime, + nbits, + coinbase_tx_version: m.coinbase_tx_version, + coinbase_prefix: m.coinbase_prefix.clone(), + coinbase_tx_input_n_sequence: m.coinbase_tx_input_sequence, + coinbase_tx_value_remaining: m.coinbase_tx_value_remaining, + coinbase_tx_outputs: m.coinbase_tx_outputs.clone(), + coinbase_tx_locktime: m.coinbase_tx_locktime, + merkle_path: m.merkle_path.clone(), + future_job: m.future_template, + }; + return Ok(( + self.inner.on_new_extended_mining_job(new_job)?, + Some(custom_mining_job), + id, + )); + } else if new_job.is_future() { + self.inner + .future_templates + .insert(new_job.job_id, m.clone()); + } + Ok((self.inner.on_new_extended_mining_job(new_job)?, None, id)) + } else { + panic!("Either channel factory has no job creator or pool_coinbase_outputs are not yet set") + } + } + + /// Called when a `SubmitSharesStandard` message is received from the downstream. 
We check the + /// shares against the channel's respective target and return `OnNewShare` to let us know if + /// and where the shares should be relayed + pub fn on_submit_shares_extended( + &mut self, + m: SubmitSharesExtended<'static>, + ) -> Result { + let merkle_path = self + .inner + .last_valid_job + .as_ref() + .ok_or(Error::ShareDoNotMatchAnyJob)? + .0 + .merkle_path + .to_vec(); + + let referenced_job = self + .inner + .last_valid_job + .clone() + .ok_or(Error::ShareDoNotMatchAnyJob)? + .0; + + if referenced_job.job_id != m.job_id { + let error = SubmitSharesError { + channel_id: m.channel_id, + sequence_number: m.sequence_number, + // Infallible unwrap we already know the len of the error code (is a + // static string) + error_code: SubmitSharesError::invalid_job_id_error_code() + .to_string() + .try_into() + .unwrap(), + }; + return Ok(OnNewShare::SendErrorDownstream(error)); + } + + if let Some(job_creator) = self.job_creator.as_mut() { + let template_id = job_creator + .get_template_id_from_job(referenced_job.job_id) + .ok_or(Error::NoTemplateForId)?; + let bitcoin_target = job_creator.last_target(); + let prev_blockhash = self + .inner + .last_prev_hash_ + .ok_or(Error::ShareDoNotMatchAnyJob)?; + let bits = self + .inner + .last_prev_hash + .as_ref() + .ok_or(Error::ShareDoNotMatchAnyJob)? + .0 + .nbits; + self.inner.check_target( + Share::Extended(m), + bitcoin_target, + Some(template_id), + self.extended_channel_id, + merkle_path, + referenced_job.coinbase_tx_prefix.as_ref(), + referenced_job.coinbase_tx_suffix.as_ref(), + prev_blockhash, + bits, + ) + } else { + let bitcoin_target = Target::ZERO; + // if there is not job_creator is not proxy duty to check if target is below or above + // bitcoin target so we set bitcoin_target = 0. + let prev_blockhash = self + .inner + .last_prev_hash_ + .ok_or(Error::ShareDoNotMatchAnyJob)?; + let bits = self + .inner + .last_prev_hash + .as_ref() + .ok_or(Error::ShareDoNotMatchAnyJob)? 
+ .0 + .nbits; + self.inner.check_target( + Share::Extended(m), + bitcoin_target, + None, + self.extended_channel_id, + merkle_path, + referenced_job.coinbase_tx_prefix.as_ref(), + referenced_job.coinbase_tx_suffix.as_ref(), + prev_blockhash, + bits, + ) + } + } + + /// Called when a `SubmitSharesStandard` message is received from the Downstream. We check the + /// shares against the channel's respective target and return `OnNewShare` to let us know if + /// and where the shares should be relayed + pub fn on_submit_shares_standard( + &mut self, + m: SubmitSharesStandard, + ) -> Result { + let merkle_path = self + .inner + .last_valid_job + .as_ref() + .ok_or(Error::ShareDoNotMatchAnyJob)? + .0 + .merkle_path + .to_vec(); + let referenced_job = self + .inner + .last_valid_job + .clone() + .ok_or(Error::ShareDoNotMatchAnyJob)? + .0; + match self.inner.channel_to_group_id.get(&m.channel_id) { + Some(g_id) => { + if let Some(job_creator) = self.job_creator.as_mut() { + let template_id = job_creator + .get_template_id_from_job( + self.inner.last_valid_job.as_ref().unwrap().0.job_id, + ) + .ok_or(Error::NoTemplateForId)?; + let bitcoin_target = job_creator.last_target(); + let prev_blockhash = self + .inner + .last_prev_hash_ + .ok_or(Error::ShareDoNotMatchAnyJob)?; + let bits = self + .inner + .last_prev_hash + .as_ref() + .ok_or(Error::ShareDoNotMatchAnyJob)? + .0 + .nbits; + self.inner.check_target( + Share::Standard((m, *g_id)), + bitcoin_target, + Some(template_id), + self.extended_channel_id, + merkle_path, + referenced_job.coinbase_tx_prefix.as_ref(), + referenced_job.coinbase_tx_suffix.as_ref(), + prev_blockhash, + bits, + ) + } else { + let bitcoin_target = Target::ZERO; + let prev_blockhash = self + .inner + .last_prev_hash_ + .ok_or(Error::ShareDoNotMatchAnyJob)?; + let bits = self + .inner + .last_prev_hash + .as_ref() + .ok_or(Error::ShareDoNotMatchAnyJob)? 
+ .0 + .nbits; + // if there is not job_creator is not proxy duty to check if target is below or + // above bitcoin target so we set bitcoin_target = 0. + self.inner.check_target( + Share::Standard((m, *g_id)), + bitcoin_target, + None, + self.extended_channel_id, + merkle_path, + referenced_job.coinbase_tx_prefix.as_ref(), + referenced_job.coinbase_tx_suffix.as_ref(), + prev_blockhash, + bits, + ) + } + } + None => { + let err = SubmitSharesError { + channel_id: m.channel_id, + sequence_number: m.sequence_number, + error_code: SubmitSharesError::invalid_channel_error_code() + .to_string() + .try_into() + .unwrap(), + }; + Ok(OnNewShare::SendErrorDownstream(err)) + } + } + } + + /// Calls [`ChannelFactory::on_new_prev_hash`] + pub fn on_new_prev_hash(&mut self, m: SetNewPrevHash<'static>) -> Result<(), Error> { + self.inner.on_new_prev_hash(StagedPhash { + job_id: m.job_id, + prev_hash: m.prev_hash.clone().into_static(), + min_ntime: m.min_ntime, + nbits: m.nbits, + }) + } + + /// Calls [`ChannelFactory::on_new_extended_mining_job`] + pub fn on_new_extended_mining_job( + &mut self, + m: NewExtendedMiningJob<'static>, + ) -> Result, BuildNoHashHasher>, Error> { + self.inner.on_new_extended_mining_job(m) + } + + /// Set new target + pub fn set_target(&mut self, new_target: &mut Target) { + self.inner.kind.set_target(new_target); + } + + /// Get last valid job version + pub fn last_valid_job_version(&self) -> Option { + self.inner.last_valid_job.as_ref().map(|j| j.0.version) + } + + /// Returns the full extranonce, extranonce1 (static for channel) + extranonce2 (miner nonce + /// space) + pub fn extranonce_from_downstream_extranonce( + &self, + ext: mining_sv2::Extranonce, + ) -> Option { + self.inner + .extranonces + .extranonce_from_downstream_extranonce(ext) + .ok() + } + + /// Returns the most recent prev hash + pub fn last_prev_hash(&self) -> Option> { + self.inner + .last_prev_hash + .as_ref() + .map(|f| f.0.prev_hash.clone()) + } + + /// Get last min ntime + 
pub fn last_min_ntime(&self) -> Option { + self.inner.last_prev_hash.as_ref().map(|f| f.0.min_ntime) + } + + /// Get last nbits + pub fn last_nbits(&self) -> Option { + self.inner.last_prev_hash.as_ref().map(|f| f.0.nbits) + } + + /// Get extranonce_size + pub fn extranonce_size(&self) -> usize { + self.inner.extranonces.get_len() + } + + /// Get extranonce_2 size + pub fn channel_extranonce2_size(&self) -> usize { + self.inner.extranonces.get_len() - self.inner.extranonces.get_range0_len() + } + + // Only used when the proxy is using Job Declaration + /// Updates pool outputs + pub fn update_pool_outputs(&mut self, outs: Vec) { + self.pool_coinbase_outputs = Some(outs); + } + + /// Get this channel id + pub fn get_this_channel_id(&self) -> u32 { + self.extended_channel_id + } + + /// Returns the extranonce1 len of the upstream. For a proxy, this would + /// be the extranonce_prefix len + pub fn get_upstream_extranonce1_len(&self) -> usize { + self.inner.extranonces.get_range0_len() + } + + /// Calls [`ChannelFactory::update_target_for_channel`] + pub fn update_target_for_channel( + &mut self, + channel_id: u32, + new_target: Target, + ) -> Option { + self.inner.update_target_for_channel(channel_id, new_target) + } +} + +/// Used by proxies for tracking upstream targets. 
+#[derive(Debug, Clone)] +pub enum ExtendedChannelKind { + Proxy { upstream_target: Target }, + ProxyJd { upstream_target: Target }, + Pool, +} +impl ExtendedChannelKind { + /// Set target + pub fn set_target(&mut self, new_target: &mut Target) { + match self { + ExtendedChannelKind::Proxy { upstream_target } + | ExtendedChannelKind::ProxyJd { upstream_target } => { + std::mem::swap(upstream_target, new_target) + } + ExtendedChannelKind::Pool => warn!("Try to set upstream target for a pool"), + } + } +} diff --git a/roles/stratum-apps/src/job_creator.rs b/roles/stratum-apps/src/job_creator.rs new file mode 100644 index 0000000000..830a4d1136 --- /dev/null +++ b/roles/stratum-apps/src/job_creator.rs @@ -0,0 +1,716 @@ +//! # Job Creator +//! +//! This module provides logic to create extended mining jobs given a template from +//! a template provider as well as logic to clean up old templates when new blocks are mined. +use crate::{errors, utils::Id, Error}; +use bitcoin::{ + absolute::LockTime, + blockdata::{ + transaction::{OutPoint, Transaction, TxIn, TxOut, Version}, + witness::Witness, + }, + consensus, + consensus::Decodable, + Amount, Target, +}; +use codec_sv2::binary_sv2::{self, B064K}; +use mining_sv2::NewExtendedMiningJob; +use nohash_hasher::BuildNoHashHasher; +use std::{collections::HashMap, convert::TryInto}; +use template_distribution_sv2::{NewTemplate, SetNewPrevHash}; +use tracing::debug; + +#[derive(Debug)] +pub struct JobsCreators { + lasts_new_template: Vec>, + job_to_template_id: HashMap>, + templte_to_job_id: HashMap>, + ids: Id, + last_target: Target, + last_ntime: Option, + extranonce_len: u8, +} + +/// Transforms the byte array `coinbase_outputs` in a vector of TxOut +/// It assumes the data to be valid data and does not do any kind of check +pub fn tx_outputs_to_costum_scripts(tx_outputs: &[u8]) -> Vec { + let mut txs = vec![]; + let mut cursor = 0; + let mut txouts = &tx_outputs[cursor..]; + while let Ok(out) = TxOut::consensus_decode(&mut 
txouts) { + let len = match out.script_pubkey.len() { + a @ 0..=252 => 8 + 1 + a, + a @ 253..=10000 => 8 + 3 + a, + _ => break, + }; + cursor += len; + txs.push(out) + } + txs +} + +impl JobsCreators { + /// Constructor + pub fn new(extranonce_len: u8) -> Self { + Self { + lasts_new_template: Vec::new(), + job_to_template_id: HashMap::with_hasher(BuildNoHashHasher::default()), + templte_to_job_id: HashMap::with_hasher(BuildNoHashHasher::default()), + ids: Id::new(), + last_target: Target::ZERO, + last_ntime: None, + extranonce_len, + } + } + + /// Get template id from job + pub fn get_template_id_from_job(&self, job_id: u32) -> Option { + self.job_to_template_id.get(&job_id).map(|x| x - 1) + } + + /// Used to create new jobs when a new template arrives + pub fn on_new_template( + &mut self, + template: &mut NewTemplate, + version_rolling_allowed: bool, + mut pool_coinbase_outputs: Vec, + ) -> Result, Error> { + let server_tx_outputs = template.coinbase_tx_outputs.to_vec(); + let mut outputs = tx_outputs_to_costum_scripts(&server_tx_outputs); + pool_coinbase_outputs.append(&mut outputs); + + // This is to make sure that 0 is never used, so we can use 0 for + // set_new_prev_hashes that do not refer to any future job/template if needed + // Then we will do the inverse (-1) where needed + let template_id = template.template_id + 1; + self.lasts_new_template.push(template.as_static()); + let next_job_id = self.ids.next(); + self.job_to_template_id.insert(next_job_id, template_id); + self.templte_to_job_id.insert(template_id, next_job_id); + new_extended_job( + template, + &mut pool_coinbase_outputs, + next_job_id, + version_rolling_allowed, + self.extranonce_len, + self.last_ntime, + ) + } + + pub(crate) fn reset_new_templates(&mut self, template: Option>) { + match template { + Some(t) => self.lasts_new_template = vec![t], + None => self.lasts_new_template = vec![], + } + } + + /// When we get a new `SetNewPrevHash` we need to clear all the other templates and only + 
/// keep the one that matches the template_id of the new prev hash. If none match then + /// we clear all the saved templates. + pub fn on_new_prev_hash(&mut self, prev_hash: &SetNewPrevHash<'static>) -> Option { + self.last_target = + Target::from_be_bytes(prev_hash.target.inner_as_ref().try_into().unwrap()); + self.last_ntime = prev_hash.header_timestamp.into(); // set correct ntime + let template: Vec> = self + .lasts_new_template + .clone() + .into_iter() + .filter(|a| a.template_id == prev_hash.template_id) + .collect(); + match template.len() { + 0 => { + self.reset_new_templates(None); + None + } + 1 => { + self.reset_new_templates(Some(template[0].clone())); + + self.templte_to_job_id + .get(&(prev_hash.template_id + 1)) + .copied() + } + // TODO how many templates can we have at max + _ => todo!("{:#?}", template.len()), + } + } + + /// Returns the latest mining target + pub fn last_target(&self) -> Target { + self.last_target.clone() + } +} + +/// Converts custom job into extended job +pub fn extended_job_from_custom_job( + referenced_job: &mining_sv2::SetCustomMiningJob, + extranonce_len: u8, +) -> Result, Error> { + let mut outputs = + tx_outputs_to_costum_scripts(referenced_job.coinbase_tx_outputs.clone().as_ref()); + + let mut template_value = 0; + for output in &outputs { + template_value += output.value.to_sat(); + } + + let mut template = NewTemplate { + template_id: 0, + future_template: false, + version: referenced_job.version, + coinbase_tx_version: referenced_job.coinbase_tx_version, + coinbase_prefix: referenced_job.coinbase_prefix.clone(), + coinbase_tx_input_sequence: referenced_job.coinbase_tx_input_n_sequence, + coinbase_tx_value_remaining: template_value, + coinbase_tx_outputs_count: outputs.len() as u32, + coinbase_tx_outputs: referenced_job.coinbase_tx_outputs.clone(), + coinbase_tx_locktime: referenced_job.coinbase_tx_locktime, + merkle_path: referenced_job.merkle_path.clone(), + }; + new_extended_job( + &mut template, + &mut outputs, 
+ 0, + true, + extranonce_len, + Some(referenced_job.min_ntime), + ) +} + +// Returns an extended job given the provided template from the Template Provider and other +// Pool role related fields. +// +// Pool related arguments: +// +// * `coinbase_outputs`: coinbase output transactions specified by the pool. +// * `job_id`: incremented job identifier specified by the pool. +// * `version_rolling_allowed`: boolean specified by the channel. +// * `extranonce_len`: extranonce length specified by the channel. +fn new_extended_job( + new_template: &mut NewTemplate, + coinbase_outputs: &mut [TxOut], + job_id: u32, + version_rolling_allowed: bool, + extranonce_len: u8, + ntime: Option, +) -> Result, Error> { + coinbase_outputs[0].value = match new_template.coinbase_tx_value_remaining.checked_mul(1) { + //check that value_remaining is updated by TP + Some(result) => Amount::from_sat(result), + None => return Err(Error::ValueRemainingNotUpdated), + }; + let tx_version = new_template + .coinbase_tx_version + .try_into() + .map_err(|_| Error::TxVersionTooBig)?; + + let script_sig_prefix = new_template.coinbase_prefix.to_vec(); + let script_sig_prefix_len = script_sig_prefix.len(); + + let coinbase = coinbase( + script_sig_prefix, + tx_version, + new_template.coinbase_tx_locktime, + new_template.coinbase_tx_input_sequence, + coinbase_outputs, + extranonce_len, + )?; + + let min_ntime = binary_sv2::Sv2Option::new(if new_template.future_template { + None + } else { + ntime + }); + + let new_extended_mining_job: NewExtendedMiningJob<'static> = NewExtendedMiningJob { + channel_id: 0, + job_id, + min_ntime, + version: new_template.version, + version_rolling_allowed, + merkle_path: new_template.merkle_path.clone().into_static(), + coinbase_tx_prefix: coinbase_tx_prefix(&coinbase, script_sig_prefix_len)?, + coinbase_tx_suffix: coinbase_tx_suffix(&coinbase, extranonce_len, script_sig_prefix_len)?, + }; + + debug!( + "New extended mining job created: {:?}", + new_extended_mining_job + 
); + Ok(new_extended_mining_job) +} + +// Used to extract the coinbase transaction prefix for extended jobs +// so the extranonce search space can be introduced +fn coinbase_tx_prefix( + coinbase: &Transaction, + script_sig_prefix_len: usize, +) -> Result, Error> { + let encoded = consensus::serialize(coinbase); + // If script_prefix_len is not 0 we are not in a test environment and the coinbase will have the + // 0 witness + let segwit_bytes = match script_sig_prefix_len { + 0 => 0, + _ => 2, + }; + let index = 4 // tx version + + segwit_bytes + + 1 // number of inputs TODO can be also 3 + + 32 // prev OutPoint + + 4 // index + + 1 // bytes in script TODO can be also 3 + + script_sig_prefix_len; // script_sig_prefix + let r = encoded[0..index].to_vec(); + r.try_into().map_err(Error::BinarySv2Error) +} + +// Used to extract the coinbase transaction suffix for extended jobs +// so the extranonce search space can be introduced +fn coinbase_tx_suffix( + coinbase: &Transaction, + extranonce_len: u8, + script_sig_prefix_len: usize, +) -> Result, Error> { + let encoded = consensus::serialize(coinbase); + // If script_sig_prefix_len is not 0 we are not in a test environment and the coinbase will have the + // 0 witness + let segwit_bytes = match script_sig_prefix_len { + 0 => 0, + _ => 2, + }; + let r = encoded[4 // tx version + + segwit_bytes + + 1 // number of inputs TODO can be also 3 + + 32 // prev OutPoint + + 4 // index + + 1 // bytes in script TODO can be also 3 + + script_sig_prefix_len // script_sig_prefix + + (extranonce_len as usize)..] 
+ .to_vec(); + r.try_into().map_err(Error::BinarySv2Error) +} + +// try to build a Transaction coinbase +fn coinbase( + script_sig_prefix: Vec, + version: i32, + lock_time: u32, + sequence: u32, + coinbase_outputs: &[TxOut], + extranonce_len: u8, +) -> Result { + // If script_sig_prefix_len is not 0 we are not in a test environment and the coinbase have the + // 0 witness + let witness = match script_sig_prefix.len() { + 0 => Witness::from(vec![] as Vec>), + _ => Witness::from(vec![vec![0; 32]]), + }; + let mut script_sig = script_sig_prefix; + script_sig.extend_from_slice(&vec![0; extranonce_len as usize]); + let tx_in = TxIn { + previous_output: OutPoint::null(), + script_sig: script_sig.into(), + sequence: bitcoin::Sequence(sequence), + witness, + }; + Ok(Transaction { + version: Version::non_standard(version), + lock_time: LockTime::from_consensus(lock_time), + input: vec![tx_in], + output: coinbase_outputs.to_vec(), + }) +} + +/// Helper type to strip a segwit data from the coinbase_tx_prefix and coinbase_tx_suffix +/// to ensure miners are hashing with the correct coinbase +pub fn extended_job_to_non_segwit( + job: NewExtendedMiningJob<'static>, + full_extranonce_len: usize, +) -> Result, Error> { + let mut encoded = job.coinbase_tx_prefix.to_vec(); + // just add empty extranonce space so it can be deserialized. 
The real extranonce + // should be inserted based on the miner's shares + let extranonce = vec![0_u8; full_extranonce_len]; + encoded.extend_from_slice(&extranonce[..]); + encoded.extend_from_slice(job.coinbase_tx_suffix.inner_as_ref()); + let coinbase = consensus::deserialize(&encoded).map_err(|_| Error::InvalidCoinbase)?; + let stripped_tx = StrippedCoinbaseTx::from_coinbase(coinbase, full_extranonce_len)?; + + Ok(NewExtendedMiningJob { + channel_id: job.channel_id, + job_id: job.job_id, + min_ntime: job.min_ntime, + version: job.version, + version_rolling_allowed: job.version_rolling_allowed, + merkle_path: job.merkle_path, + coinbase_tx_prefix: stripped_tx.into_coinbase_tx_prefix()?, + coinbase_tx_suffix: stripped_tx.into_coinbase_tx_suffix()?, + }) +} +// Helper type to strip a segwit data from the coinbase_tx_prefix and coinbase_tx_suffix +// to ensure miners are hashing with the correct coinbase +struct StrippedCoinbaseTx { + version: u32, + inputs: Vec>, + outputs: Vec>, + lock_time: u32, + // helper field + bip141_bytes_len: usize, +} + +impl StrippedCoinbaseTx { + // create + fn from_coinbase(tx: Transaction, full_extranonce_len: usize) -> Result { + let bip141_bytes_len = tx + .input + .last() + .ok_or(Error::BadPayloadSize)? 
+ .script_sig + .len() + - full_extranonce_len; + Ok(Self { + version: tx.version.0 as u32, + inputs: tx + .input + .iter() + .map(|txin| { + let mut ser: Vec = vec![]; + ser.extend_from_slice(txin.previous_output.txid.as_ref()); + ser.extend_from_slice(&txin.previous_output.vout.to_le_bytes()); + ser.push(txin.script_sig.len() as u8); + ser.extend_from_slice(txin.script_sig.as_bytes()); + ser.extend_from_slice(&txin.sequence.0.to_le_bytes()); + ser + }) + .collect(), + outputs: tx.output.iter().map(consensus::serialize).collect(), + lock_time: tx.lock_time.to_consensus_u32(), + bip141_bytes_len, + }) + } + + // The coinbase tx prefix is the LE bytes concatenation of the tx version and all + // of the tx inputs minus the 32 bytes after the script_sig_prefix bytes + // and the last input's sequence (used as the first entry in the coinbase tx suffix). + // The last 32 bytes after the bip34 bytes in the script will be used to allow extranonce + // space for the miner. We remove the bip141 marker and flag since it is only used for + // computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root + // clippy allow because we don't want to consume self + #[allow(clippy::wrong_self_convention)] + fn into_coinbase_tx_prefix(&self) -> Result, errors::Error> { + let mut inputs = self.inputs.clone(); + let last_input = inputs.last_mut().ok_or(Error::BadPayloadSize)?; + let new_last_input_len = + 32 // outpoint + + 4 // vout + + 1 // script length byte -> TODO can be also 3 (based on TODO in `coinbase_tx_prefix()`) + + self.bip141_bytes_len // space for bip34 bytes + ; + last_input.truncate(new_last_input_len); + let mut prefix: Vec = vec![]; + prefix.extend_from_slice(&self.version.to_le_bytes()); + prefix.push(self.inputs.len() as u8); + prefix.extend_from_slice(&inputs.concat()); + prefix.try_into().map_err(Error::BinarySv2Error) + } + + // This coinbase tx suffix is the sequence of the last tx input plus + // the serialized tx outputs and the 
lock time. Note we do not use the witnesses + (placed between txouts and lock time) since it is only used for + computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root + clippy allow because we don't want to consume self + #[allow(clippy::wrong_self_convention)] + fn into_coinbase_tx_suffix(&self) -> Result, errors::Error> { + let mut suffix: Vec = vec![]; + let last_input = self.inputs.last().ok_or(Error::BadPayloadSize)?; + // only take the last input's sequence u32 (bytes after the extranonce space) + let last_input_sequence = &last_input[last_input.len() - 4..]; + suffix.extend_from_slice(last_input_sequence); + suffix.push(self.outputs.len() as u8); + suffix.extend_from_slice(&self.outputs.concat()); + suffix.extend_from_slice(&self.lock_time.to_le_bytes()); + suffix.try_into().map_err(Error::BinarySv2Error) + } +} + +// Test +#[cfg(test)] + +pub mod tests { + use super::*; + use crate::utils::merkle_root_from_path; + #[cfg(feature = "prop_test")] + use codec_sv2::binary_sv2::u256_from_int; + use quickcheck::{Arbitrary, Gen}; + use std::{cmp, vec}; + + #[cfg(feature = "prop_test")] + use std::borrow::BorrowMut; + + use bitcoin::{consensus::Encodable, secp256k1::Secp256k1, Network, PrivateKey, PublicKey}; + + pub fn template_from_gen(g: &mut Gen) -> NewTemplate<'static> { + let mut coinbase_prefix_gen = Gen::new(255); + let mut coinbase_prefix: vec::Vec = vec::Vec::new(); + + let max_num_for_script_sig_prefix = 253; + let prefix_len = cmp::min(u8::arbitrary(&mut coinbase_prefix_gen), 6); + coinbase_prefix.push(prefix_len); + coinbase_prefix.resize_with(prefix_len as usize + 2, || { + cmp::min( + u8::arbitrary(&mut coinbase_prefix_gen), + max_num_for_script_sig_prefix, + ) + }); + let coinbase_prefix: binary_sv2::B0255 = coinbase_prefix.try_into().unwrap(); + + let mut coinbase_tx_outputs_gen = Gen::new(32); + let mut coinbase_tx_outputs_inner: vec::Vec = vec::Vec::new(); + coinbase_tx_outputs_inner.resize_with(32, || 
u8::arbitrary(&mut coinbase_tx_outputs_gen)); + let coinbase_tx_outputs: binary_sv2::B064K = coinbase_tx_outputs_inner.try_into().unwrap(); + + let mut merkle_path_inner_gen = Gen::new(32); + let mut merkle_path_inner: vec::Vec = vec::Vec::new(); + merkle_path_inner.resize_with(32, || u8::arbitrary(&mut merkle_path_inner_gen)); + let merkle_path_inner: binary_sv2::U256 = merkle_path_inner.try_into().unwrap(); + let merkle_path: binary_sv2::Seq0255 = vec![merkle_path_inner].into(); + + NewTemplate { + template_id: u64::arbitrary(g), + future_template: bool::arbitrary(g), + version: u32::arbitrary(g), + coinbase_tx_version: 2, + coinbase_prefix, + coinbase_tx_input_sequence: u32::arbitrary(g), + coinbase_tx_value_remaining: u64::arbitrary(g), + coinbase_tx_outputs_count: 0, + coinbase_tx_outputs, + coinbase_tx_locktime: u32::arbitrary(g), + merkle_path, + } + } + + const PRIVATE_KEY_BTC: [u8; 32] = [34; 32]; + const NETWORK: Network = Network::Testnet; + + #[cfg(feature = "prop_test")] + const BLOCK_REWARD: u64 = 625_000_000_000; + + pub fn new_pub_key() -> PublicKey { + let priv_k = PrivateKey::from_slice(&PRIVATE_KEY_BTC, NETWORK).unwrap(); + let secp = Secp256k1::default(); + + PublicKey::from_private_key(&secp, &priv_k) + } + + #[cfg(feature = "prop_test")] + use bitcoin::ScriptBuf; + + // Test job_id_from_template + #[cfg(feature = "prop_test")] + #[quickcheck_macros::quickcheck] + fn test_job_id_from_template(mut template: NewTemplate<'static>) { + let mut prefix = template.coinbase_prefix.to_vec(); + if prefix.len() > 0 { + let len = u8::min(prefix[0], 6); + prefix[0] = len; + prefix.resize(len as usize + 2, 0); + template.coinbase_prefix = prefix.try_into().unwrap(); + }; + let out = TxOut { + value: Amount::from_sat(BLOCK_REWARD), + script_pubkey: ScriptBuf::new_p2pk(&new_pub_key()), + }; + let mut jobs_creators = JobsCreators::new(32); + + let job = jobs_creators + .on_new_template(template.borrow_mut(), false, vec![out]) + .unwrap(); + + assert_eq!( + 
jobs_creators.get_template_id_from_job(job.job_id), + Some(template.template_id) + ); + + // Assert returns None if no match + assert_eq!(jobs_creators.get_template_id_from_job(70), None); + } + + // Test reset new template + #[cfg(feature = "prop_test")] + #[quickcheck_macros::quickcheck] + fn test_reset_new_template(mut template: NewTemplate<'static>) { + let out = TxOut { + value: Amount::from_sat(BLOCK_REWARD), + script_pubkey: ScriptBuf::new_p2pk(&new_pub_key()), + }; + let mut jobs_creators = JobsCreators::new(32); + + assert_eq!(jobs_creators.lasts_new_template.len(), 0); + + let _ = jobs_creators.on_new_template(template.borrow_mut(), false, vec![out]); + + assert_eq!(jobs_creators.lasts_new_template.len(), 1); + assert_eq!(jobs_creators.lasts_new_template[0], template); + + //Create a 2nd template + let mut template2 = template_from_gen(&mut Gen::new(255)); + template2.template_id = template.template_id.checked_sub(1).unwrap_or(0); + + // Reset new template + jobs_creators.reset_new_templates(Some(template2.clone())); + + // Should be pointing at new template + assert_eq!(jobs_creators.lasts_new_template.len(), 1); + assert_eq!(jobs_creators.lasts_new_template[0], template2); + + // Reset new template + jobs_creators.reset_new_templates(None); + + // Should have cleared all saved templates + assert_eq!(jobs_creators.lasts_new_template.len(), 0); + } + + // Test on_new_prev_hash + #[cfg(feature = "prop_test")] + #[quickcheck_macros::quickcheck] + fn test_on_new_prev_hash(mut template: NewTemplate<'static>) { + let out = TxOut { + value: Amount::from_sat(BLOCK_REWARD), + script_pubkey: ScriptBuf::new_p2pk(&new_pub_key()), + }; + let mut jobs_creators = JobsCreators::new(32); + + //Create a template + let _ = jobs_creators.on_new_template(template.borrow_mut(), false, vec![out]); + let test_id = template.template_id; + + // Create a SetNewPrevHash with matching template_id + let prev_hash = SetNewPrevHash { + template_id: test_id, + prev_hash: 
u256_from_int(45_u32), + header_timestamp: 0, + n_bits: 0, + target: ([0_u8; 32]).try_into().unwrap(), + }; + + jobs_creators.on_new_prev_hash(&prev_hash); + + //Validate that we still have the same template loaded as there were matching templateIds + assert_eq!(jobs_creators.lasts_new_template.len(), 1); + assert_eq!(jobs_creators.lasts_new_template[0], template); + + // Create a SetNewPrevHash with matching template_id + let test_id_2 = test_id.wrapping_add(1); + let prev_hash2 = SetNewPrevHash { + template_id: test_id_2, + prev_hash: u256_from_int(45_u32), + header_timestamp: 0, + n_bits: 0, + target: ([0_u8; 32]).try_into().unwrap(), + }; + + jobs_creators.on_new_prev_hash(&prev_hash2); + + //Validate that templates were cleared as we got a new templateId in setNewPrevHash + assert_eq!(jobs_creators.lasts_new_template.len(), 0); + } + + #[quickcheck_macros::quickcheck] + fn it_parse_valid_tx_outs( + mut hash1: Vec, + mut hash2: Vec, + value1: u64, + value2: u64, + size1: u8, + size2: u8, + ) { + hash1.resize(size1 as usize + 2, 0); + hash2.resize(size2 as usize + 2, 0); + let tx1 = TxOut { + value: Amount::from_sat(value1), + script_pubkey: hash1.into(), + }; + let tx2 = TxOut { + value: Amount::from_sat(value2), + script_pubkey: hash2.into(), + }; + let mut encoded1 = vec![]; + let mut encoded2 = vec![]; + tx1.consensus_encode(&mut encoded1).unwrap(); + tx2.consensus_encode(&mut encoded2).unwrap(); + let mut encoded = vec![]; + encoded.append(&mut encoded1.clone()); + encoded.append(&mut encoded2.clone()); + let outs = tx_outputs_to_costum_scripts(&encoded[..]); + assert!(outs[0] == tx1); + assert!(outs[1] == tx2); + } + + // test that witness stripped tx id matches that of the txid of the coinbase + #[test] + fn stripped_tx_id() { + let encoded: &[u8] = &[ + 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 36, 2, 107, 22, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, + 255, 255, 2, 0, 0, 0, 0, 0, 0, 0, 0, 67, 65, 4, 70, 109, 127, 202, 229, 99, 229, 203, + 9, 160, 209, 135, 11, 181, 128, 52, 72, 4, 97, 120, 121, 161, 73, 73, 207, 34, 40, 95, + 27, 174, 63, 39, 103, 40, 23, 108, 60, 100, 49, 248, 238, 218, 69, 56, 220, 55, 200, + 101, 226, 120, 79, 58, 158, 119, 208, 68, 243, 62, 64, 119, 151, 225, 39, 138, 172, 0, + 0, 0, 0, 0, 0, 0, 0, 38, 106, 36, 170, 33, 169, 237, 226, 246, 28, 63, 113, 209, 222, + 253, 63, 169, 153, 223, 163, 105, 83, 117, 92, 105, 6, 137, 121, 153, 98, 180, 139, + 235, 216, 54, 151, 78, 140, 249, 1, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let coinbase: Transaction = consensus::deserialize(encoded).unwrap(); + let stripped = StrippedCoinbaseTx::from_coinbase(coinbase.clone(), 32).unwrap(); + let prefix = stripped.into_coinbase_tx_prefix().unwrap().to_vec(); + let suffix = stripped.into_coinbase_tx_suffix().unwrap().to_vec(); + let extranonce = &[0_u8; 32]; + let path: &[binary_sv2::U256] = &[]; + let stripped_merkle_root = + merkle_root_from_path(&prefix[..], &suffix[..], extranonce, path).unwrap(); + let txid = coinbase.compute_txid(); + let txid_bytes: &[u8; 32] = txid.as_ref(); + let og_merkle_root = txid_bytes.to_vec(); + assert!( + stripped_merkle_root == og_merkle_root, + "stripped tx hash is not the same as bitcoin crate" + ); + } + #[test] + fn stripped_tx_id_braiins_example() { + let mut encoded = vec![]; + let coinbase_prefix = &[ + 1_u8, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 75, 3, 176, 235, 11, 250, 190, 109, 109, + 50, 247, 22, 140, 225, 176, 1, 231, 78, 225, 50, 226, 181, 165, 55, 145, 137, 154, 46, + 9, 44, 65, 72, 231, 173, 111, 131, 26, 81, 223, 179, 225, 1, 0, 0, 0, 0, 0, 0, 0, + ]; + let coinbase_suffix = &[ + 245_u8, 192, 42, 69, 19, 47, 115, 108, 117, 115, 
104, 47, 0, 0, 0, 0, 3, 78, 213, 148, + 39, 0, 0, 0, 0, 25, 118, 169, 20, 124, 21, 78, 209, 220, 89, 96, 158, 61, 38, 171, 178, + 223, 46, 163, 213, 135, 205, 140, 65, 136, 172, 0, 0, 0, 0, 0, 0, 0, 0, 44, 106, 76, + 41, 82, 83, 75, 66, 76, 79, 67, 75, 58, 214, 9, 239, 96, 221, 25, 108, 87, 155, 50, 55, + 47, 91, 115, 172, 168, 0, 12, 86, 195, 26, 241, 10, 22, 190, 151, 254, 24, 0, 78, 106, + 26, 0, 0, 0, 0, 0, 0, 0, 0, 38, 106, 36, 170, 33, 169, 237, 103, 66, 68, 105, 2, 55, + 65, 241, 216, 46, 82, 223, 150, 0, 97, 103, 2, 82, 186, 233, 145, 90, 210, 231, 35, + 100, 107, 52, 171, 233, 50, 200, 0, 0, 0, 0, + ]; + let extranonce = [0_u8; 15]; // braiins pool requires 15 bytes for extranonce + encoded.extend_from_slice(coinbase_prefix); + let mut encoded_clone = encoded.clone(); + encoded_clone.extend_from_slice(&extranonce); + encoded_clone.extend_from_slice(coinbase_suffix); + // let mut i = 1; + // while let Err(_) = Transaction::deserialize(&encoded_clone) { + // encoded_clone = encoded.clone(); + // extranonce.push(0); + // encoded_clone.extend_from_slice(&extranonce[..]); + // encoded_clone.extend_from_slice(coinbase_suffix); + // i+=1; + // } + // println!("SIZE: {:?}", i); + let _tx: Transaction = consensus::deserialize(&encoded_clone).unwrap(); + } +} diff --git a/roles/translator/Cargo.toml b/roles/translator/Cargo.toml index 529f8ceae1..9d0a655b2d 100644 --- a/roles/translator/Cargo.toml +++ b/roles/translator/Cargo.toml @@ -20,6 +20,7 @@ name = "translator_sv2" path = "src/main.rs" [dependencies] +bitcoin = "0.32.5" stratum-apps = { path = "../stratum-apps", features = ["translator"] } async-channel = "1.5.1" serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] } diff --git a/roles/translator/src/lib/sv1/downstream/data.rs b/roles/translator/src/lib/sv1/downstream/data.rs index cf09dbf0a3..c28a60b44d 100644 --- a/roles/translator/src/lib/sv1/downstream/data.rs +++ b/roles/translator/src/lib/sv1/downstream/data.rs @@ -1,13 
+1,11 @@ +use bitcoin::Target; use std::{ cell::RefCell, sync::{atomic::AtomicBool, Arc}, }; use stratum_apps::{ custom_mutex::Mutex, - stratum_core::{ - mining_sv2::Target, - sv1_api::{json_rpc, utils::HexU32Be}, - }, + stratum_core::sv1_api::{json_rpc, utils::HexU32Be}, }; use tracing::debug; @@ -89,7 +87,7 @@ impl DownstreamData { } pub fn set_upstream_target(&mut self, upstream_target: Target) { - self.upstream_target = Some(upstream_target.clone()); + self.upstream_target = Some(upstream_target); debug!( "Downstream {}: Set upstream target to {:?}", self.downstream_id, upstream_target diff --git a/roles/translator/src/lib/sv1/downstream/downstream.rs b/roles/translator/src/lib/sv1/downstream/downstream.rs index 3190fbf9ba..eeb11639e8 100644 --- a/roles/translator/src/lib/sv1/downstream/downstream.rs +++ b/roles/translator/src/lib/sv1/downstream/downstream.rs @@ -10,15 +10,13 @@ use crate::{ utils::ShutdownMessage, }; use async_channel::{Receiver, Sender}; +use bitcoin::Target; use std::sync::Arc; use stratum_apps::{ custom_mutex::Mutex, - stratum_core::{ - mining_sv2::Target, - sv1_api::{ - json_rpc::{self, Message}, - server_to_client, IsServer, - }, + stratum_core::sv1_api::{ + json_rpc::{self, Message}, + server_to_client, IsServer, }, }; use tokio::sync::{broadcast, mpsc}; diff --git a/roles/translator/src/lib/sv1/downstream/message_handler.rs b/roles/translator/src/lib/sv1/downstream/message_handler.rs index f92f751ef4..6ab75c619e 100644 --- a/roles/translator/src/lib/sv1/downstream/message_handler.rs +++ b/roles/translator/src/lib/sv1/downstream/message_handler.rs @@ -67,7 +67,7 @@ impl IsServer<'static> for DownstreamData { ); let is_valid_share = validate_sv1_share( request, - self.target.clone(), + self.target, self.extranonce1.clone(), self.version_rolling_mask.clone(), self.sv1_server_data.clone(), diff --git a/roles/translator/src/lib/sv1/sv1_server/data.rs b/roles/translator/src/lib/sv1/sv1_server/data.rs index a038c8b2fa..f0f20452df 100644 --- 
a/roles/translator/src/lib/sv1/sv1_server/data.rs +++ b/roles/translator/src/lib/sv1/sv1_server/data.rs @@ -1,11 +1,11 @@ use crate::sv1::downstream::downstream::Downstream; +use bitcoin::Target; use std::{ collections::HashMap, sync::{atomic::AtomicU32, Arc, RwLock}, }; use stratum_apps::stratum_core::{ - channels_sv2::vardiff::classic::VardiffState, - mining_sv2::{SetNewPrevHash, Target}, + channels_sv2::vardiff::classic::VardiffState, mining_sv2::SetNewPrevHash, sv1_api::server_to_client, }; diff --git a/roles/translator/src/lib/sv1/sv1_server/difficulty_manager.rs b/roles/translator/src/lib/sv1/sv1_server/difficulty_manager.rs index 0774c99c02..08f750051a 100644 --- a/roles/translator/src/lib/sv1/sv1_server/difficulty_manager.rs +++ b/roles/translator/src/lib/sv1/sv1_server/difficulty_manager.rs @@ -3,12 +3,13 @@ use crate::{ utils::ShutdownMessage, }; use async_channel::Sender; +use bitcoin::Target; use std::{collections::HashMap, sync::Arc, time::Duration}; use stratum_apps::{ custom_mutex::Mutex, stratum_core::{ channels_sv2::{target::hash_rate_to_target, Vardiff}, - mining_sv2::{SetTarget, Target, UpdateChannel}, + mining_sv2::{SetTarget, UpdateChannel}, parsers_sv2::Mining, stratum_translation::sv2_to_sv1::build_sv1_set_difficulty_from_sv2_target, sv1_api::json_rpc, @@ -149,8 +150,8 @@ impl DifficultyManager { d.hashrate.unwrap(), /* It's safe to unwrap because we know that * the downstream has a hashrate (we are * doing vardiff) */ - d.target.clone(), - d.upstream_target.clone(), + d.target, + d.upstream_target, )) }) }) @@ -170,7 +171,7 @@ impl DifficultyManager { // Calculate new target based on new hashrate let new_target: Target = match hash_rate_to_target(new_hashrate as f64, self.shares_per_minute as f64) { - Ok(target) => target.into(), + Ok(target) => target, Err(e) => { error!( "Failed to calculate target for hashrate {}: {:?}", @@ -184,14 +185,14 @@ impl DifficultyManager { _ = sv1_server_data.safe_lock(|dmap| { if let Some(d) = 
dmap.downstreams.get(downstream_id) { _ = d.downstream_data.safe_lock(|d| { - d.set_pending_target(new_target.clone()); + d.set_pending_target(new_target); d.set_pending_hashrate(Some(new_hashrate)); }); } }); // All updates will be sent as UpdateChannel messages - all_updates.push((*downstream_id, channel_id, new_target.clone(), new_hashrate)); + all_updates.push((*downstream_id, channel_id, new_target, new_hashrate)); // Determine if we should send set_difficulty immediately or wait match upstream_target { @@ -203,11 +204,7 @@ impl DifficultyManager { "✅ Target comparison: new_target ({:?}) >= upstream_target ({:?}) for downstream {}, will send set_difficulty immediately", new_target, upstream_target, downstream_id ); - immediate_updates.push(( - channel_id, - Some(*downstream_id), - new_target.clone(), - )); + immediate_updates.push((channel_id, Some(*downstream_id), new_target)); } else { // Case 2: new_target < upstream_target, delay set_difficulty until // SetTarget @@ -219,7 +216,7 @@ impl DifficultyManager { sv1_server_data.super_safe_lock(|data| { data.pending_target_updates.push(PendingTargetUpdate { downstream_id: *downstream_id, - new_target: new_target.clone(), + new_target, new_hashrate, }); }); @@ -231,11 +228,7 @@ impl DifficultyManager { "No upstream target set for downstream {}, will send set_difficulty immediately", downstream_id ); - immediate_updates.push(( - channel_id, - Some(*downstream_id), - new_target.clone(), - )); + immediate_updates.push((channel_id, Some(*downstream_id), new_target)); } } } @@ -295,7 +288,7 @@ impl DifficultyManager { .map(|downstream| { downstream.downstream_data.super_safe_lock(|d| { // Use pending_target if available, otherwise current target - d.pending_target.as_ref().unwrap_or(&d.target).clone() + *d.pending_target.as_ref().unwrap_or(&d.target) }) }) .min() @@ -320,7 +313,7 @@ impl DifficultyManager { let update_channel = UpdateChannel { channel_id: *channel_id, nominal_hash_rate: total_hashrate, - 
maximum_target: min_target.clone().into(), + maximum_target: min_target.to_le_bytes().into(), }; debug!( @@ -343,7 +336,7 @@ impl DifficultyManager { let update_channel = UpdateChannel { channel_id: *channel_id, nominal_hash_rate: *new_hashrate, - maximum_target: new_target.clone().into(), + maximum_target: new_target.to_le_bytes().into(), }; debug!( @@ -376,7 +369,8 @@ impl DifficultyManager { sv1_server_to_downstream_sender: &broadcast::Sender<(u32, Option, json_rpc::Message)>, is_aggregated: bool, ) { - let new_upstream_target: Target = set_target.maximum_target.clone().into(); + let new_upstream_target = + Target::from_le_bytes(set_target.maximum_target.inner_as_ref().try_into().unwrap()); debug!( "Received SetTarget for channel {}: new_upstream_target = {:?}", set_target.channel_id, new_upstream_target @@ -422,7 +416,7 @@ impl DifficultyManager { _ = sv1_server_data.safe_lock(|data| { if let Some(downstream) = data.downstreams.get(&downstream_id) { _ = downstream.downstream_data.safe_lock(|d| { - d.set_upstream_target(new_upstream_target.clone()); + d.set_upstream_target(new_upstream_target); }); } }); @@ -476,7 +470,7 @@ impl DifficultyManager { _ = sv1_server_data.safe_lock(|data| { if let Some(downstream) = data.downstreams.get(&downstream_id) { _ = downstream.downstream_data.safe_lock(|d| { - d.set_upstream_target(new_upstream_target.clone()); + d.set_upstream_target(new_upstream_target); }); } }); @@ -557,7 +551,7 @@ impl DifficultyManager { if let Some(channel_id) = channel_id { // Send set_difficulty message if let Ok(set_difficulty_msg) = - build_sv1_set_difficulty_from_sv2_target(pending_update.new_target.clone()) + build_sv1_set_difficulty_from_sv2_target(pending_update.new_target) { if let Err(e) = sv1_server_to_downstream_sender.send(( channel_id, @@ -616,7 +610,7 @@ impl DifficultyManager { .map(|downstream| { downstream.downstream_data.super_safe_lock(|d| { // Use pending_target if available, otherwise current target - 
d.pending_target.as_ref().unwrap_or(&d.target).clone() + *d.pending_target.as_ref().unwrap_or(&d.target) }) }) .min(); @@ -633,7 +627,7 @@ impl DifficultyManager { let update_channel = UpdateChannel { channel_id, nominal_hash_rate: total_hashrate, - maximum_target: min_target.clone().into(), + maximum_target: min_target.to_le_bytes().into(), }; if let Err(e) = channel_manager_sender @@ -742,7 +736,7 @@ mod tests { #[test] fn test_get_pending_difficulty_updates_basic() { let sv1_server_data = create_test_sv1_server_data(); - let upstream_target: Target = hash_rate_to_target(150.0, 5.0).unwrap().into(); + let upstream_target: Target = hash_rate_to_target(150.0, 5.0).unwrap(); // Test with empty pending updates let applicable_updates = DifficultyManager::get_pending_difficulty_updates( diff --git a/roles/translator/src/lib/sv1/sv1_server/sv1_server.rs b/roles/translator/src/lib/sv1/sv1_server/sv1_server.rs index 373c2af3a7..9d16a79ab1 100644 --- a/roles/translator/src/lib/sv1/sv1_server/sv1_server.rs +++ b/roles/translator/src/lib/sv1/sv1_server/sv1_server.rs @@ -13,6 +13,7 @@ use crate::{ utils::ShutdownMessage, }; use async_channel::{Receiver, Sender}; +use bitcoin::Target; use std::{ collections::HashMap, net::SocketAddr, @@ -27,7 +28,7 @@ use stratum_apps::{ stratum_core::{ binary_sv2::Str0255, channels_sv2::{target::hash_rate_to_target, Vardiff, VardiffState}, - mining_sv2::{CloseChannel, SetTarget, Target}, + mining_sv2::{CloseChannel, SetTarget}, parsers_sv2::Mining, stratum_translation::{ sv1_to_sv2::{ @@ -144,8 +145,7 @@ impl Sv1Server { .min_individual_miner_hashrate as f64, self.config.downstream_difficulty_config.shares_per_minute as f64, ) - .unwrap() - .into(); + .unwrap(); // Spawn vardiff loop only if enabled if self.config.downstream_difficulty_config.enable_vardiff { @@ -271,7 +271,7 @@ impl Sv1Server { connection.receiver().clone(), self.sv1_server_channel_state.downstream_to_sv1_server_sender.clone(), 
self.sv1_server_channel_state.sv1_server_to_downstream_sender.clone().subscribe(), - first_target.clone(), + first_target, Some(self.config.downstream_difficulty_config.min_individual_miner_hashrate), self.sv1_server_data.clone(), )); @@ -316,7 +316,7 @@ impl Sv1Server { } res = Self::handle_upstream_message( Arc::clone(&self), - first_target.clone(), + first_target, ) => { if let Err(e) = res { handle_error(&sv1_status_sender, e).await; @@ -467,13 +467,14 @@ impl Sv1Server { .sv1_server_data .super_safe_lock(|v| v.downstreams.clone()); if let Some(downstream) = Self::get_downstream(downstream_id, downstreams) { - let initial_target: Target = m.target.clone().into(); + let initial_target = + Target::from_le_bytes(m.target.inner_as_ref().try_into().unwrap()); downstream.downstream_data.safe_lock(|d| { d.extranonce1 = m.extranonce_prefix.to_vec(); d.extranonce2_len = m.extranonce_size.into(); d.channel_id = Some(m.channel_id); // Set the initial upstream target from OpenExtendedMiningChannelSuccess - d.set_upstream_target(initial_target.clone()); + d.set_upstream_target(initial_target); })?; // Process all queued messages now that channel is established @@ -644,19 +645,17 @@ impl Sv1Server { let vardiff_enabled = config.enable_vardiff; let max_target = if vardiff_enabled { - hash_rate_to_target(hashrate, shares_per_min) - .unwrap() - .into() + hash_rate_to_target(hashrate, shares_per_min).unwrap() } else { // If translator doesn't manage vardiff, we rely on upstream to do that, // so we give it more freedom by setting max_target to maximum possible value - Target::from([0xff; 32]) + Target::from_le_bytes([0xff; 32]) }; // Store the initial target for use when no downstreams remain self.sv1_server_data.super_safe_lock(|data| { if data.initial_target.is_none() { - data.initial_target = Some(max_target.clone()); + data.initial_target = Some(max_target); } }); @@ -723,7 +722,8 @@ impl Sv1Server { /// without any variable difficulty logic. 
It respects the aggregated/non-aggregated /// channel configuration. async fn handle_set_target_without_vardiff(&self, set_target: SetTarget<'_>) { - let new_target: Target = set_target.maximum_target.clone().into(); + let new_target = + Target::from_le_bytes(set_target.maximum_target.inner_as_ref().try_into().unwrap()); debug!( "Forwarding SetTarget to downstreams: channel_id={}, target={:?}", set_target.channel_id, new_target @@ -753,14 +753,12 @@ impl Sv1Server { if let Some(channel_id) = channel_id { // Update the downstream's target _ = downstream.downstream_data.safe_lock(|d| { - d.set_upstream_target(target.clone()); - d.set_pending_target(target.clone()); + d.set_upstream_target(target); + d.set_pending_target(target); }); // Send set_difficulty message - if let Ok(set_difficulty_msg) = - build_sv1_set_difficulty_from_sv2_target(target.clone()) - { + if let Ok(set_difficulty_msg) = build_sv1_set_difficulty_from_sv2_target(target) { if let Err(e) = self .sv1_server_channel_state .sv1_server_to_downstream_sender @@ -802,8 +800,8 @@ impl Sv1Server { if let Some((downstream_id, downstream)) = affected_downstream { // Update the downstream's target _ = downstream.downstream_data.safe_lock(|d| { - d.set_upstream_target(target.clone()); - d.set_pending_target(target.clone()); + d.set_upstream_target(target); + d.set_pending_target(target); }); // Send set_difficulty message @@ -925,7 +923,7 @@ mod tests { #[tokio::test] async fn test_send_set_difficulty_to_all_downstreams_empty() { let server = create_test_sv1_server(); - let target: Target = hash_rate_to_target(200.0, 5.0).unwrap().into(); + let target: Target = hash_rate_to_target(200.0, 5.0).unwrap(); // Test with empty downstreams server.send_set_difficulty_to_all_downstreams(target).await; @@ -936,7 +934,7 @@ mod tests { #[tokio::test] async fn test_send_set_difficulty_to_specific_downstream_not_found() { let server = create_test_sv1_server(); - let target: Target = hash_rate_to_target(200.0, 
5.0).unwrap().into(); + let target: Target = hash_rate_to_target(200.0, 5.0).unwrap(); let channel_id = 1u32; // Test with no downstreams @@ -958,11 +956,11 @@ mod tests { let addr = "127.0.0.1:3333".parse().unwrap(); let server = Sv1Server::new(addr, cm_receiver, cm_sender, config); - let target: Target = hash_rate_to_target(200.0, 5.0).unwrap().into(); + let target: Target = hash_rate_to_target(200.0, 5.0).unwrap(); let set_target = SetTarget { channel_id: 1, - maximum_target: target.clone().into(), + maximum_target: target.to_le_bytes().into(), }; // Test should not panic and should handle the message @@ -980,11 +978,11 @@ mod tests { let addr = "127.0.0.1:3333".parse().unwrap(); let server = Sv1Server::new(addr, cm_receiver, cm_sender, config); - let target: Target = hash_rate_to_target(200.0, 5.0).unwrap().into(); + let target: Target = hash_rate_to_target(200.0, 5.0).unwrap(); let set_target = SetTarget { channel_id: 1, - maximum_target: target.clone().into(), + maximum_target: target.to_le_bytes().into(), }; // Test should not panic and should handle the message diff --git a/roles/translator/src/lib/sv2/channel_manager/channel_manager.rs b/roles/translator/src/lib/sv2/channel_manager/channel_manager.rs index 397894c522..e242b32a4e 100644 --- a/roles/translator/src/lib/sv2/channel_manager/channel_manager.rs +++ b/roles/translator/src/lib/sv2/channel_manager/channel_manager.rs @@ -273,13 +273,12 @@ impl ChannelManager { // OpenExtendedMiningChannelSuccess message directly to the sv1 // server let target = self.channel_manager_data.super_safe_lock(|c| { - c.upstream_extended_channel + *c.upstream_extended_channel .as_ref() .unwrap() .read() .unwrap() .get_target() - .clone() }); let new_extranonce_prefix = self.channel_manager_data.super_safe_lock(|c| { @@ -316,7 +315,7 @@ impl ChannelManager { .into_b032() .into_static() .to_vec(), - target.clone(), + target, hashrate, true, new_extranonce_size as u16, @@ -331,7 +330,7 @@ impl ChannelManager { 
OpenExtendedMiningChannelSuccess { request_id: open_channel_msg.request_id, channel_id: next_channel_id, - target: target.clone().into(), + target: target.to_le_bytes().into(), extranonce_size: new_extranonce_size as u16, extranonce_prefix: new_extranonce_prefix.clone().into(), }, diff --git a/roles/translator/src/lib/sv2/channel_manager/message_handler.rs b/roles/translator/src/lib/sv2/channel_manager/message_handler.rs index d5d946576b..350ea49eac 100644 --- a/roles/translator/src/lib/sv2/channel_manager/message_handler.rs +++ b/roles/translator/src/lib/sv2/channel_manager/message_handler.rs @@ -1,3 +1,4 @@ +use bitcoin::Target; use std::sync::{Arc, RwLock}; use crate::{ @@ -75,13 +76,13 @@ impl HandleMiningMessagesFromServerAsync for ChannelManager { m, user_identity, nominal_hashrate ); let extranonce_prefix = m.extranonce_prefix.clone().into_static().to_vec(); - let target = m.target.clone().into_static(); + let target = Target::from_le_bytes(m.target.clone().inner_as_ref().try_into().unwrap()); let version_rolling = true; // we assume this is always true on extended channels let extended_channel = ExtendedChannel::new( m.channel_id, user_identity.clone(), extranonce_prefix.clone(), - target.clone().into(), + target, nominal_hashrate, version_rolling, m.extranonce_size, @@ -134,7 +135,7 @@ impl HandleMiningMessagesFromServerAsync for ChannelManager { m.channel_id, user_identity.clone(), new_extranonce_prefix.clone().into_static().to_vec(), - target.clone().into(), + target, nominal_hashrate, true, new_extranonce_size, @@ -189,7 +190,7 @@ impl HandleMiningMessagesFromServerAsync for ChannelManager { m.channel_id, user_identity.clone(), new_extranonce_prefix.clone().into_static().to_vec(), - target.clone().into(), + target, nominal_hashrate, true, downstream_extranonce_len as u16, @@ -475,7 +476,9 @@ impl HandleMiningMessagesFromServerAsync for ChannelManager { if channel_manager_data.mode == ChannelMode::Aggregated { if let Some(upstream_channel) = 
&channel_manager_data.upstream_extended_channel { if let Ok(mut upstream_extended_channel) = upstream_channel.write() { - upstream_extended_channel.set_target(m.maximum_target.clone().into()); + upstream_extended_channel.set_target(Target::from_le_bytes( + m.maximum_target.inner_as_ref().try_into().unwrap(), + )); } } channel_manager_data @@ -483,13 +486,17 @@ impl HandleMiningMessagesFromServerAsync for ChannelManager { .iter() .for_each(|(_, channel)| { if let Ok(mut channel) = channel.write() { - channel.set_target(m.maximum_target.clone().into()); + channel.set_target(Target::from_le_bytes( + m.maximum_target.inner_as_ref().try_into().unwrap(), + )); } }); } else if let Some(channel) = channel_manager_data.extended_channels.get(&m.channel_id) { if let Ok(mut channel) = channel.write() { - channel.set_target(m.maximum_target.clone().into()); + channel.set_target(Target::from_le_bytes( + m.maximum_target.inner_as_ref().try_into().unwrap(), + )); } } }); diff --git a/roles/translator/src/lib/utils.rs b/roles/translator/src/lib/utils.rs index 75bea673a2..944e70471e 100644 --- a/roles/translator/src/lib/utils.rs +++ b/roles/translator/src/lib/utils.rs @@ -5,7 +5,7 @@ use stratum_apps::{ bitcoin::{ block::{Header, Version}, hashes::Hash, - CompactTarget, TxMerkleNode, + CompactTarget, Target, TxMerkleNode, }, buffer_sv2::Slice, channels_sv2::{ @@ -13,7 +13,6 @@ use stratum_apps::{ target::{bytes_to_hex, u256_to_block_hash}, }, framing_sv2::framing::Frame, - mining_sv2::Target, parsers_sv2::{AnyMessage, CommonMessages}, sv1_api::{client_to_server, utils::HexU32Be}, }, @@ -117,15 +116,11 @@ pub fn validate_sv1_share( // convert the header hash to a target type for easy comparison let hash = header.block_hash(); let raw_hash: [u8; 32] = *hash.to_raw_hash().as_ref(); - let hash_as_target: Target = raw_hash.into(); + let hash_as_target = Target::from_le_bytes(raw_hash); // print hash_as_target and self.target as human readable hex - let hash_as_u256: U256 = 
hash_as_target.clone().into(); - let mut hash_bytes = hash_as_u256.to_vec(); - hash_bytes.reverse(); // Convert to big-endian for display - let target_u256: U256 = target.clone().into(); - let mut target_bytes = target_u256.to_vec(); - target_bytes.reverse(); // Convert to big-endian for display + let hash_bytes = hash_as_target.to_be_bytes(); + let target_bytes = target.to_be_bytes(); debug!( "share validation \nshare:\t\t{}\ndownstream target:\t{}\n", diff --git a/stratum-core/Cargo.toml b/stratum-core/Cargo.toml index 6cab53effd..aeea186af3 100644 --- a/stratum-core/Cargo.toml +++ b/stratum-core/Cargo.toml @@ -22,7 +22,7 @@ parsers_sv2 = { path = "../protocols/v2/parsers-sv2", version = "^0.1.0" } handlers_sv2 = { path = "../protocols/v2/handlers-sv2", version = "^0.2.0" } channels_sv2 = { path = "../protocols/v2/channels-sv2", version = "^2.0.0" } common_messages_sv2 = { path = "../protocols/v2/subprotocols/common-messages", version = "^6.0.0" } -mining_sv2 = { path = "../protocols/v2/subprotocols/mining", version = "^5.0.0" } +mining_sv2 = { path = "../protocols/v2/subprotocols/mining", version = "^6.0.0" } template_distribution_sv2 = { path = "../protocols/v2/subprotocols/template-distribution", version = "^4.0.0" } job_declaration_sv2 = { path = "../protocols/v2/subprotocols/job-declaration", version = "^5.0.0" } sv1_api = { path = "../protocols/v1", version = "^2.1.0", optional = true } diff --git a/test/integration-tests/Cargo.lock b/test/integration-tests/Cargo.lock index add84820d6..54d09132bd 100644 --- a/test/integration-tests/Cargo.lock +++ b/test/integration-tests/Cargo.lock @@ -506,7 +506,7 @@ dependencies = [ "bitcoin", "common_messages_sv2 6.0.2", "job_declaration_sv2 5.0.2", - "mining_sv2 5.0.2", + "mining_sv2 6.0.0", "primitive-types", "template_distribution_sv2 4.0.2", "tracing", @@ -1128,7 +1128,7 @@ dependencies = [ "binary_sv2 5.0.0", "common_messages_sv2 6.0.2", "job_declaration_sv2 5.0.2", - "mining_sv2 5.0.2", + "mining_sv2 6.0.0", 
"parsers_sv2 0.1.2", "template_distribution_sv2 4.0.2", "trait-variant", @@ -1402,6 +1402,7 @@ name = "jd_client_sv2" version = "0.1.0" dependencies = [ "async-channel", + "bitcoin", "clap", "config", "serde", @@ -1584,7 +1585,7 @@ dependencies = [ [[package]] name = "mining_sv2" -version = "5.0.2" +version = "6.0.0" dependencies = [ "binary_sv2 5.0.0", ] @@ -1818,7 +1819,7 @@ dependencies = [ "common_messages_sv2 6.0.2", "framing_sv2 5.0.2", "job_declaration_sv2 5.0.2", - "mining_sv2 5.0.2", + "mining_sv2 6.0.0", "template_distribution_sv2 4.0.2", ] @@ -1919,6 +1920,7 @@ name = "pool_sv2" version = "0.2.0" dependencies = [ "async-channel", + "bitcoin", "clap", "config", "rand 0.8.5", @@ -2474,7 +2476,7 @@ dependencies = [ "framing_sv2 5.0.2", "handlers_sv2 0.2.1", "job_declaration_sv2 5.0.2", - "mining_sv2 5.0.2", + "mining_sv2 6.0.0", "noise_sv2 1.4.0", "parsers_sv2 0.1.2", "stratum_translation", @@ -2487,8 +2489,9 @@ name = "stratum_translation" version = "0.1.1" dependencies = [ "binary_sv2 5.0.0", + "bitcoin", "channels_sv2 2.0.1", - "mining_sv2 5.0.2", + "mining_sv2 6.0.0", "sv1_api", "tracing", ] @@ -2842,6 +2845,7 @@ name = "translator_sv2" version = "2.0.0" dependencies = [ "async-channel", + "bitcoin", "clap", "config", "serde",