feat: implement subnet topology fork transition with centralized routing #754

base: unstable
Changes from all commits
```diff
@@ -190,9 +190,10 @@ impl EventProcessor {
         let max_seen = self.db.state().get_max_operator_id_seen();

-        // Only check for missing operators if we have a previous max (not a migrated database)
+        // Only check for missing operators if we have a previous max (not a migrated database).
+        // Use checked_sub to avoid underflow if operatorId is 0.
         if let Some(max_seen) = max_seen
-            && max_seen != operatorId - 1
+            && operatorId.checked_sub(1) != Some(max_seen)
         {
             return Err(ExecutionError::InvalidEvent(format!(
                 "Missing OperatorAdded events: database has only seen up to id {max_seen}, \
```

**Comment on lines +193 to +196**

**Member:** This appears to be an unrelated change.

**Author:** Yes, this is a fix for a potential underflow. Would you like me to create a new PR for that?
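The underflow the author mentions is easy to reproduce in isolation. Below is a minimal standalone sketch of the guard; the `has_gap` helper and `u64` operator ids are assumptions for illustration, not the PR's actual code:

```rust
// With unsigned integers, `operator_id - 1` panics in debug builds and
// wraps to u64::MAX in release builds when operator_id == 0;
// `checked_sub` returns None instead, so the comparison stays well-defined.
fn has_gap(max_seen: Option<u64>, operator_id: u64) -> bool {
    match max_seen {
        // A gap exists when the new id does not directly follow the max seen.
        Some(max_seen) => operator_id.checked_sub(1) != Some(max_seen),
        // Migrated database: no previous max, so nothing to check.
        None => false,
    }
}

fn main() {
    assert!(!has_gap(Some(4), 5)); // contiguous: 5 follows 4
    assert!(has_gap(Some(2), 5));  // ids 3 and 4 were never seen
    assert!(has_gap(Some(5), 0));  // the old code would compute 0 - 1 here
}
```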
```diff
@@ -11,6 +11,7 @@ use qbft_manager::QbftManager;
 use signature_collector::SignatureCollectorManager;
 use slot_clock::SlotClock;
 use ssv_types::msgid::DutyExecutor;
+use subnet_service::SubnetRouter;
 use tokio::sync::{mpsc, mpsc::error::TrySendError, watch};
 use tracing::{debug, debug_span, error, trace};
```
```diff
@@ -34,6 +35,8 @@ pub struct NetworkMessageReceiver<S: SlotClock, D: DutiesProvider> {
     outcome_tx: mpsc::Sender<Outcome>,
     validator: Arc<Validator<S, D>>,
     doppelganger_service: Option<Arc<OperatorDoppelgangerService>>,
+    /// Centralized router for fork-aware subnet calculations
+    router: Arc<SubnetRouter<S>>,
 }

 impl<S: SlotClock + 'static, D: DutiesProvider> NetworkMessageReceiver<S, D> {
```
```diff
@@ -47,6 +50,7 @@ impl<S: SlotClock + 'static, D: DutiesProvider> NetworkMessageReceiver<S, D> {
         outcome_tx: mpsc::Sender<Outcome>,
         validator: Arc<Validator<S, D>>,
         doppelganger_service: Option<Arc<OperatorDoppelgangerService>>,
+        router: Arc<SubnetRouter<S>>,
     ) -> Arc<Self> {
         Arc::new(Self {
             processor,
```
```diff
@@ -57,6 +61,7 @@ impl<S: SlotClock + 'static, D: DutiesProvider> NetworkMessageReceiver<S, D> {
             outcome_tx,
             validator,
             doppelganger_service,
+            router,
         })
     }
 }
```
```diff
@@ -76,7 +81,7 @@ impl<S: SlotClock + 'static, D: DutiesProvider> MessageReceiver
         let span = debug_span!("message_receiver", msg=%message_id);
         let _enter = span.enter();

-        let result = receiver.validator.validate(&message.data);
+        let result = receiver.validator.validate(&message.data, &message.topic);

         let mut action = MessageAcceptance::from(&result);
```
```diff
@@ -104,6 +109,7 @@ impl<S: SlotClock + 'static, D: DutiesProvider> MessageReceiver
         let ValidatedMessage {
             signed_ssv_message,
             ssv_message,
+            matched_schema,
         } = match result {
             ValidationResult::Success(message) => message,
             ValidationResult::PreDecodeFailure(failure) => {
```
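These two hunks imply that validation is now topic-aware: the gossipsub topic determines which schema the payload is decoded against, and the matched schema is returned alongside the decoded message. A rough sketch of that shape follows; every name and the topic-suffix convention are assumptions, not the PR's actual API:

```rust
// Sketch: the topic selects the decoding schema, and the matched schema
// is surfaced so later stages can route on it. All names hypothetical.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MatchedSchema {
    PreFork,
    PostFork,
}

struct ValidatedMessage {
    payload: Vec<u8>,
    matched_schema: MatchedSchema,
}

fn validate(data: &[u8], topic: &str) -> Result<ValidatedMessage, String> {
    // Assumed convention: post-fork topics carry a distinguishing suffix.
    let matched_schema = if topic.ends_with("_postfork") {
        MatchedSchema::PostFork
    } else {
        MatchedSchema::PreFork
    };
    if data.is_empty() {
        return Err("empty payload".into());
    }
    // Real code would decode `data` against the schema chosen above.
    Ok(ValidatedMessage {
        payload: data.to_vec(),
        matched_schema,
    })
}

fn main() {
    // Hypothetical topic name, used only to exercise the sketch.
    let v = validate(b"msg", "ssv.committee.7_postfork").unwrap();
    assert_eq!(v.matched_schema, MatchedSchema::PostFork);
}
```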
|
|
```diff
@@ -120,6 +126,18 @@ impl<S: SlotClock + 'static, D: DutiesProvider> MessageReceiver
             }
         };

+        // Check if the matched schema should be processed at the current epoch.
+        // During pre-subscribe window (F-2, F-1), PostFork messages are accepted for
+        // gossipsub propagation but not processed for committee work.
+        if !receiver.router.should_process(matched_schema) {
+            trace!(
+                gossipsub_message_id = ?message_id,
+                ?matched_schema,
+                "Message accepted for propagation but not processed (pre-subscribe window)"
+            );
+            return;
+        }

         let msg_id = signed_ssv_message.ssv_message().msg_id().clone();

         match msg_id.duty_executor() {
```

**Comment on lines +129 to +139**

**Member:** This idea does not work. Because validation is stateful, validating but then discarding a message received on a post-fork topic may cause the same message to fail validation if it is subsequently received on a pre-fork topic.
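For illustration, the epoch gating that `should_process` appears to perform could look like the sketch below. The two-epoch pre-subscribe window (F-2, F-1) comes from the added comment; the field names, the epoch parameter, and the exact comparisons are assumptions (the schema enum mirrors the earlier sketch):

```rust
// Sketch of the pre-subscribe window: during the two epochs before the
// fork (F-2 and F-1), post-fork messages are accepted so gossipsub keeps
// propagating them, but they are not processed for committee work.
#[derive(Clone, Copy, Debug)]
enum MatchedSchema {
    PreFork,
    PostFork,
}

struct SubnetRouter {
    fork_epoch: u64,
}

impl SubnetRouter {
    fn should_process(&self, schema: MatchedSchema, current_epoch: u64) -> bool {
        match schema {
            // Pre-fork messages are processed until the fork activates.
            MatchedSchema::PreFork => current_epoch < self.fork_epoch,
            // Post-fork messages are only processed from the fork epoch on.
            MatchedSchema::PostFork => current_epoch >= self.fork_epoch,
        }
    }
}

fn main() {
    let router = SubnetRouter { fork_epoch: 100 };
    assert!(!router.should_process(MatchedSchema::PostFork, 98)); // F-2: propagate only
    assert!(!router.should_process(MatchedSchema::PostFork, 99)); // F-1: propagate only
    assert!(router.should_process(MatchedSchema::PostFork, 100)); // fork reached: process
}
```

Note that the reviewer's objection applies however the gate itself is computed: if validation records per-message state (duplicate tracking, for example), accepting and then discarding the post-fork copy can cause the pre-fork copy of the same message to be rejected later.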
**Comment:** This should be `boole_fork_epoch`.