From b5b6d8ae8c5c5fb6e7c5a9387aa550708752289e Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 13 Jan 2026 09:47:45 +0500 Subject: [PATCH 01/39] networking: update committee docs --- docs/client/networking.md | 31 +++++++++++++--- docs/client/validator.md | 35 +++++++++++++++---- src/lean_spec/subspecs/forkchoice/store.py | 23 ++++++++---- .../subspecs/networking/gossipsub/topic.py | 31 ++++++++++++++++ 4 files changed, 103 insertions(+), 17 deletions(-) diff --git a/docs/client/networking.md b/docs/client/networking.md index 8160d6eb..75574eb5 100644 --- a/docs/client/networking.md +++ b/docs/client/networking.md @@ -33,6 +33,7 @@ Each node entry contains an ENR. This is an Ethereum Node Record. It includes: - The node's public key - Network address - Port numbers +- Committee assignments (for aggregators) - Other metadata In production, dynamic discovery would replace static configuration. @@ -62,15 +63,35 @@ Messages are organized by topic. Topic names follow a pattern that includes: This structure lets clients subscribe to relevant messages and ignore others. +The payload carried in the gossipsub message is the SSZ-encoded, +Snappy-compressed message, which type is identified by the topic: + +| Topic Name | Message Type | Encoding | +|-------------------------------------------------------------|--------------------------------|-------------------------------| +| /lean/consensus/devnet-0/blocks/ssz_snappy | SignedBlockWithAttestation | SSZ + Snappy | +| /lean/consensus/devnet-0/attestations/ssz_snappy | SignedAttestation | SSZ + Snappy | +| /lean/consensus/devnet-0/attestation_{subnet_id}/ssz_snappy | SignedAttestation | SSZ + Snappy | +| /lean/consensus/devnet-0/aggregation/ssz_snappy | LeanAggregatedSignature | SSZ + Snappy | + ### Message Types -Two main message types exist: +Three main message types exist: + +* _Blocks_, defined by the `SignedBlockWithAttestation` type, are proposed by +validators and propagated on the block topic. 
Every node needs to see blocks +quickly. -Blocks are proposed by validators. They propagate on the block topic. Every -node needs to see blocks quickly. +* _Attestations_, defined by the `SignedAttestation` type, come from all +validators. They propagate on the global attestation topic. Additionally, +each committee has its own attestation topic. Validators publish to their +committee's attestation topic and global attestation topic. Non-aggregating +validators subscribe only to the global attestation topic, while aggregators +subscribe to both the global and their committee's attestation topic. -Attestations come from all validators. They propagate on the attestation topic. High volume -but small messages. +* _Committee aggregations_, defined by the `LeanAggregatedSignature` type, +created by committee aggregators. These combine attestations from committee +members. Aggregations propagate on the aggregation topic to which every +validator subscribes. ### Encoding diff --git a/docs/client/validator.md b/docs/client/validator.md index 3284c4f2..3d7f4b69 100644 --- a/docs/client/validator.md +++ b/docs/client/validator.md @@ -2,8 +2,9 @@ ## Overview -Validators participate in consensus by proposing blocks and producing attestations. This -document describes what honest validators do. +Validators participate in consensus by proposing blocks and producing attestations. +Optionally validators can opt-in to behave as aggregators in a single or multiple +committees. This document describes what honest validators do. ## Validator Assignment @@ -16,6 +17,28 @@ diversity helps test interoperability. In production, validator assignment will work differently. The current approach is temporary for devnet testing. +## Committees and Subnets + +Committee is a group of validators assigned to aggregate attestations. +Beacon chain uses subnets as network channels for specific committees. 
+
+In the current design, however, there is one global subnet for signatures propagation,
+in addition to direct sending to aggregators, who form aggregation committees.
+This is due to 3SF-mini consensus design, that requires 2/3 + 1 of all
+attestations to be observed by any validator to compute safe target correctly.
+
+Every validator is assigned to a single committee. Number of committees is
+defined in config.yaml. Each committee maps to a subnet ID. Validators
+subnet ID is derived using their validator index modulo number of committees.
+This is to simplify debugging and testing. In the future, validators subnet id
+will be assigned randomly per epoch.
+
+## Aggregator assignment
+
+Some validators are self-assigned as aggregators. Aggregators collect and combine
+attestations from other validators in their committee. To become an aggregator,
+a validator sets `is_aggregator` flag to true as ENR record field.
+
 ## Proposing Blocks
 
 Each slot has exactly one designated proposer. The proposer is determined by
@@ -52,7 +75,7 @@ receive and validate it.
 
 ## Attesting
 
-Every validator attestations in every slot. Attesting happens in the second interval,
+Every validator attests in every slot. Attesting happens in the second interval,
 after proposals are made.
 
 ### What to Attest For
@@ -79,7 +102,8 @@ compute the head.
 
 ### Broadcasting Attestations
 
 Validators sign their attestations and broadcast them. The network uses a single topic
-for all attestations. No subnets or committees in the current design.
+for all attestations. In addition to gossipsub topic, attestations are also sent to
+aggregators directly.
 
 ## Timing
 
@@ -98,8 +122,7 @@ blocks and attestations.
 
 Attestation aggregation combines multiple attestations into one. This saves bandwidth
 and block space.
 
-Devnet 0 has no aggregation. Each attestation is separate. Future devnets will add
-aggregation.
+Devnet 2 introduced signatures aggregation. Aggregations are produced by block proposers.
When aggregation is added, aggregators will collect attestations and combine them. Aggregated attestations will be broadcast separately. diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 3f6934a8..332e49b8 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -151,6 +151,13 @@ class Store(Container): Keyed by SignatureKey(validator_id, attestation_data_root). """ + committee_signatures: Dict[SignatureKey, Signature] = {} + """ + Per-validator XMSS signatures learned from committee attesters. + + Keyed by SignatureKey(validator_id, attestation_data_root). + """ + aggregated_payloads: Dict[SignatureKey, list[AggregatedSignatureProof]] = {} """ Aggregated signature proofs learned from blocks. @@ -270,6 +277,8 @@ def validate_attestation(self, attestation: Attestation) -> None: def on_gossip_attestation( self, signed_attestation: SignedAttestation, + is_aggregator: bool, + validator_index: Uint64, scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, ) -> "Store": """ @@ -319,11 +328,17 @@ def on_gossip_attestation( sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) new_gossip_sigs[sig_key] = signature + new_committee_sigs = dict(self.committee_signatures) + if is_aggregator: + # If this validator is an aggregator, also store in committee signatures + new_committee_sigs[sig_key] = signature + # Process the attestation data store = self.on_attestation(attestation=attestation, is_from_block=False) - # Return store with updated signature map - return store.model_copy(update={"gossip_signatures": new_gossip_sigs}) + # Return store with updated signature maps + return store.model_copy(update={"gossip_signatures": new_gossip_sigs, + "committee_signatures": new_committee_sigs}) def on_attestation( self, @@ -834,10 +849,6 @@ def tick_interval(self, has_proposal: bool) -> "Store": - If proposal exists, immediately accept new attestations - This 
ensures validators see the block before attesting - **Interval 1 (Validator Attesting)**: - - Validators create and gossip attestations - - No store action (waiting for attestations to arrive) - **Interval 2 (Safe Target Update)**: - Compute safe target with 2/3+ majority - Provides validators with a stable attestation target diff --git a/src/lean_spec/subspecs/networking/gossipsub/topic.py b/src/lean_spec/subspecs/networking/gossipsub/topic.py index 0bb2040b..40cb7684 100644 --- a/src/lean_spec/subspecs/networking/gossipsub/topic.py +++ b/src/lean_spec/subspecs/networking/gossipsub/topic.py @@ -87,6 +87,19 @@ Used in the topic string to identify signed attestation messages. """ +ATTESTATION_SUBNET_TOPIC_NAME: str = "attestation_{subnet_id}" +"""Template topic name for attestation subnet messages. + +Used in the topic string to identify attestation messages for a specific subnet. +`{subnet_id}` should be replaced with the subnet identifier (0-63). +""" + +COMMITTEE_AGGREGATION_TOPIC_NAME: str = "committee_aggregation" +"""Topic name for committee aggregation messages. + +Used in the topic string to identify committee's aggregation messages. +""" + class TopicKind(Enum): """Gossip topic types. @@ -103,6 +116,12 @@ class TopicKind(Enum): ATTESTATION = ATTESTATION_TOPIC_NAME """Signed attestation messages.""" + ATTESTATION_SUBNET = ATTESTATION_SUBNET_TOPIC_NAME + """Attestation subnet messages.""" + + COMMITTEE_AGGREGATION = COMMITTEE_AGGREGATION_TOPIC_NAME + """Committee aggregated signatures messages.""" + def __str__(self) -> str: """Return the topic name string.""" return self.value @@ -207,6 +226,18 @@ def attestation(cls, fork_digest: str) -> GossipTopic: """ return cls(kind=TopicKind.ATTESTATION, fork_digest=fork_digest) + @classmethod + def committee_aggregation(cls, fork_digest: str) -> GossipTopic: + """Create a committee aggregation topic for the given fork. + + Args: + fork_digest: Fork digest as 0x-prefixed hex string. 
+ + Returns: + GossipTopic for committee aggregation messages. + """ + return cls(kind=TopicKind.COMMITTEE_AGGREGATION, fork_digest=fork_digest) + def format_topic_string( topic_name: str, From 4867d7d8d1e48c8f130e3e68a4c2ce634475b98d Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 13 Jan 2026 10:20:29 +0500 Subject: [PATCH 02/39] networking: add committee size configuration --- src/lean_spec/subspecs/chain/config.py | 3 ++ src/lean_spec/subspecs/containers/config.py | 3 ++ .../subspecs/containers/state/state.py | 2 ++ src/lean_spec/subspecs/networking/__init__.py | 2 ++ src/lean_spec/subspecs/networking/subnet.py | 28 +++++++++++++++++++ 5 files changed, 38 insertions(+) create mode 100644 src/lean_spec/subspecs/networking/subnet.py diff --git a/src/lean_spec/subspecs/chain/config.py b/src/lean_spec/subspecs/chain/config.py index aa00fee7..4ce8aaa4 100644 --- a/src/lean_spec/subspecs/chain/config.py +++ b/src/lean_spec/subspecs/chain/config.py @@ -37,6 +37,9 @@ VALIDATOR_REGISTRY_LIMIT: Final = Uint64(2**12) """The maximum number of validators that can be in the registry.""" +AGGREGATION_COMMITTEE_SIZE: Final = Uint64(1) +"""The size of the aggregation committee for each slot.""" + class _ChainConfig(StrictBaseModel): """ diff --git a/src/lean_spec/subspecs/containers/config.py b/src/lean_spec/subspecs/containers/config.py index 18289e88..f0b00723 100644 --- a/src/lean_spec/subspecs/containers/config.py +++ b/src/lean_spec/subspecs/containers/config.py @@ -14,3 +14,6 @@ class Config(Container): genesis_time: Uint64 """The timestamp of the genesis block.""" + + attestation_subnet_count: Uint64 + """The number of attestation subnets in the network.""" diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index 3326c2dc..90114157 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -30,6 +30,7 @@ JustifiedSlots, Validators, ) +from 
...chain.config import AGGREGATION_COMMITTEE_SIZE class State(Container): @@ -90,6 +91,7 @@ def generate_genesis(cls, genesis_time: Uint64, validators: Validators) -> "Stat # Configure the genesis state. genesis_config = Config( genesis_time=genesis_time, + attestation_subnet_count=AGGREGATION_COMMITTEE_SIZE, ) # Build the genesis block header for the state. diff --git a/src/lean_spec/subspecs/networking/__init__.py b/src/lean_spec/subspecs/networking/__init__.py index 33ed0b00..254c5351 100644 --- a/src/lean_spec/subspecs/networking/__init__.py +++ b/src/lean_spec/subspecs/networking/__init__.py @@ -16,6 +16,7 @@ Status, ) from .types import DomainType, ForkDigest, ProtocolId +from .subnet import compute_subnet_id __all__ = [ "MAX_REQUEST_BLOCKS", @@ -32,4 +33,5 @@ "DomainType", "ProtocolId", "ForkDigest", + "compute_subnet_id", ] diff --git a/src/lean_spec/subspecs/networking/subnet.py b/src/lean_spec/subspecs/networking/subnet.py new file mode 100644 index 00000000..f8ff07d6 --- /dev/null +++ b/src/lean_spec/subspecs/networking/subnet.py @@ -0,0 +1,28 @@ +"""Subnet helpers for networking. + +Provides a small utility to compute a validator's attestation subnet id from +its validator index and number of committees. +""" +from __future__ import annotations + +def compute_subnet_id(validator_index: int, num_committees: int) -> int: + """Compute the attestation subnet id for a validator. + + Args: + validator_index: Non-negative validator index (int). + num_committees: Positive number of committees (int). + + Returns: + An integer subnet id in 0..(num_committees-1). + + Raises: + ValueError: If validator_index is negative or num_committees is not + a positive integer. 
+ """ + if not isinstance(validator_index, int) or validator_index < 0: + raise ValueError("validator_index must be a non-negative integer") + if not isinstance(num_committees, int) or num_committees <= 0: + raise ValueError("num_committees must be a positive integer") + + subnet_id = validator_index % num_committees + return subnet_id From 7bcedca0ce9e9dffa947295eaee927418b7a1fb7 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 13 Jan 2026 10:21:06 +0500 Subject: [PATCH 03/39] store committee attestations --- src/lean_spec/subspecs/forkchoice/store.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 332e49b8..a47a2ef5 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -47,6 +47,7 @@ is_proposer, ) from lean_spec.types.container import Container +from lean_spec.subspecs.networking import compute_subnet_id class Store(Container): @@ -156,6 +157,7 @@ class Store(Container): Per-validator XMSS signatures learned from committee attesters. Keyed by SignatureKey(validator_id, attestation_data_root). + TODO: should we also index by subnet id? """ aggregated_payloads: Dict[SignatureKey, list[AggregatedSignatureProof]] = {} @@ -278,7 +280,7 @@ def on_gossip_attestation( self, signed_attestation: SignedAttestation, is_aggregator: bool, - validator_index: Uint64, + current_validator_id: Uint64, scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, ) -> "Store": """ @@ -292,6 +294,8 @@ def on_gossip_attestation( Args: signed_attestation: The signed attestation from gossip. scheme: XMSS signature scheme for verification. + is_aggregator: True if current validator holds aggregator role. + current_validator_id: Index of the current validator processing this attestation. Returns: New Store with attestation processed and signature stored. 
@@ -304,6 +308,7 @@ def on_gossip_attestation( attestation_data = signed_attestation.message signature = signed_attestation.signature + # Validate the attestation first so unknown blocks are rejected cleanly # (instead of raising a raw KeyError when state is missing). attestation = Attestation(validator_id=validator_id, data=attestation_data) @@ -323,14 +328,18 @@ def on_gossip_attestation( public_key, attestation_data.slot, attestation_data.data_root_bytes(), scheme ), "Signature verification failed" + current_validator_subnet = compute_subnet_id(int(current_validator_id), self.config.attestation_subnet_count) + attester_subnet = compute_subnet_id(int(validator_id), self.config.attestation_subnet_count) + # Store signature for later lookup during block building new_gossip_sigs = dict(self.gossip_signatures) sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) new_gossip_sigs[sig_key] = signature new_committee_sigs = dict(self.committee_signatures) - if is_aggregator: - # If this validator is an aggregator, also store in committee signatures + if is_aggregator and current_validator_subnet == attester_subnet: + # If this validator is an aggregator for this attestation, + # also store the signature in the committee signatures map. 
new_committee_sigs[sig_key] = signature # Process the attestation data @@ -338,7 +347,7 @@ def on_gossip_attestation( # Return store with updated signature maps return store.model_copy(update={"gossip_signatures": new_gossip_sigs, - "committee_signatures": new_committee_sigs}) + "committee_signatures": new_committee_sigs}) def on_attestation( self, @@ -776,7 +785,7 @@ def accept_new_attestations(self) -> "Store": - Interval 0: Block proposal - Interval 1: Validators cast attestations (enter "new") - Interval 2: Safe target update - - Interval 3: Attestations accepted (move to "known") + - Interval 3: Process accumulated attestations This staged progression ensures proper timing and prevents premature influence on fork choice decisions. From 980b5e800dd74ebe9954568884b62f38434f7d32 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 13 Jan 2026 10:59:21 +0500 Subject: [PATCH 04/39] Add aggregation in 2nd interval --- src/lean_spec/subspecs/forkchoice/store.py | 56 ++++++++++++++++++++-- 1 file changed, 53 insertions(+), 3 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index a47a2ef5..0ec988a2 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -839,7 +839,53 @@ def update_safe_target(self) -> "Store": return self.model_copy(update={"safe_target": safe_target}) - def tick_interval(self, has_proposal: bool) -> "Store": + def aggregate_committee_signatures(self) -> "Store": + """ + Aggregate committee signatures for attestations in committee_signatures. + + This method aggregates signatures from the committee_signatures map if + the node possesses >= 90% of the signatures of the committee + + Returns: + New Store with updated aggregated_payloads. 
+ """ + new_aggregated_payloads = dict(self.aggregated_payloads) + + # Group signatures by attestation data root + signatures_by_data_root: Dict[Bytes32, List[Tuple[Uint64, Signature]]] = defaultdict(list) + for sig_key, signature in self.committee_signatures.items(): + signatures_by_data_root[sig_key.attestation_data_root].append((sig_key.validator_id, signature)) + + for data_root, sig_list in signatures_by_data_root.items(): + num_signatures = len(sig_list) + # get head state to determine committee size + head_state = self.states[self.head] + committee_size = len(head_state.validators) / self.config.attestation_subnet_count + if num_signatures >= committee_size * 90 // 100: + # Aggregate signatures + participant_bits = Bitfield(committee_size) + signatures = [] + for validator_id, signature in sig_list: + participant_bits.set_bit(int(validator_id)) + signatures.append(signature) + + # Note: in a real implementation, signatures aggregation may be executed in a separate thread + aggregated_signature = aggregate_signatures(signatures) + aggregated_proof = AggregatedSignatureProof( + aggregated_signature=aggregated_signature, + participants=participant_bits, + ) + + # Store the aggregated proof + sig_key = SignatureKey(validator_id=Uint64(0), attestation_data_root=data_root) + if sig_key not in new_aggregated_payloads: + new_aggregated_payloads[sig_key] = [] + new_aggregated_payloads[sig_key].append(aggregated_proof) + # Note: here we should broadcast the aggregated signature to committee_aggregators topic + + return self.model_copy(update={"aggregated_payloads": new_aggregated_payloads}) + + def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": """ Advance store time by one interval and perform interval-specific actions. @@ -869,6 +915,7 @@ def tick_interval(self, has_proposal: bool) -> "Store": Args: has_proposal: Whether a proposal exists for this interval. + is_aggregator: Whether the node is an aggregator. 
Returns: New Store with advanced time and interval-specific updates applied. @@ -884,13 +931,15 @@ def tick_interval(self, has_proposal: bool) -> "Store": elif current_interval == Uint64(2): # Mid-slot - update safe target for validators store = store.update_safe_target() + if is_aggregator: + store = store.aggregate_committee_signatures() elif current_interval == Uint64(3): # End of slot - accept accumulated attestations store = store.accept_new_attestations() return store - def on_tick(self, time: Uint64, has_proposal: bool) -> "Store": + def on_tick(self, time: Uint64, has_proposal: bool, is_aggregator: bool) -> "Store": """ Advance forkchoice store time to given timestamp. @@ -901,6 +950,7 @@ def on_tick(self, time: Uint64, has_proposal: bool) -> "Store": Args: time: Target time in seconds since genesis. has_proposal: Whether node has proposal for current slot. + is_aggregator: Whether the node is an aggregator. Returns: New Store with time advanced and all interval actions performed. 
@@ -920,7 +970,7 @@ def on_tick(self, time: Uint64, has_proposal: bool) -> "Store": should_signal_proposal = has_proposal and (store.time + Uint64(1)) == tick_interval_time # Advance by one interval with appropriate signaling - store = store.tick_interval(should_signal_proposal) + store = store.tick_interval(should_signal_proposal, is_aggregator) return store From 60468af66b142b430c7d6f48e505fa58a87bec6a Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 13 Jan 2026 15:53:29 +0500 Subject: [PATCH 05/39] Committee aggregation --- docs/client/networking.md | 14 +- docs/client/validator.md | 8 +- .../containers/attestation/attestation.py | 8 + src/lean_spec/subspecs/forkchoice/store.py | 139 ++++++++++++++---- 4 files changed, 131 insertions(+), 38 deletions(-) diff --git a/docs/client/networking.md b/docs/client/networking.md index 75574eb5..0110b4f6 100644 --- a/docs/client/networking.md +++ b/docs/client/networking.md @@ -66,12 +66,12 @@ This structure lets clients subscribe to relevant messages and ignore others. 
The payload carried in the gossipsub message is the SSZ-encoded, Snappy-compressed message, which type is identified by the topic: -| Topic Name | Message Type | Encoding | -|-------------------------------------------------------------|--------------------------------|-------------------------------| -| /lean/consensus/devnet-0/blocks/ssz_snappy | SignedBlockWithAttestation | SSZ + Snappy | -| /lean/consensus/devnet-0/attestations/ssz_snappy | SignedAttestation | SSZ + Snappy | -| /lean/consensus/devnet-0/attestation_{subnet_id}/ssz_snappy | SignedAttestation | SSZ + Snappy | -| /lean/consensus/devnet-0/aggregation/ssz_snappy | LeanAggregatedSignature | SSZ + Snappy | +| Topic Name | Message Type | Encoding | +|-------------------------------------------------------------|-----------------------------|--------------| +| /lean/consensus/devnet-0/blocks/ssz_snappy | SignedBlockWithAttestation | SSZ + Snappy | +| /lean/consensus/devnet-0/attestations/ssz_snappy | SignedAttestation | SSZ + Snappy | +| /lean/consensus/devnet-0/attestation_{subnet_id}/ssz_snappy | SignedAttestation | SSZ + Snappy | +| /lean/consensus/devnet-0/aggregation/ssz_snappy | SignedAggregatedAttestation | SSZ + Snappy | ### Message Types @@ -88,7 +88,7 @@ committee's attestation topic and global attestation topic. Non-aggregating validators subscribe only to the global attestation topic, while aggregators subscribe to both the global and their committee's attestation topic. -* _Committee aggregations_, defined by the `LeanAggregatedSignature` type, +* _Committee aggregations_, defined by the `SignedAggregatedAttestation` type, created by committee aggregators. These combine attestations from committee members. Aggregations propagate on the aggregation topic to which every validator subscribes. 
diff --git a/docs/client/validator.md b/docs/client/validator.md index 3d7f4b69..3cc1f9d0 100644 --- a/docs/client/validator.md +++ b/docs/client/validator.md @@ -22,11 +22,15 @@ is temporary for devnet testing. Committee is a group of validators assigned to aggregate attestations. Beacon chain uses subnets as network channels for specific committees. -In the current design, however, there is one global subnet for signatures propagation, -in addition to direct sending to aggregators, who form aggregation committees. +In the devnet-3 design, however, there is one global subnet for signed +attestations propagation, in addition to publishing into per committee subnets. This is due to 3SF-mini consensus design, that requires 2/3 + 1 of all attestations to be observed by any validator to compute safe target correctly. +Note that non-aggregating validators do not need to subscribe to committee +attestation subnets. They only need to subscribe to the global attestation +subnet. + Every validator is assigned to a single committee. Number of committees is defined in config.yaml. Each committee maps to a subnet ID. Validators subnet ID is derived using their validator index modulo number of committees. 
diff --git a/src/lean_spec/subspecs/containers/attestation/attestation.py b/src/lean_spec/subspecs/containers/attestation/attestation.py index 1a0e7fb6..26e6e79f 100644 --- a/src/lean_spec/subspecs/containers/attestation/attestation.py +++ b/src/lean_spec/subspecs/containers/attestation/attestation.py @@ -20,6 +20,7 @@ from lean_spec.subspecs.ssz import hash_tree_root from lean_spec.types import Bytes32, Container, Uint64 +from ...xmss.aggregation import AggregatedSignatureProof from ...xmss.containers import Signature from ..checkpoint import Checkpoint from .aggregation_bits import AggregationBits @@ -107,3 +108,10 @@ def aggregate_by_data( ) for data, validator_ids in data_to_validator_ids.items() ] + +class SignedAggregatedAttestation(Container): + data: AttestationData + """Combined attestation data similar to the beacon chain format.""" + + proof: AggregatedSignatureProof + """Aggregated signature proof covering all participating validators.""" diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 0ec988a2..86c83da3 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -49,6 +49,9 @@ from lean_spec.types.container import Container from lean_spec.subspecs.networking import compute_subnet_id +from src.lean_spec.subspecs.containers.attestation.attestation import SignedAggregatedAttestation +from src.lean_spec.subspecs.xmss.aggregation import AggregationError + class Store(Container): """ @@ -479,6 +482,86 @@ def on_attestation( } ) + def on_gossip_committee_aggregation(self, signed_attestation: SignedAggregatedAttestation) -> "Store": + """ + Process a signed aggregated attestation received via aggregation topic + + This method: + 1. Verifies the aggregated attestation + 2. Stores the aggregation in aggregation_payloads map + + Args: + signed_attestation: The signed aggregated attestation from committee aggregation. 
+ + Returns: + New Store with aggregation processed and stored. + + Raises: + ValueError: If validator not found in state. + AssertionError: If signature verification fails. + """ + data = signed_attestation.data + proof = signed_attestation.proof + + # Get validator IDs who participated in this aggregation + validator_ids = proof.participants.to_validator_indices() + + # Retrieve the relevant state to look up public keys for verification. + key_state = self.states.get(data.target.root) + assert key_state is not None, ( + f"No state available to verify committee aggregation for target " + f"{data.target.root.hex()}" + ) + + # Ensure all participants exist in the active set + validators = key_state.validators + for validator_id in validator_ids: + assert validator_id < Uint64(len(validators)), ( + f"Validator {validator_id} not found in state {data.target.root.hex()}" + ) + + # Prepare public keys for verification + public_keys = [validators[vid].get_pubkey() for vid in validator_ids] + + # Verify the leanVM aggregated proof + try: + proof.verify( + public_keys=public_keys, + message=data.data_root_bytes(), + epoch=data.slot, + ) + except AggregationError as exc: + raise AssertionError( + f"Committee aggregation signature verification failed: {exc}" + ) from exc + + # Copy the aggregated proof map for updates + # Must deep copy the lists to maintain immutability of previous store snapshots + new_aggregated_payloads = copy.deepcopy(self.aggregated_payloads) + data_root = data.data_root_bytes() + + store = self + for vid in validator_ids: + # Update Proof Map + # + # Store the proof so future block builders can reuse this aggregation + key = SignatureKey(vid, data_root) + new_aggregated_payloads.setdefault(key, []).append(proof) + + # TODO: Update Fork Choice? + # + # Process the attestation data. Since it's from gossip, is_from_block=False. 
+ # store = store.on_attestation( + # attestation=Attestation(validator_id=vid, data=data), + # is_from_block=False, + # ) + + # Return store with updated aggregated payloads + return store.model_copy(update={"aggregated_payloads": new_aggregated_payloads}) + + + + def on_block( self, signed_block_with_attestation: SignedBlockWithAttestation, @@ -851,38 +934,36 @@ def aggregate_committee_signatures(self) -> "Store": """ new_aggregated_payloads = dict(self.aggregated_payloads) - # Group signatures by attestation data root - signatures_by_data_root: Dict[Bytes32, List[Tuple[Uint64, Signature]]] = defaultdict(list) - for sig_key, signature in self.committee_signatures.items(): - signatures_by_data_root[sig_key.attestation_data_root].append((sig_key.validator_id, signature)) - - for data_root, sig_list in signatures_by_data_root.items(): - num_signatures = len(sig_list) - # get head state to determine committee size - head_state = self.states[self.head] - committee_size = len(head_state.validators) / self.config.attestation_subnet_count - if num_signatures >= committee_size * 90 // 100: - # Aggregate signatures - participant_bits = Bitfield(committee_size) - signatures = [] - for validator_id, signature in sig_list: - participant_bits.set_bit(int(validator_id)) - signatures.append(signature) - - # Note: in a real implementation, signatures aggregation may be executed in a separate thread - aggregated_signature = aggregate_signatures(signatures) - aggregated_proof = AggregatedSignatureProof( - aggregated_signature=aggregated_signature, - participants=participant_bits, - ) + attestations = self.latest_new_attestations + committee_signatures = self.committee_signatures + aggregated_payloads = self.aggregated_payloads + + head_state = self.states[self.head] + aggregated_attestations, aggregated_signatures = head_state.compute_aggregated_signatures( + attestations, + committee_signatures, + aggregated_payloads, + ) + + # iterate to broadcast aggregated attestations + for 
aggregated_attestation, aggregated_signature in zip(aggregated_attestations, aggregated_signatures, + strict=True): + signed_aggregated_attestation = SignedAggregatedAttestation( + data = aggregated_attestation.data, + proof = aggregated_signature, + ) + # Note: here we should broadcast the aggregated signature to committee_aggregators topic - # Store the aggregated proof - sig_key = SignatureKey(validator_id=Uint64(0), attestation_data_root=data_root) + # Compute new aggregated payloads + for aggregated_attestation, aggregated_signature in zip(aggregated_attestations, aggregated_signatures, + strict=True): + data_root = aggregated_attestation.data.data_root_bytes() + validator_ids = aggregated_signature.participants.to_validator_indices() + for vid in validator_ids: + sig_key = SignatureKey(vid, data_root) if sig_key not in new_aggregated_payloads: new_aggregated_payloads[sig_key] = [] - new_aggregated_payloads[sig_key].append(aggregated_proof) - # Note: here we should broadcast the aggregated signature to committee_aggregators topic - + new_aggregated_payloads[sig_key].append(aggregated_signature) return self.model_copy(update={"aggregated_payloads": new_aggregated_payloads}) def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": From 213504ae527b27e8edb6d67b0c25c25c5ac860f8 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 13 Jan 2026 18:10:01 +0500 Subject: [PATCH 06/39] Rename aggregation committee size to count for clarity --- src/lean_spec/subspecs/chain/config.py | 4 ++-- src/lean_spec/subspecs/containers/state/state.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/lean_spec/subspecs/chain/config.py b/src/lean_spec/subspecs/chain/config.py index 4ce8aaa4..3b6188f0 100644 --- a/src/lean_spec/subspecs/chain/config.py +++ b/src/lean_spec/subspecs/chain/config.py @@ -37,8 +37,8 @@ VALIDATOR_REGISTRY_LIMIT: Final = Uint64(2**12) """The maximum number of validators that can be in the registry.""" 
-AGGREGATION_COMMITTEE_SIZE: Final = Uint64(1) -"""The size of the aggregation committee for each slot.""" +AGGREGATION_COMMITTEE_COUNT: Final = Uint64(1) +"""The number of committees for aggregation per slot.""" class _ChainConfig(StrictBaseModel): diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index 90114157..895a335c 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -30,7 +30,7 @@ JustifiedSlots, Validators, ) -from ...chain.config import AGGREGATION_COMMITTEE_SIZE +from ...chain.config import AGGREGATION_COMMITTEE_COUNT class State(Container): @@ -91,7 +91,7 @@ def generate_genesis(cls, genesis_time: Uint64, validators: Validators) -> "Stat # Configure the genesis state. genesis_config = Config( genesis_time=genesis_time, - attestation_subnet_count=AGGREGATION_COMMITTEE_SIZE, + attestation_subnet_count=AGGREGATION_COMMITTEE_COUNT, ) # Build the genesis block header for the state. From 4fac983167d757ededf6f12823d017201d8553b6 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Wed, 14 Jan 2026 09:47:27 +0500 Subject: [PATCH 07/39] Remove committee signatures --- src/lean_spec/subspecs/forkchoice/store.py | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 86c83da3..01336aca 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -150,17 +150,9 @@ class Store(Container): gossip_signatures: Dict[SignatureKey, Signature] = {} """ - Per-validator XMSS signatures learned from gossip. - - Keyed by SignatureKey(validator_id, attestation_data_root). - """ - - committee_signatures: Dict[SignatureKey, Signature] = {} - """ Per-validator XMSS signatures learned from committee attesters. - + Keyed by SignatureKey(validator_id, attestation_data_root). 
- TODO: should we also index by subnet id? """ aggregated_payloads: Dict[SignatureKey, list[AggregatedSignatureProof]] = {} @@ -336,21 +328,17 @@ def on_gossip_attestation( # Store signature for later lookup during block building new_gossip_sigs = dict(self.gossip_signatures) - sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) - new_gossip_sigs[sig_key] = signature - - new_committee_sigs = dict(self.committee_signatures) if is_aggregator and current_validator_subnet == attester_subnet: # If this validator is an aggregator for this attestation, # also store the signature in the committee signatures map. - new_committee_sigs[sig_key] = signature + sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) + new_gossip_sigs[sig_key] = signature # Process the attestation data store = self.on_attestation(attestation=attestation, is_from_block=False) # Return store with updated signature maps - return store.model_copy(update={"gossip_signatures": new_gossip_sigs, - "committee_signatures": new_committee_sigs}) + return store.model_copy(update={"gossip_signatures": new_gossip_sigs) def on_attestation( self, @@ -935,7 +923,7 @@ def aggregate_committee_signatures(self) -> "Store": new_aggregated_payloads = dict(self.aggregated_payloads) attestations = self.latest_new_attestations - committee_signatures = self.committee_signatures + committee_signatures = self.gossip_signatures aggregated_payloads = self.aggregated_payloads head_state = self.states[self.head] From f2651d8b3fe9f3f00aa278c3ed8636093c7c0197 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Wed, 14 Jan 2026 11:00:44 +0500 Subject: [PATCH 08/39] Refactor build_block: use committee aggregated signature proofs --- .../subspecs/containers/state/state.py | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index 895a335c..2fa43d70 100644 --- 
a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -717,13 +717,21 @@ def build_block( # Add new attestations and continue iteration attestations.extend(new_attestations) - # Compute the aggregated signatures for the attestations. - # If the attestations cannot be aggregated, split it in a greedy way. - aggregated_attestations, aggregated_signatures = self.compute_aggregated_signatures( - attestations, - gossip_signatures, - aggregated_payloads, - ) + aggregated_attestations = AggregatedAttestation.aggregate_by_data(attestations) + aggregated_signatures: list[AggregatedSignatureProof] = [] + + # Collect aggregated signatures for the included attestations + for aggregated_attestation in aggregated_attestations: + data = aggregated_attestation.data + data_root = data.data_root_bytes() + + # Look up aggregated signature proof in aggregated_payloads using first validator as key + validator_id = aggregated_attestation.aggregation_bits.to_validator_indices()[0] + sig_key = SignatureKey(validator_id, data_root) + aggregated_signature_proof = aggregated_payloads[sig_key] + + # Append the found proof to the list + aggregated_signatures.append(aggregated_signature_proof) # Update the block with the aggregated attestations final_block = candidate_block.model_copy( From cc7548c421c92958ca421d75dc8c7bde9e700c80 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Wed, 14 Jan 2026 12:19:12 +0500 Subject: [PATCH 09/39] Clarify attestation broadcasting and update Devnet reference --- docs/client/validator.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/client/validator.md b/docs/client/validator.md index 3cc1f9d0..af2a12bc 100644 --- a/docs/client/validator.md +++ b/docs/client/validator.md @@ -105,9 +105,8 @@ compute the head. ### Broadcasting Attestations -Validators sign their attestations and broadcast them. The network uses a single topic -for all attestations. 
In addition to gossipsub topic, attestations are also sent to -aggregators directly. +Validators sign their attestations and broadcast them into the global +attestation topic and its corresponding subnet topic. ## Timing @@ -126,7 +125,7 @@ blocks and attestations. Attestation aggregation combines multiple attestations into one. This saves bandwidth and block space. -Devnet 2 introduced signatures aggregation. Aggregations are produced by block proposers. +Devnet-2 introduced signatures aggregation. Aggregations are produced by block proposers. When aggregation is added, aggregators will collect attestations and combine them. Aggregated attestations will be broadcast separately. From cb1a21b3d06316df8b1eaf4e16201b141545e06e Mon Sep 17 00:00:00 2001 From: kamilsa Date: Wed, 14 Jan 2026 12:35:52 +0500 Subject: [PATCH 10/39] remove adding proposer signatures to gossip_signatures also rename gossip_signatures to gossip_committee_signatures --- src/lean_spec/subspecs/forkchoice/store.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 01336aca..a8eca665 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -148,7 +148,7 @@ class Store(Container): - Only stores the attestation data, not signatures. """ - gossip_signatures: Dict[SignatureKey, Signature] = {} + gossip_committee_signatures: Dict[SignatureKey, Signature] = {} """ Per-validator XMSS signatures learned from committee attesters. 
@@ -327,7 +327,7 @@ def on_gossip_attestation( attester_subnet = compute_subnet_id(int(validator_id), self.config.attestation_subnet_count) # Store signature for later lookup during block building - new_gossip_sigs = dict(self.gossip_signatures) + new_gossip_sigs = dict(self.gossip_committee_signatures) if is_aggregator and current_validator_subnet == attester_subnet: # If this validator is an aggregator for this attestation, # also store the signature in the committee signatures map. @@ -338,7 +338,7 @@ def on_gossip_attestation( store = self.on_attestation(attestation=attestation, is_from_block=False) # Return store with updated signature maps - return store.model_copy(update={"gossip_signatures": new_gossip_sigs) + return store.model_copy(update={"gossip_committee_signatures": new_gossip_sigs) def on_attestation( self, @@ -694,16 +694,12 @@ def on_block( # 1. NOT affect this block's fork choice position (processed as "new") # 2. Be available for inclusion in future blocks # 3. Influence fork choice only after interval 3 (end of slot) - # - # We also store the proposer's signature for potential future block building. 
+ proposer_sig_key = SignatureKey( proposer_attestation.validator_id, proposer_attestation.data.data_root_bytes(), ) - new_gossip_sigs = dict(store.gossip_signatures) - new_gossip_sigs[proposer_sig_key] = ( - signed_block_with_attestation.signature.proposer_signature - ) + new_gossip_sigs = dict(store.gossip_committee_signatures) store = store.on_attestation( attestation=proposer_attestation, @@ -711,7 +707,7 @@ def on_block( ) # Update store with proposer signature - store = store.model_copy(update={"gossip_signatures": new_gossip_sigs}) + store = store.model_copy(update={"gossip_committee_signatures": new_gossip_sigs}) return store @@ -923,7 +919,7 @@ def aggregate_committee_signatures(self) -> "Store": new_aggregated_payloads = dict(self.aggregated_payloads) attestations = self.latest_new_attestations - committee_signatures = self.gossip_signatures + committee_signatures = self.gossip_committee_signatures aggregated_payloads = self.aggregated_payloads head_state = self.states[self.head] @@ -1235,7 +1231,7 @@ def produce_block_with_signatures( parent_root=head_root, available_attestations=available_attestations, known_block_roots=set(store.blocks.keys()), - gossip_signatures=store.gossip_signatures, + gossip_signatures=store.gossip_committee_signatures, aggregated_payloads=store.aggregated_payloads, ) From e39882347929c388aba91a0df25faf7bfff82a5c Mon Sep 17 00:00:00 2001 From: kamilsa Date: Wed, 14 Jan 2026 12:40:07 +0500 Subject: [PATCH 11/39] Refactor subnet ID computation and rename committee signatures variable --- src/lean_spec/subspecs/forkchoice/store.py | 10 +++++----- src/lean_spec/subspecs/networking/subnet.py | 18 ++++++------------ 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index a8eca665..bcf29579 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -323,22 +323,22 @@ def 
on_gossip_attestation( public_key, attestation_data.slot, attestation_data.data_root_bytes(), scheme ), "Signature verification failed" - current_validator_subnet = compute_subnet_id(int(current_validator_id), self.config.attestation_subnet_count) - attester_subnet = compute_subnet_id(int(validator_id), self.config.attestation_subnet_count) + current_validator_subnet = compute_subnet_id(current_validator_id, self.config.attestation_subnet_count) + attester_subnet = compute_subnet_id(validator_id, self.config.attestation_subnet_count) # Store signature for later lookup during block building - new_gossip_sigs = dict(self.gossip_committee_signatures) + new_commitee_sigs = dict(self.gossip_committee_signatures) if is_aggregator and current_validator_subnet == attester_subnet: # If this validator is an aggregator for this attestation, # also store the signature in the committee signatures map. sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) - new_gossip_sigs[sig_key] = signature + new_commitee_sigs[sig_key] = signature # Process the attestation data store = self.on_attestation(attestation=attestation, is_from_block=False) # Return store with updated signature maps - return store.model_copy(update={"gossip_committee_signatures": new_gossip_sigs) + return store.model_copy(update={"gossip_committee_signatures": new_commitee_sigs}) def on_attestation( self, diff --git a/src/lean_spec/subspecs/networking/subnet.py b/src/lean_spec/subspecs/networking/subnet.py index f8ff07d6..01d19bf5 100644 --- a/src/lean_spec/subspecs/networking/subnet.py +++ b/src/lean_spec/subspecs/networking/subnet.py @@ -5,24 +5,18 @@ """ from __future__ import annotations -def compute_subnet_id(validator_index: int, num_committees: int) -> int: +from src.lean_spec.types import Uint64 + + +def compute_subnet_id(validator_index: Uint64, num_committees: Uint64) -> int: """Compute the attestation subnet id for a validator. 
Args: - validator_index: Non-negative validator index (int). - num_committees: Positive number of committees (int). + validator_index: Non-negative validator index . + num_committees: Positive number of committees. Returns: An integer subnet id in 0..(num_committees-1). - - Raises: - ValueError: If validator_index is negative or num_committees is not - a positive integer. """ - if not isinstance(validator_index, int) or validator_index < 0: - raise ValueError("validator_index must be a non-negative integer") - if not isinstance(num_committees, int) or num_committees <= 0: - raise ValueError("num_committees must be a positive integer") - subnet_id = validator_index % num_committees return subnet_id From 90fc114f5760f9f9fdd0d5297e500c2b18ec9216 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Wed, 14 Jan 2026 13:14:27 +0500 Subject: [PATCH 12/39] Store proposer signature if same subnet --- src/lean_spec/subspecs/forkchoice/store.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index bcf29579..569fbc7f 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -553,6 +553,7 @@ def on_gossip_committee_aggregation(self, signed_attestation: SignedAggregatedAt def on_block( self, signed_block_with_attestation: SignedBlockWithAttestation, + current_validator: Uint64, scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, ) -> "Store": """ @@ -588,6 +589,7 @@ def on_block( Args: signed_block_with_attestation: Complete signed block with proposer attestation. + current_validator: Index of the current validator processing this block. scheme: XMSS signature scheme to use for signature verification. Returns: @@ -695,12 +697,21 @@ def on_block( # 2. Be available for inclusion in future blocks # 3. 
Influence fork choice only after interval 3 (end of slot) - proposer_sig_key = SignatureKey( - proposer_attestation.validator_id, - proposer_attestation.data.data_root_bytes(), - ) new_gossip_sigs = dict(store.gossip_committee_signatures) + # Store proposer signature for future lookup if he belongs to the same committee as current validator + proposer_validator_id = proposer_attestation.validator_id + proposer_subnet_id = compute_subnet_id(proposer_validator_id, self.config.attestation_subnet_count) + current_validator_subnet_id = compute_subnet_id(current_validator, self.config.attestation_subnet_count) + if proposer_subnet_id == current_validator_subnet_id: + proposer_sig_key = SignatureKey( + proposer_attestation.validator_id, + proposer_attestation.data.data_root_bytes(), + ) + new_gossip_sigs[proposer_sig_key] = ( + signed_block_with_attestation.signature.proposer_signature + ) + store = store.on_attestation( attestation=proposer_attestation, is_from_block=False, From cdae6a4e9b9b4cba06c5279bd29e8c2ba55ae3d0 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Wed, 14 Jan 2026 16:28:03 +0500 Subject: [PATCH 13/39] Update build block with selecting aggregations --- .../subspecs/containers/state/state.py | 130 +++++++++--------- src/lean_spec/subspecs/forkchoice/store.py | 14 +- 2 files changed, 73 insertions(+), 71 deletions(-) diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index 2fa43d70..fa52c27a 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -717,23 +717,16 @@ def build_block( # Add new attestations and continue iteration attestations.extend(new_attestations) - aggregated_attestations = AggregatedAttestation.aggregate_by_data(attestations) - aggregated_signatures: list[AggregatedSignatureProof] = [] - - # Collect aggregated signatures for the included attestations - for aggregated_attestation in aggregated_attestations: - data = 
aggregated_attestation.data - data_root = data.data_root_bytes() - - # Look up aggregated signature proof in aggregated_payloads using first validator as key - validator_id = aggregated_attestation.aggregation_bits.to_validator_indices()[0] - sig_key = SignatureKey(validator_id, data_root) - aggregated_signature_proof = aggregated_payloads[sig_key] - - # Append the found proof to the list - aggregated_signatures.append(aggregated_signature_proof) + # Use two-phase signature aggregation to build the final attestations and proofs + # Phase 1: Collect gossip signatures + # Phase 2: Fall back to aggregated payloads for uncovered validators + aggregated_attestations, aggregated_signatures = self.select_aggregated_proofs( + attestations, + gossip_signatures, + aggregated_payloads, + ) - # Update the block with the aggregated attestations + # Update the block with the aggregated attestations and proofs final_block = candidate_block.model_copy( update={ "body": BlockBody( @@ -748,26 +741,17 @@ def build_block( return final_block, post_state, aggregated_attestations, aggregated_signatures - def compute_aggregated_signatures( + def aggregate_gossip_signatures( self, attestations: list[Attestation], gossip_signatures: dict[SignatureKey, "Signature"] | None = None, - aggregated_payloads: dict[SignatureKey, list[AggregatedSignatureProof]] | None = None, - ) -> tuple[list[AggregatedAttestation], list[AggregatedSignatureProof]]: + ) -> list[tuple[AggregatedAttestation, AggregatedSignatureProof]]: """ - Compute aggregated signatures for a set of attestations. + Collect aggregated signatures from gossip network and aggregate them. - This method implements a two-phase signature collection strategy: - - 1. **Gossip Phase**: For each attestation group, first attempt to collect - individual XMSS signatures from the gossip network. These are fresh - signatures that validators broadcast when they attest. - - 2. 
**Fallback Phase**: For any validators not covered by gossip, fall back - to previously-seen aggregated proofs from blocks. This uses a greedy - set-cover approach to minimize the number of proofs needed. - - The result is a list of (attestation, proof) pairs ready for block inclusion. + For each attestation group, attempt to collect individual XMSS signatures + from the gossip network. These are fresh signatures that validators + broadcast when they attest. Parameters ---------- @@ -775,15 +759,12 @@ def compute_aggregated_signatures( Individual attestations to aggregate and sign. gossip_signatures : dict[SignatureKey, Signature] | None Per-validator XMSS signatures learned from the gossip network. - aggregated_payloads : dict[SignatureKey, list[AggregatedSignatureProof]] | None - Aggregated proofs learned from previously-seen blocks. Returns: ------- - tuple[list[AggregatedAttestation], list[AggregatedSignatureProof]] - Paired attestations and their corresponding proofs. + list[tuple[AggregatedAttestation, AggregatedSignatureProof]] + - List of (attestation, proof) pairs from gossip collection. """ - # Accumulator for (attestation, proof) pairs. results: list[tuple[AggregatedAttestation, AggregatedSignatureProof]] = [] # Group individual attestations by data @@ -800,8 +781,6 @@ def compute_aggregated_signatures( # Get the list of validators who attested to this data. validator_ids = aggregated.aggregation_bits.to_validator_indices() - # Phase 1: Gossip Collection - # # When a validator creates an attestation, it broadcasts the # individual XMSS signature over the gossip network. If we have # received these signatures, we can aggregate them ourselves. @@ -813,16 +792,10 @@ def compute_aggregated_signatures( gossip_keys: list[PublicKey] = [] gossip_ids: list[Uint64] = [] - # Track validators we couldn't find signatures for. - # - # These will need to be covered by Phase 2 (existing proofs). 
- remaining: set[Uint64] = set() - # Attempt to collect each validator's signature from gossip. # # Signatures are keyed by (validator ID, data root). # - If a signature exists, we add it to our collection. - # - Otherwise, we mark that validator as "remaining" for the fallback phase. if gossip_signatures: for vid in validator_ids: key = SignatureKey(vid, data_root) @@ -831,12 +804,6 @@ def compute_aggregated_signatures( gossip_sigs.append(sig) gossip_keys.append(self.validators[vid].get_pubkey()) gossip_ids.append(vid) - else: - # No signature available: mark for fallback coverage. - remaining.add(vid) - else: - # No gossip data at all: all validators need fallback coverage. - remaining = set(validator_ids) # If we collected any gossip signatures, aggregate them into a proof. # @@ -851,14 +818,57 @@ def compute_aggregated_signatures( message=data_root, epoch=data.slot, ) - results.append( - ( - AggregatedAttestation(aggregation_bits=participants, data=data), - proof, - ) - ) + attestation = AggregatedAttestation(aggregation_bits=participants, data=data) + results.append((attestation, proof)) + + return results + + def select_aggregated_proofs( + self, + attestations: list[Attestation], + aggregated_payloads: dict[SignatureKey, list[AggregatedSignatureProof]] | None = None, + ) -> tuple[list[AggregatedAttestation], list[AggregatedSignatureProof]]: + """ + Select aggregated proofs for a set of attestations. + + This method selects aggregated proofs from aggregated_payloads, + prioritizing proofs from the most recent blocks. + + Strategy: + 1. For each attestation group, aggregate as many signatures as possible + from the most recent block's proofs. + 2. If remaining validators exist after step 1, include proofs from + previous blocks that cover them. + + Parameters: + ---------- + attestations : list[Attestation] + Individual attestations to aggregate and sign. 
+ gossip_signatures : dict[SignatureKey, Signature] | None + Per-validator XMSS signatures learned from the gossip network. + (Not used in this implementation - for compatibility with build_block) + aggregated_payloads : dict[SignatureKey, list[AggregatedSignatureProof]] | None + Aggregated proofs learned from previously-seen blocks. + The list for each key should be ordered with most recent proofs first. - # Phase 2: Fallback to existing proofs + Returns: + ------- + tuple[list[AggregatedAttestation], list[AggregatedSignatureProof]] + Paired attestations and their corresponding proofs. + """ + results: list[tuple[AggregatedAttestation, AggregatedSignatureProof]] = [] + + # Group individual attestations by data + for aggregated in AggregatedAttestation.aggregate_by_data(attestations): + data = aggregated.data + data_root = data.data_root_bytes() + validator_ids = aggregated.aggregation_bits.to_validator_indices() # validators contributed to this attestation + all_validator_ids = [v.index for v in self.validators] + + # Validators that are missing in the current aggregation are put into remaining. + remaining: set[Uint64] = set(all_validator_ids) - set(validator_ids) + + # Fallback to existing proofs # # Some validators may not have broadcast their signatures over gossip, # but we might have seen proofs for them in previously-received blocks. @@ -934,14 +944,10 @@ def compute_aggregated_signatures( remaining -= covered # Final Assembly - # - # - We built a list of (attestation, proof) tuples. - # - Now we unzip them into two parallel lists for the return value. - - # Handle the empty case explicitly. if not results: return [], [] # Unzip the results into parallel lists. 
aggregated_attestations, aggregated_proofs = zip(*results, strict=True) return list(aggregated_attestations), list(aggregated_proofs) + diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 569fbc7f..801fc493 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -921,8 +921,7 @@ def aggregate_committee_signatures(self) -> "Store": """ Aggregate committee signatures for attestations in committee_signatures. - This method aggregates signatures from the committee_signatures map if - the node possesses >= 90% of the signatures of the committee + This method aggregates signatures from the gossip_committee_signatures map Returns: New Store with updated aggregated_payloads. @@ -931,18 +930,16 @@ def aggregate_committee_signatures(self) -> "Store": attestations = self.latest_new_attestations committee_signatures = self.gossip_committee_signatures - aggregated_payloads = self.aggregated_payloads head_state = self.states[self.head] - aggregated_attestations, aggregated_signatures = head_state.compute_aggregated_signatures( + # Perform aggregation + aggregated_results = head_state.aggregate_gossip_signatures( attestations, committee_signatures, - aggregated_payloads, ) # iterate to broadcast aggregated attestations - for aggregated_attestation, aggregated_signature in zip(aggregated_attestations, aggregated_signatures, - strict=True): + for aggregated_attestation, aggregated_signature in aggregated_results: signed_aggregated_attestation = SignedAggregatedAttestation( data = aggregated_attestation.data, proof = aggregated_signature, @@ -950,8 +947,7 @@ def aggregate_committee_signatures(self) -> "Store": # Note: here we should broadcast the aggregated signature to committee_aggregators topic # Compute new aggregated payloads - for aggregated_attestation, aggregated_signature in zip(aggregated_attestations, aggregated_signatures, - strict=True): + for 
aggregated_attestation, aggregated_signature in aggregated_results: data_root = aggregated_attestation.data.data_root_bytes() validator_ids = aggregated_signature.participants.to_validator_indices() for vid in validator_ids: From b24d3ed71247fbcef3da3b617568bb420445fb45 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Wed, 14 Jan 2026 17:27:50 +0500 Subject: [PATCH 14/39] Uncomment on_attestation during on_gossip_aggregation --- .../subspecs/containers/state/state.py | 11 ++--------- src/lean_spec/subspecs/forkchoice/store.py | 19 +++++++++++-------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index fa52c27a..d335c0ab 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -717,12 +717,9 @@ def build_block( # Add new attestations and continue iteration attestations.extend(new_attestations) - # Use two-phase signature aggregation to build the final attestations and proofs - # Phase 1: Collect gossip signatures - # Phase 2: Fall back to aggregated payloads for uncovered validators + # Select aggregated attestations and proofs for the final block aggregated_attestations, aggregated_signatures = self.select_aggregated_proofs( attestations, - gossip_signatures, aggregated_payloads, ) @@ -844,9 +841,6 @@ def select_aggregated_proofs( ---------- attestations : list[Attestation] Individual attestations to aggregate and sign. - gossip_signatures : dict[SignatureKey, Signature] | None - Per-validator XMSS signatures learned from the gossip network. - (Not used in this implementation - for compatibility with build_block) aggregated_payloads : dict[SignatureKey, list[AggregatedSignatureProof]] | None Aggregated proofs learned from previously-seen blocks. The list for each key should be ordered with most recent proofs first. 
@@ -863,10 +857,9 @@ def select_aggregated_proofs( data = aggregated.data data_root = data.data_root_bytes() validator_ids = aggregated.aggregation_bits.to_validator_indices() # validators contributed to this attestation - all_validator_ids = [v.index for v in self.validators] # Validators that are missing in the current aggregation are put into remaining. - remaining: set[Uint64] = set(all_validator_ids) - set(validator_ids) + remaining: set[Uint64] = set(validator_ids) # Fallback to existing proofs # diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 801fc493..a6f64e44 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -283,7 +283,8 @@ def on_gossip_attestation( This method: 1. Verifies the XMSS signature - 2. Stores the signature in the gossip signature map + 2. If current node is aggregator, stores the signature in the gossip signature map if it belongs + to the current validator's subnet 3. Processes the attestation data via on_attestation Args: @@ -326,7 +327,7 @@ def on_gossip_attestation( current_validator_subnet = compute_subnet_id(current_validator_id, self.config.attestation_subnet_count) attester_subnet = compute_subnet_id(validator_id, self.config.attestation_subnet_count) - # Store signature for later lookup during block building + # Store signature for later aggregation if applicable new_commitee_sigs = dict(self.gossip_committee_signatures) if is_aggregator and current_validator_subnet == attester_subnet: # If this validator is an aggregator for this attestation, @@ -536,13 +537,15 @@ def on_gossip_committee_aggregation(self, signed_attestation: SignedAggregatedAt key = SignatureKey(vid, data_root) new_aggregated_payloads.setdefault(key, []).append(proof) - # TODO: Update Fork Choice? - # + # Process the attestation data. Since it's from gossip, is_from_block=False. 
- # store = store.on_attestation( - # attestation=Attestation(validator_id=vid, data=data), - # is_from_block=False, - # ) + # Note, we could have already processed individual attestations from this aggregation, + # during votes propagation into attestation topic, but it's safe to re-process here as + # on_attestation has idempotent behavior. + store = store.on_attestation( + attestation=Attestation(validator_id=vid, data=data), + is_from_block=False, + ) # Return store with updated aggregated payloads return store.model_copy(update={"aggregated_payloads": new_aggregated_payloads}) From 5c952ff00fb9f25ce57cfc240e48395b41d2e071 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Wed, 14 Jan 2026 18:55:03 +0500 Subject: [PATCH 15/39] Update gossipsub topic names to reflect devnet3 --- docs/client/networking.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/client/networking.md b/docs/client/networking.md index 0110b4f6..39989a5e 100644 --- a/docs/client/networking.md +++ b/docs/client/networking.md @@ -66,12 +66,12 @@ This structure lets clients subscribe to relevant messages and ignore others. 
The payload carried in the gossipsub message is the SSZ-encoded, Snappy-compressed message, which type is identified by the topic: -| Topic Name | Message Type | Encoding | -|-------------------------------------------------------------|-----------------------------|--------------| -| /lean/consensus/devnet-0/blocks/ssz_snappy | SignedBlockWithAttestation | SSZ + Snappy | -| /lean/consensus/devnet-0/attestations/ssz_snappy | SignedAttestation | SSZ + Snappy | -| /lean/consensus/devnet-0/attestation_{subnet_id}/ssz_snappy | SignedAttestation | SSZ + Snappy | -| /lean/consensus/devnet-0/aggregation/ssz_snappy | SignedAggregatedAttestation | SSZ + Snappy | +| Topic Name | Message Type | Encoding | +|------------------------------------------------------------|-----------------------------|--------------| +| /lean/consensus/devnet3/blocks/ssz_snappy | SignedBlockWithAttestation | SSZ + Snappy | +| /lean/consensus/devnet3/attestations/ssz_snappy | SignedAttestation | SSZ + Snappy | +| /lean/consensus/devnet3/attestation_{subnet_id}/ssz_snappy | SignedAttestation | SSZ + Snappy | +| /lean/consensus/devnet3/aggregation/ssz_snappy | SignedAggregatedAttestation | SSZ + Snappy | ### Message Types From 8a0c121f13291075596621e8cdfa093dcea45910 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Thu, 15 Jan 2026 16:13:03 +0500 Subject: [PATCH 16/39] Rename aggregation committee to attestation committee and update related references --- docs/client/validator.md | 20 +++++++++---------- src/lean_spec/subspecs/chain/config.py | 4 ++-- .../subspecs/containers/state/state.py | 4 ++-- src/lean_spec/subspecs/forkchoice/store.py | 2 +- 4 files changed, 14 insertions(+), 16 deletions(-) diff --git a/docs/client/validator.md b/docs/client/validator.md index af2a12bc..202c9cd7 100644 --- a/docs/client/validator.md +++ b/docs/client/validator.md @@ -17,14 +17,15 @@ diversity helps test interoperability. In production, validator assignment will work differently. 
The current approach is temporary for devnet testing. -## Committees and Subnets +## Attestation Committees and Subnets -Committee is a group of validators assigned to aggregate attestations. -Beacon chain uses subnets as network channels for specific committees. +Attestation committee is a group of validators contributing to the common +aggregated attestations. Beacon chain uses subnets as network channels for +specific committees. In the devnet-3 design, however, there is one global subnet for signed attestations propagation, in addition to publishing into per committee subnets. -This is due to 3SF-mini consensus design, that requires 2/3 + 1 of all +This is due to 3SF-mini consensus design, that requires 2/3+ of all attestations to be observed by any validator to compute safe target correctly. Note that non-aggregating validators do not need to subscribe to committee @@ -32,16 +33,16 @@ attestation subnets. They only need to subscribe to the global attestation subnet. Every validator is assigned to a single committee. Number of committees is -defined in config.yaml. Each committee maps to a subnet ID. Validators +defined in config.yaml. Each committee maps to a subnet ID. Validator's subnet ID is derived using their validator index modulo number of committees. -This is to simplify debugging and testing. In the future, validators subnet id +This is to simplify debugging and testing. In the future, validator's subnet ID will be assigned randomly per epoch. ## Aggregator assignment Some validators are self-assigned as aggregators. Aggregators collect and combine attestations from other validators in their committee. To become an aggregator, -a validator sets `is_validator` flag to true as ENR record field. +a validator sets `is_aggregator` flag to true as ENR record field. ## Proposing Blocks @@ -125,10 +126,7 @@ blocks and attestations. Attestation aggregation combines multiple attestations into one. This saves bandwidth and block space. 
-Devnet-2 introduced signatures aggregation. Aggregations are produced by block proposers. - -When aggregation is added, aggregators will collect attestations and combine them. -Aggregated attestations will be broadcast separately. +Devnet-2 introduces signatures aggregation. Aggregators will collect attestations and combine them. Aggregated attestations will be broadcast separately. ## Signature Handling diff --git a/src/lean_spec/subspecs/chain/config.py b/src/lean_spec/subspecs/chain/config.py index 3b6188f0..5f7add98 100644 --- a/src/lean_spec/subspecs/chain/config.py +++ b/src/lean_spec/subspecs/chain/config.py @@ -37,8 +37,8 @@ VALIDATOR_REGISTRY_LIMIT: Final = Uint64(2**12) """The maximum number of validators that can be in the registry.""" -AGGREGATION_COMMITTEE_COUNT: Final = Uint64(1) -"""The number of committees for aggregation per slot.""" +ATTESTATION_COMMITTEE_COUNT: Final = Uint64(1) +"""The number of attestation committees per slot.""" class _ChainConfig(StrictBaseModel): diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index d335c0ab..ab543588 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -30,7 +30,7 @@ JustifiedSlots, Validators, ) -from ...chain.config import AGGREGATION_COMMITTEE_COUNT +from ...chain.config import ATTESTATION_COMMITTEE_COUNT class State(Container): @@ -91,7 +91,7 @@ def generate_genesis(cls, genesis_time: Uint64, validators: Validators) -> "Stat # Configure the genesis state. genesis_config = Config( genesis_time=genesis_time, - attestation_subnet_count=AGGREGATION_COMMITTEE_COUNT, + attestation_subnet_count=ATTESTATION_COMMITTEE_COUNT, ) # Build the genesis block header for the state. 
diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index a6f64e44..71185a15 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -471,7 +471,7 @@ def on_attestation( } ) - def on_gossip_committee_aggregation(self, signed_attestation: SignedAggregatedAttestation) -> "Store": + def on_gossip_aggregated_attestation(self, signed_attestation: SignedAggregatedAttestation) -> "Store": """ Process a signed aggregated attestation received via aggregation topic From 9d721bd0119d0c15b654f166b233f40bbe0e4b29 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Thu, 15 Jan 2026 19:59:23 +0500 Subject: [PATCH 17/39] refactor: rename committee aggregation topic to aggregated attestation --- src/lean_spec/subspecs/networking/gossipsub/topic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lean_spec/subspecs/networking/gossipsub/topic.py b/src/lean_spec/subspecs/networking/gossipsub/topic.py index 40cb7684..b9faa8ef 100644 --- a/src/lean_spec/subspecs/networking/gossipsub/topic.py +++ b/src/lean_spec/subspecs/networking/gossipsub/topic.py @@ -94,7 +94,7 @@ `{subnet_id}` should be replaced with the subnet identifier (0-63). """ -COMMITTEE_AGGREGATION_TOPIC_NAME: str = "committee_aggregation" +AGGREGATED_ATTESTATION_TOPIC_NAME: str = "aggregation" """Topic name for committee aggregation messages. Used in the topic string to identify committee's aggregation messages. 
@@ -119,7 +119,7 @@ class TopicKind(Enum): ATTESTATION_SUBNET = ATTESTATION_SUBNET_TOPIC_NAME """Attestation subnet messages.""" - COMMITTEE_AGGREGATION = COMMITTEE_AGGREGATION_TOPIC_NAME + AGGREGATED_ATTESTATION = AGGREGATED_ATTESTATION_TOPIC_NAME """Committee aggregated signatures messages.""" def __str__(self) -> str: From baddbeba0aceb8ebd5930495309ae0f7bfad60ab Mon Sep 17 00:00:00 2001 From: kamilsa Date: Thu, 15 Jan 2026 19:59:51 +0500 Subject: [PATCH 18/39] update validator.md to clarify subnet usage in attestation committees --- docs/client/validator.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/client/validator.md b/docs/client/validator.md index 202c9cd7..40b62f18 100644 --- a/docs/client/validator.md +++ b/docs/client/validator.md @@ -20,8 +20,7 @@ is temporary for devnet testing. ## Attestation Committees and Subnets Attestation committee is a group of validators contributing to the common -aggregated attestations. Beacon chain uses subnets as network channels for -specific committees. +aggregated attestations. Subnets are network channels dedicated to specific committees. In the devnet-3 design, however, there is one global subnet for signed attestations propagation, in addition to publishing into per committee subnets. @@ -126,7 +125,7 @@ blocks and attestations. Attestation aggregation combines multiple attestations into one. This saves bandwidth and block space. -Devnet-2 introduces signatures aggregation. Aggregators will collect attestations and combine them. Aggregated attestations will be broadcast separately. +Devnet-3 introduces signatures aggregation. Aggregators will collect attestations and combine them. Aggregated attestations will be broadcast separately. 
## Signature Handling From 6556e81fd1df08cb3ba399f1b136e5088cf3f633 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Fri, 16 Jan 2026 09:58:49 +0500 Subject: [PATCH 19/39] feat: add threshold ratio for committee signature aggregation --- .../subspecs/containers/state/state.py | 22 +++++++++ src/lean_spec/subspecs/forkchoice/store.py | 47 +++++++++++++++---- src/lean_spec/subspecs/networking/subnet.py | 20 +++++++- 3 files changed, 80 insertions(+), 9 deletions(-) diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index ab543588..16a9010a 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -2,6 +2,7 @@ from typing import AbstractSet, Iterable +from lean_spec.subspecs.networking.subnet import compute_subnet_id, compute_subnet_size from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.subspecs.xmss.aggregation import ( AggregatedSignatureProof, @@ -742,6 +743,7 @@ def aggregate_gossip_signatures( self, attestations: list[Attestation], gossip_signatures: dict[SignatureKey, "Signature"] | None = None, + threshold_ratio: float = 0.0, ) -> list[tuple[AggregatedAttestation, AggregatedSignatureProof]]: """ Collect aggregated signatures from gossip network and aggregate them. @@ -756,6 +758,9 @@ def aggregate_gossip_signatures( Individual attestations to aggregate and sign. gossip_signatures : dict[SignatureKey, Signature] | None Per-validator XMSS signatures learned from the gossip network. + threshold_ratio : float + Minimum ratio of committee signatures required to produce an aggregation. + Defaults to 0.0 (aggregate even if only 1 signature). Returns: ------- @@ -807,6 +812,23 @@ def aggregate_gossip_signatures( # The aggregation combines multiple XMSS signatures into a single # compact proof that can verify all participants signed the message. 
if gossip_ids: + # Check participation threshold if required + if threshold_ratio > 0.0: + # Calculate committee size for the subnet of these validators + # We assume all validators in an aggregation group belong to the same subnet + first_validator_id = gossip_ids[0] + subnet_id = compute_subnet_id(first_validator_id, self.config.attestation_subnet_count) + + # Count total validators in this subnet + committee_size = compute_subnet_size( + subnet_id, + self.config.attestation_subnet_count, + len(self.validators), + ) + + if len(gossip_ids) < committee_size * threshold_ratio: + continue + participants = AggregationBits.from_validator_indices(gossip_ids) proof = AggregatedSignatureProof.aggregate( participants=participants, diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index aac85515..1a1f35d0 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -147,6 +147,12 @@ class Store(Container): - Only stores the attestation data, not signatures. """ + aggregated_in_current_slot: Boolean = Boolean(False) + """ + Tracks whether committee signatures have been successfully aggregated in the current slot. + Reset at the start of each slot (Interval 0). + """ + gossip_committee_signatures: dict[SignatureKey, Signature] = {} """ Per-validator XMSS signatures learned from committee attesters. @@ -919,14 +925,18 @@ def update_safe_target(self) -> "Store": return self.model_copy(update={"safe_target": safe_target}) - def aggregate_committee_signatures(self) -> "Store": + def aggregate_committee_signatures(self, threshold_ratio: float = 0.0) -> "Store": """ Aggregate committee signatures for attestations in committee_signatures. - This method aggregates signatures from the gossip_committee_signatures map + This method aggregates signatures from the gossip_committee_signatures map. + + Args: + threshold_ratio: Minimum participation ratio (0.0 to 1.0). 
+ Aggregates only if signature count / committee size >= ratio. Returns: - New Store with updated aggregated_payloads. + New Store with updated aggregated_payloads and aggregated_in_current_slot flag. """ new_aggregated_payloads = dict(self.aggregated_payloads) @@ -938,6 +948,7 @@ def aggregate_committee_signatures(self) -> "Store": aggregated_results = head_state.aggregate_gossip_signatures( attestations, committee_signatures, + threshold_ratio=threshold_ratio, ) # iterate to broadcast aggregated attestations @@ -957,7 +968,18 @@ def aggregate_committee_signatures(self) -> "Store": if sig_key not in new_aggregated_payloads: new_aggregated_payloads[sig_key] = [] new_aggregated_payloads[sig_key].append(aggregated_signature) - return self.model_copy(update={"aggregated_payloads": new_aggregated_payloads}) + + # If we produced any aggregations, mark as done for this slot + aggregated_flag = self.aggregated_in_current_slot + if aggregated_results: + aggregated_flag = Boolean(True) + + return self.model_copy( + update={ + "aggregated_payloads": new_aggregated_payloads, + "aggregated_in_current_slot": aggregated_flag, + } + ) def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": """ @@ -981,11 +1003,13 @@ def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": **Interval 2 (Safe Target Update)**: - Compute safe target with 2/3+ majority - Provides validators with a stable attestation target + - Aggregators check for 90% participation before aggregating **Interval 3 (Attestation Acceptance)**: - Accept accumulated attestations (new → known) - Update head based on new attestation weights - Prepare for next slot + - Aggregators force aggregation if not done yet Args: has_proposal: Whether a proposal exists for this interval. 
@@ -999,16 +1023,21 @@ def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": current_interval = store.time % SECONDS_PER_SLOT % INTERVALS_PER_SLOT if current_interval == Uint64(0): - # Start of slot - process attestations if proposal exists + # Start of slot - reset flags and process attestations if proposal exists + store = store.model_copy(update={"aggregated_in_current_slot": Boolean(False)}) if has_proposal: store = store.accept_new_attestations() elif current_interval == Uint64(2): # Mid-slot - update safe target for validators store = store.update_safe_target() if is_aggregator: - store = store.aggregate_committee_signatures() + # Wait for 90% signatures from subnet validators + store = store.aggregate_committee_signatures(threshold_ratio=0.9) elif current_interval == Uint64(3): - # End of slot - accept accumulated attestations + # End of slot - finalize aggregation and accept attestations + if is_aggregator and not store.aggregated_in_current_slot: + # Aggregate no matter how many signatures if not done before + store = store.aggregate_committee_signatures(threshold_ratio=0.0) store = store.accept_new_attestations() return store @@ -1073,7 +1102,9 @@ def get_proposal_head(self, slot: Slot) -> tuple["Store", Bytes32]: slot_time = self.config.genesis_time + slot * SECONDS_PER_SLOT # Advance time to current slot (ticking intervals) - store = self.on_tick(slot_time, True) + # It is safe not to aggregate during advancement, as it is too + # late to aggregate committee signatures anyway when proposing + store = self.on_tick(slot_time, True, is_aggregator=False) # Process any pending attestations before proposal store = store.accept_new_attestations() diff --git a/src/lean_spec/subspecs/networking/subnet.py b/src/lean_spec/subspecs/networking/subnet.py index 01d19bf5..75b0f268 100644 --- a/src/lean_spec/subspecs/networking/subnet.py +++ b/src/lean_spec/subspecs/networking/subnet.py @@ -8,7 +8,7 @@ from src.lean_spec.types import Uint64 -def 
compute_subnet_id(validator_index: Uint64, num_committees: Uint64) -> int: +def compute_subnet_id(validator_index: Uint64, num_committees: Uint64) -> Uint64: """Compute the attestation subnet id for a validator. Args: @@ -20,3 +20,21 @@ def compute_subnet_id(validator_index: Uint64, num_committees: Uint64) -> int: """ subnet_id = validator_index % num_committees return subnet_id + +def compute_subnet_size(subnet_id: Uint64, num_committees: Uint64, total_validators: Uint64) -> Uint64: + """Compute the size of a given subnet. + + Args: + subnet_id: The subnet id to compute the size for. + num_committees: Positive number of committees. + total_validators: Total number of validators. + + Returns: + The size of the specified subnet. + """ + base_size = total_validators // num_committees + remainder = total_validators % num_committees + if subnet_id < remainder: + return base_size + 1 + else: + return base_size \ No newline at end of file From 3477d6eb31b441b01fcc65fb7d479e92a6db4fb2 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Fri, 16 Jan 2026 10:31:17 +0500 Subject: [PATCH 20/39] feat: replace attestation_subnet_count with attestation_committee_count in configuration --- src/lean_spec/subspecs/chain/config.py | 4 ++++ src/lean_spec/subspecs/containers/config.py | 3 --- src/lean_spec/subspecs/containers/state/state.py | 5 ++--- src/lean_spec/subspecs/forkchoice/store.py | 9 +++++---- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/lean_spec/subspecs/chain/config.py b/src/lean_spec/subspecs/chain/config.py index 5f7add98..71949a67 100644 --- a/src/lean_spec/subspecs/chain/config.py +++ b/src/lean_spec/subspecs/chain/config.py @@ -55,6 +55,9 @@ class _ChainConfig(StrictBaseModel): historical_roots_limit: Uint64 validator_registry_limit: Uint64 + # Attestation / Networking + attestation_committee_count: Uint64 + # The Devnet Chain Configuration. 
DEVNET_CONFIG: Final = _ChainConfig( @@ -62,4 +65,5 @@ class _ChainConfig(StrictBaseModel): justification_lookback_slots=JUSTIFICATION_LOOKBACK_SLOTS, historical_roots_limit=HISTORICAL_ROOTS_LIMIT, validator_registry_limit=VALIDATOR_REGISTRY_LIMIT, + attestation_committee_count=ATTESTATION_COMMITTEE_COUNT, ) diff --git a/src/lean_spec/subspecs/containers/config.py b/src/lean_spec/subspecs/containers/config.py index f0b00723..18289e88 100644 --- a/src/lean_spec/subspecs/containers/config.py +++ b/src/lean_spec/subspecs/containers/config.py @@ -14,6 +14,3 @@ class Config(Container): genesis_time: Uint64 """The timestamp of the genesis block.""" - - attestation_subnet_count: Uint64 - """The number of attestation subnets in the network.""" diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index 16a9010a..d5b5266e 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -92,7 +92,6 @@ def generate_genesis(cls, genesis_time: Uint64, validators: Validators) -> "Stat # Configure the genesis state. genesis_config = Config( genesis_time=genesis_time, - attestation_subnet_count=ATTESTATION_COMMITTEE_COUNT, ) # Build the genesis block header for the state. 
@@ -817,12 +816,12 @@ def aggregate_gossip_signatures( # Calculate committee size for the subnet of these validators # We assume all validators in an aggregation group belong to the same subnet first_validator_id = gossip_ids[0] - subnet_id = compute_subnet_id(first_validator_id, self.config.attestation_subnet_count) + subnet_id = compute_subnet_id(first_validator_id, ATTESTATION_COMMITTEE_COUNT) # Count total validators in this subnet committee_size = compute_subnet_size( subnet_id, - self.config.attestation_subnet_count, + ATTESTATION_COMMITTEE_COUNT, len(self.validators), ) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 1a1f35d0..3e0961cb 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -19,6 +19,7 @@ JUSTIFICATION_LOOKBACK_SLOTS, SECONDS_PER_INTERVAL, SECONDS_PER_SLOT, + ATTESTATION_COMMITTEE_COUNT, ) from lean_spec.subspecs.containers import ( Attestation, @@ -329,8 +330,8 @@ def on_gossip_attestation( public_key, attestation_data.slot, attestation_data.data_root_bytes(), scheme ), "Signature verification failed" - current_validator_subnet = compute_subnet_id(current_validator_id, self.config.attestation_subnet_count) - attester_subnet = compute_subnet_id(validator_id, self.config.attestation_subnet_count) + current_validator_subnet = compute_subnet_id(current_validator_id, ATTESTATION_COMMITTEE_COUNT) + attester_subnet = compute_subnet_id(validator_id, ATTESTATION_COMMITTEE_COUNT) # Store signature for later aggregation if applicable new_commitee_sigs = dict(self.gossip_committee_signatures) @@ -709,8 +710,8 @@ def on_block( # Store proposer signature for future lookup if he belongs to the same committee as current validator proposer_validator_id = proposer_attestation.validator_id - proposer_subnet_id = compute_subnet_id(proposer_validator_id, self.config.attestation_subnet_count) - current_validator_subnet_id = 
compute_subnet_id(current_validator, self.config.attestation_subnet_count) + proposer_subnet_id = compute_subnet_id(proposer_validator_id, ATTESTATION_COMMITTEE_COUNT) + current_validator_subnet_id = compute_subnet_id(current_validator, ATTESTATION_COMMITTEE_COUNT) if proposer_subnet_id == current_validator_subnet_id: proposer_sig_key = SignatureKey( proposer_attestation.validator_id, From 9174f5b95a3698f326110d0251798d5026e12fa4 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Fri, 16 Jan 2026 10:55:27 +0500 Subject: [PATCH 21/39] feat: add committee signature threshold ratio chain config --- src/lean_spec/subspecs/chain/config.py | 7 +++++++ src/lean_spec/subspecs/forkchoice/store.py | 5 +++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/lean_spec/subspecs/chain/config.py b/src/lean_spec/subspecs/chain/config.py index 71949a67..22adf314 100644 --- a/src/lean_spec/subspecs/chain/config.py +++ b/src/lean_spec/subspecs/chain/config.py @@ -40,6 +40,9 @@ ATTESTATION_COMMITTEE_COUNT: Final = Uint64(1) """The number of attestation committees per slot.""" +COMMITTEE_SIGNATURE_THRESHOLD_RATIO: Final = 0.9 +"""Default ratio of committee signature participation required to trigger aggregation.""" + class _ChainConfig(StrictBaseModel): """ @@ -58,6 +61,9 @@ class _ChainConfig(StrictBaseModel): # Attestation / Networking attestation_committee_count: Uint64 + # Aggregation behavior + committee_signature_threshold_ratio: float + # The Devnet Chain Configuration. 
DEVNET_CONFIG: Final = _ChainConfig( @@ -66,4 +72,5 @@ class _ChainConfig(StrictBaseModel): historical_roots_limit=HISTORICAL_ROOTS_LIMIT, validator_registry_limit=VALIDATOR_REGISTRY_LIMIT, attestation_committee_count=ATTESTATION_COMMITTEE_COUNT, + committee_signature_threshold_ratio=COMMITTEE_SIGNATURE_THRESHOLD_RATIO, ) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 3e0961cb..d678abd2 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -20,6 +20,7 @@ SECONDS_PER_INTERVAL, SECONDS_PER_SLOT, ATTESTATION_COMMITTEE_COUNT, + COMMITTEE_SIGNATURE_THRESHOLD_RATIO, ) from lean_spec.subspecs.containers import ( Attestation, @@ -1032,8 +1033,8 @@ def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": # Mid-slot - update safe target for validators store = store.update_safe_target() if is_aggregator: - # Wait for 90% signatures from subnet validators - store = store.aggregate_committee_signatures(threshold_ratio=0.9) + # Wait for configured ratio of signatures from subnet validators + store = store.aggregate_committee_signatures(threshold_ratio=COMMITTEE_SIGNATURE_THRESHOLD_RATIO) elif current_interval == Uint64(3): # End of slot - finalize aggregation and accept attestations if is_aggregator and not store.aggregated_in_current_slot: From 3115ef5da0bcff2d0eddf09e742fb5b0b5d7f170 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Fri, 16 Jan 2026 11:24:19 +0500 Subject: [PATCH 22/39] feat: aggregate on gossip Aggregate during interval 2 if more threshold signatures were received --- src/lean_spec/subspecs/forkchoice/store.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index d678abd2..7adc1188 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -311,7 +311,6 @@ def 
on_gossip_attestation( attestation_data = signed_attestation.message signature = signed_attestation.signature - # Validate the attestation first so unknown blocks are rejected cleanly # (instead of raising a raw KeyError when state is missing). attestation = Attestation(validator_id=validator_id, data=attestation_data) @@ -331,6 +330,9 @@ def on_gossip_attestation( public_key, attestation_data.slot, attestation_data.data_root_bytes(), scheme ), "Signature verification failed" + # Process the attestation data + store = self.on_attestation(attestation=attestation, is_from_block=False) + current_validator_subnet = compute_subnet_id(current_validator_id, ATTESTATION_COMMITTEE_COUNT) attester_subnet = compute_subnet_id(validator_id, ATTESTATION_COMMITTEE_COUNT) @@ -342,8 +344,12 @@ def on_gossip_attestation( sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) new_commitee_sigs[sig_key] = signature - # Process the attestation data - store = self.on_attestation(attestation=attestation, is_from_block=False) + # If in the interval 2 of the slot and not yet aggregated, try to aggregate + current_interval = (self.time // SECONDS_PER_INTERVAL) % INTERVALS_PER_SLOT + if current_interval == 2 and not store.aggregated_in_current_slot: + store = store.aggregate_committee_signatures( + threshold_ratio=COMMITTEE_SIGNATURE_THRESHOLD_RATIO + ) # Return store with updated signature maps return store.model_copy(update={"gossip_committee_signatures": new_commitee_sigs}) From 3fffe71891d6b7e0d41c3a9cc5b82b7638c8f0d9 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Thu, 22 Jan 2026 18:33:44 +0500 Subject: [PATCH 23/39] docs: clarify aggregator role in validator participation --- docs/client/validator.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/client/validator.md b/docs/client/validator.md index 40b62f18..43391448 100644 --- a/docs/client/validator.md +++ b/docs/client/validator.md @@ -3,8 +3,8 @@ ## Overview Validators participate in 
consensus by proposing blocks and producing attestations. -Optionally validators can opt-in to behave as aggregators in a single or multiple -committees. This document describes what honest validators do. +Optionally validators can opt-in to behave as aggregators in their committee . +This document describes what honest validators do. ## Validator Assignment From d0462aa1edb671cdce7a7b45330ff7a7b42c47d9 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Fri, 23 Jan 2026 10:36:39 +0500 Subject: [PATCH 24/39] Revert "feat: aggregate on gossip" This reverts commit 3115ef5da0bcff2d0eddf09e742fb5b0b5d7f170. --- src/lean_spec/subspecs/forkchoice/store.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 7adc1188..d678abd2 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -311,6 +311,7 @@ def on_gossip_attestation( attestation_data = signed_attestation.message signature = signed_attestation.signature + # Validate the attestation first so unknown blocks are rejected cleanly # (instead of raising a raw KeyError when state is missing). 
attestation = Attestation(validator_id=validator_id, data=attestation_data) @@ -330,9 +331,6 @@ def on_gossip_attestation( public_key, attestation_data.slot, attestation_data.data_root_bytes(), scheme ), "Signature verification failed" - # Process the attestation data - store = self.on_attestation(attestation=attestation, is_from_block=False) - current_validator_subnet = compute_subnet_id(current_validator_id, ATTESTATION_COMMITTEE_COUNT) attester_subnet = compute_subnet_id(validator_id, ATTESTATION_COMMITTEE_COUNT) @@ -344,12 +342,8 @@ def on_gossip_attestation( sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) new_commitee_sigs[sig_key] = signature - # If in the interval 2 of the slot and not yet aggregated, try to aggregate - current_interval = (self.time // SECONDS_PER_INTERVAL) % INTERVALS_PER_SLOT - if current_interval == 2 and not store.aggregated_in_current_slot: - store = store.aggregate_committee_signatures( - threshold_ratio=COMMITTEE_SIGNATURE_THRESHOLD_RATIO - ) + # Process the attestation data + store = self.on_attestation(attestation=attestation, is_from_block=False) # Return store with updated signature maps return store.model_copy(update={"gossip_committee_signatures": new_commitee_sigs}) From e2fd644c05648fb4bef6eab2486d2c182225f66a Mon Sep 17 00:00:00 2001 From: kamilsa Date: Fri, 23 Jan 2026 10:36:39 +0500 Subject: [PATCH 25/39] Revert "feat: add committee signature threshold ratio chain config" This reverts commit 9174f5b95a3698f326110d0251798d5026e12fa4. 
--- src/lean_spec/subspecs/chain/config.py | 7 ------- src/lean_spec/subspecs/forkchoice/store.py | 5 ++--- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/src/lean_spec/subspecs/chain/config.py b/src/lean_spec/subspecs/chain/config.py index 22adf314..71949a67 100644 --- a/src/lean_spec/subspecs/chain/config.py +++ b/src/lean_spec/subspecs/chain/config.py @@ -40,9 +40,6 @@ ATTESTATION_COMMITTEE_COUNT: Final = Uint64(1) """The number of attestation committees per slot.""" -COMMITTEE_SIGNATURE_THRESHOLD_RATIO: Final = 0.9 -"""Default ratio of committee signature participation required to trigger aggregation.""" - class _ChainConfig(StrictBaseModel): """ @@ -61,9 +58,6 @@ class _ChainConfig(StrictBaseModel): # Attestation / Networking attestation_committee_count: Uint64 - # Aggregation behavior - committee_signature_threshold_ratio: float - # The Devnet Chain Configuration. DEVNET_CONFIG: Final = _ChainConfig( @@ -72,5 +66,4 @@ class _ChainConfig(StrictBaseModel): historical_roots_limit=HISTORICAL_ROOTS_LIMIT, validator_registry_limit=VALIDATOR_REGISTRY_LIMIT, attestation_committee_count=ATTESTATION_COMMITTEE_COUNT, - committee_signature_threshold_ratio=COMMITTEE_SIGNATURE_THRESHOLD_RATIO, ) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index d678abd2..3e0961cb 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -20,7 +20,6 @@ SECONDS_PER_INTERVAL, SECONDS_PER_SLOT, ATTESTATION_COMMITTEE_COUNT, - COMMITTEE_SIGNATURE_THRESHOLD_RATIO, ) from lean_spec.subspecs.containers import ( Attestation, @@ -1033,8 +1032,8 @@ def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": # Mid-slot - update safe target for validators store = store.update_safe_target() if is_aggregator: - # Wait for configured ratio of signatures from subnet validators - store = 
store.aggregate_committee_signatures(threshold_ratio=COMMITTEE_SIGNATURE_THRESHOLD_RATIO) + # Wait for 90% signatures from subnet validators + store = store.aggregate_committee_signatures(threshold_ratio=0.9) elif current_interval == Uint64(3): # End of slot - finalize aggregation and accept attestations if is_aggregator and not store.aggregated_in_current_slot: From d40199cd7334aa7be3b20366e37ebc325cf49221 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Fri, 23 Jan 2026 10:36:40 +0500 Subject: [PATCH 26/39] Revert "feat: replace attestation_subnet_count with attestation_committee_count in configuration" This reverts commit 3477d6eb31b441b01fcc65fb7d479e92a6db4fb2. --- src/lean_spec/subspecs/chain/config.py | 4 ---- src/lean_spec/subspecs/containers/config.py | 3 +++ src/lean_spec/subspecs/containers/state/state.py | 5 +++-- src/lean_spec/subspecs/forkchoice/store.py | 9 ++++----- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/lean_spec/subspecs/chain/config.py b/src/lean_spec/subspecs/chain/config.py index 71949a67..5f7add98 100644 --- a/src/lean_spec/subspecs/chain/config.py +++ b/src/lean_spec/subspecs/chain/config.py @@ -55,9 +55,6 @@ class _ChainConfig(StrictBaseModel): historical_roots_limit: Uint64 validator_registry_limit: Uint64 - # Attestation / Networking - attestation_committee_count: Uint64 - # The Devnet Chain Configuration. 
DEVNET_CONFIG: Final = _ChainConfig( @@ -65,5 +62,4 @@ class _ChainConfig(StrictBaseModel): justification_lookback_slots=JUSTIFICATION_LOOKBACK_SLOTS, historical_roots_limit=HISTORICAL_ROOTS_LIMIT, validator_registry_limit=VALIDATOR_REGISTRY_LIMIT, - attestation_committee_count=ATTESTATION_COMMITTEE_COUNT, ) diff --git a/src/lean_spec/subspecs/containers/config.py b/src/lean_spec/subspecs/containers/config.py index 18289e88..f0b00723 100644 --- a/src/lean_spec/subspecs/containers/config.py +++ b/src/lean_spec/subspecs/containers/config.py @@ -14,3 +14,6 @@ class Config(Container): genesis_time: Uint64 """The timestamp of the genesis block.""" + + attestation_subnet_count: Uint64 + """The number of attestation subnets in the network.""" diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index d5b5266e..16a9010a 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -92,6 +92,7 @@ def generate_genesis(cls, genesis_time: Uint64, validators: Validators) -> "Stat # Configure the genesis state. genesis_config = Config( genesis_time=genesis_time, + attestation_subnet_count=ATTESTATION_COMMITTEE_COUNT, ) # Build the genesis block header for the state. 
@@ -816,12 +817,12 @@ def aggregate_gossip_signatures( # Calculate committee size for the subnet of these validators # We assume all validators in an aggregation group belong to the same subnet first_validator_id = gossip_ids[0] - subnet_id = compute_subnet_id(first_validator_id, ATTESTATION_COMMITTEE_COUNT) + subnet_id = compute_subnet_id(first_validator_id, self.config.attestation_subnet_count) # Count total validators in this subnet committee_size = compute_subnet_size( subnet_id, - ATTESTATION_COMMITTEE_COUNT, + self.config.attestation_subnet_count, len(self.validators), ) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 3e0961cb..1a1f35d0 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -19,7 +19,6 @@ JUSTIFICATION_LOOKBACK_SLOTS, SECONDS_PER_INTERVAL, SECONDS_PER_SLOT, - ATTESTATION_COMMITTEE_COUNT, ) from lean_spec.subspecs.containers import ( Attestation, @@ -330,8 +329,8 @@ def on_gossip_attestation( public_key, attestation_data.slot, attestation_data.data_root_bytes(), scheme ), "Signature verification failed" - current_validator_subnet = compute_subnet_id(current_validator_id, ATTESTATION_COMMITTEE_COUNT) - attester_subnet = compute_subnet_id(validator_id, ATTESTATION_COMMITTEE_COUNT) + current_validator_subnet = compute_subnet_id(current_validator_id, self.config.attestation_subnet_count) + attester_subnet = compute_subnet_id(validator_id, self.config.attestation_subnet_count) # Store signature for later aggregation if applicable new_commitee_sigs = dict(self.gossip_committee_signatures) @@ -710,8 +709,8 @@ def on_block( # Store proposer signature for future lookup if he belongs to the same committee as current validator proposer_validator_id = proposer_attestation.validator_id - proposer_subnet_id = compute_subnet_id(proposer_validator_id, ATTESTATION_COMMITTEE_COUNT) - current_validator_subnet_id = compute_subnet_id(current_validator, 
ATTESTATION_COMMITTEE_COUNT) + proposer_subnet_id = compute_subnet_id(proposer_validator_id, self.config.attestation_subnet_count) + current_validator_subnet_id = compute_subnet_id(current_validator, self.config.attestation_subnet_count) if proposer_subnet_id == current_validator_subnet_id: proposer_sig_key = SignatureKey( proposer_attestation.validator_id, From d46dd084cf088f04c5888780c7f54ace6a608e59 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Fri, 23 Jan 2026 10:36:40 +0500 Subject: [PATCH 27/39] Revert "feat: add threshold ratio for committee signature aggregation" This reverts commit 6556e81fd1df08cb3ba399f1b136e5088cf3f633. --- .../subspecs/containers/state/state.py | 22 --------- src/lean_spec/subspecs/forkchoice/store.py | 47 ++++--------------- src/lean_spec/subspecs/networking/subnet.py | 20 +------- 3 files changed, 9 insertions(+), 80 deletions(-) diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index 16a9010a..ab543588 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -2,7 +2,6 @@ from typing import AbstractSet, Iterable -from lean_spec.subspecs.networking.subnet import compute_subnet_id, compute_subnet_size from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.subspecs.xmss.aggregation import ( AggregatedSignatureProof, @@ -743,7 +742,6 @@ def aggregate_gossip_signatures( self, attestations: list[Attestation], gossip_signatures: dict[SignatureKey, "Signature"] | None = None, - threshold_ratio: float = 0.0, ) -> list[tuple[AggregatedAttestation, AggregatedSignatureProof]]: """ Collect aggregated signatures from gossip network and aggregate them. @@ -758,9 +756,6 @@ def aggregate_gossip_signatures( Individual attestations to aggregate and sign. gossip_signatures : dict[SignatureKey, Signature] | None Per-validator XMSS signatures learned from the gossip network. 
- threshold_ratio : float - Minimum ratio of committee signatures required to produce an aggregation. - Defaults to 0.0 (aggregate even if only 1 signature). Returns: ------- @@ -812,23 +807,6 @@ def aggregate_gossip_signatures( # The aggregation combines multiple XMSS signatures into a single # compact proof that can verify all participants signed the message. if gossip_ids: - # Check participation threshold if required - if threshold_ratio > 0.0: - # Calculate committee size for the subnet of these validators - # We assume all validators in an aggregation group belong to the same subnet - first_validator_id = gossip_ids[0] - subnet_id = compute_subnet_id(first_validator_id, self.config.attestation_subnet_count) - - # Count total validators in this subnet - committee_size = compute_subnet_size( - subnet_id, - self.config.attestation_subnet_count, - len(self.validators), - ) - - if len(gossip_ids) < committee_size * threshold_ratio: - continue - participants = AggregationBits.from_validator_indices(gossip_ids) proof = AggregatedSignatureProof.aggregate( participants=participants, diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 1a1f35d0..aac85515 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -147,12 +147,6 @@ class Store(Container): - Only stores the attestation data, not signatures. """ - aggregated_in_current_slot: Boolean = Boolean(False) - """ - Tracks whether committee signatures have been successfully aggregated in the current slot. - Reset at the start of each slot (Interval 0). - """ - gossip_committee_signatures: dict[SignatureKey, Signature] = {} """ Per-validator XMSS signatures learned from committee attesters. 
@@ -925,18 +919,14 @@ def update_safe_target(self) -> "Store": return self.model_copy(update={"safe_target": safe_target}) - def aggregate_committee_signatures(self, threshold_ratio: float = 0.0) -> "Store": + def aggregate_committee_signatures(self) -> "Store": """ Aggregate committee signatures for attestations in committee_signatures. - This method aggregates signatures from the gossip_committee_signatures map. - - Args: - threshold_ratio: Minimum participation ratio (0.0 to 1.0). - Aggregates only if signature count / committee size >= ratio. + This method aggregates signatures from the gossip_committee_signatures map Returns: - New Store with updated aggregated_payloads and aggregated_in_current_slot flag. + New Store with updated aggregated_payloads. """ new_aggregated_payloads = dict(self.aggregated_payloads) @@ -948,7 +938,6 @@ def aggregate_committee_signatures(self, threshold_ratio: float = 0.0) -> "Store aggregated_results = head_state.aggregate_gossip_signatures( attestations, committee_signatures, - threshold_ratio=threshold_ratio, ) # iterate to broadcast aggregated attestations @@ -968,18 +957,7 @@ def aggregate_committee_signatures(self, threshold_ratio: float = 0.0) -> "Store if sig_key not in new_aggregated_payloads: new_aggregated_payloads[sig_key] = [] new_aggregated_payloads[sig_key].append(aggregated_signature) - - # If we produced any aggregations, mark as done for this slot - aggregated_flag = self.aggregated_in_current_slot - if aggregated_results: - aggregated_flag = Boolean(True) - - return self.model_copy( - update={ - "aggregated_payloads": new_aggregated_payloads, - "aggregated_in_current_slot": aggregated_flag, - } - ) + return self.model_copy(update={"aggregated_payloads": new_aggregated_payloads}) def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": """ @@ -1003,13 +981,11 @@ def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": **Interval 2 (Safe Target Update)**: - Compute safe target 
with 2/3+ majority - Provides validators with a stable attestation target - - Aggregators check for 90% participation before aggregating **Interval 3 (Attestation Acceptance)**: - Accept accumulated attestations (new → known) - Update head based on new attestation weights - Prepare for next slot - - Aggregators force aggregation if not done yet Args: has_proposal: Whether a proposal exists for this interval. @@ -1023,21 +999,16 @@ def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": current_interval = store.time % SECONDS_PER_SLOT % INTERVALS_PER_SLOT if current_interval == Uint64(0): - # Start of slot - reset flags and process attestations if proposal exists - store = store.model_copy(update={"aggregated_in_current_slot": Boolean(False)}) + # Start of slot - process attestations if proposal exists if has_proposal: store = store.accept_new_attestations() elif current_interval == Uint64(2): # Mid-slot - update safe target for validators store = store.update_safe_target() if is_aggregator: - # Wait for 90% signatures from subnet validators - store = store.aggregate_committee_signatures(threshold_ratio=0.9) + store = store.aggregate_committee_signatures() elif current_interval == Uint64(3): - # End of slot - finalize aggregation and accept attestations - if is_aggregator and not store.aggregated_in_current_slot: - # Aggregate no matter how many signatures if not done before - store = store.aggregate_committee_signatures(threshold_ratio=0.0) + # End of slot - accept accumulated attestations store = store.accept_new_attestations() return store @@ -1102,9 +1073,7 @@ def get_proposal_head(self, slot: Slot) -> tuple["Store", Bytes32]: slot_time = self.config.genesis_time + slot * SECONDS_PER_SLOT # Advance time to current slot (ticking intervals) - # It is safe not to aggregate during advancement, as it is too - # late to aggregate committee signatures anyway when proposing - store = self.on_tick(slot_time, True, is_aggregator=False) + store = 
self.on_tick(slot_time, True) # Process any pending attestations before proposal store = store.accept_new_attestations() diff --git a/src/lean_spec/subspecs/networking/subnet.py b/src/lean_spec/subspecs/networking/subnet.py index 75b0f268..01d19bf5 100644 --- a/src/lean_spec/subspecs/networking/subnet.py +++ b/src/lean_spec/subspecs/networking/subnet.py @@ -8,7 +8,7 @@ from src.lean_spec.types import Uint64 -def compute_subnet_id(validator_index: Uint64, num_committees: Uint64) -> Uint64: +def compute_subnet_id(validator_index: Uint64, num_committees: Uint64) -> int: """Compute the attestation subnet id for a validator. Args: @@ -20,21 +20,3 @@ def compute_subnet_id(validator_index: Uint64, num_committees: Uint64) -> Uint64 """ subnet_id = validator_index % num_committees return subnet_id - -def compute_subnet_size(subnet_id: Uint64, num_committees: Uint64, total_validators: Uint64) -> Uint64: - """Compute the size of a given subnet. - - Args: - subnet_id: The subnet id to compute the size for. - num_committees: Positive number of committees. - total_validators: Total number of validators. - - Returns: - The size of the specified subnet. 
- """ - base_size = total_validators // num_committees - remainder = total_validators % num_committees - if subnet_id < remainder: - return base_size + 1 - else: - return base_size \ No newline at end of file From 61b8100d9d44e4d44d7f47dcd4428f7e6484e4c9 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 27 Jan 2026 11:33:50 +0500 Subject: [PATCH 28/39] refactor: update current_validator_id type from Uint64 to ValidatorIndex --- src/lean_spec/subspecs/forkchoice/store.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 55749fdb..59982bb0 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -274,7 +274,7 @@ def on_gossip_attestation( self, signed_attestation: SignedAttestation, is_aggregator: bool, - current_validator_id: Uint64, + current_validator_id: ValidatorIndex, scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, ) -> "Store": """ @@ -504,7 +504,7 @@ def on_gossip_aggregated_attestation(self, signed_attestation: SignedAggregatedA # Ensure all participants exist in the active set validators = key_state.validators for validator_id in validator_ids: - assert validator_id < Uint64(len(validators)), ( + assert validator_id < ValidatorIndex(len(validators)), ( f"Validator {validator_id} not found in state {data.target.root.hex()}" ) @@ -555,7 +555,7 @@ def on_gossip_aggregated_attestation(self, signed_attestation: SignedAggregatedA def on_block( self, signed_block_with_attestation: SignedBlockWithAttestation, - current_validator: Uint64, + current_validator: ValidatorIndex, scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, ) -> "Store": """ From d66dfd31026a0981fdec5c0e8e16fc862b2cc428 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 27 Jan 2026 12:22:29 +0500 Subject: [PATCH 29/39] Fix tests --- src/lean_spec/subspecs/containers/config.py | 2 +- .../subspecs/containers/state/state.py 
| 39 ++++++++- src/lean_spec/subspecs/forkchoice/store.py | 80 +++++++++++-------- .../subspecs/networking/gossipsub/topic.py | 2 +- src/lean_spec/subspecs/networking/subnet.py | 2 +- .../lean_spec/subspecs/genesis/test_state.py | 6 +- tests/lean_spec/subspecs/ssz/test_state.py | 7 +- 7 files changed, 91 insertions(+), 47 deletions(-) diff --git a/src/lean_spec/subspecs/containers/config.py b/src/lean_spec/subspecs/containers/config.py index f0b00723..6840f889 100644 --- a/src/lean_spec/subspecs/containers/config.py +++ b/src/lean_spec/subspecs/containers/config.py @@ -15,5 +15,5 @@ class Config(Container): genesis_time: Uint64 """The timestamp of the genesis block.""" - attestation_subnet_count: Uint64 + attestation_subnet_count: Uint64 = Uint64(1) """The number of attestation subnets in the network.""" diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index 1ebc68b0..ebf4797c 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -755,10 +755,12 @@ def build_block( # Add new attestations and continue iteration attestations.extend(new_attestations) - # Select aggregated attestations and proofs for the final block - aggregated_attestations, aggregated_signatures = self.select_aggregated_proofs( + # Select aggregated attestations and proofs for the final block. + # Prefer fresh gossip signatures; fall back to previously-seen aggregated proofs. 
+ aggregated_attestations, aggregated_signatures = self.compute_aggregated_signatures( attestations, - aggregated_payloads, + gossip_signatures=gossip_signatures, + aggregated_payloads=aggregated_payloads, ) # Create the final block with aggregated attestations and proofs @@ -868,6 +870,37 @@ def aggregate_gossip_signatures( return results + def compute_aggregated_signatures( + self, + attestations: list[Attestation], + gossip_signatures: dict[SignatureKey, "Signature"] | None = None, + aggregated_payloads: dict[SignatureKey, list[AggregatedSignatureProof]] | None = None, + ) -> tuple[list[AggregatedAttestation], list[AggregatedSignatureProof]]: + """ + Backwards-compatible wrapper for signature aggregation. + + Older code/tests expect a single method that returns two parallel lists: + (aggregated_attestations, aggregated_proofs). + + The current implementation separates: + - `aggregate_gossip_signatures` (fresh per-validator signatures collected via gossip) + - `select_aggregated_proofs` (reusing previously-seen aggregated proofs from blocks) + """ + results = self.aggregate_gossip_signatures(attestations, gossip_signatures=gossip_signatures) + if aggregated_payloads: + # Note: This may add additional proofs for the same attestation data. + # Callers that rely on strict minimality should use the split APIs. 
+ fallback_atts, fallback_proofs = self.select_aggregated_proofs( + attestations, aggregated_payloads=aggregated_payloads + ) + results.extend(zip(fallback_atts, fallback_proofs, strict=True)) + + if not results: + return [], [] + + atts, proofs = zip(*results, strict=True) + return list(atts), list(proofs) + def select_aggregated_proofs( self, attestations: list[Attestation], diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 59982bb0..23f481c9 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -48,8 +48,8 @@ from lean_spec.types.container import Container from lean_spec.subspecs.networking import compute_subnet_id -from src.lean_spec.subspecs.containers.attestation.attestation import SignedAggregatedAttestation -from src.lean_spec.subspecs.xmss.aggregation import AggregationError +from lean_spec.subspecs.containers.attestation.attestation import SignedAggregatedAttestation +from lean_spec.subspecs.xmss.aggregation import AggregationError class Store(Container): @@ -147,7 +147,7 @@ class Store(Container): - Only stores the attestation data, not signatures. """ - gossip_committee_signatures: dict[SignatureKey, Signature] = {} + gossip_signatures: dict[SignatureKey, Signature] = {} """ Per-validator XMSS signatures learned from committee attesters. 
@@ -273,8 +273,8 @@ def validate_attestation(self, attestation: Attestation) -> None: def on_gossip_attestation( self, signed_attestation: SignedAttestation, - is_aggregator: bool, - current_validator_id: ValidatorIndex, + is_aggregator: bool = False, + current_validator_id: ValidatorIndex | None = None, scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, ) -> "Store": """ @@ -323,22 +323,32 @@ def on_gossip_attestation( public_key, attestation_data.slot, attestation_data.data_root_bytes(), scheme ), "Signature verification failed" - current_validator_subnet = compute_subnet_id(current_validator_id, self.config.attestation_subnet_count) - attester_subnet = compute_subnet_id(validator_id, self.config.attestation_subnet_count) - - # Store signature for later aggregation if applicable - new_commitee_sigs = dict(self.gossip_committee_signatures) - if is_aggregator and current_validator_subnet == attester_subnet: + # Store signature for later aggregation if applicable. + # + # For backwards compatibility, if the caller does not provide + # `current_validator_id`, we treat this as "not aggregating committee sigs". + new_commitee_sigs = dict(self.gossip_signatures) + if is_aggregator and current_validator_id is not None: + current_validator_subnet = compute_subnet_id( + current_validator_id, self.config.attestation_subnet_count + ) + attester_subnet = compute_subnet_id(validator_id, self.config.attestation_subnet_count) + if current_validator_subnet != attester_subnet: + # Not part of our committee; ignore for committee aggregation. + pass + else: + sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) + new_commitee_sigs[sig_key] = signature + else: # If this validator is an aggregator for this attestation, # also store the signature in the committee signatures map. 
- sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) - new_commitee_sigs[sig_key] = signature + pass # Process the attestation data store = self.on_attestation(attestation=attestation, is_from_block=False) # Return store with updated signature maps - return store.model_copy(update={"gossip_committee_signatures": new_commitee_sigs}) + return store.model_copy(update={"gossip_signatures": new_commitee_sigs}) def on_attestation( self, @@ -555,7 +565,7 @@ def on_gossip_aggregated_attestation(self, signed_attestation: SignedAggregatedA def on_block( self, signed_block_with_attestation: SignedBlockWithAttestation, - current_validator: ValidatorIndex, + current_validator: ValidatorIndex | None = None, scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, ) -> "Store": """ @@ -699,20 +709,26 @@ def on_block( # 2. Be available for inclusion in future blocks # 3. Influence fork choice only after interval 3 (end of slot) - new_gossip_sigs = dict(store.gossip_committee_signatures) + new_gossip_sigs = dict(store.gossip_signatures) - # Store proposer signature for future lookup if he belongs to the same committee as current validator - proposer_validator_id = proposer_attestation.validator_id - proposer_subnet_id = compute_subnet_id(proposer_validator_id, self.config.attestation_subnet_count) - current_validator_subnet_id = compute_subnet_id(current_validator, self.config.attestation_subnet_count) - if proposer_subnet_id == current_validator_subnet_id: - proposer_sig_key = SignatureKey( - proposer_attestation.validator_id, - proposer_attestation.data.data_root_bytes(), + # Store proposer signature for future lookup if it belongs to the same committee + # as the current validator (if provided). 
+ if current_validator is not None: + proposer_validator_id = proposer_attestation.validator_id + proposer_subnet_id = compute_subnet_id( + proposer_validator_id, self.config.attestation_subnet_count ) - new_gossip_sigs[proposer_sig_key] = ( - signed_block_with_attestation.signature.proposer_signature + current_validator_subnet_id = compute_subnet_id( + current_validator, self.config.attestation_subnet_count ) + if proposer_subnet_id == current_validator_subnet_id: + proposer_sig_key = SignatureKey( + proposer_attestation.validator_id, + proposer_attestation.data.data_root_bytes(), + ) + new_gossip_sigs[proposer_sig_key] = ( + signed_block_with_attestation.signature.proposer_signature + ) store = store.on_attestation( attestation=proposer_attestation, @@ -720,7 +736,7 @@ def on_block( ) # Update store with proposer signature - store = store.model_copy(update={"gossip_committee_signatures": new_gossip_sigs}) + store = store.model_copy(update={"gossip_signatures": new_gossip_sigs}) return store @@ -923,7 +939,7 @@ def aggregate_committee_signatures(self) -> "Store": """ Aggregate committee signatures for attestations in committee_signatures. - This method aggregates signatures from the gossip_committee_signatures map + This method aggregates signatures from the gossip_signatures map Returns: New Store with updated aggregated_payloads. 
@@ -931,7 +947,7 @@ def aggregate_committee_signatures(self) -> "Store": new_aggregated_payloads = dict(self.aggregated_payloads) attestations = self.latest_new_attestations - committee_signatures = self.gossip_committee_signatures + committee_signatures = self.gossip_signatures head_state = self.states[self.head] # Perform aggregation @@ -959,7 +975,7 @@ def aggregate_committee_signatures(self) -> "Store": new_aggregated_payloads[sig_key].append(aggregated_signature) return self.model_copy(update={"aggregated_payloads": new_aggregated_payloads}) - def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": + def tick_interval(self, has_proposal: bool, is_aggregator: bool = False) -> "Store": """ Advance store time by one interval and perform interval-specific actions. @@ -1013,7 +1029,7 @@ def tick_interval(self, has_proposal: bool, is_aggregator: bool) -> "Store": return store - def on_tick(self, time: Uint64, has_proposal: bool, is_aggregator: bool) -> "Store": + def on_tick(self, time: Uint64, has_proposal: bool, is_aggregator: bool = False) -> "Store": """ Advance forkchoice store time to given timestamp. @@ -1240,7 +1256,7 @@ def produce_block_with_signatures( parent_root=head_root, available_attestations=available_attestations, known_block_roots=set(store.blocks.keys()), - gossip_signatures=store.gossip_committee_signatures, + gossip_signatures=store.gossip_signatures, aggregated_payloads=store.aggregated_payloads, ) diff --git a/src/lean_spec/subspecs/networking/gossipsub/topic.py b/src/lean_spec/subspecs/networking/gossipsub/topic.py index b9faa8ef..0d3d25af 100644 --- a/src/lean_spec/subspecs/networking/gossipsub/topic.py +++ b/src/lean_spec/subspecs/networking/gossipsub/topic.py @@ -236,7 +236,7 @@ def committee_aggregation(cls, fork_digest: str) -> GossipTopic: Returns: GossipTopic for committee aggregation messages. 
""" - return cls(kind=TopicKind.COMMITTEE_AGGREGATION, fork_digest=fork_digest) + return cls(kind=TopicKind.AGGREGATED_ATTESTATION, fork_digest=fork_digest) def format_topic_string( diff --git a/src/lean_spec/subspecs/networking/subnet.py b/src/lean_spec/subspecs/networking/subnet.py index 01d19bf5..72025249 100644 --- a/src/lean_spec/subspecs/networking/subnet.py +++ b/src/lean_spec/subspecs/networking/subnet.py @@ -5,7 +5,7 @@ """ from __future__ import annotations -from src.lean_spec.types import Uint64 +from lean_spec.types import Uint64 def compute_subnet_id(validator_index: Uint64, num_committees: Uint64) -> int: diff --git a/tests/lean_spec/subspecs/genesis/test_state.py b/tests/lean_spec/subspecs/genesis/test_state.py index e6a0d12d..b94b8020 100644 --- a/tests/lean_spec/subspecs/genesis/test_state.py +++ b/tests/lean_spec/subspecs/genesis/test_state.py @@ -112,10 +112,10 @@ def test_genesis_block_hash_comparison() -> None: # Compare genesis block hashes with expected hex values hash1_hex = f"0x{genesis_block_hash1.hex()}" - assert hash1_hex == "0xcc03f11dd80dd79a4add86265fad0a141d0a553812d43b8f2c03aa43e4b002e3" + assert hash1_hex == "0x71555f7f28d7475af64371eb3ae8fad01c76271c02fe2a7799464b25ae3335ee" hash2_hex = f"0x{genesis_block_hash2.hex()}" - assert hash2_hex == "0x6bd5347aa1397c63ed8558079fdd3042112a5f4258066e3a659a659ff75ba14f" + assert hash2_hex == "0x846150f171dbaf07433cd16475e36d7a213fef8bda7a0643242dc38e23870f58" hash3_hex = f"0x{genesis_block_hash3.hex()}" - assert hash3_hex == "0xce48a709189aa2b23b6858800996176dc13eb49c0c95d717c39e60042de1ac91" + assert hash3_hex == "0x69b339f5373f45d91435cdabb85b072e9378768aa588a3642295afe01a1b4682" diff --git a/tests/lean_spec/subspecs/ssz/test_state.py b/tests/lean_spec/subspecs/ssz/test_state.py index 59c43c53..08b65da6 100644 --- a/tests/lean_spec/subspecs/ssz/test_state.py +++ b/tests/lean_spec/subspecs/ssz/test_state.py @@ -42,12 +42,7 @@ def test_encode_decode_state_roundtrip() -> None: encode = 
state.encode_bytes() expected_value = ( - "e80300000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - "00000000000000000000000000000000000000000000000000000000e4000000e4000000e5000000e5000000e5" - "0000000101" + "e80300000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ec000000ec000000ed000000ed000000ed0000000101" ) assert encode.hex() == expected_value assert State.decode_bytes(encode) == state From 360bfb0fb955f21a1abe19282b250252f412fb3a Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 27 Jan 2026 12:39:44 +0500 Subject: [PATCH 30/39] refactor: remove attestation_subnet_count from configuration --- src/lean_spec/subspecs/containers/config.py | 3 --- src/lean_spec/subspecs/containers/state/state.py | 2 -- 2 files changed, 5 deletions(-) diff --git a/src/lean_spec/subspecs/containers/config.py b/src/lean_spec/subspecs/containers/config.py index 6840f889..18289e88 100644 --- a/src/lean_spec/subspecs/containers/config.py +++ b/src/lean_spec/subspecs/containers/config.py @@ -14,6 +14,3 @@ class Config(Container): genesis_time: Uint64 """The timestamp of the genesis block.""" - - attestation_subnet_count: Uint64 = Uint64(1) - """The number of attestation subnets in the network.""" diff --git a/src/lean_spec/subspecs/containers/state/state.py 
b/src/lean_spec/subspecs/containers/state/state.py index ebf4797c..278a7680 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -30,7 +30,6 @@ JustifiedSlots, Validators, ) -from ...chain.config import ATTESTATION_COMMITTEE_COUNT class State(Container): @@ -91,7 +90,6 @@ def generate_genesis(cls, genesis_time: Uint64, validators: Validators) -> "Stat # Configure the genesis state. genesis_config = Config( genesis_time=genesis_time, - attestation_subnet_count=ATTESTATION_COMMITTEE_COUNT, ) # Build the genesis block header for the state. From 379ddd60588743a064a10586496933e66f4eff42 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 27 Jan 2026 12:40:03 +0500 Subject: [PATCH 31/39] Fix tests after attestation_subnet_count from Config --- src/lean_spec/subspecs/forkchoice/store.py | 9 +++++---- tests/lean_spec/subspecs/genesis/test_state.py | 6 +++--- tests/lean_spec/subspecs/ssz/test_state.py | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 23f481c9..f59af52f 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -19,6 +19,7 @@ JUSTIFICATION_LOOKBACK_SLOTS, SECONDS_PER_INTERVAL, SECONDS_PER_SLOT, + ATTESTATION_COMMITTEE_COUNT, ) from lean_spec.subspecs.containers import ( Attestation, @@ -330,9 +331,9 @@ def on_gossip_attestation( new_commitee_sigs = dict(self.gossip_signatures) if is_aggregator and current_validator_id is not None: current_validator_subnet = compute_subnet_id( - current_validator_id, self.config.attestation_subnet_count + current_validator_id, ATTESTATION_COMMITTEE_COUNT ) - attester_subnet = compute_subnet_id(validator_id, self.config.attestation_subnet_count) + attester_subnet = compute_subnet_id(validator_id, ATTESTATION_COMMITTEE_COUNT) if current_validator_subnet != attester_subnet: # Not part of our committee; 
ignore for committee aggregation. pass @@ -716,10 +717,10 @@ def on_block( if current_validator is not None: proposer_validator_id = proposer_attestation.validator_id proposer_subnet_id = compute_subnet_id( - proposer_validator_id, self.config.attestation_subnet_count + proposer_validator_id, ATTESTATION_COMMITTEE_COUNT ) current_validator_subnet_id = compute_subnet_id( - current_validator, self.config.attestation_subnet_count + current_validator, ATTESTATION_COMMITTEE_COUNT ) if proposer_subnet_id == current_validator_subnet_id: proposer_sig_key = SignatureKey( diff --git a/tests/lean_spec/subspecs/genesis/test_state.py b/tests/lean_spec/subspecs/genesis/test_state.py index b94b8020..e6a0d12d 100644 --- a/tests/lean_spec/subspecs/genesis/test_state.py +++ b/tests/lean_spec/subspecs/genesis/test_state.py @@ -112,10 +112,10 @@ def test_genesis_block_hash_comparison() -> None: # Compare genesis block hashes with expected hex values hash1_hex = f"0x{genesis_block_hash1.hex()}" - assert hash1_hex == "0x71555f7f28d7475af64371eb3ae8fad01c76271c02fe2a7799464b25ae3335ee" + assert hash1_hex == "0xcc03f11dd80dd79a4add86265fad0a141d0a553812d43b8f2c03aa43e4b002e3" hash2_hex = f"0x{genesis_block_hash2.hex()}" - assert hash2_hex == "0x846150f171dbaf07433cd16475e36d7a213fef8bda7a0643242dc38e23870f58" + assert hash2_hex == "0x6bd5347aa1397c63ed8558079fdd3042112a5f4258066e3a659a659ff75ba14f" hash3_hex = f"0x{genesis_block_hash3.hex()}" - assert hash3_hex == "0x69b339f5373f45d91435cdabb85b072e9378768aa588a3642295afe01a1b4682" + assert hash3_hex == "0xce48a709189aa2b23b6858800996176dc13eb49c0c95d717c39e60042de1ac91" diff --git a/tests/lean_spec/subspecs/ssz/test_state.py b/tests/lean_spec/subspecs/ssz/test_state.py index 08b65da6..2a5ec560 100644 --- a/tests/lean_spec/subspecs/ssz/test_state.py +++ b/tests/lean_spec/subspecs/ssz/test_state.py @@ -42,7 +42,7 @@ def test_encode_decode_state_roundtrip() -> None: encode = state.encode_bytes() expected_value = ( - 
"e80300000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ec000000ec000000ed000000ed000000ed0000000101" + "e8030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e4000000e4000000e5000000e5000000e50000000101" ) assert encode.hex() == expected_value assert State.decode_bytes(encode) == state From fe8317c05b5d9d75c76e220902d7e2ef6fe96424 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 27 Jan 2026 17:31:33 +0500 Subject: [PATCH 32/39] Add validator_id to store & fix tests --- .../test_fixtures/fork_choice.py | 4 ++ src/lean_spec/__main__.py | 5 +- src/lean_spec/subspecs/containers/__init__.py | 2 + .../containers/attestation/__init__.py | 2 + .../containers/attestation/attestation.py | 1 + .../subspecs/containers/state/state.py | 9 ++- src/lean_spec/subspecs/forkchoice/store.py | 66 +++++++++---------- .../subspecs/networking/service/service.py | 7 +- src/lean_spec/subspecs/networking/subnet.py | 1 + src/lean_spec/subspecs/node/__init__.py | 4 +- src/lean_spec/subspecs/node/helpers.py | 22 +++++++ src/lean_spec/subspecs/node/node.py | 27 ++++++-- src/lean_spec/subspecs/sync/service.py | 23 +++++-- tests/lean_spec/conftest.py | 7 +- tests/lean_spec/helpers/__init__.py | 6 ++ tests/lean_spec/subspecs/api/test_server.py | 1 + 
.../forkchoice/test_store_attestations.py | 13 +++- .../forkchoice/test_time_management.py | 9 ++- .../subspecs/forkchoice/test_validator.py | 4 ++ .../networking/test_network_service.py | 61 +++++++++++++---- tests/lean_spec/subspecs/node/test_node.py | 2 +- tests/lean_spec/subspecs/ssz/test_state.py | 4 +- .../subspecs/validator/test_service.py | 13 +++- 23 files changed, 217 insertions(+), 76 deletions(-) create mode 100644 src/lean_spec/subspecs/node/helpers.py diff --git a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py index b0ed9e21..dd5b202a 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py +++ b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py @@ -51,6 +51,9 @@ from .base import BaseConsensusFixture +DEFAULT_VALIDATOR_ID = ValidatorIndex(0) + + class ForkChoiceTest(BaseConsensusFixture): """ Test fixture for event-driven fork choice scenarios. 
@@ -212,6 +215,7 @@ def make_fixture(self) -> Self: store = Store.get_forkchoice_store( state=self.anchor_state, anchor_block=self.anchor_block, + validator_id=DEFAULT_VALIDATOR_ID, ) # Block registry for fork creation diff --git a/src/lean_spec/__main__.py b/src/lean_spec/__main__.py index 28390133..7e638bd2 100644 --- a/src/lean_spec/__main__.py +++ b/src/lean_spec/__main__.py @@ -34,7 +34,7 @@ from lean_spec.subspecs.networking.client import LiveNetworkEventSource from lean_spec.subspecs.networking.gossipsub import GossipTopic from lean_spec.subspecs.networking.reqresp.message import Status -from lean_spec.subspecs.node import Node, NodeConfig +from lean_spec.subspecs.node import Node, NodeConfig, get_local_validator_id from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.subspecs.validator import ValidatorRegistry from lean_spec.types import Bytes32, Uint64 @@ -263,7 +263,8 @@ async def _init_from_checkpoint( # # The store treats this as the new "genesis" for fork choice purposes. # All blocks before the checkpoint are effectively pruned. 
- store = Store.get_forkchoice_store(state, anchor_block) + validator_id = get_local_validator_id(validator_registry) + store = Store.get_forkchoice_store(state, anchor_block, validator_id) logger.info( "Initialized from checkpoint at slot %d (finalized=%s)", state.slot, diff --git a/src/lean_spec/subspecs/containers/__init__.py b/src/lean_spec/subspecs/containers/__init__.py index 263e6dd7..4a269a68 100644 --- a/src/lean_spec/subspecs/containers/__init__.py +++ b/src/lean_spec/subspecs/containers/__init__.py @@ -12,6 +12,7 @@ AggregatedAttestation, Attestation, AttestationData, + SignedAggregatedAttestation, SignedAttestation, ) from .block import ( @@ -37,6 +38,7 @@ "BlockWithAttestation", "Checkpoint", "Config", + "SignedAggregatedAttestation", "SignedAttestation", "SignedBlockWithAttestation", "Slot", diff --git a/src/lean_spec/subspecs/containers/attestation/__init__.py b/src/lean_spec/subspecs/containers/attestation/__init__.py index febbf61e..8a2c4537 100644 --- a/src/lean_spec/subspecs/containers/attestation/__init__.py +++ b/src/lean_spec/subspecs/containers/attestation/__init__.py @@ -5,6 +5,7 @@ AggregatedAttestation, Attestation, AttestationData, + SignedAggregatedAttestation, SignedAttestation, ) @@ -13,5 +14,6 @@ "AggregationBits", "Attestation", "AttestationData", + "SignedAggregatedAttestation", "SignedAttestation", ] diff --git a/src/lean_spec/subspecs/containers/attestation/attestation.py b/src/lean_spec/subspecs/containers/attestation/attestation.py index 541f1e36..1de0f587 100644 --- a/src/lean_spec/subspecs/containers/attestation/attestation.py +++ b/src/lean_spec/subspecs/containers/attestation/attestation.py @@ -110,6 +110,7 @@ def aggregate_by_data( for data, validator_ids in data_to_validator_ids.items() ] + class SignedAggregatedAttestation(Container): data: AttestationData """Combined attestation data similar to the beacon chain format.""" diff --git a/src/lean_spec/subspecs/containers/state/state.py 
b/src/lean_spec/subspecs/containers/state/state.py index 278a7680..7901e478 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -884,7 +884,9 @@ def compute_aggregated_signatures( - `aggregate_gossip_signatures` (fresh per-validator signatures collected via gossip) - `select_aggregated_proofs` (reusing previously-seen aggregated proofs from blocks) """ - results = self.aggregate_gossip_signatures(attestations, gossip_signatures=gossip_signatures) + results = self.aggregate_gossip_signatures( + attestations, gossip_signatures=gossip_signatures + ) if aggregated_payloads: # Note: This may add additional proofs for the same attestation data. # Callers that rely on strict minimality should use the split APIs. @@ -935,7 +937,9 @@ def select_aggregated_proofs( for aggregated in AggregatedAttestation.aggregate_by_data(attestations): data = aggregated.data data_root = data.data_root_bytes() - validator_ids = aggregated.aggregation_bits.to_validator_indices() # validators contributed to this attestation + validator_ids = ( + aggregated.aggregation_bits.to_validator_indices() + ) # validators contributed to this attestation # Validators that are missing in the current aggregation are put into remaining. remaining: set[Uint64] = set(validator_ids) @@ -1024,4 +1028,3 @@ def select_aggregated_proofs( # Unzip the results into parallel lists. aggregated_attestations, aggregated_proofs = zip(*results, strict=True) return list(aggregated_attestations), list(aggregated_proofs) - diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index f59af52f..91cc4e68 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -129,6 +129,9 @@ class Store(Container): `Store`'s latest justified and latest finalized checkpoints. 
""" + validator_id: ValidatorIndex | None + """Index of the validator running this store instance.""" + latest_known_attestations: dict[ValidatorIndex, AttestationData] = {} """ Latest attestation data by validator that have been processed. @@ -167,7 +170,12 @@ class Store(Container): """ @classmethod - def get_forkchoice_store(cls, state: State, anchor_block: Block) -> "Store": + def get_forkchoice_store( + cls, + anchor_state: State, + anchor_block: Block, + validator_id: ValidatorIndex | None, + ) -> "Store": """ Initialize forkchoice store from an anchor state and block. @@ -175,10 +183,9 @@ def get_forkchoice_store(cls, state: State, anchor_block: Block) -> "Store": We treat this anchor as both justified and finalized. Args: - state: - The trusted post-state corresponding to the anchor block. - anchor_block: - The trusted block acting as the initial chain root. + anchor_state: The state corresponding to the anchor block. + anchor_block: A trusted block (e.g. genesis or checkpoint). + validator_id: Index of the validator running this store. Returns: A new Store instance, ready to accept blocks and attestations. @@ -191,7 +198,7 @@ def get_forkchoice_store(cls, state: State, anchor_block: Block) -> "Store": # Compute the SSZ root of the given state. # # This is the canonical hash that should appear in the block's state root. - computed_state_root = hash_tree_root(state) + computed_state_root = hash_tree_root(anchor_state) # Check that the block actually points to this state. # @@ -214,17 +221,22 @@ def get_forkchoice_store(cls, state: State, anchor_block: Block) -> "Store": # Build an initial checkpoint using the anchor block. # # Both the root and the slot come directly from the anchor. - anchor_checkpoint = Checkpoint(root=anchor_root, slot=anchor_slot) + # Initialize checkpoints from the anchor state + # + # We explicitly set the root to the anchor block root. 
+ # The anchor state internally might have zero-hash checkpoints (if genesis), + # but the Store must treat the anchor block as the justified/finalized point. return cls( time=Uint64(anchor_slot * INTERVALS_PER_SLOT), - config=state.config, + config=anchor_state.config, head=anchor_root, safe_target=anchor_root, - latest_justified=anchor_checkpoint, - latest_finalized=anchor_checkpoint, - blocks={anchor_root: copy.copy(anchor_block)}, - states={anchor_root: copy.copy(state)}, + latest_justified=anchor_state.latest_justified.model_copy(update={"root": anchor_root}), + latest_finalized=anchor_state.latest_finalized.model_copy(update={"root": anchor_root}), + blocks={anchor_root: anchor_block}, + states={anchor_root: anchor_state}, + validator_id=validator_id, ) def validate_attestation(self, attestation: Attestation) -> None: @@ -274,9 +286,8 @@ def validate_attestation(self, attestation: Attestation) -> None: def on_gossip_attestation( self, signed_attestation: SignedAttestation, - is_aggregator: bool = False, - current_validator_id: ValidatorIndex | None = None, scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, + is_aggregator: bool = False, ) -> "Store": """ Process a signed attestation received via gossip network. @@ -291,7 +302,6 @@ def on_gossip_attestation( signed_attestation: The signed attestation from gossip. scheme: XMSS signature scheme for verification. is_aggregator: True if current validator holds aggregator role. - current_validator_id: Index of the current validator processing this attestation. Returns: New Store with attestation processed and signature stored. @@ -304,7 +314,6 @@ def on_gossip_attestation( attestation_data = signed_attestation.message signature = signed_attestation.signature - # Validate the attestation first so unknown blocks are rejected cleanly # (instead of raising a raw KeyError when state is missing). 
attestation = Attestation(validator_id=validator_id, data=attestation_data) @@ -326,12 +335,11 @@ def on_gossip_attestation( # Store signature for later aggregation if applicable. # - # For backwards compatibility, if the caller does not provide - # `current_validator_id`, we treat this as "not aggregating committee sigs". new_commitee_sigs = dict(self.gossip_signatures) - if is_aggregator and current_validator_id is not None: + if is_aggregator: + assert self.validator_id is not None, "Current validator ID must be set for aggregation" current_validator_subnet = compute_subnet_id( - current_validator_id, ATTESTATION_COMMITTEE_COUNT + self.validator_id, ATTESTATION_COMMITTEE_COUNT ) attester_subnet = compute_subnet_id(validator_id, ATTESTATION_COMMITTEE_COUNT) if current_validator_subnet != attester_subnet: @@ -340,10 +348,6 @@ def on_gossip_attestation( else: sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) new_commitee_sigs[sig_key] = signature - else: - # If this validator is an aggregator for this attestation, - # also store the signature in the committee signatures map. - pass # Process the attestation data store = self.on_attestation(attestation=attestation, is_from_block=False) @@ -481,7 +485,9 @@ def on_attestation( } ) - def on_gossip_aggregated_attestation(self, signed_attestation: SignedAggregatedAttestation) -> "Store": + def on_gossip_aggregated_attestation( + self, signed_attestation: SignedAggregatedAttestation + ) -> "Store": """ Process a signed aggregated attestation received via aggregation topic @@ -547,7 +553,6 @@ def on_gossip_aggregated_attestation(self, signed_attestation: SignedAggregatedA key = SignatureKey(vid, data_root) new_aggregated_payloads.setdefault(key, []).append(proof) - # Process the attestation data. Since it's from gossip, is_from_block=False. 
# Note, we could have already processed individual attestations from this aggregation, # during votes propagation into attestation topic, but it's safe to re-process here as @@ -560,9 +565,6 @@ def on_gossip_aggregated_attestation(self, signed_attestation: SignedAggregatedA # Return store with updated aggregated payloads return store.model_copy(update={"aggregated_payloads": new_aggregated_payloads}) - - - def on_block( self, signed_block_with_attestation: SignedBlockWithAttestation, @@ -685,8 +687,6 @@ def on_block( key = SignatureKey(vid, data_root) new_block_proofs.setdefault(key, []).append(proof) - # Update Fork Choice - # # Register the vote immediately (historical/on-chain) store = store.on_attestation( attestation=Attestation(validator_id=vid, data=att.data), @@ -960,8 +960,8 @@ def aggregate_committee_signatures(self) -> "Store": # iterate to broadcast aggregated attestations for aggregated_attestation, aggregated_signature in aggregated_results: signed_aggregated_attestation = SignedAggregatedAttestation( - data = aggregated_attestation.data, - proof = aggregated_signature, + data=aggregated_attestation.data, + proof=aggregated_signature, ) # Note: here we should broadcast the aggregated signature to committee_aggregators topic diff --git a/src/lean_spec/subspecs/networking/service/service.py b/src/lean_spec/subspecs/networking/service/service.py index 26244ea5..45bb4cc5 100644 --- a/src/lean_spec/subspecs/networking/service/service.py +++ b/src/lean_spec/subspecs/networking/service/service.py @@ -36,7 +36,6 @@ GossipBlockEvent, NetworkEvent, NetworkEventSource, - PeerConnectedEvent, PeerDisconnectedEvent, PeerStatusEvent, ) @@ -138,10 +137,12 @@ async def _handle_event(self, event: NetworkEvent) -> None: await self.sync_service.on_gossip_block(block, peer_id) case GossipAttestationEvent(attestation=attestation, peer_id=peer_id): - # Route gossip attestations to the sync service. # # SyncService will validate signature and update forkchoice. 
- await self.sync_service.on_gossip_attestation(attestation, peer_id) + await self.sync_service.on_gossip_attestation( + attestation=attestation, + peer_id=peer_id, + ) case PeerStatusEvent(peer_id=peer_id, status=status): # Route peer status updates to sync service. diff --git a/src/lean_spec/subspecs/networking/subnet.py b/src/lean_spec/subspecs/networking/subnet.py index 72025249..8a3c8fd1 100644 --- a/src/lean_spec/subspecs/networking/subnet.py +++ b/src/lean_spec/subspecs/networking/subnet.py @@ -3,6 +3,7 @@ Provides a small utility to compute a validator's attestation subnet id from its validator index and number of committees. """ + from __future__ import annotations from lean_spec.types import Uint64 diff --git a/src/lean_spec/subspecs/node/__init__.py b/src/lean_spec/subspecs/node/__init__.py index a5d8bcb1..d497ebb1 100644 --- a/src/lean_spec/subspecs/node/__init__.py +++ b/src/lean_spec/subspecs/node/__init__.py @@ -1,5 +1,5 @@ """Node orchestrator for the Lean Ethereum consensus client.""" -from .node import Node, NodeConfig +from .node import Node, NodeConfig, get_local_validator_id -__all__ = ["Node", "NodeConfig"] +__all__ = ["Node", "NodeConfig", "get_local_validator_id"] diff --git a/src/lean_spec/subspecs/node/helpers.py b/src/lean_spec/subspecs/node/helpers.py new file mode 100644 index 00000000..1c50e52a --- /dev/null +++ b/src/lean_spec/subspecs/node/helpers.py @@ -0,0 +1,22 @@ +""" +Helper functions for node operations. +""" + +from lean_spec.subspecs.containers.validator import ValidatorIndex + + +def is_aggregator(validator_id: ValidatorIndex | None) -> bool: + """ + Determine if a validator is an aggregator. + + Args: + validator_id: The index of the validator. + + Returns: + True if the validator is an aggregator, False otherwise. 
+ """ + if validator_id is None: + return False + return ( + False # Placeholder implementation, in future should be defined by node operator settings + ) diff --git a/src/lean_spec/subspecs/node/node.py b/src/lean_spec/subspecs/node/node.py index 61493aef..fcb2ca01 100644 --- a/src/lean_spec/subspecs/node/node.py +++ b/src/lean_spec/subspecs/node/node.py @@ -92,6 +92,20 @@ class NodeConfig: """ +def get_local_validator_id(registry: ValidatorRegistry | None) -> ValidatorIndex | None: + """ + Get the validator index for this node. + + For now, returns None as a default for passive nodes or simple setups. + Future implementations will look up keys in the registry. + """ + if registry is None or len(registry.validators) == 0: + return None + + # For simplicity, use the first validator in the registry. + return registry.validators[0].index + + @dataclass(slots=True) class Node: """ @@ -146,11 +160,11 @@ def from_genesis(cls, config: NodeConfig) -> Node: if config.database_path is not None: database = cls._create_database(config.database_path) - # Try to load existing state from database. # # If database contains valid state, resume from there. # Otherwise, fall through to genesis initialization. - store = cls._try_load_from_database(database) + validator_id = get_local_validator_id(config.validator_registry) + store = cls._try_load_from_database(database, validator_id) if store is None: # Generate genesis state from validators. @@ -173,7 +187,7 @@ def from_genesis(cls, config: NodeConfig) -> Node: # Initialize forkchoice store. # # Genesis block is both justified and finalized. - store = Store.get_forkchoice_store(state, block) + store = Store.get_forkchoice_store(state, block, validator_id) # Persist genesis to database if available. 
if database is not None: @@ -262,7 +276,10 @@ def _create_database(path: Path | str) -> Database: return SQLiteDatabase(path) @staticmethod - def _try_load_from_database(database: Database | None) -> Store | None: + def _try_load_from_database( + database: Database | None, + validator_id: ValidatorIndex, + ) -> Store | None: """ Try to load forkchoice store from existing database state. @@ -270,6 +287,7 @@ def _try_load_from_database(database: Database | None) -> Store | None: Args: database: Database to load from. + validator_id: Validator index for the store instance. Returns: Loaded Store or None if no valid state exists. @@ -309,6 +327,7 @@ def _try_load_from_database(database: Database | None) -> Store | None: latest_finalized=finalized, blocks={head_root: head_block}, states={head_root: head_state}, + validator_id=validator_id, ) async def run(self, *, install_signal_handlers: bool = True) -> None: diff --git a/src/lean_spec/subspecs/sync/service.py b/src/lean_spec/subspecs/sync/service.py index f20e7376..ece6c240 100644 --- a/src/lean_spec/subspecs/sync/service.py +++ b/src/lean_spec/subspecs/sync/service.py @@ -43,11 +43,16 @@ from lean_spec.subspecs import metrics from lean_spec.subspecs.chain.clock import SlotClock -from lean_spec.subspecs.containers import Block, SignedBlockWithAttestation -from lean_spec.subspecs.containers.attestation import SignedAttestation -from lean_spec.subspecs.forkchoice import Store -from lean_spec.subspecs.networking import PeerId +from lean_spec.subspecs.containers import ( + Block, + SignedAggregatedAttestation, + SignedAttestation, + SignedBlockWithAttestation, +) +from lean_spec.subspecs.forkchoice.store import Store + from lean_spec.subspecs.networking.reqresp.message import Status +from lean_spec.subspecs.networking.transport.peer_id import PeerId from lean_spec.subspecs.ssz.hash import hash_tree_root from .backfill_sync import BackfillSync, NetworkRequester @@ -409,13 +414,21 @@ async def on_gossip_attestation( if not 
self._state.accepts_gossip: return + from lean_spec.subspecs.node.helpers import is_aggregator + + # Check if we are an aggregator + is_aggregator_role = is_aggregator(self.store.validator_id) + # Integrate the attestation into forkchoice state. # # The store validates the signature and updates branch weights. # Invalid attestations (bad signature, unknown target) are rejected. # Validation failures are logged but don't crash the event loop. try: - self.store = self.store.on_gossip_attestation(attestation) + self.store = self.store.on_gossip_attestation( + signed_attestation=attestation, + is_aggregator=is_aggregator_role, + ) except (AssertionError, KeyError): # Attestation validation failed. # diff --git a/tests/lean_spec/conftest.py b/tests/lean_spec/conftest.py index e590bae8..eb8abc74 100644 --- a/tests/lean_spec/conftest.py +++ b/tests/lean_spec/conftest.py @@ -11,6 +11,7 @@ from lean_spec.subspecs.containers import Block, State from lean_spec.subspecs.forkchoice import Store +from lean_spec.subspecs.containers.validator import ValidatorIndex from tests.lean_spec.helpers import make_genesis_block, make_genesis_state @@ -29,4 +30,8 @@ def genesis_block(genesis_state: State) -> Block: @pytest.fixture def base_store(genesis_state: State, genesis_block: Block) -> Store: """Fork choice store initialized with genesis.""" - return Store.get_forkchoice_store(genesis_state, genesis_block) + return Store.get_forkchoice_store( + genesis_state, + genesis_block, + validator_id=ValidatorIndex(0), + ) diff --git a/tests/lean_spec/helpers/__init__.py b/tests/lean_spec/helpers/__init__.py index 8a93bf8f..c59acccb 100644 --- a/tests/lean_spec/helpers/__init__.py +++ b/tests/lean_spec/helpers/__init__.py @@ -15,6 +15,10 @@ make_validators_with_keys, ) from .mocks import MockNoiseSession +from lean_spec.subspecs.containers.validator import ValidatorIndex + +TEST_VALIDATOR_ID = ValidatorIndex(0) + __all__ = [ # Builders @@ -32,4 +36,6 @@ "make_validators_with_keys", # Mocks 
"MockNoiseSession", + # Constants + "TEST_VALIDATOR_ID", ] diff --git a/tests/lean_spec/subspecs/api/test_server.py b/tests/lean_spec/subspecs/api/test_server.py index 55c007d4..5abcc560 100644 --- a/tests/lean_spec/subspecs/api/test_server.py +++ b/tests/lean_spec/subspecs/api/test_server.py @@ -15,6 +15,7 @@ from lean_spec.subspecs.containers import State from lean_spec.subspecs.containers.slot import Slot from lean_spec.subspecs.containers.state import Validators +from lean_spec.subspecs.containers.validator import ValidatorIndex from lean_spec.subspecs.forkchoice import Store diff --git a/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py b/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py index d70898e4..f8ad28fe 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py +++ b/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py @@ -25,6 +25,7 @@ from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.subspecs.xmss.aggregation import SignatureKey from lean_spec.types import Bytes32, Bytes52, Uint64 +from tests.lean_spec.helpers import TEST_VALIDATOR_ID def test_on_block_processes_multi_validator_aggregations() -> None: @@ -48,7 +49,11 @@ def test_on_block_processes_multi_validator_aggregations() -> None: body=BlockBody(attestations=AggregatedAttestations(data=[])), ) - base_store = Store.get_forkchoice_store(genesis_state, genesis_block) + base_store = Store.get_forkchoice_store( + genesis_state, + genesis_block, + validator_id=TEST_VALIDATOR_ID, + ) consumer_store = base_store # Producer view knows about attestations from validators 1 and 2 @@ -145,7 +150,11 @@ def test_on_block_preserves_immutability_of_aggregated_payloads() -> None: body=BlockBody(attestations=AggregatedAttestations(data=[])), ) - base_store = Store.get_forkchoice_store(genesis_state, genesis_block) + base_store = Store.get_forkchoice_store( + genesis_state, + genesis_block, + validator_id=TEST_VALIDATOR_ID, + ) # First 
block: create and process a block with attestations to populate # `aggregated_payloads`. diff --git a/tests/lean_spec/subspecs/forkchoice/test_time_management.py b/tests/lean_spec/subspecs/forkchoice/test_time_management.py index 83954b8d..912870e2 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_time_management.py +++ b/tests/lean_spec/subspecs/forkchoice/test_time_management.py @@ -20,7 +20,7 @@ from lean_spec.subspecs.forkchoice import Store from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.types import Bytes32, Bytes52, Uint64 -from tests.lean_spec.helpers import make_signed_attestation +from tests.lean_spec.helpers import make_signed_attestation, TEST_VALIDATOR_ID @pytest.fixture @@ -62,6 +62,7 @@ def sample_store(sample_config: Config) -> Store: latest_finalized=checkpoint, blocks={genesis_hash: genesis_block}, states={genesis_hash: state}, + validator_id=TEST_VALIDATOR_ID, ) @@ -89,7 +90,11 @@ def test_store_time_from_anchor_slot(self, anchor_slot: int) -> None: body=BlockBody(attestations=AggregatedAttestations(data=[])), ) - store = Store.get_forkchoice_store(state=state, anchor_block=anchor_block) + store = Store.get_forkchoice_store( + anchor_state=state, + anchor_block=anchor_block, + validator_id=TEST_VALIDATOR_ID, + ) assert store.time == INTERVALS_PER_SLOT * Uint64(anchor_slot) diff --git a/tests/lean_spec/subspecs/forkchoice/test_validator.py b/tests/lean_spec/subspecs/forkchoice/test_validator.py index 68c3d332..69a21977 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_validator.py +++ b/tests/lean_spec/subspecs/forkchoice/test_validator.py @@ -29,6 +29,7 @@ from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.subspecs.xmss.aggregation import SignatureKey from lean_spec.types import Bytes32, Bytes52, Uint64 +from tests.lean_spec.helpers import TEST_VALIDATOR_ID @pytest.fixture @@ -121,6 +122,7 @@ def sample_store(config: Config, sample_state: State) -> Store: latest_finalized=finalized, 
blocks={genesis_hash: genesis_block}, states={genesis_hash: consistent_state}, # States are indexed by block hash + validator_id=TEST_VALIDATOR_ID, ) @@ -490,6 +492,7 @@ def test_validator_operations_empty_store(self) -> None: latest_finalized=final_checkpoint, blocks={genesis_hash: genesis}, states={genesis_hash: state}, + validator_id=TEST_VALIDATOR_ID, ) # Should be able to produce block and attestation @@ -532,6 +535,7 @@ def test_produce_block_missing_parent_state(self) -> None: latest_finalized=checkpoint, blocks={}, # No blocks states={}, # No states + validator_id=TEST_VALIDATOR_ID, ) with pytest.raises(KeyError): # Missing head in get_proposal_head diff --git a/tests/lean_spec/subspecs/networking/test_network_service.py b/tests/lean_spec/subspecs/networking/test_network_service.py index a7c15f8a..849ce3fd 100644 --- a/tests/lean_spec/subspecs/networking/test_network_service.py +++ b/tests/lean_spec/subspecs/networking/test_network_service.py @@ -36,7 +36,7 @@ from lean_spec.subspecs.sync.service import SyncService from lean_spec.subspecs.sync.states import SyncState from lean_spec.types import Bytes32, Uint64 -from tests.lean_spec.helpers import make_mock_signature, make_signed_block +from tests.lean_spec.helpers import make_mock_signature, make_signed_block, TEST_VALIDATOR_ID @dataclass @@ -90,6 +90,7 @@ def __init__(self, head_slot: int = 0) -> None: """Initialize mock store with genesis block.""" self._head_slot = head_slot self.head = Bytes32.zero() + self.validator_id: ValidatorIndex = TEST_VALIDATOR_ID self.blocks: dict[Bytes32, Any] = {} self.states: dict[Bytes32, Any] = {} self._attestations_received: list[SignedAttestation] = [] @@ -118,14 +119,18 @@ def on_block(self, block: SignedBlockWithAttestation) -> "MockStore": new_store.head = root return new_store - def on_gossip_attestation(self, attestation: SignedAttestation) -> "MockStore": + def on_gossip_attestation( + self, + signed_attestation: SignedAttestation, + is_aggregator: bool = False, + 
) -> "MockStore": """Process an attestation: track it for verification.""" new_store = MockStore(self._head_slot) new_store.blocks = dict(self.blocks) new_store.states = dict(self.states) new_store.head = self.head new_store._attestations_received = list(self._attestations_received) - new_store._attestations_received.append(attestation) + new_store._attestations_received.append(signed_attestation) return new_store @@ -192,7 +197,10 @@ def test_block_added_to_store_blocks_dict( GossipBlockEvent(block=block, peer_id=peer_id, topic=block_topic), ] source = MockEventSource(events=events) - network_service = NetworkService(sync_service=sync_service, event_source=source) + network_service = NetworkService( + sync_service=sync_service, + event_source=source, + ) asyncio.run(network_service.run()) @@ -224,7 +232,10 @@ def test_store_head_updated_after_block( GossipBlockEvent(block=block, peer_id=peer_id, topic=block_topic), ] source = MockEventSource(events=events) - network_service = NetworkService(sync_service=sync_service, event_source=source) + network_service = NetworkService( + sync_service=sync_service, + event_source=source, + ) asyncio.run(network_service.run()) @@ -255,7 +266,10 @@ def test_block_ignored_in_idle_state_store_unchanged( GossipBlockEvent(block=block, peer_id=peer_id, topic=block_topic), ] source = MockEventSource(events=events) - network_service = NetworkService(sync_service=sync_service, event_source=source) + network_service = NetworkService( + sync_service=sync_service, + event_source=source, + ) asyncio.run(network_service.run()) @@ -299,7 +313,10 @@ def test_attestation_processed_by_store( ), ] source = MockEventSource(events=events) - network_service = NetworkService(sync_service=sync_service, event_source=source) + network_service = NetworkService( + sync_service=sync_service, + event_source=source, + ) asyncio.run(network_service.run()) @@ -339,7 +356,10 @@ def test_attestation_ignored_in_idle_state( ), ] source = 
MockEventSource(events=events) - network_service = NetworkService(sync_service=sync_service, event_source=source) + network_service = NetworkService( + sync_service=sync_service, + event_source=source, + ) asyncio.run(network_service.run()) @@ -369,7 +389,10 @@ def test_peer_status_triggers_idle_to_syncing( PeerStatusEvent(peer_id=peer_id, status=status), ] source = MockEventSource(events=events) - network_service = NetworkService(sync_service=sync_service, event_source=source) + network_service = NetworkService( + sync_service=sync_service, + event_source=source, + ) asyncio.run(network_service.run()) @@ -392,7 +415,10 @@ def test_peer_status_updates_peer_manager( PeerStatusEvent(peer_id=peer_id, status=status), ] source = MockEventSource(events=events) - network_service = NetworkService(sync_service=sync_service, event_source=source) + network_service = NetworkService( + sync_service=sync_service, + event_source=source, + ) asyncio.run(network_service.run()) @@ -444,7 +470,10 @@ def test_full_sync_flow_status_then_block( GossipBlockEvent(block=block, peer_id=peer_id, topic=block_topic), ] source = MockEventSource(events=events) - network_service = NetworkService(sync_service=sync_service, event_source=source) + network_service = NetworkService( + sync_service=sync_service, + event_source=source, + ) asyncio.run(network_service.run()) @@ -487,7 +516,10 @@ def test_block_before_status_is_ignored( PeerStatusEvent(peer_id=peer_id, status=status), ] source = MockEventSource(events=events) - network_service = NetworkService(sync_service=sync_service, event_source=source) + network_service = NetworkService( + sync_service=sync_service, + event_source=source, + ) asyncio.run(network_service.run()) @@ -529,7 +561,10 @@ def test_multiple_blocks_chain_extension( GossipBlockEvent(block=block2, peer_id=peer_id, topic=block_topic), ] source = MockEventSource(events=events) - network_service = NetworkService(sync_service=sync_service, event_source=source) + network_service = 
NetworkService( + sync_service=sync_service, + event_source=source, + ) asyncio.run(network_service.run()) diff --git a/tests/lean_spec/subspecs/node/test_node.py b/tests/lean_spec/subspecs/node/test_node.py index dea0e580..8e931cee 100644 --- a/tests/lean_spec/subspecs/node/test_node.py +++ b/tests/lean_spec/subspecs/node/test_node.py @@ -175,7 +175,7 @@ def test_store_time_from_database_uses_intervals_not_seconds(self) -> None: # Patching to 8 distinguishes from the seconds per slot. patched_intervals = Uint64(8) with patch("lean_spec.subspecs.node.node.INTERVALS_PER_SLOT", patched_intervals): - store = Node._try_load_from_database(mock_db) + store = Node._try_load_from_database(mock_db, validator_id=ValidatorIndex(0)) assert store is not None expected_time = Uint64(test_slot * patched_intervals) diff --git a/tests/lean_spec/subspecs/ssz/test_state.py b/tests/lean_spec/subspecs/ssz/test_state.py index 2a5ec560..da2e2a9e 100644 --- a/tests/lean_spec/subspecs/ssz/test_state.py +++ b/tests/lean_spec/subspecs/ssz/test_state.py @@ -41,8 +41,6 @@ def test_encode_decode_state_roundtrip() -> None: ) encode = state.encode_bytes() - expected_value = ( - "e8030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e4000000e4000000e5000000e5000000e50000000101" - ) + expected_value = 
"e8030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e4000000e4000000e5000000e5000000e50000000101" assert encode.hex() == expected_value assert State.decode_bytes(encode) == state diff --git a/tests/lean_spec/subspecs/validator/test_service.py b/tests/lean_spec/subspecs/validator/test_service.py index 579fdc29..144563d8 100644 --- a/tests/lean_spec/subspecs/validator/test_service.py +++ b/tests/lean_spec/subspecs/validator/test_service.py @@ -34,6 +34,7 @@ from lean_spec.subspecs.xmss.aggregation import SignatureKey from lean_spec.subspecs.xmss.containers import Signature from lean_spec.types import Bytes32, Bytes52, Uint64 +from tests.lean_spec.helpers import TEST_VALIDATOR_ID class MockNetworkRequester(NetworkRequester): @@ -51,7 +52,11 @@ async def request_block_by_root( @pytest.fixture def store(genesis_state: State, genesis_block: Block) -> Store: """Forkchoice store initialized with genesis.""" - return Store.get_forkchoice_store(genesis_state, genesis_block) + return Store.get_forkchoice_store( + genesis_state, + genesis_block, + validator_id=TEST_VALIDATOR_ID, + ) @pytest.fixture @@ -532,7 +537,11 @@ def real_store(self, key_manager: XmssKeyManager) -> Store: state_root=hash_tree_root(genesis_state), body=BlockBody(attestations=AggregatedAttestations(data=[])), ) - return Store.get_forkchoice_store(genesis_state, genesis_block) + return Store.get_forkchoice_store( + genesis_state, + genesis_block, + validator_id=TEST_VALIDATOR_ID, + ) @pytest.fixture def real_sync_service(self, real_store: Store) -> SyncService: From 6af933b5282867d1100ca0c9e458093749234b0b Mon Sep 17 00:00:00 2001 From: kamilsa 
Date: Tue, 27 Jan 2026 17:57:30 +0500
Subject: [PATCH 33/39] rely on aggregated payloads for block production

---
 .../subspecs/containers/state/state.py        | 16 +++-----
 src/lean_spec/subspecs/forkchoice/store.py    |  1 -
 .../containers/test_state_aggregation.py      | 41 +++++++++++--------
 .../forkchoice/test_store_attestations.py     | 30 ++++++++++----
 .../subspecs/validator/test_service.py        | 36 ++++++++++++----
 5 files changed, 79 insertions(+), 45 deletions(-)

diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py
index 7901e478..d7138567 100644
--- a/src/lean_spec/subspecs/containers/state/state.py
+++ b/src/lean_spec/subspecs/containers/state/state.py
@@ -656,7 +656,6 @@ def build_block(
         attestations: list[Attestation] | None = None,
         available_attestations: Iterable[Attestation] | None = None,
         known_block_roots: AbstractSet[Bytes32] | None = None,
-        gossip_signatures: dict[SignatureKey, "Signature"] | None = None,
         aggregated_payloads: dict[SignatureKey, list[AggregatedSignatureProof]] | None = None,
     ) -> tuple[Block, "State", list[AggregatedAttestation], list[AggregatedSignatureProof]]:
         """
@@ -736,14 +735,13 @@ def build_block(
                 continue
 
             # We can only include an attestation if we have some way to later provide
-            # an aggregated proof for its group:
-            # - either a per validator XMSS signature from gossip, or
-            # - at least one aggregated proof learned from a block that references
-            #   this validator+data.
-            has_gossip_sig = bool(gossip_signatures and sig_key in gossip_signatures)
+            # an aggregated proof for its group.
+            #
+            # We strictly rely on existing aggregated proofs learned from blocks.
+            # We do NOT aggregate fresh gossip signatures during block production.
has_block_proof = bool(aggregated_payloads and sig_key in aggregated_payloads) - if has_gossip_sig or has_block_proof: + if has_block_proof: new_attestations.append(attestation) # Fixed point reached: no new attestations found @@ -754,10 +752,8 @@ def build_block( attestations.extend(new_attestations) # Select aggregated attestations and proofs for the final block. - # Prefer fresh gossip signatures; fall back to previously-seen aggregated proofs. - aggregated_attestations, aggregated_signatures = self.compute_aggregated_signatures( + aggregated_attestations, aggregated_signatures = self.select_aggregated_proofs( attestations, - gossip_signatures=gossip_signatures, aggregated_payloads=aggregated_payloads, ) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 91cc4e68..4100b857 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -1257,7 +1257,6 @@ def produce_block_with_signatures( parent_root=head_root, available_attestations=available_attestations, known_block_roots=set(store.blocks.keys()), - gossip_signatures=store.gossip_signatures, aggregated_payloads=store.aggregated_payloads, ) diff --git a/tests/lean_spec/subspecs/containers/test_state_aggregation.py b/tests/lean_spec/subspecs/containers/test_state_aggregation.py index 1620adcf..4bda5e6c 100644 --- a/tests/lean_spec/subspecs/containers/test_state_aggregation.py +++ b/tests/lean_spec/subspecs/containers/test_state_aggregation.py @@ -207,10 +207,17 @@ def test_build_block_collects_valid_available_attestations() -> None: attestation = Attestation(validator_id=ValidatorIndex(0), data=att_data) data_root = att_data.data_root_bytes() - gossip_signatures = { - SignatureKey(ValidatorIndex(0), data_root): key_manager.sign_attestation_data( - ValidatorIndex(0), att_data - ) + # Calculate aggregated proof directly + signature = key_manager.sign_attestation_data(ValidatorIndex(0), att_data) + proof = 
AggregatedSignatureProof.aggregate( + participants=AggregationBits.from_validator_indices([ValidatorIndex(0)]), + public_keys=[key_manager.get_public_key(ValidatorIndex(0))], + signatures=[signature], + message=data_root, + epoch=att_data.slot, + ) + aggregated_payloads = { + SignatureKey(ValidatorIndex(0), data_root): [proof] } # Proposer for slot 1 with 2 validators: slot % num_validators = 1 % 2 = 1 @@ -221,8 +228,7 @@ def test_build_block_collects_valid_available_attestations() -> None: attestations=[], available_attestations=[attestation], known_block_roots={head_root}, - gossip_signatures=gossip_signatures, - aggregated_payloads={}, + aggregated_payloads=aggregated_payloads, ) assert post_state.latest_block_header.slot == Slot(1) @@ -270,7 +276,6 @@ def test_build_block_skips_attestations_without_signatures() -> None: attestations=[], available_attestations=[attestation], known_block_roots={head_root}, - gossip_signatures={}, aggregated_payloads={}, ) @@ -468,15 +473,15 @@ def test_build_block_state_root_valid_when_signatures_split() -> None: # Three validators attest to identical data. attestations = [Attestation(validator_id=ValidatorIndex(i), data=att_data) for i in range(3)] - # Simulate partial gossip coverage. - # - # Only one signature arrived via the gossip network. - # This happens when network partitions delay some messages. - gossip_signatures = { - SignatureKey(ValidatorIndex(0), data_root): key_manager.sign_attestation_data( - ValidatorIndex(0), att_data - ) - } + # Use a second aggregated proof for Validator 0 instead of gossip. + # This simulates receiving an aggregated signature for this validator from another source. 
+ proof_0 = AggregatedSignatureProof.aggregate( + participants=AggregationBits.from_validator_indices([ValidatorIndex(0)]), + public_keys=[key_manager.get_public_key(ValidatorIndex(0))], + signatures=[key_manager.sign_attestation_data(ValidatorIndex(0), att_data)], + message=data_root, + epoch=att_data.slot, + ) # Simulate the remaining signatures arriving via aggregated proof. # @@ -496,6 +501,7 @@ def test_build_block_state_root_valid_when_signatures_split() -> None: epoch=att_data.slot, ) aggregated_payloads = { + SignatureKey(ValidatorIndex(0), data_root): [proof_0], SignatureKey(ValidatorIndex(1), data_root): [fallback_proof], SignatureKey(ValidatorIndex(2), data_root): [fallback_proof], } @@ -508,7 +514,6 @@ def test_build_block_state_root_valid_when_signatures_split() -> None: proposer_index=ValidatorIndex(1), parent_root=parent_root, attestations=attestations, - gossip_signatures=gossip_signatures, aggregated_payloads=aggregated_payloads, ) @@ -520,7 +525,7 @@ def test_build_block_state_root_valid_when_signatures_split() -> None: # Confirm each attestation covers the expected validators. 
actual_bits = [set(att.aggregation_bits.to_validator_indices()) for att in aggregated_atts] - assert {ValidatorIndex(0)} in actual_bits, "Gossip attestation should cover only validator 0" + assert {ValidatorIndex(0)} in actual_bits, "First attestation should cover only validator 0" assert {ValidatorIndex(1), ValidatorIndex(2)} in actual_bits, ( "Fallback should cover validators 1,2" ) diff --git a/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py b/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py index f8ad28fe..d2f0edd5 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py +++ b/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py @@ -65,19 +65,35 @@ def test_on_block_processes_multi_validator_aggregations() -> None: validator_id: attestation_data for validator_id in (ValidatorIndex(1), ValidatorIndex(2)) } - # Store signatures in gossip_signatures + # Aggregate signatures manually for aggregated_payloads data_root = attestation_data.data_root_bytes() - gossip_sigs = { - SignatureKey(validator_id, data_root): key_manager.sign_attestation_data( - validator_id, attestation_data - ) - for validator_id in (ValidatorIndex(1), ValidatorIndex(2)) + signatures_list = [ + key_manager.sign_attestation_data(vid, attestation_data) + for vid in (ValidatorIndex(1), ValidatorIndex(2)) + ] + participants = [ValidatorIndex(1), ValidatorIndex(2)] + + from lean_spec.subspecs.containers.attestation import AggregationBits + from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof + + proof = AggregatedSignatureProof.aggregate( + participants=AggregationBits.from_validator_indices(participants), + public_keys=[key_manager.get_public_key(vid) for vid in participants], + signatures=signatures_list, + message=data_root, + epoch=attestation_data.slot, + ) + + aggregated_payloads = { + SignatureKey(vid, data_root): [proof] + for vid in participants } producer_store = base_store.model_copy( update={ 
"latest_known_attestations": attestation_data_map, - "gossip_signatures": gossip_sigs, + # No gossip signatures needed for block production now + "aggregated_payloads": aggregated_payloads, } ) diff --git a/tests/lean_spec/subspecs/validator/test_service.py b/tests/lean_spec/subspecs/validator/test_service.py index 144563d8..c7c88589 100644 --- a/tests/lean_spec/subspecs/validator/test_service.py +++ b/tests/lean_spec/subspecs/validator/test_service.py @@ -781,21 +781,39 @@ def test_block_includes_pending_attestations( attestation_data = store.produce_attestation_data(Slot(0)) data_root = attestation_data.data_root_bytes() - # Simulate gossip attestations from validators 3 and 4 + # Simulate aggregated payloads for validators 3 and 4 + from lean_spec.subspecs.containers.attestation import AggregationBits + from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof + attestation_map: dict[ValidatorIndex, AttestationData] = {} - gossip_sigs: dict[SignatureKey, Signature] = {} + signatures = [] + participants = [ValidatorIndex(3), ValidatorIndex(4)] + public_keys = [] + + for vid in participants: + sig = key_manager.sign_attestation_data(vid, attestation_data) + signatures.append(sig) + public_keys.append(key_manager.get_public_key(vid)) + attestation_map[vid] = attestation_data + + proof = AggregatedSignatureProof.aggregate( + participants=AggregationBits.from_validator_indices(participants), + public_keys=public_keys, + signatures=signatures, + message=data_root, + epoch=attestation_data.slot, + ) - for validator_id in (ValidatorIndex(3), ValidatorIndex(4)): - attestation_map[validator_id] = attestation_data - gossip_sigs[SignatureKey(validator_id, data_root)] = key_manager.sign_attestation_data( - validator_id, attestation_data - ) + aggregated_payloads = { + SignatureKey(vid, data_root): [proof] + for vid in participants + } - # Update store with pending attestations + # Update store with pending attestations and aggregated payloads updated_store = 
store.model_copy( update={ "latest_known_attestations": attestation_map, - "gossip_signatures": gossip_sigs, + "aggregated_payloads": aggregated_payloads, } ) real_sync_service.store = updated_store From 2b68c0ccfdcae52155d2379ea903a9962ed081a4 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 27 Jan 2026 18:26:13 +0500 Subject: [PATCH 34/39] Fix uvx tox --- docs/client/networking.md | 42 +++++++++---------- docs/client/validator.md | 16 +++---- .../test_fixtures/fork_choice.py | 11 ++--- .../test_fixtures/state_transition.py | 17 -------- .../test_fixtures/verify_signatures.py | 9 +--- .../containers/attestation/attestation.py | 6 +++ .../subspecs/containers/state/state.py | 9 ++-- src/lean_spec/subspecs/forkchoice/store.py | 21 ++++++---- src/lean_spec/subspecs/networking/__init__.py | 2 +- .../networking/client/event_source.py | 2 +- .../subspecs/networking/service/service.py | 1 + src/lean_spec/subspecs/node/helpers.py | 4 +- src/lean_spec/subspecs/node/node.py | 6 +-- src/lean_spec/subspecs/sync/service.py | 2 - tests/lean_spec/conftest.py | 2 +- tests/lean_spec/helpers/__init__.py | 3 +- tests/lean_spec/subspecs/api/test_server.py | 1 - .../containers/test_state_aggregation.py | 4 +- .../forkchoice/test_store_attestations.py | 7 +--- .../forkchoice/test_time_management.py | 2 +- .../client/test_gossip_reception.py | 7 +++- .../networking/test_network_service.py | 2 +- tests/lean_spec/subspecs/ssz/test_state.py | 2 +- .../subspecs/validator/test_service.py | 10 ++--- 24 files changed, 82 insertions(+), 106 deletions(-) diff --git a/docs/client/networking.md b/docs/client/networking.md index 39989a5e..137e132e 100644 --- a/docs/client/networking.md +++ b/docs/client/networking.md @@ -63,35 +63,35 @@ Messages are organized by topic. Topic names follow a pattern that includes: This structure lets clients subscribe to relevant messages and ignore others. 
-The payload carried in the gossipsub message is the SSZ-encoded, +The payload carried in the gossipsub message is the SSZ-encoded, Snappy-compressed message, which type is identified by the topic: -| Topic Name | Message Type | Encoding | +| Topic Name | Message Type | Encoding | |------------------------------------------------------------|-----------------------------|--------------| -| /lean/consensus/devnet3/blocks/ssz_snappy | SignedBlockWithAttestation | SSZ + Snappy | -| /lean/consensus/devnet3/attestations/ssz_snappy | SignedAttestation | SSZ + Snappy | -| /lean/consensus/devnet3/attestation_{subnet_id}/ssz_snappy | SignedAttestation | SSZ + Snappy | -| /lean/consensus/devnet3/aggregation/ssz_snappy | SignedAggregatedAttestation | SSZ + Snappy | +| /lean/consensus/devnet3/blocks/ssz_snappy | SignedBlockWithAttestation | SSZ + Snappy | +| /lean/consensus/devnet3/attestations/ssz_snappy | SignedAttestation | SSZ + Snappy | +| /lean/consensus/devnet3/attestation\_{subnet_id}/ssz_snappy | SignedAttestation | SSZ + Snappy | +| /lean/consensus/devnet3/aggregation/ssz_snappy | SignedAggregatedAttestation | SSZ + Snappy | ### Message Types Three main message types exist: -* _Blocks_, defined by the `SignedBlockWithAttestation` type, are proposed by -validators and propagated on the block topic. Every node needs to see blocks -quickly. - -* _Attestations_, defined by the `SignedAttestation` type, come from all -validators. They propagate on the global attestation topic. Additionally, -each committee has its own attestation topic. Validators publish to their -committee's attestation topic and global attestation topic. Non-aggregating -validators subscribe only to the global attestation topic, while aggregators -subscribe to both the global and their committee's attestation topic. - -* _Committee aggregations_, defined by the `SignedAggregatedAttestation` type, -created by committee aggregators. These combine attestations from committee -members. 
Aggregations propagate on the aggregation topic to which every -validator subscribes. +- _Blocks_, defined by the `SignedBlockWithAttestation` type, are proposed by + validators and propagated on the block topic. Every node needs to see blocks + quickly. + +- _Attestations_, defined by the `SignedAttestation` type, come from all + validators. They propagate on the global attestation topic. Additionally, + each committee has its own attestation topic. Validators publish to their + committee's attestation topic and global attestation topic. Non-aggregating + validators subscribe only to the global attestation topic, while aggregators + subscribe to both the global and their committee's attestation topic. + +- _Committee aggregations_, defined by the `SignedAggregatedAttestation` type, + created by committee aggregators. These combine attestations from committee + members. Aggregations propagate on the aggregation topic to which every + validator subscribes. ### Encoding diff --git a/docs/client/validator.md b/docs/client/validator.md index 43391448..305140e2 100644 --- a/docs/client/validator.md +++ b/docs/client/validator.md @@ -2,8 +2,8 @@ ## Overview -Validators participate in consensus by proposing blocks and producing attestations. -Optionally validators can opt-in to behave as aggregators in their committee . +Validators participate in consensus by proposing blocks and producing attestations. +Optionally validators can opt-in to behave as aggregators in their committee . This document describes what honest validators do. ## Validator Assignment @@ -19,19 +19,19 @@ is temporary for devnet testing. ## Attestation Committees and Subnets -Attestation committee is a group of validators contributing to the common +Attestation committee is a group of validators contributing to the common aggregated attestations. Subnets are network channels dedicated to specific committees. 
-In the devnet-3 design, however, there is one global subnet for signed +In the devnet-3 design, however, there is one global subnet for signed attestations propagation, in addition to publishing into per committee subnets. -This is due to 3SF-mini consensus design, that requires 2/3+ of all +This is due to 3SF-mini consensus design, that requires 2/3+ of all attestations to be observed by any validator to compute safe target correctly. Note that non-aggregating validators do not need to subscribe to committee -attestation subnets. They only need to subscribe to the global attestation +attestation subnets. They only need to subscribe to the global attestation subnet. -Every validator is assigned to a single committee. Number of committees is +Every validator is assigned to a single committee. Number of committees is defined in config.yaml. Each committee maps to a subnet ID. Validator's subnet ID is derived using their validator index modulo number of committees. This is to simplify debugging and testing. In the future, validator's subnet ID @@ -105,7 +105,7 @@ compute the head. ### Broadcasting Attestations -Validators sign their attestations and broadcast them into the global +Validators sign their attestations and broadcast them into the global attestation topic and its corresponding subnet topic. ## Timing diff --git a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py index dd5b202a..1db15330 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py +++ b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py @@ -50,7 +50,6 @@ ) from .base import BaseConsensusFixture - DEFAULT_VALIDATOR_ID = ValidatorIndex(0) @@ -213,7 +212,7 @@ def make_fixture(self) -> Self: # The Store is the node's local view of the chain. # It starts from a trusted anchor (usually genesis). 
store = Store.get_forkchoice_store( - state=self.anchor_state, + anchor_state=self.anchor_state, anchor_block=self.anchor_block, validator_id=DEFAULT_VALIDATOR_ID, ) @@ -265,7 +264,10 @@ def make_fixture(self) -> Self: # Process the block through Store. # This validates, applies state transition, and updates head. - store = store.on_block(signed_block, LEAN_ENV_TO_SCHEMES[self.lean_env]) + store = store.on_block( + signed_block, + scheme=LEAN_ENV_TO_SCHEMES[self.lean_env], + ) elif isinstance(step, AttestationStep): # Process a gossip attestation. @@ -397,10 +399,9 @@ def _build_block_from_spec( slot=spec.slot, proposer_index=proposer_index, parent_root=parent_root, - attestations=attestations, + attestations=available_attestations, available_attestations=available_attestations, known_block_roots=known_block_roots, - gossip_signatures=gossip_signatures, aggregated_payloads=store.aggregated_payloads, ) diff --git a/packages/testing/src/consensus_testing/test_fixtures/state_transition.py b/packages/testing/src/consensus_testing/test_fixtures/state_transition.py index 04cd2a9c..f1097447 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/state_transition.py +++ b/packages/testing/src/consensus_testing/test_fixtures/state_transition.py @@ -10,10 +10,8 @@ from lean_spec.subspecs.containers.state.state import State from lean_spec.subspecs.containers.validator import ValidatorIndex from lean_spec.subspecs.ssz.hash import hash_tree_root -from lean_spec.subspecs.xmss.aggregation import SignatureKey from lean_spec.types import Bytes32 -from ..keys import get_shared_key_manager from ..test_types import BlockSpec, StateExpectation from .base import BaseConsensusFixture @@ -263,26 +261,11 @@ def _build_block_from_spec(self, spec: BlockSpec, state: State) -> tuple[Block, for vid in agg.aggregation_bits.to_validator_indices() ] - if plain_attestations: - key_manager = get_shared_key_manager(max_slot=spec.slot) - gossip_signatures = { - SignatureKey( - 
att.validator_id, att.data.data_root_bytes() - ): key_manager.sign_attestation_data( - att.validator_id, - att.data, - ) - for att in plain_attestations - } - else: - gossip_signatures = {} - block, post_state, _, _ = state.build_block( slot=spec.slot, proposer_index=proposer_index, parent_root=parent_root, attestations=plain_attestations, - gossip_signatures=gossip_signatures, aggregated_payloads={}, ) return block, post_state diff --git a/packages/testing/src/consensus_testing/test_fixtures/verify_signatures.py b/packages/testing/src/consensus_testing/test_fixtures/verify_signatures.py index f11aad4e..a4ec903b 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/verify_signatures.py +++ b/packages/testing/src/consensus_testing/test_fixtures/verify_signatures.py @@ -26,7 +26,7 @@ from lean_spec.subspecs.containers.validator import ValidatorIndex from lean_spec.subspecs.koalabear import Fp from lean_spec.subspecs.ssz import hash_tree_root -from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof, SignatureKey +from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof from lean_spec.subspecs.xmss.constants import TARGET_CONFIG from lean_spec.subspecs.xmss.containers import Signature from lean_spec.subspecs.xmss.types import ( @@ -233,19 +233,12 @@ def _build_block_from_spec( spec, state, key_manager ) - # Provide signatures to State.build_block for valid attestations - gossip_signatures = { - SignatureKey(att.validator_id, att.data.data_root_bytes()): sig - for att, sig in zip(valid_attestations, valid_signatures, strict=True) - } - # Use State.build_block for valid attestations (pure spec logic) final_block, _, _, aggregated_signatures = state.build_block( slot=spec.slot, proposer_index=proposer_index, parent_root=parent_root, attestations=valid_attestations, - gossip_signatures=gossip_signatures, aggregated_payloads={}, ) diff --git a/src/lean_spec/subspecs/containers/attestation/attestation.py 
b/src/lean_spec/subspecs/containers/attestation/attestation.py index 1de0f587..683310f7 100644 --- a/src/lean_spec/subspecs/containers/attestation/attestation.py +++ b/src/lean_spec/subspecs/containers/attestation/attestation.py @@ -112,6 +112,12 @@ def aggregate_by_data( class SignedAggregatedAttestation(Container): + """ + A signed aggregated attestation for broadcasting. + + Contains the attestation data and the aggregated signature proof. + """ + data: AttestationData """Combined attestation data similar to the beacon chain format.""" diff --git a/src/lean_spec/subspecs/containers/state/state.py b/src/lean_spec/subspecs/containers/state/state.py index d7138567..aa38f8a3 100644 --- a/src/lean_spec/subspecs/containers/state/state.py +++ b/src/lean_spec/subspecs/containers/state/state.py @@ -1,6 +1,6 @@ """State Container for the Lean Ethereum consensus specification.""" -from typing import AbstractSet, Iterable +from typing import AbstractSet, Collection, Iterable from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.subspecs.xmss.aggregation import ( @@ -779,7 +779,7 @@ def build_block( def aggregate_gossip_signatures( self, - attestations: list[Attestation], + attestations: Collection[Attestation], gossip_signatures: dict[SignatureKey, "Signature"] | None = None, ) -> list[tuple[AggregatedAttestation, AggregatedSignatureProof]]: """ @@ -791,7 +791,7 @@ def aggregate_gossip_signatures( Parameters ---------- - attestations : list[Attestation] + attestations : Collection[Attestation] Individual attestations to aggregate and sign. gossip_signatures : dict[SignatureKey, Signature] | None Per-validator XMSS signatures learned from the gossip network. @@ -807,7 +807,7 @@ def aggregate_gossip_signatures( # # Multiple validators may attest to the same data (slot, head, target, source). # We aggregate them into groups so each group can share a single proof. 
- for aggregated in AggregatedAttestation.aggregate_by_data(attestations): + for aggregated in AggregatedAttestation.aggregate_by_data(list(attestations)): # Extract the common attestation data and its hash. # # All validators in this group signed the same message (the data root). @@ -831,7 +831,6 @@ def aggregate_gossip_signatures( # Track validators we couldn't find signatures for. # # These will need to be covered by Phase 2 (existing proofs). - remaining: set[ValidatorIndex] = set() # Attempt to collect each validator's signature from gossip. # diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 4100b857..93bfb8f0 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -15,11 +15,11 @@ from collections import defaultdict from lean_spec.subspecs.chain.config import ( + ATTESTATION_COMMITTEE_COUNT, INTERVALS_PER_SLOT, JUSTIFICATION_LOOKBACK_SLOTS, SECONDS_PER_INTERVAL, SECONDS_PER_SLOT, - ATTESTATION_COMMITTEE_COUNT, ) from lean_spec.subspecs.containers import ( Attestation, @@ -32,11 +32,14 @@ State, ValidatorIndex, ) +from lean_spec.subspecs.containers.attestation.attestation import SignedAggregatedAttestation from lean_spec.subspecs.containers.block import BlockLookup from lean_spec.subspecs.containers.slot import Slot +from lean_spec.subspecs.networking import compute_subnet_id from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.subspecs.xmss.aggregation import ( AggregatedSignatureProof, + AggregationError, SignatureKey, ) from lean_spec.subspecs.xmss.containers import Signature @@ -47,10 +50,6 @@ Uint64, ) from lean_spec.types.container import Container -from lean_spec.subspecs.networking import compute_subnet_id - -from lean_spec.subspecs.containers.attestation.attestation import SignedAggregatedAttestation -from lean_spec.subspecs.xmss.aggregation import AggregationError class Store(Container): @@ -294,8 +293,8 @@ def 
on_gossip_attestation( This method: 1. Verifies the XMSS signature - 2. If current node is aggregator, stores the signature in the gossip signature map if it belongs - to the current validator's subnet + 2. If current node is aggregator, stores the signature in the gossip + signature map if it belongs to the current validator's subnet 3. Processes the attestation data via on_attestation Args: @@ -950,16 +949,20 @@ def aggregate_committee_signatures(self) -> "Store": attestations = self.latest_new_attestations committee_signatures = self.gossip_signatures + attestation_list = [ + Attestation(validator_id=vid, data=data) for vid, data in attestations.items() + ] + head_state = self.states[self.head] # Perform aggregation aggregated_results = head_state.aggregate_gossip_signatures( - attestations, + attestation_list, committee_signatures, ) # iterate to broadcast aggregated attestations for aggregated_attestation, aggregated_signature in aggregated_results: - signed_aggregated_attestation = SignedAggregatedAttestation( + _ = SignedAggregatedAttestation( data=aggregated_attestation.data, proof=aggregated_signature, ) diff --git a/src/lean_spec/subspecs/networking/__init__.py b/src/lean_spec/subspecs/networking/__init__.py index 70c00424..3192e919 100644 --- a/src/lean_spec/subspecs/networking/__init__.py +++ b/src/lean_spec/subspecs/networking/__init__.py @@ -32,9 +32,9 @@ PeerDisconnectedEvent, PeerStatusEvent, ) +from .subnet import compute_subnet_id from .transport import PeerId from .types import DomainType, ForkDigest, ProtocolId -from .subnet import compute_subnet_id __all__ = [ # Config diff --git a/src/lean_spec/subspecs/networking/client/event_source.py b/src/lean_spec/subspecs/networking/client/event_source.py index 3e30446b..ed48ddbb 100644 --- a/src/lean_spec/subspecs/networking/client/event_source.py +++ b/src/lean_spec/subspecs/networking/client/event_source.py @@ -324,7 +324,7 @@ def decode_message( self, topic_str: str, compressed_data: bytes, - ) -> 
SignedBlockWithAttestation | SignedAttestation: + ) -> SignedBlockWithAttestation | SignedAttestation | None: """ Decode a gossip message from topic and compressed data. diff --git a/src/lean_spec/subspecs/networking/service/service.py b/src/lean_spec/subspecs/networking/service/service.py index 45bb4cc5..529f8969 100644 --- a/src/lean_spec/subspecs/networking/service/service.py +++ b/src/lean_spec/subspecs/networking/service/service.py @@ -36,6 +36,7 @@ GossipBlockEvent, NetworkEvent, NetworkEventSource, + PeerConnectedEvent, PeerDisconnectedEvent, PeerStatusEvent, ) diff --git a/src/lean_spec/subspecs/node/helpers.py b/src/lean_spec/subspecs/node/helpers.py index 1c50e52a..f1cdf7f7 100644 --- a/src/lean_spec/subspecs/node/helpers.py +++ b/src/lean_spec/subspecs/node/helpers.py @@ -1,6 +1,4 @@ -""" -Helper functions for node operations. -""" +"""Helper functions for node operations.""" from lean_spec.subspecs.containers.validator import ValidatorIndex diff --git a/src/lean_spec/subspecs/node/node.py b/src/lean_spec/subspecs/node/node.py index fcb2ca01..fcaf7a9f 100644 --- a/src/lean_spec/subspecs/node/node.py +++ b/src/lean_spec/subspecs/node/node.py @@ -99,11 +99,11 @@ def get_local_validator_id(registry: ValidatorRegistry | None) -> ValidatorIndex For now, returns None as a default for passive nodes or simple setups. Future implementations will look up keys in the registry. """ - if registry is None or len(registry.validators) == 0: + if registry is None or len(registry) == 0: return None # For simplicity, use the first validator in the registry. - return registry.validators[0].index + return registry.indices()[0] @dataclass(slots=True) @@ -278,7 +278,7 @@ def _create_database(path: Path | str) -> Database: @staticmethod def _try_load_from_database( database: Database | None, - validator_id: ValidatorIndex, + validator_id: ValidatorIndex | None, ) -> Store | None: """ Try to load forkchoice store from existing database state. 
diff --git a/src/lean_spec/subspecs/sync/service.py b/src/lean_spec/subspecs/sync/service.py index ece6c240..dc591605 100644 --- a/src/lean_spec/subspecs/sync/service.py +++ b/src/lean_spec/subspecs/sync/service.py @@ -45,12 +45,10 @@ from lean_spec.subspecs.chain.clock import SlotClock from lean_spec.subspecs.containers import ( Block, - SignedAggregatedAttestation, SignedAttestation, SignedBlockWithAttestation, ) from lean_spec.subspecs.forkchoice.store import Store - from lean_spec.subspecs.networking.reqresp.message import Status from lean_spec.subspecs.networking.transport.peer_id import PeerId from lean_spec.subspecs.ssz.hash import hash_tree_root diff --git a/tests/lean_spec/conftest.py b/tests/lean_spec/conftest.py index eb8abc74..d1a1d025 100644 --- a/tests/lean_spec/conftest.py +++ b/tests/lean_spec/conftest.py @@ -10,8 +10,8 @@ import pytest from lean_spec.subspecs.containers import Block, State -from lean_spec.subspecs.forkchoice import Store from lean_spec.subspecs.containers.validator import ValidatorIndex +from lean_spec.subspecs.forkchoice import Store from tests.lean_spec.helpers import make_genesis_block, make_genesis_state diff --git a/tests/lean_spec/helpers/__init__.py b/tests/lean_spec/helpers/__init__.py index c59acccb..34d3f0a4 100644 --- a/tests/lean_spec/helpers/__init__.py +++ b/tests/lean_spec/helpers/__init__.py @@ -1,5 +1,7 @@ """Test helpers for leanSpec unit tests.""" +from lean_spec.subspecs.containers.validator import ValidatorIndex + from .builders import ( make_aggregated_attestation, make_block, @@ -15,7 +17,6 @@ make_validators_with_keys, ) from .mocks import MockNoiseSession -from lean_spec.subspecs.containers.validator import ValidatorIndex TEST_VALIDATOR_ID = ValidatorIndex(0) diff --git a/tests/lean_spec/subspecs/api/test_server.py b/tests/lean_spec/subspecs/api/test_server.py index 5abcc560..55c007d4 100644 --- a/tests/lean_spec/subspecs/api/test_server.py +++ b/tests/lean_spec/subspecs/api/test_server.py @@ -15,7 +15,6 @@ 
from lean_spec.subspecs.containers import State from lean_spec.subspecs.containers.slot import Slot from lean_spec.subspecs.containers.state import Validators -from lean_spec.subspecs.containers.validator import ValidatorIndex from lean_spec.subspecs.forkchoice import Store diff --git a/tests/lean_spec/subspecs/containers/test_state_aggregation.py b/tests/lean_spec/subspecs/containers/test_state_aggregation.py index 4bda5e6c..fbcf32d2 100644 --- a/tests/lean_spec/subspecs/containers/test_state_aggregation.py +++ b/tests/lean_spec/subspecs/containers/test_state_aggregation.py @@ -216,9 +216,7 @@ def test_build_block_collects_valid_available_attestations() -> None: message=data_root, epoch=att_data.slot, ) - aggregated_payloads = { - SignatureKey(ValidatorIndex(0), data_root): [proof] - } + aggregated_payloads = {SignatureKey(ValidatorIndex(0), data_root): [proof]} # Proposer for slot 1 with 2 validators: slot % num_validators = 1 % 2 = 1 block, post_state, aggregated_atts, aggregated_proofs = state.build_block( diff --git a/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py b/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py index d2f0edd5..b761db96 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py +++ b/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py @@ -72,7 +72,7 @@ def test_on_block_processes_multi_validator_aggregations() -> None: for vid in (ValidatorIndex(1), ValidatorIndex(2)) ] participants = [ValidatorIndex(1), ValidatorIndex(2)] - + from lean_spec.subspecs.containers.attestation import AggregationBits from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof @@ -84,10 +84,7 @@ def test_on_block_processes_multi_validator_aggregations() -> None: epoch=attestation_data.slot, ) - aggregated_payloads = { - SignatureKey(vid, data_root): [proof] - for vid in participants - } + aggregated_payloads = {SignatureKey(vid, data_root): [proof] for vid in participants} producer_store = 
base_store.model_copy( update={ diff --git a/tests/lean_spec/subspecs/forkchoice/test_time_management.py b/tests/lean_spec/subspecs/forkchoice/test_time_management.py index 912870e2..94622501 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_time_management.py +++ b/tests/lean_spec/subspecs/forkchoice/test_time_management.py @@ -20,7 +20,7 @@ from lean_spec.subspecs.forkchoice import Store from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.types import Bytes32, Bytes52, Uint64 -from tests.lean_spec.helpers import make_signed_attestation, TEST_VALIDATOR_ID +from tests.lean_spec.helpers import TEST_VALIDATOR_ID, make_signed_attestation @pytest.fixture diff --git a/tests/lean_spec/subspecs/networking/client/test_gossip_reception.py b/tests/lean_spec/subspecs/networking/client/test_gossip_reception.py index e3f1b485..d1b5a559 100644 --- a/tests/lean_spec/subspecs/networking/client/test_gossip_reception.py +++ b/tests/lean_spec/subspecs/networking/client/test_gossip_reception.py @@ -511,7 +511,7 @@ class TestGossipReceptionIntegration: def test_full_block_reception_flow(self) -> None: """Tests complete flow: stream -> parse -> decompress -> decode.""" - async def run() -> tuple[SignedBlockWithAttestation | SignedAttestation, bytes]: + async def run() -> tuple[SignedBlockWithAttestation | SignedAttestation | None, bytes]: handler = GossipHandler(fork_digest="0x00000000") original_block = make_test_signed_block() ssz_bytes = original_block.encode_bytes() @@ -536,7 +536,9 @@ async def run() -> tuple[SignedBlockWithAttestation | SignedAttestation, bytes]: def test_full_attestation_reception_flow(self) -> None: """Tests complete flow for attestation messages.""" - async def run() -> tuple[SignedBlockWithAttestation | SignedAttestation, bytes, TopicKind]: + async def run() -> tuple[ + SignedBlockWithAttestation | SignedAttestation | None, bytes, TopicKind + ]: handler = GossipHandler(fork_digest="0x00000000") original_attestation = 
make_test_signed_attestation() ssz_bytes = original_attestation.encode_bytes() @@ -586,6 +588,7 @@ async def run() -> tuple[bytes, bytes]: # Decode decoded = handler.decode_message(topic_str, compressed) + assert decoded is not None, "decode_message should not return None for valid input" decoded_bytes = decoded.encode_bytes() return decoded_bytes, original_bytes diff --git a/tests/lean_spec/subspecs/networking/test_network_service.py b/tests/lean_spec/subspecs/networking/test_network_service.py index 849ce3fd..4488d33c 100644 --- a/tests/lean_spec/subspecs/networking/test_network_service.py +++ b/tests/lean_spec/subspecs/networking/test_network_service.py @@ -36,7 +36,7 @@ from lean_spec.subspecs.sync.service import SyncService from lean_spec.subspecs.sync.states import SyncState from lean_spec.types import Bytes32, Uint64 -from tests.lean_spec.helpers import make_mock_signature, make_signed_block, TEST_VALIDATOR_ID +from tests.lean_spec.helpers import TEST_VALIDATOR_ID, make_mock_signature, make_signed_block @dataclass diff --git a/tests/lean_spec/subspecs/ssz/test_state.py b/tests/lean_spec/subspecs/ssz/test_state.py index da2e2a9e..20203f93 100644 --- a/tests/lean_spec/subspecs/ssz/test_state.py +++ b/tests/lean_spec/subspecs/ssz/test_state.py @@ -41,6 +41,6 @@ def test_encode_decode_state_roundtrip() -> None: ) encode = state.encode_bytes() - expected_value = "e8030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e4000000e4000000e5000000e5000000e50000000101" + expected_value = 
"e8030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e4000000e4000000e5000000e5000000e50000000101" # noqa: E501 assert encode.hex() == expected_value assert State.decode_bytes(encode) == state diff --git a/tests/lean_spec/subspecs/validator/test_service.py b/tests/lean_spec/subspecs/validator/test_service.py index c7c88589..896907f0 100644 --- a/tests/lean_spec/subspecs/validator/test_service.py +++ b/tests/lean_spec/subspecs/validator/test_service.py @@ -32,7 +32,6 @@ from lean_spec.subspecs.validator.registry import ValidatorEntry from lean_spec.subspecs.xmss import TARGET_SIGNATURE_SCHEME from lean_spec.subspecs.xmss.aggregation import SignatureKey -from lean_spec.subspecs.xmss.containers import Signature from lean_spec.types import Bytes32, Bytes52, Uint64 from tests.lean_spec.helpers import TEST_VALIDATOR_ID @@ -784,12 +783,12 @@ def test_block_includes_pending_attestations( # Simulate aggregated payloads for validators 3 and 4 from lean_spec.subspecs.containers.attestation import AggregationBits from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof - + attestation_map: dict[ValidatorIndex, AttestationData] = {} signatures = [] participants = [ValidatorIndex(3), ValidatorIndex(4)] public_keys = [] - + for vid in participants: sig = key_manager.sign_attestation_data(vid, attestation_data) signatures.append(sig) @@ -804,10 +803,7 @@ def test_block_includes_pending_attestations( epoch=attestation_data.slot, ) - aggregated_payloads = { - SignatureKey(vid, data_root): [proof] - for vid in participants - } + aggregated_payloads = {SignatureKey(vid, data_root): [proof] for vid in 
participants} # Update store with pending attestations and aggregated payloads updated_store = store.model_copy( From 22bd960dd0577b4290665383d43fb61afc64231a Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 27 Jan 2026 18:30:44 +0500 Subject: [PATCH 35/39] Small fixes --- docs/client/networking.md | 8 ++++---- docs/client/validator.md | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/client/networking.md b/docs/client/networking.md index 137e132e..3db1c00c 100644 --- a/docs/client/networking.md +++ b/docs/client/networking.md @@ -68,10 +68,10 @@ Snappy-compressed message, which type is identified by the topic: | Topic Name | Message Type | Encoding | |------------------------------------------------------------|-----------------------------|--------------| -| /lean/consensus/devnet3/blocks/ssz_snappy | SignedBlockWithAttestation | SSZ + Snappy | -| /lean/consensus/devnet3/attestations/ssz_snappy | SignedAttestation | SSZ + Snappy | -| /lean/consensus/devnet3/attestation\_{subnet_id}/ssz_snappy | SignedAttestation | SSZ + Snappy | -| /lean/consensus/devnet3/aggregation/ssz_snappy | SignedAggregatedAttestation | SSZ + Snappy | +| /leanconsensus/devnet3/blocks/ssz_snappy | SignedBlockWithAttestation | SSZ + Snappy | +| /leanconsensus/devnet3/attestations/ssz_snappy | SignedAttestation | SSZ + Snappy | +| /leanconsensus/devnet3/attestation\_{subnet_id}/ssz_snappy | SignedAttestation | SSZ + Snappy | +| /leanconsensus/devnet3/aggregation/ssz_snappy | SignedAggregatedAttestation | SSZ + Snappy | ### Message Types diff --git a/docs/client/validator.md b/docs/client/validator.md index 305140e2..ab68f10d 100644 --- a/docs/client/validator.md +++ b/docs/client/validator.md @@ -3,7 +3,7 @@ ## Overview Validators participate in consensus by proposing blocks and producing attestations. -Optionally validators can opt-in to behave as aggregators in their committee . +Optionally validators can opt-in to behave as aggregators in their committee. 
This document describes what honest validators do. ## Validator Assignment From 7cf9773e5f96a9c049fb8d62314b41971c508daf Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 27 Jan 2026 19:27:45 +0500 Subject: [PATCH 36/39] Fix ci: refactor attestation handling for block construction --- .../test_fixtures/fork_choice.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py index 1db15330..6b4949ec 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py +++ b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py @@ -371,11 +371,14 @@ def _build_block_from_spec( gossip_signatures = dict(store.gossip_signatures) gossip_signatures.update(attestation_signatures) - # Collect attestations from the store if requested. + # Prepare attestations for block construction. # - # Previous proposers' attestations become available for inclusion. - # This makes test vectors more realistic. - available_attestations: list[Attestation] | None = None + # Two sources of attestations: + # 1. Explicit attestations from the spec (always included) + # 2. Store attestations (only if include_store_attestations is True) + # + # When both are present, they are merged during block construction. 
+ available_attestations: list[Attestation] known_block_roots: set[Bytes32] | None = None if spec.include_store_attestations: @@ -388,7 +391,12 @@ def _build_block_from_spec( Attestation(validator_id=vid, data=data) for vid, data in store.latest_new_attestations.items() ) + # Add explicit attestations from the spec + available_attestations.extend(attestations) known_block_roots = set(store.blocks.keys()) + else: + # Use only explicit attestations from the spec + available_attestations = attestations # Build the block using spec logic # From 6e853567d6bcca7da90d3056bb311a9b39633736 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 27 Jan 2026 19:41:35 +0500 Subject: [PATCH 37/39] Fix ci --- .../test_fixtures/fork_choice.py | 70 ++++++++++++++++--- 1 file changed, 62 insertions(+), 8 deletions(-) diff --git a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py index 6b4949ec..89d79918 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py +++ b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py @@ -266,6 +266,7 @@ def make_fixture(self) -> Self: # This validates, applies state transition, and updates head. store = store.on_block( signed_block, + current_validator=DEFAULT_VALIDATOR_ID, scheme=LEAN_ENV_TO_SCHEMES[self.lean_env], ) @@ -371,32 +372,85 @@ def _build_block_from_spec( gossip_signatures = dict(store.gossip_signatures) gossip_signatures.update(attestation_signatures) - # Prepare attestations for block construction. + # Prepare attestations and aggregated payloads for block construction. # # Two sources of attestations: # 1. Explicit attestations from the spec (always included) # 2. Store attestations (only if include_store_attestations is True) # - # When both are present, they are merged during block construction. + # For all attestations, we need to create aggregated proofs + # so build_block can include them in the block body. 
+ # Attestations with the same data should be merged into a single proof. available_attestations: list[Attestation] known_block_roots: set[Bytes32] | None = None - + + # Create aggregated payloads from explicit attestations + # Group attestations by data to create one proof per group + from lean_spec.subspecs.containers.block.types import AggregatedAttestation + from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof, SignatureKey + from lean_spec.subspecs.containers.attestation import AggregationBits + + aggregated_payloads = dict(store.aggregated_payloads) if store.aggregated_payloads else {} + + # Collect all attestations that need aggregated proofs + all_attestations_for_proofs: list[Attestation] = list(attestations) + if spec.include_store_attestations: # Gather all attestations: both active and recently received. - available_attestations = [ + store_attestations = [ Attestation(validator_id=vid, data=data) for vid, data in store.latest_known_attestations.items() ] - available_attestations.extend( + store_attestations.extend( Attestation(validator_id=vid, data=data) for vid, data in store.latest_new_attestations.items() ) - # Add explicit attestations from the spec - available_attestations.extend(attestations) + + # Add store attestations to the list for proof creation + all_attestations_for_proofs.extend(store_attestations) + + # Combine for block construction + available_attestations = store_attestations + attestations known_block_roots = set(store.blocks.keys()) else: # Use only explicit attestations from the spec available_attestations = attestations + + # Create aggregated proofs for all attestations (merged by data) + # This ensures attestations with the same data are aggregated together + for agg_att in AggregatedAttestation.aggregate_by_data(all_attestations_for_proofs): + validator_ids = list(agg_att.aggregation_bits.to_validator_indices()) + message = agg_att.data.data_root_bytes() + epoch = agg_att.data.slot + + # Check if we have 
signatures for all validators + all_sigs_available = all( + SignatureKey(vid, message) in gossip_signatures + for vid in validator_ids + ) + + if all_sigs_available: + # Collect public keys and signatures for these validators + public_keys = [key_manager.get_public_key(vid) for vid in validator_ids] + signatures = [gossip_signatures[SignatureKey(vid, message)] for vid in validator_ids] + + # Create aggregated proof + participants = AggregationBits.from_validator_indices(validator_ids) + proof = AggregatedSignatureProof.aggregate( + participants=participants, + public_keys=public_keys, + signatures=signatures, + message=message, + epoch=epoch, + ) + + # Add to aggregated_payloads for each validator + for vid in validator_ids: + sig_key = SignatureKey(vid, message) + if sig_key not in aggregated_payloads: + aggregated_payloads[sig_key] = [] + # Insert at the beginning (most recent) + aggregated_payloads[sig_key].insert(0, proof) # Build the block using spec logic # @@ -410,7 +464,7 @@ def _build_block_from_spec( attestations=available_attestations, available_attestations=available_attestations, known_block_roots=known_block_roots, - aggregated_payloads=store.aggregated_payloads, + aggregated_payloads=aggregated_payloads, ) # Create proposer attestation From da211843ac2f6b1abf398ee3d536578b9398d238 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Tue, 27 Jan 2026 20:17:23 +0500 Subject: [PATCH 38/39] Fix ci --- .../test_fixtures/fork_choice.py | 31 ++++++++++--------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py index 89d79918..be121bad 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py +++ b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py @@ -383,18 +383,18 @@ def _build_block_from_spec( # Attestations with the same data should be merged into a single proof. 
available_attestations: list[Attestation] known_block_roots: set[Bytes32] | None = None - + # Create aggregated payloads from explicit attestations # Group attestations by data to create one proof per group + from lean_spec.subspecs.containers.attestation import AggregationBits from lean_spec.subspecs.containers.block.types import AggregatedAttestation from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof, SignatureKey - from lean_spec.subspecs.containers.attestation import AggregationBits - + aggregated_payloads = dict(store.aggregated_payloads) if store.aggregated_payloads else {} - + # Collect all attestations that need aggregated proofs all_attestations_for_proofs: list[Attestation] = list(attestations) - + if spec.include_store_attestations: # Gather all attestations: both active and recently received. store_attestations = [ @@ -405,35 +405,36 @@ def _build_block_from_spec( Attestation(validator_id=vid, data=data) for vid, data in store.latest_new_attestations.items() ) - + # Add store attestations to the list for proof creation all_attestations_for_proofs.extend(store_attestations) - + # Combine for block construction available_attestations = store_attestations + attestations known_block_roots = set(store.blocks.keys()) else: # Use only explicit attestations from the spec available_attestations = attestations - + # Create aggregated proofs for all attestations (merged by data) # This ensures attestations with the same data are aggregated together for agg_att in AggregatedAttestation.aggregate_by_data(all_attestations_for_proofs): validator_ids = list(agg_att.aggregation_bits.to_validator_indices()) message = agg_att.data.data_root_bytes() epoch = agg_att.data.slot - + # Check if we have signatures for all validators all_sigs_available = all( - SignatureKey(vid, message) in gossip_signatures - for vid in validator_ids + SignatureKey(vid, message) in gossip_signatures for vid in validator_ids ) - + if all_sigs_available: # Collect public keys 
and signatures for these validators public_keys = [key_manager.get_public_key(vid) for vid in validator_ids] - signatures = [gossip_signatures[SignatureKey(vid, message)] for vid in validator_ids] - + signatures = [ + gossip_signatures[SignatureKey(vid, message)] for vid in validator_ids + ] + # Create aggregated proof participants = AggregationBits.from_validator_indices(validator_ids) proof = AggregatedSignatureProof.aggregate( @@ -443,7 +444,7 @@ def _build_block_from_spec( message=message, epoch=epoch, ) - + # Add to aggregated_payloads for each validator for vid in validator_ids: sig_key = SignatureKey(vid, message) From 6ad7b19522a147dc32a7e2cdf75b2e6c721172a1 Mon Sep 17 00:00:00 2001 From: kamilsa Date: Wed, 28 Jan 2026 14:39:58 +0500 Subject: [PATCH 39/39] Refactor attestation handling to support committee signature aggregation --- .../test_fixtures/fork_choice.py | 98 +++++++++---------- 1 file changed, 46 insertions(+), 52 deletions(-) diff --git a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py index be121bad..4239c962 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py +++ b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py @@ -15,6 +15,7 @@ from lean_spec.subspecs.containers.attestation import ( Attestation, AttestationData, + SignedAttestation, ) from lean_spec.subspecs.containers.block import ( Block, @@ -363,14 +364,32 @@ def _build_block_from_spec( # # Attestations vote for blocks and influence fork choice weight. # The spec may include attestations to include in this block. 
- attestations, attestation_signatures = self._build_attestations_from_spec( - spec, store, block_registry, parent_root, key_manager + attestations, attestation_signatures, valid_signature_keys = ( + self._build_attestations_from_spec( + spec, store, block_registry, parent_root, key_manager + ) ) - # Merge new attestation signatures with existing gossip signatures. - # These are needed for signature aggregation later. - gossip_signatures = dict(store.gossip_signatures) - gossip_signatures.update(attestation_signatures) + # Merge per-attestation signatures into the Store's gossip signature cache. + # Required so the Store can aggregate committee signatures later when building payloads. + working_store = store + for attestation in attestations: + sig_key = SignatureKey(attestation.validator_id, attestation.data.data_root_bytes()) + if sig_key not in valid_signature_keys: + continue + signature = attestation_signatures.get(sig_key) + if signature is None: + continue + signed_attestation = SignedAttestation( + validator_id=attestation.validator_id, + message=attestation.data, + signature=signature, + ) + working_store = working_store.on_gossip_attestation( + signed_attestation, + scheme=LEAN_ENV_TO_SCHEMES[self.lean_env], + is_aggregator=True, + ) # Prepare attestations and aggregated payloads for block construction. 
# @@ -384,12 +403,6 @@ def _build_block_from_spec( available_attestations: list[Attestation] known_block_roots: set[Bytes32] | None = None - # Create aggregated payloads from explicit attestations - # Group attestations by data to create one proof per group - from lean_spec.subspecs.containers.attestation import AggregationBits - from lean_spec.subspecs.containers.block.types import AggregatedAttestation - from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof, SignatureKey - aggregated_payloads = dict(store.aggregated_payloads) if store.aggregated_payloads else {} # Collect all attestations that need aggregated proofs @@ -416,42 +429,20 @@ def _build_block_from_spec( # Use only explicit attestations from the spec available_attestations = attestations - # Create aggregated proofs for all attestations (merged by data) - # This ensures attestations with the same data are aggregated together - for agg_att in AggregatedAttestation.aggregate_by_data(all_attestations_for_proofs): - validator_ids = list(agg_att.aggregation_bits.to_validator_indices()) - message = agg_att.data.data_root_bytes() - epoch = agg_att.data.slot - - # Check if we have signatures for all validators - all_sigs_available = all( - SignatureKey(vid, message) in gossip_signatures for vid in validator_ids - ) - - if all_sigs_available: - # Collect public keys and signatures for these validators - public_keys = [key_manager.get_public_key(vid) for vid in validator_ids] - signatures = [ - gossip_signatures[SignatureKey(vid, message)] for vid in validator_ids - ] - - # Create aggregated proof - participants = AggregationBits.from_validator_indices(validator_ids) - proof = AggregatedSignatureProof.aggregate( - participants=participants, - public_keys=public_keys, - signatures=signatures, - message=message, - epoch=epoch, - ) - - # Add to aggregated_payloads for each validator - for vid in validator_ids: - sig_key = SignatureKey(vid, message) - if sig_key not in aggregated_payloads: - 
aggregated_payloads[sig_key] = [] - # Insert at the beginning (most recent) - aggregated_payloads[sig_key].insert(0, proof) + # Build aggregated proofs via Store aggregation logic. + attestation_map = { + attestation.validator_id: attestation.data + for attestation in all_attestations_for_proofs + } + aggregation_store = working_store.model_copy( + update={ + "head": parent_root, + "latest_new_attestations": attestation_map, + "aggregated_payloads": aggregated_payloads, + } + ) + aggregation_store = aggregation_store.aggregate_committee_signatures() + aggregated_payloads = aggregation_store.aggregated_payloads # Build the block using spec logic # @@ -573,7 +564,7 @@ def _build_attestations_from_spec( block_registry: dict[str, Block], parent_root: Bytes32, key_manager: XmssKeyManager, - ) -> tuple[list[Attestation], dict[SignatureKey, Signature]]: + ) -> tuple[list[Attestation], dict[SignatureKey, Signature], set[SignatureKey]]: """ Build attestations and signatures from block specification. @@ -589,15 +580,16 @@ def _build_attestations_from_spec( key_manager: Key manager for signing. Returns: - Tuple of (attestations list, signature lookup dict). + Tuple of (attestations list, signature lookup dict, valid signature keys). """ # No attestations specified means empty block body. if spec.attestations is None: - return [], {} + return [], {}, set() parent_state = store.states[parent_root] attestations = [] signature_lookup: dict[SignatureKey, Signature] = {} + valid_signature_keys: set[SignatureKey] = set() for aggregated_spec in spec.attestations: # Build attestation data once. @@ -635,8 +627,10 @@ def _build_attestations_from_spec( # This enables lookup during signature aggregation. 
sig_key = SignatureKey(validator_id, attestation_data.data_root_bytes()) signature_lookup[sig_key] = signature + if aggregated_spec.valid_signature: + valid_signature_keys.add(sig_key) - return attestations, signature_lookup + return attestations, signature_lookup, valid_signature_keys def _build_attestation_data_from_spec( self,