hotshot_testing/byzantine/
byzantine_behaviour.rs

1use std::{
2    collections::{BTreeMap, HashMap, HashSet},
3    iter::once,
4    sync::Arc,
5};
6
7use anyhow::Context;
8use async_lock::RwLock;
9use async_trait::async_trait;
10use hotshot::{
11    tasks::EventTransformerState,
12    types::{SignatureKey, SystemContextHandle},
13};
14use hotshot_task_impls::{
15    events::HotShotEvent,
16    network::{
17        NetworkEventTaskState,
18        test::{ModifierClosure, NetworkEventTaskStateModifier},
19    },
20};
21use hotshot_types::{
22    consensus::OuterConsensus,
23    data::{EpochNumber, QuorumProposalWrapper, ViewNumber},
24    epoch_membership::EpochMembershipCoordinator,
25    message::{
26        GeneralConsensusMessage, Message, MessageKind, Proposal, SequencingMessage, UpgradeLock,
27        convert_proposal,
28    },
29    simple_vote::{
30        HasEpoch, QuorumVote2, ViewSyncPreCommitData, ViewSyncPreCommitData2,
31        ViewSyncPreCommitVote, ViewSyncPreCommitVote2,
32    },
33    traits::{
34        election::Membership,
35        network::ConnectedNetwork,
36        node_implementation::{NodeImplementation, NodeType},
37    },
38    vote::HasViewNumber,
39};
40
#[derive(Debug)]
/// An `EventTransformerState` that multiplies `QuorumProposalSend` events, incrementing the view number of the proposal
pub struct BadProposalViewDos {
    /// The number of times to duplicate a `QuorumProposalSend` event
    pub multiplier: u64,
    /// The view number increment each time it's duplicated
    pub increment: u64,
}
49
50#[async_trait]
51impl<TYPES: NodeType, I: NodeImplementation<TYPES>> EventTransformerState<TYPES, I>
52    for BadProposalViewDos
53{
54    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
55        vec![event.clone()]
56    }
57
58    async fn send_handler(
59        &mut self,
60        event: &HotShotEvent<TYPES>,
61        _public_key: &TYPES::SignatureKey,
62        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
63        _upgrade_lock: &UpgradeLock<TYPES>,
64        consensus: OuterConsensus<TYPES>,
65        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
66        _network: Arc<I::Network>,
67    ) -> Vec<HotShotEvent<TYPES>> {
68        match event {
69            HotShotEvent::QuorumProposalSend(proposal, signature) => {
70                let mut result = Vec::new();
71
72                for n in 1..self.multiplier {
73                    let mut modified_proposal = proposal.clone();
74
75                    modified_proposal.data.proposal.view_number += n * self.increment;
76
77                    result.push(HotShotEvent::QuorumProposalSend(
78                        modified_proposal,
79                        signature.clone(),
80                    ));
81                }
82
83                consensus.write().await.reset_actions();
84                result
85            },
86            _ => vec![event.clone()],
87        }
88    }
89}
90
#[derive(Debug)]
/// An `EventTransformerState` that doubles the `QuorumVoteSend` and `QuorumProposalSend` events
pub struct DoubleProposeVote;
94
95#[async_trait]
96impl<TYPES: NodeType, I: NodeImplementation<TYPES>> EventTransformerState<TYPES, I>
97    for DoubleProposeVote
98{
99    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
100        vec![event.clone()]
101    }
102
103    async fn send_handler(
104        &mut self,
105        event: &HotShotEvent<TYPES>,
106        _public_key: &TYPES::SignatureKey,
107        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
108        _upgrade_lock: &UpgradeLock<TYPES>,
109        _consensus: OuterConsensus<TYPES>,
110        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
111        _network: Arc<I::Network>,
112    ) -> Vec<HotShotEvent<TYPES>> {
113        match event {
114            HotShotEvent::QuorumProposalSend(..) | HotShotEvent::QuorumVoteSend(_) => {
115                vec![event.clone(), event.clone()]
116            },
117            _ => vec![event.clone()],
118        }
119    }
120}
121
#[derive(Debug)]
/// An `EventTransformerState` that modifies justify_qc on `QuorumProposalSend` to that of a previous view to mock a dishonest leader
pub struct DishonestLeader<TYPES: NodeType> {
    /// Proposals validated in previous views, cached so an old QC can be re-used
    pub validated_proposals: Vec<QuorumProposalWrapper<TYPES>>,
    /// How many times current node has been elected leader and sent a proposal
    pub total_proposals_from_node: u64,
    /// The proposal counts (values of `total_proposals_from_node`) at which to be dishonest
    pub dishonest_at_proposal_numbers: HashSet<u64>,
    /// How far back in `validated_proposals` to look for a stale QC
    pub view_look_back: usize,
    /// Shared record of every view we sent a bad proposal in (used to coordinate attacks with other byzantine replicas)
    pub dishonest_proposal_view_numbers: Arc<RwLock<HashSet<ViewNumber>>>,
}
136
/// Handles `QuorumProposalSend` events to mock a dishonest leader.
/// If previous proposals are cached and `total_proposals_from_node` matches a value in
/// `dishonest_at_proposal_numbers`, the event is re-sent with a modified proposal carrying an older QC.
140impl<TYPES: NodeType> DishonestLeader<TYPES> {
141    /// When a leader is sending a proposal this method will mock a dishonest leader
142    /// We accomplish this by looking back a number of specified views and using that cached proposals QC
143    async fn handle_proposal_send_event(
144        &self,
145        event: &HotShotEvent<TYPES>,
146        proposal: &Proposal<TYPES, QuorumProposalWrapper<TYPES>>,
147        sender: &TYPES::SignatureKey,
148    ) -> HotShotEvent<TYPES> {
149        let length = self.validated_proposals.len();
150        if !self
151            .dishonest_at_proposal_numbers
152            .contains(&self.total_proposals_from_node)
153            || length == 0
154        {
155            return event.clone();
156        }
157
158        // Grab proposal from specified view look back
159        let proposal_from_look_back = if length - 1 < self.view_look_back {
160            // If look back is too far just take the first proposal
161            self.validated_proposals[0].clone()
162        } else {
163            let index = (self.validated_proposals.len() - 1) - self.view_look_back;
164            self.validated_proposals[index].clone()
165        };
166
167        // Create a dishonest proposal by using the old proposals qc
168        let mut dishonest_proposal = proposal.clone();
169        dishonest_proposal.data.proposal.justify_qc = proposal_from_look_back.proposal.justify_qc;
170
171        // Save the view we sent the dishonest proposal on (used for coordination attacks with other byzantine replicas)
172        let mut dishonest_proposal_sent = self.dishonest_proposal_view_numbers.write().await;
173        dishonest_proposal_sent.insert(proposal.data.view_number());
174
175        HotShotEvent::QuorumProposalSend(dishonest_proposal, sender.clone())
176    }
177}
178
179#[async_trait]
180impl<TYPES: NodeType, I: NodeImplementation<TYPES> + std::fmt::Debug>
181    EventTransformerState<TYPES, I> for DishonestLeader<TYPES>
182{
183    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
184        vec![event.clone()]
185    }
186
187    async fn send_handler(
188        &mut self,
189        event: &HotShotEvent<TYPES>,
190        _public_key: &TYPES::SignatureKey,
191        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
192        _upgrade_lock: &UpgradeLock<TYPES>,
193        _consensus: OuterConsensus<TYPES>,
194        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
195        _network: Arc<I::Network>,
196    ) -> Vec<HotShotEvent<TYPES>> {
197        match event {
198            HotShotEvent::QuorumProposalSend(proposal, sender) => {
199                self.total_proposals_from_node += 1;
200                return vec![
201                    self.handle_proposal_send_event(event, proposal, sender)
202                        .await,
203                ];
204            },
205            HotShotEvent::QuorumProposalValidated(proposal, _) => {
206                self.validated_proposals.push(proposal.data.clone());
207            },
208            _ => {},
209        }
210        vec![event.clone()]
211    }
212}
213
#[derive(Debug)]
/// An `EventTransformerState` that modifies the view number on the certificate of a `DacSend` event to that of a future view
pub struct DishonestDa {
    /// How many times current node has been elected leader and sent a DA cert
    pub total_da_certs_sent_from_node: u64,
    /// The DA-cert-send counts (values of `total_da_certs_sent_from_node`) at which to be dishonest
    pub dishonest_at_da_cert_sent_numbers: HashSet<u64>,
    /// When dishonest, how many extra `DacSend` events to emit, each with an incremented view number
    pub total_views_add_to_cert: u64,
}
224
225#[async_trait]
226impl<TYPES: NodeType, I: NodeImplementation<TYPES> + std::fmt::Debug>
227    EventTransformerState<TYPES, I> for DishonestDa
228{
229    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
230        vec![event.clone()]
231    }
232
233    async fn send_handler(
234        &mut self,
235        event: &HotShotEvent<TYPES>,
236        _public_key: &TYPES::SignatureKey,
237        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
238        _upgrade_lock: &UpgradeLock<TYPES>,
239        _consensus: OuterConsensus<TYPES>,
240        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
241        _network: Arc<I::Network>,
242    ) -> Vec<HotShotEvent<TYPES>> {
243        if let HotShotEvent::DacSend(cert, sender) = event {
244            self.total_da_certs_sent_from_node += 1;
245            if self
246                .dishonest_at_da_cert_sent_numbers
247                .contains(&self.total_da_certs_sent_from_node)
248            {
249                let mut result = vec![HotShotEvent::DacSend(cert.clone(), sender.clone())];
250                for i in 1..=self.total_views_add_to_cert {
251                    let mut bad_cert = cert.clone();
252                    bad_cert.view_number = cert.view_number + i;
253                    result.push(HotShotEvent::DacSend(bad_cert, sender.clone()));
254                }
255                return result;
256            }
257        }
258        vec![event.clone()]
259    }
260}
261
/// View delay configuration
#[derive(Debug)]
pub struct ViewDelay<TYPES: NodeType> {
    /// How many views the node will delay delivery of received events
    pub number_of_views_to_delay: u64,
    /// Buffered events, keyed by the view number they were received in
    pub events_for_view: HashMap<ViewNumber, Vec<HotShotEvent<TYPES>>>,
    /// The view number at which to stop delaying and deliver events immediately again
    pub stop_view_delay_at_view_number: u64,
}
272
273#[async_trait]
274impl<TYPES: NodeType, I: NodeImplementation<TYPES> + std::fmt::Debug>
275    EventTransformerState<TYPES, I> for ViewDelay<TYPES>
276{
277    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
278        let correct_event = vec![event.clone()];
279        if let Some(view_number) = event.view_number() {
280            if *view_number >= self.stop_view_delay_at_view_number {
281                return correct_event;
282            }
283
284            // add current view or push event to the map if view number has been added
285            let events_for_current_view = self.events_for_view.entry(view_number).or_default();
286            events_for_current_view.push(event.clone());
287
288            // ensure we are actually able to lookback enough views
289            let view_diff = (*view_number).saturating_sub(self.number_of_views_to_delay);
290            if view_diff > 0 {
291                return match self.events_for_view.remove(&ViewNumber::new(view_diff)) {
292                    Some(lookback_events) => lookback_events.clone(),
293                    // we have already return all received events for this view
294                    None => vec![],
295                };
296            }
297        }
298
299        correct_event
300    }
301
302    async fn send_handler(
303        &mut self,
304        event: &HotShotEvent<TYPES>,
305        _public_key: &TYPES::SignatureKey,
306        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
307        _upgrade_lock: &UpgradeLock<TYPES>,
308        _consensus: OuterConsensus<TYPES>,
309        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
310        _network: Arc<I::Network>,
311    ) -> Vec<HotShotEvent<TYPES>> {
312        vec![event.clone()]
313    }
314}
315
/// An `EventTransformerState` that modifies the view number on the vote of a `QuorumVoteSend` event to that of a future view and correctly signs the vote
pub struct DishonestVoting<TYPES: NodeType> {
    /// Number added to the original vote's view number
    pub view_increment: u64,
    /// A function passed to `NetworkEventTaskStateModifier` to modify `NetworkEventTaskState` behaviour.
    pub modifier: Arc<ModifierClosure<TYPES>>,
}
323
#[async_trait]
impl<TYPES: NodeType, I: NodeImplementation<TYPES> + std::fmt::Debug>
    EventTransformerState<TYPES, I> for DishonestVoting<TYPES>
{
    /// Received events are forwarded unchanged.
    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
        vec![event.clone()]
    }

    /// Replace an outgoing `QuorumVoteSend` with a vote re-signed for a view
    /// `view_increment` ahead of the original; other events pass through unchanged.
    async fn send_handler(
        &mut self,
        event: &HotShotEvent<TYPES>,
        public_key: &TYPES::SignatureKey,
        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
        upgrade_lock: &UpgradeLock<TYPES>,
        _consensus: OuterConsensus<TYPES>,
        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
        _network: Arc<I::Network>,
    ) -> Vec<HotShotEvent<TYPES>> {
        if let HotShotEvent::QuorumVoteSend(vote) = event {
            let new_view = vote.view_number + self.view_increment;
            // Re-sign the same vote data at the spoofed future view so the vote
            // still passes signature verification.
            let spoofed_vote = QuorumVote2::<TYPES>::create_signed_vote(
                vote.data.clone(),
                new_view,
                public_key,
                private_key,
                upgrade_lock,
            )
            .await
            .context("Failed to sign vote")
            .unwrap();
            tracing::debug!("Sending Quorum Vote for view: {new_view:?}");
            return vec![HotShotEvent::QuorumVoteSend(spoofed_vote)];
        }
        vec![event.clone()]
    }

    /// Install the network event task wrapped in a `NetworkEventTaskStateModifier`
    /// so `self.modifier` can rewrite network-bound events before transmission.
    fn add_network_event_task(
        &self,
        handle: &mut SystemContextHandle<TYPES, I>,
        network: Arc<<I as NodeImplementation<TYPES>>::Network>,
    ) {
        // Build the standard network task state from the handle's components.
        let network_state: NetworkEventTaskState<_, _, _> = NetworkEventTaskState {
            network,
            view: ViewNumber::genesis(),
            epoch: None,
            membership_coordinator: handle.membership_coordinator.clone(),
            storage: handle.storage(),
            storage_metrics: handle.storage_metrics(),
            consensus: OuterConsensus::new(handle.consensus()),
            upgrade_lock: handle.hotshot.upgrade_lock.clone(),
            transmit_tasks: BTreeMap::new(),
            epoch_height: handle.epoch_height,
            id: handle.hotshot.id,
        };
        // Wrap it so every outbound event is passed through `self.modifier` first.
        let modified_network_state = NetworkEventTaskStateModifier {
            network_event_task_state: network_state,
            modifier: Arc::clone(&self.modifier),
        };
        handle.add_task(modified_network_state);
    }
}
385
impl<TYPES: NodeType> std::fmt::Debug for DishonestVoting<TYPES> {
    /// Manual `Debug` impl showing only `view_increment`; `modifier` is omitted
    /// (presumably because the closure type cannot derive `Debug` — hence
    /// `finish_non_exhaustive`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DishonestVoting")
            .field("view_increment", &self.view_increment)
            .finish_non_exhaustive()
    }
}
393
#[derive(Debug)]
/// An `EventTransformerState` that will send a vote for a bad proposal
pub struct DishonestVoter<TYPES: NodeType> {
    /// Collect all votes the node sends; the latest one is re-signed when voting for a dishonest proposal
    pub votes_sent: Vec<QuorumVote2<TYPES>>,
    /// Shared state with view numbers that leaders were dishonest at
    pub dishonest_proposal_view_numbers: Arc<RwLock<HashSet<ViewNumber>>>,
}
402
403#[async_trait]
404impl<TYPES: NodeType, I: NodeImplementation<TYPES> + std::fmt::Debug>
405    EventTransformerState<TYPES, I> for DishonestVoter<TYPES>
406{
407    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
408        vec![event.clone()]
409    }
410
411    async fn send_handler(
412        &mut self,
413        event: &HotShotEvent<TYPES>,
414        public_key: &TYPES::SignatureKey,
415        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
416        upgrade_lock: &UpgradeLock<TYPES>,
417        _consensus: OuterConsensus<TYPES>,
418        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
419        _network: Arc<I::Network>,
420    ) -> Vec<HotShotEvent<TYPES>> {
421        match event {
422            HotShotEvent::QuorumProposalRecv(proposal, _sender) => {
423                // Check if view is a dishonest proposal, if true send a vote
424                let dishonest_proposals = self.dishonest_proposal_view_numbers.read().await;
425                if dishonest_proposals.contains(&proposal.data.view_number()) {
426                    // Create a vote using data from most recent vote and the current event number
427                    // We wont update internal consensus state for this Byzantine replica but we are at least
428                    // Going to send a vote to the next honest leader
429                    let vote = QuorumVote2::<TYPES>::create_signed_vote(
430                        self.votes_sent.last().unwrap().data.clone(),
431                        event.view_number().unwrap(),
432                        public_key,
433                        private_key,
434                        upgrade_lock,
435                    )
436                    .await
437                    .context("Failed to sign vote")
438                    .unwrap();
439                    return vec![HotShotEvent::QuorumVoteSend(vote)];
440                }
441            },
442            HotShotEvent::TimeoutVoteSend(vote) => {
443                // Check if this view was a dishonest proposal view, if true dont send timeout
444                let dishonest_proposals = self.dishonest_proposal_view_numbers.read().await;
445                if dishonest_proposals.contains(&vote.view_number) {
446                    // We craft the vote upon `QuorumProposalRecv` and send out a vote.
447                    // So, dont send the timeout to the next leader from this byzantine replica
448                    return vec![];
449                }
450            },
451            HotShotEvent::QuorumVoteSend(vote) => {
452                self.votes_sent.push(vote.clone());
453            },
454            _ => {},
455        }
456        vec![event.clone()]
457    }
458}
459
/// Implements a byzantine behaviour which aims at splitting the honest nodes during view sync protocol
/// so that the honest nodes cannot view sync on their own.
///
/// Requirement: The scenario requires at least 4 dishonest nodes so total number of nodes need to be
/// at least 13.
///
/// Scenario:
/// 1. The first dishonest leader sends a proposal to only f + 1 honest nodes and f dishonest nodes
/// 2. The second dishonest leader sends a proposal to only f + 1 honest nodes.
/// 3. All dishonest nodes do not send timeout votes.
/// 4. The first dishonest relay sends a correctly formed precommit certificate to f + 1 honest nodes
///    and f dishonest nodes.
/// 5. The first dishonest relay sends a correctly formed commit certificate to only one honest node.
/// 6. The second dishonest relay behaves in the same way as the first dishonest relay.
#[derive(Debug)]
pub struct DishonestViewSyncRelay {
    /// Views in which proposals and certificates are delivered only to the node subsets below
    pub dishonest_proposal_view_numbers: Vec<u64>,
    /// Views in which this node withholds its quorum, timeout and pre-commit votes
    pub dishonest_vote_view_numbers: Vec<u64>,
    /// Node ids of the first group of f honest nodes (not referenced in `send_handler`)
    pub first_f_honest_nodes: Vec<u64>,
    /// Node ids of the second group of f honest nodes
    pub second_f_honest_nodes: Vec<u64>,
    /// Node id of the single extra honest node (completes the f + 1 honest subset)
    pub one_honest_node: u64,
    /// Node ids of the f dishonest nodes
    pub f_dishonest_nodes: Vec<u64>,
}
483
#[async_trait]
impl<TYPES: NodeType, I: NodeImplementation<TYPES>> EventTransformerState<TYPES, I>
    for DishonestViewSyncRelay
{
    /// Intercept outgoing events: on flagged views, serialize proposals and view
    /// sync certificates by hand and direct-message them only to chosen node
    /// subsets (returning `vec![]` so the normal broadcast path is skipped), and
    /// withhold this node's votes entirely.
    async fn send_handler(
        &mut self,
        event: &HotShotEvent<TYPES>,
        _public_key: &TYPES::SignatureKey,
        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
        upgrade_lock: &UpgradeLock<TYPES>,
        _consensus: OuterConsensus<TYPES>,
        membership_coordinator: EpochMembershipCoordinator<TYPES>,
        network: Arc<I::Network>,
    ) -> Vec<HotShotEvent<TYPES>> {
        match event {
            // Dishonest leader: deliver the proposal only to a subset of nodes.
            HotShotEvent::QuorumProposalSend(proposal, sender) => {
                let view_number = proposal.data.view_number();
                if !self.dishonest_proposal_view_numbers.contains(&view_number) {
                    return vec![event.clone()];
                }
                // Build the wire message in the pre- or post-epoch format,
                // matching what the honest network task would produce.
                let message_kind = if upgrade_lock.epochs_enabled(view_number) {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::Proposal2(convert_proposal(proposal.clone())),
                    ))
                } else {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::Proposal(convert_proposal(proposal.clone())),
                    ))
                };
                let message = Message {
                    sender: sender.clone(),
                    kind: message_kind,
                };
                let serialized_message = match upgrade_lock.serialize(&message) {
                    Ok(serialized) => serialized,
                    Err(e) => {
                        panic!("Failed to serialize message: {e}");
                    },
                };
                let second_f_honest_it = self.second_f_honest_nodes.iter();
                let f_dishonest_it = self.f_dishonest_nodes.iter();
                let one_honest_it = once(&self.one_honest_node);
                let chained_it: Box<dyn Iterator<Item = &u64> + Send> =
                    if &*view_number == self.dishonest_proposal_view_numbers.first().unwrap() {
                        // The first dishonest proposal is sent to f + 1 honest nodes and f dishonest nodes
                        Box::new(second_f_honest_it.chain(one_honest_it.chain(f_dishonest_it)))
                    } else {
                        // All other dishonest proposals are sent to f + 1 honest nodes
                        Box::new(second_f_honest_it.chain(one_honest_it))
                    };
                for node_id in chained_it {
                    // NOTE(review): the node id is wrapped in a `ViewNumber` and fed to
                    // `leader()` to resolve an id to a public key — this assumes the test
                    // membership's leader rotation maps view -> node id directly; confirm.
                    let dummy_view = ViewNumber::new(*node_id);
                    let Ok(node) = membership_coordinator
                        .membership()
                        .read()
                        .await
                        .leader(dummy_view, proposal.data.epoch())
                    else {
                        panic!(
                            "Failed to find leader for view {} and epoch {:?}",
                            dummy_view,
                            proposal.data.epoch()
                        );
                    };
                    let transmit_result = network
                        .direct_message(
                            view_number.u64().into(),
                            serialized_message.clone(),
                            node.clone(),
                        )
                        .await;
                    match transmit_result {
                        Ok(()) => tracing::info!(
                            "Sent proposal for view {} to node {}",
                            proposal.data.view_number(),
                            node_id
                        ),
                        Err(e) => panic!("Failed to send message task: {e:?}"),
                    }
                }
                // Suppress the normal broadcast; we delivered by hand above.
                vec![]
            },
            // Withhold this node's quorum vote on flagged views.
            HotShotEvent::QuorumVoteSend(vote) => {
                if !self.dishonest_vote_view_numbers.contains(&vote.view_number) {
                    return vec![event.clone()];
                }
                vec![]
            },
            // Withhold this node's timeout vote on flagged views (scenario step 3).
            HotShotEvent::TimeoutVoteSend(vote) => {
                if !self.dishonest_vote_view_numbers.contains(&vote.view_number) {
                    return vec![event.clone()];
                }
                vec![]
            },
            // Withhold this node's view sync pre-commit vote on flagged views.
            HotShotEvent::ViewSyncPreCommitVoteSend(vote) => {
                if !self.dishonest_vote_view_numbers.contains(&vote.view_number) {
                    return vec![event.clone()];
                }
                vec![]
            },
            // Dishonest relay: deliver the pre-commit certificate only to
            // f + 1 honest nodes and f dishonest nodes (scenario step 4).
            HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, sender) => {
                let view_number = certificate.data.round;
                if !self.dishonest_proposal_view_numbers.contains(&view_number) {
                    return vec![event.clone()];
                }
                let message_kind = if upgrade_lock.epochs_enabled(view_number) {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::ViewSyncPreCommitCertificate2(certificate.clone()),
                    ))
                } else {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::ViewSyncPreCommitCertificate(
                            certificate.clone().to_vsc(),
                        ),
                    ))
                };
                let message = Message {
                    sender: sender.clone(),
                    kind: message_kind,
                };
                let serialized_message = match upgrade_lock.serialize(&message) {
                    Ok(serialized) => serialized,
                    Err(e) => {
                        panic!("Failed to serialize message: {e}");
                    },
                };
                let second_f_honest_it = self.second_f_honest_nodes.iter();
                let f_dishonest_it = self.f_dishonest_nodes.iter();
                let one_honest_it = once(&self.one_honest_node);
                // The pre-commit certificate is sent to f + 1 honest nodes and f dishonest nodes
                let chained_it: Box<dyn Iterator<Item = &u64> + Send> =
                    Box::new(second_f_honest_it.chain(one_honest_it.chain(f_dishonest_it)));
                for node_id in chained_it {
                    // See NOTE(review) above about mapping node ids via `leader()`.
                    let dummy_view = ViewNumber::new(*node_id);
                    let Ok(node) = membership_coordinator
                        .membership()
                        .read()
                        .await
                        .leader(dummy_view, certificate.epoch())
                    else {
                        panic!(
                            "Failed to find leader for view {} and epoch {:?}",
                            dummy_view,
                            certificate.epoch()
                        );
                    };
                    let transmit_result = network
                        .direct_message(
                            view_number.u64().into(),
                            serialized_message.clone(),
                            node.clone(),
                        )
                        .await;
                    match transmit_result {
                        Ok(()) => tracing::info!(
                            "Sent ViewSyncPreCommitCertificate for view {} to node {}",
                            view_number,
                            node_id
                        ),
                        Err(e) => panic!("Failed to send message task: {e:?}"),
                    }
                }
                vec![]
            },
            // Dishonest relay: deliver the commit certificate to a single
            // honest node only (scenario step 5).
            HotShotEvent::ViewSyncCommitCertificateSend(certificate, sender) => {
                let view_number = certificate.data.round;
                if !self.dishonest_proposal_view_numbers.contains(&view_number) {
                    return vec![event.clone()];
                }
                let message_kind = if upgrade_lock.epochs_enabled(view_number) {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::ViewSyncCommitCertificate2(certificate.clone()),
                    ))
                } else {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::ViewSyncCommitCertificate(
                            certificate.clone().to_vsc(),
                        ),
                    ))
                };
                let message = Message {
                    sender: sender.clone(),
                    kind: message_kind,
                };
                let serialized_message = match upgrade_lock.serialize(&message) {
                    Ok(serialized) => serialized,
                    Err(e) => {
                        panic!("Failed to serialize message: {e}");
                    },
                };
                let one_honest_it = once(&self.one_honest_node);
                // The commit certificate is sent to 1 honest node
                let chained_it: Box<dyn Iterator<Item = &u64> + Send> = Box::new(one_honest_it);
                for node_id in chained_it {
                    // See NOTE(review) above about mapping node ids via `leader()`.
                    let dummy_view = ViewNumber::new(*node_id);
                    let Ok(node) = membership_coordinator
                        .membership()
                        .read()
                        .await
                        .leader(dummy_view, certificate.epoch())
                    else {
                        panic!(
                            "Failed to find leader for view {} and epoch {:?}",
                            dummy_view,
                            certificate.epoch()
                        );
                    };
                    let transmit_result = network
                        .direct_message(
                            view_number.u64().into(),
                            serialized_message.clone(),
                            node.clone(),
                        )
                        .await;
                    match transmit_result {
                        Ok(()) => tracing::info!(
                            "Sent ViewSyncCommitCertificate for view {} to node {}",
                            view_number,
                            node_id
                        ),
                        Err(e) => panic!("Failed to send message task: {e:?}"),
                    }
                }
                vec![]
            },
            _ => vec![event.clone()],
        }
    }

    /// Received events are forwarded unchanged.
    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
        vec![event.clone()]
    }
}
717
#[derive(Debug)]
/// An `EventTransformerState` that stops participating from a given view onward and
/// sends view sync pre-commit votes carrying a modified epoch number
pub struct DishonestViewSyncWrongEpoch {
    /// First view number at which this node turns byzantine
    pub first_dishonest_view_number: u64,
    /// Applied to the vote's epoch to produce the wrong epoch to vote with
    pub epoch_modifier: fn(EpochNumber) -> EpochNumber,
}
723
#[async_trait]
impl<TYPES: NodeType, I: NodeImplementation<TYPES>> EventTransformerState<TYPES, I>
    for DishonestViewSyncWrongEpoch
{
    /// From `first_dishonest_view_number` onward: drop outgoing proposals, quorum
    /// votes and timeout votes, and replace view sync pre-commit votes with ones
    /// re-signed for an epoch altered by `epoch_modifier`.
    async fn send_handler(
        &mut self,
        event: &HotShotEvent<TYPES>,
        public_key: &TYPES::SignatureKey,
        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
        upgrade_lock: &UpgradeLock<TYPES>,
        _consensus: OuterConsensus<TYPES>,
        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
        _network: Arc<I::Network>,
    ) -> Vec<HotShotEvent<TYPES>> {
        match event {
            // Before the dishonest view: behave honestly. After: suppress proposals.
            HotShotEvent::QuorumProposalSend(proposal, _) => {
                if self.first_dishonest_view_number > proposal.data.view_number().u64() {
                    return vec![event.clone()];
                }
                vec![]
            },
            // Likewise suppress quorum votes once dishonest.
            HotShotEvent::QuorumVoteSend(vote) => {
                if self.first_dishonest_view_number > vote.view_number().u64() {
                    return vec![event.clone()];
                }
                vec![]
            },
            // And timeout votes, forcing view sync without our participation.
            HotShotEvent::TimeoutVoteSend(vote) => {
                if self.first_dishonest_view_number > vote.view_number().u64() {
                    return vec![event.clone()];
                }
                vec![]
            },
            HotShotEvent::ViewSyncPreCommitVoteSend(vote) => {
                if self.first_dishonest_view_number > vote.view_number().u64() {
                    return vec![event.clone()];
                }
                let view_number = vote.data.round;
                // Re-sign the pre-commit vote: with epochs enabled the epoch is
                // rewritten through `epoch_modifier`; otherwise a pre-epoch vote
                // (no epoch field) is built and upgraded to the `2` format.
                let vote = if upgrade_lock.epochs_enabled(view_number) {
                    ViewSyncPreCommitVote2::<TYPES>::create_signed_vote(
                        ViewSyncPreCommitData2 {
                            relay: 0,
                            round: view_number,
                            epoch: vote.data.epoch.map(self.epoch_modifier),
                        },
                        view_number,
                        public_key,
                        private_key,
                        upgrade_lock,
                    )
                    .await
                    .context("Failed to sign pre commit vote!")
                    .unwrap()
                } else {
                    let vote = ViewSyncPreCommitVote::<TYPES>::create_signed_vote(
                        ViewSyncPreCommitData {
                            relay: 0,
                            round: view_number,
                        },
                        view_number,
                        public_key,
                        private_key,
                        upgrade_lock,
                    )
                    .await
                    .context("Failed to sign pre commit vote!")
                    .unwrap();
                    vote.to_vote2()
                };
                vec![HotShotEvent::ViewSyncPreCommitVoteSend(vote)]
            },
            _ => vec![event.clone()],
        }
    }

    /// Received events are forwarded unchanged.
    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
        vec![event.clone()]
    }
}