hotshot_types/data/vid_disperse.rs

// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
// This file is part of the HotShot repository.

// You should have received a copy of the MIT License
// along with the HotShot repository. If not, see <https://mit-license.org/>.

//! This module provides types for VID disperse related data structures.
//!
//! There are three families of VID disperse related structs:
//!
//! 1. `ADVZ*`: VID V0, the original VID scheme. It guarantees recovery but is inefficient.
//! 2. `AvidM*`: VID V1, a more efficient VID scheme used after the epoch upgrade. It does not
//!    guarantee recovery: a VID V1 commitment may correspond to junk data, in which case a
//!    proof of incorrect encoding is produced.
//! 3. `AvidmGf2*`: VID V2, almost the same as VID V1 but with a much more efficient recovery
//!    implementation.

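// Each family below provides a `*Disperse` type (the full dispersal: every recipient's share plus
// the VID common data) and a `*DisperseShare` type (the slice of that dispersal addressed to a
// single storage node).
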
use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, time::Duration};

use alloy::primitives::U256;
use hotshot_utils::anytrace::*;
use jf_advz::{VidDisperse as JfVidDisperse, VidScheme};
use serde::{Deserialize, Serialize};
use tokio::{task::spawn_blocking, time::Instant};

use super::ns_table::parse_ns_table;
use crate::{
    epoch_membership::{EpochMembership, EpochMembershipCoordinator},
    impl_has_epoch, impl_has_none_epoch,
    message::Proposal,
    simple_vote::HasEpoch,
    stake_table::HSStakeTable,
    traits::{
        block_contents::EncodeBytes,
        node_implementation::NodeType,
        signature_key::{SignatureKey, StakeTableEntryType},
        BlockPayload,
    },
    vid::{
        advz::{advz_scheme, ADVZCommitment, ADVZCommon, ADVZScheme, ADVZShare},
        avidm::{init_avidm_param, AvidMCommitment, AvidMCommon, AvidMScheme, AvidMShare},
        avidm_gf2::{
            init_avidm_gf2_param, AvidmGf2Commitment, AvidmGf2Common, AvidmGf2Scheme, AvidmGf2Share,
        },
    },
    vote::HasViewNumber,
};

impl_has_epoch!(
    ADVZDisperse<TYPES>,
    AvidMDisperse<TYPES>,
    AvidMDisperseShare<TYPES>,
    AvidmGf2Disperse<TYPES>,
    AvidmGf2DisperseShare<TYPES>
);

/// ADVZ dispersal data
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
pub struct ADVZDisperse<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: TYPES::View,
    /// Epoch the data of this proposal belongs to
    pub epoch: Option<TYPES::Epoch>,
    /// Epoch to which the recipients of this VID belong
    pub target_epoch: Option<TYPES::Epoch>,
    /// VidCommitment calculated based on the number of nodes in `target_epoch`.
    pub payload_commitment: ADVZCommitment,
    /// A storage node's key and its corresponding VID share
    pub shares: BTreeMap<TYPES::SignatureKey, ADVZShare>,
    /// VID common data sent to all storage nodes
    pub common: ADVZCommon,
}

impl<TYPES: NodeType> HasViewNumber<TYPES> for ADVZDisperse<TYPES> {
    fn view_number(&self) -> TYPES::View {
        self.view_number
    }
}

impl<TYPES: NodeType> ADVZDisperse<TYPES> {
    /// Create VID dispersal from a specified membership for the target epoch.
    /// Shares are assigned to stake table entries in order.
    async fn from_membership(
        view_number: TYPES::View,
        mut vid_disperse: JfVidDisperse<ADVZScheme>,
        membership: &EpochMembershipCoordinator<TYPES>,
        target_epoch: Option<TYPES::Epoch>,
        data_epoch: Option<TYPES::Epoch>,
    ) -> Result<Self> {
        let shares = membership
            .stake_table_for_epoch(target_epoch)
            .await?
            .stake_table()
            .await
            .iter()
            .map(|entry| entry.stake_table_entry.public_key())
            .map(|node| (node.clone(), vid_disperse.shares.remove(0)))
            .collect();

        Ok(Self {
            view_number,
            shares,
            common: vid_disperse.common,
            payload_commitment: vid_disperse.commit,
            epoch: data_epoch,
            target_epoch,
        })
    }

    /// Calculate the VID disperse information from the payload, given a view, epoch and membership.
    /// If the sender epoch is missing, it is assumed to be the same as the target epoch.
    ///
    /// # Errors
    /// Returns an error if the disperse or commitment calculation fails
    #[allow(clippy::panic)]
    pub async fn calculate_vid_disperse(
        payload: &TYPES::BlockPayload,
        membership: &EpochMembershipCoordinator<TYPES>,
        view: TYPES::View,
        target_epoch: Option<TYPES::Epoch>,
        data_epoch: Option<TYPES::Epoch>,
    ) -> Result<(Self, Duration)> {
        let num_nodes = membership
            .stake_table_for_epoch(target_epoch)
            .await?
            .total_nodes()
            .await;

        let txns = payload.encode();

        let now = Instant::now();
        let vid_disperse = spawn_blocking(move || advz_scheme(num_nodes).disperse(&txns))
            .await
            .wrap()
            .context(error!("Join error"))?
            .wrap()
            .context(|err| error!("Failed to calculate VID disperse. Error: {err}"))?;
        let advz_scheme_duration = now.elapsed();

        Ok((
            Self::from_membership(view, vid_disperse, membership, target_epoch, data_epoch).await?,
            advz_scheme_duration,
        ))
    }

    /// This function splits a VID disperse into individual shares.
    pub fn to_shares(self) -> Vec<ADVZDisperseShare<TYPES>> {
        self.shares
            .into_iter()
            .map(|(recipient_key, share)| ADVZDisperseShare {
                share,
                recipient_key,
                view_number: self.view_number,
                common: self.common.clone(),
                payload_commitment: self.payload_commitment,
            })
            .collect()
    }

    /// Split a VID disperse into a share proposal for each recipient.
    pub fn to_share_proposals(
        self,
        signature: &<<TYPES as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
    ) -> Vec<Proposal<TYPES, ADVZDisperseShare<TYPES>>> {
        self.shares
            .into_iter()
            .map(|(recipient_key, share)| Proposal {
                data: ADVZDisperseShare {
                    share,
                    recipient_key,
                    view_number: self.view_number,
                    payload_commitment: self.payload_commitment,
                    common: self.common.clone(),
                },
                signature: signature.clone(),
                _pd: PhantomData,
            })
            .collect()
    }

    /// Returns the payload length in bytes.
    pub fn payload_byte_len(&self) -> u32 {
        ADVZScheme::get_payload_byte_len(&self.common)
    }
}
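
// Sketch of the producer-side ADVZ flow. `MyTypes`, `payload`, `membership`, `signature`, and the
// view/epoch bindings are hypothetical values supplied by the caller:
//
//     let (disperse, elapsed) = ADVZDisperse::<MyTypes>::calculate_vid_disperse(
//         &payload, &membership, view, target_epoch, data_epoch,
//     )
//     .await?;
//     let proposals = disperse.to_share_proposals(&signature);
//
// Each resulting `Proposal<MyTypes, ADVZDisperseShare<MyTypes>>` is then sent to its
// `recipient_key`.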

#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
/// ADVZ share and associated metadata for a single node
pub struct ADVZDisperseShare<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: TYPES::View,
    /// Block payload commitment
    pub payload_commitment: ADVZCommitment,
    /// A storage node's key and its corresponding VID share
    pub share: ADVZShare,
    /// VID common data sent to all storage nodes
    pub common: ADVZCommon,
    /// a public key of the share recipient
    pub recipient_key: TYPES::SignatureKey,
}

impl_has_none_epoch!(ADVZDisperseShare<TYPES>);

impl<TYPES: NodeType> HasViewNumber<TYPES> for ADVZDisperseShare<TYPES> {
    fn view_number(&self) -> TYPES::View {
        self.view_number
    }
}

impl<TYPES: NodeType> ADVZDisperseShare<TYPES> {
    /// Consume `self` and return a `Proposal`
    pub fn to_proposal(
        self,
        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
    ) -> Option<Proposal<TYPES, Self>> {
        let Ok(signature) =
            TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref())
        else {
            tracing::error!("VID: failed to sign dispersal share payload");
            return None;
        };
        Some(Proposal {
            signature,
            _pd: PhantomData,
            data: self,
        })
    }

    /// Create an `ADVZDisperse` out of an iterator over `ADVZDisperseShare`s
    pub fn to_advz_disperse<'a, I>(mut it: I) -> Option<ADVZDisperse<TYPES>>
    where
        I: Iterator<Item = &'a Self>,
    {
        let first_vid_disperse_share = it.next()?.clone();
        let mut share_map = BTreeMap::new();
        share_map.insert(
            first_vid_disperse_share.recipient_key,
            first_vid_disperse_share.share,
        );
        let mut vid_disperse = ADVZDisperse {
            view_number: first_vid_disperse_share.view_number,
            epoch: None,
            target_epoch: None,
            payload_commitment: first_vid_disperse_share.payload_commitment,
            common: first_vid_disperse_share.common,
            shares: share_map,
        };
        // `Iterator::map` is lazy, so iterate eagerly to insert every remaining share.
        for vid_disperse_share in it {
            vid_disperse.shares.insert(
                vid_disperse_share.recipient_key.clone(),
                vid_disperse_share.share.clone(),
            );
        }
        Some(vid_disperse)
    }

    /// Internally verify the share given necessary information
    pub fn verify(&self, total_weight: usize) -> bool {
        advz_scheme(total_weight)
            .verify_share(&self.share, &self.common, &self.payload_commitment)
            .is_ok_and(|r| r.is_ok())
    }

    /// Returns the payload length in bytes.
    pub fn payload_byte_len(&self) -> u32 {
        ADVZScheme::get_payload_byte_len(&self.common)
    }
}
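
// Sketch of the receiving side, assuming a node holding `share: ADVZDisperseShare<MyTypes>`, its
// `private_key`, and the epoch's `total_weight` (all hypothetical bindings):
//
//     if share.verify(total_weight) {
//         if let Some(proposal) = share.clone().to_proposal(&private_key) {
//             // store or forward the signed share
//         }
//     }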

/// AvidM dispersal data
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
pub struct AvidMDisperse<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: TYPES::View,
    /// Epoch the data of this proposal belongs to
    pub epoch: Option<TYPES::Epoch>,
    /// Epoch to which the recipients of this VID belong
    pub target_epoch: Option<TYPES::Epoch>,
    /// VidCommitment calculated based on the number of nodes in `target_epoch`.
    pub payload_commitment: AvidMCommitment,
    /// A storage node's key and its corresponding VID share
    pub shares: BTreeMap<TYPES::SignatureKey, AvidMShare>,
    /// Length of payload in bytes
    pub payload_byte_len: usize,
    /// VID common data sent to all storage nodes
    pub common: AvidMCommon,
}

impl<TYPES: NodeType> HasViewNumber<TYPES> for AvidMDisperse<TYPES> {
    fn view_number(&self) -> TYPES::View {
        self.view_number
    }
}

/// The target total stake to scale to for VID.
pub const VID_TARGET_TOTAL_STAKE: u32 = 1000;

/// The weights and total weight used in VID calculations
struct Weights {
    // weights, in stake table order
    weights: Vec<u32>,

    // total weight
    total_weight: usize,
}

/// Compute the total VID weight for the given stake table: the raw total stake before the epoch
/// upgrade (`epoch` is `None`), or the approximated, scaled total weight afterwards.
pub fn vid_total_weight<TYPES: NodeType>(
    stake_table: &HSStakeTable<TYPES>,
    epoch: Option<TYPES::Epoch>,
) -> usize {
    if epoch.is_none() {
        stake_table
            .iter()
            .fold(U256::ZERO, |acc, entry| {
                acc + entry.stake_table_entry.stake()
            })
            .to::<usize>()
    } else {
        approximate_weights(stake_table).total_weight
    }
}

/// Scale the stake table down to integer weights whose total is near `VID_TARGET_TOTAL_STAKE`;
/// stake tables whose total stake is at most the target are used as-is.
fn approximate_weights<TYPES: NodeType>(stake_table: &HSStakeTable<TYPES>) -> Weights {
    let total_stake = stake_table.iter().fold(U256::ZERO, |acc, entry| {
        acc + entry.stake_table_entry.stake()
    });

    let mut total_weight: usize = 0;

    // don't attempt to scale if the total stake is small enough
    if total_stake <= U256::from(VID_TARGET_TOTAL_STAKE) {
        let weights = stake_table
            .iter()
            .map(|entry| entry.stake_table_entry.stake().to::<u32>())
            .collect();

        // Note: this panics if `total_stake` exceeds `usize::MAX`, but this shouldn't happen.
        total_weight = total_stake.to::<usize>();

        Weights {
            weights,
            total_weight,
        }
    } else {
        let weights = stake_table
            .iter()
            .map(|entry| {
                let weight: U256 = ((entry.stake_table_entry.stake()
                    * U256::from(VID_TARGET_TOTAL_STAKE))
                    / total_stake)
                    + U256::ONE;

                // Note: this panics if `weight` exceeds `usize::MAX`, but this shouldn't happen.
                total_weight += weight.to::<usize>();

                // Note: this panics if `weight` exceeds `u32::MAX`, but this shouldn't happen
                // and would likely cause a stack overflow in the VID calculation anyway
                weight.to::<u32>()
            })
            .collect();

        Weights {
            weights,
            total_weight,
        }
    }
}
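
// Worked example with hypothetical stakes: if `total_stake` is 3_000_000 and one entry holds
// 600_000, its scaled weight is 600_000 * 1000 / 3_000_000 + 1 = 201 (integer division). The
// `+ 1` gives every entry at least one unit of weight, so `total_weight` can slightly exceed
// `VID_TARGET_TOTAL_STAKE`.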

impl<TYPES: NodeType> AvidMDisperse<TYPES> {
    /// Create VID dispersal from a specified membership for the target epoch.
    /// Shares are assigned to stake table entries in order.
    async fn from_membership(
        view_number: TYPES::View,
        commit: AvidMCommitment,
        shares: &[AvidMShare],
        common: AvidMCommon,
        membership: &EpochMembership<TYPES>,
        target_epoch: Option<TYPES::Epoch>,
        data_epoch: Option<TYPES::Epoch>,
    ) -> Result<Self> {
        let payload_byte_len = shares[0].payload_byte_len();
        let shares = membership
            .coordinator
            .stake_table_for_epoch(target_epoch)
            .await?
            .stake_table()
            .await
            .iter()
            .map(|entry| entry.stake_table_entry.public_key())
            .zip(shares)
            .map(|(node, share)| (node.clone(), share.clone()))
            .collect();

        Ok(Self {
            view_number,
            shares,
            payload_commitment: commit,
            epoch: data_epoch,
            target_epoch,
            payload_byte_len,
            common,
        })
    }

    /// Calculate the VID disperse information from the payload, given a view, epoch and membership.
    /// If the sender epoch is missing, it is assumed to be the same as the target epoch.
    ///
    /// # Errors
    /// Returns an error if the disperse or commitment calculation fails
    #[allow(clippy::panic)]
    #[allow(clippy::single_range_in_vec_init)]
    pub async fn calculate_vid_disperse(
        payload: &TYPES::BlockPayload,
        membership: &EpochMembershipCoordinator<TYPES>,
        view: TYPES::View,
        target_epoch: Option<TYPES::Epoch>,
        data_epoch: Option<TYPES::Epoch>,
        metadata: &<TYPES::BlockPayload as BlockPayload<TYPES>>::Metadata,
    ) -> Result<(Self, Duration)> {
        let target_mem = membership.stake_table_for_epoch(target_epoch).await?;
        let stake_table = target_mem.stake_table().await;
        let approximate_weights = approximate_weights(&stake_table);

        let txns = payload.encode();
        let num_txns = txns.len();

        let avidm_param = init_avidm_param(approximate_weights.total_weight)?;
        let common = avidm_param.clone();

        let ns_table = parse_ns_table(num_txns, &metadata.encode());
        let ns_table_clone = ns_table.clone();

        let now = Instant::now();
        let (commit, shares) = spawn_blocking(move || {
            AvidMScheme::ns_disperse(
                &avidm_param,
                &approximate_weights.weights,
                &txns,
                ns_table_clone,
            )
        })
        .await
        .wrap()
        .context(error!("Join error"))?
        .wrap()
        .context(|err| error!("Failed to calculate VID disperse. Error: {err}"))?;
        let ns_disperse_duration = now.elapsed();

        Ok((
            Self::from_membership(
                view,
                commit,
                &shares,
                common,
                &target_mem,
                target_epoch,
                data_epoch,
            )
            .await?,
            ns_disperse_duration,
        ))
    }

    /// This function splits a VID disperse into individual shares.
    pub fn to_shares(self) -> Vec<AvidMDisperseShare<TYPES>> {
        self.shares
            .into_iter()
            .map(|(recipient_key, share)| AvidMDisperseShare {
                share,
                recipient_key,
                view_number: self.view_number,
                payload_commitment: self.payload_commitment,
                epoch: self.epoch,
                target_epoch: self.target_epoch,
                common: self.common.clone(),
            })
            .collect()
    }

    /// Split a VID disperse into a share proposal for each recipient.
    pub fn to_share_proposals(
        self,
        signature: &<<TYPES as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
    ) -> Vec<Proposal<TYPES, AvidMDisperseShare<TYPES>>> {
        self.shares
            .into_iter()
            .map(|(recipient_key, share)| Proposal {
                data: AvidMDisperseShare {
                    share,
                    recipient_key,
                    view_number: self.view_number,
                    payload_commitment: self.payload_commitment,
                    epoch: self.epoch,
                    target_epoch: self.target_epoch,
                    common: self.common.clone(),
                },
                signature: signature.clone(),
                _pd: PhantomData,
            })
            .collect()
    }

    /// Construct a VID disperse from an iterator of disperse shares.
    pub fn try_from_shares<'a, I>(mut it: I) -> Option<Self>
    where
        I: Iterator<Item = &'a AvidMDisperseShare<TYPES>>,
    {
        let first_vid_disperse_share = it.next()?.clone();
        let payload_byte_len = first_vid_disperse_share.share.payload_byte_len();
        let mut share_map = BTreeMap::new();
        share_map.insert(
            first_vid_disperse_share.recipient_key,
            first_vid_disperse_share.share,
        );
        let mut vid_disperse = Self {
            view_number: first_vid_disperse_share.view_number,
            epoch: first_vid_disperse_share.epoch,
            target_epoch: first_vid_disperse_share.target_epoch,
            payload_commitment: first_vid_disperse_share.payload_commitment,
            shares: share_map,
            payload_byte_len,
            common: first_vid_disperse_share.common,
        };
        // `Iterator::map` is lazy, so iterate eagerly to insert every remaining share.
        for vid_disperse_share in it {
            vid_disperse.shares.insert(
                vid_disperse_share.recipient_key.clone(),
                vid_disperse_share.share.clone(),
            );
        }
        Some(vid_disperse)
    }

    /// Returns the payload length in bytes.
    pub fn payload_byte_len(&self) -> u32 {
        self.payload_byte_len as u32
    }
}
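
// Sketch of the AvidM producer-side flow; unlike ADVZ, the payload `metadata` is also required so
// the namespace table can be parsed for namespaced dispersal (hypothetical bindings):
//
//     let (disperse, elapsed) = AvidMDisperse::<MyTypes>::calculate_vid_disperse(
//         &payload, &membership, view, target_epoch, data_epoch, &metadata,
//     )
//     .await?;
//     let shares = disperse.to_shares();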

#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
/// VID share and associated metadata for a single node
pub struct AvidMDisperseShare<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: TYPES::View,
    /// The epoch number this VID data belongs to
    pub epoch: Option<TYPES::Epoch>,
    /// The epoch number to which the recipient of this VID belongs
    pub target_epoch: Option<TYPES::Epoch>,
    /// Block payload commitment
    pub payload_commitment: AvidMCommitment,
    /// A storage node's key and its corresponding VID share
    pub share: AvidMShare,
    /// a public key of the share recipient
    pub recipient_key: TYPES::SignatureKey,
    /// VID common data sent to all storage nodes
    pub common: AvidMCommon,
}

impl<TYPES: NodeType> HasViewNumber<TYPES> for AvidMDisperseShare<TYPES> {
    fn view_number(&self) -> TYPES::View {
        self.view_number
    }
}

impl<TYPES: NodeType> AvidMDisperseShare<TYPES> {
    /// Consume `self` and return a `Proposal`
    pub fn to_proposal(
        self,
        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
    ) -> Option<Proposal<TYPES, Self>> {
        let Ok(signature) =
            TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref())
        else {
            tracing::error!("VID: failed to sign dispersal share payload");
            return None;
        };
        Some(Proposal {
            signature,
            _pd: PhantomData,
            data: self,
        })
    }

    /// Returns the payload length in bytes.
    pub fn payload_byte_len(&self) -> u32 {
        self.share.payload_byte_len() as u32
    }

    /// Internally verify the share given necessary information
    pub fn verify(&self, _total_weight: usize) -> bool {
        AvidMScheme::verify_share(&self.common, &self.payload_commitment, &self.share)
            .is_ok_and(|r| r.is_ok())
    }
}
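
// Note: the `_total_weight` argument of `verify` above is unused; AvidM share verification needs
// only the common data, the commitment, and the share itself.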

/// AvidmGf2 dispersal data
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
pub struct AvidmGf2Disperse<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: TYPES::View,
    /// Epoch the data of this proposal belongs to
    pub epoch: Option<TYPES::Epoch>,
    /// Epoch to which the recipients of this VID belong
    pub target_epoch: Option<TYPES::Epoch>,
    /// VidCommitment calculated based on the number of nodes in `target_epoch`.
    pub payload_commitment: AvidmGf2Commitment,
    /// A storage node's key and its corresponding VID share
    pub shares: BTreeMap<TYPES::SignatureKey, AvidmGf2Share>,
    /// Length of payload in bytes
    pub payload_byte_len: usize,
    /// VID common data sent to all storage nodes
    pub common: AvidmGf2Common,
}

impl<TYPES: NodeType> HasViewNumber<TYPES> for AvidmGf2Disperse<TYPES> {
    fn view_number(&self) -> TYPES::View {
        self.view_number
    }
}

impl<TYPES: NodeType> AvidmGf2Disperse<TYPES> {
    /// Create VID dispersal from a specified membership for the target epoch.
    /// Shares are assigned to stake table entries in order.
    async fn from_membership(
        view_number: TYPES::View,
        commit: AvidmGf2Commitment,
        shares: &[AvidmGf2Share],
        common: AvidmGf2Common,
        membership: &EpochMembership<TYPES>,
        target_epoch: Option<TYPES::Epoch>,
        data_epoch: Option<TYPES::Epoch>,
    ) -> Result<Self> {
        let payload_byte_len = common.payload_byte_len();
        let shares = membership
            .coordinator
            .stake_table_for_epoch(target_epoch)
            .await?
            .stake_table()
            .await
            .iter()
            .map(|entry| entry.stake_table_entry.public_key())
            .zip(shares)
            .map(|(node, share)| (node.clone(), share.clone()))
            .collect();

        Ok(Self {
            view_number,
            shares,
            payload_commitment: commit,
            epoch: data_epoch,
            target_epoch,
            payload_byte_len,
            common,
        })
    }

    /// Calculate the VID disperse information from the payload, given a view, epoch and membership.
    /// If the sender epoch is missing, it is assumed to be the same as the target epoch.
    ///
    /// # Errors
    /// Returns an error if the disperse or commitment calculation fails
    pub async fn calculate_vid_disperse(
        payload: &TYPES::BlockPayload,
        membership: &EpochMembershipCoordinator<TYPES>,
        view: TYPES::View,
        target_epoch: Option<TYPES::Epoch>,
        data_epoch: Option<TYPES::Epoch>,
        metadata: &<TYPES::BlockPayload as BlockPayload<TYPES>>::Metadata,
    ) -> Result<(Self, Duration)> {
        let target_mem = membership.stake_table_for_epoch(target_epoch).await?;
        let stake_table = target_mem.stake_table().await;
        let approximate_weights = approximate_weights(&stake_table);

        let txns = payload.encode();
        let num_txns = txns.len();

        let avidm_param = init_avidm_gf2_param(approximate_weights.total_weight)?;

        let ns_table = parse_ns_table(num_txns, &metadata.encode());
        let ns_table_clone = ns_table.clone();

        let now = Instant::now();
        let (commit, common, shares) = spawn_blocking(move || {
            AvidmGf2Scheme::ns_disperse(
                &avidm_param,
                &approximate_weights.weights,
                &txns,
                ns_table_clone,
            )
        })
        .await
        .wrap()
        .context(error!("Join error"))?
        .wrap()
        .context(|err| error!("Failed to calculate VID disperse. Error: {err}"))?;
        let ns_disperse_duration = now.elapsed();

        Ok((
            Self::from_membership(
                view,
                commit,
                &shares,
                common,
                &target_mem,
                target_epoch,
                data_epoch,
            )
            .await?,
            ns_disperse_duration,
        ))
    }

    /// This function splits a VID disperse into individual shares.
    pub fn to_shares(self) -> Vec<AvidmGf2DisperseShare<TYPES>> {
        self.shares
            .into_iter()
            .map(|(recipient_key, share)| AvidmGf2DisperseShare {
                share,
                recipient_key,
                view_number: self.view_number,
                payload_commitment: self.payload_commitment,
                epoch: self.epoch,
                target_epoch: self.target_epoch,
                common: self.common.clone(),
            })
            .collect()
    }

    /// Split a VID disperse into a share proposal for each recipient.
    pub fn to_share_proposals(
        self,
        signature: &<<TYPES as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
    ) -> Vec<Proposal<TYPES, AvidmGf2DisperseShare<TYPES>>> {
        self.shares
            .into_iter()
            .map(|(recipient_key, share)| Proposal {
                data: AvidmGf2DisperseShare {
                    share,
                    recipient_key,
                    view_number: self.view_number,
                    payload_commitment: self.payload_commitment,
                    epoch: self.epoch,
                    target_epoch: self.target_epoch,
                    common: self.common.clone(),
                },
                signature: signature.clone(),
                _pd: PhantomData,
            })
            .collect()
    }

    /// Construct a VID disperse from an iterator of disperse shares.
    pub fn try_from_shares<'a, I>(mut it: I) -> Option<Self>
    where
        I: Iterator<Item = &'a AvidmGf2DisperseShare<TYPES>>,
    {
        let first_vid_disperse_share = it.next()?.clone();
        let payload_byte_len = first_vid_disperse_share.common.payload_byte_len();
        let mut share_map = BTreeMap::new();
        share_map.insert(
            first_vid_disperse_share.recipient_key,
            first_vid_disperse_share.share,
        );
        let mut vid_disperse = Self {
            view_number: first_vid_disperse_share.view_number,
            epoch: first_vid_disperse_share.epoch,
            target_epoch: first_vid_disperse_share.target_epoch,
            payload_commitment: first_vid_disperse_share.payload_commitment,
            shares: share_map,
            payload_byte_len,
            common: first_vid_disperse_share.common,
        };
        // `Iterator::map` is lazy, so iterate eagerly to insert every remaining share.
        for vid_disperse_share in it {
            vid_disperse.shares.insert(
                vid_disperse_share.recipient_key.clone(),
                vid_disperse_share.share.clone(),
            );
        }
        Some(vid_disperse)
    }

    /// Returns the payload length in bytes.
    pub fn payload_byte_len(&self) -> u32 {
        self.payload_byte_len as u32
    }
}
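
// Sketch of rebuilding a full dispersal from shares collected over the network, assuming a
// hypothetical `collected: Vec<AvidmGf2DisperseShare<MyTypes>>` (duplicate recipients are
// deduplicated by the underlying `BTreeMap`):
//
//     let disperse = AvidmGf2Disperse::try_from_shares(collected.iter());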

#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
/// VID share and associated metadata for a single node
pub struct AvidmGf2DisperseShare<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: TYPES::View,
    /// The epoch number this VID data belongs to
    pub epoch: Option<TYPES::Epoch>,
    /// The epoch number to which the recipient of this VID belongs
    pub target_epoch: Option<TYPES::Epoch>,
    /// Block payload commitment
    pub payload_commitment: AvidmGf2Commitment,
    /// A storage node's key and its corresponding VID share
    pub share: AvidmGf2Share,
    /// a public key of the share recipient
    pub recipient_key: TYPES::SignatureKey,
    /// VID common data sent to all storage nodes
    pub common: AvidmGf2Common,
}

impl<TYPES: NodeType> HasViewNumber<TYPES> for AvidmGf2DisperseShare<TYPES> {
    fn view_number(&self) -> TYPES::View {
        self.view_number
    }
}

impl<TYPES: NodeType> AvidmGf2DisperseShare<TYPES> {
    /// Consume `self` and return a `Proposal`
    pub fn to_proposal(
        self,
        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
    ) -> Option<Proposal<TYPES, Self>> {
        let Ok(signature) =
            TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref())
        else {
            tracing::error!("VID: failed to sign dispersal share payload");
            return None;
        };
        Some(Proposal {
            signature,
            _pd: PhantomData,
            data: self,
        })
    }

    /// Returns the payload length in bytes.
    pub fn payload_byte_len(&self) -> u32 {
        self.common.payload_byte_len() as u32
    }

    /// Internally verify the share given necessary information
    pub fn verify(&self, _total_weight: usize) -> bool {
        AvidmGf2Scheme::verify_share(&self.payload_commitment, &self.common, &self.share)
            .is_ok_and(|r| r.is_ok())
    }
}