hotshot_types/data/
vid_disperse.rs

1// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
2// This file is part of the HotShot repository.
3
4// You should have received a copy of the MIT License
5// along with the HotShot repository. If not, see <https://mit-license.org/>.
6
7//! This module provides types for VID disperse related data structures.
8//!
9//! We have three types of VID disperse related structs:
10//!
11//! 1. `ADVZ*`: VID V0, The most original VID scheme, which has guaranteed recovery but very inefficient.
//! 2. `AvidM*`: VID V1, the efficient VID scheme, which we use after the epoch upgrade. It's more
13//!    efficient but doesn't guarantee recovery. A VID V1 commitment could correspond to some junk
14//!    data, there'll be a proof of incorrect encoding in this case.
15//! 3. `AvidmGf2*`: VID V2, almost the same as VID V1 but we have a much more efficient recovery
16//!    implementation.
17
18use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, time::Duration};
19
20use alloy::primitives::U256;
21use hotshot_utils::anytrace::*;
22use jf_advz::{VidDisperse as JfVidDisperse, VidScheme};
23use serde::{Deserialize, Serialize};
24use tokio::{task::spawn_blocking, time::Instant};
25
26use super::ns_table::parse_ns_table;
27use crate::{
28    data::{EpochNumber, ViewNumber},
29    epoch_membership::{EpochMembership, EpochMembershipCoordinator},
30    message::Proposal,
31    simple_vote::HasEpoch,
32    stake_table::HSStakeTable,
33    traits::{
34        BlockPayload,
35        block_contents::EncodeBytes,
36        node_implementation::NodeType,
37        signature_key::{SignatureKey, StakeTableEntryType},
38    },
39    vid::{
40        advz::{ADVZCommitment, ADVZCommon, ADVZScheme, ADVZShare, advz_scheme},
41        avidm::{AvidMCommitment, AvidMCommon, AvidMScheme, AvidMShare, init_avidm_param},
42        avidm_gf2::{
43            AvidmGf2Commitment, AvidmGf2Common, AvidmGf2Scheme, AvidmGf2Share, init_avidm_gf2_param,
44        },
45    },
46    vote::HasViewNumber,
47};
48
impl<NODE: NodeType> HasEpoch for ADVZDisperse<NODE> {
    /// Returns the epoch the dispersed data belongs to, if known.
    fn epoch(&self) -> Option<EpochNumber> {
        self.epoch
    }
}
54
impl<NODE: NodeType> HasEpoch for AvidMDisperse<NODE> {
    /// Returns the epoch the dispersed data belongs to, if known.
    fn epoch(&self) -> Option<EpochNumber> {
        self.epoch
    }
}
60
impl<NODE: NodeType> HasEpoch for AvidMDisperseShare<NODE> {
    /// Returns the epoch the share's data belongs to, if known.
    fn epoch(&self) -> Option<EpochNumber> {
        self.epoch
    }
}
66
impl<NODE: NodeType> HasEpoch for AvidmGf2Disperse<NODE> {
    /// Returns the epoch the dispersed data belongs to, if known.
    fn epoch(&self) -> Option<EpochNumber> {
        self.epoch
    }
}
72
impl<NODE: NodeType> HasEpoch for AvidmGf2DisperseShare<NODE> {
    /// Returns the epoch the share's data belongs to, if known.
    fn epoch(&self) -> Option<EpochNumber> {
        self.epoch
    }
}
78
/// ADVZ dispersal data (VID V0; see the module docs for the version overview).
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
pub struct ADVZDisperse<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: ViewNumber,
    /// Epoch the data of this proposal belongs to
    pub epoch: Option<EpochNumber>,
    /// Epoch to which the recipients of this VID belong
    pub target_epoch: Option<EpochNumber>,
    /// VidCommitment calculated based on the number of nodes in `target_epoch`.
    pub payload_commitment: ADVZCommitment,
    /// A storage node's key and its corresponding VID share
    pub shares: BTreeMap<TYPES::SignatureKey, ADVZShare>,
    /// VID common data sent to all storage nodes
    pub common: ADVZCommon,
}
95
impl<TYPES: NodeType> HasViewNumber for ADVZDisperse<TYPES> {
    /// Returns the view number this VID data is intended for.
    fn view_number(&self) -> ViewNumber {
        self.view_number
    }
}
101
102impl<TYPES: NodeType> ADVZDisperse<TYPES> {
103    /// Create VID dispersal from a specified membership for the target epoch.
104    /// Uses the specified function to calculate share dispersal
105    /// Allows for more complex stake table functionality
106    async fn from_membership(
107        view_number: ViewNumber,
108        mut vid_disperse: JfVidDisperse<ADVZScheme>,
109        membership: &EpochMembershipCoordinator<TYPES>,
110        target_epoch: Option<EpochNumber>,
111        data_epoch: Option<EpochNumber>,
112    ) -> Result<Self> {
113        let shares = membership
114            .stake_table_for_epoch(target_epoch)
115            .await?
116            .stake_table()
117            .await
118            .iter()
119            .map(|entry| entry.stake_table_entry.public_key())
120            .map(|node| (node.clone(), vid_disperse.shares.remove(0)))
121            .collect();
122
123        Ok(Self {
124            view_number,
125            shares,
126            common: vid_disperse.common,
127            payload_commitment: vid_disperse.commit,
128            epoch: data_epoch,
129            target_epoch,
130        })
131    }
132
133    /// Calculate the vid disperse information from the payload given a view, epoch and membership,
134    /// If the sender epoch is missing, it means it's the same as the target epoch.
135    ///
136    /// # Errors
137    /// Returns an error if the disperse or commitment calculation fails
138    #[allow(clippy::panic)]
139    pub async fn calculate_vid_disperse(
140        payload: &TYPES::BlockPayload,
141        membership: &EpochMembershipCoordinator<TYPES>,
142        view: ViewNumber,
143        target_epoch: Option<EpochNumber>,
144        data_epoch: Option<EpochNumber>,
145    ) -> Result<(Self, Duration)> {
146        let num_nodes = membership
147            .stake_table_for_epoch(target_epoch)
148            .await?
149            .total_nodes()
150            .await;
151
152        let txns = payload.encode();
153
154        let now = Instant::now();
155        let vid_disperse = spawn_blocking(move || advz_scheme(num_nodes).disperse(&txns))
156            .await
157            .wrap()
158            .context(error!("Join error"))?
159            .wrap()
160            .context(|err| error!("Failed to calculate VID disperse. Error: {err}"))?;
161        let advz_scheme_duration = now.elapsed();
162
163        Ok((
164            Self::from_membership(view, vid_disperse, membership, target_epoch, data_epoch).await?,
165            advz_scheme_duration,
166        ))
167    }
168
169    /// This function splits a VID disperse into individual shares.
170    pub fn to_shares(self) -> Vec<ADVZDisperseShare<TYPES>> {
171        self.shares
172            .into_iter()
173            .map(|(recipient_key, share)| ADVZDisperseShare {
174                share,
175                recipient_key,
176                view_number: self.view_number,
177                common: self.common.clone(),
178                payload_commitment: self.payload_commitment,
179            })
180            .collect()
181    }
182
183    /// Split a VID disperse into a share proposal for each recipient.
184    pub fn to_share_proposals(
185        self,
186        signature: &<<TYPES as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
187    ) -> Vec<Proposal<TYPES, ADVZDisperseShare<TYPES>>> {
188        self.shares
189            .into_iter()
190            .map(|(recipient_key, share)| Proposal {
191                data: ADVZDisperseShare {
192                    share,
193                    recipient_key,
194                    view_number: self.view_number,
195                    payload_commitment: self.payload_commitment,
196                    common: self.common.clone(),
197                },
198                signature: signature.clone(),
199                _pd: PhantomData,
200            })
201            .collect()
202    }
203
204    /// Returns the payload length in bytes.
205    pub fn payload_byte_len(&self) -> u32 {
206        ADVZScheme::get_payload_byte_len(&self.common)
207    }
208}
209
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
/// ADVZ share and associated metadata for a single node
pub struct ADVZDisperseShare<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: ViewNumber,
    /// Block payload commitment
    pub payload_commitment: ADVZCommitment,
    /// A storage node's key and its corresponding VID share
    pub share: ADVZShare,
    /// VID common data sent to all storage nodes
    pub common: ADVZCommon,
    /// a public key of the share recipient
    pub recipient_key: TYPES::SignatureKey,
}
224
impl<NODE: NodeType> HasEpoch for ADVZDisperseShare<NODE> {
    /// Always `None`: `ADVZDisperseShare` carries no epoch field
    /// (unlike the AvidM/AvidmGf2 share types).
    fn epoch(&self) -> Option<EpochNumber> {
        None
    }
}
230
impl<TYPES: NodeType> HasViewNumber for ADVZDisperseShare<TYPES> {
    /// Returns the view number this share is intended for.
    fn view_number(&self) -> ViewNumber {
        self.view_number
    }
}
236
237impl<TYPES: NodeType> ADVZDisperseShare<TYPES> {
238    /// Consume `self` and return a `Proposal`
239    pub fn to_proposal(
240        self,
241        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
242    ) -> Option<Proposal<TYPES, Self>> {
243        let Ok(signature) =
244            TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref())
245        else {
246            tracing::error!("VID: failed to sign dispersal share payload");
247            return None;
248        };
249        Some(Proposal {
250            signature,
251            _pd: PhantomData,
252            data: self,
253        })
254    }
255
256    /// Create `VidDisperse` out of an iterator to `VidDisperseShare`s
257    pub fn to_advz_disperse<'a, I>(mut it: I) -> Option<ADVZDisperse<TYPES>>
258    where
259        I: Iterator<Item = &'a Self>,
260    {
261        let first_vid_disperse_share = it.next()?.clone();
262        let mut share_map = BTreeMap::new();
263        share_map.insert(
264            first_vid_disperse_share.recipient_key,
265            first_vid_disperse_share.share,
266        );
267        let mut vid_disperse = ADVZDisperse {
268            view_number: first_vid_disperse_share.view_number,
269            epoch: None,
270            target_epoch: None,
271            payload_commitment: first_vid_disperse_share.payload_commitment,
272            common: first_vid_disperse_share.common,
273            shares: share_map,
274        };
275        let _ = it.map(|vid_disperse_share| {
276            vid_disperse.shares.insert(
277                vid_disperse_share.recipient_key.clone(),
278                vid_disperse_share.share.clone(),
279            )
280        });
281        Some(vid_disperse)
282    }
283
284    /// Check if vid common is consistent with the commitment.
285    pub fn is_consistent(&self) -> bool {
286        ADVZScheme::is_consistent(&self.payload_commitment, &self.common).is_ok()
287    }
288
289    /// Verify share assuming common data is already verified consistent.
290    /// Caller MUST call `is_consistent()` first.
291    pub fn verify_with_verified_common(&self) -> bool {
292        let total_weight = ADVZScheme::get_num_storage_nodes(&self.common) as usize;
293        advz_scheme(total_weight)
294            .verify_share(&self.share, &self.common, &self.payload_commitment)
295            .is_ok_and(|r| r.is_ok())
296    }
297
298    /// Internally verify the share given necessary information
299    pub fn verify(&self, _total_weight: usize) -> bool {
300        self.is_consistent() && self.verify_with_verified_common()
301    }
302
303    /// Returns the payload length in bytes.
304    pub fn payload_byte_len(&self) -> u32 {
305        ADVZScheme::get_payload_byte_len(&self.common)
306    }
307}
308
/// AvidM dispersal data (VID V1; see the module docs for the version overview).
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
pub struct AvidMDisperse<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: ViewNumber,
    /// Epoch the data of this proposal belongs to
    pub epoch: Option<EpochNumber>,
    /// Epoch to which the recipients of this VID belong
    pub target_epoch: Option<EpochNumber>,
    /// VidCommitment calculated based on the number of nodes in `target_epoch`.
    pub payload_commitment: AvidMCommitment,
    /// A storage node's key and its corresponding VID share
    pub shares: BTreeMap<TYPES::SignatureKey, AvidMShare>,
    /// Length of payload in bytes
    pub payload_byte_len: usize,
    /// VID common data sent to all storage nodes
    pub common: AvidMCommon,
}
327
impl<TYPES: NodeType> HasViewNumber for AvidMDisperse<TYPES> {
    /// Returns the view number this VID data is intended for.
    fn view_number(&self) -> ViewNumber {
        self.view_number
    }
}
333
/// The target total stake to scale to for VID.
/// Stake tables whose total stake exceeds this are scaled down to roughly
/// this total by `approximate_weights`.
pub const VID_TARGET_TOTAL_STAKE: u32 = 1000;
336
/// The weights and total weight used in VID calculations
struct Weights {
    /// per-node weights, in stake table order
    weights: Vec<u32>,

    /// total weight (sum of `weights`)
    total_weight: usize,
}
345
346pub fn vid_total_weight<TYPES: NodeType>(
347    stake_table: &HSStakeTable<TYPES>,
348    epoch: Option<EpochNumber>,
349) -> usize {
350    if epoch.is_none() {
351        stake_table
352            .iter()
353            .fold(U256::ZERO, |acc, entry| {
354                acc + entry.stake_table_entry.stake()
355            })
356            .to::<usize>()
357    } else {
358        approximate_weights(stake_table).total_weight
359    }
360}
361
362fn approximate_weights<TYPES: NodeType>(stake_table: &HSStakeTable<TYPES>) -> Weights {
363    let total_stake = stake_table.iter().fold(U256::ZERO, |acc, entry| {
364        acc + entry.stake_table_entry.stake()
365    });
366
367    let mut total_weight: usize = 0;
368
369    // don't attempt to scale if the total stake is small enough
370    if total_stake <= U256::from(VID_TARGET_TOTAL_STAKE) {
371        let weights = stake_table
372            .iter()
373            .map(|entry| entry.stake_table_entry.stake().to::<u32>())
374            .collect();
375
376        // Note: this panics if `total_stake` exceeds `usize::MAX`, but this shouldn't happen.
377        total_weight = total_stake.to::<usize>();
378
379        Weights {
380            weights,
381            total_weight,
382        }
383    } else {
384        let weights = stake_table
385            .iter()
386            .map(|entry| {
387                let weight: U256 = ((entry.stake_table_entry.stake()
388                    * U256::from(VID_TARGET_TOTAL_STAKE))
389                    / total_stake)
390                    + U256::ONE;
391
392                // Note: this panics if `weight` exceeds `usize::MAX`, but this shouldn't happen.
393                total_weight += weight.to::<usize>();
394
395                // Note: this panics if `weight` exceeds `u32::MAX`, but this shouldn't happen
396                // and would likely cause a stack overflow in the VID calculation anyway
397                weight.to::<u32>()
398            })
399            .collect();
400
401        Weights {
402            weights,
403            total_weight,
404        }
405    }
406}
407
408impl<TYPES: NodeType> AvidMDisperse<TYPES> {
409    /// Create VID dispersal from a specified membership for the target epoch.
410    /// Uses the specified function to calculate share dispersal
411    /// Allows for more complex stake table functionality
412    async fn from_membership(
413        view_number: ViewNumber,
414        commit: AvidMCommitment,
415        shares: &[AvidMShare],
416        common: AvidMCommon,
417        membership: &EpochMembership<TYPES>,
418        target_epoch: Option<EpochNumber>,
419        data_epoch: Option<EpochNumber>,
420    ) -> Result<Self> {
421        let payload_byte_len = shares[0].payload_byte_len();
422        let shares = membership
423            .coordinator
424            .stake_table_for_epoch(target_epoch)
425            .await?
426            .stake_table()
427            .await
428            .iter()
429            .map(|entry| entry.stake_table_entry.public_key())
430            .zip(shares)
431            .map(|(node, share)| (node.clone(), share.clone()))
432            .collect();
433
434        Ok(Self {
435            view_number,
436            shares,
437            payload_commitment: commit,
438            epoch: data_epoch,
439            target_epoch,
440            payload_byte_len,
441            common,
442        })
443    }
444
445    /// Calculate the vid disperse information from the payload given a view, epoch and membership,
446    /// If the sender epoch is missing, it means it's the same as the target epoch.
447    ///
448    /// # Errors
449    /// Returns an error if the disperse or commitment calculation fails
450    #[allow(clippy::panic)]
451    #[allow(clippy::single_range_in_vec_init)]
452    pub async fn calculate_vid_disperse(
453        payload: &TYPES::BlockPayload,
454        membership: &EpochMembershipCoordinator<TYPES>,
455        view: ViewNumber,
456        target_epoch: Option<EpochNumber>,
457        data_epoch: Option<EpochNumber>,
458        metadata: &<TYPES::BlockPayload as BlockPayload<TYPES>>::Metadata,
459    ) -> Result<(Self, Duration)> {
460        let target_mem = membership.stake_table_for_epoch(target_epoch).await?;
461        let stake_table = target_mem.stake_table().await;
462        let approximate_weights = approximate_weights(&stake_table);
463
464        let txns = payload.encode();
465        let num_txns = txns.len();
466
467        let avidm_param = init_avidm_param(approximate_weights.total_weight)?;
468        let common = avidm_param.clone();
469
470        let ns_table = parse_ns_table(num_txns, &metadata.encode());
471        let ns_table_clone = ns_table.clone();
472
473        let now = Instant::now();
474        let (commit, shares) = spawn_blocking(move || {
475            AvidMScheme::ns_disperse(
476                &avidm_param,
477                &approximate_weights.weights,
478                &txns,
479                ns_table_clone,
480            )
481        })
482        .await
483        .wrap()
484        .context(error!("Join error"))?
485        .wrap()
486        .context(|err| error!("Failed to calculate VID disperse. Error: {err}"))?;
487        let ns_disperse_duration = now.elapsed();
488
489        Ok((
490            Self::from_membership(
491                view,
492                commit,
493                &shares,
494                common,
495                &target_mem,
496                target_epoch,
497                data_epoch,
498            )
499            .await?,
500            ns_disperse_duration,
501        ))
502    }
503
504    /// This function splits a VID disperse into individual shares.
505    pub fn to_shares(self) -> Vec<AvidMDisperseShare<TYPES>> {
506        self.shares
507            .into_iter()
508            .map(|(recipient_key, share)| AvidMDisperseShare {
509                share,
510                recipient_key,
511                view_number: self.view_number,
512                payload_commitment: self.payload_commitment,
513                epoch: self.epoch,
514                target_epoch: self.target_epoch,
515                common: self.common.clone(),
516            })
517            .collect()
518    }
519
520    /// Split a VID disperse into a share proposal for each recipient.
521    pub fn to_share_proposals(
522        self,
523        signature: &<<TYPES as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
524    ) -> Vec<Proposal<TYPES, AvidMDisperseShare<TYPES>>> {
525        self.shares
526            .into_iter()
527            .map(|(recipient_key, share)| Proposal {
528                data: AvidMDisperseShare {
529                    share,
530                    recipient_key,
531                    view_number: self.view_number,
532                    payload_commitment: self.payload_commitment,
533                    epoch: self.epoch,
534                    target_epoch: self.target_epoch,
535                    common: self.common.clone(),
536                },
537                signature: signature.clone(),
538                _pd: PhantomData,
539            })
540            .collect()
541    }
542
543    /// Construct a VID disperse from an iterator of disperse shares.
544    pub fn try_from_shares<'a, I>(mut it: I) -> Option<Self>
545    where
546        I: Iterator<Item = &'a AvidMDisperseShare<TYPES>>,
547    {
548        let first_vid_disperse_share = it.next()?.clone();
549        let payload_byte_len = first_vid_disperse_share.share.payload_byte_len();
550        let mut share_map = BTreeMap::new();
551        share_map.insert(
552            first_vid_disperse_share.recipient_key,
553            first_vid_disperse_share.share,
554        );
555        let mut vid_disperse = Self {
556            view_number: first_vid_disperse_share.view_number,
557            epoch: first_vid_disperse_share.epoch,
558            target_epoch: first_vid_disperse_share.target_epoch,
559            payload_commitment: first_vid_disperse_share.payload_commitment,
560            shares: share_map,
561            payload_byte_len,
562            common: first_vid_disperse_share.common,
563        };
564        let _ = it.map(|vid_disperse_share| {
565            vid_disperse.shares.insert(
566                vid_disperse_share.recipient_key.clone(),
567                vid_disperse_share.share.clone(),
568            )
569        });
570        Some(vid_disperse)
571    }
572
573    /// Returns the payload length in bytes.
574    pub fn payload_byte_len(&self) -> u32 {
575        self.payload_byte_len as u32
576    }
577}
578
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
/// AvidM VID share and associated metadata for a single node
pub struct AvidMDisperseShare<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: ViewNumber,
    /// The epoch number which this VID data belongs to
    pub epoch: Option<EpochNumber>,
    /// The epoch number to which the recipient of this VID belongs
    pub target_epoch: Option<EpochNumber>,
    /// Block payload commitment
    pub payload_commitment: AvidMCommitment,
    /// A storage node's key and its corresponding VID share
    pub share: AvidMShare,
    /// a public key of the share recipient
    pub recipient_key: TYPES::SignatureKey,
    /// VID common data sent to all storage nodes
    pub common: AvidMCommon,
}
597
impl<TYPES: NodeType> HasViewNumber for AvidMDisperseShare<TYPES> {
    /// Returns the view number this share is intended for.
    fn view_number(&self) -> ViewNumber {
        self.view_number
    }
}
603
604impl<TYPES: NodeType> AvidMDisperseShare<TYPES> {
605    /// Consume `self` and return a `Proposal`
606    pub fn to_proposal(
607        self,
608        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
609    ) -> Option<Proposal<TYPES, Self>> {
610        let Ok(signature) =
611            TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref())
612        else {
613            tracing::error!("VID: failed to sign dispersal share payload");
614            return None;
615        };
616        Some(Proposal {
617            signature,
618            _pd: PhantomData,
619            data: self,
620        })
621    }
622
623    /// Returns the payload length in bytes.
624    pub fn payload_byte_len(&self) -> u32 {
625        self.share.payload_byte_len() as u32
626    }
627
628    /// Check if vid common is consistent with the commitment.
629    /// For AvidM, ns_commits is inside the share, so there's no separate consistency check.
630    pub fn is_consistent(&self) -> bool {
631        true
632    }
633
634    /// Verify share assuming common data is already verified consistent.
635    /// For AvidM, this is equivalent to the full verify since there's
636    /// no separate consistency check (ns_commits is inside the share).
637    pub fn verify_with_verified_common(&self) -> bool {
638        AvidMScheme::verify_share(&self.common, &self.payload_commitment, &self.share)
639            .is_ok_and(|r| r.is_ok())
640    }
641
642    /// Internally verify the share given necessary information
643    pub fn verify(&self, _total_weight: usize) -> bool {
644        self.is_consistent() && self.verify_with_verified_common()
645    }
646}
647
/// AvidmGf2 dispersal data (VID V2; see the module docs for the version overview).
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
pub struct AvidmGf2Disperse<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: ViewNumber,
    /// Epoch the data of this proposal belongs to
    pub epoch: Option<EpochNumber>,
    /// Epoch to which the recipients of this VID belong
    pub target_epoch: Option<EpochNumber>,
    /// VidCommitment calculated based on the number of nodes in `target_epoch`.
    pub payload_commitment: AvidmGf2Commitment,
    /// A storage node's key and its corresponding VID share
    pub shares: BTreeMap<TYPES::SignatureKey, AvidmGf2Share>,
    /// Length of payload in bytes
    pub payload_byte_len: usize,
    /// VID common data sent to all storage nodes
    pub common: AvidmGf2Common,
}
666
impl<TYPES: NodeType> HasViewNumber for AvidmGf2Disperse<TYPES> {
    /// Returns the view number this VID data is intended for.
    fn view_number(&self) -> ViewNumber {
        self.view_number
    }
}
672
673impl<TYPES: NodeType> AvidmGf2Disperse<TYPES> {
674    /// Create VID dispersal from a specified membership for the target epoch.
675    /// Uses the specified function to calculate share dispersal
676    /// Allows for more complex stake table functionality
677    async fn from_membership(
678        view_number: ViewNumber,
679        commit: AvidmGf2Commitment,
680        shares: &[AvidmGf2Share],
681        common: AvidmGf2Common,
682        membership: &EpochMembership<TYPES>,
683        target_epoch: Option<EpochNumber>,
684        data_epoch: Option<EpochNumber>,
685    ) -> Result<Self> {
686        let payload_byte_len = common.payload_byte_len();
687        let shares = membership
688            .coordinator
689            .stake_table_for_epoch(target_epoch)
690            .await?
691            .stake_table()
692            .await
693            .iter()
694            .map(|entry| entry.stake_table_entry.public_key())
695            .zip(shares)
696            .map(|(node, share)| (node.clone(), share.clone()))
697            .collect();
698
699        Ok(Self {
700            view_number,
701            shares,
702            payload_commitment: commit,
703            epoch: data_epoch,
704            target_epoch,
705            payload_byte_len,
706            common,
707        })
708    }
709
710    /// Calculate the vid disperse information from the payload given a view, epoch and membership,
711    /// If the sender epoch is missing, it means it's the same as the target epoch.
712    ///
713    /// # Errors
714    /// Returns an error if the disperse or commitment calculation fails
715    pub async fn calculate_vid_disperse(
716        payload: &TYPES::BlockPayload,
717        membership: &EpochMembershipCoordinator<TYPES>,
718        view: ViewNumber,
719        target_epoch: Option<EpochNumber>,
720        data_epoch: Option<EpochNumber>,
721        metadata: &<TYPES::BlockPayload as BlockPayload<TYPES>>::Metadata,
722    ) -> Result<(Self, Duration)> {
723        let target_mem = membership.stake_table_for_epoch(target_epoch).await?;
724        let stake_table = target_mem.stake_table().await;
725        let approximate_weights = approximate_weights(&stake_table);
726
727        let txns = payload.encode();
728        let num_txns = txns.len();
729
730        let avidm_param = init_avidm_gf2_param(approximate_weights.total_weight)?;
731
732        let ns_table = parse_ns_table(num_txns, &metadata.encode());
733        let ns_table_clone = ns_table.clone();
734
735        let now = Instant::now();
736        let (commit, common, shares) = spawn_blocking(move || {
737            AvidmGf2Scheme::ns_disperse(
738                &avidm_param,
739                &approximate_weights.weights,
740                &txns,
741                ns_table_clone,
742            )
743        })
744        .await
745        .wrap()
746        .context(error!("Join error"))?
747        .wrap()
748        .context(|err| error!("Failed to calculate VID disperse. Error: {err}"))?;
749        let ns_disperse_duration = now.elapsed();
750
751        Ok((
752            Self::from_membership(
753                view,
754                commit,
755                &shares,
756                common,
757                &target_mem,
758                target_epoch,
759                data_epoch,
760            )
761            .await?,
762            ns_disperse_duration,
763        ))
764    }
765
766    /// This function splits a VID disperse into individual shares.
767    pub fn to_shares(self) -> Vec<AvidmGf2DisperseShare<TYPES>> {
768        self.shares
769            .into_iter()
770            .map(|(recipient_key, share)| AvidmGf2DisperseShare {
771                share,
772                recipient_key,
773                view_number: self.view_number,
774                payload_commitment: self.payload_commitment,
775                epoch: self.epoch,
776                target_epoch: self.target_epoch,
777                common: self.common.clone(),
778            })
779            .collect()
780    }
781
782    /// Split a VID disperse into a share proposal for each recipient.
783    pub fn to_share_proposals(
784        self,
785        signature: &<<TYPES as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
786    ) -> Vec<Proposal<TYPES, AvidmGf2DisperseShare<TYPES>>> {
787        self.shares
788            .into_iter()
789            .map(|(recipient_key, share)| Proposal {
790                data: AvidmGf2DisperseShare {
791                    share,
792                    recipient_key,
793                    view_number: self.view_number,
794                    payload_commitment: self.payload_commitment,
795                    epoch: self.epoch,
796                    target_epoch: self.target_epoch,
797                    common: self.common.clone(),
798                },
799                signature: signature.clone(),
800                _pd: PhantomData,
801            })
802            .collect()
803    }
804
805    /// Construct a VID disperse from an iterator of disperse shares.
806    pub fn try_from_shares<'a, I>(mut it: I) -> Option<Self>
807    where
808        I: Iterator<Item = &'a AvidmGf2DisperseShare<TYPES>>,
809    {
810        let first_vid_disperse_share = it.next()?.clone();
811        let payload_byte_len = first_vid_disperse_share.common.payload_byte_len();
812        let mut share_map = BTreeMap::new();
813        share_map.insert(
814            first_vid_disperse_share.recipient_key,
815            first_vid_disperse_share.share,
816        );
817        let mut vid_disperse = Self {
818            view_number: first_vid_disperse_share.view_number,
819            epoch: first_vid_disperse_share.epoch,
820            target_epoch: first_vid_disperse_share.target_epoch,
821            payload_commitment: first_vid_disperse_share.payload_commitment,
822            shares: share_map,
823            payload_byte_len,
824            common: first_vid_disperse_share.common,
825        };
826        let _ = it.map(|vid_disperse_share| {
827            vid_disperse.shares.insert(
828                vid_disperse_share.recipient_key.clone(),
829                vid_disperse_share.share.clone(),
830            )
831        });
832        Some(vid_disperse)
833    }
834
835    /// Returns the payload length in bytes.
836    pub fn payload_byte_len(&self) -> u32 {
837        self.payload_byte_len as u32
838    }
839}
840
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
/// AvidmGf2 VID share and associated metadata for a single node
pub struct AvidmGf2DisperseShare<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: ViewNumber,
    /// The epoch number which this VID data belongs to
    pub epoch: Option<EpochNumber>,
    /// The epoch number to which the recipient of this VID belongs
    pub target_epoch: Option<EpochNumber>,
    /// Block payload commitment
    pub payload_commitment: AvidmGf2Commitment,
    /// A storage node's key and its corresponding VID share
    pub share: AvidmGf2Share,
    /// a public key of the share recipient
    pub recipient_key: TYPES::SignatureKey,
    /// VID common data sent to all storage nodes
    pub common: AvidmGf2Common,
}
859
impl<TYPES: NodeType> HasViewNumber for AvidmGf2DisperseShare<TYPES> {
    /// Returns the view number this share is intended for.
    fn view_number(&self) -> ViewNumber {
        self.view_number
    }
}
865
866impl<TYPES: NodeType> AvidmGf2DisperseShare<TYPES> {
867    /// Consume `self` and return a `Proposal`
868    pub fn to_proposal(
869        self,
870        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
871    ) -> Option<Proposal<TYPES, Self>> {
872        let Ok(signature) =
873            TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref())
874        else {
875            tracing::error!("VID: failed to sign dispersal share payload");
876            return None;
877        };
878        Some(Proposal {
879            signature,
880            _pd: PhantomData,
881            data: self,
882        })
883    }
884    /// Returns the payload length in bytes.
885    pub fn payload_byte_len(&self) -> u32 {
886        self.common.payload_byte_len() as u32
887    }
888    /// Check if vid common is consistent with the commitment.
889    pub fn is_consistent(&self) -> bool {
890        AvidmGf2Scheme::is_consistent(&self.payload_commitment, &self.common)
891    }
892
893    /// Verify share assuming common data is already verified consistent.
894    /// Caller MUST call `is_consistent()` first.
895    pub fn verify_with_verified_common(&self) -> bool {
896        AvidmGf2Scheme::verify_share_with_verified_common(&self.common, &self.share)
897            .is_ok_and(|r| r.is_ok())
898    }
899
900    /// Internally verify the share given necessary information
901    pub fn verify(&self, _total_weight: usize) -> bool {
902        self.is_consistent() && self.verify_with_verified_common()
903    }
904}