hotshot_types/data/
vid_disperse.rs

1// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
2// This file is part of the HotShot repository.
3
4// You should have received a copy of the MIT License
5// along with the HotShot repository. If not, see <https://mit-license.org/>.
6
7//! This module provides types for VID disperse related data structures.
8
9use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData};
10
11use alloy::primitives::U256;
12use hotshot_utils::anytrace::*;
13use jf_vid::{VidDisperse as JfVidDisperse, VidScheme};
14use serde::{Deserialize, Serialize};
15use tokio::task::spawn_blocking;
16
17use super::ns_table::parse_ns_table;
18use crate::{
19    epoch_membership::{EpochMembership, EpochMembershipCoordinator},
20    impl_has_epoch,
21    message::Proposal,
22    simple_vote::HasEpoch,
23    stake_table::HSStakeTable,
24    traits::{
25        block_contents::EncodeBytes,
26        node_implementation::NodeType,
27        signature_key::{SignatureKey, StakeTableEntryType},
28        BlockPayload,
29    },
30    vid::{
31        advz::{advz_scheme, ADVZCommitment, ADVZCommon, ADVZScheme, ADVZShare},
32        avidm::{init_avidm_param, AvidMCommitment, AvidMCommon, AvidMScheme, AvidMShare},
33    },
34    vote::HasViewNumber,
35};
36
// Generate `HasEpoch` implementations for these types — presumably each
// returns its `epoch` field; see `impl_has_epoch!` in this crate to confirm.
impl_has_epoch!(
    ADVZDisperse<TYPES>,
    AvidMDisperse<TYPES>,
    VidDisperseShare2<TYPES>
);
42
/// ADVZ dispersal data
///
/// Built by [`ADVZDisperse::calculate_vid_disperse`]: the payload is dispersed
/// over the nodes of `target_epoch`, producing one share per storage node.
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
pub struct ADVZDisperse<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: TYPES::View,
    /// Epoch the data of this proposal belongs to
    pub epoch: Option<TYPES::Epoch>,
    /// Epoch to which the recipients of this VID belong to
    pub target_epoch: Option<TYPES::Epoch>,
    /// VidCommitment calculated based on the number of nodes in `target_epoch`.
    pub payload_commitment: ADVZCommitment,
    /// A storage node's key and its corresponding VID share
    pub shares: BTreeMap<TYPES::SignatureKey, ADVZShare>,
    /// VID common data sent to all storage nodes
    pub common: ADVZCommon,
}
59
impl<TYPES: NodeType> HasViewNumber<TYPES> for ADVZDisperse<TYPES> {
    /// Returns the view number this dispersal is intended for.
    fn view_number(&self) -> TYPES::View {
        self.view_number
    }
}
65
66impl<TYPES: NodeType> ADVZDisperse<TYPES> {
67    /// Create VID dispersal from a specified membership for the target epoch.
68    /// Uses the specified function to calculate share dispersal
69    /// Allows for more complex stake table functionality
70    async fn from_membership(
71        view_number: TYPES::View,
72        mut vid_disperse: JfVidDisperse<ADVZScheme>,
73        membership: &EpochMembershipCoordinator<TYPES>,
74        target_epoch: Option<TYPES::Epoch>,
75        data_epoch: Option<TYPES::Epoch>,
76    ) -> Self {
77        let shares = membership
78            .membership_for_epoch(target_epoch)
79            .await
80            .unwrap()
81            .stake_table()
82            .await
83            .iter()
84            .map(|entry| entry.stake_table_entry.public_key())
85            .map(|node| (node.clone(), vid_disperse.shares.remove(0)))
86            .collect();
87
88        Self {
89            view_number,
90            shares,
91            common: vid_disperse.common,
92            payload_commitment: vid_disperse.commit,
93            epoch: data_epoch,
94            target_epoch,
95        }
96    }
97
98    /// Calculate the vid disperse information from the payload given a view, epoch and membership,
99    /// If the sender epoch is missing, it means it's the same as the target epoch.
100    ///
101    /// # Errors
102    /// Returns an error if the disperse or commitment calculation fails
103    #[allow(clippy::panic)]
104    pub async fn calculate_vid_disperse(
105        payload: &TYPES::BlockPayload,
106        membership: &EpochMembershipCoordinator<TYPES>,
107        view: TYPES::View,
108        target_epoch: Option<TYPES::Epoch>,
109        data_epoch: Option<TYPES::Epoch>,
110    ) -> Result<Self> {
111        let num_nodes = membership
112            .membership_for_epoch(target_epoch)
113            .await?
114            .total_nodes()
115            .await;
116
117        let txns = payload.encode();
118
119        let vid_disperse = spawn_blocking(move || advz_scheme(num_nodes).disperse(&txns))
120            .await
121            .wrap()
122            .context(error!("Join error"))?
123            .wrap()
124            .context(|err| error!("Failed to calculate VID disperse. Error: {}", err))?;
125
126        Ok(Self::from_membership(view, vid_disperse, membership, target_epoch, data_epoch).await)
127    }
128
129    /// Returns the payload length in bytes.
130    pub fn payload_byte_len(&self) -> u32 {
131        ADVZScheme::get_payload_byte_len(&self.common)
132    }
133}
134
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
/// ADVZ share and associated metadata for a single node
///
/// The per-recipient slice of an [`ADVZDisperse`]; carries no epoch
/// information (compare `VidDisperseShare2`, which does).
pub struct ADVZDisperseShare<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: TYPES::View,
    /// Block payload commitment
    pub payload_commitment: ADVZCommitment,
    /// A storage node's key and its corresponding VID share
    pub share: ADVZShare,
    /// VID common data sent to all storage nodes
    pub common: ADVZCommon,
    /// a public key of the share recipient
    pub recipient_key: TYPES::SignatureKey,
}
149
impl<TYPES: NodeType> HasViewNumber<TYPES> for ADVZDisperseShare<TYPES> {
    /// Returns the view number this share is intended for.
    fn view_number(&self) -> TYPES::View {
        self.view_number
    }
}
155
156impl<TYPES: NodeType> ADVZDisperseShare<TYPES> {
157    /// Create a vector of `VidDisperseShare` from `VidDisperse`
158    pub fn from_advz_disperse(vid_disperse: ADVZDisperse<TYPES>) -> Vec<Self> {
159        vid_disperse
160            .shares
161            .into_iter()
162            .map(|(recipient_key, share)| Self {
163                share,
164                recipient_key,
165                view_number: vid_disperse.view_number,
166                common: vid_disperse.common.clone(),
167                payload_commitment: vid_disperse.payload_commitment,
168            })
169            .collect()
170    }
171
172    /// Consume `self` and return a `Proposal`
173    pub fn to_proposal(
174        self,
175        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
176    ) -> Option<Proposal<TYPES, Self>> {
177        let Ok(signature) =
178            TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref())
179        else {
180            tracing::error!("VID: failed to sign dispersal share payload");
181            return None;
182        };
183        Some(Proposal {
184            signature,
185            _pd: PhantomData,
186            data: self,
187        })
188    }
189
190    /// Create `VidDisperse` out of an iterator to `VidDisperseShare`s
191    pub fn to_advz_disperse<'a, I>(mut it: I) -> Option<ADVZDisperse<TYPES>>
192    where
193        I: Iterator<Item = &'a Self>,
194    {
195        let first_vid_disperse_share = it.next()?.clone();
196        let mut share_map = BTreeMap::new();
197        share_map.insert(
198            first_vid_disperse_share.recipient_key,
199            first_vid_disperse_share.share,
200        );
201        let mut vid_disperse = ADVZDisperse {
202            view_number: first_vid_disperse_share.view_number,
203            epoch: None,
204            target_epoch: None,
205            payload_commitment: first_vid_disperse_share.payload_commitment,
206            common: first_vid_disperse_share.common,
207            shares: share_map,
208        };
209        let _ = it.map(|vid_disperse_share| {
210            vid_disperse.shares.insert(
211                vid_disperse_share.recipient_key.clone(),
212                vid_disperse_share.share.clone(),
213            )
214        });
215        Some(vid_disperse)
216    }
217
218    /// Split a VID share proposal into a proposal for each recipient.
219    pub fn to_vid_share_proposals(
220        vid_disperse: ADVZDisperse<TYPES>,
221        signature: &<TYPES::SignatureKey as SignatureKey>::PureAssembledSignatureType,
222    ) -> Vec<Proposal<TYPES, Self>> {
223        vid_disperse
224            .shares
225            .into_iter()
226            .map(|(recipient_key, share)| Proposal {
227                data: Self {
228                    share,
229                    recipient_key,
230                    view_number: vid_disperse.view_number,
231                    common: vid_disperse.common.clone(),
232                    payload_commitment: vid_disperse.payload_commitment,
233                },
234                signature: signature.clone(),
235                _pd: PhantomData,
236            })
237            .collect()
238    }
239
240    /// Internally verify the share given necessary information
241    ///
242    /// # Errors
243    /// Verification fail
244    #[allow(clippy::result_unit_err)]
245    pub fn verify_share(&self, total_weight: usize) -> std::result::Result<(), ()> {
246        advz_scheme(total_weight)
247            .verify_share(&self.share, &self.common, &self.payload_commitment)
248            .unwrap_or(Err(()))
249    }
250
251    /// Returns the payload length in bytes.
252    pub fn payload_byte_len(&self) -> u32 {
253        ADVZScheme::get_payload_byte_len(&self.common)
254    }
255}
256
/// AvidM dispersal data
///
/// Counterpart of [`ADVZDisperse`] for the AvidM scheme; additionally records
/// the payload byte length, which AvidM common data does not carry.
/// (The previous doc comment said "ADVZ" — copy-paste error.)
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
pub struct AvidMDisperse<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: TYPES::View,
    /// Epoch the data of this proposal belongs to
    pub epoch: Option<TYPES::Epoch>,
    /// Epoch to which the recipients of this VID belong to
    pub target_epoch: Option<TYPES::Epoch>,
    /// VidCommitment calculated based on the number of nodes in `target_epoch`.
    pub payload_commitment: AvidMCommitment,
    /// A storage node's key and its corresponding VID share
    pub shares: BTreeMap<TYPES::SignatureKey, AvidMShare>,
    /// Length of payload in bytes
    pub payload_byte_len: usize,
    /// VID common data sent to all storage nodes
    pub common: AvidMCommon,
}
275
impl<TYPES: NodeType> HasViewNumber<TYPES> for AvidMDisperse<TYPES> {
    /// Returns the view number this dispersal is intended for.
    fn view_number(&self) -> TYPES::View {
        self.view_number
    }
}
281
/// The target total stake to scale to for VID.
///
/// Stake tables whose total stake exceeds this are scaled down so the
/// per-node VID weights sum to approximately this value (see
/// `approximate_weights`).
pub const VID_TARGET_TOTAL_STAKE: u32 = 1000;
284
/// The weights and total weight used in VID calculations
struct Weights {
    /// Per-node weights, in stake table order.
    weights: Vec<u32>,

    /// Sum of all entries in `weights`.
    total_weight: usize,
}
293
294pub fn vid_total_weight<TYPES: NodeType>(
295    stake_table: &HSStakeTable<TYPES>,
296    epoch: Option<TYPES::Epoch>,
297) -> usize {
298    if epoch.is_none() {
299        stake_table
300            .iter()
301            .fold(U256::ZERO, |acc, entry| {
302                acc + entry.stake_table_entry.stake()
303            })
304            .to::<usize>()
305    } else {
306        approximate_weights(stake_table).total_weight
307    }
308}
309
310fn approximate_weights<TYPES: NodeType>(stake_table: &HSStakeTable<TYPES>) -> Weights {
311    let total_stake = stake_table.iter().fold(U256::ZERO, |acc, entry| {
312        acc + entry.stake_table_entry.stake()
313    });
314
315    let mut total_weight: usize = 0;
316
317    // don't attempt to scale if the total stake is small enough
318    if total_stake <= U256::from(VID_TARGET_TOTAL_STAKE) {
319        let weights = stake_table
320            .iter()
321            .map(|entry| entry.stake_table_entry.stake().to::<u32>())
322            .collect();
323
324        // Note: this panics if `total_stake` exceeds `usize::MAX`, but this shouldn't happen.
325        total_weight = total_stake.to::<usize>();
326
327        Weights {
328            weights,
329            total_weight,
330        }
331    } else {
332        let weights = stake_table
333            .iter()
334            .map(|entry| {
335                let weight: U256 = ((entry.stake_table_entry.stake()
336                    * U256::from(VID_TARGET_TOTAL_STAKE))
337                    / total_stake)
338                    + U256::ONE;
339
340                // Note: this panics if `weight` exceeds `usize::MAX`, but this shouldn't happen.
341                total_weight += weight.to::<usize>();
342
343                // Note: this panics if `weight` exceeds `u32::MAX`, but this shouldn't happen
344                // and would likely cause a stack overflow in the VID calculation anyway
345                weight.to::<u32>()
346            })
347            .collect();
348
349        Weights {
350            weights,
351            total_weight,
352        }
353    }
354}
355
356impl<TYPES: NodeType> AvidMDisperse<TYPES> {
357    /// Create VID dispersal from a specified membership for the target epoch.
358    /// Uses the specified function to calculate share dispersal
359    /// Allows for more complex stake table functionality
360    async fn from_membership(
361        view_number: TYPES::View,
362        commit: AvidMCommitment,
363        shares: &[AvidMShare],
364        common: AvidMCommon,
365        membership: &EpochMembership<TYPES>,
366        target_epoch: Option<TYPES::Epoch>,
367        data_epoch: Option<TYPES::Epoch>,
368    ) -> Self {
369        let payload_byte_len = shares[0].payload_byte_len();
370        let shares = membership
371            .coordinator
372            .membership_for_epoch(target_epoch)
373            .await
374            .unwrap()
375            .stake_table()
376            .await
377            .iter()
378            .map(|entry| entry.stake_table_entry.public_key())
379            .zip(shares)
380            .map(|(node, share)| (node.clone(), share.clone()))
381            .collect();
382
383        Self {
384            view_number,
385            shares,
386            payload_commitment: commit,
387            epoch: data_epoch,
388            target_epoch,
389            payload_byte_len,
390            common,
391        }
392    }
393
394    /// Calculate the vid disperse information from the payload given a view, epoch and membership,
395    /// If the sender epoch is missing, it means it's the same as the target epoch.
396    ///
397    /// # Errors
398    /// Returns an error if the disperse or commitment calculation fails
399    #[allow(clippy::panic)]
400    #[allow(clippy::single_range_in_vec_init)]
401    pub async fn calculate_vid_disperse(
402        payload: &TYPES::BlockPayload,
403        membership: &EpochMembershipCoordinator<TYPES>,
404        view: TYPES::View,
405        target_epoch: Option<TYPES::Epoch>,
406        data_epoch: Option<TYPES::Epoch>,
407        metadata: &<TYPES::BlockPayload as BlockPayload<TYPES>>::Metadata,
408    ) -> Result<Self> {
409        let target_mem = membership.membership_for_epoch(target_epoch).await?;
410        let stake_table = target_mem.stake_table().await;
411        let approximate_weights = approximate_weights(&stake_table);
412
413        let txns = payload.encode();
414        let num_txns = txns.len();
415
416        let avidm_param = init_avidm_param(approximate_weights.total_weight)?;
417        let common = avidm_param.clone();
418
419        let ns_table = parse_ns_table(num_txns, &metadata.encode());
420        let ns_table_clone = ns_table.clone();
421        let (commit, shares) = spawn_blocking(move || {
422            AvidMScheme::ns_disperse(
423                &avidm_param,
424                &approximate_weights.weights,
425                &txns,
426                ns_table_clone,
427            )
428        })
429        .await
430        .wrap()
431        .context(error!("Join error"))?
432        .wrap()
433        .context(|err| error!("Failed to calculate VID disperse. Error: {}", err))?;
434
435        Ok(Self::from_membership(
436            view,
437            commit,
438            &shares,
439            common,
440            &target_mem,
441            target_epoch,
442            data_epoch,
443        )
444        .await)
445    }
446
447    /// Returns the payload length in bytes.
448    pub fn payload_byte_len(&self) -> u32 {
449        self.payload_byte_len as u32
450    }
451}
452
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
/// VID share and associated metadata for a single node
///
/// The per-recipient slice of an [`AvidMDisperse`]; unlike
/// `ADVZDisperseShare`, this version carries epoch information.
pub struct VidDisperseShare2<TYPES: NodeType> {
    /// The view number for which this VID data is intended
    pub view_number: TYPES::View,
    /// The epoch number for which this VID data belongs to
    pub epoch: Option<TYPES::Epoch>,
    /// The epoch number to which the recipient of this VID belongs to
    pub target_epoch: Option<TYPES::Epoch>,
    /// Block payload commitment
    pub payload_commitment: AvidMCommitment,
    /// A storage node's key and its corresponding VID share
    pub share: AvidMShare,
    /// a public key of the share recipient
    pub recipient_key: TYPES::SignatureKey,
    /// VID common data sent to all storage nodes
    pub common: AvidMCommon,
}
471
impl<TYPES: NodeType> HasViewNumber<TYPES> for VidDisperseShare2<TYPES> {
    /// Returns the view number this share is intended for.
    fn view_number(&self) -> TYPES::View {
        self.view_number
    }
}
477
478impl<TYPES: NodeType> VidDisperseShare2<TYPES> {
479    /// Create a vector of `VidDisperseShare` from `VidDisperse`
480    pub fn from_vid_disperse(vid_disperse: AvidMDisperse<TYPES>) -> Vec<Self> {
481        vid_disperse
482            .shares
483            .into_iter()
484            .map(|(recipient_key, share)| Self {
485                share,
486                recipient_key,
487                view_number: vid_disperse.view_number,
488                payload_commitment: vid_disperse.payload_commitment,
489                epoch: vid_disperse.epoch,
490                target_epoch: vid_disperse.target_epoch,
491                common: vid_disperse.common.clone(),
492            })
493            .collect()
494    }
495
496    /// Consume `self` and return a `Proposal`
497    pub fn to_proposal(
498        self,
499        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
500    ) -> Option<Proposal<TYPES, Self>> {
501        let Ok(signature) =
502            TYPES::SignatureKey::sign(private_key, self.payload_commitment.as_ref())
503        else {
504            tracing::error!("VID: failed to sign dispersal share payload");
505            return None;
506        };
507        Some(Proposal {
508            signature,
509            _pd: PhantomData,
510            data: self,
511        })
512    }
513
514    /// Create `VidDisperse` out of an iterator to `VidDisperseShare`s
515    pub fn to_vid_disperse<'a, I>(mut it: I) -> Option<AvidMDisperse<TYPES>>
516    where
517        I: Iterator<Item = &'a Self>,
518    {
519        let first_vid_disperse_share = it.next()?.clone();
520        let payload_byte_len = first_vid_disperse_share.share.payload_byte_len();
521        let mut share_map = BTreeMap::new();
522        share_map.insert(
523            first_vid_disperse_share.recipient_key,
524            first_vid_disperse_share.share,
525        );
526        let mut vid_disperse = AvidMDisperse {
527            view_number: first_vid_disperse_share.view_number,
528            epoch: first_vid_disperse_share.epoch,
529            target_epoch: first_vid_disperse_share.target_epoch,
530            payload_commitment: first_vid_disperse_share.payload_commitment,
531            shares: share_map,
532            payload_byte_len,
533            common: first_vid_disperse_share.common,
534        };
535        let _ = it.map(|vid_disperse_share| {
536            vid_disperse.shares.insert(
537                vid_disperse_share.recipient_key.clone(),
538                vid_disperse_share.share.clone(),
539            )
540        });
541        Some(vid_disperse)
542    }
543
544    /// Returns the payload length in bytes.
545    pub fn payload_byte_len(&self) -> u32 {
546        self.share.payload_byte_len() as u32
547    }
548
549    /// Split a VID share proposal into a proposal for each recipient.
550    pub fn to_vid_share_proposals(
551        vid_disperse: AvidMDisperse<TYPES>,
552        signature: &<TYPES::SignatureKey as SignatureKey>::PureAssembledSignatureType,
553    ) -> Vec<Proposal<TYPES, Self>> {
554        vid_disperse
555            .shares
556            .into_iter()
557            .map(|(recipient_key, share)| Proposal {
558                data: Self {
559                    share,
560                    recipient_key,
561                    view_number: vid_disperse.view_number,
562                    payload_commitment: vid_disperse.payload_commitment,
563                    epoch: vid_disperse.epoch,
564                    target_epoch: vid_disperse.target_epoch,
565                    common: vid_disperse.common.clone(),
566                },
567                signature: signature.clone(),
568                _pd: PhantomData,
569            })
570            .collect()
571    }
572
573    /// Internally verify the share given necessary information
574    ///
575    /// # Errors
576    #[allow(clippy::result_unit_err)]
577    pub fn verify_share(&self, total_weight: usize) -> std::result::Result<(), ()> {
578        let avidm_param = init_avidm_param(total_weight).map_err(|_| ())?;
579        AvidMScheme::verify_share(&avidm_param, &self.payload_commitment, &self.share)
580            .unwrap_or(Err(()))
581    }
582}