// vid/avidm_gf2/namespaced.rs

1//! This file implements the namespaced AvidmGf2 scheme.
2
3use std::ops::Range;
4
5use jf_merkle_tree::MerkleTreeScheme;
6use serde::{Deserialize, Serialize};
7
8use super::{AvidmGf2Commit, AvidmGf2Share};
9use crate::{
10    avidm_gf2::{AvidmGf2Scheme, MerkleTree},
11    VidError, VidResult, VidScheme,
12};
13
/// Marker type for the namespaced AvidmGf2 scheme; it carries no state and
/// all functionality is provided through associated functions.
pub struct NsAvidmGf2Scheme;
16
/// Namespaced commitment type: identical to the non-namespaced commitment,
/// here holding a Merkle-tree root computed over the per-namespace commitments.
pub type NsAvidmGf2Commit = super::AvidmGf2Commit;
/// Namespaced parameter type, reusing the non-namespaced scheme parameters.
pub type NsAvidmGf2Param = super::AvidmGf2Param;
21
/// VID Common data that needs to be broadcasted to all storage nodes
#[derive(Clone, Debug, Hash, Serialize, Deserialize, Eq, PartialEq)]
pub struct NsAvidmGf2Common {
    /// The AvidmGf2 parameters
    pub param: NsAvidmGf2Param,
    /// The list of all namespace commitments, in namespace order
    pub ns_commits: Vec<AvidmGf2Commit>,
    /// The byte length of each namespace; parallel to `ns_commits`
    pub ns_lens: Vec<usize>,
}
32
33impl NsAvidmGf2Common {
34    /// Return the total payload byte length
35    pub fn payload_byte_len(&self) -> usize {
36        self.ns_lens.iter().sum()
37    }
38}
39
/// Namespaced share for each storage node, contains one [`AvidmGf2Share`] for each namespace.
/// Inner shares are stored in namespace order, matching `NsAvidmGf2Common::ns_commits`.
#[derive(Clone, Debug, Hash, Serialize, Deserialize, Eq, PartialEq, Default)]
pub struct NsAvidmGf2Share(pub(crate) Vec<AvidmGf2Share>);
43
44impl NsAvidmGf2Share {
45    /// Return the number of namespaces in this share
46    pub fn num_nss(&self) -> usize {
47        self.0.len()
48    }
49
50    /// Return the weight of this share
51    pub fn weight(&self) -> usize {
52        self.0.first().map_or(0, |share| share.weight())
53    }
54
55    /// Validate the share structure
56    pub fn validate(&self) -> bool {
57        let weight = self.weight();
58        self.0
59            .iter()
60            .all(|share| share.validate() && share.weight() == weight)
61    }
62
63    /// Check whether this share contains a given namespace
64    pub fn contains_ns(&self, ns_index: usize) -> bool {
65        ns_index < self.num_nss()
66    }
67
68    /// Return the inner share for a given namespace if there exists one.
69    pub fn inner_ns_share(&self, ns_index: usize) -> Option<AvidmGf2Share> {
70        self.0.get(ns_index).cloned()
71    }
72}
73
74impl NsAvidmGf2Scheme {
75    /// Setup an instance for AVID-M scheme
76    pub fn setup(recovery_threshold: usize, total_weights: usize) -> VidResult<NsAvidmGf2Param> {
77        NsAvidmGf2Param::new(recovery_threshold, total_weights)
78    }
79
80    /// Commit to a payload given namespace table.
81    /// WARN: it assumes that the namespace table is well formed, i.e. ranges
82    /// are non-overlapping and cover the whole payload.
83    pub fn commit(
84        param: &NsAvidmGf2Param,
85        payload: &[u8],
86        ns_table: impl IntoIterator<Item = Range<usize>>,
87    ) -> VidResult<(NsAvidmGf2Commit, NsAvidmGf2Common)> {
88        let ns_table = ns_table.into_iter().collect::<Vec<_>>();
89        let ns_lens = ns_table.iter().map(|r| r.len()).collect::<Vec<_>>();
90        let ns_commits = ns_table
91            .into_iter()
92            .map(|ns_range| AvidmGf2Scheme::commit(param, &payload[ns_range]))
93            .collect::<Result<Vec<_>, _>>()?;
94        let common = NsAvidmGf2Common {
95            param: param.clone(),
96            ns_commits,
97            ns_lens,
98        };
99        let commit = MerkleTree::from_elems(None, common.ns_commits.iter().map(|c| c.commit))
100            .map_err(|err| VidError::Internal(err.into()))?
101            .commitment();
102        Ok((NsAvidmGf2Commit { commit }, common))
103    }
104
105    /// Check whether the namespaced commitment is consistent with the common data
106    pub fn is_consistent(commit: &NsAvidmGf2Commit, common: &NsAvidmGf2Common) -> bool {
107        let Ok(mt) =
108            MerkleTree::from_elems(None, common.ns_commits.iter().map(|commit| commit.commit))
109        else {
110            return false;
111        };
112        commit.commit == mt.commitment()
113    }
114
115    /// Disperse a payload according to a distribution table and a namespace
116    /// table.
117    /// WARN: it assumes that the namespace table is well formed, i.e. ranges
118    /// are non-overlapping and cover the whole payload.
119    pub fn ns_disperse(
120        param: &NsAvidmGf2Param,
121        distribution: &[u32],
122        payload: &[u8],
123        ns_table: impl IntoIterator<Item = Range<usize>>,
124    ) -> VidResult<(NsAvidmGf2Commit, NsAvidmGf2Common, Vec<NsAvidmGf2Share>)> {
125        let num_storage_nodes = distribution.len();
126        let mut ns_commits = vec![];
127        let mut disperses = vec![];
128        let mut ns_lens = vec![];
129        for ns_range in ns_table {
130            ns_lens.push(ns_range.len());
131            let (commit, shares) =
132                AvidmGf2Scheme::disperse(param, distribution, &payload[ns_range])?;
133            ns_commits.push(commit);
134            disperses.push(shares);
135        }
136        let common = NsAvidmGf2Common {
137            param: param.clone(),
138            ns_commits,
139            ns_lens,
140        };
141        let commit = NsAvidmGf2Commit {
142            commit: MerkleTree::from_elems(None, common.ns_commits.iter().map(|c| c.commit))
143                .map_err(|err| VidError::Internal(err.into()))?
144                .commitment(),
145        };
146        let mut shares = vec![NsAvidmGf2Share::default(); num_storage_nodes];
147        disperses.into_iter().for_each(|ns_disperse| {
148            shares
149                .iter_mut()
150                .zip(ns_disperse)
151                .for_each(|(share, ns_share)| share.0.push(ns_share))
152        });
153        Ok((commit, common, shares))
154    }
155
156    /// Verify a namespaced share
157    pub fn verify_share(
158        commit: &NsAvidmGf2Commit,
159        common: &NsAvidmGf2Common,
160        share: &NsAvidmGf2Share,
161    ) -> VidResult<crate::VerificationResult> {
162        if !(common.ns_commits.len() == common.ns_lens.len()
163            && common.ns_commits.len() == share.num_nss()
164            && share.validate())
165        {
166            return Err(VidError::InvalidShare);
167        }
168        // Verify the share for each namespace
169        for (commit, content) in common.ns_commits.iter().zip(share.0.iter()) {
170            if AvidmGf2Scheme::verify_share(&common.param, commit, content)?.is_err() {
171                return Ok(Err(()));
172            }
173        }
174        // Verify the namespace MT commitment
175        let expected_commit = NsAvidmGf2Commit {
176            commit: MerkleTree::from_elems(
177                None,
178                common.ns_commits.iter().map(|commit| commit.commit),
179            )
180            .map_err(|err| VidError::Internal(err.into()))?
181            .commitment(),
182        };
183        Ok(if &expected_commit == commit {
184            Ok(())
185        } else {
186            Err(())
187        })
188    }
189
190    /// Recover the entire payload from enough share
191    pub fn recover(common: &NsAvidmGf2Common, shares: &[NsAvidmGf2Share]) -> VidResult<Vec<u8>> {
192        if shares.is_empty() {
193            return Err(VidError::InsufficientShares);
194        }
195        let mut result = vec![];
196        for ns_index in 0..common.ns_lens.len() {
197            result.append(&mut Self::ns_recover(common, ns_index, shares)?)
198        }
199        Ok(result)
200    }
201
202    /// Recover the payload for a given namespace.
203    /// Given namespace ID should be valid for all shares, i.e. `ns_commits` and `content` have
204    /// at least `ns_index` elements for all shares.
205    pub fn ns_recover(
206        common: &NsAvidmGf2Common,
207        ns_index: usize,
208        shares: &[NsAvidmGf2Share],
209    ) -> VidResult<Vec<u8>> {
210        if shares.is_empty() {
211            return Err(VidError::InsufficientShares);
212        }
213        if ns_index >= common.ns_lens.len()
214            || !shares.iter().all(|share| share.contains_ns(ns_index))
215        {
216            return Err(VidError::IndexOutOfBound);
217        }
218        let ns_commit = &common.ns_commits[ns_index];
219        let shares: Vec<_> = shares
220            .iter()
221            .filter_map(|share| share.inner_ns_share(ns_index))
222            .collect();
223        AvidmGf2Scheme::recover(&common.param, ns_commit, &shares)
224    }
225}
226
/// Unit tests
#[cfg(test)]
pub mod tests {
    use rand::{seq::SliceRandom, RngCore};

    use crate::avidm_gf2::namespaced::NsAvidmGf2Scheme;

    #[test]
    fn round_trip() {
        // Tunable test parameters.
        let num_storage_nodes = 9;
        let ns_lens = [15, 33];
        let ns_table = [(0usize..15), (15..48)];
        let payload_byte_len = ns_lens.iter().sum();

        let mut rng = jf_utils::test_rng();

        // Quantities derived from the parameters above.
        let weights: Vec<u32> = (0..num_storage_nodes)
            .map(|_| rng.next_u32() % 5 + 1)
            .collect();
        let total_weights: u32 = weights.iter().sum();
        let recovery_threshold = total_weights.div_ceil(3) as usize;
        let params = NsAvidmGf2Scheme::setup(recovery_threshold, total_weights as usize).unwrap();

        println!(
            "recovery_threshold:: {recovery_threshold} num_storage_nodes: {num_storage_nodes} payload_byte_len: {payload_byte_len}"
        );
        println!("weights: {weights:?}");

        // Random payload of the expected total length.
        let mut payload = vec![0u8; payload_byte_len];
        rng.fill_bytes(&mut payload);

        let (commit, common, mut shares) =
            NsAvidmGf2Scheme::ns_disperse(&params, &weights, &payload, ns_table.iter().cloned())
                .unwrap();

        assert_eq!(shares.len(), num_storage_nodes);

        // Committing directly must agree with the dispersal commitment.
        let (recomputed_commit, _) =
            NsAvidmGf2Scheme::commit(&params, &payload, ns_table.iter().cloned()).unwrap();
        assert_eq!(commit, recomputed_commit);

        // Every dispersed share must verify against the commitment.
        for share in &shares {
            let verdict = NsAvidmGf2Scheme::verify_share(&commit, &common, share);
            assert!(verdict.is_ok_and(|r| r.is_ok()));
        }

        // Recover from a random subset of shares with enough cumulative weight.
        shares.shuffle(&mut rng);
        let mut accumulated_weight = 0;
        let mut cut_index = 0;
        loop {
            if accumulated_weight > recovery_threshold {
                break;
            }
            accumulated_weight += shares[cut_index].weight();
            cut_index += 1;
        }
        let subset = &shares[..cut_index];
        let ns0_payload_recovered = NsAvidmGf2Scheme::ns_recover(&common, 0, subset).unwrap();
        assert_eq!(ns0_payload_recovered[..], payload[ns_table[0].clone()]);
        let ns1_payload_recovered = NsAvidmGf2Scheme::ns_recover(&common, 1, subset).unwrap();
        assert_eq!(ns1_payload_recovered[..], payload[ns_table[1].clone()]);
        let payload_recovered = NsAvidmGf2Scheme::recover(&common, subset).unwrap();
        assert_eq!(payload_recovered, payload);
    }
}
299}