// vid/avid_m/namespaced.rs
1//! This file implements the namespaced AvidM scheme.
2
3use std::ops::Range;
4
5use jf_merkle_tree::MerkleTreeScheme;
6use serde::{Deserialize, Serialize};
7
8use super::{AvidMCommit, AvidMShare, RawAvidMShare};
9use crate::{
10    avid_m::{AvidMScheme, MerkleTree},
11    VidError, VidResult, VidScheme,
12};
13
/// Dummy struct for namespaced AvidM scheme; it carries no state, and all
/// operations are provided as associated functions.
pub struct NsAvidMScheme;

/// Namespaced commitment type: a Merkle-tree commitment over the list of
/// per-namespace AvidM commitments (same underlying type as `AvidMCommit`).
pub type NsAvidMCommit = super::AvidMCommit;
/// Namespaced parameter type, identical to the plain AvidM parameters.
pub type NsAvidMParam = super::AvidMParam;
21
/// Namespaced share for each storage node.
///
/// `ns_commits`, `ns_lens`, and `content` are parallel vectors indexed by
/// namespace id; `verify_share` rejects a share whose lengths disagree.
#[derive(Clone, Debug, Hash, Serialize, Deserialize, Eq, PartialEq, Default)]
pub struct NsAvidMShare {
    /// Index number of the given share.
    index: u32,
    /// The list of all namespace commitments, in namespace order.
    ns_commits: Vec<AvidMCommit>,
    /// The size of each namespace in bytes, parallel to `ns_commits`.
    ns_lens: Vec<usize>,
    /// Actual share content for each namespace, parallel to `ns_commits`.
    content: Vec<RawAvidMShare>,
}
34
35impl NsAvidMShare {
36    fn inner_ns_share(&self, ns_id: usize) -> AvidMShare {
37        AvidMShare {
38            index: self.index,
39            payload_byte_len: self.ns_lens[ns_id],
40            content: self.content[ns_id].clone(),
41        }
42    }
43
44    /// Return the length of underlying payload in bytes
45    pub fn payload_byte_len(&self) -> usize {
46        self.ns_lens.iter().sum()
47    }
48}
49
50impl NsAvidMScheme {
51    /// Setup an instance for AVID-M scheme
52    pub fn setup(recovery_threshold: usize, total_weights: usize) -> VidResult<NsAvidMParam> {
53        NsAvidMParam::new(recovery_threshold, total_weights)
54    }
55
56    /// Commit to a payload given namespace table.
57    /// WARN: it assumes that the namespace table is well formed, i.e. ranges
58    /// are non-overlapping and cover the whole payload.
59    pub fn commit(
60        param: &NsAvidMParam,
61        payload: &[u8],
62        ns_table: impl IntoIterator<Item = Range<usize>>,
63    ) -> VidResult<NsAvidMCommit> {
64        let ns_commits = ns_table
65            .into_iter()
66            .map(|ns_range| {
67                AvidMScheme::commit(param, &payload[ns_range]).map(|commit| commit.commit)
68            })
69            .collect::<Result<Vec<_>, _>>()?;
70        Ok(NsAvidMCommit {
71            commit: MerkleTree::from_elems(None, &ns_commits)
72                .map_err(|err| VidError::Internal(err.into()))?
73                .commitment(),
74        })
75    }
76
77    /// Disperse a payload according to a distribution table and a namespace
78    /// table.
79    /// WARN: it assumes that the namespace table is well formed, i.e. ranges
80    /// are non-overlapping and cover the whole payload.
81    pub fn ns_disperse(
82        param: &NsAvidMParam,
83        distribution: &[u32],
84        payload: &[u8],
85        ns_table: impl IntoIterator<Item = Range<usize>>,
86    ) -> VidResult<(NsAvidMCommit, Vec<NsAvidMShare>)> {
87        let mut ns_commits = vec![];
88        let mut disperses = vec![];
89        let mut ns_lens = vec![];
90        for ns_range in ns_table {
91            ns_lens.push(ns_range.len());
92            let (commit, shares) = AvidMScheme::disperse(param, distribution, &payload[ns_range])?;
93            ns_commits.push(commit.commit);
94            disperses.push(shares);
95        }
96        let commit = NsAvidMCommit {
97            commit: MerkleTree::from_elems(None, &ns_commits)
98                .map_err(|err| VidError::Internal(err.into()))?
99                .commitment(),
100        };
101        let ns_commits: Vec<_> = ns_commits
102            .into_iter()
103            .map(|comm| AvidMCommit { commit: comm })
104            .collect();
105        let mut shares = vec![NsAvidMShare::default(); disperses[0].len()];
106        shares.iter_mut().for_each(|share| {
107            share.index = disperses[0][0].index;
108            share.ns_commits = ns_commits.clone();
109            share.ns_lens = ns_lens.clone();
110        });
111        disperses.into_iter().for_each(|ns_disperse| {
112            shares
113                .iter_mut()
114                .zip(ns_disperse)
115                .for_each(|(share, ns_share)| share.content.push(ns_share.content))
116        });
117        Ok((commit, shares))
118    }
119
120    /// Verify a namespaced share
121    pub fn verify_share(
122        param: &NsAvidMParam,
123        commit: &NsAvidMCommit,
124        share: &NsAvidMShare,
125    ) -> VidResult<crate::VerificationResult> {
126        if !(share.ns_commits.len() == share.ns_lens.len()
127            && share.ns_commits.len() == share.content.len())
128        {
129            return Err(VidError::InvalidShare);
130        }
131        // Verify the share for each namespace
132        for (commit, content) in share.ns_commits.iter().zip(share.content.iter()) {
133            if AvidMScheme::verify_internal(param, commit, content)?.is_err() {
134                return Ok(Err(()));
135            }
136        }
137        // Verify the namespace MT commitment
138        let expected_commit = NsAvidMCommit {
139            commit: MerkleTree::from_elems(
140                None,
141                share.ns_commits.iter().map(|commit| commit.commit),
142            )
143            .map_err(|err| VidError::Internal(err.into()))?
144            .commitment(),
145        };
146        Ok(if &expected_commit == commit {
147            Ok(())
148        } else {
149            Err(())
150        })
151    }
152
153    /// Recover the entire payload from enough share
154    pub fn recover(param: &NsAvidMParam, shares: &[NsAvidMShare]) -> VidResult<Vec<u8>> {
155        if shares.is_empty() {
156            return Err(VidError::InsufficientShares);
157        }
158        let mut result = vec![];
159        for ns_id in 0..shares[0].ns_lens.len() {
160            result.append(&mut Self::ns_recover(param, ns_id, shares)?)
161        }
162        Ok(result)
163    }
164
165    /// Recover the payload for a given namespace.
166    /// Given namespace ID should be valid for all shares, i.e. `ns_commits` and `content` have
167    /// at least `ns_id` elements for all shares.
168    pub fn ns_recover(
169        param: &NsAvidMParam,
170        ns_id: usize,
171        shares: &[NsAvidMShare],
172    ) -> VidResult<Vec<u8>> {
173        if shares.is_empty() {
174            return Err(VidError::InsufficientShares);
175        }
176        if shares
177            .iter()
178            .any(|share| ns_id >= share.ns_lens.len() || ns_id >= share.content.len())
179        {
180            return Err(VidError::IndexOutOfBound);
181        }
182        let ns_commit = shares[0].ns_commits[ns_id];
183        let shares: Vec<_> = shares
184            .iter()
185            .map(|share| share.inner_ns_share(ns_id))
186            .collect();
187        AvidMScheme::recover(param, &ns_commit, &shares)
188    }
189}
190
/// Unit tests
#[cfg(test)]
pub mod tests {
    use rand::{seq::SliceRandom, RngCore};

    use crate::avid_m::namespaced::NsAvidMScheme;

    /// End-to-end exercise of the namespaced scheme: disperse a two-namespace
    /// payload, check commitment consistency, verify every share, then
    /// recover each namespace and the full payload from a random prefix of
    /// shuffled shares.
    #[test]
    fn round_trip() {
        // play with these items
        let num_storage_nodes = 9;
        let recovery_threshold = 3;
        let ns_lens = [15, 33];
        // Two adjacent, non-overlapping ranges covering bytes 0..48.
        let ns_table = [(0usize..15), (15..48)];
        let payload_byte_len = ns_lens.iter().sum();

        // more items as a function of the above

        let mut rng = jf_utils::test_rng();

        // Random per-node weights drawn in 1..=5.
        let weights: Vec<u32> = (0..num_storage_nodes)
            .map(|_| rng.next_u32() % 5 + 1)
            .collect();
        let total_weights: u32 = weights.iter().sum();
        let params = NsAvidMScheme::setup(recovery_threshold, total_weights as usize).unwrap();

        println!(
            "recovery_threshold:: {} num_storage_nodes: {} payload_byte_len: {}",
            recovery_threshold, num_storage_nodes, payload_byte_len
        );
        println!("weights: {:?}", weights);

        // Random payload spanning both namespaces.
        let payload = {
            let mut bytes_random = vec![0u8; payload_byte_len];
            rng.fill_bytes(&mut bytes_random);
            bytes_random
        };

        let (commit, mut shares) =
            NsAvidMScheme::ns_disperse(&params, &weights, &payload, ns_table.iter().cloned())
                .unwrap();

        // One namespaced share per storage node.
        assert_eq!(shares.len(), num_storage_nodes);

        // Dispersal commitment must match the standalone commit computation.
        assert_eq!(
            commit,
            NsAvidMScheme::commit(&params, &payload, ns_table.iter().cloned()).unwrap()
        );

        // verify shares
        shares.iter().for_each(|share| {
            assert!(NsAvidMScheme::verify_share(&params, &commit, share).is_ok_and(|r| r.is_ok()))
        });

        // test payload recovery on a random subset of shares
        shares.shuffle(&mut rng);
        // Take the shortest prefix of shuffled shares whose cumulated weight
        // (measured here via the node's assigned range length in namespace 0
        // — presumably equal to its weight; confirm against AvidM disperse)
        // exceeds the recovery threshold.
        let mut cumulated_weights = 0;
        let mut cut_index = 0;
        while cumulated_weights <= recovery_threshold {
            cumulated_weights += shares[cut_index].content[0].range.len();
            cut_index += 1;
        }
        let ns0_payload_recovered =
            NsAvidMScheme::ns_recover(&params, 0, &shares[..cut_index]).unwrap();
        assert_eq!(ns0_payload_recovered[..], payload[ns_table[0].clone()]);
        let ns1_payload_recovered =
            NsAvidMScheme::ns_recover(&params, 1, &shares[..cut_index]).unwrap();
        assert_eq!(ns1_payload_recovered[..], payload[ns_table[1].clone()]);
        let payload_recovered = NsAvidMScheme::recover(&params, &shares[..cut_index]).unwrap();
        assert_eq!(payload_recovered, payload);
    }
}