1use std::ops::Range;
4
5use jf_merkle_tree::MerkleTreeScheme;
6use serde::{Deserialize, Serialize};
7
8use super::{AvidmGf2Commit, AvidmGf2Share};
9use crate::{
10 avidm_gf2::{AvidmGf2Scheme, MerkleTree},
11 VidError, VidResult, VidScheme,
12};
13
/// Namespaced AVID-M scheme over GF(2): wraps the inner [`AvidmGf2Scheme`] so a
/// payload partitioned into namespaces can be committed, dispersed, verified,
/// and recovered per namespace.
pub struct NsAvidmGf2Scheme;
16
/// Commitment for the namespaced scheme; same type as the inner scheme's commitment.
pub type NsAvidmGf2Commit = super::AvidmGf2Commit;
/// Public parameters for the namespaced scheme; same type as the inner scheme's parameters.
pub type NsAvidmGf2Param = super::AvidmGf2Param;
21
/// Public data produced by a namespaced commit/dispersal, shared by all storage nodes.
#[derive(Clone, Debug, Hash, Serialize, Deserialize, Eq, PartialEq)]
pub struct NsAvidmGf2Common {
    // Parameters of the underlying (non-namespaced) scheme.
    pub param: NsAvidmGf2Param,
    // Per-namespace commitments, in namespace-table order.
    pub ns_commits: Vec<AvidmGf2Commit>,
    // Byte length of each namespace's payload slice, in the same order.
    pub ns_lens: Vec<usize>,
}
32
/// A single storage node's share: one inner [`AvidmGf2Share`] per namespace,
/// in namespace-table order.
#[derive(Clone, Debug, Hash, Serialize, Deserialize, Eq, PartialEq, Default)]
pub struct NsAvidmGf2Share(pub(crate) Vec<AvidmGf2Share>);
36
37impl NsAvidmGf2Share {
38 pub fn num_nss(&self) -> usize {
40 self.0.len()
41 }
42
43 pub fn weight(&self) -> usize {
45 self.0.first().map_or(0, |share| share.weight())
46 }
47
48 pub fn validate(&self) -> bool {
50 let weight = self.weight();
51 self.0
52 .iter()
53 .all(|share| share.validate() && share.weight() == weight)
54 }
55
56 pub fn contains_ns(&self, ns_index: usize) -> bool {
58 ns_index < self.num_nss()
59 }
60
61 pub fn inner_ns_share(&self, ns_index: usize) -> Option<AvidmGf2Share> {
63 self.0.get(ns_index).cloned()
64 }
65}
66
impl NsAvidmGf2Scheme {
    /// Set up public parameters; delegates directly to the inner scheme's
    /// parameter constructor.
    ///
    /// # Errors
    /// Propagates any error from `NsAvidmGf2Param::new`.
    pub fn setup(recovery_threshold: usize, total_weights: usize) -> VidResult<NsAvidmGf2Param> {
        NsAvidmGf2Param::new(recovery_threshold, total_weights)
    }

    /// Commit to `payload` namespace by namespace.
    ///
    /// Each range in `ns_table` selects a byte slice of `payload`; an inner
    /// commitment is computed per namespace, then all inner commitments are
    /// aggregated into one Merkle-tree root commitment.
    ///
    /// # Errors
    /// - Propagates errors from `AvidmGf2Scheme::commit`.
    /// - `VidError::Internal` if Merkle-tree construction fails.
    ///
    /// NOTE(review): `&payload[ns_range]` panics if a range exceeds the payload
    /// length — presumably `ns_table` is validated upstream; confirm with callers.
    pub fn commit(
        param: &NsAvidmGf2Param,
        payload: &[u8],
        ns_table: impl IntoIterator<Item = Range<usize>>,
    ) -> VidResult<(NsAvidmGf2Commit, NsAvidmGf2Common)> {
        let ns_table = ns_table.into_iter().collect::<Vec<_>>();
        let ns_lens = ns_table.iter().map(|r| r.len()).collect::<Vec<_>>();
        // One inner commitment per namespace; short-circuits on the first error.
        let ns_commits = ns_table
            .into_iter()
            .map(|ns_range| AvidmGf2Scheme::commit(param, &payload[ns_range]))
            .collect::<Result<Vec<_>, _>>()?;
        let common = NsAvidmGf2Common {
            param: param.clone(),
            ns_commits,
            ns_lens,
        };
        // Aggregate commitment: Merkle root over the per-namespace commitments.
        let commit = MerkleTree::from_elems(None, common.ns_commits.iter().map(|c| c.commit))
            .map_err(|err| VidError::Internal(err.into()))?
            .commitment();
        Ok((NsAvidmGf2Commit { commit }, common))
    }

    /// Disperse `payload` to storage nodes, namespace by namespace.
    ///
    /// `distribution` gives each node's weight (one entry per node). Each
    /// namespace is dispersed independently via the inner scheme, and the
    /// resulting per-namespace share lists are transposed so that each node
    /// receives one `NsAvidmGf2Share` holding its inner share for every
    /// namespace, in namespace-table order.
    ///
    /// # Errors
    /// - Propagates errors from `AvidmGf2Scheme::disperse`.
    /// - `VidError::Internal` if aggregate Merkle-tree construction fails.
    ///
    /// NOTE(review): like `commit`, `&payload[ns_range]` panics on an
    /// out-of-bounds namespace range — confirm `ns_table` is pre-validated.
    pub fn ns_disperse(
        param: &NsAvidmGf2Param,
        distribution: &[u32],
        payload: &[u8],
        ns_table: impl IntoIterator<Item = Range<usize>>,
    ) -> VidResult<(NsAvidmGf2Commit, NsAvidmGf2Common, Vec<NsAvidmGf2Share>)> {
        let num_storage_nodes = distribution.len();
        let mut ns_commits = vec![];
        let mut disperses = vec![];
        let mut ns_lens = vec![];
        for ns_range in ns_table {
            ns_lens.push(ns_range.len());
            let (commit, shares) =
                AvidmGf2Scheme::disperse(param, distribution, &payload[ns_range])?;
            ns_commits.push(commit);
            disperses.push(shares);
        }
        let common = NsAvidmGf2Common {
            param: param.clone(),
            ns_commits,
            ns_lens,
        };
        // Same aggregation as `commit`: Merkle root over per-namespace commitments.
        let commit = NsAvidmGf2Commit {
            commit: MerkleTree::from_elems(None, common.ns_commits.iter().map(|c| c.commit))
                .map_err(|err| VidError::Internal(err.into()))?
                .commitment(),
        };
        // Transpose: disperses is [namespace][node]; shares is [node][namespace].
        let mut shares = vec![NsAvidmGf2Share::default(); num_storage_nodes];
        disperses.into_iter().for_each(|ns_disperse| {
            shares
                .iter_mut()
                .zip(ns_disperse)
                .for_each(|(share, ns_share)| share.0.push(ns_share))
        });
        Ok((commit, common, shares))
    }

    /// Verify a storage node's share against the aggregate commitment.
    ///
    /// Returns `Err(VidError::InvalidShare)` if the share is malformed (wrong
    /// number of namespaces, or internally inconsistent). Otherwise returns
    /// `Ok(Err(()))` when any inner share fails verification or the recomputed
    /// aggregate commitment does not match, and `Ok(Ok(()))` on success.
    pub fn verify_share(
        commit: &NsAvidmGf2Commit,
        common: &NsAvidmGf2Common,
        share: &NsAvidmGf2Share,
    ) -> VidResult<crate::VerificationResult> {
        // Shape/consistency checks: commits, lengths and share namespaces must agree.
        if !(common.ns_commits.len() == common.ns_lens.len()
            && common.ns_commits.len() == share.num_nss()
            && share.validate())
        {
            return Err(VidError::InvalidShare);
        }
        // Verify each namespace's inner share against its namespace commitment.
        for (commit, content) in common.ns_commits.iter().zip(share.0.iter()) {
            if AvidmGf2Scheme::verify_share(&common.param, commit, content)?.is_err() {
                return Ok(Err(()));
            }
        }
        // Recompute the aggregate commitment and compare with the claimed one.
        let expected_commit = NsAvidmGf2Commit {
            commit: MerkleTree::from_elems(
                None,
                common.ns_commits.iter().map(|commit| commit.commit),
            )
            .map_err(|err| VidError::Internal(err.into()))?
            .commitment(),
        };
        Ok(if &expected_commit == commit {
            Ok(())
        } else {
            Err(())
        })
    }

    /// Recover the full payload by concatenating per-namespace recoveries in
    /// namespace-table order.
    ///
    /// # Errors
    /// - `VidError::InsufficientShares` if `shares` is empty.
    /// - Propagates errors from [`Self::ns_recover`] for any namespace.
    pub fn recover(common: &NsAvidmGf2Common, shares: &[NsAvidmGf2Share]) -> VidResult<Vec<u8>> {
        if shares.is_empty() {
            return Err(VidError::InsufficientShares);
        }
        let mut result = vec![];
        for ns_index in 0..common.ns_lens.len() {
            result.append(&mut Self::ns_recover(common, ns_index, shares)?)
        }
        Ok(result)
    }

    /// Recover a single namespace's payload slice from the given shares.
    ///
    /// # Errors
    /// - `VidError::InsufficientShares` if `shares` is empty.
    /// - `VidError::IndexOutOfBound` if `ns_index` is not a valid namespace or
    ///   some share does not cover it.
    /// - Propagates errors from the inner scheme's `recover` (e.g. when the
    ///   collected shares' total weight is below the recovery threshold).
    pub fn ns_recover(
        common: &NsAvidmGf2Common,
        ns_index: usize,
        shares: &[NsAvidmGf2Share],
    ) -> VidResult<Vec<u8>> {
        if shares.is_empty() {
            return Err(VidError::InsufficientShares);
        }
        if ns_index >= common.ns_lens.len()
            || !shares.iter().all(|share| share.contains_ns(ns_index))
        {
            return Err(VidError::IndexOutOfBound);
        }
        let ns_commit = &common.ns_commits[ns_index];
        // Project each node's share down to the requested namespace.
        let shares: Vec<_> = shares
            .iter()
            .filter_map(|share| share.inner_ns_share(ns_index))
            .collect();
        AvidmGf2Scheme::recover(&common.param, ns_commit, &shares)
    }
}
209
#[cfg(test)]
pub mod tests {
    use rand::{seq::SliceRandom, RngCore};

    use crate::avidm_gf2::namespaced::NsAvidmGf2Scheme;

    /// End-to-end round trip: setup -> disperse -> verify -> per-namespace and
    /// full recovery, with random node weights and a shuffled subset of shares.
    #[test]
    fn round_trip() {
        let num_storage_nodes = 9;
        let ns_lens = [15, 33];
        let ns_table = [(0usize..15), (15..48)];
        let payload_byte_len = ns_lens.iter().sum();

        let mut rng = jf_utils::test_rng();

        // Random per-node weights in 1..=5.
        let weights: Vec<u32> = (0..num_storage_nodes)
            .map(|_| rng.next_u32() % 5 + 1)
            .collect();
        let total_weights: u32 = weights.iter().sum();
        let recovery_threshold = total_weights.div_ceil(3) as usize;
        let params = NsAvidmGf2Scheme::setup(recovery_threshold, total_weights as usize).unwrap();

        println!(
            "recovery_threshold:: {recovery_threshold} num_storage_nodes: {num_storage_nodes} \
             payload_byte_len: {payload_byte_len}"
        );
        println!("weights: {weights:?}");

        let payload = {
            let mut bytes_random = vec![0u8; payload_byte_len];
            rng.fill_bytes(&mut bytes_random);
            bytes_random
        };

        // BUGFIX: `¶ms` (mojibake from HTML-entity garbling of `&params`)
        // restored to `&params` here and in the `commit` call below.
        let (commit, common, mut shares) =
            NsAvidmGf2Scheme::ns_disperse(&params, &weights, &payload, ns_table.iter().cloned())
                .unwrap();

        assert_eq!(shares.len(), num_storage_nodes);

        // `commit` must agree with the commitment produced by dispersal.
        assert_eq!(
            commit,
            NsAvidmGf2Scheme::commit(&params, &payload, ns_table.iter().cloned())
                .unwrap()
                .0
        );

        // Every share verifies against the commitment and common data.
        shares.iter().for_each(|share| {
            assert!(NsAvidmGf2Scheme::verify_share(&commit, &common, share).is_ok_and(|r| r.is_ok()))
        });

        // Take a random prefix of shares whose cumulative weight exceeds the threshold.
        shares.shuffle(&mut rng);
        let mut cumulated_weights = 0;
        let mut cut_index = 0;
        while cumulated_weights <= recovery_threshold {
            cumulated_weights += shares[cut_index].weight();
            cut_index += 1;
        }
        let ns0_payload_recovered =
            NsAvidmGf2Scheme::ns_recover(&common, 0, &shares[..cut_index]).unwrap();
        assert_eq!(ns0_payload_recovered[..], payload[ns_table[0].clone()]);
        let ns1_payload_recovered =
            NsAvidmGf2Scheme::ns_recover(&common, 1, &shares[..cut_index]).unwrap();
        assert_eq!(ns1_payload_recovered[..], payload[ns_table[1].clone()]);
        let payload_recovered = NsAvidmGf2Scheme::recover(&common, &shares[..cut_index]).unwrap();
        assert_eq!(payload_recovered, payload);
    }
}
282}