1use std::ops::Range;
4
5use jf_merkle_tree::MerkleTreeScheme;
6use serde::{Deserialize, Serialize};
7
8use super::{AvidmGf2Commit, AvidmGf2Share};
9use crate::{
10 avidm_gf2::{AvidmGf2Scheme, MerkleTree},
11 VidError, VidResult, VidScheme,
12};
13
/// Namespaced AVID-M scheme over GF(2). Stateless: all operations are
/// associated functions that take the parameters explicitly.
pub struct NsAvidmGf2Scheme;
16
/// Commitment type for the namespaced scheme; same as the base scheme's commitment.
pub type NsAvidmGf2Commit = super::AvidmGf2Commit;
/// Public-parameter type for the namespaced scheme; same as the base scheme's parameters.
pub type NsAvidmGf2Param = super::AvidmGf2Param;
21
#[derive(Clone, Debug, Hash, Serialize, Deserialize, Eq, PartialEq)]
pub struct NsAvidmGf2Common {
    /// Parameters of the underlying (non-namespaced) AVID-M scheme.
    pub param: NsAvidmGf2Param,
    /// One base-scheme commitment per namespace, in namespace-table order.
    pub ns_commits: Vec<AvidmGf2Commit>,
    /// Byte length of each namespace's payload, parallel to `ns_commits`.
    pub ns_lens: Vec<usize>,
}
32
33impl NsAvidmGf2Common {
34 pub fn payload_byte_len(&self) -> usize {
36 self.ns_lens.iter().sum()
37 }
38}
39
#[derive(Clone, Debug, Hash, Serialize, Deserialize, Eq, PartialEq, Default)]
// One base-scheme share per namespace, in namespace-table order; the default
// (empty) value is the starting point when transposing shares in `ns_disperse`.
pub struct NsAvidmGf2Share(pub(crate) Vec<AvidmGf2Share>);
43
44impl NsAvidmGf2Share {
45 pub fn num_nss(&self) -> usize {
47 self.0.len()
48 }
49
50 pub fn weight(&self) -> usize {
52 self.0.first().map_or(0, |share| share.weight())
53 }
54
55 pub fn validate(&self) -> bool {
57 let weight = self.weight();
58 self.0
59 .iter()
60 .all(|share| share.validate() && share.weight() == weight)
61 }
62
63 pub fn contains_ns(&self, ns_index: usize) -> bool {
65 ns_index < self.num_nss()
66 }
67
68 pub fn inner_ns_share(&self, ns_index: usize) -> Option<AvidmGf2Share> {
70 self.0.get(ns_index).cloned()
71 }
72}
73
74impl NsAvidmGf2Scheme {
75 pub fn setup(recovery_threshold: usize, total_weights: usize) -> VidResult<NsAvidmGf2Param> {
77 NsAvidmGf2Param::new(recovery_threshold, total_weights)
78 }
79
80 pub fn commit(
84 param: &NsAvidmGf2Param,
85 payload: &[u8],
86 ns_table: impl IntoIterator<Item = Range<usize>>,
87 ) -> VidResult<(NsAvidmGf2Commit, NsAvidmGf2Common)> {
88 let ns_table = ns_table.into_iter().collect::<Vec<_>>();
89 let ns_lens = ns_table.iter().map(|r| r.len()).collect::<Vec<_>>();
90 let ns_commits = ns_table
91 .into_iter()
92 .map(|ns_range| AvidmGf2Scheme::commit(param, &payload[ns_range]))
93 .collect::<Result<Vec<_>, _>>()?;
94 let common = NsAvidmGf2Common {
95 param: param.clone(),
96 ns_commits,
97 ns_lens,
98 };
99 let commit = MerkleTree::from_elems(None, common.ns_commits.iter().map(|c| c.commit))
100 .map_err(|err| VidError::Internal(err.into()))?
101 .commitment();
102 Ok((NsAvidmGf2Commit { commit }, common))
103 }
104
105 pub fn is_consistent(commit: &NsAvidmGf2Commit, common: &NsAvidmGf2Common) -> bool {
107 let Ok(mt) =
108 MerkleTree::from_elems(None, common.ns_commits.iter().map(|commit| commit.commit))
109 else {
110 return false;
111 };
112 commit.commit == mt.commitment()
113 }
114
115 pub fn ns_disperse(
120 param: &NsAvidmGf2Param,
121 distribution: &[u32],
122 payload: &[u8],
123 ns_table: impl IntoIterator<Item = Range<usize>>,
124 ) -> VidResult<(NsAvidmGf2Commit, NsAvidmGf2Common, Vec<NsAvidmGf2Share>)> {
125 let num_storage_nodes = distribution.len();
126 let mut ns_commits = vec![];
127 let mut disperses = vec![];
128 let mut ns_lens = vec![];
129 for ns_range in ns_table {
130 ns_lens.push(ns_range.len());
131 let (commit, shares) =
132 AvidmGf2Scheme::disperse(param, distribution, &payload[ns_range])?;
133 ns_commits.push(commit);
134 disperses.push(shares);
135 }
136 let common = NsAvidmGf2Common {
137 param: param.clone(),
138 ns_commits,
139 ns_lens,
140 };
141 let commit = NsAvidmGf2Commit {
142 commit: MerkleTree::from_elems(None, common.ns_commits.iter().map(|c| c.commit))
143 .map_err(|err| VidError::Internal(err.into()))?
144 .commitment(),
145 };
146 let mut shares = vec![NsAvidmGf2Share::default(); num_storage_nodes];
147 disperses.into_iter().for_each(|ns_disperse| {
148 shares
149 .iter_mut()
150 .zip(ns_disperse)
151 .for_each(|(share, ns_share)| share.0.push(ns_share))
152 });
153 Ok((commit, common, shares))
154 }
155
156 pub fn verify_share_with_verified_common(
163 common: &NsAvidmGf2Common,
164 share: &NsAvidmGf2Share,
165 ) -> VidResult<crate::VerificationResult> {
166 if !(common.ns_commits.len() == common.ns_lens.len()
167 && common.ns_commits.len() == share.num_nss()
168 && share.validate())
169 {
170 return Err(VidError::InvalidShare);
171 }
172 for (commit, content) in common.ns_commits.iter().zip(share.0.iter()) {
173 if AvidmGf2Scheme::verify_share(&common.param, commit, content)?.is_err() {
174 return Ok(Err(()));
175 }
176 }
177 Ok(Ok(()))
178 }
179
180 pub fn verify_share(
182 commit: &NsAvidmGf2Commit,
183 common: &NsAvidmGf2Common,
184 share: &NsAvidmGf2Share,
185 ) -> VidResult<crate::VerificationResult> {
186 if !Self::is_consistent(commit, common) {
187 return Ok(Err(()));
188 }
189 Self::verify_share_with_verified_common(common, share)
190 }
191
192 pub fn recover(common: &NsAvidmGf2Common, shares: &[NsAvidmGf2Share]) -> VidResult<Vec<u8>> {
194 if shares.is_empty() {
195 return Err(VidError::InsufficientShares);
196 }
197 let mut result = vec![];
198 for ns_index in 0..common.ns_lens.len() {
199 result.append(&mut Self::ns_recover(common, ns_index, shares)?)
200 }
201 Ok(result)
202 }
203
204 pub fn ns_recover(
208 common: &NsAvidmGf2Common,
209 ns_index: usize,
210 shares: &[NsAvidmGf2Share],
211 ) -> VidResult<Vec<u8>> {
212 if shares.is_empty() {
213 return Err(VidError::InsufficientShares);
214 }
215 if ns_index >= common.ns_lens.len()
216 || !shares.iter().all(|share| share.contains_ns(ns_index))
217 {
218 return Err(VidError::IndexOutOfBound);
219 }
220 let ns_commit = &common.ns_commits[ns_index];
221 let shares: Vec<_> = shares
222 .iter()
223 .filter_map(|share| share.inner_ns_share(ns_index))
224 .collect();
225 AvidmGf2Scheme::recover(&common.param, ns_commit, &shares)
226 }
227}
228
#[cfg(test)]
pub mod tests {
    use rand::{seq::SliceRandom, RngCore};

    use crate::avidm_gf2::namespaced::NsAvidmGf2Scheme;

    /// Disperse `payload` over 9 nodes with random weights and a fixed
    /// two-namespace table (bytes 0..15 and 15..48).
    fn disperse_with_payload(
        payload: &[u8],
    ) -> (
        crate::avidm_gf2::namespaced::NsAvidmGf2Commit,
        crate::avidm_gf2::namespaced::NsAvidmGf2Common,
        Vec<crate::avidm_gf2::namespaced::NsAvidmGf2Share>,
    ) {
        let num_storage_nodes = 9;
        let ns_table = [(0usize..15), (15..48)];

        let mut rng = jf_utils::test_rng();
        let weights: Vec<u32> = (0..num_storage_nodes)
            .map(|_| rng.next_u32() % 5 + 1)
            .collect();
        let total_weights: u32 = weights.iter().sum();
        let recovery_threshold = total_weights.div_ceil(3) as usize;
        let params = NsAvidmGf2Scheme::setup(recovery_threshold, total_weights as usize).unwrap();

        // NOTE: fixed mojibake here — `&params` had been corrupted to `¶ms`.
        NsAvidmGf2Scheme::ns_disperse(&params, &weights, payload, ns_table.iter().cloned()).unwrap()
    }

    /// Standard fixture: a 48-byte payload of sequential bytes.
    fn setup_test_data() -> (
        crate::avidm_gf2::namespaced::NsAvidmGf2Commit,
        crate::avidm_gf2::namespaced::NsAvidmGf2Common,
        Vec<crate::avidm_gf2::namespaced::NsAvidmGf2Share>,
    ) {
        let payload: Vec<u8> = (0u8..48).collect();
        disperse_with_payload(&payload)
    }

    #[test]
    fn verify_share_with_verified_common_accepts_valid() {
        let (commit, common, shares) = setup_test_data();
        assert!(NsAvidmGf2Scheme::is_consistent(&commit, &common));
        for share in &shares {
            assert!(
                NsAvidmGf2Scheme::verify_share_with_verified_common(&common, share)
                    .is_ok_and(|r| r.is_ok())
            );
        }
    }

    #[test]
    fn verify_share_with_verified_common_rejects_tampered_share() {
        let (_commit, common, shares) = setup_test_data();
        // Structurally malformed share (missing a namespace) => hard error.
        let mut tampered = shares[0].clone();
        tampered.0.pop();
        assert!(NsAvidmGf2Scheme::verify_share_with_verified_common(&common, &tampered).is_err());

        // Well-formed share with a wrong namespace content => soft rejection.
        let (_commit2, _common2, shares2) = disperse_with_payload(&[0xAB; 48]);
        let mut mixed = shares[0].clone();
        mixed.0[0] = shares2[0].0[0].clone();
        assert!(
            NsAvidmGf2Scheme::verify_share_with_verified_common(&common, &mixed)
                .is_ok_and(|r| r.is_err())
        );
    }

    #[test]
    fn composition_equivalence() {
        // `verify_share` must equal `is_consistent` + the verified-common path.
        let (commit, common, shares) = setup_test_data();
        for share in &shares {
            let full_result = NsAvidmGf2Scheme::verify_share(&commit, &common, share)
                .unwrap()
                .is_ok();
            let composed_result = NsAvidmGf2Scheme::is_consistent(&commit, &common)
                && NsAvidmGf2Scheme::verify_share_with_verified_common(&common, share)
                    .unwrap()
                    .is_ok();
            assert_eq!(full_result, composed_result);
        }
    }

    #[test]
    fn is_consistent_rejects_tampered_commit() {
        let (commit, common, _shares) = setup_test_data();
        let (different_commit, ..) = disperse_with_payload(&[0xCD; 48]);
        assert!(NsAvidmGf2Scheme::is_consistent(&commit, &common));
        assert!(!NsAvidmGf2Scheme::is_consistent(&different_commit, &common));
    }

    #[test]
    fn is_consistent_rejects_tampered_common() {
        let (commit, common, _shares) = setup_test_data();
        let (_, different_common, _) = disperse_with_payload(&[0xCD; 48]);
        let mut tampered_common = common;
        tampered_common.ns_commits = different_common.ns_commits;
        assert!(!NsAvidmGf2Scheme::is_consistent(&commit, &tampered_common));
    }

    #[test]
    fn round_trip() {
        let num_storage_nodes = 9;
        let ns_lens = [15, 33];
        let ns_table = [(0usize..15), (15..48)];
        let payload_byte_len = ns_lens.iter().sum();

        let mut rng = jf_utils::test_rng();

        // Keep the RNG call order stable: weights first, then payload bytes.
        let weights: Vec<u32> = (0..num_storage_nodes)
            .map(|_| rng.next_u32() % 5 + 1)
            .collect();
        let total_weights: u32 = weights.iter().sum();
        let recovery_threshold = total_weights.div_ceil(3) as usize;
        let params = NsAvidmGf2Scheme::setup(recovery_threshold, total_weights as usize).unwrap();

        println!(
            "recovery_threshold:: {recovery_threshold} num_storage_nodes: {num_storage_nodes} \
             payload_byte_len: {payload_byte_len}"
        );
        println!("weights: {weights:?}");

        let payload = {
            let mut bytes_random = vec![0u8; payload_byte_len];
            rng.fill_bytes(&mut bytes_random);
            bytes_random
        };

        // NOTE: fixed mojibake here — `&params` had been corrupted to `¶ms`.
        let (commit, common, mut shares) =
            NsAvidmGf2Scheme::ns_disperse(&params, &weights, &payload, ns_table.iter().cloned())
                .unwrap();

        assert_eq!(shares.len(), num_storage_nodes);

        // `commit` and `ns_disperse` must agree on the commitment.
        assert_eq!(
            commit,
            NsAvidmGf2Scheme::commit(&params, &payload, ns_table.iter().cloned())
                .unwrap()
                .0
        );

        shares.iter().for_each(|share| {
            assert!(NsAvidmGf2Scheme::verify_share(&commit, &common, share).is_ok_and(|r| r.is_ok()))
        });

        // Take a random prefix of shares whose cumulated weight just exceeds
        // the recovery threshold, then recover per-namespace and in full.
        shares.shuffle(&mut rng);
        let mut cumulated_weights = 0;
        let mut cut_index = 0;
        while cumulated_weights <= recovery_threshold {
            cumulated_weights += shares[cut_index].weight();
            cut_index += 1;
        }
        let ns0_payload_recovered =
            NsAvidmGf2Scheme::ns_recover(&common, 0, &shares[..cut_index]).unwrap();
        assert_eq!(ns0_payload_recovered[..], payload[ns_table[0].clone()]);
        let ns1_payload_recovered =
            NsAvidmGf2Scheme::ns_recover(&common, 1, &shares[..cut_index]).unwrap();
        assert_eq!(ns1_payload_recovered[..], payload[ns_table[1].clone()]);
        let payload_recovered = NsAvidmGf2Scheme::recover(&common, &shares[..cut_index]).unwrap();
        assert_eq!(payload_recovered, payload);
    }
}