1use std::ops::Range;
4
5use jf_merkle_tree::MerkleTreeScheme;
6use serde::{Deserialize, Serialize};
7
8use super::{AvidmGf2Commit, AvidmGf2Share};
9use crate::{
10 avidm_gf2::{AvidmGf2Scheme, MerkleTree},
11 VidError, VidResult, VidScheme,
12};
13
14pub struct NsAvidmGf2Scheme;
16
17pub type NsAvidmGf2Commit = super::AvidmGf2Commit;
19pub type NsAvidmGf2Param = super::AvidmGf2Param;
21
22#[derive(Clone, Debug, Hash, Serialize, Deserialize, Eq, PartialEq)]
24pub struct NsAvidmGf2Common {
25 pub param: NsAvidmGf2Param,
27 pub ns_commits: Vec<AvidmGf2Commit>,
29 pub ns_lens: Vec<usize>,
31}
32
33impl NsAvidmGf2Common {
34 pub fn payload_byte_len(&self) -> usize {
36 self.ns_lens.iter().sum()
37 }
38}
39
40#[derive(Clone, Debug, Hash, Serialize, Deserialize, Eq, PartialEq, Default)]
42pub struct NsAvidmGf2Share(pub(crate) Vec<AvidmGf2Share>);
43
44impl NsAvidmGf2Share {
45 pub fn num_nss(&self) -> usize {
47 self.0.len()
48 }
49
50 pub fn weight(&self) -> usize {
52 self.0.first().map_or(0, |share| share.weight())
53 }
54
55 pub fn validate(&self) -> bool {
57 let weight = self.weight();
58 self.0
59 .iter()
60 .all(|share| share.validate() && share.weight() == weight)
61 }
62
63 pub fn contains_ns(&self, ns_index: usize) -> bool {
65 ns_index < self.num_nss()
66 }
67
68 pub fn inner_ns_share(&self, ns_index: usize) -> Option<AvidmGf2Share> {
70 self.0.get(ns_index).cloned()
71 }
72}
73
74impl NsAvidmGf2Scheme {
75 pub fn setup(recovery_threshold: usize, total_weights: usize) -> VidResult<NsAvidmGf2Param> {
77 NsAvidmGf2Param::new(recovery_threshold, total_weights)
78 }
79
80 pub fn commit(
84 param: &NsAvidmGf2Param,
85 payload: &[u8],
86 ns_table: impl IntoIterator<Item = Range<usize>>,
87 ) -> VidResult<(NsAvidmGf2Commit, NsAvidmGf2Common)> {
88 let ns_table = ns_table.into_iter().collect::<Vec<_>>();
89 let ns_lens = ns_table.iter().map(|r| r.len()).collect::<Vec<_>>();
90 let ns_commits = ns_table
91 .into_iter()
92 .map(|ns_range| AvidmGf2Scheme::commit(param, &payload[ns_range]))
93 .collect::<Result<Vec<_>, _>>()?;
94 let common = NsAvidmGf2Common {
95 param: param.clone(),
96 ns_commits,
97 ns_lens,
98 };
99 let commit = MerkleTree::from_elems(None, common.ns_commits.iter().map(|c| c.commit))
100 .map_err(|err| VidError::Internal(err.into()))?
101 .commitment();
102 Ok((NsAvidmGf2Commit { commit }, common))
103 }
104
105 pub fn is_consistent(commit: &NsAvidmGf2Commit, common: &NsAvidmGf2Common) -> bool {
107 let Ok(mt) =
108 MerkleTree::from_elems(None, common.ns_commits.iter().map(|commit| commit.commit))
109 else {
110 return false;
111 };
112 commit.commit == mt.commitment()
113 }
114
115 pub fn ns_disperse(
120 param: &NsAvidmGf2Param,
121 distribution: &[u32],
122 payload: &[u8],
123 ns_table: impl IntoIterator<Item = Range<usize>>,
124 ) -> VidResult<(NsAvidmGf2Commit, NsAvidmGf2Common, Vec<NsAvidmGf2Share>)> {
125 let num_storage_nodes = distribution.len();
126 let mut ns_commits = vec![];
127 let mut disperses = vec![];
128 let mut ns_lens = vec![];
129 for ns_range in ns_table {
130 ns_lens.push(ns_range.len());
131 let (commit, shares) =
132 AvidmGf2Scheme::disperse(param, distribution, &payload[ns_range])?;
133 ns_commits.push(commit);
134 disperses.push(shares);
135 }
136 let common = NsAvidmGf2Common {
137 param: param.clone(),
138 ns_commits,
139 ns_lens,
140 };
141 let commit = NsAvidmGf2Commit {
142 commit: MerkleTree::from_elems(None, common.ns_commits.iter().map(|c| c.commit))
143 .map_err(|err| VidError::Internal(err.into()))?
144 .commitment(),
145 };
146 let mut shares = vec![NsAvidmGf2Share::default(); num_storage_nodes];
147 disperses.into_iter().for_each(|ns_disperse| {
148 shares
149 .iter_mut()
150 .zip(ns_disperse)
151 .for_each(|(share, ns_share)| share.0.push(ns_share))
152 });
153 Ok((commit, common, shares))
154 }
155
156 pub fn verify_share(
158 commit: &NsAvidmGf2Commit,
159 common: &NsAvidmGf2Common,
160 share: &NsAvidmGf2Share,
161 ) -> VidResult<crate::VerificationResult> {
162 if !(common.ns_commits.len() == common.ns_lens.len()
163 && common.ns_commits.len() == share.num_nss()
164 && share.validate())
165 {
166 return Err(VidError::InvalidShare);
167 }
168 for (commit, content) in common.ns_commits.iter().zip(share.0.iter()) {
170 if AvidmGf2Scheme::verify_share(&common.param, commit, content)?.is_err() {
171 return Ok(Err(()));
172 }
173 }
174 let expected_commit = NsAvidmGf2Commit {
176 commit: MerkleTree::from_elems(
177 None,
178 common.ns_commits.iter().map(|commit| commit.commit),
179 )
180 .map_err(|err| VidError::Internal(err.into()))?
181 .commitment(),
182 };
183 Ok(if &expected_commit == commit {
184 Ok(())
185 } else {
186 Err(())
187 })
188 }
189
190 pub fn recover(common: &NsAvidmGf2Common, shares: &[NsAvidmGf2Share]) -> VidResult<Vec<u8>> {
192 if shares.is_empty() {
193 return Err(VidError::InsufficientShares);
194 }
195 let mut result = vec![];
196 for ns_index in 0..common.ns_lens.len() {
197 result.append(&mut Self::ns_recover(common, ns_index, shares)?)
198 }
199 Ok(result)
200 }
201
202 pub fn ns_recover(
206 common: &NsAvidmGf2Common,
207 ns_index: usize,
208 shares: &[NsAvidmGf2Share],
209 ) -> VidResult<Vec<u8>> {
210 if shares.is_empty() {
211 return Err(VidError::InsufficientShares);
212 }
213 if ns_index >= common.ns_lens.len()
214 || !shares.iter().all(|share| share.contains_ns(ns_index))
215 {
216 return Err(VidError::IndexOutOfBound);
217 }
218 let ns_commit = &common.ns_commits[ns_index];
219 let shares: Vec<_> = shares
220 .iter()
221 .filter_map(|share| share.inner_ns_share(ns_index))
222 .collect();
223 AvidmGf2Scheme::recover(&common.param, ns_commit, &shares)
224 }
225}
226
#[cfg(test)]
pub mod tests {
    use rand::{seq::SliceRandom, RngCore};

    use crate::avidm_gf2::namespaced::NsAvidmGf2Scheme;

    /// End-to-end round trip: disperse a two-namespace payload, verify every
    /// share, then recover each namespace and the whole payload from a
    /// random subset of shares above the recovery threshold.
    #[test]
    fn round_trip() {
        let num_storage_nodes = 9;
        // Two namespaces: bytes [0, 15) and [15, 48).
        let ns_lens = [15, 33];
        let ns_table = [(0usize..15), (15..48)];
        let payload_byte_len = ns_lens.iter().sum();

        let mut rng = jf_utils::test_rng();

        // Random per-node weights in 1..=5.
        let weights: Vec<u32> = (0..num_storage_nodes)
            .map(|_| rng.next_u32() % 5 + 1)
            .collect();
        let total_weights: u32 = weights.iter().sum();
        let recovery_threshold = total_weights.div_ceil(3) as usize;
        let params = NsAvidmGf2Scheme::setup(recovery_threshold, total_weights as usize).unwrap();

        println!(
            "recovery_threshold:: {recovery_threshold} num_storage_nodes: {num_storage_nodes} \
             payload_byte_len: {payload_byte_len}"
        );
        println!("weights: {weights:?}");

        // Random payload covering both namespaces.
        let payload = {
            let mut bytes_random = vec![0u8; payload_byte_len];
            rng.fill_bytes(&mut bytes_random);
            bytes_random
        };

        let (commit, common, mut shares) =
            NsAvidmGf2Scheme::ns_disperse(&params, &weights, &payload, ns_table.iter().cloned())
                .unwrap();

        assert_eq!(shares.len(), num_storage_nodes);

        // `commit` alone must match a standalone commitment of the payload.
        assert_eq!(
            commit,
            NsAvidmGf2Scheme::commit(&params, &payload, ns_table.iter().cloned())
                .unwrap()
                .0
        );

        // Every share must verify against the commitment.
        for share in &shares {
            assert!(
                NsAvidmGf2Scheme::verify_share(&commit, &common, share).is_ok_and(|r| r.is_ok())
            )
        }

        // Pick a random prefix whose cumulative weight exceeds the threshold.
        shares.shuffle(&mut rng);
        let mut cumulated_weights = 0;
        let mut cut_index = 0;
        while cumulated_weights <= recovery_threshold {
            cumulated_weights += shares[cut_index].weight();
            cut_index += 1;
        }

        // Per-namespace recovery matches the corresponding payload slice.
        let ns0_payload_recovered =
            NsAvidmGf2Scheme::ns_recover(&common, 0, &shares[..cut_index]).unwrap();
        assert_eq!(ns0_payload_recovered[..], payload[ns_table[0].clone()]);
        let ns1_payload_recovered =
            NsAvidmGf2Scheme::ns_recover(&common, 1, &shares[..cut_index]).unwrap();
        assert_eq!(ns1_payload_recovered[..], payload[ns_table[1].clone()]);

        // Full recovery reproduces the original payload.
        let payload_recovered = NsAvidmGf2Scheme::recover(&common, &shares[..cut_index]).unwrap();
        assert_eq!(payload_recovered, payload);
    }
}