hotshot_query_service/data_source/
update.rs

1// Copyright (c) 2022 Espresso Systems (espressosys.com)
2// This file is part of the HotShot Query Service library.
3//
4// This program is free software: you can redistribute it and/or modify it under the terms of the GNU
5// General Public License as published by the Free Software Foundation, either version 3 of the
6// License, or (at your option) any later version.
7// This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
8// even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9// General Public License for more details.
10// You should have received a copy of the GNU General Public License along with this program. If not,
11// see <https://www.gnu.org/licenses/>.
12
13//! A generic algorithm for updating a HotShot Query Service data source with new data.
14use std::iter::once;
15
16use anyhow::{ensure, Context};
17use async_trait::async_trait;
18use futures::future::Future;
19use hotshot::types::{Event, EventType};
20use hotshot_types::{
21    data::{ns_table::parse_ns_table, Leaf2, VidCommitment, VidCommon, VidDisperseShare, VidShare},
22    event::LeafInfo,
23    traits::{
24        block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES},
25        node_implementation::{ConsensusTime, NodeType},
26    },
27    vid::{
28        advz::advz_scheme,
29        avidm::{init_avidm_param, AvidMScheme},
30        avidm_gf2::{init_avidm_gf2_param, AvidmGf2Scheme},
31    },
32    vote::HasViewNumber,
33};
34use jf_advz::VidScheme;
35
36use crate::{
37    availability::{
38        BlockInfo, BlockQueryData, LeafQueryData, QueryableHeader, QueryablePayload,
39        UpdateAvailabilityData, VidCommonQueryData,
40    },
41    Header, Payload,
42};
43
/// An extension trait for types which implement the update trait for each API module.
///
/// If a type implements [UpdateAvailabilityData] and
/// [UpdateStatusData](crate::status::UpdateStatusData), then it can be fully kept up to date
/// through two interfaces:
/// * [populate_metrics](crate::status::UpdateStatusData::populate_metrics), to get a handle for
///   populating the status metrics, which should be used when initializing a
///   [SystemContextHandle](hotshot::types::SystemContextHandle)
/// * [update](Self::update), provided by this extension trait, to update the query state when a new
///   HotShot event is emitted
#[async_trait]
pub trait UpdateDataSource<Types: NodeType>: UpdateAvailabilityData<Types> {
    /// Update query state based on a new consensus event.
    ///
    /// The caller is responsible for authenticating `event`. This function does not perform any
    /// authentication, and if given an invalid `event` (one which does not follow from the latest
    /// known state of the ledger) it may panic or silently accept the invalid `event`. This allows
    /// the best possible performance in the case where the query service and the HotShot instance
    /// are running in the same process (and thus the event stream, directly from HotShot) is
    /// trusted.
    ///
    /// If you want to update the data source with an untrusted event, for example one received from
    /// a peer over the network, you must authenticate it first.
    ///
    /// # Returns
    ///
    /// If all provided data is successfully inserted into the database, returns `Ok(())`. If any
    /// error occurred, the error is logged, and the return value is the height of the first leaf
    /// which failed to be inserted. The blanket implementation stops processing the event at the
    /// first failure, so leaves after the returned height are not inserted either.
    async fn update(&self, event: &Event<Types>) -> Result<(), u64>;
}
75
#[async_trait]
impl<Types: NodeType, T> UpdateDataSource<Types> for T
where
    T: UpdateAvailabilityData<Types> + Send + Sync,
    Header<Types>: QueryableHeader<Types>,
    Payload<Types>: QueryablePayload<Types>,
{
    async fn update(&self, event: &Event<Types>) -> Result<(), u64> {
        // Only decide events carry newly-finalized data; all other event types are ignored.
        if let EventType::Decide {
            leaf_chain,
            committing_qc,
            deciding_qc,
            ..
        } = &event.event
        {
            // `qc` justifies the first (most recent) leaf...
            let qcs = once(committing_qc.qc().clone())
                // ...and each leaf in the chain justifies the subsequent leaf (its parent) through
                // `leaf.justify_qc`.
                .chain(leaf_chain.iter().map(|leaf| leaf.leaf.justify_qc()))
                // Put the QCs in chronological order.
                .rev()
                // The oldest QC is the `justify_qc` of the oldest leaf, which does not justify any
                // leaf in the new chain, so we don't need it.
                .skip(1);
            // Walk the decided chain oldest-to-newest (`leaf_chain` is newest-first, hence the
            // `rev`), pairing each leaf with the QC that justifies it.
            for (
                qc2,
                LeafInfo {
                    leaf: leaf2,
                    vid_share,
                    state_cert: _,
                    ..
                },
            ) in qcs.zip(leaf_chain.iter().rev())
            {
                let height = leaf2.block_header().block_number();

                // Check that the leaf and its justifying QC are mutually consistent before
                // constructing the queryable leaf data.
                let leaf_data = match LeafQueryData::new(leaf2.clone(), qc2.clone()) {
                    Ok(leaf) => leaf,
                    Err(err) => {
                        tracing::error!(
                            height,
                            ?leaf2,
                            ?committing_qc,
                            "inconsistent leaf; cannot append leaf information: {err:#}"
                        );
                        // Per the trait contract, report the height of the first leaf which
                        // failed to be inserted and stop processing this event.
                        return Err(leaf2.block_header().block_number());
                    },
                };
                // The payload may legitimately be missing at decide time; in that case we still
                // store the leaf and whatever else we have.
                let block_data = leaf2
                    .block_payload()
                    .map(|payload| BlockQueryData::new(leaf2.block_header().clone(), payload));
                if block_data.is_none() {
                    tracing::info!(height, "block not available at decide");
                }

                // Split the versioned VID dispersal share into its common data and this node's
                // share, preserving the version tag on each half.
                let (vid_common, vid_share) = match vid_share {
                    Some(VidDisperseShare::V0(share)) => (
                        Some(VidCommonQueryData::new(
                            leaf2.block_header().clone(),
                            VidCommon::V0(share.common.clone()),
                        )),
                        Some(VidShare::V0(share.share.clone())),
                    ),
                    Some(VidDisperseShare::V1(share)) => (
                        Some(VidCommonQueryData::new(
                            leaf2.block_header().clone(),
                            VidCommon::V1(share.common.clone()),
                        )),
                        Some(VidShare::V1(share.share.clone())),
                    ),
                    Some(VidDisperseShare::V2(share)) => (
                        Some(VidCommonQueryData::new(
                            leaf2.block_header().clone(),
                            VidCommon::V2(share.common.clone()),
                        )),
                        Some(VidShare::V2(share.share.clone())),
                    ),
                    None => {
                        if leaf2.view_number().u64() == 0 {
                            // HotShot does not run VID in consensus for the genesis block. In this case,
                            // the block payload is guaranteed to always be empty, so VID isn't really
                            // necessary. But for consistency, we will still store the VID dispersal data,
                            // computing it ourselves based on the well-known genesis VID commitment.
                            match genesis_vid(leaf2) {
                                Ok((common, share)) => (Some(common), Some(share)),
                                Err(err) => {
                                    // Best effort: a failure here only means we store the
                                    // genesis block without VID data.
                                    tracing::warn!("failed to compute genesis VID: {err:#}");
                                    (None, None)
                                },
                            }
                        } else {
                            (None, None)
                        }
                    },
                };

                if vid_common.is_none() {
                    tracing::info!(height, "VID not available at decide");
                }

                let mut info = BlockInfo::new(leaf_data, block_data, vid_common, vid_share);
                // Attach the two-QC finalization chain, but only to the leaf which the
                // committing QC actually justifies (the newest decided leaf).
                if let Some(deciding_qc) = deciding_qc {
                    if committing_qc.view_number() == info.leaf.leaf().view_number() {
                        let qc_chain =
                            [committing_qc.as_ref().clone(), deciding_qc.as_ref().clone()];
                        info = info.with_qc_chain(qc_chain);
                    }
                }
                if let Err(err) = self.append(info).await {
                    tracing::error!(height, "failed to append leaf information: {err:#}");
                    return Err(leaf2.block_header().block_number());
                }
            }
        }
        Ok(())
    }
}
194
195fn genesis_vid<Types: NodeType>(
196    leaf: &Leaf2<Types>,
197) -> anyhow::Result<(VidCommonQueryData<Types>, VidShare)> {
198    let payload = Payload::<Types>::empty().0;
199    let bytes = payload.encode();
200
201    match leaf.block_header().payload_commitment() {
202        VidCommitment::V0(commit) => {
203            let mut disperse = advz_scheme(GENESIS_VID_NUM_STORAGE_NODES)
204                .disperse(bytes)
205                .context("unable to compute VID dispersal for genesis block")?;
206
207            ensure!(
208                disperse.commit == commit,
209                "computed VID commit {} for genesis block does not match header commit {}",
210                disperse.commit,
211                commit
212            );
213            Ok((
214                VidCommonQueryData::new(
215                    leaf.block_header().clone(),
216                    VidCommon::V0(disperse.common),
217                ),
218                VidShare::V0(disperse.shares.remove(0)),
219            ))
220        },
221        VidCommitment::V1(commit) => {
222            let avidm_param = init_avidm_param(GENESIS_VID_NUM_STORAGE_NODES)?;
223            let weights = vec![1; GENESIS_VID_NUM_STORAGE_NODES];
224            let ns_table = parse_ns_table(bytes.len(), &leaf.block_header().metadata().encode());
225
226            let (calculated_commit, mut shares) =
227                AvidMScheme::ns_disperse(&avidm_param, &weights, &bytes, ns_table).unwrap();
228
229            ensure!(
230                calculated_commit == commit,
231                "computed VID commit {} for genesis block does not match header commit {}",
232                calculated_commit,
233                commit
234            );
235
236            Ok((
237                VidCommonQueryData::new(leaf.block_header().clone(), VidCommon::V1(avidm_param)),
238                VidShare::V1(shares.remove(0)),
239            ))
240        },
241        VidCommitment::V2(commit) => {
242            let avidm_gf2_param = init_avidm_gf2_param(GENESIS_VID_NUM_STORAGE_NODES)?;
243            let weights = vec![1; GENESIS_VID_NUM_STORAGE_NODES];
244            let ns_table = parse_ns_table(bytes.len(), &leaf.block_header().metadata().encode());
245
246            let (calculated_commit, common, mut shares) =
247                AvidmGf2Scheme::ns_disperse(&avidm_gf2_param, &weights, &bytes, ns_table).unwrap();
248
249            ensure!(
250                calculated_commit == commit,
251                "computed VID commit {} for genesis block does not match header commit {}",
252                calculated_commit,
253                commit
254            );
255
256            Ok((
257                VidCommonQueryData::new(leaf.block_header().clone(), VidCommon::V2(common)),
258                VidShare::V2(shares.remove(0)),
259            ))
260        },
261    }
262}
263
/// A data source with an atomic transaction-based synchronization interface.
///
/// Changes are made to a versioned data source through a [`Transaction`]. Any changes made in a
/// [`Transaction`] are initially visible only when queried through that same [`Transaction`]. They
/// are not immediately written back to storage, which means that a new data source object opened
/// against the same persistent storage will not reflect the changes. In particular, this means that
/// if the process restarts and reopens its storage, uncommitted changes will be lost.
///
/// Only when a [`Transaction`] is committed are changes written back to storage, synchronized with
/// any concurrent changes, and made visible to other connections to the same data source.
pub trait VersionedDataSource: Send + Sync {
    /// A transaction which can read and modify the data source.
    type Transaction<'a>: Transaction
    where
        Self: 'a;

    /// A read-only transaction on the data source, as returned by [`read`](Self::read).
    type ReadOnly<'a>: Transaction
    where
        Self: 'a;

    /// Start an atomic transaction on the data source.
    fn write(&self) -> impl Future<Output = anyhow::Result<Self::Transaction<'_>>> + Send;

    /// Start a read-only transaction on the data source.
    ///
    /// A read-only transaction allows the owner to string together multiple queries of the data
    /// source, which otherwise would not be atomic with respect to concurrent writes, in an atomic
    /// fashion. Upon returning, [`read`](Self::read) locks in a fully consistent snapshot of the
    /// data source, and any read operations performed upon the transaction thereafter read from the
    /// same consistent snapshot. Concurrent modifications to the data source may occur (for
    /// example, from concurrent [`write`](Self::write) transactions being committed), but their
    /// results will not be reflected in a successful read-only transaction which was opened before
    /// the write was committed.
    ///
    /// Read-only transactions do not need to be committed, and reverting has no effect.
    fn read(&self) -> impl Future<Output = anyhow::Result<Self::ReadOnly<'_>>> + Send;
}
301
/// A unit of atomicity for updating a shared data source.
///
/// The methods provided by this trait can be used to write such pending changes back to persistent
/// storage ([commit](Self::commit)) so that they become visible to other clients of the same
/// underlying storage, and are saved if the process restarts. It also allows pending changes to be
/// rolled back ([revert](Self::revert)) so that they are never written back to storage and are no
/// longer reflected even through the data source object which was used to make the changes.
pub trait Transaction: Send + Sync + Sized {
    /// Write all pending changes back to the underlying storage, consuming the transaction.
    ///
    /// Returns an error if the changes could not be committed; in that case they are not made
    /// visible to other clients of the storage.
    fn commit(self) -> impl Future<Output = anyhow::Result<()>> + Send;

    /// Discard all pending changes, consuming the transaction without writing anything back.
    fn revert(self) -> impl Future + Send;
}