hotshot_query_service/data_source/
update.rs

// Copyright (c) 2022 Espresso Systems (espressosys.com)
// This file is part of the HotShot Query Service library.
//
// This program is free software: you can redistribute it and/or modify it under the terms of the GNU
// General Public License as published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
// This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
// even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// You should have received a copy of the GNU General Public License along with this program. If not,
// see <https://www.gnu.org/licenses/>.

//! A generic algorithm for updating a HotShot Query Service data source with new data.
14use std::iter::once;
15
16use anyhow::{ensure, Context};
17use async_trait::async_trait;
18use futures::future::Future;
19use hotshot::types::{Event, EventType};
20use hotshot_types::{
21    data::{ns_table::parse_ns_table, Leaf2, VidCommitment, VidDisperseShare, VidShare},
22    event::LeafInfo,
23    traits::{
24        block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES},
25        node_implementation::{ConsensusTime, NodeType},
26    },
27    vid::{
28        advz::advz_scheme,
29        avidm::{init_avidm_param, AvidMScheme},
30    },
31};
32use jf_advz::VidScheme;
33
34use crate::{
35    availability::{
36        BlockInfo, BlockQueryData, LeafQueryData, QueryableHeader, QueryablePayload,
37        StateCertQueryDataV2, UpdateAvailabilityData, VidCommonQueryData,
38    },
39    Header, Payload, VidCommon,
40};
41
42/// An extension trait for types which implement the update trait for each API module.
43///
44/// If a type implements [UpdateAvailabilityData] and
45/// [UpdateStatusData](crate::status::UpdateStatusData), then it can be fully kept up to date
46/// through two interfaces:
47/// * [populate_metrics](crate::status::UpdateStatusData::populate_metrics), to get a handle for
48///   populating the status metrics, which should be used when initializing a
49///   [SystemContextHandle](hotshot::types::SystemContextHandle)
50/// * [update](Self::update), provided by this extension trait, to update the query state when a new
51///   HotShot event is emitted
52#[async_trait]
53pub trait UpdateDataSource<Types: NodeType>: UpdateAvailabilityData<Types> {
54    /// Update query state based on a new consensus event.
55    ///
56    /// The caller is responsible for authenticating `event`. This function does not perform any
57    /// authentication, and if given an invalid `event` (one which does not follow from the latest
58    /// known state of the ledger) it may panic or silently accept the invalid `event`. This allows
59    /// the best possible performance in the case where the query service and the HotShot instance
60    /// are running in the same process (and thus the event stream, directly from HotShot) is
61    /// trusted.
62    ///
63    /// If you want to update the data source with an untrusted event, for example one received from
64    /// a peer over the network, you must authenticate it first.
65    ///
66    /// # Returns
67    ///
68    /// If all provided data is successfully inserted into the database, returns `Ok(())`. If any
69    /// error occurred, the error is logged, and the return value is the height of the first leaf
70    /// which failed to be inserted.
71    async fn update(&self, event: &Event<Types>) -> Result<(), u64>;
72}
73
74#[async_trait]
75impl<Types: NodeType, T> UpdateDataSource<Types> for T
76where
77    T: UpdateAvailabilityData<Types> + Send + Sync,
78    Header<Types>: QueryableHeader<Types>,
79    Payload<Types>: QueryablePayload<Types>,
80{
81    async fn update(&self, event: &Event<Types>) -> Result<(), u64> {
82        if let EventType::Decide {
83            leaf_chain,
84            committing_qc,
85            ..
86        } = &event.event
87        {
88            // `qc` justifies the first (most recent) leaf...
89            let qcs = once((**committing_qc).clone())
90                // ...and each leaf in the chain justifies the subsequent leaf (its parent) through
91                // `leaf.justify_qc`.
92                .chain(leaf_chain.iter().map(|leaf| leaf.leaf.justify_qc()))
93                // Put the QCs in chronological order.
94                .rev()
95                // The oldest QC is the `justify_qc` of the oldest leaf, which does not justify any
96                // leaf in the new chain, so we don't need it.
97                .skip(1);
98            for (
99                qc2,
100                LeafInfo {
101                    leaf: leaf2,
102                    vid_share,
103                    state_cert,
104                    ..
105                },
106            ) in qcs.zip(leaf_chain.iter().rev())
107            {
108                let height = leaf2.block_header().block_number();
109
110                let leaf_data = match LeafQueryData::new(leaf2.clone(), qc2.clone()) {
111                    Ok(leaf) => leaf,
112                    Err(err) => {
113                        tracing::error!(
114                            height,
115                            ?leaf2,
116                            ?committing_qc,
117                            "inconsistent leaf; cannot append leaf information: {err:#}"
118                        );
119                        return Err(leaf2.block_header().block_number());
120                    },
121                };
122                let block_data = leaf2
123                    .block_payload()
124                    .map(|payload| BlockQueryData::new(leaf2.block_header().clone(), payload));
125                if block_data.is_none() {
126                    tracing::info!(height, "block not available at decide");
127                }
128
129                let (vid_common, vid_share) = match vid_share {
130                    Some(VidDisperseShare::V0(share)) => (
131                        Some(VidCommonQueryData::new(
132                            leaf2.block_header().clone(),
133                            VidCommon::V0(share.common.clone()),
134                        )),
135                        Some(VidShare::V0(share.share.clone())),
136                    ),
137                    Some(VidDisperseShare::V1(share)) => (
138                        Some(VidCommonQueryData::new(
139                            leaf2.block_header().clone(),
140                            VidCommon::V1(share.common.clone()),
141                        )),
142                        Some(VidShare::V1(share.share.clone())),
143                    ),
144                    None => {
145                        if leaf2.view_number().u64() == 0 {
146                            // HotShot does not run VID in consensus for the genesis block. In this case,
147                            // the block payload is guaranteed to always be empty, so VID isn't really
148                            // necessary. But for consistency, we will still store the VID dispersal data,
149                            // computing it ourselves based on the well-known genesis VID commitment.
150                            match genesis_vid(leaf2) {
151                                Ok((common, share)) => (Some(common), Some(share)),
152                                Err(err) => {
153                                    tracing::warn!("failed to compute genesis VID: {err:#}");
154                                    (None, None)
155                                },
156                            }
157                        } else {
158                            (None, None)
159                        }
160                    },
161                };
162
163                if vid_common.is_none() {
164                    tracing::info!(height, "VID not available at decide");
165                }
166
167                if let Err(err) = self
168                    .append(BlockInfo::new(
169                        leaf_data,
170                        block_data,
171                        vid_common,
172                        vid_share,
173                        state_cert.clone().map(StateCertQueryDataV2),
174                    ))
175                    .await
176                {
177                    tracing::error!(height, "failed to append leaf information: {err:#}");
178                    return Err(leaf2.block_header().block_number());
179                }
180            }
181        }
182        Ok(())
183    }
184}
185
186fn genesis_vid<Types: NodeType>(
187    leaf: &Leaf2<Types>,
188) -> anyhow::Result<(VidCommonQueryData<Types>, VidShare)> {
189    let payload = Payload::<Types>::empty().0;
190    let bytes = payload.encode();
191
192    match leaf.block_header().payload_commitment() {
193        VidCommitment::V0(commit) => {
194            let mut disperse = advz_scheme(GENESIS_VID_NUM_STORAGE_NODES)
195                .disperse(bytes)
196                .context("unable to compute VID dispersal for genesis block")?;
197
198            ensure!(
199                disperse.commit == commit,
200                "computed VID commit {} for genesis block does not match header commit {}",
201                disperse.commit,
202                commit
203            );
204            Ok((
205                VidCommonQueryData::new(
206                    leaf.block_header().clone(),
207                    VidCommon::V0(disperse.common),
208                ),
209                VidShare::V0(disperse.shares.remove(0)),
210            ))
211        },
212        VidCommitment::V1(commit) => {
213            let avidm_param = init_avidm_param(GENESIS_VID_NUM_STORAGE_NODES)?;
214            let weights = vec![1; GENESIS_VID_NUM_STORAGE_NODES];
215            let ns_table = parse_ns_table(bytes.len(), &leaf.block_header().metadata().encode());
216
217            let (calculated_commit, mut shares) =
218                AvidMScheme::ns_disperse(&avidm_param, &weights, &bytes, ns_table).unwrap();
219
220            ensure!(
221                calculated_commit == commit,
222                "computed VID commit {} for genesis block does not match header commit {}",
223                calculated_commit,
224                commit
225            );
226
227            Ok((
228                VidCommonQueryData::new(leaf.block_header().clone(), VidCommon::V1(avidm_param)),
229                VidShare::V1(shares.remove(0)),
230            ))
231        },
232    }
233}
234
235/// A data source with an atomic transaction-based synchronization interface.
236///
237/// Changes are made to a versioned data source through a [`Transaction`]. Any changes made in a
238/// [`Transaction`] are initially visible only when queried through that same [`Transaction`]. They
239/// are not immediately written back to storage, which means that a new data source object opened
240/// against the same persistent storage will not reflect the changes. In particular, this means that
241/// if the process restarts and reopens its storage, uncommitted changes will be lost.
242///
243/// Only when a [`Transaction`] is committed are changes written back to storage, synchronized with
244/// any concurrent changes, and made visible to other connections to the same data source.
245pub trait VersionedDataSource: Send + Sync {
246    /// A transaction which can read and modify the data source.
247    type Transaction<'a>: Transaction
248    where
249        Self: 'a;
250
251    type ReadOnly<'a>: Transaction
252    where
253        Self: 'a;
254
255    /// Start an atomic transaction on the data source.
256    fn write(&self) -> impl Future<Output = anyhow::Result<Self::Transaction<'_>>> + Send;
257
258    /// Start a read-only transaction on the data source.
259    ///
260    /// A read-only transaction allows the owner to string together multiple queries of the data
261    /// source, which otherwise would not be atomic with respect to concurrent writes, in an atomic
262    /// fashion. Upon returning, [`read`](Self::read) locks in a fully consistent snapshot of the
263    /// data source, and any read operations performed upon the transaction thereafter read from the
264    /// same consistent snapshot. Concurrent modifications to the data source may occur (for
265    /// example, from concurrent [`write`](Self::write) transactions being committed), but their
266    /// results will not be reflected in a successful read-only transaction which was opened before
267    /// the write was committed.
268    ///
269    /// Read-only transactions do not need to be committed, and reverting has no effect.
270    fn read(&self) -> impl Future<Output = anyhow::Result<Self::ReadOnly<'_>>> + Send;
271}
272
273/// A unit of atomicity for updating a shared data source.
274///
275/// The methods provided by this trait can be used to write such pending changes back to persistent
276/// storage ([commit](Self::commit)) so that they become visible to other clients of the same
277/// underlying storage, and are saved if the process restarts. It also allows pending changes to be
278/// rolled back ([revert](Self::revert)) so that they are never written back to storage and are no
279/// longer reflected even through the data source object which was used to make the changes.
280pub trait Transaction: Send + Sync + Sized {
281    fn commit(self) -> impl Future<Output = anyhow::Result<()>> + Send;
282    fn revert(self) -> impl Future + Send;
283}