espresso_types/v0/v0_1/l1.rs

use alloy::{
    network::Ethereum,
    primitives::{B256, U256},
    providers::{
        fillers::{FillProvider, JoinFill, RecommendedFillers},
        Identity, RootProvider,
    },
    transports::http::{Client, Http},
};
use alloy_compat::ethers_serde;
use async_broadcast::{InactiveReceiver, Sender};
use clap::Parser;
use derive_more::Deref;
use hotshot_types::traits::metrics::{Counter, Gauge, Metrics, NoMetrics};
use lru::LruCache;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::{
    num::NonZeroUsize,
    sync::Arc,
    time::{Duration, Instant},
};
use tokio::{
    sync::{Mutex, Notify},
    task::JoinHandle,
};
use url::Url;

use crate::v0::utils::parse_duration;

// `PartialOrd`/`Ord` are derived here so that `L1BlockInfoWithParent` below can derive them;
// the derived ordering compares by block number first.
#[derive(
    Clone, Copy, Debug, Default, Deserialize, Serialize, Hash, PartialEq, Eq, PartialOrd, Ord,
)]
pub struct L1BlockInfo {
    pub number: u64,
    #[serde(with = "ethers_serde::u256")]
    pub timestamp: U256,
    #[serde(with = "ethers_serde::b256")]
    pub hash: B256,
}

#[derive(Clone, Copy, Debug, PartialOrd, Ord, Hash, PartialEq, Eq)]
pub(crate) struct L1BlockInfoWithParent {
    pub(crate) info: L1BlockInfo,
    pub(crate) parent_hash: B256,
}

#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Hash, PartialEq, Eq)]
pub struct L1Snapshot {
    /// The relevant snapshot of the L1 includes a reference to the current head of the L1 chain.
    ///
    /// Note that the L1 head is subject to change due to reorgs. However, no reorg will change
    /// the _number_ of this block in the chain: L1 block numbers are always sequentially
    /// increasing. Therefore, the sequencer does not have to worry about reorgs invalidating this
    /// snapshot.
    pub head: u64,

    /// The snapshot also includes information about the latest finalized L1 block.
    ///
    /// Since this block is finalized (i.e. it cannot be reorged), we can include specific
    /// information about the particular block, such as its hash and timestamp.
    ///
    /// This block may be `None` in the rare case where Espresso has started shortly after the
    /// genesis of the L1, and the L1 has yet to finalize a block. In all other cases it will be
    /// `Some`.
    pub finalized: Option<L1BlockInfo>,
}

/// Configuration for an L1 client.
#[derive(Clone, Debug, Parser)]
pub struct L1ClientOptions {
    /// Delay when retrying failed L1 queries.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_RETRY_DELAY",
        default_value = "1s",
        value_parser = parse_duration,
    )]
    pub l1_retry_delay: Duration,

    /// Interval between requests when polling the L1.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL",
        default_value = "7s",
        value_parser = parse_duration,
    )]
    pub l1_polling_interval: Duration,

    /// Maximum number of L1 blocks to keep in cache at once.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_BLOCKS_CACHE_SIZE",
        default_value = "100"
    )]
    pub l1_blocks_cache_size: NonZeroUsize,

    /// Number of L1 events to buffer before discarding.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_EVENTS_CHANNEL_CAPACITY",
        default_value = "100"
    )]
    pub l1_events_channel_capacity: usize,

    /// Maximum number of L1 blocks that can be scanned for events in a single query.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE",
        default_value = "10000"
    )]
    pub l1_events_max_block_range: u64,

    /// Maximum time to wait for new heads before considering a stream invalid and reconnecting.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_SUBSCRIPTION_TIMEOUT",
        default_value = "1m",
        value_parser = parse_duration,
    )]
    pub subscription_timeout: Duration,

    /// Fail over to another provider if the current provider fails twice within this window.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_FREQUENT_FAILURE_TOLERANCE",
        default_value = "1m",
        value_parser = parse_duration,
    )]
    pub l1_frequent_failure_tolerance: Duration,

    /// Fail over to another provider if the current provider fails this many times in a row,
    /// regardless of the time window.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_CONSECUTIVE_FAILURE_TOLERANCE",
        default_value = "10"
    )]
    pub l1_consecutive_failure_tolerance: usize,

    /// Revert to the primary provider this long after failing over.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_FAILOVER_REVERT",
        default_value = "30m",
        value_parser = parse_duration,
    )]
    pub l1_failover_revert: Duration,

    /// Amount of time to wait after receiving a 429 response before making more L1 RPC requests.
    ///
    /// If not set, the general `--l1-retry-delay` will be used.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_RATE_LIMIT_DELAY",
        value_parser = parse_duration,
    )]
    pub l1_rate_limit_delay: Option<Duration>,

    /// Separate provider to use for subscription feeds.
    ///
    /// Typically this would be a WebSockets endpoint while the main provider uses HTTP.
    #[clap(long, env = "ESPRESSO_SEQUENCER_L1_WS_PROVIDER", value_delimiter = ',')]
    pub l1_ws_provider: Option<Vec<Url>>,

    /// Interval at which the background update loop polls the L1 stake table contract for new
    /// events and updates local persistence.
    #[clap(
        long,
        env = "ESPRESSO_SEQUENCER_L1_STAKE_TABLE_UPDATE_INTERVAL",
        default_value = "60m",
        value_parser = parse_duration,
    )]
    pub stake_table_update_interval: Duration,

    /// A block range which is expected to contain the finalized heads of all L1 provider chains.
    ///
    /// If specified, it is assumed that if a block `n` is known to be finalized according to a
    /// certain provider, then any block less than `n - L1_FINALIZED_SAFETY_MARGIN` is finalized
    /// _according to any provider_. In other words, if we fail over from one provider to another,
    /// the second provider will never be lagging the first by more than this margin.
    ///
    /// This allows us to quickly query for very old finalized blocks by number. Without this
    /// assumption, we would always need to verify that a block is finalized by fetching all blocks
    /// in a hash chain between the known finalized block and the desired block, recomputing and
    /// checking the hashes. This is fine for blocks near the finalized head, but for very old
    /// blocks it is prohibitively expensive, and such old blocks are extremely unlikely to be
    /// unfinalized anyway.
    #[clap(long, env = "ESPRESSO_SEQUENCER_L1_FINALIZED_SAFETY_MARGIN")]
    pub l1_finalized_safety_margin: Option<u64>,

    #[clap(skip = Arc::<Box<dyn Metrics>>::new(Box::new(NoMetrics)))]
    pub metrics: Arc<Box<dyn Metrics>>,
}
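
// A hedged usage sketch: since `L1ClientOptions` derives `clap::Parser`, it
// can be parsed from CLI-style arguments (or from the `ESPRESSO_SEQUENCER_L1_*`
// environment variables). The binary name and argument values below are
// illustrative only.
#[cfg(test)]
mod l1_client_options_example {
    use super::*;

    #[test]
    fn parse_options_from_cli_args() {
        let opt = L1ClientOptions::parse_from([
            "l1-client",
            "--l1-retry-delay",
            "2s",
            "--l1-consecutive-failure-tolerance",
            "5",
        ]);
        assert_eq!(opt.l1_retry_delay, Duration::from_secs(2));
        assert_eq!(opt.l1_consecutive_failure_tolerance, 5);
    }
}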

/// Type alias for the alloy provider used to talk to the L1.
pub type L1Provider = FillProvider<
    JoinFill<Identity, <Ethereum as RecommendedFillers>::RecommendedFillers>,
    RootProvider,
>;

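// A hedged sketch of constructing a provider matching the `L1Provider` alias:
// `ProviderBuilder::new()` installs Ethereum's recommended fillers, which is
// the same filler stack named in the alias. The URL would typically come from
// configuration; this helper is illustrative and unused.
#[allow(dead_code)]
fn example_l1_provider(url: Url) -> L1Provider {
    use alloy::providers::ProviderBuilder;

    ProviderBuilder::new().on_http(url)
}
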
/// An Ethereum provider and configuration to interact with the L1.
///
/// This client runs asynchronously, updating an in-memory snapshot of the relevant L1 information
/// each time a new L1 block is published. The main advantage of this is that we can update the L1
/// state at the pace of the L1, instead of the much faster pace of HotShot consensus. This makes
/// it easy to use a subscription instead of polling for new blocks, vastly reducing the number of
/// L1 RPC calls we make.
#[derive(Clone, Debug, Deref)]
pub struct L1Client {
    /// The alloy provider used for L1 communication, with the recommended fillers installed
    #[deref]
    pub provider: L1Provider,
    /// Actual transport used in `self.provider`,
    /// i.e. the `t` variable in `ProviderBuilder::new().on_client(RpcClient::new(t, is_local))`
    pub transport: SwitchingTransport,
    /// Shared state updated by an asynchronous task which polls the L1.
    pub(crate) state: Arc<Mutex<L1State>>,
    /// Channel used by the async update task to send events to clients.
    pub(crate) sender: Sender<L1Event>,
    /// Receiver for events from the async update task.
    pub(crate) receiver: InactiveReceiver<L1Event>,
    /// Async task which updates the shared state.
    pub(crate) update_task: Arc<L1UpdateTask>,
}

/// In-memory view of the L1 state, updated asynchronously.
#[derive(Debug)]
pub(crate) struct L1State {
    pub(crate) snapshot: L1Snapshot,
    pub(crate) finalized: LruCache<u64, L1BlockInfoWithParent>,
    pub(crate) last_finalized: Option<u64>,
}

#[derive(Clone, Debug)]
pub(crate) enum L1Event {
    NewHead { head: u64 },
    NewFinalized { finalized: L1BlockInfoWithParent },
}

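// A hedged sketch of how a caller can observe events from the update task:
// activate the stored `InactiveReceiver` into a live receiver (leaving the
// channel's capacity accounting unchanged) and await messages. Returning on
// the first `NewHead` is illustrative, not how this crate necessarily
// consumes events.
#[allow(dead_code)]
async fn example_wait_for_new_head(client: &L1Client) -> Option<u64> {
    let mut events = client.receiver.activate_cloned();
    while let Ok(event) = events.recv().await {
        if let L1Event::NewHead { head } = event {
            return Some(head);
        }
    }
    // The channel was closed before a new head arrived.
    None
}
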
#[derive(Debug, Default)]
pub(crate) struct L1UpdateTask(pub(crate) Mutex<Option<JoinHandle<()>>>);

#[derive(Clone, Debug)]
pub(crate) struct L1ClientMetrics {
    pub(crate) head: Arc<dyn Gauge>,
    pub(crate) finalized: Arc<dyn Gauge>,
    pub(crate) reconnects: Arc<dyn Counter>,
    pub(crate) failovers: Arc<dyn Counter>,
    pub(crate) failures: Arc<Vec<Box<dyn Counter>>>,
}

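// A hedged sketch of populating two of the handles above from a `Metrics`
// registry, assuming hotshot's `create_gauge`/`create_counter` signatures
// (a label plus an optional unit label). The metric names here are
// illustrative, not necessarily the ones this crate registers.
#[allow(dead_code)]
fn example_metric_handles(metrics: &dyn Metrics) -> (Arc<dyn Gauge>, Arc<dyn Counter>) {
    let head: Arc<dyn Gauge> = Arc::from(metrics.create_gauge("head".into(), None));
    let failovers: Arc<dyn Counter> = Arc::from(metrics.create_counter("failovers".into(), None));
    (head, failovers)
}
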
/// An RPC client with multiple remote (HTTP) providers.
///
/// This client utilizes one RPC provider at a time, but if it detects that the provider is in a
/// failing state, it will automatically switch to the next provider in its list.
#[derive(Clone, Debug)]
pub struct SwitchingTransport {
    /// The transport currently being used by the client
    pub(crate) current_transport: Arc<RwLock<SingleTransport>>,
    /// The list of configured HTTP URLs to use for RPC requests
    pub(crate) urls: Arc<Vec<Url>>,
    pub(crate) opt: Arc<L1ClientOptions>,
    pub(crate) metrics: L1ClientMetrics,
    pub(crate) switch_notify: Arc<Notify>,
}

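// A hedged sketch of the provider rotation implied by `urls` and the
// `generation` counter on `SingleTransport`: each failover bumps the
// generation, and the active URL is the generation modulo the number of
// configured providers, so the rotation eventually cycles back to the
// primary. The modulo rule is an assumption; the real switching logic also
// records metrics and signals `switch_notify`.
#[allow(dead_code)]
fn example_url_for_generation(urls: &[Url], generation: usize) -> &Url {
    &urls[generation % urls.len()]
}
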
/// The state of the current provider being used by a [`SwitchingTransport`].
/// This is cloneable; clones share the same underlying data.
#[derive(Debug, Clone)]
pub(crate) struct SingleTransport {
    pub(crate) generation: usize,
    pub(crate) client: Http<Client>,
    pub(crate) status: Arc<RwLock<SingleTransportStatus>>,
    /// Time at which to revert back to the primary provider after a failover.
    pub(crate) revert_at: Option<Instant>,
}

/// The status of a single transport
#[derive(Debug, Default)]
pub(crate) struct SingleTransportStatus {
    pub(crate) last_failure: Option<Instant>,
    pub(crate) consecutive_failures: usize,
    pub(crate) rate_limited_until: Option<Instant>,
    /// Whether or not this transport is currently being shut down (switching to the next one)
    pub(crate) shutting_down: bool,
}
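
// A hedged sketch of the failure accounting these fields support: fail over
// when two failures land within `l1_frequent_failure_tolerance` of each
// other, or when `l1_consecutive_failure_tolerance` failures occur back to
// back. The exact combination of these rules is an assumption, not this
// crate's verbatim logic; a real implementation would also reset
// `consecutive_failures` on success.
#[allow(dead_code)]
impl SingleTransportStatus {
    fn example_record_failure(&mut self, opt: &L1ClientOptions) -> bool {
        let now = Instant::now();
        self.consecutive_failures += 1;
        // `replace` stores the new failure time and yields the previous one.
        let frequent_failure = self
            .last_failure
            .replace(now)
            .is_some_and(|prev| now.duration_since(prev) < opt.l1_frequent_failure_tolerance);
        frequent_failure || self.consecutive_failures >= opt.l1_consecutive_failure_tolerance
    }
}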