1use std::{
2 cmp::min,
3 collections::{HashMap, VecDeque},
4 sync::OnceLock,
5};
67use blake3::Hasher;
8use monero_serai::{
9 block::Block,
10 transaction::{Input, Transaction},
11};
12use tower::{Service, ServiceExt};
1314use cuprate_blockchain::service::BlockchainReadHandle;
15use cuprate_consensus::transactions::new_tx_verification_data;
16use cuprate_consensus_context::BlockchainContext;
17use cuprate_p2p::block_downloader::ChainEntry;
18use cuprate_p2p_core::NetworkZone;
19use cuprate_types::{
20 blockchain::{BlockchainReadRequest, BlockchainResponse},
21 Chain, VerifiedBlockInformation, VerifiedTransactionInformation,
22};
/// A [`OnceLock`] representing the fast sync hashes.
///
/// Set exactly once at startup via [`set_fast_sync_hashes`]. Each element is the
/// hash of one batch of [`FAST_SYNC_BATCH_LEN`] consecutive block hashes
/// (BLAKE3, see the verification loop in [`validate_entries`]).
static FAST_SYNC_HASHES: OnceLock<&[[u8; 32]]> = OnceLock::new();

/// The size of a batch of block hashes to hash to create a fast sync hash.
///
/// Fast-sync can only validate whole batches of this many blocks at a time.
pub const FAST_SYNC_BATCH_LEN: usize = 512;
2930/// Returns the height of the first block not included in the embedded hashes.
31///
32/// # Panics
33///
34/// This function will panic if [`set_fast_sync_hashes`] has not been called.
35pub fn fast_sync_stop_height() -> usize {
36 FAST_SYNC_HASHES.get().unwrap().len() * FAST_SYNC_BATCH_LEN
37}
3839/// Sets the hashes to use for fast-sync.
40///
41/// # Panics
42///
43/// This will panic if this is called more than once.
44pub fn set_fast_sync_hashes(hashes: &'static [[u8; 32]]) {
45 FAST_SYNC_HASHES.set(hashes).unwrap();
46}
/// Validates that the given [`ChainEntry`]s are in the fast-sync hashes.
///
/// `entries` should be a list of sequential entries.
/// `start_height` should be the height of the first block in the first entry.
///
/// Returns a tuple, the first element being the entries that are valid*, the second
/// the entries we do not know are valid and should be passed in again when we have more entries.
///
/// *once we are passed the fast sync blocks all entries will be returned as valid as
/// we can not check their validity here.
///
/// There may be more entries returned than passed in as entries could be split.
///
/// # Panics
///
/// This will panic if [`set_fast_sync_hashes`] has not been called.
pub async fn validate_entries<N: NetworkZone>(
    mut entries: VecDeque<ChainEntry<N>>,
    start_height: usize,
    blockchain_read_handle: &mut BlockchainReadHandle,
) -> Result<(VecDeque<ChainEntry<N>>, VecDeque<ChainEntry<N>>), tower::BoxError> {
    // if we are past the top fast sync block return all entries as valid.
    if start_height >= fast_sync_stop_height() {
        return Ok((entries, VecDeque::new()));
    }

    /*
       The algorithm used here needs to preserve which peer told us about which blocks, so we cannot
       simply join all the hashes together, return all the ones that can be validated and the ones that
       can't; we need to keep the batches separate.

       The first step is to calculate how many hashes we need from the blockchain to make up the first
       fast-sync hash.

       Then we will take out all the batches at the end for which we cannot make up a full fast-sync
       hash, splitting a batch if it can only be partially validated.

       With the remaining hashes from the blockchain and the hashes in the batches we can validate we
       work on calculating the fast sync hashes and comparing them to the ones in [`FAST_SYNC_HASHES`].
    */

    // First calculate the start and stop for this range of hashes.
    // `hashes_start_height` rounds `start_height` DOWN to a batch boundary; the gap
    // `hashes_start_height..start_height` is filled from the blockchain below.
    let hashes_start_height = (start_height / FAST_SYNC_BATCH_LEN) * FAST_SYNC_BATCH_LEN;
    let amount_of_hashes = entries.iter().map(|e| e.ids.len()).sum::<usize>();
    let last_height = amount_of_hashes + start_height;

    // Stop at the last complete batch boundary, and never beyond the embedded hashes.
    let hashes_stop_height = min(
        (last_height / FAST_SYNC_BATCH_LEN) * FAST_SYNC_BATCH_LEN,
        fast_sync_stop_height(),
    );

    // Number of trailing hashes that cannot be covered by a full batch and so
    // cannot be validated this round.
    let mut hashes_stop_diff_last_height = last_height - hashes_stop_height;

    // get the hashes we are missing to create the first fast-sync hash.
    let BlockchainResponse::BlockHashInRange(starting_hashes) = blockchain_read_handle
        .ready()
        .await?
        .call(BlockchainReadRequest::BlockHashInRange(
            hashes_start_height..start_height,
            Chain::Main,
        ))
        .await?
    else {
        // The read service must answer `BlockHashInRange` with the matching variant.
        unreachable!()
    };

    // If we don't have enough hashes to make up a batch we can't validate any.
    if amount_of_hashes + starting_hashes.len() < FAST_SYNC_BATCH_LEN {
        return Ok((VecDeque::new(), entries));
    }

    let mut unknown = VecDeque::new();

    // start moving from the back of the batches taking enough hashes out so we are only left with hashes
    // that can be verified.
    while !entries.is_empty() && hashes_stop_diff_last_height != 0 {
        let back = entries.back_mut().unwrap();

        if back.ids.len() >= hashes_stop_diff_last_height {
            // This batch is partially valid so split it, keeping the peer/handle
            // association intact for the unvalidated tail.
            unknown.push_front(ChainEntry {
                ids: back
                    .ids
                    .drain((back.ids.len() - hashes_stop_diff_last_height)..)
                    .collect(),
                peer: back.peer,
                handle: back.handle.clone(),
            });

            break;
        }

        // Add this batch to the front of the unknowns, we do not know its validity.
        let back = entries.pop_back().unwrap();
        hashes_stop_diff_last_height -= back.ids.len();
        unknown.push_front(back);
    }

    // Start verifying the hashes.
    let mut hasher = Hasher::default();
    // Deliberately initialized to 1 so the assert below fails if the loop never
    // runs (which would mean we validated nothing despite the earlier length check).
    let mut last_i = 1;
    for (i, hash) in starting_hashes
        .iter()
        .chain(entries.iter().flat_map(|e| e.ids.iter()))
        .enumerate()
    {
        hasher.update(hash);

        // A full batch has been fed in; compare against the embedded hash.
        if (i + 1) % FAST_SYNC_BATCH_LEN == 0 {
            let got_hash = hasher.finalize();

            if got_hash
                != FAST_SYNC_HASHES.get().unwrap()
                    [get_hash_index_for_height(hashes_start_height + i)]
            {
                return Err("Hashes do not match".into());
            }
            hasher.reset();
        }

        last_i = i + 1;
    }
    // Make sure we actually checked all hashes.
    assert_eq!(last_i % FAST_SYNC_BATCH_LEN, 0);

    Ok((entries, unknown))
}
175176/// Get the index of the hash that contains this block in the fast sync hashes.
177const fn get_hash_index_for_height(height: usize) -> usize {
178 height / FAST_SYNC_BATCH_LEN
179}
180181/// Creates a [`VerifiedBlockInformation`] from a block known to be valid.
182///
183/// # Panics
184///
185/// This may panic if used on an invalid block.
186pub fn block_to_verified_block_information(
187 block: Block,
188 txs: Vec<Transaction>,
189 blockchin_ctx: &BlockchainContext,
190) -> VerifiedBlockInformation {
191let block_hash = block.hash();
192193let block_blob = block.serialize();
194195let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() else {
196panic!("fast sync block invalid");
197 };
198199assert_eq!(
200*height, blockchin_ctx.chain_height,
201"fast sync block invalid"
202);
203204let mut txs = txs
205 .into_iter()
206 .map(|tx| {
207let data = new_tx_verification_data(tx).expect("fast sync block invalid");
208209 (data.tx_hash, data)
210 })
211 .collect::<HashMap<_, _>>();
212213let mut verified_txs = Vec::with_capacity(txs.len());
214for tx in &block.transactions {
215let data = txs.remove(tx).expect("fast sync block invalid");
216217 verified_txs.push(VerifiedTransactionInformation {
218 tx_blob: data.tx_blob,
219 tx_weight: data.tx_weight,
220 fee: data.fee,
221 tx_hash: data.tx_hash,
222 tx: data.tx,
223 });
224 }
225226let total_fees = verified_txs.iter().map(|tx| tx.fee).sum::<u64>();
227let total_outputs = block
228 .miner_transaction
229 .prefix()
230 .outputs
231 .iter()
232 .map(|output| output.amount.unwrap_or(0))
233 .sum::<u64>();
234235let generated_coins = total_outputs - total_fees;
236237let weight = block.miner_transaction.weight()
238 + verified_txs.iter().map(|tx| tx.tx_weight).sum::<usize>();
239240 VerifiedBlockInformation {
241 block_blob,
242 txs: verified_txs,
243 block_hash,
244 pow_hash: [u8::MAX; 32],
245 height: *height,
246 generated_coins,
247 weight,
248 long_term_weight: blockchin_ctx.next_block_long_term_weight(weight),
249 cumulative_difficulty: blockchin_ctx.cumulative_difficulty + blockchin_ctx.next_difficulty,
250 block,
251 }
252}
#[cfg(test)]
mod tests {
    use std::{collections::VecDeque, slice, sync::LazyLock};

    use proptest::proptest;

    use cuprate_p2p::block_downloader::ChainEntry;
    use cuprate_p2p_core::{client::InternalPeerID, handles::HandleBuilder, ClearNet};

    use crate::{
        fast_sync_stop_height, set_fast_sync_hashes, validate_entries, FAST_SYNC_BATCH_LEN,
    };

    /// Deterministic fake block hashes (the index encoded in the first 8 bytes),
    /// leaked to get the `'static` lifetime [`set_fast_sync_hashes`] requires.
    /// Initializing this also installs the matching fast-sync batch hashes,
    /// so every test touches `HASHES` first to trigger the one-time setup.
    static HASHES: LazyLock<&[[u8; 32]]> = LazyLock::new(|| {
        let hashes = (0..FAST_SYNC_BATCH_LEN * 2000)
            .map(|i| {
                let mut ret = [0; 32];
                ret[..8].copy_from_slice(&i.to_le_bytes());
                ret
            })
            .collect::<Vec<_>>();

        let hashes = hashes.leak();

        let fast_sync_hashes = hashes
            .chunks(FAST_SYNC_BATCH_LEN)
            .map(|chunk| {
                let len = chunk.len() * 32;
                let bytes = chunk.as_ptr().cast::<u8>();

                // SAFETY:
                // We are casting a valid [[u8; 32]] to a [u8], no alignment requirements and we are using it
                // within the [[u8; 32]]'s lifetime.
                unsafe { blake3::hash(slice::from_raw_parts(bytes, len)).into() }
            })
            .collect::<Vec<_>>();

        set_fast_sync_hashes(fast_sync_hashes.leak());

        hashes
    });

    proptest! {
        /// A single entry of arbitrary length: the split into (valid, unknown)
        /// must conserve the total hash count, never validate past the fast-sync
        /// stop height, and only leave a partial batch unknown.
        #[test]
        fn valid_entry(len in 0_usize..1_500_000) {
            let mut ids = HASHES.to_vec();
            ids.resize(len, [0_u8; 32]);

            let handle = HandleBuilder::new().build();

            let entry = ChainEntry {
                ids,
                peer: InternalPeerID::Unknown(1),
                handle: handle.1
            };

            // Fresh (empty) blockchain DB in a temp dir; heights start at 0.
            let data_dir = tempfile::tempdir().unwrap();

            tokio_test::block_on(async move {
                let blockchain_config = cuprate_blockchain::config::ConfigBuilder::new()
                    .data_directory(data_dir.path().to_path_buf())
                    .build();

                let (mut blockchain_read_handle, _, _) =
                    cuprate_blockchain::service::init(blockchain_config).unwrap();

                let ret = validate_entries::<ClearNet>(VecDeque::from([entry]), 0, &mut blockchain_read_handle).await.unwrap();

                let len_left = ret.0.iter().map(|e| e.ids.len()).sum::<usize>();
                let len_right = ret.1.iter().map(|e| e.ids.len()).sum::<usize>();

                // No hashes may be dropped or duplicated by the split.
                assert_eq!(len_left + len_right, len);
                assert!(len_left <= fast_sync_stop_height());
                assert!(len_right < FAST_SYNC_BATCH_LEN || len > fast_sync_stop_height());
            });
        }

        /// Worst-case batching: every entry carries exactly one hash, exercising
        /// the entry-splitting loop at its finest granularity.
        #[test]
        fn single_hash_entries(len in 0_usize..1_500_000) {
            let handle = HandleBuilder::new().build();
            let entries = (0..len).map(|i| {
                ChainEntry {
                    ids: vec![HASHES.get(i).copied().unwrap_or_default()],
                    peer: InternalPeerID::Unknown(1),
                    handle: handle.1.clone()
                }
            }).collect();

            let data_dir = tempfile::tempdir().unwrap();

            tokio_test::block_on(async move {
                let blockchain_config = cuprate_blockchain::config::ConfigBuilder::new()
                    .data_directory(data_dir.path().to_path_buf())
                    .build();

                let (mut blockchain_read_handle, _, _) =
                    cuprate_blockchain::service::init(blockchain_config).unwrap();

                let ret = validate_entries::<ClearNet>(entries, 0, &mut blockchain_read_handle).await.unwrap();

                let len_left = ret.0.iter().map(|e| e.ids.len()).sum::<usize>();
                let len_right = ret.1.iter().map(|e| e.ids.len()).sum::<usize>();

                assert_eq!(len_left + len_right, len);
                assert!(len_left <= fast_sync_stop_height());
                assert!(len_right < FAST_SYNC_BATCH_LEN || len > fast_sync_stop_height());
            });
        }

        /// Fewer hashes than one batch (and a start height the empty DB cannot
        /// backfill): nothing can be validated, everything must come back unknown.
        #[test]
        fn not_enough_hashes(len in 0_usize..FAST_SYNC_BATCH_LEN) {
            let hashes_start_height = FAST_SYNC_BATCH_LEN * 1234;

            let handle = HandleBuilder::new().build();
            let entry = ChainEntry {
                ids: HASHES[hashes_start_height..(hashes_start_height + len)].to_vec(),
                peer: InternalPeerID::Unknown(1),
                handle: handle.1
            };

            let data_dir = tempfile::tempdir().unwrap();

            tokio_test::block_on(async move {
                let blockchain_config = cuprate_blockchain::config::ConfigBuilder::new()
                    .data_directory(data_dir.path().to_path_buf())
                    .build();

                let (mut blockchain_read_handle, _, _) =
                    cuprate_blockchain::service::init(blockchain_config).unwrap();

                let ret = validate_entries::<ClearNet>(VecDeque::from([entry]), 0, &mut blockchain_read_handle).await.unwrap();

                let len_left = ret.0.iter().map(|e| e.ids.len()).sum::<usize>();
                let len_right = ret.1.iter().map(|e| e.ids.len()).sum::<usize>();

                assert_eq!(len_right, len);
                assert_eq!(len_left, 0);
            });
        }
    }
}