Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Block: Add test that double-spending blocks are rejected #254

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/bin/dashboard_src/send_screen.rs
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ impl SendScreen {
*reset_me.lock().await = ResetType::Form;
}
Err(e) => {
*notice_arc.lock().await = format!("send error. {}", e.to_string());
*notice_arc.lock().await = format!("send error. {e}");
*reset_me.lock().await = ResetType::Notice;
}
}
Expand Down
74 changes: 44 additions & 30 deletions src/models/blockchain/block/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -169,26 +169,26 @@ impl Eq for Block {}

impl Block {
fn template_header(
predecessor: &Block,
predecessor_header: &BlockHeader,
predecessor_digest: Digest,
timestamp: Timestamp,
nonce: Digest,
target_block_interval: Option<Timestamp>,
) -> BlockHeader {
let difficulty = difficulty_control(
timestamp,
predecessor.header().timestamp,
predecessor.header().difficulty,
predecessor_header.timestamp,
predecessor_header.difficulty,
target_block_interval,
predecessor.header().height,
predecessor_header.height,
);

let new_cumulative_proof_of_work: ProofOfWork =
predecessor.kernel.header.cumulative_proof_of_work
+ predecessor.kernel.header.difficulty;
predecessor_header.cumulative_proof_of_work + predecessor_header.difficulty;
BlockHeader {
version: BLOCK_HEADER_VERSION,
height: predecessor.kernel.header.height.next(),
prev_block_digest: predecessor.hash(),
height: predecessor_header.height.next(),
prev_block_digest: predecessor_digest,
timestamp,
nonce,
cumulative_proof_of_work: new_cumulative_proof_of_work,
Expand All @@ -209,8 +209,7 @@ impl Block {
) -> Block {
let primitive_witness = BlockPrimitiveWitness::new(predecessor.to_owned(), transaction);
let body = primitive_witness.body().to_owned();
let header = Self::template_header(
predecessor,
let header = primitive_witness.header(
block_timestamp,
nonce_preimage.hash(),
target_block_interval,
Expand All @@ -220,32 +219,17 @@ impl Block {
Block::new(header, body, appendix, proof)
}

async fn make_block_template_with_valid_proof(
predecessor: &Block,
transaction: Transaction,
block_timestamp: Timestamp,
pub(crate) async fn block_template_from_block_primitive_witness(
primitive_witness: BlockPrimitiveWitness,
timestamp: Timestamp,
nonce_preimage: Digest,
target_block_interval: Option<Timestamp>,
triton_vm_job_queue: &TritonVmJobQueue,
proof_job_options: TritonVmProofJobOptions,
) -> anyhow::Result<Block> {
let tx_claim = SingleProof::claim(transaction.kernel.mast_hash());
assert!(
triton_vm::verify(
Stark::default(),
&tx_claim,
&transaction.proof.clone().into_single_proof()
),
"Transaction proof must be valid to generate a block"
);
let primitive_witness = BlockPrimitiveWitness::new(predecessor.to_owned(), transaction);
let body = primitive_witness.body().to_owned();
let header = Self::template_header(
predecessor,
block_timestamp,
nonce_preimage.hash(),
target_block_interval,
);
let header =
primitive_witness.header(timestamp, nonce_preimage.hash(), target_block_interval);
let (appendix, proof) = {
let appendix_witness =
AppendixWitness::produce(primitive_witness, triton_vm_job_queue).await?;
Expand All @@ -265,6 +249,36 @@ impl Block {
Ok(Block::new(header, body, appendix, proof))
}

/// Produce a block template from `predecessor` and `transaction`, after
/// asserting that the transaction carries a valid single proof.
///
/// Delegates the actual template construction to
/// `block_template_from_block_primitive_witness`.
///
/// # Panics
///
/// Panics if the transaction's single proof does not verify.
async fn make_block_template_with_valid_proof(
    predecessor: &Block,
    transaction: Transaction,
    block_timestamp: Timestamp,
    nonce_preimage: Digest,
    target_block_interval: Option<Timestamp>,
    triton_vm_job_queue: &TritonVmJobQueue,
    proof_job_options: TritonVmProofJobOptions,
) -> anyhow::Result<Block> {
    // A block may only be built on a transaction whose proof checks out.
    let claim = SingleProof::claim(transaction.kernel.mast_hash());
    let proof = transaction.proof.clone().into_single_proof();
    let proof_is_valid = triton_vm::verify(Stark::default(), &claim, &proof);
    assert!(
        proof_is_valid,
        "Transaction proof must be valid to generate a block"
    );

    let witness = BlockPrimitiveWitness::new(predecessor.to_owned(), transaction);
    Self::block_template_from_block_primitive_witness(
        witness,
        block_timestamp,
        nonce_preimage,
        target_block_interval,
        triton_vm_job_queue,
        proof_job_options,
    )
    .await
}

/// Compose a block.
///
/// Create a block with valid block proof, but without proof-of-work.
Expand Down
50 changes: 46 additions & 4 deletions src/models/blockchain/block/mutator_set_update.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,14 @@ impl MutatorSetUpdate {
}
}

/// Like `apply_to_accumulator`, but without verifying that the removal
/// records are applicable.
///
/// In other words: this variant does *not* detect double spends. Callers
/// are responsible for validating the result through other means, e.g.
/// full block validation.
pub(crate) fn apply_to_accumulator_unsafe(&self, ms_accumulator: &mut MutatorSetAccumulator) {
    // Deliberately ignore the validity flag -- that is the whole point of
    // the "unsafe" variant.
    self.apply_to_accumulator_and_records_inner(ms_accumulator, &mut []);
}

/// Apply a mutator-set-update to a mutator-set-accumulator.
///
/// Changes the mutator
Expand All @@ -32,7 +40,13 @@ impl MutatorSetUpdate {
///
/// Returns an error if some removal record could not be removed.
pub fn apply_to_accumulator(&self, ms_accumulator: &mut MutatorSetAccumulator) -> Result<()> {
self.apply_to_accumulator_and_records(ms_accumulator, &mut [])
let valid_removal_records =
self.apply_to_accumulator_and_records_inner(ms_accumulator, &mut []);
if valid_removal_records {
Ok(())
} else {
bail!("Cannot remove item from mutator set.");
}
}

/// Apply a mutator-set-update to a mutator-set-accumulator and a bunch of
Expand All @@ -45,12 +59,39 @@ impl MutatorSetUpdate {
///
/// # Return Value
///
/// Returns an error if some removal record could not be removed.
/// Returns an error if some removal record could not be removed. This
/// return value **must** be verified to be OK. If it is not, then the
/// mutator set will be in an invalid state.
pub fn apply_to_accumulator_and_records(
&self,
ms_accumulator: &mut MutatorSetAccumulator,
removal_records: &mut [&mut RemovalRecord],
) -> Result<()> {
let valid_removal_records =
self.apply_to_accumulator_and_records_inner(ms_accumulator, removal_records);
if valid_removal_records {
Ok(())
} else {
bail!("Cannot remove item from mutator set.");
}
}

/// Apply a mutator set update to a mutator set accumulator. Modifies the
/// mutator set according to the content of the mutator set update and
/// returns a boolean indicating if all removal records were valid.
///
/// If this boolean is false, then at least one removal record was invalid
/// which could for example mean a double-spend, or an invalid MMR
/// membership proof into the sliding-window Bloom filter.
///
/// This function should *not* be made public, as the caller should always
/// explicitly decide if they want the safe or unsafe version which checks
/// the returned boolean.
fn apply_to_accumulator_and_records_inner(
&self,
ms_accumulator: &mut MutatorSetAccumulator,
removal_records: &mut [&mut RemovalRecord],
) -> bool {
let mut cloned_removals = self.removals.clone();
let mut applied_removal_records = cloned_removals.iter_mut().rev().collect::<Vec<_>>();
for addition_record in self.additions.iter() {
Expand All @@ -61,6 +102,7 @@ impl MutatorSetUpdate {
ms_accumulator.add(addition_record);
}

let mut removal_records_are_valid = true;
while let Some(applied_removal_record) = applied_removal_records.pop() {
RemovalRecord::batch_update_from_remove(
&mut applied_removal_records,
Expand All @@ -70,12 +112,12 @@ impl MutatorSetUpdate {
RemovalRecord::batch_update_from_remove(removal_records, applied_removal_record);

if !ms_accumulator.can_remove(applied_removal_record) {
bail!("Cannot remove item from mutator set.");
removal_records_are_valid = false;
}
ms_accumulator.remove(applied_removal_record);
}

Ok(())
removal_records_are_valid
}
}

Expand Down
43 changes: 37 additions & 6 deletions src/models/blockchain/block/validity/block_primitive_witness.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
use std::sync::OnceLock;

use tasm_lib::twenty_first::prelude::Mmr;
use tasm_lib::Digest;

use crate::models::blockchain::block::block_body::BlockBody;
use crate::models::blockchain::block::block_header::BlockHeader;
use crate::models::blockchain::block::mutator_set_update::MutatorSetUpdate;
use crate::models::blockchain::block::Block;
use crate::models::blockchain::transaction::Transaction;
use crate::models::proof_abstractions::timestamp::Timestamp;

/// Wraps all information necessary to produce a block.
///
Expand Down Expand Up @@ -57,20 +60,48 @@ impl BlockPrimitiveWitness {
&self.transaction
}

/// Assemble the header of the block described by this witness.
///
/// Height, previous-block digest, difficulty, and cumulative
/// proof-of-work are all derived from the predecessor block via
/// `Block::template_header`.
pub(crate) fn header(
    &self,
    timestamp: Timestamp,
    nonce: Digest,
    target_block_interval: Option<Timestamp>,
) -> BlockHeader {
    Block::template_header(
        self.predecessor_block.header(),
        self.predecessor_block.hash(),
        timestamp,
        nonce,
        target_block_interval,
    )
}

/// Test-only accessor for the predecessor block this witness was built on.
#[cfg(test)]
pub(crate) fn predecessor_block(&self) -> &Block {
&self.predecessor_block
}

pub(crate) fn body(&self) -> &BlockBody {
self.maybe_body.get_or_init(||{
self.maybe_body.get_or_init(|| {
assert_eq!(
self.predecessor_block.mutator_set_accumulator_after().hash(),
self.predecessor_block
.mutator_set_accumulator_after()
.hash(),
self.transaction.kernel.mutator_set_hash,
"Mutator set of transaction must agree with mutator set after previous block."
);

let mut mutator_set = self.predecessor_block.mutator_set_accumulator_after();
let mutator_set_update = MutatorSetUpdate::new(self.transaction.kernel.inputs.clone(), self.transaction.kernel.outputs.clone());
let mutator_set_update = MutatorSetUpdate::new(
self.transaction.kernel.inputs.clone(),
self.transaction.kernel.outputs.clone(),
);

mutator_set_update.apply_to_accumulator(&mut mutator_set).unwrap_or_else(|e| {
panic!("attempting to produce a block body from a transaction whose mutator set update is incompatible: {e:?}");
});
// Due to tests, we don't verify that the removal records can be applied. That is
// the caller's responsibility to ensure by e.g. calling block.is_valid() after
// constructing a block.
mutator_set_update.apply_to_accumulator_unsafe(&mut mutator_set);

let predecessor_body = self.predecessor_block.body();
let lock_free_mmr = predecessor_body.lock_free_mmr_accumulator.clone();
Expand Down
67 changes: 67 additions & 0 deletions src/models/blockchain/block/validity/block_program.rs
Original file line number Diff line number Diff line change
Expand Up @@ -179,11 +179,18 @@ pub(crate) mod test {
use itertools::Itertools;
use tasm_lib::triton_vm::vm::PublicInput;
use tracing_test::traced_test;
use triton_vm::prelude::Digest;

use super::*;
use crate::job_queue::triton_vm::TritonVmJobPriority;
use crate::job_queue::triton_vm::TritonVmJobQueue;
use crate::models::blockchain::block::validity::block_primitive_witness::test::deterministic_block_primitive_witness;
use crate::models::blockchain::block::Block;
use crate::models::blockchain::block::BlockPrimitiveWitness;
use crate::models::blockchain::block::TritonVmProofJobOptions;
use crate::models::blockchain::transaction::Transaction;
use crate::models::proof_abstractions::mast_hash::MastHash;
use crate::models::proof_abstractions::timestamp::Timestamp;
use crate::models::proof_abstractions::SecretWitness;

#[traced_test]
Expand Down Expand Up @@ -228,4 +235,64 @@ pub(crate) mod test {
.collect_vec();
assert_eq!(expected_output, tasm_output);
}

// TODO: Add test that verifies that double spends *within* one block are
// disallowed.

#[traced_test]
#[test]
fn disallow_double_spends_across_blocks() {
// Grab a deterministic witness and the transaction it wraps. The
// transaction must spend something, otherwise a double-spend test is
// meaningless.
let current_pw = deterministic_block_primitive_witness();
let tx = current_pw.transaction().to_owned();
assert!(
!tx.kernel.inputs.is_empty(),
"Transaction in double-spend test cannot be empty"
);
let predecessor = current_pw.predecessor_block().to_owned();
let mock_now = predecessor.header().timestamp + Timestamp::months(12);

// Produce the first block, which legitimately spends the inputs.
// Entering the runtime lets the async template builder run from this
// sync test.
let rt = tokio::runtime::Runtime::new().unwrap();
let _guard = rt.enter();
let current_block = rt
.block_on(Block::block_template_from_block_primitive_witness(
current_pw,
mock_now,
Digest::default(),
None,
&TritonVmJobQueue::dummy(),
TritonVmProofJobOptions::default(),
))
.unwrap();

// Sanity check: the first spend is accepted.
assert!(current_block.is_valid(&predecessor, mock_now));

// Update the same transaction's mutator set records so it targets the
// new tip. Its inputs are already spent by `current_block`, so any
// block containing it is a double spend.
let mutator_set_update = current_block.mutator_set_update();
let updated_tx = rt
.block_on(
Transaction::new_with_updated_mutator_set_records_given_proof(
tx.kernel,
&predecessor.mutator_set_accumulator_after(),
&mutator_set_update,
tx.proof.into_single_proof(),
&TritonVmJobQueue::dummy(),
TritonVmJobPriority::default().into(),
),
)
.unwrap();
// The updated transaction is valid in isolation ...
assert!(rt.block_on(updated_tx.is_valid()));

// ... but a block that includes it a second time must be rejected.
let mock_later = mock_now + Timestamp::hours(3);
let next_pw = BlockPrimitiveWitness::new(current_block.clone(), updated_tx);
let next_block = rt
.block_on(Block::block_template_from_block_primitive_witness(
next_pw,
mock_later,
Digest::default(),
None,
&TritonVmJobQueue::dummy(),
TritonVmProofJobOptions::default(),
))
.unwrap();
assert!(!next_block.is_valid(&current_block, mock_later));
}
}