Merge pull request #2694 from o1-labs/fizzixnerd/marc/o1vm/verif
o1vm/pickles: Add the verifier
dannywillems authored Oct 31, 2024
2 parents c3a8b83 + 0d8e46d · commit 6ce40b0
Showing 10 changed files with 456 additions and 53 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions o1vm/Cargo.toml
@@ -25,6 +25,7 @@ name = "pickles_o1vm"
 path = "src/pickles/main.rs"
 
 [dependencies]
+o1-utils.workspace = true
 # FIXME: Only activate this when legacy_o1vm is built
 ark-bn254.workspace = true
 # FIXME: Only activate this when legacy_o1vm is built
1 change: 1 addition & 0 deletions o1vm/run-code.sh
@@ -16,4 +16,5 @@ else
     ./run-op-program.sh
     ./run-cannon.sh
 fi
+
 ./run-vm.sh
44 changes: 31 additions & 13 deletions o1vm/src/pickles/column_env.rs
@@ -1,5 +1,6 @@
 use ark_ff::FftField;
 use ark_poly::{Evaluations, Radix2EvaluationDomain};
+use kimchi_msm::columns::Column;
 
 use crate::{
     interpreters::mips::{column::N_MIPS_SEL_COLS, witness::SCRATCH_SIZE},
@@ -34,42 +35,59 @@ pub struct ColumnEnvironment<'a, F: FftField> {
     pub domain: EvaluationDomains<F>,
 }
 
-impl<'a, F: FftField> TColumnEnvironment<'a, F, BerkeleyChallengeTerm, BerkeleyChallenges<F>>
-    for ColumnEnvironment<'a, F>
-{
-    // FIXME: do we change to the MIPS column type?
-    // We do not want to keep kimchi_msm/generic prover
-    type Column = kimchi_msm::columns::Column;
+pub fn get_all_columns() -> Vec<Column> {
+    let mut cols = Vec::<Column>::with_capacity(SCRATCH_SIZE + 2 + N_MIPS_SEL_COLS);
+    for i in 0..SCRATCH_SIZE + 2 {
+        cols.push(Column::Relation(i));
+    }
+    for i in 0..N_MIPS_SEL_COLS {
+        cols.push(Column::DynamicSelector(i));
+    }
+    cols
+}
 
-    fn get_column(&self, col: &Self::Column) -> Option<&'a Evals<F>> {
+impl<G> WitnessColumns<G, [G; N_MIPS_SEL_COLS]> {
+    pub fn get_column(&self, col: &Column) -> Option<&G> {
         match *col {
-            Self::Column::Relation(i) => {
+            Column::Relation(i) => {
                 if i < SCRATCH_SIZE {
-                    let res = &self.witness.scratch[i];
+                    let res = &self.scratch[i];
                     Some(res)
                 } else if i == SCRATCH_SIZE {
-                    let res = &self.witness.instruction_counter;
+                    let res = &self.instruction_counter;
                     Some(res)
                 } else if i == SCRATCH_SIZE + 1 {
-                    let res = &self.witness.error;
+                    let res = &self.error;
                     Some(res)
                 } else {
                     panic!("We should not have that many relation columns");
                 }
             }
-            Self::Column::DynamicSelector(i) => {
+            Column::DynamicSelector(i) => {
                 assert!(
                     i < N_MIPS_SEL_COLS,
                     "We do not have that many dynamic selector columns"
                 );
-                let res = &self.witness.selector[i];
+                let res = &self.selector[i];
                 Some(res)
             }
             _ => {
                 panic!("We should not have any other type of columns")
            }
         }
     }
+}
+
+impl<'a, F: FftField> TColumnEnvironment<'a, F, BerkeleyChallengeTerm, BerkeleyChallenges<F>>
+    for ColumnEnvironment<'a, F>
+{
+    // FIXME: do we change to the MIPS column type?
+    // We do not want to keep kimchi_msm/generic prover
+    type Column = Column;
+
+    fn get_column(&self, col: &Self::Column) -> Option<&'a Evals<F>> {
+        self.witness.get_column(col)
+    }
 
     fn get_domain(&self, d: Domain) -> Radix2EvaluationDomain<F> {
         match d {
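Note: the point of this refactoring is that column lookup now lives on WitnessColumns<G, _> itself, for any content type G (evaluation lists on the prover side, commitments and scalars on the verifier side), while get_all_columns fixes one canonical column order both sides can share. A minimal sketch of the access pattern this enables (the helper below is an illustration, not code from this commit):

    use o1vm::interpreters::mips::column::N_MIPS_SEL_COLS;
    use o1vm::pickles::{column_env::get_all_columns, proof::WitnessColumns};

    /// Illustrative only: collect the entries of a WitnessColumns value in
    /// the canonical order fixed by get_all_columns().
    fn collect_all_columns<G: Clone>(
        witness: &WitnessColumns<G, [G; N_MIPS_SEL_COLS]>,
    ) -> Vec<G> {
        get_all_columns()
            .iter()
            .map(|col| {
                witness
                    .get_column(col)
                    .expect("every column listed by get_all_columns() resolves")
                    .clone()
            })
            .collect()
    }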
26 changes: 15 additions & 11 deletions o1vm/src/pickles/main.rs
@@ -17,10 +17,7 @@ use o1vm::{
         witness::{self as mips_witness},
         Instruction,
     },
-    pickles::{
-        proof::{Proof, ProofInputs},
-        prover,
-    },
+    pickles::{proof::ProofInputs, prover, verifier},
     preimage_oracle::PreImageOracle,
 };
 use poly_commitment::{ipa::SRS, SRS as _};
@@ -124,19 +121,26 @@ pub fn main() -> ExitCode {
             // FIXME
             let start_iteration = Instant::now();
             debug!("Limit of {DOMAIN_SIZE} reached. We make a proof, verify it (for testing) and start with a new chunk");
-            let _proof: Result<Proof<Vesta>, prover::ProverError> =
-                prover::prove::<
-                    Vesta,
-                    DefaultFqSponge<VestaParameters, PlonkSpongeConstantsKimchi>,
-                    DefaultFrSponge<Fp, PlonkSpongeConstantsKimchi>,
-                    _,
-                >(domain_fp, &srs, curr_proof_inputs, &constraints, &mut rng);
+            let proof = prover::prove::<
+                Vesta,
+                DefaultFqSponge<VestaParameters, PlonkSpongeConstantsKimchi>,
+                DefaultFrSponge<Fp, PlonkSpongeConstantsKimchi>,
+                _,
+            >(domain_fp, &srs, curr_proof_inputs, &constraints, &mut rng)
+            .unwrap();
             // FIXME: check that the proof is correct. This is for testing purposes.
             // Leaving like this for now.
             debug!(
                 "Proof generated in {elapsed} μs",
                 elapsed = start_iteration.elapsed().as_micros()
             );
+            let verif = verifier::verify::<
+                Vesta,
+                DefaultFqSponge<VestaParameters, PlonkSpongeConstantsKimchi>,
+                DefaultFrSponge<Fp, PlonkSpongeConstantsKimchi>,
+            >(domain_fp, &srs, &constraints, &proof);
+            assert!(verif);
+
             curr_proof_inputs = ProofInputs::new(DOMAIN_SIZE);
         }
     }
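The new verifier.rs itself is not rendered on this page, but the call site above already pins down its interface: the same curve and sponge type parameters as prove (minus the RNG) and a boolean verdict. Presumably the entry point looks roughly like this (an inference from the call, not text from the diff):

    pub fn verify<G: KimchiCurve, EFqSponge, EFrSponge>(
        domain: EvaluationDomains<G::ScalarField>,
        srs: &SRS<G>,
        constraints: &[E<G::ScalarField>],
        proof: &Proof<G>,
    ) -> bool;

where E is the constraint-expression alias the prover already takes, and the sponge bounds match those of prove.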
1 change: 1 addition & 0 deletions o1vm/src/pickles/mod.rs
@@ -15,6 +15,7 @@
 pub mod column_env;
 pub mod proof;
 pub mod prover;
+pub mod verifier;
 
 /// Maximum degree of the constraints.
 /// It does include the additional degree induced by the multiplication of the
5 changes: 4 additions & 1 deletion o1vm/src/pickles/proof.rs
@@ -1,8 +1,9 @@
-use kimchi::curve::KimchiCurve;
+use kimchi::{curve::KimchiCurve, proof::PointEvaluations};
 use poly_commitment::{ipa::OpeningProof, PolyComm};
 
 use crate::interpreters::mips::column::N_MIPS_SEL_COLS;
 
+#[derive(Debug)]
 pub struct WitnessColumns<G, S> {
     pub scratch: [G; crate::interpreters::mips::witness::SCRATCH_SIZE],
     pub instruction_counter: G,
@@ -32,6 +33,8 @@ pub struct Proof<G: KimchiCurve> {
     pub commitments: WitnessColumns<PolyComm<G>, [PolyComm<G>; N_MIPS_SEL_COLS]>,
     pub zeta_evaluations: WitnessColumns<G::ScalarField, [G::ScalarField; N_MIPS_SEL_COLS]>,
     pub zeta_omega_evaluations: WitnessColumns<G::ScalarField, [G::ScalarField; N_MIPS_SEL_COLS]>,
+    pub quotient_commitment: PolyComm<G>,
+    pub quotient_evaluations: PointEvaluations<Vec<G::ScalarField>>,
     /// IPA opening proof
     pub opening_proof: OpeningProof<G>,
 }
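The two new fields mirror the prover's rounds 2 and 3: the quotient polynomial t(X) has degree larger than the domain, so it is committed and evaluated in DEGREE_QUOTIENT_POLYNOMIAL chunks of degree below the domain size n, which is why quotient_evaluations stores a Vec of scalars per point. A verifier recombines the chunk values as t(ζ) = Σ_i ζ^(i·n) · t_i(ζ); a self-contained sketch of that recombination (an illustration, not code from this commit):

    use ark_ff::{Field, Zero};

    /// Recombine chunk evaluations t_i(zeta) into t(zeta) by Horner's rule,
    /// where t(X) = sum_i X^(i*n) * t_i(X) and each chunk has degree < n.
    fn recombine_chunk_evals<F: Field>(chunk_evals: &[F], zeta: F, n: u64) -> F {
        let zeta_n = zeta.pow([n]);
        chunk_evals
            .iter()
            .rev()
            .fold(F::zero(), |acc, eval| acc * zeta_n + eval)
    }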
90 changes: 66 additions & 24 deletions o1vm/src/pickles/prover.rs
@@ -12,9 +12,11 @@ use kimchi::{
     curve::KimchiCurve,
     groupmap::GroupMap,
     plonk_sponge::FrSponge,
+    proof::PointEvaluations,
 };
 use log::debug;
 use mina_poseidon::{sponge::ScalarChallenge, FqSponge};
+use o1_utils::ExtendedDensePolynomial;
 use poly_commitment::{
     commitment::{absorb_commitment, PolyComm},
     ipa::{DensePolynomialOrEvaluations, OpeningProof, SRS},
@@ -126,9 +128,16 @@ where
         error,
         selector,
     } = &polys;
-    // Note: we do not blind. We might want in the near future in case we
-    // have a column with only zeroes.
-    let comm = |poly: &DensePolynomial<G::ScalarField>| srs.commit_non_hiding(poly, num_chunks);
+
+    let comm = |poly: &DensePolynomial<G::ScalarField>| {
+        srs.commit_custom(
+            poly,
+            num_chunks,
+            &PolyComm::new(vec![G::ScalarField::one()]),
+        )
+        .unwrap()
+        .commitment
+    };
     // Doing in parallel
     let scratch = scratch.par_iter().map(comm).collect::<Vec<_>>();
     let selector = selector.par_iter().map(comm).collect::<Vec<_>>();
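A note on the change just above: commit_non_hiding is replaced by commit_custom, which takes the blinders explicitly. Committing every column with a fixed blinding factor of one means even an all-zero column (which a non-hiding commitment would send to the identity point) gets a well-defined commitment; and since commit_custom returns the commitment together with the blinders used (hence the .unwrap().commitment projection out of its Result), the same blinders can later be fed into the opening proof.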
@@ -240,27 +249,36 @@ where
         // And we interpolate using the evaluations
         let expr_evaluation_interpolated = expr_evaluation.interpolate();
 
-        let fail_final_q_division = || {
-            panic!("Division by vanishing poly must not fail at this point, we checked it before")
-        };
+        let fail_final_q_division = || panic!("Fail division by vanishing poly");
+        let fail_remainder_not_zero =
+            || panic!("The constraints are not satisfied since the remainder is not zero");
         // We compute the polynomial t(X) by dividing the constraints polynomial
         // by the vanishing polynomial, i.e. Z_H(X).
-        let (quotient, res) = expr_evaluation_interpolated
+        let (quotient, rem) = expr_evaluation_interpolated
             // FIXME: Should this be d8?
            .divide_by_vanishing_poly(domain.d1)
            .unwrap_or_else(fail_final_q_division);
         // As the constraints must be verified on H, the rest of the division
         // must be equal to 0 as the constraints polynomial and Z_H(X) are both
         // equal on H.
-        if !res.is_zero() {
-            fail_final_q_division();
+        if !rem.is_zero() {
+            fail_remainder_not_zero();
         }
 
         quotient
     };
 
-    let t_comm = srs.commit_non_hiding(&quotient_poly, DEGREE_QUOTIENT_POLYNOMIAL as usize);
-
-    absorb_commitment(&mut fq_sponge, &t_comm);
+    let quotient_commitment = srs
+        .commit_custom(
+            &quotient_poly,
+            DEGREE_QUOTIENT_POLYNOMIAL as usize,
+            &PolyComm::new(vec![
+                G::ScalarField::one();
+                DEGREE_QUOTIENT_POLYNOMIAL as usize
+            ]),
+        )
+        .unwrap();
+    absorb_commitment(&mut fq_sponge, &quotient_commitment.commitment);
+
     ////////////////////////////////////////////////////////////////////////////
     // Round 3: Evaluations at ζ and ζω
@@ -294,22 +312,34 @@ where
         <<G as AffineRepr>::Group as Group>::ScalarField,
         [<<G as AffineRepr>::Group as Group>::ScalarField; N_MIPS_SEL_COLS],
     > = evals(&zeta);
+
     // All evaluations at ζω
     let zeta_omega_evaluations: WitnessColumns<
         <<G as AffineRepr>::Group as Group>::ScalarField,
         [<<G as AffineRepr>::Group as Group>::ScalarField; N_MIPS_SEL_COLS],
     > = evals(&zeta_omega);
 
+    let chunked_quotient = quotient_poly
+        .to_chunked_polynomial(DEGREE_QUOTIENT_POLYNOMIAL as usize, domain.d1.size as usize);
+    let quotient_evaluations = PointEvaluations {
+        zeta: chunked_quotient
+            .polys
+            .iter()
+            .map(|p| p.evaluate(&zeta))
+            .collect::<Vec<_>>(),
+        zeta_omega: chunked_quotient
+            .polys
+            .iter()
+            .map(|p| p.evaluate(&zeta_omega))
+            .collect(),
+    };
+
     // Absorbing evaluations with a sponge for the other field
     // We initialize the state with the previous state of the fq_sponge
     let fq_sponge_before_evaluations = fq_sponge.clone();
     let mut fr_sponge = EFrSponge::new(G::sponge_params());
     fr_sponge.absorb(&fq_sponge.digest());
 
-    // Quotient poly evals
-    let quotient_zeta_eval = quotient_poly.evaluate(&zeta);
-    let quotient_zeta_omega_eval = quotient_poly.evaluate(&zeta_omega);
-
     for (zeta_eval, zeta_omega_eval) in zeta_evaluations
         .scratch
         .iter()
@@ -330,30 +360,40 @@ where
         fr_sponge.absorb(zeta_eval);
         fr_sponge.absorb(zeta_omega_eval);
     }
-    fr_sponge.absorb(&quotient_zeta_eval);
-    fr_sponge.absorb(&quotient_zeta_omega_eval);
-
+    for (quotient_zeta_eval, quotient_zeta_omega_eval) in quotient_evaluations
+        .zeta
+        .iter()
+        .zip(quotient_evaluations.zeta_omega.iter())
+    {
+        fr_sponge.absorb(quotient_zeta_eval);
+        fr_sponge.absorb(quotient_zeta_omega_eval);
+    }
     ////////////////////////////////////////////////////////////////////////////
     // Round 4: Opening proof w/o linearization polynomial
     ////////////////////////////////////////////////////////////////////////////
 
-    // Preparing the polynomials for the opening proof
     let mut polynomials: Vec<_> = polys.scratch.into_iter().collect();
     polynomials.push(polys.instruction_counter);
     polynomials.push(polys.error);
     polynomials.extend(polys.selector);
-    polynomials.push(quotient_poly);
 
-    let polynomials: Vec<_> = polynomials
+    // Preparing the polynomials for the opening proof
+    let mut polynomials: Vec<_> = polynomials
         .iter()
         .map(|poly| {
             (
                 DensePolynomialOrEvaluations::DensePolynomial(poly),
-                // We do not have any blinder, therefore we set to 0.
-                PolyComm::new(vec![G::ScalarField::zero()]),
+                // We do not have any blinder, therefore we set to 1.
+                PolyComm::new(vec![G::ScalarField::one()]),
             )
         })
        .collect();
+    // we handle the quotient separately because the number of blinders =
+    // number of chunks, which is different for just the quotient polynomial.
+    polynomials.push((
+        DensePolynomialOrEvaluations::DensePolynomial(&quotient_poly),
+        quotient_commitment.blinders,
+    ));
 
     // poly scale
     let v_chal = fr_sponge.challenge();
@@ -381,6 +421,8 @@ where
         commitments,
        zeta_evaluations,
         zeta_omega_evaluations,
+        quotient_commitment: quotient_commitment.commitment,
+        quotient_evaluations,
         opening_proof,
     })
 }
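For the new verifier to recompute the same challenges, it has to replay this transcript in exactly the prover's order, which the code above fixes as:

1. Absorb the witness commitments (scratch columns, instruction_counter, error, selectors) into the Fq-sponge.
2. Absorb the quotient commitment, then squeeze the evaluation point ζ.
3. Seed the Fr-sponge with the Fq-sponge digest, absorb every witness evaluation at ζ and ζω in column order, then the chunked quotient evaluations.
4. Squeeze the polynomial-combination challenge(s) and check the single IPA opening proof against all commitments at once.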
(The tenth changed file, presumably the new o1vm/src/pickles/verifier.rs, did not render in this view.)
