diff --git a/Cargo.toml b/Cargo.toml
index 2868edb4..7bdfa478 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "nova-snark"
-version = "0.68.0"
+version = "0.68.1"
 authors = ["Srinath Setty <srinaths@microsoft.com>"]
 edition = "2021"
 description = "High-speed recursive arguments from folding schemes"
diff --git a/src/r1cs/mod.rs b/src/r1cs/mod.rs
index 0e8a90f5..38ef633d 100644
--- a/src/r1cs/mod.rs
+++ b/src/r1cs/mod.rs
@@ -27,7 +27,7 @@ use sparse::PrecomputedSparseMatrix;
 pub use sparse::SparseMatrix;
 
 /// A type that holds the shape of the R1CS matrices
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize)]
 pub struct R1CSShape<E: Engine> {
   pub(crate) num_cons: usize,
   pub(crate) num_vars: usize,
@@ -35,15 +35,38 @@ pub struct R1CSShape<E: Engine> {
   pub(crate) A: SparseMatrix<E::Scalar>,
   pub(crate) B: SparseMatrix<E::Scalar>,
   pub(crate) C: SparseMatrix<E::Scalar>,
-  #[serde(skip, default = "OnceCell::new")]
+  #[serde(skip)]
   pub(crate) digest: OnceCell<E::Scalar>,
-  /// Precomputed SpMV accelerators (built lazily on first use)
-  #[serde(skip, default = "OnceCell::new")]
-  precomputed_A: OnceCell<PrecomputedSparseMatrix<E::Scalar>>,
-  #[serde(skip, default = "OnceCell::new")]
-  precomputed_B: OnceCell<PrecomputedSparseMatrix<E::Scalar>>,
-  #[serde(skip, default = "OnceCell::new")]
-  precomputed_C: OnceCell<PrecomputedSparseMatrix<E::Scalar>>,
+  #[serde(skip)]
+  precomputed_A: PrecomputedSparseMatrix<E::Scalar>,
+  #[serde(skip)]
+  precomputed_B: PrecomputedSparseMatrix<E::Scalar>,
+  #[serde(skip)]
+  precomputed_C: PrecomputedSparseMatrix<E::Scalar>,
+}
+
+// Custom Deserialize: rebuild precomputed SpMV tables from the matrices.
+impl<'de, E: Engine> Deserialize<'de> for R1CSShape<E> {
+  fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+    #[derive(Deserialize)]
+    struct R1CSShapeRaw<E: Engine> {
+      num_cons: usize,
+      num_vars: usize,
+      num_io: usize,
+      A: SparseMatrix<E::Scalar>,
+      B: SparseMatrix<E::Scalar>,
+      C: SparseMatrix<E::Scalar>,
+    }
+    let raw = R1CSShapeRaw::<E>::deserialize(deserializer)?;
+    Ok(Self::make(
+      raw.num_cons,
+      raw.num_vars,
+      raw.num_io,
+      raw.A,
+      raw.B,
+      raw.C,
+    ))
+  }
 }
 
 impl<E: Engine> SimpleDigestible for R1CSShape<E> {}
@@ -140,6 +163,38 @@ impl<E: Engine> R1CSShape<E> {
     &self.C
   }
 
+  /// Internal constructor: builds precomputed SpMV tables eagerly.
+  fn make(
+    num_cons: usize,
+    num_vars: usize,
+    num_io: usize,
+    A: SparseMatrix<E::Scalar>,
+    B: SparseMatrix<E::Scalar>,
+    C: SparseMatrix<E::Scalar>,
+  ) -> Self {
+    let (precomputed_A, (precomputed_B, precomputed_C)) = rayon::join(
+      || PrecomputedSparseMatrix::from_sparse(&A),
+      || {
+        rayon::join(
+          || PrecomputedSparseMatrix::from_sparse(&B),
+          || PrecomputedSparseMatrix::from_sparse(&C),
+        )
+      },
+    );
+    Self {
+      num_cons,
+      num_vars,
+      num_io,
+      A,
+      B,
+      C,
+      digest: OnceCell::new(),
+      precomputed_A,
+      precomputed_B,
+      precomputed_C,
+    }
+  }
+
   /// Create an object of type `R1CSShape` from the explicitly specified R1CS matrices
   pub fn new(
     num_cons: usize,
@@ -169,18 +224,7 @@
     is_valid(num_cons, num_vars, num_io, &B)?;
     is_valid(num_cons, num_vars, num_io, &C)?;
 
-    Ok(R1CSShape {
-      num_cons,
-      num_vars,
-      num_io,
-      A,
-      B,
-      C,
-      digest: OnceCell::new(),
-      precomputed_A: OnceCell::new(),
-      precomputed_B: OnceCell::new(),
-      precomputed_C: OnceCell::new(),
-    })
+    Ok(Self::make(num_cons, num_vars, num_io, A, B, C))
   }
 
   /// Computes the maximum number of generators needed for a set of R1CS shapes.
@@ -351,21 +395,6 @@ impl<E: Engine> R1CSShape<E> {
       .expect("Failure in retrieving digest")
   }
 
-  /// Eagerly build the precomputed SpMV tables for A, B, C.
-  /// Call this once during setup so that folding steps don't pay the
-  /// one-time construction cost on their first multiply_vec_pair call.
-  pub fn precompute_sparse_matrices(&self) {
-    self
-      .precomputed_A
-      .get_or_init(|| PrecomputedSparseMatrix::from_sparse(&self.A));
-    self
-      .precomputed_B
-      .get_or_init(|| PrecomputedSparseMatrix::from_sparse(&self.B));
-    self
-      .precomputed_C
-      .get_or_init(|| PrecomputedSparseMatrix::from_sparse(&self.C));
-  }
-
   // Checks regularity conditions on the R1CSShape, required in Spartan-class SNARKs
   // Returns false if num_cons or num_vars are not powers of two, or if num_io > num_vars
   #[inline]
@@ -385,19 +414,14 @@ impl<E: Engine> R1CSShape<E> {
       return Err(NovaError::InvalidWitnessLength);
     }
 
-    let pa = self
-      .precomputed_A
-      .get_or_init(|| PrecomputedSparseMatrix::from_sparse(&self.A));
-    let pb = self
-      .precomputed_B
-      .get_or_init(|| PrecomputedSparseMatrix::from_sparse(&self.B));
-    let pc = self
-      .precomputed_C
-      .get_or_init(|| PrecomputedSparseMatrix::from_sparse(&self.C));
-
     let (Az, (Bz, Cz)) = rayon::join(
-      || pa.multiply_vec(z),
-      || rayon::join(|| pb.multiply_vec(z), || pc.multiply_vec(z)),
+      || self.precomputed_A.multiply_vec(z),
+      || {
+        rayon::join(
+          || self.precomputed_B.multiply_vec(z),
+          || self.precomputed_C.multiply_vec(z),
+        )
+      },
     );
 
     Ok((Az, Bz, Cz))
@@ -420,22 +444,12 @@ impl<E: Engine> R1CSShape<E> {
       return Err(NovaError::InvalidWitnessLength);
     }
 
-    let pa = self
-      .precomputed_A
-      .get_or_init(|| PrecomputedSparseMatrix::from_sparse(&self.A));
-    let pb = self
-      .precomputed_B
-      .get_or_init(|| PrecomputedSparseMatrix::from_sparse(&self.B));
-    let pc = self
-      .precomputed_C
-      .get_or_init(|| PrecomputedSparseMatrix::from_sparse(&self.C));
-
     let ((Az1, Az2), ((Bz1, Bz2), (Cz1, Cz2))) = rayon::join(
-      || pa.multiply_vec_pair(z1, z2),
+      || self.precomputed_A.multiply_vec_pair(z1, z2),
       || {
         rayon::join(
-          || pb.multiply_vec_pair(z1, z2),
-          || pc.multiply_vec_pair(z1, z2),
+          || self.precomputed_B.multiply_vec_pair(z1, z2),
+          || self.precomputed_C.multiply_vec_pair(z1, z2),
         )
       },
     );
@@ -632,18 +646,14 @@ impl<E: Engine> R1CSShape<E> {
     // check if the number of variables are as 
expected, then
     // we simply set the number of constraints to the next power of two
     if self.num_vars == m {
-      return R1CSShape {
-        num_cons: m,
-        num_vars: m,
-        num_io: self.num_io,
-        A: self.A.clone(),
-        B: self.B.clone(),
-        C: self.C.clone(),
-        digest: OnceCell::new(),
-        precomputed_A: OnceCell::new(),
-        precomputed_B: OnceCell::new(),
-        precomputed_C: OnceCell::new(),
-      };
+      return Self::make(
+        m,
+        m,
+        self.num_io,
+        self.A.clone(),
+        self.B.clone(),
+        self.C.clone(),
+      );
     }
 
     // otherwise, we need to pad the number of variables and renumber variable accesses
@@ -671,18 +681,14 @@
     let B_padded = apply_pad(self.B.clone());
     let C_padded = apply_pad(self.C.clone());
 
-    R1CSShape {
-      num_cons: num_cons_padded,
-      num_vars: num_vars_padded,
-      num_io: self.num_io,
-      A: A_padded,
-      B: B_padded,
-      C: C_padded,
-      digest: OnceCell::new(),
-      precomputed_A: OnceCell::new(),
-      precomputed_B: OnceCell::new(),
-      precomputed_C: OnceCell::new(),
-    }
+    Self::make(
+      num_cons_padded,
+      num_vars_padded,
+      self.num_io,
+      A_padded,
+      B_padded,
+      C_padded,
+    )
   }
 
   /// Pads the `R1CSShape` so that `num_cons` and `num_vars` are each
@@ -723,18 +729,14 @@
     let B_padded = apply_pad(self.B.clone());
     let C_padded = apply_pad(self.C.clone());
 
-    R1CSShape {
-      num_cons: num_cons_padded,
-      num_vars: num_vars_padded,
-      num_io: self.num_io,
-      A: A_padded,
-      B: B_padded,
-      C: C_padded,
-      digest: OnceCell::new(),
-      precomputed_A: OnceCell::new(),
-      precomputed_B: OnceCell::new(),
-      precomputed_C: OnceCell::new(),
-    }
+    Self::make(
+      num_cons_padded,
+      num_vars_padded,
+      self.num_io,
+      A_padded,
+      B_padded,
+      C_padded,
+    )
   }
 
   /// Samples a new random `RelaxedR1CSInstance`/`RelaxedR1CSWitness` pair