improve: apply tachyon optimizations(1) #342

Merged · 7 commits · Jun 3, 2024
Changes from 5 commits
34 changes: 21 additions & 13 deletions halo2_backend/src/plonk/evaluation.rs
@@ -408,8 +408,16 @@ impl<C: CurveAffine> Evaluator<C> {
                 let chunk_len = pk.vk.cs.degree() - 2;
                 let delta_start = beta * C::Scalar::ZETA;

-                let first_set = sets.first().unwrap();
-                let last_set = sets.last().unwrap();
+                let permutation_product_cosets: Vec<
+                    Polynomial<C::ScalarExt, ExtendedLagrangeCoeff>,
+                > = sets
+                    .iter()
+                    .map(|set| domain.coeff_to_extended(set.permutation_product_poly.clone()))
+                    .collect();
+
+                let first_set_permutation_product_coset =
+                    permutation_product_cosets.first().unwrap();
+                let last_set_permutation_product_coset = permutation_product_cosets.last().unwrap();

                 // Permutation constraints
                 parallelize(&mut values, |values, start| {
@@ -422,22 +430,21 @@ impl<C: CurveAffine> Evaluator<C> {
                         // Enforce only for the first set.
                         // l_0(X) * (1 - z_0(X)) = 0
                         *value = *value * y
-                            + ((one - first_set.permutation_product_coset[idx]) * l0[idx]);
+                            + ((one - first_set_permutation_product_coset[idx]) * l0[idx]);
                         // Enforce only for the last set.
                         // l_last(X) * (z_l(X)^2 - z_l(X)) = 0
                         *value = *value * y
-                            + ((last_set.permutation_product_coset[idx]
-                                * last_set.permutation_product_coset[idx]
-                                - last_set.permutation_product_coset[idx])
+                            + ((last_set_permutation_product_coset[idx]
+                                * last_set_permutation_product_coset[idx]
+                                - last_set_permutation_product_coset[idx])
                                 * l_last[idx]);
                         // Except for the first set, enforce.
                         // l_0(X) * (z_i(X) - z_{i-1}(\omega^(last) X)) = 0
-                        for (set_idx, set) in sets.iter().enumerate() {
+                        for (set_idx, _) in sets.iter().enumerate() {
                             if set_idx != 0 {
                                 *value = *value * y
-                                    + ((set.permutation_product_coset[idx]
-                                        - permutation.sets[set_idx - 1].permutation_product_coset
-                                            [r_last])
+                                    + ((permutation_product_cosets[set_idx][idx]
+                                        - permutation_product_cosets[set_idx - 1][r_last])
                                         * l0[idx]);
                             }
                         }
@@ -447,12 +454,13 @@ impl<C: CurveAffine> Evaluator<C> {
                         //   - z_i(X) \prod_j (p(X) + \delta^j \beta X + \gamma)
                         // )
                         let mut current_delta = delta_start * beta_term;
-                        for ((set, columns), cosets) in sets
+                        for (((set_idx, _), columns), cosets) in sets
Comment (Member):
How about iterating over permutation_product_cosets so that we don't need to manually index at each iteration? Something like this:

                        let mut current_delta = delta_start * beta_term;
                        for ((permutation_product_coset, columns), cosets) in permutation_product_cosets
                            .iter()
                            .zip(p.columns.chunks(chunk_len))
                            .zip(pk.permutation.cosets.chunks(chunk_len))
                        {
                            let mut left = permutation_product_coset[r_next];

I think the code looks slightly more clear this way.
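The suggestion boils down to iterating the collection instead of indexing back into it. A tiny stand-alone sketch of the pattern, where the cosets and chunks vectors are placeholders for illustration and not the PR's types:

    // Illustration only, not PR code: iterate directly instead of enumerating
    // and indexing back into the collection at every step.
    fn main() {
        let cosets: Vec<Vec<u64>> = vec![vec![10, 11], vec![20, 21], vec![30, 31]];
        let chunks: Vec<&str> = vec!["a", "b", "c"];

        // Index-based: every read goes back through cosets[set_idx].
        for (set_idx, _) in cosets.iter().enumerate() {
            let left = cosets[set_idx][0];
            println!("{}: {}", chunks[set_idx], left);
        }

        // Iterator-based: the element is already in scope, no manual indexing.
        for (coset, chunk) in cosets.iter().zip(chunks.iter()) {
            let left = coset[0];
            println!("{}: {}", chunk, left);
        }
    }

Both loops print the same output; the second simply avoids repeating the index lookup.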

                             .iter()
+                            .enumerate()
                             .zip(p.columns.chunks(chunk_len))
                             .zip(pk.permutation.cosets.chunks(chunk_len))
                         {
-                            let mut left = set.permutation_product_coset[r_next];
+                            let mut left = permutation_product_cosets[set_idx][r_next];
                             for (values, permutation) in columns
                                 .iter()
                                 .map(|&column| match column.column_type {
@@ -465,7 +473,7 @@ impl<C: CurveAffine> Evaluator<C> {
                                 left *= values[idx] + beta * permutation[idx] + gamma;
                             }

-                            let mut right = set.permutation_product_coset[idx];
+                            let mut right = permutation_product_cosets[set_idx][idx];
                             for values in columns.iter().map(|&column| match column.column_type {
                                 Any::Advice => &advice[column.index],
                                 Any::Fixed => &fixed[column.index],
9 changes: 2 additions & 7 deletions halo2_backend/src/plonk/permutation/prover.rs
@@ -13,7 +13,7 @@ use crate::{
     plonk::{self, permutation::ProvingKey, ChallengeBeta, ChallengeGamma, ChallengeX, Error},
     poly::{
         commitment::{Blind, Params},
-        Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery,
+        Coeff, LagrangeCoeff, Polynomial, ProverQuery,
     },
     transcript::{EncodedChallenge, TranscriptWrite},
 };
@@ -25,7 +25,6 @@ use halo2_middleware::poly::Rotation;

 pub(crate) struct CommittedSet<C: CurveAffine> {
     pub(crate) permutation_product_poly: Polynomial<C::Scalar, Coeff>,
-    pub(crate) permutation_product_coset: Polynomial<C::Scalar, ExtendedLagrangeCoeff>,

Comment:
After removing this, how is CommittedSet different from ConstructedSet?
This part of the code is quite poorly documented so it's a bit hard to follow the function of each struct...
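For illustration: if ConstructedSet holds only the product polynomial and its blind (an assumption, since its definition is not shown in this diff), the two structs end up with identical fields after this change. A rough sketch with placeholder types:

    // Rough sketch only: halo2's concrete types are elided; Poly and Blind are
    // hypothetical stand-ins used purely for illustration.
    struct CommittedSet<Poly, Blind> {
        permutation_product_poly: Poly,
        permutation_product_blind: Blind,
        // permutation_product_coset is removed by this PR
    }

    // Assumed shape of ConstructedSet (not shown in this diff):
    struct ConstructedSet<Poly, Blind> {
        permutation_product_poly: Poly,
        permutation_product_blind: Blind,
    }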

Comment:
The function construct() for Committed may be simplified now to a simple clone() (not sure, some types may need to be fixed).
In fact, after this change I think we should re-evaluate the need for Committed and Constructed, as these may be redundant.
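Under that reading, construct() would collapse to a plain move or clone of the sets. A hypothetical sketch, assuming Constructed just wraps the same per-set data; this is not the crate's actual code:

    // Hypothetical sketch, not the crate's code: with the coset field gone there
    // is nothing left for construct() to strip, so it reduces to a move.
    struct Committed<S> {
        sets: Vec<S>,
    }

    struct Constructed<S> {
        sets: Vec<S>,
    }

    impl<S> Committed<S> {
        fn construct(self) -> Constructed<S> {
            Constructed { sets: self.sets }
        }
    }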

Comment (Author):
Yes, I agree with you.

Comment (Author):
How about this update? 4628c24
I am sure that we don't need the Constructed struct any more.
It also removes an unnecessary conversion that needed .clone().

     permutation_product_blind: Blind<C::Scalar>,
 }

@@ -177,17 +176,13 @@ pub(in crate::plonk) fn permutation_commit<
             .commit_lagrange(&engine.msm_backend, &z, blind)
             .to_affine();
         let permutation_product_blind = blind;
-        let z = domain.lagrange_to_coeff(z);
-        let permutation_product_poly = z.clone();
-
-        let permutation_product_coset = domain.coeff_to_extended(z);
+        let permutation_product_poly = domain.lagrange_to_coeff(z);

         // Hash the permutation product commitment
         transcript.write_point(permutation_product_commitment)?;

         sets.push(CommittedSet {
             permutation_product_poly,
-            permutation_product_coset,
             permutation_product_blind,
         });
     }
22 changes: 12 additions & 10 deletions halo2_backend/src/plonk/vanishing/prover.rs
@@ -131,18 +131,20 @@ impl<C: CurveAffine> Committed<C> {
             .collect();

         // Compute commitments to each h(X) piece
-        let h_commitments_projective: Vec<_> = h_pieces
-            .iter()
-            .zip(h_blinds.iter())
-            .map(|(h_piece, blind)| params.commit(&engine.msm_backend, h_piece, *blind))
-            .collect();
-        let mut h_commitments = vec![C::identity(); h_commitments_projective.len()];
-        C::Curve::batch_normalize(&h_commitments_projective, &mut h_commitments);
-        let h_commitments = h_commitments;
+        let h_commitments = {
+            let h_commitments_projective: Vec<_> = h_pieces
+                .iter()
+                .zip(h_blinds.iter())
+                .map(|(h_piece, blind)| params.commit(&engine.msm_backend, h_piece, *blind))
+                .collect();
+            let mut h_commitments = vec![C::identity(); h_commitments_projective.len()];
+            C::Curve::batch_normalize(&h_commitments_projective, &mut h_commitments);
+            h_commitments
+        };

         // Hash each h(X) piece
-        for c in h_commitments.iter() {
-            transcript.write_point(*c)?;
+        for c in h_commitments {
+            transcript.write_point(c)?;
         }

         Ok(Constructed {