diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fecfc5872d..b20e60c748 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,14 +1,25 @@ name: CI checks -on: [pull_request, push] +on: + merge_group: + pull_request: + push: + branches: + - main jobs: test: - name: Test on ${{ matrix.os }} + name: Test on ${{ matrix.os }} with ${{ matrix.feature_set }} features runs-on: ${{ matrix.os }} strategy: matrix: + feature_set: [basic, all] os: [ubuntu-latest] + include: + - feature_set: basic + features: batch,dev-graph,gadget-traces + - feature_set: all + features: batch,dev-graph,gadget-traces,multicore,test-dev-graph,thread-safe-region,sanity-checks,circuit-params steps: - uses: actions/checkout@v3 @@ -19,7 +30,67 @@ jobs: uses: actions-rs/cargo@v1 with: command: test - args: --verbose --release --all --all-features + args: --verbose --release --workspace --no-default-features --features "${{ matrix.features }}" + + build: + name: Build target ${{ matrix.target }} + runs-on: ubuntu-latest + strategy: + matrix: + target: + - wasm32-unknown-unknown + - wasm32-wasi + + steps: + - uses: actions/checkout@v3 + - uses: actions-rs/toolchain@v1 + with: + override: false + - name: Add target + run: rustup target add ${{ matrix.target }} + - name: cargo build + uses: actions-rs/cargo@v1 + with: + command: build + args: --no-default-features --features batch,dev-graph,gadget-traces --target ${{ matrix.target }} + + bitrot: + name: Bitrot check + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - uses: actions-rs/toolchain@v1 + with: + override: false + # Build benchmarks to prevent bitrot + - name: Build benchmarks + uses: actions-rs/cargo@v1 + with: + command: build + args: --benches --examples --all-features + + doc-links: + name: Intra-doc links + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - uses: actions-rs/toolchain@v1 + with: + override: false + - name: cargo fetch + uses: actions-rs/cargo@v1 + with: + command: fetch + + # Ensure intra-documentation links all resolve correctly + # Requires #![deny(intra_doc_link_resolution_failure)] in crates. + - name: Check intra-doc links + uses: actions-rs/cargo@v1 + with: + command: doc + args: --all --document-private-items example: name: Examples on ubuntu diff --git a/.github/workflows/ci_main.yml b/.github/workflows/ci_main.yml deleted file mode 100644 index 400bff09bd..0000000000 --- a/.github/workflows/ci_main.yml +++ /dev/null @@ -1,106 +0,0 @@ -name: CI checks main - -on: - push: - branches: - - main -jobs: - test: - name: Test on ${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macOS-latest] - - steps: - - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - override: false - - name: Run tests - uses: actions-rs/cargo@v1 - with: - command: test - args: --verbose --release --all --all-features - bitrot: - name: Bitrot check - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - override: false - # Build benchmarks to prevent bitrot - - name: Build benchmarks - uses: actions-rs/cargo@v1 - with: - command: build - args: --benches --examples --all-features - - codecov: - name: Code coverage - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - # Use stable for this to ensure that cargo-tarpaulin can be built. 
- - uses: actions-rs/toolchain@v1 - with: - override: false - - name: Install cargo-tarpaulin - uses: actions-rs/cargo@v1 - with: - command: install - args: cargo-tarpaulin - - name: Generate coverage report - uses: actions-rs/cargo@v1 - with: - command: tarpaulin - args: --all-features --timeout 600 --out Xml - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3.1.0 - - doc-links: - name: Intra-doc links - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - override: false - - name: cargo fetch - uses: actions-rs/cargo@v1 - with: - command: fetch - - # Ensure intra-documentation links all resolve correctly - # Requires #![deny(intra_doc_link_resolution_failure)] in crates. - - name: Check intra-doc links - uses: actions-rs/cargo@v1 - with: - command: doc - args: --all --document-private-items - - build: - name: Build target ${{ matrix.target }} - runs-on: ubuntu-latest - strategy: - matrix: - target: - - wasm32-unknown-unknown - - wasm32-wasi - - steps: - - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - override: false - - name: Add target - run: rustup target add ${{ matrix.target }} - - name: cargo build - uses: actions-rs/cargo@v1 - with: - command: build - args: --features dev-graph,gadget-traces,unstable --target ${{ matrix.target }} diff --git a/README.md b/README.md index 69167e0716..4de513c4b7 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ ## Minimum Supported Rust Version -Requires Rust **1.56.1** or higher. +Requires Rust **1.65.0** or higher. Minimum supported Rust version can be changed in the future, but it will be done with a minor version bump. @@ -14,6 +14,10 @@ minor version bump. `halo2` currently uses [rayon](https://github.com/rayon-rs/rayon) for parallel computation. The `RAYON_NUM_THREADS` environment variable can be used to set the number of threads. +You can disable `rayon` by disabling the `"multicore"` feature. +Warning! Halo2 will lose access to parallelism if you disable the `"multicore"` feature. +This will significantly degrade performance. 
+ ## License Licensed under either of diff --git a/halo2/Cargo.toml b/halo2/Cargo.toml index 09c367b781..0c29ca6cc5 100644 --- a/halo2/Cargo.toml +++ b/halo2/Cargo.toml @@ -19,7 +19,7 @@ all-features = true rustdoc-args = ["--cfg", "docsrs", "--html-in-header", "katex-header.html"] [dependencies] -halo2_proofs = { version = "0.2", path = "../halo2_proofs" } +halo2_proofs = { version = "0.2", path = "../halo2_proofs", default-features = false } [lib] bench = false diff --git a/halo2_proofs/Cargo.toml b/halo2_proofs/Cargo.toml index 85c2e92584..276dfb2563 100644 --- a/halo2_proofs/Cargo.toml +++ b/halo2_proofs/Cargo.toml @@ -50,7 +50,6 @@ harness = false [dependencies] backtrace = { version = "0.3", optional = true } -rayon = "1.7" crossbeam = "0.8" ff = "0.13" group = "0.13" @@ -63,11 +62,15 @@ blake2b_simd = "1" rustc-hash = "1.1" sha3 = "0.10" ark-std = { version = "0.3.0", features = ["print-trace"], optional = true } +maybe-rayon = { version = "0.1.0", default-features = false } # Developer tooling dependencies -plotters = { version = "0.3.0", optional = true } +plotters = { version = "0.3.0", default-features = false, optional = true } tabbycat = { version = "0.1", features = ["attributes"], optional = true } +# Legacy circuit compatibility +halo2_legacy_pdqsort = { version = "0.1.0", optional = true } + [dev-dependencies] assert_matches = "1.5" criterion = "0.3" @@ -80,8 +83,15 @@ rand_chacha = "0.3.1" getrandom = { version = "0.2", features = ["js"] } [features] -default = ["batch", "circuit-params"] +default = ["batch", "multicore", "circuit-params"] +multicore = ["maybe-rayon/threads"] dev-graph = ["plotters", "tabbycat"] +test-dev-graph = [ + "dev-graph", + "plotters/bitmap_backend", + "plotters/bitmap_encoder", + "plotters/ttf", +] gadget-traces = ["backtrace"] # thread-safe-region = [] sanity-checks = [] @@ -100,4 +110,4 @@ name = "serialization" name = "shuffle" [[example]] -name = "shuffle_api" \ No newline at end of file +name = "shuffle_api" diff --git a/halo2_proofs/README.md b/halo2_proofs/README.md index 7c226ff24c..7aeebeb849 100644 --- a/halo2_proofs/README.md +++ b/halo2_proofs/README.md @@ -4,7 +4,7 @@ ## Minimum Supported Rust Version -Requires Rust **1.56.1** or higher. +Requires Rust **1.65.0** or higher. Minimum supported Rust version can be changed in the future, but it will be done with a minor version bump. @@ -15,6 +15,10 @@ minor version bump. computation. The `RAYON_NUM_THREADS` environment variable can be used to set the number of threads. +You can disable `rayon` by disabling the `"multicore"` feature. +Warning! Halo2 will lose access to parallelism if you disable the `"multicore"` feature. +This will significantly degrade performance. 
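A minimal sketch (editor's addition, not part of the patch) of the feature-gating pattern the paragraph above describes, mirroring the fallback this patch adds to the benchmarks: with the `multicore` feature enabled, the thread count comes from the rayon pool via `maybe-rayon`; without it, the same code path compiles against a single-threaded stub.

```rust
// Sketch only: assumes a crate with `maybe-rayon` as a dependency gated behind a
// `multicore` feature, as in the `halo2_proofs/Cargo.toml` hunk above.
#[cfg(feature = "multicore")]
use maybe_rayon::current_num_threads;

#[cfg(not(feature = "multicore"))]
fn current_num_threads() -> usize {
    1
}

fn main() {
    // Callers split work by the reported thread count and need no cfg checks of
    // their own; a non-`multicore` build simply sees one chunk.
    let n_chunks = current_num_threads();
    println!("splitting work into {n_chunks} chunk(s)");
}
```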
+ ## License Licensed under either of diff --git a/halo2_proofs/benches/commit_zk.rs b/halo2_proofs/benches/commit_zk.rs index 3496dbd4be..bdf5c88a51 100644 --- a/halo2_proofs/benches/commit_zk.rs +++ b/halo2_proofs/benches/commit_zk.rs @@ -2,12 +2,20 @@ extern crate criterion; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use group::ff::Field; -use halo2_proofs::*; +use halo2_proofs::arithmetic::parallelize; use halo2curves::pasta::pallas::Scalar; use rand_chacha::rand_core::RngCore; use rand_chacha::ChaCha20Rng; use rand_core::SeedableRng; -use rayon::{current_num_threads, prelude::*}; +use std::{collections::HashMap, iter}; + +#[cfg(feature = "multicore")] +use maybe_rayon::current_num_threads; + +#[cfg(not(feature = "multicore"))] +fn current_num_threads() -> usize { + 1 +} fn rand_poly_serial(mut rng: ChaCha20Rng, domain: usize) -> Vec { // Sample a random polynomial of degree n - 1 @@ -21,25 +29,33 @@ fn rand_poly_serial(mut rng: ChaCha20Rng, domain: usize) -> Vec { fn rand_poly_par(mut rng: ChaCha20Rng, domain: usize) -> Vec { // Sample a random polynomial of degree n - 1 - let n_threads = current_num_threads(); - let n = 1usize << domain; - let n_chunks = n_threads + usize::from(n % n_threads != 0); - let mut rand_vec = vec![Scalar::zero(); n]; + let n = 1usize << domain as usize; + let mut random_poly = vec![Scalar::ZERO; n]; - let mut thread_seeds: Vec = (0..n_chunks) - .map(|_| { + let num_threads = current_num_threads(); + let chunk_size = n / num_threads; + let thread_seeds = (0..) + .step_by(chunk_size + 1) + .take(n % num_threads) + .chain( + (chunk_size != 0) + .then(|| ((n % num_threads) * (chunk_size + 1)..).step_by(chunk_size)) + .into_iter() + .flatten(), + ) + .take(num_threads) + .zip(iter::repeat_with(|| { let mut seed = [0u8; 32]; rng.fill_bytes(&mut seed); ChaCha20Rng::from_seed(seed) - }) - .collect(); + })) + .collect::>(); - thread_seeds - .par_iter_mut() - .zip_eq(rand_vec.par_chunks_mut(n / n_threads)) - .for_each(|(mut rng, chunk)| chunk.iter_mut().for_each(|v| *v = Scalar::random(&mut rng))); - - rand_vec + parallelize(&mut random_poly, |chunk, offset| { + let mut rng = thread_seeds[&offset].clone(); + chunk.iter_mut().for_each(|v| *v = Scalar::random(&mut rng)); + }); + random_poly } fn bench_commit(c: &mut Criterion) { diff --git a/halo2_proofs/src/arithmetic.rs b/halo2_proofs/src/arithmetic.rs index b44e9bdc7d..3a67afbcb2 100644 --- a/halo2_proofs/src/arithmetic.rs +++ b/halo2_proofs/src/arithmetic.rs @@ -56,7 +56,7 @@ fn multiexp_serial(coeffs: &[C::Scalar], bases: &[C], acc: &mut let mut tmp = u64::from_le_bytes(v); tmp >>= skip_bits - (skip_bytes * 8); - tmp = tmp % (1 << c); + tmp %= 1 << c; tmp as usize } @@ -141,7 +141,7 @@ fn multiexp_serial(coeffs: &[C::Scalar], bases: &[C], acc: &mut let mut running_sum = C::Curve::identity(); for exp in buckets.into_iter().take(max_bits).rev() { running_sum = exp.add(running_sum); - *acc = *acc + &running_sum; + *acc += &running_sum; } } } @@ -304,7 +304,7 @@ pub fn recursive_butterfly_arithmetic>( a[1] -= &t; } else { let (left, right) = a.split_at_mut(n / 2); - rayon::join( + multicore::join( || recursive_butterfly_arithmetic(left, n / 2, twiddle_chunk * 2, twiddles), || recursive_butterfly_arithmetic(right, n / 2, twiddle_chunk * 2, twiddles), ); diff --git a/halo2_proofs/src/circuit.rs b/halo2_proofs/src/circuit.rs index a17d3f2627..8a7a9b8007 100644 --- a/halo2_proofs/src/circuit.rs +++ b/halo2_proofs/src/circuit.rs @@ -1,6 +1,6 @@ //! 
Traits and structs for implementing circuit components. -use std::{convert::TryInto, fmt, marker::PhantomData}; +use std::{fmt, marker::PhantomData}; use ff::Field; @@ -13,9 +13,11 @@ pub use value::Value; pub mod floor_planner; pub use floor_planner::single_pass::SimpleFloorPlanner; -pub use floor_planner::single_pass::SimpleTableLayouter; pub mod layouter; +mod table_layouter; + +pub use table_layouter::{SimpleTableLayouter, TableLayouter}; /// A chip implements a set of instructions that can be used by gadgets. /// @@ -308,6 +310,19 @@ impl<'r, F: Field> Region<'r, F> { }) } + /// Returns the value of the instance column's cell at absolute location `row`. + /// + /// This method is only provided for convenience; it does not create any constraints. + /// Callers still need to use [`Self::assign_advice_from_instance`] to constrain the + /// instance values in their circuit. + pub fn instance_value( + &mut self, + instance: Column, + row: usize, + ) -> Result, Error> { + self.region.instance_value(instance, row) + } + /// Assign a fixed value. /// /// Even though `to` has `FnMut` bounds, it is guaranteed to be called at most once. @@ -363,11 +378,11 @@ impl<'r, F: Field> Region<'r, F> { /// A lookup table in the circuit. #[derive(Debug)] pub struct Table<'r, F: Field> { - table: &'r mut dyn layouter::TableLayouter, + table: &'r mut dyn TableLayouter, } -impl<'r, F: Field> From<&'r mut dyn layouter::TableLayouter> for Table<'r, F> { - fn from(table: &'r mut dyn layouter::TableLayouter) -> Self { +impl<'r, F: Field> From<&'r mut dyn TableLayouter> for Table<'r, F> { + fn from(table: &'r mut dyn TableLayouter) -> Self { Table { table } } } diff --git a/halo2_proofs/src/circuit/floor_planner/single_pass.rs b/halo2_proofs/src/circuit/floor_planner/single_pass.rs index cd2c7624f1..824ad427f8 100644 --- a/halo2_proofs/src/circuit/floor_planner/single_pass.rs +++ b/halo2_proofs/src/circuit/floor_planner/single_pass.rs @@ -1,4 +1,3 @@ -use std::cmp; use std::fmt; use std::marker::PhantomData; @@ -8,8 +7,9 @@ use rustc_hash::FxHashMap; use crate::circuit::AssignedCell; use crate::{ circuit::{ - layouter::{RegionColumn, RegionLayouter, RegionShape, SyncDeps, TableLayouter}, - Cell, Layouter, Region, RegionIndex, RegionStart, Table, Value, + layouter::{RegionColumn, RegionLayouter, SyncDeps, TableLayouter}, + table_layouter::{compute_table_lengths, SimpleTableLayouter}, + Cell, Layouter, Region, RegionIndex, Table, Value, }, plonk::{ Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, Error, Fixed, FloorPlanner, @@ -170,24 +170,7 @@ impl<'a, F: Field, CS: Assignment + 'a + SyncDeps> Layouter // Check that all table columns have the same length `first_unused`, // and all cells up to that length are assigned. - let first_unused = { - match default_and_assigned - .values() - .map(|(_, assigned)| { - if assigned.iter().all(|b| *b) { - Some(assigned.len()) - } else { - None - } - }) - .reduce(|acc, item| match (acc, item) { - (Some(a), Some(b)) if a == b => Some(a), - _ => None, - }) { - Some(Some(len)) => len, - _ => return Err(Error::Synthesis), // TODO better error - } - }; + let first_unused = compute_table_lengths(&default_and_assigned)?; // Record these columns so that we can prevent them from being used again. 
for column in default_and_assigned.keys() { @@ -267,11 +250,6 @@ impl<'r, 'a, F: Field, CS: Assignment + 'a> SingleChipLayouterRegion<'r, 'a, } } -impl<'r, 'a, F: Field, CS: Assignment + 'a + SyncDeps> SyncDeps - for SingleChipLayouterRegion<'r, 'a, F, CS> -{ -} - impl<'r, 'a, F: Field, CS: Assignment + 'a + SyncDeps> RegionLayouter for SingleChipLayouterRegion<'r, 'a, F, CS> { @@ -357,6 +335,14 @@ impl<'r, 'a, F: Field, CS: Assignment + 'a + SyncDeps> RegionLayouter Ok((cell, value)) } + fn instance_value( + &mut self, + instance: Column, + row: usize, + ) -> Result, Error> { + self.layouter.cs.query_instance(instance, row) + } + fn assign_fixed( &mut self, // annotation: &'v (dyn Fn() -> String + 'v), @@ -399,88 +385,6 @@ impl<'r, 'a, F: Field, CS: Assignment + 'a + SyncDeps> RegionLayouter } } -/// The default value to fill a table column with. -/// -/// - The outer `Option` tracks whether the value in row 0 of the table column has been -/// assigned yet. This will always be `Some` once a valid table has been completely -/// assigned. -/// - The inner `Value` tracks whether the underlying `Assignment` is evaluating -/// witnesses or not. -type DefaultTableValue = Option>>; - -/// A table layouter that can be used to assign values to a table. -pub struct SimpleTableLayouter<'r, 'a, F: Field, CS: Assignment + 'a> { - cs: &'a mut CS, - used_columns: &'r [TableColumn], - /// maps from a fixed column to a pair (default value, vector saying which rows are assigned) - pub default_and_assigned: FxHashMap, Vec)>, -} - -impl<'r, 'a, F: Field, CS: Assignment + 'a> fmt::Debug for SimpleTableLayouter<'r, 'a, F, CS> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SimpleTableLayouter") - .field("used_columns", &self.used_columns) - .field("default_and_assigned", &self.default_and_assigned) - .finish() - } -} - -impl<'r, 'a, F: Field, CS: Assignment + 'a> SimpleTableLayouter<'r, 'a, F, CS> { - /// Returns a new SimpleTableLayouter - pub fn new(cs: &'a mut CS, used_columns: &'r [TableColumn]) -> Self { - SimpleTableLayouter { - cs, - used_columns, - default_and_assigned: FxHashMap::default(), - } - } -} - -impl<'r, 'a, F: Field, CS: Assignment + 'a> TableLayouter - for SimpleTableLayouter<'r, 'a, F, CS> -{ - fn assign_cell<'v>( - &'v mut self, - _: &'v (dyn Fn() -> String + 'v), - column: TableColumn, - offset: usize, - to: &'v mut (dyn FnMut() -> Value> + 'v), - ) -> Result<(), Error> { - if self.used_columns.contains(&column) { - return Err(Error::Synthesis); // TODO better error - } - - let entry = self.default_and_assigned.entry(column).or_default(); - - let value; - self.cs.assign_fixed( - // annotation, - column.inner(), - offset, // tables are always assigned starting at row 0 - { - let res = to(); - value = res; - res.assign()? - }, - ); - - match (entry.0.is_none(), offset) { - // Use the value at offset 0 as the default value for this table column. - (true, 0) => entry.0 = Some(value), - // Since there is already an existing default value for this table column, - // the caller should not be attempting to assign another value at offset 0. 
- (false, 0) => return Err(Error::Synthesis), // TODO better error - _ => (), - } - if entry.1.len() <= offset { - entry.1.resize(offset + 1, false); - } - entry.1[offset] = true; - - Ok(()) - } -} - #[cfg(test)] mod tests { use halo2curves::pasta::vesta; diff --git a/halo2_proofs/src/circuit/floor_planner/v1.rs b/halo2_proofs/src/circuit/floor_planner/v1.rs index c2308b007f..ea788aef95 100644 --- a/halo2_proofs/src/circuit/floor_planner/v1.rs +++ b/halo2_proofs/src/circuit/floor_planner/v1.rs @@ -4,8 +4,8 @@ use ff::Field; use crate::{ circuit::{ - floor_planner::single_pass::SimpleTableLayouter, layouter::{RegionColumn, RegionLayouter, RegionShape, SyncDeps, TableLayouter}, + table_layouter::{compute_table_lengths, SimpleTableLayouter}, Cell, Layouter, Region, RegionIndex, RegionStart, Table, Value, }, plonk::{ @@ -315,24 +315,7 @@ impl<'p, 'a, F: Field, CS: Assignment + SyncDeps> AssignmentPass<'p, 'a, F, C // Check that all table columns have the same length `first_unused`, // and all cells up to that length are assigned. - let first_unused = { - match default_and_assigned - .values() - .map(|(_, assigned)| { - if assigned.iter().all(|b| *b) { - Some(assigned.len()) - } else { - None - } - }) - .reduce(|acc, item| match (acc, item) { - (Some(a), Some(b)) if a == b => Some(a), - _ => None, - }) { - Some(Some(len)) => len, - _ => return Err(Error::Synthesis), // TODO better error - } - }; + let first_unused = compute_table_lengths(&default_and_assigned)?; // Record these columns so that we can prevent them from being used again. for column in default_and_assigned.keys() { @@ -386,8 +369,6 @@ impl<'r, 'a, F: Field, CS: Assignment + 'a> V1Region<'r, 'a, F, CS> { } } -impl<'r, 'a, F: Field, CS: Assignment + SyncDeps> SyncDeps for V1Region<'r, 'a, F, CS> {} - impl<'r, 'a, F: Field, CS: Assignment + SyncDeps> RegionLayouter for V1Region<'r, 'a, F, CS> { fn enable_selector<'v>( &'v mut self, @@ -459,6 +440,14 @@ impl<'r, 'a, F: Field, CS: Assignment + SyncDeps> RegionLayouter for V1Reg Ok((cell, value)) } + fn instance_value( + &mut self, + instance: Column, + row: usize, + ) -> Result, Error> { + self.plan.cs.query_instance(instance, row) + } + fn assign_fixed<'v>( &'v mut self, annotation: &'v (dyn Fn() -> String + 'v), diff --git a/halo2_proofs/src/circuit/floor_planner/v1/strategy.rs b/halo2_proofs/src/circuit/floor_planner/v1/strategy.rs index f9acd0f57d..71745de245 100644 --- a/halo2_proofs/src/circuit/floor_planner/v1/strategy.rs +++ b/halo2_proofs/src/circuit/floor_planner/v1/strategy.rs @@ -199,7 +199,7 @@ pub fn slot_in_biggest_advice_first( region_shapes: Vec, ) -> (Vec, CircuitAllocations) { let mut sorted_regions: Vec<_> = region_shapes.into_iter().collect(); - sorted_regions.sort_unstable_by_key(|shape| { + let sort_key = |shape: &RegionShape| { // Count the number of advice columns let advice_cols = shape .columns() @@ -211,7 +211,24 @@ pub fn slot_in_biggest_advice_first( .count(); // Sort by advice area (since this has the most contention). advice_cols * shape.row_count() - }); + }; + + // This used to incorrectly use `sort_unstable_by_key` with non-unique keys, which gave + // output that differed between 32-bit and 64-bit platforms, and potentially between Rust + // versions. + // We now use `sort_by_cached_key` with non-unique keys, and rely on `region_shapes` + // being sorted by region index (which we also rely on below to return `RegionStart`s + // in the correct order). 
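A small standalone illustration (editor's addition, not part of the patch) of the stability property the comment above relies on: `sort_by_cached_key` is documented as a stable sort, so regions whose keys compare equal keep their input (region-index) order, whereas `sort_unstable_by_key` makes no such guarantee.

```rust
// Stable vs. unstable sorting with duplicate keys.
fn main() {
    let mut stable = vec![("b", 1), ("a", 1), ("c", 0)];
    let mut unstable = stable.clone();

    // Stable sort: ("b", 1) stays ahead of ("a", 1), matching their input order.
    stable.sort_by_cached_key(|&(_, key)| key);
    assert_eq!(stable, vec![("c", 0), ("b", 1), ("a", 1)]);

    // Unstable sort: the relative order of the two key-1 elements is unspecified,
    // which is why the old layouter output could differ between platforms.
    unstable.sort_unstable_by_key(|&(_, key)| key);
    assert_eq!(unstable.iter().filter(|&&(_, k)| k == 1).count(), 2);
}
```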
+ #[cfg(not(feature = "floor-planner-v1-legacy-pdqsort"))] + sorted_regions.sort_by_cached_key(sort_key); + + // To preserve compatibility, when the "floor-planner-v1-legacy-pdqsort" feature is enabled, + // we use a copy of the pdqsort implementation from the Rust 1.56.1 standard library, fixed + // to its behaviour on 64-bit platforms. + // https://github.com/rust-lang/rust/blob/1.56.1/library/core/src/slice/mod.rs#L2365-L2402 + #[cfg(feature = "floor-planner-v1-legacy-pdqsort")] + halo2_legacy_pdqsort::sort::quicksort(&mut sorted_regions, |a, b| sort_key(a).lt(&sort_key(b))); + sorted_regions.reverse(); // Lay out the sorted regions. diff --git a/halo2_proofs/src/circuit/layouter.rs b/halo2_proofs/src/circuit/layouter.rs index b1bf8b8111..0161e25d0f 100644 --- a/halo2_proofs/src/circuit/layouter.rs +++ b/halo2_proofs/src/circuit/layouter.rs @@ -6,19 +6,24 @@ use std::fmt; use ff::Field; +pub use super::table_layouter::TableLayouter; use super::{AssignedCell, Cell, RegionIndex, Value}; -use crate::plonk::{ - Advice, Any, Assigned, Challenge, Column, Error, Fixed, Instance, Selector, TableColumn, -}; +use crate::plonk::{Advice, Any, Assigned, Challenge, Column, Error, Fixed, Instance, Selector}; /// Intermediate trait requirements for [`RegionLayouter`] when thread-safe regions are enabled. #[cfg(feature = "thread-safe-region")] pub trait SyncDeps: Send + Sync {} +#[cfg(feature = "thread-safe-region")] +impl SyncDeps for T {} + /// Intermediate trait requirements for [`RegionLayouter`]. #[cfg(not(feature = "thread-safe-region"))] pub trait SyncDeps {} +#[cfg(not(feature = "thread-safe-region"))] +impl SyncDeps for T {} + /// Helper trait for implementing a custom [`Layouter`]. /// /// This trait is used for implementing region assignments: @@ -94,7 +99,8 @@ pub trait RegionLayouter: fmt::Debug + SyncDeps { /// Assign the value of the instance column's cell at absolute location /// `row` to the column `advice` at `offset` within this region. /// - /// Returns the advice cell, and its value if known. + /// Returns the advice cell that has been equality-constrained to the + /// instance cell, and its value if known. fn assign_advice_from_instance<'v>( &mut self, annotation: &'v (dyn Fn() -> String + 'v), @@ -104,6 +110,10 @@ pub trait RegionLayouter: fmt::Debug + SyncDeps { offset: usize, ) -> Result<(Cell, Value), Error>; + /// Returns the value of the instance column's cell at absolute location `row`. + fn instance_value(&mut self, instance: Column, row: usize) + -> Result, Error>; + /// Assign a fixed value fn assign_fixed( &mut self, @@ -133,24 +143,6 @@ pub trait RegionLayouter: fmt::Debug + SyncDeps { fn next_phase(&mut self); } -/// Helper trait for implementing a custom [`Layouter`]. -/// -/// This trait is used for implementing table assignments. -/// -/// [`Layouter`]: super::Layouter -pub trait TableLayouter: fmt::Debug { - /// Assigns a fixed value to a table cell. - /// - /// Returns an error if the table cell has already been assigned to. - fn assign_cell<'v>( - &'v mut self, - annotation: &'v (dyn Fn() -> String + 'v), - column: TableColumn, - offset: usize, - to: &'v mut (dyn FnMut() -> Value> + 'v), - ) -> Result<(), Error>; -} - /// The shape of a region. For a region at a certain index, we track /// the set of columns it uses as well as the number of rows it uses. #[derive(Clone, Debug)] @@ -161,8 +153,6 @@ pub struct RegionShape { pub(super) row_count: usize, } -impl SyncDeps for RegionShape {} - /// The virtual column involved in a region. 
This includes concrete columns, /// as well as selectors that are not concrete columns at this stage. #[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)] @@ -292,6 +282,14 @@ impl RegionLayouter for RegionShape { )) } + fn instance_value( + &mut self, + _instance: Column, + _row: usize, + ) -> Result, Error> { + Ok(Value::unknown()) + } + fn assign_fixed<'v>( &'v mut self, _: &'v (dyn Fn() -> String + 'v), diff --git a/halo2_proofs/src/circuit/table_layouter.rs b/halo2_proofs/src/circuit/table_layouter.rs new file mode 100644 index 0000000000..47759abf35 --- /dev/null +++ b/halo2_proofs/src/circuit/table_layouter.rs @@ -0,0 +1,413 @@ +//! Implementations of common table layouters. + +use std::{ + collections::HashMap, + fmt::{self, Debug}, +}; + +use ff::Field; + +use crate::plonk::{Assigned, Assignment, Error, TableColumn, TableError}; + +use super::Value; + +/// Helper trait for implementing a custom [`Layouter`]. +/// +/// This trait is used for implementing table assignments. +/// +/// [`Layouter`]: super::Layouter +pub trait TableLayouter: std::fmt::Debug { + /// Assigns a fixed value to a table cell. + /// + /// Returns an error if the table cell has already been assigned to. + fn assign_cell<'v>( + &'v mut self, + annotation: &'v (dyn Fn() -> String + 'v), + column: TableColumn, + offset: usize, + to: &'v mut (dyn FnMut() -> Value> + 'v), + ) -> Result<(), Error>; +} + +/// The default value to fill a table column with. +/// +/// - The outer `Option` tracks whether the value in row 0 of the table column has been +/// assigned yet. This will always be `Some` once a valid table has been completely +/// assigned. +/// - The inner `Value` tracks whether the underlying `Assignment` is evaluating +/// witnesses or not. +type DefaultTableValue = Option>>; + +/// A table layouter that can be used to assign values to a table. +pub struct SimpleTableLayouter<'r, 'a, F: Field, CS: Assignment + 'a> { + cs: &'a mut CS, + used_columns: &'r [TableColumn], + /// maps from a fixed column to a pair (default value, vector saying which rows are assigned) + pub default_and_assigned: HashMap, Vec)>, +} + +impl<'r, 'a, F: Field, CS: Assignment + 'a> fmt::Debug for SimpleTableLayouter<'r, 'a, F, CS> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SimpleTableLayouter") + .field("used_columns", &self.used_columns) + .field("default_and_assigned", &self.default_and_assigned) + .finish() + } +} + +impl<'r, 'a, F: Field, CS: Assignment + 'a> SimpleTableLayouter<'r, 'a, F, CS> { + /// Returns a new SimpleTableLayouter + pub fn new(cs: &'a mut CS, used_columns: &'r [TableColumn]) -> Self { + SimpleTableLayouter { + cs, + used_columns, + default_and_assigned: HashMap::default(), + } + } +} + +impl<'r, 'a, F: Field, CS: Assignment + 'a> TableLayouter + for SimpleTableLayouter<'r, 'a, F, CS> +{ + fn assign_cell<'v>( + &'v mut self, + _annotation: &'v (dyn Fn() -> String + 'v), + column: TableColumn, + offset: usize, + to: &'v mut (dyn FnMut() -> Value> + 'v), + ) -> Result<(), Error> { + if self.used_columns.contains(&column) { + return Err(Error::TableError(TableError::UsedColumn(column))); + } + + let entry = self.default_and_assigned.entry(column).or_default(); + + let mut value = Value::unknown(); + self.cs.assign_fixed( + // annotation, + column.inner(), + offset, // tables are always assigned starting at row 0 + { + let res = to(); + value = res; + res.assign()? 
+ }, + ); + + match (entry.0.is_none(), offset) { + // Use the value at offset 0 as the default value for this table column. + (true, 0) => entry.0 = Some(value), + // Since there is already an existing default value for this table column, + // the caller should not be attempting to assign another value at offset 0. + (false, 0) => { + return Err(Error::TableError(TableError::OverwriteDefault( + column, + format!("{:?}", entry.0.unwrap()), + format!("{:?}", value), + ))) + } + _ => (), + } + if entry.1.len() <= offset { + entry.1.resize(offset + 1, false); + } + entry.1[offset] = true; + + Ok(()) + } +} + +pub(crate) fn compute_table_lengths( + default_and_assigned: &HashMap, Vec)>, +) -> Result { + let column_lengths: Result, Error> = default_and_assigned + .iter() + .map(|(col, (default_value, assigned))| { + if default_value.is_none() || assigned.is_empty() { + return Err(Error::TableError(TableError::ColumnNotAssigned(*col))); + } + if assigned.iter().all(|b| *b) { + // All values in the column have been assigned + Ok((col, assigned.len())) + } else { + Err(Error::TableError(TableError::ColumnNotAssigned(*col))) + } + }) + .collect(); + let column_lengths = column_lengths?; + column_lengths + .into_iter() + .try_fold((None, 0), |acc, (col, col_len)| { + if acc.1 == 0 || acc.1 == col_len { + Ok((Some(*col), col_len)) + } else { + let mut cols = [(*col, col_len), (acc.0.unwrap(), acc.1)]; + cols.sort(); + Err(Error::TableError(TableError::UnevenColumnLengths( + cols[0], cols[1], + ))) + } + }) + .map(|col_len| col_len.1) +} + +#[cfg(test)] +mod tests { + use halo2curves::pasta::Fp; + + use crate::{ + circuit::{Layouter, SimpleFloorPlanner}, + dev::MockProver, + plonk::{Circuit, ConstraintSystem}, + poly::Rotation, + }; + + use super::*; + + #[test] + fn table_no_default() { + const K: u32 = 4; + + #[derive(Clone)] + struct FaultyCircuitConfig { + table: TableColumn, + } + + struct FaultyCircuit; + + impl Circuit for FaultyCircuit { + type Config = FaultyCircuitConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let a = meta.advice_column(); + let table = meta.lookup_table_column(); + + meta.lookup("", |cells| { + let a = cells.query_advice(a, Rotation::cur()); + vec![(a, table)] + }); + + Self::Config { table } + } + + fn synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + layouter.assign_table( + || "duplicate assignment", + |mut table| { + table.assign_cell( + || "default", + config.table, + 1, + || Value::known(Fp::zero()), + ) + }, + ) + } + } + + let prover = MockProver::run(K, &FaultyCircuit, vec![]); + assert_eq!( + format!("{}", prover.unwrap_err()), + "TableColumn { inner: Column { index: 0, column_type: Fixed } } not fully assigned. Help: assign a value at offset 0." 
+ ); + } + + #[test] + fn table_overwrite_default() { + const K: u32 = 4; + + #[derive(Clone)] + struct FaultyCircuitConfig { + table: TableColumn, + } + + struct FaultyCircuit; + + impl Circuit for FaultyCircuit { + type Config = FaultyCircuitConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let a = meta.advice_column(); + let table = meta.lookup_table_column(); + + meta.lookup("", |cells| { + let a = cells.query_advice(a, Rotation::cur()); + vec![(a, table)] + }); + + Self::Config { table } + } + + fn synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + layouter.assign_table( + || "duplicate assignment", + |mut table| { + table.assign_cell( + || "default", + config.table, + 0, + || Value::known(Fp::zero()), + )?; + table.assign_cell( + || "duplicate", + config.table, + 0, + || Value::known(Fp::zero()), + ) + }, + ) + } + } + + let prover = MockProver::run(K, &FaultyCircuit, vec![]); + assert_eq!( + format!("{}", prover.unwrap_err()), + "Attempted to overwrite default value Value { inner: Some(Trivial(0x0000000000000000000000000000000000000000000000000000000000000000)) } with Value { inner: Some(Trivial(0x0000000000000000000000000000000000000000000000000000000000000000)) } in TableColumn { inner: Column { index: 0, column_type: Fixed } }" + ); + } + + #[test] + fn table_reuse_column() { + const K: u32 = 4; + + #[derive(Clone)] + struct FaultyCircuitConfig { + table: TableColumn, + } + + struct FaultyCircuit; + + impl Circuit for FaultyCircuit { + type Config = FaultyCircuitConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let a = meta.advice_column(); + let table = meta.lookup_table_column(); + + meta.lookup("", |cells| { + let a = cells.query_advice(a, Rotation::cur()); + vec![(a, table)] + }); + + Self::Config { table } + } + + fn synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + layouter.assign_table( + || "first assignment", + |mut table| { + table.assign_cell( + || "default", + config.table, + 0, + || Value::known(Fp::zero()), + ) + }, + )?; + + layouter.assign_table( + || "reuse", + |mut table| { + table.assign_cell(|| "reuse", config.table, 1, || Value::known(Fp::zero())) + }, + ) + } + } + + let prover = MockProver::run(K, &FaultyCircuit, vec![]); + assert_eq!( + format!("{}", prover.unwrap_err()), + "TableColumn { inner: Column { index: 0, column_type: Fixed } } has already been used" + ); + } + + #[test] + fn table_uneven_columns() { + const K: u32 = 4; + + #[derive(Clone)] + struct FaultyCircuitConfig { + table: (TableColumn, TableColumn), + } + + struct FaultyCircuit; + + impl Circuit for FaultyCircuit { + type Config = FaultyCircuitConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let a = meta.advice_column(); + let table = (meta.lookup_table_column(), meta.lookup_table_column()); + meta.lookup("", |cells| { + let a = cells.query_advice(a, Rotation::cur()); + + vec![(a.clone(), table.0), (a, table.1)] + }); + + Self::Config { table } + } + + fn 
synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + layouter.assign_table( + || "table with uneven columns", + |mut table| { + table.assign_cell(|| "", config.table.0, 0, || Value::known(Fp::zero()))?; + table.assign_cell(|| "", config.table.0, 1, || Value::known(Fp::zero()))?; + + table.assign_cell(|| "", config.table.1, 0, || Value::known(Fp::zero())) + }, + ) + } + } + + let prover = MockProver::run(K, &FaultyCircuit, vec![]); + assert_eq!( + format!("{}", prover.unwrap_err()), + "TableColumn { inner: Column { index: 0, column_type: Fixed } } has length 2 while TableColumn { inner: Column { index: 1, column_type: Fixed } } has length 1" + ); + } +} diff --git a/halo2_proofs/src/circuit/value.rs b/halo2_proofs/src/circuit/value.rs index e6ae26cd1b..f3ea6a39ea 100644 --- a/halo2_proofs/src/circuit/value.rs +++ b/halo2_proofs/src/circuit/value.rs @@ -63,6 +63,11 @@ impl Value { } } + /// ONLY FOR INTERNAL CRATE USAGE; DO NOT EXPOSE! + pub(crate) fn into_option(self) -> Option { + self.inner + } + /// Enforces an assertion on the contained value, if known. /// /// The assertion is ignored if `self` is [`Value::unknown()`]. Do not try to enforce diff --git a/halo2_proofs/src/dev.rs b/halo2_proofs/src/dev.rs index 010852ae93..09a419c6e4 100644 --- a/halo2_proofs/src/dev.rs +++ b/halo2_proofs/src/dev.rs @@ -1,37 +1,30 @@ //! Tools for developing circuits. -use std::collections::BTreeMap; use std::collections::HashMap; use std::collections::HashSet; -use std::fmt; use std::iter; use std::ops::{Add, Mul, Neg, Range}; use std::sync::Arc; -// use std::time::{Duration, Instant}; use blake2b_simd::blake2b; use ff::Field; use ff::FromUniformBytes; -use group::Group; -use crate::circuit::layouter::SyncDeps; use crate::plonk::permutation::keygen::Assembly; use crate::{ circuit, plonk::{ permutation, sealed::{self, SealedPhase}, - Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ColumnType, - ConstraintSystem, Error, Expression, FirstPhase, Fixed, FloorPlanner, Instance, Phase, - Selector, VirtualCell, + Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error, + Expression, FirstPhase, Fixed, FloorPlanner, Instance, Phase, Selector, }, - poly::Rotation, }; -use rayon::{ - iter::{ - IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator, - }, - slice::ParallelSliceMut, + +#[cfg(feature = "multicore")] +use crate::multicore::{ + IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator, + ParallelSliceMut, }; pub mod metadata; @@ -47,6 +40,9 @@ pub use failure::{FailureLocation, VerifyFailure}; mod gates; pub use gates::CircuitGates; +mod tfp; +pub use tfp::TracingFloorPlanner; + #[cfg(feature = "dev-graph")] mod graph; @@ -316,7 +312,7 @@ pub struct MockProver { // The advice cells in the circuit, arranged as [column][row]. advice: Vec>>, // The instance cells in the circuit, arranged as [column][row]. 
- instance: Vec>, + instance: Vec>>, selectors: Vec>, @@ -330,6 +326,21 @@ pub struct MockProver { current_phase: sealed::Phase, } +#[derive(Debug, Clone, PartialEq, Eq)] +enum InstanceValue { + Assigned(F), + Padding, +} + +impl InstanceValue { + fn value(&self) -> F { + match self { + InstanceValue::Assigned(v) => *v, + InstanceValue::Padding => F::ZERO, + } + } +} + impl MockProver { fn in_phase(&self, phase: P) -> bool { self.current_phase == phase.to_sealed() @@ -430,7 +441,7 @@ impl Assignment for MockProver { .instance .get(column.index()) .and_then(|column| column.get(row)) - .map(|v| circuit::Value::known(*v)) + .map(|v| circuit::Value::known(v.value())) .expect("bound failure")) } @@ -613,7 +624,7 @@ impl + Ord> MockProver { let instance = instance .into_iter() - .map(|mut instance| { + .map(|instance| { assert!( instance.len() <= n - (cs.blinding_factors() + 1), "instance.len={}, n={}, cs.blinding_factors={}", @@ -622,8 +633,12 @@ impl + Ord> MockProver { cs.blinding_factors() ); - instance.resize(n, F::ZERO); - instance + let mut instance_values = vec![InstanceValue::Padding; n]; + for (idx, value) in instance.into_iter().enumerate() { + instance_values[idx] = InstanceValue::Assigned(value); + } + + instance_values }) .collect::>(); @@ -771,17 +786,42 @@ impl + Ord> MockProver { // Determine where this cell should have been assigned. let cell_row = ((gate_row + n + cell.rotation.0) % n) as usize; - // Check that it was assigned! - if r.cells.get(&(cell.column, cell_row)).is_some() { - None - } else { - Some(VerifyFailure::CellNotAssigned { - gate: (gate_index, gate.name()).into(), - region: (r_i, r.name.clone(), r.annotations.clone()).into(), - gate_offset: *selector_row, - column: cell.column, - offset: cell_row as isize - r.rows.unwrap().0 as isize, - }) + match cell.column.column_type() { + Any::Instance => { + // Handle instance cells, which are not in the region. + let instance_value = + &self.instance[cell.column.index()][cell_row]; + match instance_value { + InstanceValue::Assigned(_) => None, + _ => Some(VerifyFailure::InstanceCellNotAssigned { + gate: (gate_index, gate.name()).into(), + region: (r_i, r.name.clone()).into(), + gate_offset: *selector_row, + column: cell.column.try_into().unwrap(), + row: cell_row, + }), + } + } + _ => { + // Check that it was assigned! 
+ if r.cells.contains_key(&(cell.column, cell_row)) { + None + } else { + Some(VerifyFailure::CellNotAssigned { + gate: (gate_index, gate.name()).into(), + region: ( + r_i, + r.name.clone(), + r.annotations.clone(), + ) + .into(), + gate_offset: *selector_row, + column: cell.column, + offset: cell_row as isize + - r.rows.unwrap().0 as isize, + }) + } + } } }) }) @@ -903,7 +943,8 @@ impl + Ord> MockProver { let rotation = query.1 .0; Value::Real( self.instance[column_index] - [(row as i32 + n + rotation) as usize % n as usize], + [(row as i32 + n + rotation) as usize % n as usize] + .value(), ) }, &|challenge| Value::Real(self.challenges[challenge.index()]), @@ -1088,7 +1129,10 @@ impl + Ord> MockProver { .map(|c: &Column| match c.column_type() { Any::Advice(_) => advice[c.index()][row], Any::Fixed => self.fixed[c.index()][row], - Any::Instance => CellValue::Assigned(self.instance[c.index()][row]), + Any::Instance => { + let cell: &InstanceValue = &self.instance[c.index()][row]; + CellValue::Assigned(cell.value()) + } }) .unwrap() }; @@ -1148,6 +1192,7 @@ impl + Ord> MockProver { /// Returns `Ok(())` if this `MockProver` is satisfied, or a list of errors indicating /// the reasons that the circuit is not satisfied. /// Constraints and lookup are checked at `usable_rows`, parallelly. + #[cfg(feature = "multicore")] pub fn verify_par(&self) -> Result<(), Vec> { self.verify_at_rows_par(self.usable_rows.clone(), self.usable_rows.clone()) } @@ -1155,6 +1200,7 @@ impl + Ord> MockProver { /// Returns `Ok(())` if this `MockProver` is satisfied, or a list of errors indicating /// the reasons that the circuit is not satisfied. /// Constraints are only checked at `gate_row_ids`, and lookup inputs are only checked at `lookup_input_row_ids`, parallelly. + #[cfg(feature = "multicore")] pub fn verify_at_rows_par>( &self, gate_row_ids: I, @@ -1207,23 +1253,44 @@ impl + Ord> MockProver { let cell_row = ((gate_row + n + cell.rotation.0) % n) as usize; - // Check that it was assigned! - if r.cells.contains_key(&(cell.column, cell_row)) { - None - } else { - Some(VerifyFailure::CellNotAssigned { - gate: (gate_index, gate.name()).into(), - region: ( - r_i, - r.name.clone(), - r.annotations.clone(), - ) - .into(), - gate_offset: *selector_row, - column: cell.column, - offset: cell_row as isize - - r.rows.unwrap().0 as isize, - }) + match cell.column.column_type() { + Any::Instance => { + // Handle instance cells, which are not in the region. + let instance_value = + &self.instance[cell.column.index()][cell_row]; + match instance_value { + InstanceValue::Assigned(_) => None, + _ => Some( + VerifyFailure::InstanceCellNotAssigned { + gate: (gate_index, gate.name()).into(), + region: (r_i, r.name.clone()).into(), + gate_offset: *selector_row, + column: cell.column.try_into().unwrap(), + row: cell_row, + }, + ), + } + } + _ => { + // Check that it was assigned! 
+ if r.cells.contains_key(&(cell.column, cell_row)) { + None + } else { + Some(VerifyFailure::CellNotAssigned { + gate: (gate_index, gate.name()).into(), + region: ( + r_i, + r.name.clone(), + r.annotations.clone(), + ) + .into(), + gate_offset: *selector_row, + column: cell.column, + offset: cell_row as isize + - r.rows.unwrap().0 as isize, + }) + } + } } }) .collect::>() @@ -1344,7 +1411,8 @@ impl + Ord> MockProver { &|query| { Value::Real( self.instance[query.column_index] - [(row as i32 + n + query.rotation.0) as usize % n as usize], + [(row as i32 + n + query.rotation.0) as usize % n as usize] + .value(), ) }, &|challenge| Value::Real(self.challenges[challenge.index()]), @@ -1525,7 +1593,10 @@ impl + Ord> MockProver { .map(|c: &Column| match c.column_type() { Any::Advice(_) => advice[c.index()][row], Any::Fixed => self.fixed[c.index()][row], - Any::Instance => CellValue::Assigned(self.instance[c.index()][row]), + Any::Instance => { + let cell: &InstanceValue = &self.instance[c.index()][row]; + CellValue::Assigned(cell.value()) + } }) .unwrap() }; @@ -1612,6 +1683,7 @@ impl + Ord> MockProver { /// ```ignore /// assert_eq!(prover.verify_par(), Ok(())); /// ``` + #[cfg(feature = "multicore")] pub fn assert_satisfied_par(&self) { if let Err(errs) = self.verify_par() { for err in errs { @@ -1633,6 +1705,7 @@ impl + Ord> MockProver { /// ```ignore /// assert_eq!(prover.verify_at_rows_par(), Ok(())); /// ``` + #[cfg(feature = "multicore")] pub fn assert_satisfied_at_rows_par>( &self, gate_row_ids: I, @@ -2185,5 +2258,3 @@ mod tests { ) } } - -impl SyncDeps for MockProver {} diff --git a/halo2_proofs/src/dev/cost.rs b/halo2_proofs/src/dev/cost.rs index 28874fc84e..5cceed1e91 100644 --- a/halo2_proofs/src/dev/cost.rs +++ b/halo2_proofs/src/dev/cost.rs @@ -1,6 +1,7 @@ //! Developer tools for investigating the cost of a circuit. use std::{ + cmp, collections::{HashMap, HashSet}, iter, marker::PhantomData, @@ -11,7 +12,7 @@ use ff::{Field, PrimeField}; use group::prime::PrimeGroup; use crate::{ - circuit::{layouter::SyncDeps, Value}, + circuit::{layouter::RegionColumn, Value}, plonk::{ Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error, Fixed, FloorPlanner, Instance, Selector, @@ -20,10 +21,11 @@ use crate::{ }; /// Measures a circuit to determine its costs, and explain what contributes to them. +#[allow(dead_code)] #[derive(Debug)] pub struct CircuitCost> { /// Power-of-2 bound on the number of rows in the circuit. - k: usize, + k: u32, /// Maximum degree of the circuit. max_deg: usize, /// Number of advice columns. @@ -38,27 +40,141 @@ pub struct CircuitCost> { permutation_cols: usize, /// Number of distinct sets of points in the multiopening argument. point_sets: usize, + /// Maximum rows used over all columns + max_rows: usize, + /// Maximum rows used over all advice columns + max_advice_rows: usize, + /// Maximum rows used over all fixed columns + max_fixed_rows: usize, + num_fixed_columns: usize, + num_advice_columns: usize, + num_instance_columns: usize, + num_total_columns: usize, _marker: PhantomData<(G, ConcreteCircuit)>, } -struct Assembly { - selectors: Vec>, +/// Region implementation used by Layout +#[allow(dead_code)] +#[derive(Debug)] +pub(crate) struct LayoutRegion { + /// The name of the region. Not required to be unique. + pub(crate) name: String, + /// The columns used by this region. + pub(crate) columns: HashSet, + /// The row that this region starts on, if known. 
+ pub(crate) offset: Option, + /// The number of rows that this region takes up. + pub(crate) rows: usize, + /// The cells assigned in this region. + pub(crate) cells: Vec<(RegionColumn, usize)>, +} + +/// Cost and graphing layouter +#[derive(Default, Debug)] +pub(crate) struct Layout { + /// k = 1 << n + pub(crate) k: u32, + /// Regions of the layout + pub(crate) regions: Vec, + current_region: Option, + /// Total row count + pub(crate) total_rows: usize, + /// Total advice rows + pub(crate) total_advice_rows: usize, + /// Total fixed rows + pub(crate) total_fixed_rows: usize, + /// Any cells assigned outside of a region. + pub(crate) loose_cells: Vec<(RegionColumn, usize)>, + /// Pairs of cells between which we have equality constraints. + pub(crate) equality: Vec<(Column, usize, Column, usize)>, + /// Selector assignments used for optimization pass + pub(crate) selectors: Vec>, } -impl SyncDeps for Assembly {} +impl Layout { + /// Creates a empty layout + pub fn new(k: u32, n: usize, num_selectors: usize) -> Self { + Layout { + k, + regions: vec![], + current_region: None, + total_rows: 0, + total_advice_rows: 0, + total_fixed_rows: 0, + /// Any cells assigned outside of a region. + loose_cells: vec![], + /// Pairs of cells between which we have equality constraints. + equality: vec![], + /// Selector assignments used for optimization pass + selectors: vec![vec![false; n]; num_selectors], + } + } + + /// Update layout metadata + pub fn update(&mut self, column: RegionColumn, row: usize) { + self.total_rows = cmp::max(self.total_rows, row + 1); + + if let RegionColumn::Column(col) = column { + match col.column_type() { + Any::Advice(_) => { + self.total_advice_rows = cmp::max(self.total_advice_rows, row + 1) + } + Any::Fixed => self.total_fixed_rows = cmp::max(self.total_fixed_rows, row + 1), + _ => {} + } + } + + if let Some(region) = self.current_region { + let region = &mut self.regions[region]; + region.columns.insert(column); + + // The region offset is the earliest row assigned to. + let mut offset = region.offset.unwrap_or(row); + if row < offset { + // The first row assigned was not at offset 0 within the region. + region.rows += offset - row; + offset = row; + } + // The number of rows in this region is the gap between the earliest and + // latest rows assigned. + region.rows = cmp::max(region.rows, row - offset + 1); + region.offset = Some(offset); + + region.cells.push((column, row)); + } else { + self.loose_cells.push((column, row)); + } + } +} -impl Assignment for Assembly { - fn enter_region(&mut self, _: N) +impl Assignment for Layout { + fn enter_region(&mut self, name_fn: N) where NR: Into, N: FnOnce() -> NR, { - // Do nothing; we don't care about regions in this context. + assert!(self.current_region.is_none()); + self.current_region = Some(self.regions.len()); + self.regions.push(LayoutRegion { + name: name_fn().into(), + columns: HashSet::default(), + offset: None, + rows: 0, + cells: vec![], + }) + } + + fn annotate_column(&mut self, _: A, _: Column) + where + A: FnOnce() -> AR, + AR: Into, + { } fn exit_region(&mut self) { - // Do nothing; we don't care about regions in this context. 
+ assert!(self.current_region.is_some()); + self.current_region = None; } fn enable_selector(&mut self, _: A, selector: &Selector, row: usize) -> Result<(), Error> @@ -66,8 +182,13 @@ impl Assignment for Assembly { A: FnOnce() -> AR, AR: Into, { - self.selectors[selector.0][row] = true; + if let Some(cell) = self.selectors[selector.0].get_mut(row) { + *cell = true; + } else { + return Err(Error::not_enough_rows_available(self.k)); + } + self.update((*selector).into(), row); Ok(()) } @@ -88,14 +209,15 @@ impl Assignment for Assembly { VR: Into>, A: FnOnce() -> AR, AR: Into,*/ { + self.update(Column::::from(column).into(), row); Ok(to.as_ref()) } fn assign_fixed( &mut self, _: A, - _: Column, - _: usize, + column: Column, + row: usize, _: V, ) -> Result<(), Error> where @@ -104,10 +226,18 @@ impl Assignment for Assembly { A: FnOnce() -> AR, AR: Into, { + self.update(Column::::from(column).into(), row); Ok(()) } - fn copy(&mut self, _: Column, _: usize, _: Column, _: usize) -> Result<(), Error> { + fn copy( + &mut self, + l_col: Column, + l_row: usize, + r_col: Column, + r_row: usize, + ) -> Result<(), crate::plonk::Error> { + self.equality.push((l_col, l_row, r_col, r_row)); Ok(()) } @@ -124,14 +254,6 @@ impl Assignment for Assembly { Value::unknown() } - fn annotate_column(&mut self, _annotation: A, _column: Column) - where - A: FnOnce() -> AR, - AR: Into, - { - // Do nothing - } - fn push_namespace(&mut self, _: N) where NR: Into, @@ -149,24 +271,19 @@ impl> CircuitCost Self { + pub fn measure(k: u32, circuit: &ConcreteCircuit) -> Self { // Collect the layout details. let mut cs = ConstraintSystem::default(); - #[cfg(feature = "circuit-params")] - let config = ConcreteCircuit::configure_with_params(&mut cs, circuit.params()); - #[cfg(not(feature = "circuit-params"))] let config = ConcreteCircuit::configure(&mut cs); - let mut assembly = Assembly { - selectors: vec![vec![false; 1 << k]; cs.num_selectors], - }; + let mut layout = Layout::new(k, 1 << k, cs.num_selectors); ConcreteCircuit::FloorPlanner::synthesize( - &mut assembly, + &mut layout, circuit, config, cs.constants.clone(), ) .unwrap(); - let (cs, _) = cs.compress_selectors(assembly.selectors); + let (cs, _) = cs.compress_selectors(layout.selectors); assert!((1 << k) >= cs.minimum_rows()); @@ -222,6 +339,15 @@ impl> CircuitCost> CircuitCost> CircuitCost From> for usize { + proof.polycomm.len(point, scalar) } } + +#[cfg(test)] +mod tests { + use halo2curves::pasta::{Eq, Fp}; + + use crate::circuit::SimpleFloorPlanner; + + use super::*; + + #[test] + fn circuit_cost_without_permutation() { + const K: u32 = 4; + + struct MyCircuit; + impl Circuit for MyCircuit { + type Config = (); + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self + } + + fn configure(_meta: &mut ConstraintSystem) -> Self::Config {} + + fn synthesize( + &self, + _config: Self::Config, + _layouter: impl crate::circuit::Layouter, + ) -> Result<(), Error> { + Ok(()) + } + } + CircuitCost::::measure(K, &MyCircuit).proof_size(1); + } +} diff --git a/halo2_proofs/src/dev/failure.rs b/halo2_proofs/src/dev/failure.rs index 0aae3b50be..eb71531041 100644 --- a/halo2_proofs/src/dev/failure.rs +++ b/halo2_proofs/src/dev/failure.rs @@ -13,8 +13,7 @@ use super::{ use crate::dev::metadata::Constraint; use crate::{ dev::{AdviceCellValue, CellValue, Instance, Value}, - plonk::{Any, Assigned, Column, ConstraintSystem, Expression, Gate}, - poly::Rotation, + plonk::{Any, Column, 
ConstraintSystem, Expression, Gate}, }; mod emitter; @@ -101,16 +100,17 @@ impl FailureLocation { .iter() .enumerate() .find(|(_, r)| { - if r.rows.is_none() { - return false; + if let Some((start, end)) = r.rows { + // We match the region if any input columns overlap, rather than all of + // them, because matching complex selector columns is hard. As long as + // regions are rectangles, and failures occur due to assignments entirely + // within single regions, "any" will be equivalent to "all". If these + // assumptions change, we'll start getting bug reports from users :) + (start..=end).contains(&failure_row) && !failure_columns.is_disjoint(&r.columns) + } else { + // Zero-area region + false } - let (start, end) = r.rows.unwrap(); - // We match the region if any input columns overlap, rather than all of - // them, because matching complex selector columns is hard. As long as - // regions are rectangles, and failures occur due to assignments entirely - // within single regions, "any" will be equivalent to "all". If these - // assumptions change, we'll start getting bug reports from users :) - (start..=end).contains(&failure_row) && !failure_columns.is_disjoint(&r.columns) }) .map(|(r_i, r)| FailureLocation::InRegion { region: (r_i, r.name.clone(), r.annotations.clone()).into(), @@ -139,6 +139,20 @@ pub enum VerifyFailure { /// offset 0, but the gate uses `Rotation::prev()`). offset: isize, }, + /// An instance cell used in an active gate was not assigned to. + InstanceCellNotAssigned { + /// The index of the active gate. + gate: metadata::Gate, + /// The region in which this gate was activated. + region: metadata::Region, + /// The offset (relative to the start of the region) at which the active gate + /// queries this cell. + gate_offset: usize, + /// The column in which this cell should be assigned. + column: Column, + /// The absolute row at which this cell should be assigned. + row: usize, + }, /// A constraint was not satisfied for a particular row. ConstraintNotSatisfied { /// The polynomial constraint that is not satisfied. 
@@ -225,6 +239,19 @@ impl fmt::Display for VerifyFailure { region, gate, gate_offset, column, offset, region.get_column_annotation((*column).into()) ) } + Self::InstanceCellNotAssigned { + gate, + region, + gate_offset, + column, + row, + } => { + write!( + f, + "{} uses {} at offset {}, which requires cell in instance column {:?} at row {} to be assigned.", + region, gate, gate_offset, column, row + ) + } Self::ConstraintNotSatisfied { constraint, location, diff --git a/halo2_proofs/src/dev/gates.rs b/halo2_proofs/src/dev/gates.rs index fd4c039623..352415bcd9 100644 --- a/halo2_proofs/src/dev/gates.rs +++ b/halo2_proofs/src/dev/gates.rs @@ -7,10 +7,7 @@ use ff::PrimeField; use crate::{ dev::util, - plonk::{ - sealed::{self, SealedPhase}, - Circuit, ConstraintSystem, FirstPhase, - }, + plonk::{sealed::SealedPhase, Circuit, ConstraintSystem, FirstPhase}, }; #[derive(Debug)] diff --git a/halo2_proofs/src/dev/graph.rs b/halo2_proofs/src/dev/graph.rs index 3a51c8fa2b..370ce8f174 100644 --- a/halo2_proofs/src/dev/graph.rs +++ b/halo2_proofs/src/dev/graph.rs @@ -80,8 +80,6 @@ struct Graph { current_namespace: Vec, } -impl crate::dev::SyncDeps for Graph {} - impl Assignment for Graph { fn enter_region(&mut self, _: N) where diff --git a/halo2_proofs/src/dev/graph/layout.rs b/halo2_proofs/src/dev/graph/layout.rs index 4031bfce40..3b243e3bd7 100644 --- a/halo2_proofs/src/dev/graph/layout.rs +++ b/halo2_proofs/src/dev/graph/layout.rs @@ -3,16 +3,13 @@ use plotters::{ coord::Shift, prelude::{DrawingArea, DrawingAreaErrorKind, DrawingBackend}, }; -use std::cmp; use std::collections::HashSet; use std::ops::Range; use crate::{ - circuit::{layouter::RegionColumn, Value}, - plonk::{ - Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error, - Fixed, FloorPlanner, Instance, Selector, - }, + circuit::layouter::RegionColumn, + dev::cost::Layout, + plonk::{Any, Circuit, Column, ConstraintSystem, FloorPlanner}, }; /// Graphical renderer for circuit layouts. @@ -324,171 +321,3 @@ impl CircuitLayout { Ok(()) } } - -#[derive(Debug)] -struct Region { - /// The name of the region. Not required to be unique. - name: String, - /// The columns used by this region. - columns: HashSet, - /// The row that this region starts on, if known. - offset: Option, - /// The number of rows that this region takes up. - rows: usize, - /// The cells assigned in this region. We store this as a `Vec` so that if any cells - /// are double-assigned, they will be visibly darker. - cells: Vec<(RegionColumn, usize)>, -} - -#[derive(Default)] -struct Layout { - k: u32, - regions: Vec, - current_region: Option, - total_rows: usize, - /// Any cells assigned outside of a region. We store this as a `Vec` so that if any - /// cells are double-assigned, they will be visibly darker. - loose_cells: Vec<(RegionColumn, usize)>, - /// Pairs of cells between which we have equality constraints. - equality: Vec<(Column, usize, Column, usize)>, - /// Selector assignments used for optimization pass - selectors: Vec>, -} - -impl crate::dev::SyncDeps for Layout {} - -impl Layout { - fn new(k: u32, n: usize, num_selectors: usize) -> Self { - Layout { - k, - regions: vec![], - current_region: None, - total_rows: 0, - /// Any cells assigned outside of a region. We store this as a `Vec` so that if any - /// cells are double-assigned, they will be visibly darker. - loose_cells: vec![], - /// Pairs of cells between which we have equality constraints. 
- equality: vec![], - /// Selector assignments used for optimization pass - selectors: vec![vec![false; n]; num_selectors], - } - } - - fn update(&mut self, column: RegionColumn, row: usize) { - self.total_rows = cmp::max(self.total_rows, row + 1); - - if let Some(region) = self.current_region { - let region = &mut self.regions[region]; - region.columns.insert(column); - - // The region offset is the earliest row assigned to. - let mut offset = region.offset.unwrap_or(row); - if row < offset { - // The first row assigned was not at offset 0 within the region. - region.rows += offset - row; - offset = row; - } - // The number of rows in this region is the gap between the earliest and - // latest rows assigned. - region.rows = cmp::max(region.rows, row - offset + 1); - region.offset = Some(offset); - - region.cells.push((column, row)); - } else { - self.loose_cells.push((column, row)); - } - } -} - -impl Assignment for Layout { - fn enter_region(&mut self, name_fn: N) - where - NR: Into, - N: FnOnce() -> NR, - { - assert!(self.current_region.is_none()); - self.current_region = Some(self.regions.len()); - self.regions.push(Region { - name: name_fn().into(), - columns: HashSet::default(), - offset: None, - rows: 0, - cells: vec![], - }) - } - - fn exit_region(&mut self) { - assert!(self.current_region.is_some()); - self.current_region = None; - } - - fn enable_selector(&mut self, _: A, selector: &Selector, row: usize) -> Result<(), Error> - where - A: FnOnce() -> AR, - AR: Into, - { - if let Some(cell) = self.selectors[selector.0].get_mut(row) { - *cell = true; - } else { - return Err(Error::not_enough_rows_available(self.k)); - } - - self.update((*selector).into(), row); - Ok(()) - } - - fn query_instance(&self, _: Column, _: usize) -> Result, Error> { - Ok(Value::unknown()) - } - - fn assign_advice<'v>( - &mut self, - column: Column, - row: usize, - _: Value>, - ) -> Value<&'v Assigned> { - self.update(Column::::from(column).into(), row); - Value::unknown() - } - - fn assign_fixed(&mut self, column: Column, row: usize, _: Assigned) { - self.update(Column::::from(column).into(), row); - } - - fn copy(&mut self, l_col: Column, l_row: usize, r_col: Column, r_row: usize) { - self.equality.push((l_col, l_row, r_col, r_row)); - } - - fn fill_from_row( - &mut self, - _: Column, - _: usize, - _: Value>, - ) -> Result<(), Error> { - Ok(()) - } - - fn get_challenge(&self, _: Challenge) -> Value { - Value::unknown() - } - - fn annotate_column(&mut self, _annotation: A, _column: Column) - where - A: FnOnce() -> AR, - AR: Into, - { - // Do nothing - } - - fn push_namespace(&mut self, _: N) - where - NR: Into, - N: FnOnce() -> NR, - { - // Do nothing; we don't care about namespaces in this context. - } - - fn pop_namespace(&mut self, _: Option) { - // Do nothing; we don't care about namespaces in this context. - } -} diff --git a/halo2_proofs/src/dev/tfp.rs b/halo2_proofs/src/dev/tfp.rs new file mode 100644 index 0000000000..c763c44643 --- /dev/null +++ b/halo2_proofs/src/dev/tfp.rs @@ -0,0 +1,494 @@ +use std::{fmt, marker::PhantomData}; + +use ff::Field; +use tracing::{debug, debug_span, span::EnteredSpan}; + +use crate::{ + circuit::{ + layouter::{RegionLayouter, SyncDeps}, + AssignedCell, Cell, Layouter, Region, Table, Value, + }, + plonk::{ + Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error, + Fixed, FloorPlanner, Instance, Selector, + }, +}; + +/// A helper type that augments a [`FloorPlanner`] with [`tracing`] spans and events. 
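The comments in the removed `Layout::update` describe how a region's extent is tracked; as a compact, hypothetical illustration of the same bookkeeping rule:

```rust
/// Sketch of the extent-tracking rule: `offset` is the earliest row assigned
/// to, and `rows` is the gap between the earliest and latest assigned rows.
fn track_extent(offset: &mut Option<usize>, rows: &mut usize, row: usize) {
    let mut start = offset.unwrap_or(row);
    if row < start {
        // The first row assigned was not the lowest row seen so far.
        *rows += start - row;
        start = row;
    }
    *rows = (*rows).max(row - start + 1);
    *offset = Some(start);
}
```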
+/// +/// `TracingFloorPlanner` can be used to instrument your circuit and determine exactly +/// what is happening during a particular run of keygen or proving. This can be useful for +/// identifying unexpected non-determinism or changes to a circuit. +/// +/// # No stability guarantees +/// +/// The `tracing` output is intended for use during circuit development. It should not be +/// considered production-stable, and the precise format or data exposed may change at any +/// time. +/// +/// # Examples +/// +/// ``` +/// use ff::Field; +/// use halo2_proofs::{ +/// circuit::{floor_planner, Layouter, Value}, +/// dev::TracingFloorPlanner, +/// plonk::{Circuit, ConstraintSystem, Error}, +/// }; +/// +/// # struct MyCircuit { +/// # some_witness: Value, +/// # }; +/// # #[derive(Clone)] +/// # struct MyConfig; +/// impl Circuit for MyCircuit { +/// // Wrap `TracingFloorPlanner` around your existing floor planner of choice. +/// //type FloorPlanner = floor_planner::V1; +/// type FloorPlanner = TracingFloorPlanner; +/// +/// // The rest of your `Circuit` implementation is unchanged. +/// type Config = MyConfig; +/// +/// #[cfg(feature = "circuit-params")] +/// type Params = (); +/// +/// fn without_witnesses(&self) -> Self { +/// Self { some_witness: Value::unknown() } +/// } +/// +/// fn configure(meta: &mut ConstraintSystem) -> Self::Config { +/// // .. +/// # todo!() +/// } +/// +/// fn synthesize(&self, config: Self::Config, layouter: impl Layouter) -> Result<(), Error> { +/// // .. +/// # todo!() +/// } +/// } +/// +/// #[test] +/// fn some_circuit_test() { +/// // At the start of your test, enable tracing. +/// tracing_subscriber::fmt() +/// .with_max_level(tracing::Level::DEBUG) +/// .with_ansi(false) +/// .without_time() +/// .init(); +/// +/// // Now when the rest of the test runs, you will get `tracing` output for every +/// // operation that the circuit performs under the hood! +/// } +/// ``` +#[derive(Debug)] +pub struct TracingFloorPlanner { + _phantom: PhantomData

<P>, +} + +impl<P: FloorPlanner> FloorPlanner for TracingFloorPlanner<P>
{ + fn synthesize + SyncDeps, C: Circuit>( + cs: &mut CS, + circuit: &C, + config: C::Config, + constants: Vec>, + ) -> Result<(), Error> { + P::synthesize( + &mut TracingAssignment::new(cs), + &TracingCircuit::borrowed(circuit), + config, + constants, + ) + } +} + +/// A helper type that augments a [`Circuit`] with [`tracing`] spans and events. +enum TracingCircuit<'c, F: Field, C: Circuit> { + Borrowed(&'c C, PhantomData), + Owned(C, PhantomData), +} + +impl<'c, F: Field, C: Circuit> TracingCircuit<'c, F, C> { + fn borrowed(circuit: &'c C) -> Self { + Self::Borrowed(circuit, PhantomData) + } + + fn owned(circuit: C) -> Self { + Self::Owned(circuit, PhantomData) + } + + fn inner_ref(&self) -> &C { + match self { + TracingCircuit::Borrowed(circuit, ..) => circuit, + TracingCircuit::Owned(circuit, ..) => circuit, + } + } +} + +impl<'c, F: Field, C: Circuit> Circuit for TracingCircuit<'c, F, C> { + type Config = C::Config; + type FloorPlanner = C::FloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self::owned(self.inner_ref().without_witnesses()) + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let _span = debug_span!("configure").entered(); + C::configure(meta) + } + + fn synthesize(&self, config: Self::Config, layouter: impl Layouter) -> Result<(), Error> { + let _span = debug_span!("synthesize").entered(); + self.inner_ref() + .synthesize(config, TracingLayouter::new(layouter)) + } +} + +/// A helper type that augments a [`Layouter`] with [`tracing`] spans and events. +struct TracingLayouter> { + layouter: L, + namespace_spans: Vec, + _phantom: PhantomData, +} + +impl> TracingLayouter { + fn new(layouter: L) -> Self { + Self { + layouter, + namespace_spans: vec![], + _phantom: PhantomData, + } + } +} + +impl> Layouter for TracingLayouter { + type Root = Self; + + fn assign_region(&mut self, name: N, assignment: A) -> Result + where + A: FnOnce(Region<'_, F>) -> Result, + N: Fn() -> NR, + NR: Into, + { + let _span = debug_span!("region", name = name().into()).entered(); + self.layouter.assign_region(name, |region| { + let mut region = TracingRegion(region); + let region: &mut dyn RegionLayouter = &mut region; + assignment(region.into()) + }) + } + + fn assign_table(&mut self, name: N, assignment: A) -> Result<(), Error> + where + A: FnMut(Table<'_, F>) -> Result<(), Error>, + N: Fn() -> NR, + NR: Into, + { + let _span = debug_span!("table", name = name().into()).entered(); + self.layouter.assign_table(name, assignment) + } + + fn constrain_instance(&mut self, cell: Cell, column: Column, row: usize) { + self.layouter.constrain_instance(cell, column, row); + } + + fn next_phase(&mut self) { + self.layouter.next_phase(); + } + + fn get_challenge(&self, _: Challenge) -> Value { + Value::unknown() + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } + + fn push_namespace(&mut self, name_fn: N) + where + NR: Into, + N: FnOnce() -> NR, + { + let name = name_fn().into(); + self.namespace_spans.push(debug_span!("ns", name).entered()); + self.layouter.push_namespace(|| name); + } + + fn pop_namespace(&mut self, gadget_name: Option) { + self.layouter.pop_namespace(gadget_name); + self.namespace_spans.pop(); + } +} + +fn debug_value_and_return_cell(value: AssignedCell) -> Cell { + if let Some(v) = value.value().into_option() { + debug!(target: "assigned", value = ?v); + } + value.cell() +} + +/// A helper type that augments a [`Region`] with [`tracing`] spans and events. 
+#[derive(Debug)] +struct TracingRegion<'r, F: Field>(Region<'r, F>); + +impl<'r, F: Field> RegionLayouter for TracingRegion<'r, F> { + fn enable_selector<'v>( + &'v mut self, + annotation: &'v (dyn Fn() -> String + 'v), + selector: &Selector, + offset: usize, + ) -> Result<(), Error> { + let _guard = debug_span!("enable_selector", name = annotation(), offset = offset).entered(); + debug!(target: "layouter", "Entered"); + self.0.enable_selector(annotation, selector, offset) + } + + fn name_column<'v>( + &'v mut self, + _: &'v (dyn std::ops::Fn() -> std::string::String + 'v), + _: Column, + ) { + } + + fn assign_advice<'v>( + &mut self, + // annotation: &'v (dyn Fn() -> String + 'v), + column: Column, + offset: usize, + to: Value>, // &'v mut (dyn FnMut() -> Value> + 'v), + ) -> AssignedCell<&'v Assigned, F> { + let _guard = + debug_span!("assign_advice", /*name = annotation(), */column = ?column, offset = offset) + .entered(); + debug!(target: "layouter", "Entered"); + self.0.assign_advice(/*annotation, */ column, offset, to) + // .map(debug_value_and_return_cell) + } + + fn assign_advice_from_constant<'v>( + &'v mut self, + annotation: &'v (dyn Fn() -> String + 'v), + column: Column, + offset: usize, + constant: Assigned, + ) -> Result { + let _guard = debug_span!("assign_advice_from_constant", + name = annotation(), + column = ?column, + offset = offset, + constant = ?constant, + ) + .entered(); + debug!(target: "layouter", "Entered"); + self.0 + .assign_advice_from_constant(annotation, column, offset, constant) + .map(debug_value_and_return_cell) + } + + fn assign_advice_from_instance<'v>( + &mut self, + annotation: &'v (dyn Fn() -> String + 'v), + instance: Column, + row: usize, + advice: Column, + offset: usize, + ) -> Result<(Cell, Value), Error> { + let _guard = debug_span!("assign_advice_from_instance", + name = annotation(), + instance = ?instance, + row = row, + advice = ?advice, + offset = offset, + ) + .entered(); + debug!(target: "layouter", "Entered"); + self.0 + .assign_advice_from_instance(annotation, instance, row, advice, offset) + .map(|value| { + if let Some(v) = value.value().into_option() { + debug!(target: "assigned", value = ?v); + } + (value.cell(), value.value().cloned()) + }) + } + + fn instance_value( + &mut self, + instance: Column, + row: usize, + ) -> Result, Error> { + self.0.instance_value(instance, row) + } + + fn assign_fixed( + &mut self, + // annotation: &'v (dyn Fn() -> String + 'v), + column: Column, + offset: usize, + to: Assigned, + ) -> Cell { + let _guard = + debug_span!("assign_fixed", /*name = annotation(), */column = ?column, offset = offset) + .entered(); + debug!(target: "layouter", "Entered"); + self.0.assign_fixed(/*annotation, */ column, offset, to) + // .map(debug_value_and_return_cell) + } + + fn constrain_constant(&mut self, cell: Cell, constant: Assigned) -> Result<(), Error> { + debug!(target: "constrain_constant", cell = ?cell, constant = ?constant); + self.0.constrain_constant(cell, constant) + } + + fn constrain_equal(&mut self, left: Cell, right: Cell) { + debug!(target: "constrain_equal", left = ?left, right = ?right); + self.0.constrain_equal(left, right); + } + + fn get_challenge(&self, challenge: Challenge) -> Value { + self.0.get_challenge(challenge) + } + + fn next_phase(&mut self) { + self.0.next_phase(); + } +} + +/// A helper type that augments an [`Assignment`] with [`tracing`] spans and events. 
+struct TracingAssignment<'cs, F: Field, CS: Assignment> { + cs: &'cs mut CS, + in_region: bool, + _phantom: PhantomData, +} + +impl<'cs, F: Field, CS: Assignment> TracingAssignment<'cs, F, CS> { + fn new(cs: &'cs mut CS) -> Self { + Self { + cs, + in_region: false, + _phantom: PhantomData, + } + } +} + +impl<'cs, F: Field, CS: Assignment> Assignment for TracingAssignment<'cs, F, CS> { + fn enter_region(&mut self, name_fn: N) + where + NR: Into, + N: FnOnce() -> NR, + { + self.in_region = true; + self.cs.enter_region(name_fn); + } + + fn annotate_column(&mut self, _: A, _: Column) + where + A: FnOnce() -> AR, + AR: Into, + { + } + + fn exit_region(&mut self) { + self.cs.exit_region(); + self.in_region = false; + } + + fn enable_selector( + &mut self, + annotation: A, + selector: &Selector, + row: usize, + ) -> Result<(), Error> + where + A: FnOnce() -> AR, + AR: Into, + { + let annotation = annotation().into(); + if self.in_region { + debug!(target: "position", row = row); + } else { + debug!(target: "enable_selector", name = annotation, row = row); + } + self.cs.enable_selector(|| annotation, selector, row) + } + + fn query_instance(&self, column: Column, row: usize) -> Result, Error> { + let _guard = debug_span!("positioned").entered(); + debug!(target: "query_instance", column = ?column, row = row); + self.cs.query_instance(column, row) + } + + fn assign_advice<'v>( + &mut self, + column: Column, + row: usize, + to: Value>, + ) -> Value<&'v Assigned> { + // let annotation = annotation().into(); + if self.in_region { + debug!(target: "position", row = row); + } else { + debug!(target: "assign_advice", /*name = annotation, */ column = ?column, row = row); + } + self.cs.assign_advice(column, row, to) + } + + fn assign_fixed(&mut self, column: Column, row: usize, to: Assigned) { + // let annotation = annotation().into(); + if self.in_region { + debug!(target: "position", row = row); + } else { + debug!(target: "assign_fixed", /*name = annotation, */column = ?column, row = row); + } + self.cs.assign_fixed(column, row, to); + } + + fn copy( + &mut self, + left_column: Column, + left_row: usize, + right_column: Column, + right_row: usize, + ) { + let _guard = debug_span!("positioned").entered(); + debug!( + target: "copy", + left_column = ?left_column, + left_row = left_row, + right_column = ?right_column, + right_row = right_row, + ); + self.cs.copy(left_column, left_row, right_column, right_row); + } + + fn fill_from_row( + &mut self, + column: Column, + row: usize, + to: Value>, + ) -> Result<(), Error> { + let _guard = debug_span!("positioned").entered(); + debug!(target: "fill_from_row", column = ?column, row = row); + self.cs.fill_from_row(column, row, to) + } + + fn get_challenge(&self, _: Challenge) -> Value { + Value::unknown() + } + + fn push_namespace(&mut self, name_fn: N) + where + NR: Into, + N: FnOnce() -> NR, + { + // We enter namespace spans in TracingLayouter. + self.cs.push_namespace(name_fn) + } + + fn pop_namespace(&mut self, gadget_name: Option) { + self.cs.pop_namespace(gadget_name); + // We exit namespace spans in TracingLayouter. 
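Since the whole module is built on `tracing` spans and `debug!` events, a minimal self-contained sketch of that pattern may help when reading its output. It assumes `tracing` and `tracing-subscriber` are available, as in the `TracingFloorPlanner` doc example above:

```rust
use tracing::{debug, debug_span};

fn main() {
    // Same subscriber setup as in the TracingFloorPlanner doc example.
    tracing_subscriber::fmt()
        .with_max_level(tracing::Level::DEBUG)
        .with_ansi(false)
        .without_time()
        .init();

    // Events emitted while the guard is alive are attributed to this span,
    // mirroring how TracingRegion/TracingAssignment scope their output.
    let _guard = debug_span!("region", name = "example").entered();
    debug!(target: "layouter", "Entered");
    let value = 42u64;
    debug!(target: "assigned", value = ?value);
}
```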
+ } +} diff --git a/halo2_proofs/src/dev/util.rs b/halo2_proofs/src/dev/util.rs index f09695a4a9..28489acf91 100644 --- a/halo2_proofs/src/dev/util.rs +++ b/halo2_proofs/src/dev/util.rs @@ -1,7 +1,7 @@ use group::ff::Field; use std::collections::BTreeMap; -use super::{metadata, CellValue, Value}; +use super::{metadata, CellValue, InstanceValue, Value}; use crate::{ plonk::{ Advice, AdviceQuery, Any, Column, ColumnType, Expression, FixedQuery, Gate, InstanceQuery, @@ -88,12 +88,13 @@ pub(super) fn load_instance<'a, F: Field, T: ColumnType, Q: Into + Cop n: i32, row: i32, queries: &'a [(Column, Rotation)], - cells: &'a [Vec], + cells: &'a [Vec>], ) -> impl Fn(Q) -> Value + 'a { move |query| { let (column, at) = &queries[query.into().index.unwrap()]; let resolved_row = (row + at.0) % n; - Value::Real(cells[column.index()][resolved_row as usize]) + let cell = &cells[column.index()][resolved_row as usize]; + Value::Real(cell.value()) } } diff --git a/halo2_proofs/src/helpers.rs b/halo2_proofs/src/helpers.rs index 6b2019be9c..a918190a98 100644 --- a/halo2_proofs/src/helpers.rs +++ b/halo2_proofs/src/helpers.rs @@ -1,7 +1,6 @@ use crate::poly::Polynomial; use ff::PrimeField; use halo2curves::{serde::SerdeObject, CurveAffine}; -use pairing::Engine; use std::io; /// This enum specifies how various types are serialized and deserialized. diff --git a/halo2_proofs/src/lib.rs b/halo2_proofs/src/lib.rs index 03848c59d0..bd1d38d636 100644 --- a/halo2_proofs/src/lib.rs +++ b/halo2_proofs/src/lib.rs @@ -1,29 +1,12 @@ //! # halo2_proofs #![cfg_attr(docsrs, feature(doc_cfg))] -// Build without warnings on stable 1.51 and later. -#![allow(unknown_lints)] -// Disable old lint warnings until our MSRV is at least 1.51. -#![allow(renamed_and_removed_lints)] -// Use the old lint name to build without warnings until our MSRV is at least 1.51. -#![allow(clippy::unknown_clippy_lints)] // The actual lints we want to disable. -#![allow( - clippy::op_ref, - clippy::assign_op_pattern, - clippy::too_many_arguments, - clippy::suspicious_arithmetic_impl, - clippy::many_single_char_names, - clippy::same_item_push, - clippy::upper_case_acronyms -)] -#![deny(broken_intra_doc_links)] +#![allow(clippy::op_ref, clippy::many_single_char_names)] +#![deny(rustdoc::broken_intra_doc_links)] #![deny(missing_debug_implementations)] // #![deny(missing_docs)] // #![deny(unsafe_code)] -// Remove this once we update pasta_curves -#![allow(unused_imports)] -#![allow(clippy::derive_partial_eq_without_eq)] #![feature(associated_type_defaults)] pub mod arithmetic; diff --git a/halo2_proofs/src/multicore.rs b/halo2_proofs/src/multicore.rs index a22eac9e79..fa11d7e627 100644 --- a/halo2_proofs/src/multicore.rs +++ b/halo2_proofs/src/multicore.rs @@ -1,5 +1,70 @@ -//! An interface for dealing with the kinds of parallel computations involved in -//! `halo2`. It's currently just a (very!) thin wrapper around [`rayon`] but may -//! be extended in the future to allow for various parallelism strategies. 
+#[cfg(all( + feature = "multicore", + target_arch = "wasm32", + not(target_feature = "atomics") +))] +compile_error!( + "The multicore feature flag is not supported on wasm32 architectures without atomics" +); -pub use rayon::{current_num_threads, scope, Scope}; +pub use maybe_rayon::{ + iter::{IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator}, + join, scope, Scope, +}; + +#[cfg(feature = "multicore")] +pub use maybe_rayon::{ + current_num_threads, + iter::{IndexedParallelIterator, IntoParallelRefIterator}, + slice::{ParallelSlice, ParallelSliceMut}, +}; + +#[cfg(not(feature = "multicore"))] +pub fn current_num_threads() -> usize { + 1 +} + +pub trait TryFoldAndReduce { + /// Implements `iter.try_fold().try_reduce()` for `rayon::iter::ParallelIterator`, + /// falling back on `Iterator::try_fold` when the `multicore` feature flag is + /// disabled. + /// The `try_fold_and_reduce` function can only be called by a iter with + /// `Result` item type because the `fold_op` must meet the trait + /// bounds of both `try_fold` and `try_reduce` from rayon. + fn try_fold_and_reduce( + self, + identity: impl Fn() -> T + Send + Sync, + fold_op: impl Fn(T, Result) -> Result + Send + Sync, + ) -> Result; +} + +#[cfg(feature = "multicore")] +impl TryFoldAndReduce for I +where + T: Send + Sync, + E: Send + Sync, + I: maybe_rayon::iter::ParallelIterator>, +{ + fn try_fold_and_reduce( + self, + identity: impl Fn() -> T + Send + Sync, + fold_op: impl Fn(T, Result) -> Result + Send + Sync, + ) -> Result { + self.try_fold(&identity, &fold_op) + .try_reduce(&identity, |a, b| fold_op(a, Ok(b))) + } +} + +#[cfg(not(feature = "multicore"))] +impl TryFoldAndReduce for I +where + I: std::iter::Iterator>, +{ + fn try_fold_and_reduce( + mut self, + identity: impl Fn() -> T + Send + Sync, + fold_op: impl Fn(T, Result) -> Result + Send + Sync, + ) -> Result { + self.try_fold(identity(), fold_op) + } +} diff --git a/halo2_proofs/src/plonk.rs b/halo2_proofs/src/plonk.rs index f3a03f1cd4..7c25115b8e 100644 --- a/halo2_proofs/src/plonk.rs +++ b/halo2_proofs/src/plonk.rs @@ -14,8 +14,8 @@ use crate::helpers::{ SerdePrimeField, }; use crate::poly::{ - commitment::Params, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, - PinnedEvaluationDomain, Polynomial, + Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, PinnedEvaluationDomain, + Polynomial, }; use crate::transcript::{ChallengeScalar, EncodedChallenge, Transcript}; use crate::SerdeFormat; @@ -41,7 +41,7 @@ pub use prover::*; pub use verifier::*; use evaluation::Evaluator; -use std::env::var; + use std::io; /// This is a verifying key which allows for the verification of proofs for a @@ -259,6 +259,11 @@ impl VerifyingKey { pub fn cs(&self) -> &ConstraintSystem { &self.cs } + + /// Returns representative of this `VerifyingKey` in transcripts + pub fn transcript_repr(&self) -> C::Scalar { + self.transcript_repr + } } /// Minimal representation of a verification key that can be used to identify diff --git a/halo2_proofs/src/plonk/assigned.rs b/halo2_proofs/src/plonk/assigned.rs index 919ddcdfb5..07de325678 100644 --- a/halo2_proofs/src/plonk/assigned.rs +++ b/halo2_proofs/src/plonk/assigned.rs @@ -446,7 +446,6 @@ mod tests { mod proptests { use std::{ cmp, - convert::TryFrom, ops::{Add, Mul, Neg, Sub}, }; diff --git a/halo2_proofs/src/plonk/circuit.rs b/halo2_proofs/src/plonk/circuit.rs index d2da999567..1bd3cb1fd6 100644 --- a/halo2_proofs/src/plonk/circuit.rs +++ b/halo2_proofs/src/plonk/circuit.rs @@ -9,10 +9,9 @@ use 
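As a usage sketch of the `TryFoldAndReduce` helper introduced in `multicore.rs` above, assuming the re-exports land as shown in the patch; the summing example itself is hypothetical:

```rust
use halo2_proofs::multicore::{IntoParallelIterator, TryFoldAndReduce};

/// Sum a collection of fallible values: in parallel when the `multicore`
/// feature is enabled, serially otherwise.
fn sum_ok(values: Vec<Result<u64, String>>) -> Result<u64, String> {
    values
        .into_par_iter()
        .try_fold_and_reduce(|| 0u64, |acc, res| res.map(|v| acc + v))
}

fn main() {
    assert_eq!(sum_ok(vec![Ok(1), Ok(2), Ok(3)]), Ok(6));
    assert!(sum_ok(vec![Ok(1), Err("boom".into())]).is_err());
}
```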
core::cmp::max; use core::ops::{Add, Mul}; use ff::Field; use sealed::SealedPhase; -use std::cmp::Ordering; use std::collections::HashMap; use std::env::var; -use std::fmt::{Debug, Formatter}; +use std::fmt::Debug; use std::{ convert::TryFrom, ops::{Neg, Sub}, @@ -96,7 +95,6 @@ impl PartialOrd for Column { } pub(crate) mod sealed { - use std::ops::Add; /// Phase of advice column #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] @@ -575,7 +573,7 @@ impl InstanceQuery { /// they cannot simultaneously be used as general fixed columns. /// /// [`Layouter::assign_table`]: crate::circuit::Layouter::assign_table -#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)] pub struct TableColumn { /// The fixed column that this table column is stored in. /// @@ -871,6 +869,7 @@ impl Expression { /// Evaluate the polynomial using the provided closures to perform the /// operations. + #[allow(clippy::too_many_arguments)] pub fn evaluate( &self, constant: &impl Fn(F) -> T, @@ -980,6 +979,7 @@ impl Expression { /// Evaluate the polynomial lazily using the provided closures to perform the /// operations. + #[allow(clippy::too_many_arguments)] pub fn evaluate_lazy( &self, constant: &impl Fn(F) -> T, diff --git a/halo2_proofs/src/plonk/error.rs b/halo2_proofs/src/plonk/error.rs index 33fdae9083..d4a7e11c14 100644 --- a/halo2_proofs/src/plonk/error.rs +++ b/halo2_proofs/src/plonk/error.rs @@ -1,8 +1,8 @@ -use std::cmp; use std::error; use std::fmt; use std::io; +use super::TableColumn; use super::{Any, Column}; /// This is an error that could occur during proving or circuit synthesis. @@ -37,6 +37,8 @@ pub enum Error { /// The instance sets up a copy constraint involving a column that has not been /// included in the permutation. ColumnNotInPermutation(Column), + /// An error relating to a lookup table. + TableError(TableError), } impl From for Error { @@ -79,6 +81,7 @@ impl fmt::Display for Error { "Column {:?} must be included in the permutation. Help: try applying `meta.enable_equalty` on the column", column ), + Error::TableError(error) => write!(f, "{}", error) } } } @@ -91,3 +94,45 @@ impl error::Error for Error { } } } + +/// This is an error that could occur during table synthesis. +#[derive(Debug)] +pub enum TableError { + /// A `TableColumn` has not been assigned. + ColumnNotAssigned(TableColumn), + /// A Table has columns of uneven lengths. + UnevenColumnLengths((TableColumn, usize), (TableColumn, usize)), + /// Attempt to assign a used `TableColumn` + UsedColumn(TableColumn), + /// Attempt to overwrite a default value + OverwriteDefault(TableColumn, String, String), +} + +impl fmt::Display for TableError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TableError::ColumnNotAssigned(col) => { + write!( + f, + "{:?} not fully assigned. 
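For context on the `#[allow(clippy::too_many_arguments)]` added to `Expression::evaluate` above, here is a hedged sketch of the one-closure-per-variant calling shape, assuming the ten-closure order (constant, selector, fixed, advice, instance, challenge, negation, sum, product, scaling); the degree-counting use case and the `Fr` field choice are illustrative only:

```rust
use halo2_proofs::plonk::Expression;
use halo2curves::bn256::Fr;

/// Count how many column queries an expression contains by folding over it.
fn query_count(expr: &Expression<Fr>) -> usize {
    expr.evaluate(
        &|_| 0,        // constant
        &|_| 0,        // selector
        &|_| 1,        // fixed query
        &|_| 1,        // advice query
        &|_| 1,        // instance query
        &|_| 0,        // challenge
        &|a| a,        // negation
        &|a, b| a + b, // sum
        &|a, b| a + b, // product
        &|a, _| a,     // scaling
    )
}
```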
Help: assign a value at offset 0.", + col + ) + } + TableError::UnevenColumnLengths((col, col_len), (table, table_len)) => write!( + f, + "{:?} has length {} while {:?} has length {}", + col, col_len, table, table_len + ), + TableError::UsedColumn(col) => { + write!(f, "{:?} has already been used", col) + } + TableError::OverwriteDefault(col, default, val) => { + write!( + f, + "Attempted to overwrite default value {} with {} in {:?}", + default, val, col + ) + } + } + } +} diff --git a/halo2_proofs/src/plonk/evaluation.rs b/halo2_proofs/src/plonk/evaluation.rs index f1bee28b3a..c5a4977be4 100644 --- a/halo2_proofs/src/plonk/evaluation.rs +++ b/halo2_proofs/src/plonk/evaluation.rs @@ -1,30 +1,11 @@ use crate::multicore; -use crate::plonk::lookup::prover::Committed; -use crate::plonk::permutation::Argument; -use crate::plonk::{lookup, permutation, AdviceQuery, Any, FixedQuery, InstanceQuery, ProvingKey}; +use crate::plonk::{lookup, permutation, Any, ProvingKey}; use crate::poly::Basis; use crate::{ - arithmetic::{eval_polynomial, parallelize, CurveAffine}, - poly::{ - commitment::Params, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, - Polynomial, ProverQuery, Rotation, - }, - transcript::{EncodedChallenge, TranscriptWrite}, -}; -use group::prime::PrimeCurve; -use group::{ - ff::{BatchInvert, Field, PrimeField, WithSmallOrderMulGroup}, - Curve, -}; -use std::any::TypeId; -use std::convert::TryInto; -use std::num::ParseIntError; -use std::slice; -use std::{ - collections::BTreeMap, - iter, - ops::{Index, Mul, MulAssign}, + arithmetic::{parallelize, CurveAffine}, + poly::{Coeff, ExtendedLagrangeCoeff, Polynomial, Rotation}, }; +use group::ff::{Field, PrimeField, WithSmallOrderMulGroup}; use super::{shuffle, ConstraintSystem, Expression}; @@ -68,6 +49,7 @@ impl Default for ValueSource { impl ValueSource { /// Get the value for this source + #[allow(clippy::too_many_arguments)] pub fn get( &self, rotations: &[usize], @@ -128,6 +110,7 @@ pub enum Calculation { impl Calculation { /// Get the resulting value of this calculation + #[allow(clippy::too_many_arguments)] pub fn evaluate( &self, rotations: &[usize], @@ -312,6 +295,7 @@ impl Evaluator { } /// Evaluate h poly + #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn evaluate_h( &self, pk: &ProvingKey, @@ -792,6 +776,7 @@ impl GraphEvaluator { } } + #[allow(clippy::too_many_arguments)] pub fn evaluate( &self, data: &mut EvaluationData, diff --git a/halo2_proofs/src/plonk/keygen.rs b/halo2_proofs/src/plonk/keygen.rs index 5ae0007469..f455b4ac0b 100644 --- a/halo2_proofs/src/plonk/keygen.rs +++ b/halo2_proofs/src/plonk/keygen.rs @@ -11,15 +11,14 @@ use super::{ Selector, }, evaluation::Evaluator, - permutation, Assigned, Challenge, Error, Expression, LagrangeCoeff, Polynomial, ProvingKey, - VerifyingKey, + permutation, Assigned, Challenge, Error, LagrangeCoeff, Polynomial, ProvingKey, VerifyingKey, }; use crate::{ arithmetic::{parallelize, CurveAffine}, - circuit::{layouter::SyncDeps, Value}, + circuit::Value, poly::{ batch_invert_assigned, - commitment::{Blind, Params, MSM}, + commitment::{Blind, Params}, EvaluationDomain, }, }; @@ -61,8 +60,6 @@ struct Assembly { _marker: std::marker::PhantomData, } -impl SyncDeps for Assembly {} - impl Assignment for Assembly { fn enter_region(&mut self, _: N) where diff --git a/halo2_proofs/src/plonk/lookup/prover.rs b/halo2_proofs/src/plonk/lookup/prover.rs index ce1fa735bc..7e50ff1c30 100644 --- a/halo2_proofs/src/plonk/lookup/prover.rs +++ b/halo2_proofs/src/plonk/lookup/prover.rs 
@@ -3,13 +3,18 @@ use super::super::{ ProvingKey, }; use super::Argument; +use crate::multicore::{self, IntoParallelIterator}; +#[cfg(feature = "multicore")] +use crate::multicore::{ + IndexedParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelIterator, + ParallelSliceMut, +}; use crate::plonk::evaluation::evaluate; use crate::{ arithmetic::{eval_polynomial, parallelize, CurveAffine}, poly::{ commitment::{Blind, Params}, - Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery, - Rotation, + Coeff, EvaluationDomain, LagrangeCoeff, Polynomial, ProverQuery, Rotation, }, transcript::{EncodedChallenge, TranscriptWrite}, }; @@ -21,11 +26,9 @@ use group::{ Curve, }; use rand_core::RngCore; -use rayon::prelude::*; -use rustc_hash::FxHashMap; + use std::collections::HashMap; use std::hash::Hash; -use std::{any::TypeId, convert::TryInto, num::ParseIntError, ops::Index}; use std::{ collections::BTreeMap, iter, @@ -68,6 +71,7 @@ impl> Argument { /// - constructs Permuted struct using permuted_input_value = A', and /// permuted_table_expression = S'. /// The Permuted struct is used to update the Lookup, and is then returned. + #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit_permuted< 'a, 'params: 'a, @@ -407,7 +411,7 @@ fn permute_expression_pair<'params, C: CurveAffine, P: Params<'params, C>, R: Rn where C::Scalar: Hash, { - let num_threads = rayon::current_num_threads(); + let num_threads = multicore::current_num_threads(); // heuristic on when multi-threading isn't worth it // for now it seems like multi-threading is often worth it /*if params.n() < (num_threads as u64) << 10 { @@ -429,6 +433,8 @@ where let input_time = start_timer!(|| "permute_par input hashmap (cpu par)"); // count input_expression unique values using a HashMap, using rayon parallel fold+reduce let capacity = usable_rows / num_threads + 1; + + #[cfg(feature = "multicore")] let input_uniques: HashMap = input_expression .par_iter() .fold( @@ -445,11 +451,21 @@ where m1 }) .unwrap(); + #[cfg(not(feature = "multicore"))] + let input_uniques: HashMap = + input_expression + .iter() + .fold(HashMap::with_capacity(capacity), |mut acc, coeff| { + *acc.entry(*coeff).or_insert(0) += 1; + acc + }); #[cfg(feature = "profile")] end_timer!(input_time); #[cfg(feature = "profile")] let timer = start_timer!(|| "permute_par input unique ranges (cpu par)"); + + #[cfg(feature = "multicore")] let input_unique_ranges = input_uniques .par_iter() .fold( @@ -473,6 +489,19 @@ where [r1, r2].concat() }) .unwrap(); + #[cfg(not(feature = "multicore"))] + let input_unique_ranges = input_uniques.iter().fold( + Vec::with_capacity(capacity), + |mut input_ranges, (&coeff, &count)| { + if input_ranges.is_empty() { + input_ranges.push((coeff, 0..count)); + } else { + let prev_end = input_ranges.last().unwrap().1.end; + input_ranges.push((coeff, prev_end..prev_end + count)); + } + input_ranges + }, + ); #[cfg(feature = "profile")] end_timer!(timer); @@ -483,14 +512,19 @@ where end_timer!(to_vec_time); #[cfg(feature = "profile")] let sort_table_time = start_timer!(|| "permute_par sort table"); + #[cfg(feature = "multicore")] sorted_table_coeffs.par_sort(); + #[cfg(not(feature = "multicore"))] + sorted_table_coeffs.sort(); #[cfg(feature = "profile")] end_timer!(sort_table_time); #[cfg(feature = "profile")] let timer = start_timer!(|| "leftover table coeffs (cpu par)"); + let leftover_table_coeffs: Vec = sorted_table_coeffs - .par_iter() + .as_slice() + .into_par_iter() .enumerate() 
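The serial fallback added above mirrors the rayon fold/reduce; a stripped-down, standalone sketch of that counting step (hypothetical helper, plain `u64` values):

```rust
use std::collections::HashMap;

/// Count occurrences of each value, as the non-`multicore` branch does with a
/// plain `Iterator::fold` instead of rayon's `fold` + `reduce`.
fn count_uniques(values: &[u64]) -> HashMap<u64, usize> {
    values.iter().fold(HashMap::new(), |mut counts, v| {
        *counts.entry(*v).or_insert(0) += 1;
        counts
    })
}

fn main() {
    let counts = count_uniques(&[1, 1, 2, 3, 3, 3]);
    assert_eq!(counts[&1], 2);
    assert_eq!(counts[&3], 3);
}
```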
.filter_map(|(i, coeff)| { ((i != 0 && coeff == &sorted_table_coeffs[i - 1]) || !input_uniques.contains_key(coeff)) @@ -510,7 +544,7 @@ where let leftover_range_end = range.end - i - 1; [(coeff, coeff)].into_par_iter().chain( leftover_table_coeffs[leftover_range_start..leftover_range_end] - .par_iter() + .into_par_iter() .map(move |leftover_table_coeff| (coeff, *leftover_table_coeff)), ) }) @@ -546,7 +580,10 @@ fn permute_expression_pair_seq<'params, C: CurveAffine, P: Params<'params, C>, R permuted_input_expression.truncate(usable_rows); // Sort input lookup expression values + #[cfg(feature = "multicore")] permuted_input_expression.par_sort(); + #[cfg(not(feature = "multicore"))] + permuted_input_expression.sort(); // A BTreeMap of each unique element in the table expression and its count let mut leftover_table_map: BTreeMap = table_expression diff --git a/halo2_proofs/src/plonk/lookup/verifier.rs b/halo2_proofs/src/plonk/lookup/verifier.rs index 1dc111f2cc..bbc86c8e9d 100644 --- a/halo2_proofs/src/plonk/lookup/verifier.rs +++ b/halo2_proofs/src/plonk/lookup/verifier.rs @@ -90,6 +90,7 @@ impl Committed { } impl Evaluated { + #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn expressions<'a>( &'a self, l_0: C::Scalar, @@ -141,7 +142,7 @@ impl Evaluated { std::iter::empty() .chain( - // l_0(X) * (1 - z'(X)) = 0 + // l_0(X) * (1 - z(X)) = 0 Some(l_0 * &(C::Scalar::ONE - &self.product_eval)), ) .chain( diff --git a/halo2_proofs/src/plonk/permutation.rs b/halo2_proofs/src/plonk/permutation.rs index 0eba912093..625d60ef85 100644 --- a/halo2_proofs/src/plonk/permutation.rs +++ b/halo2_proofs/src/plonk/permutation.rs @@ -10,7 +10,6 @@ use crate::{ poly::{Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial}, SerdeFormat, }; -use ff::PrimeField; pub(crate) mod keygen; pub(crate) mod prover; diff --git a/halo2_proofs/src/plonk/permutation/keygen.rs b/halo2_proofs/src/plonk/permutation/keygen.rs index e694fc81b7..94fa4518db 100644 --- a/halo2_proofs/src/plonk/permutation/keygen.rs +++ b/halo2_proofs/src/plonk/permutation/keygen.rs @@ -1,22 +1,22 @@ -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; - use ff::{Field, PrimeField}; use group::Curve; -use rayon::prelude::{ - IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, - IntoParallelRefMutIterator, ParallelIterator, ParallelSliceMut, -}; use super::{Argument, ProvingKey, VerifyingKey}; use crate::{ arithmetic::{parallelize, CurveAffine}, plonk::{Any, Column, Error}, poly::{ - commitment::{Blind, CommitmentScheme, Params}, + commitment::{Blind, Params}, EvaluationDomain, }, }; +#[cfg(feature = "multicore")] +use crate::multicore::{IndexedParallelIterator, ParallelIterator}; + +#[cfg(feature = "thread-safe-region")] +use std::collections::{BTreeSet, HashMap}; + #[cfg(not(feature = "thread-safe-region"))] /// Struct that accumulates all the necessary data in order to construct the permutation argument. #[derive(Clone, Debug, PartialEq, Eq)] @@ -133,12 +133,21 @@ impl Assembly { &self.columns } + #[cfg(feature = "multicore")] /// Returns mappings of the copies. pub fn mapping( &self, ) -> impl Iterator + '_> { + use crate::multicore::IntoParallelRefIterator; + self.mapping.iter().map(|c| c.par_iter().copied()) } + + #[cfg(not(feature = "multicore"))] + /// Returns mappings of the copies. 
+ pub fn mapping(&self) -> impl Iterator + '_> { + self.mapping.iter().map(|c| c.iter().copied()) + } } #[cfg(feature = "thread-safe-region")] @@ -239,6 +248,8 @@ impl Assembly { /// Builds the ordered mapping of the cycles. /// This will only get executed once. pub fn build_ordered_mapping(&mut self) { + use crate::multicore::IntoParallelRefMutIterator; + // will only get called once if self.ordered_cycles.is_empty() && !self.cycles.is_empty() { self.ordered_cycles = self @@ -304,16 +315,25 @@ impl Assembly { &self.columns } + #[cfg(feature = "multicore")] /// Returns mappings of the copies. pub fn mapping( &self, ) -> impl Iterator + '_> { + use crate::multicore::IntoParallelIterator; + (0..self.num_cols).map(move |i| { (0..self.col_len) .into_par_iter() .map(move |j| self.mapping_at_idx(i, j)) }) } + + #[cfg(not(feature = "multicore"))] + /// Returns mappings of the copies. + pub fn mapping(&self) -> impl Iterator + '_> { + (0..self.num_cols).map(move |i| (0..self.col_len).map(move |j| self.mapping_at_idx(i, j))) + } } pub(crate) fn build_pk<'params, C: CurveAffine, P: Params<'params, C>>( diff --git a/halo2_proofs/src/plonk/permutation/prover.rs b/halo2_proofs/src/plonk/permutation/prover.rs index e0ea709575..3565723399 100644 --- a/halo2_proofs/src/plonk/permutation/prover.rs +++ b/halo2_proofs/src/plonk/permutation/prover.rs @@ -4,9 +4,6 @@ use group::{ Curve, }; use rand_core::RngCore; -use rayon::prelude::{ - IndexedParallelIterator, IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator, -}; use std::iter::{self, ExactSizeIterator}; use super::super::{circuit::Any, ChallengeBeta, ChallengeGamma, ChallengeX}; @@ -15,7 +12,6 @@ use crate::{ arithmetic::{eval_polynomial, parallelize, CurveAffine}, plonk::{self, Error}, poly::{ - self, commitment::{Blind, Params}, Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery, Rotation, }, @@ -46,6 +42,7 @@ pub(crate) struct Evaluated { } impl Argument { + #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit< 'params, C: CurveAffine, diff --git a/halo2_proofs/src/plonk/permutation/verifier.rs b/halo2_proofs/src/plonk/permutation/verifier.rs index 88fed41a18..a4637422ae 100644 --- a/halo2_proofs/src/plonk/permutation/verifier.rs +++ b/halo2_proofs/src/plonk/permutation/verifier.rs @@ -99,6 +99,7 @@ impl Committed { } impl Evaluated { + #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn expressions<'a>( &'a self, vk: &'a plonk::VerifyingKey, diff --git a/halo2_proofs/src/plonk/prover.rs b/halo2_proofs/src/plonk/prover.rs index b7c9652bde..0329e289c7 100644 --- a/halo2_proofs/src/plonk/prover.rs +++ b/halo2_proofs/src/plonk/prover.rs @@ -1,43 +1,38 @@ #[cfg(feature = "profile")] use ark_std::{end_timer, start_timer}; -use ff::{Field, FromUniformBytes, PrimeField, WithSmallOrderMulGroup}; +use ff::{Field, WithSmallOrderMulGroup}; use group::Curve; -use halo2curves::CurveExt; use rand_core::RngCore; -use std::collections::BTreeSet; -use std::env::var; + use std::hash::Hash; use std::marker::PhantomData; use std::ops::RangeTo; -use std::rc::Rc; -use std::sync::atomic::AtomicUsize; -use std::{collections::HashMap, iter, mem, sync::atomic::Ordering}; + +use std::{collections::HashMap, iter}; use super::{ circuit::{ - sealed::{self, SealedPhase}, - Advice, Any, Assignment, Challenge, Circuit, Column, ConstraintSystem, FirstPhase, Fixed, - FloorPlanner, Instance, Selector, + sealed::{self}, + Advice, Any, Assignment, Challenge, Circuit, Column, ConstraintSystem, Fixed, FloorPlanner, + Instance, 
Selector, }, lookup, permutation, shuffle, vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, - ChallengeX, ChallengeY, Error, Expression, ProvingKey, + ChallengeX, ChallengeY, Error, ProvingKey, }; -use crate::circuit::layouter::SyncDeps; + use crate::{ arithmetic::{eval_polynomial, CurveAffine}, circuit::Value, plonk::Assigned, poly::{ - self, commitment::{Blind, CommitmentScheme, Params, Prover}, - Basis, Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery, + Basis, Coeff, LagrangeCoeff, Polynomial, ProverQuery, }, }; use crate::{ poly::batch_invert_assigned, transcript::{EncodedChallenge, TranscriptWrite}, }; -use group::prime::PrimeCurveAffine; /// This creates a proof for the provided `circuit` when given the public /// parameters `params` and the proving key [`ProvingKey`] that was @@ -63,6 +58,10 @@ pub fn create_proof< where Scheme::Scalar: Hash + WithSmallOrderMulGroup<3>, { + if circuits.len() != instances.len() { + return Err(Error::InvalidInstances); + } + for instance in instances.iter() { if instance.len() != pk.vk.cs.num_instance_columns { return Err(Error::InvalidInstances); @@ -152,19 +151,6 @@ where _marker: PhantomData<(P, E)>, } - impl<'params, 'a, 'b, F, Scheme, P, C, E, R, T> SyncDeps - for WitnessCollection<'params, 'a, 'b, Scheme, P, C, E, R, T> - where - F: Field, - Scheme: CommitmentScheme, - P: Prover<'params, Scheme>, - C: CurveAffine, - E: EncodedChallenge, - R: RngCore, - T: TranscriptWrite, - { - } - impl<'params, 'a, 'b, F, Scheme, P, C, E, R, T> Assignment for WitnessCollection<'params, 'a, 'b, Scheme, P, C, E, R, T> where @@ -659,7 +645,7 @@ where let eval_time = start_timer!(|| "Commit to vanishing argument's h(X) commitments"); let x: ChallengeX<_> = transcript.squeeze_challenge_scalar(); - let xn = x.pow([params.n(), 0, 0, 0]); + let xn = x.pow([params.n()]); if P::QUERY_INSTANCE { // Compute and hash instance evals for each circuit instance @@ -813,3 +799,69 @@ where end_timer!(multiopen_time); multiopen_res } + +#[test] +fn test_create_proof() { + use crate::{ + circuit::SimpleFloorPlanner, + plonk::{keygen_pk, keygen_vk}, + poly::kzg::{ + commitment::{KZGCommitmentScheme, ParamsKZG}, + multiopen::ProverSHPLONK, + }, + transcript::{Blake2bWrite, Challenge255, TranscriptWriterBuffer}, + }; + use halo2curves::bn256::Bn256; + use rand_core::OsRng; + + #[derive(Clone, Copy)] + struct MyCircuit; + + impl Circuit for MyCircuit { + type Config = (); + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + *self + } + + fn configure(_meta: &mut ConstraintSystem) -> Self::Config {} + + fn synthesize( + &self, + _config: Self::Config, + _layouter: impl crate::circuit::Layouter, + ) -> Result<(), Error> { + Ok(()) + } + } + + let params: ParamsKZG = ParamsKZG::setup(3, OsRng); + let vk = keygen_vk(¶ms, &MyCircuit).expect("keygen_vk should not fail"); + let pk = keygen_pk(¶ms, vk, &MyCircuit).expect("keygen_pk should not fail"); + let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]); + + // Create proof with wrong number of instances + let proof = create_proof::, ProverSHPLONK<_>, _, _, _, _>( + ¶ms, + &pk, + &[MyCircuit, MyCircuit], + &[], + OsRng, + &mut transcript, + ); + assert!(matches!(proof.unwrap_err(), Error::InvalidInstances)); + + // Create proof with correct number of instances + create_proof::, ProverSHPLONK<_>, _, _, _, _>( + ¶ms, + &pk, + &[MyCircuit, MyCircuit], + &[&[], &[]], + OsRng, + &mut transcript, + ) + 
.expect("proof generation should not fail"); +} diff --git a/halo2_proofs/src/plonk/shuffle/prover.rs b/halo2_proofs/src/plonk/shuffle/prover.rs index ef3a459f22..fd30436a47 100644 --- a/halo2_proofs/src/plonk/shuffle/prover.rs +++ b/halo2_proofs/src/plonk/shuffle/prover.rs @@ -1,6 +1,5 @@ use super::super::{ - circuit::Expression, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, Error, - ProvingKey, + circuit::Expression, ChallengeGamma, ChallengeTheta, ChallengeX, Error, ProvingKey, }; use super::Argument; use crate::plonk::evaluation::evaluate; @@ -8,20 +7,14 @@ use crate::{ arithmetic::{eval_polynomial, parallelize, CurveAffine}, poly::{ commitment::{Blind, Params}, - Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery, - Rotation, + Coeff, EvaluationDomain, LagrangeCoeff, Polynomial, ProverQuery, Rotation, }, transcript::{EncodedChallenge, TranscriptWrite}, }; use ff::WithSmallOrderMulGroup; -use group::{ - ff::{BatchInvert, Field}, - Curve, -}; +use group::{ff::BatchInvert, Curve}; use rand_core::RngCore; -use std::{any::TypeId, convert::TryInto, num::ParseIntError, ops::Index}; use std::{ - collections::BTreeMap, iter, ops::{Mul, MulAssign}, }; @@ -47,6 +40,7 @@ impl> Argument { /// [S_0, S_1, ..., S_{m-1}], this method /// - constructs A_compressed = \theta^{m-1} A_0 + theta^{m-2} A_1 + ... + \theta A_{m-2} + A_{m-1} /// and S_compressed = \theta^{m-1} S_0 + theta^{m-2} S_1 + ... + \theta S_{m-2} + S_{m-1}, + #[allow(clippy::too_many_arguments)] fn compress<'a, 'params: 'a, C, P: Params<'params, C>>( &self, pk: &ProvingKey, @@ -99,6 +93,7 @@ impl> Argument { /// constructs the grand product polynomial over the shuffle. /// The grand product polynomial is used to populate the Product struct. /// The Product struct is added to the Shuffle and finally returned by the method. 
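The compression formula in the `compress` doc comment above is a Horner-style fold over powers of theta; a minimal numeric sketch, with the field choice and helper name being illustrative only:

```rust
use ff::Field;
use halo2curves::bn256::Fr;

/// theta^{m-1} * v_0 + theta^{m-2} * v_1 + ... + theta * v_{m-2} + v_{m-1}
fn compress(theta: Fr, values: &[Fr]) -> Fr {
    values.iter().fold(Fr::ZERO, |acc, v| acc * theta + v)
}

fn main() {
    let theta = Fr::from(3);
    // 3 * 5 + 7 = 22
    assert_eq!(compress(theta, &[Fr::from(5), Fr::from(7)]), Fr::from(22));
}
```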
+ #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit_product< 'a, 'params: 'a, diff --git a/halo2_proofs/src/plonk/shuffle/verifier.rs b/halo2_proofs/src/plonk/shuffle/verifier.rs index b02febb694..379cc5c8a1 100644 --- a/halo2_proofs/src/plonk/shuffle/verifier.rs +++ b/halo2_proofs/src/plonk/shuffle/verifier.rs @@ -52,6 +52,7 @@ impl Committed { } impl Evaluated { + #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn expressions<'a>( &'a self, l_0: C::Scalar, diff --git a/halo2_proofs/src/plonk/vanishing/prover.rs b/halo2_proofs/src/plonk/vanishing/prover.rs index f003d7cb4d..71c492213d 100644 --- a/halo2_proofs/src/plonk/vanishing/prover.rs +++ b/halo2_proofs/src/plonk/vanishing/prover.rs @@ -1,16 +1,14 @@ use std::iter; -use ff::{Field, PrimeField}; +use ff::Field; use group::Curve; -use rand_core::{RngCore, SeedableRng}; -use rayon::{current_num_threads, prelude::*}; +use rand_core::RngCore; use super::Argument; use crate::{ arithmetic::{eval_polynomial, parallelize, CurveAffine}, - plonk::{ChallengeX, ChallengeY, Error}, + plonk::{ChallengeX, Error}, poly::{ - self, commitment::{Blind, ParamsProver}, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, Polynomial, ProverQuery, }, diff --git a/halo2_proofs/src/plonk/verifier.rs b/halo2_proofs/src/plonk/verifier.rs index 1e431be41b..76675bcdfa 100644 --- a/halo2_proofs/src/plonk/verifier.rs +++ b/halo2_proofs/src/plonk/verifier.rs @@ -1,28 +1,25 @@ use ff::{Field, FromUniformBytes, WithSmallOrderMulGroup}; use group::Curve; -use rand_core::RngCore; use std::iter; use super::{ vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error, VerifyingKey, }; -use crate::arithmetic::{compute_inner_product, CurveAffine}; +use crate::arithmetic::compute_inner_product; use crate::poly::commitment::{CommitmentScheme, Verifier}; use crate::poly::VerificationStrategy; use crate::poly::{ - commitment::{Blind, Params, MSM}, - Guard, VerifierQuery, + commitment::{Blind, Params}, + VerifierQuery, }; -use crate::transcript::{read_n_points, read_n_scalars, EncodedChallenge, TranscriptRead}; +use crate::transcript::{read_n_scalars, EncodedChallenge, TranscriptRead}; #[cfg(feature = "batch")] mod batch; #[cfg(feature = "batch")] pub use batch::BatchVerifier; -use crate::poly::commitment::ParamsVerifier; - /// Returns a boolean indicating whether or not the proof is valid pub fn verify_proof< 'params, @@ -188,7 +185,7 @@ where }) .collect::, _>>()? } else { - let xn = x.pow([params.n(), 0, 0, 0]); + let xn = x.pow([params.n()]); let (min_rotation, max_rotation) = vk.cs .instance_queries @@ -267,7 +264,7 @@ where // commitments open to the correct values. 
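Several hunks above shorten `x.pow([params.n(), 0, 0, 0])` to `x.pow([params.n()])`; a small sanity-check sketch of why the two are equivalent (trailing zero limbs do not change the little-endian exponent):

```rust
use ff::Field;
use halo2curves::bn256::Fr;
use rand_core::OsRng;

fn main() {
    let x = Fr::random(OsRng);
    // `Field::pow` accepts any little-endian `[u64]` limb slice.
    assert_eq!(x.pow([8u64, 0, 0, 0]), x.pow([8u64]));
}
```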
let vanishing = { // x^n - let xn = x.pow([params.n(), 0, 0, 0]); + let xn = x.pow([params.n()]); let blinding_factors = vk.cs.blinding_factors(); let l_evals = vk diff --git a/halo2_proofs/src/plonk/verifier/batch.rs b/halo2_proofs/src/plonk/verifier/batch.rs index 04e08be4af..173d552e5f 100644 --- a/halo2_proofs/src/plonk/verifier/batch.rs +++ b/halo2_proofs/src/plonk/verifier/batch.rs @@ -1,14 +1,11 @@ -use std::{io, marker::PhantomData}; - use ff::FromUniformBytes; use group::ff::Field; use halo2curves::CurveAffine; -use rand_core::{OsRng, RngCore}; -use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; +use rand_core::OsRng; use super::{verify_proof, VerificationStrategy}; use crate::{ - multicore, + multicore::{IntoParallelIterator, TryFoldAndReduce}, plonk::{Error, VerifyingKey}, poly::{ commitment::{Params, MSM}, @@ -22,6 +19,9 @@ use crate::{ transcript::{Blake2bRead, TranscriptReadBuffer}, }; +#[cfg(feature = "multicore")] +use crate::multicore::{IndexedParallelIterator, ParallelIterator}; + /// A proof verification strategy that returns the proof's MSM. /// /// `BatchVerifier` handles the accumulation of the MSMs for the batched proofs. @@ -123,11 +123,10 @@ where e }) }) - .try_fold( + .try_fold_and_reduce( || params.empty_msm(), - |msm, res| res.map(|proof_msm| accumulate_msm(msm, proof_msm)), - ) - .try_reduce(|| params.empty_msm(), |a, b| Ok(accumulate_msm(a, b))); + |acc, res| res.map(|proof_msm| accumulate_msm(acc, proof_msm)), + ); match final_msm { Ok(msm) => msm.check(), diff --git a/halo2_proofs/src/poly.rs b/halo2_proofs/src/poly.rs index 8124a43ede..7cb50aa013 100644 --- a/halo2_proofs/src/poly.rs +++ b/halo2_proofs/src/poly.rs @@ -12,9 +12,11 @@ use crate::helpers::SerdePrimeField; use crate::plonk::Assigned; use crate::SerdeFormat; -use ff::PrimeField; +#[cfg(feature = "multicore")] +use crate::multicore::{ + IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator, ParallelSlice, +}; use group::ff::{BatchInvert, Field}; -use rayon::prelude::*; /// Generic commitment scheme structures pub mod commitment; @@ -163,7 +165,7 @@ impl Polynomial { } impl Polynomial { - /// Reads polynomial from buffer using `SerdePrimeField::read`. + /// Reads polynomial from buffer using `SerdePrimeField::read`. pub(crate) fn read(reader: &mut R, format: SerdeFormat) -> Self { let mut poly_len = [0u8; 4]; reader.read_exact(&mut poly_len).unwrap(); @@ -174,7 +176,7 @@ impl Polynomial { } } - /// Writes polynomial to buffer using `SerdePrimeField::write`. + /// Writes polynomial to buffer using `SerdePrimeField::write`. 
pub(crate) fn write(&self, writer: &mut W, format: SerdeFormat) { writer .write_all(&(self.values.len() as u32).to_be_bytes()) @@ -209,7 +211,8 @@ where .filter_map(|d| d.as_mut()) .batch_invert(); - assigned + #[cfg(feature = "multicore")] + return assigned .par_iter() .zip(assigned_denominators.par_chunks(n)) .map(|(poly, inv_denoms)| { @@ -224,7 +227,25 @@ where _marker: PhantomData, } }) - .collect() + .collect(); + + #[cfg(not(feature = "multicore"))] + return assigned + .iter() + .zip(assigned_denominators.chunks(n)) + .map(|(poly, inv_denoms)| { + debug_assert_eq!(inv_denoms.len(), poly.as_ref().len()); + Polynomial { + values: poly + .as_ref() + .iter() + .zip(inv_denoms.iter()) + .map(|(a, inv_den)| a.numerator() * inv_den.unwrap_or(F::ONE)) + .collect(), + _marker: PhantomData, + } + }) + .collect(); } impl Polynomial, LagrangeCoeff> { @@ -326,7 +347,7 @@ impl<'a, F: Field, B: Basis> Sub for &'a Polynomial { /// Describes the relative rotation of a vector. Negative numbers represent /// reverse (leftmost) rotations and positive numbers represent forward (rightmost) /// rotations. Zero represents no rotation. -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct Rotation(pub i32); impl Rotation { diff --git a/halo2_proofs/src/poly/commitment.rs b/halo2_proofs/src/poly/commitment.rs index cacf764512..8d7bad374b 100644 --- a/halo2_proofs/src/poly/commitment.rs +++ b/halo2_proofs/src/poly/commitment.rs @@ -6,12 +6,11 @@ use super::{ use crate::poly::Error; use crate::transcript::{EncodedChallenge, TranscriptRead, TranscriptWrite}; use ff::Field; -use group::{prime::PrimeCurveAffine, Curve, Group}; -use halo2curves::{CurveAffine, CurveExt}; +use halo2curves::CurveAffine; use rand_core::RngCore; use std::{ fmt::Debug, - io::{self, Read, Write}, + io::{self}, ops::{Add, AddAssign, Mul, MulAssign}, }; diff --git a/halo2_proofs/src/poly/domain.rs b/halo2_proofs/src/poly/domain.rs index d16e87867c..3e6ae9816b 100644 --- a/halo2_proofs/src/poly/domain.rs +++ b/halo2_proofs/src/poly/domain.rs @@ -9,10 +9,7 @@ use crate::{ use super::{Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation}; -use group::{ - ff::{BatchInvert, Field, PrimeField, WithSmallOrderMulGroup}, - Group, -}; +use group::ff::{BatchInvert, Field, WithSmallOrderMulGroup}; use std::{env::var, marker::PhantomData}; @@ -448,6 +445,9 @@ impl> EvaluationDomain { #[cfg(feature = "profile")] println!("k: {}, extended_k: {}", k, extended_k); + // ensure extended_k <= S + assert!(extended_k <= F::S); + let mut extended_omega = F::ROOT_OF_UNITY; // Get extended_omega, the 2^{extended_k}'th root of unity @@ -965,7 +965,7 @@ fn test_l_i() { let mut l = vec![]; let mut points = vec![]; for i in 0..8 { - points.push(domain.omega.pow([i, 0, 0, 0])); + points.push(domain.omega.pow([i])); } for i in 0..8 { let mut l_i = vec![Scalar::zero(); 8]; @@ -975,7 +975,7 @@ fn test_l_i() { } let x = Scalar::random(OsRng); - let xn = x.pow([8, 0, 0, 0]); + let xn = x.pow([8]); let evaluations = domain.l_i_range(x, xn, -7..=7); for i in 0..8 { diff --git a/halo2_proofs/src/poly/evaluator.rs b/halo2_proofs/src/poly/evaluator.rs deleted file mode 100644 index 5d20221255..0000000000 --- a/halo2_proofs/src/poly/evaluator.rs +++ /dev/null @@ -1,699 +0,0 @@ -use std::{ - cmp, - collections::{HashMap, HashSet}, - fmt, - hash::{Hash, Hasher}, - marker::PhantomData, - ops::{Add, Mul, MulAssign, Neg, Sub}, - sync::Arc, -}; - -use group::ff::Field; -use halo2curves::Field; - -use super::{ - Basis, Coeff, 
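The new `assert!(extended_k <= F::S)` in `EvaluationDomain` guards against requesting a domain larger than the field's 2-adicity can support; a tiny illustration of the bound, with arbitrary parameters:

```rust
use ff::PrimeField;
use halo2curves::bn256::Fr;

fn main() {
    // A 2^{extended_k}-sized evaluation domain needs a 2^{extended_k}-th root
    // of unity, which exists only if extended_k <= Fr::S (the 2-adicity).
    let extended_k = 20u32;
    assert!(extended_k <= Fr::S);
    println!("ok: {} <= {}", extended_k, Fr::S);
}
```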
EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation, -}; -use crate::{arithmetic::parallelize, multicore}; - -/// Returns `(chunk_size, num_chunks)` suitable for processing the given polynomial length -/// in the current parallelization environment. -fn get_chunk_params(poly_len: usize) -> (usize, usize) { - // Check the level of parallelization we have available. - let num_threads = multicore::current_num_threads(); - // We scale the number of chunks by a constant factor, to ensure that if not all - // threads are available, we can achieve more uniform throughput and don't end up - // waiting on a couple of threads to process the last chunks. - let num_chunks = num_threads * 4; - // Calculate the ideal chunk size for the desired throughput. We use ceiling - // division to ensure the minimum chunk size is 1. - // chunk_size = ceil(poly_len / num_chunks) - let chunk_size = (poly_len + num_chunks - 1) / num_chunks; - // Now re-calculate num_chunks from the actual chunk size. - // num_chunks = ceil(poly_len / chunk_size) - let num_chunks = (poly_len + chunk_size - 1) / chunk_size; - - (chunk_size, num_chunks) -} - -/// A reference to a polynomial registered with an [`Evaluator`]. -#[derive(Clone, Copy)] -pub(crate) struct AstLeaf { - index: usize, - rotation: Rotation, - _evaluator: PhantomData<(E, B)>, -} - -impl fmt::Debug for AstLeaf { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AstLeaf") - .field("index", &self.index) - .field("rotation", &self.rotation) - .finish() - } -} - -impl PartialEq for AstLeaf { - fn eq(&self, rhs: &Self) -> bool { - // We compare rotations by offset, which doesn't account for equivalent rotations. - self.index.eq(&rhs.index) && self.rotation.0.eq(&rhs.rotation.0) - } -} - -impl Eq for AstLeaf {} - -impl Hash for AstLeaf { - fn hash(&self, state: &mut H) { - self.index.hash(state); - self.rotation.0.hash(state); - } -} - -impl AstLeaf { - /// Produces a new `AstLeaf` node corresponding to the underlying polynomial at a - /// _new_ rotation. Existing rotations applied to this leaf node are ignored and the - /// returned polynomial is not rotated _relative_ to the previous structure. - pub(crate) fn with_rotation(&self, rotation: Rotation) -> Self { - AstLeaf { - index: self.index, - rotation, - _evaluator: PhantomData::default(), - } - } -} - -/// An evaluation context for polynomial operations. -/// -/// This context enables us to de-duplicate queries of circuit columns (and the rotations -/// they might require), by storing a list of all the underlying polynomials involved in -/// any query (which are almost certainly column polynomials). We use the context like so: -/// -/// - We register each underlying polynomial with the evaluator, which returns a reference -/// to it as a [`AstLeaf`]. -/// - The references are then used to build up a [`Ast`] that represents the overall -/// operations to be applied to the polynomials. -/// - Finally, we call [`Evaluator::evaluate`] passing in the [`Ast`]. -pub(crate) struct Evaluator { - polys: Vec>, - _context: E, -} - -/// Constructs a new `Evaluator`. -/// -/// The `context` parameter is used to provide type safety for evaluators. It ensures that -/// an evaluator will only be used to evaluate [`Ast`]s containing [`AstLeaf`]s obtained -/// from itself. It should be set to the empty closure `|| {}`, because anonymous closures -/// all have unique types. 
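The removed comment above relies on every closure literal having its own anonymous type; a quick self-contained check of that property (illustrative only):

```rust
use std::any::TypeId;

fn id_of<T: 'static>(_: &T) -> TypeId {
    TypeId::of::<T>()
}

fn main() {
    let ctx_a = || {};
    let ctx_b = || {};
    // Two distinct closure literals have two distinct anonymous types, which
    // is what kept each removed `Evaluator` tied to its own context.
    assert_ne!(id_of(&ctx_a), id_of(&ctx_b));
}
```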
-pub(crate) fn new_evaluator(context: E) -> Evaluator { - Evaluator { - polys: vec![], - _context: context, - } -} - -impl Evaluator { - /// Registers the given polynomial for use in this evaluation context. - /// - /// This API treats each registered polynomial as unique, even if the same polynomial - /// is added multiple times. - pub(crate) fn register_poly(&mut self, poly: Polynomial) -> AstLeaf { - let index = self.polys.len(); - self.polys.push(poly); - - AstLeaf { - index, - rotation: Rotation::cur(), - _evaluator: PhantomData::default(), - } - } - - /// Evaluates the given polynomial operation against this context. - pub(crate) fn evaluate( - &self, - ast: &Ast, - domain: &EvaluationDomain, - ) -> Polynomial - where - E: Copy + Send + Sync, - F: Field, - B: BasisOps, - { - // Traverse `ast` to collect the used leaves. - fn collect_rotations( - ast: &Ast, - ) -> HashSet> { - match ast { - Ast::Poly(leaf) => vec![*leaf].into_iter().collect(), - Ast::Add(a, b) | Ast::Mul(AstMul(a, b)) => { - let lhs = collect_rotations(a); - let rhs = collect_rotations(b); - lhs.union(&rhs).cloned().collect() - } - Ast::Scale(a, _) => collect_rotations(a), - Ast::DistributePowers(terms, _) => terms - .iter() - .flat_map(|term| collect_rotations(term).into_iter()) - .collect(), - Ast::LinearTerm(_) | Ast::ConstantTerm(_) => HashSet::default(), - } - } - let leaves = collect_rotations(ast); - - // Produce the rotated polynomials. - let rotated: HashMap<_, _> = leaves - .iter() - .cloned() - .map(|leaf| { - ( - leaf, - B::rotate(domain, &self.polys[leaf.index], leaf.rotation), - ) - }) - .collect(); - - // We're working in a single basis, so all polynomials are the same length. - let poly_len = self.polys.first().unwrap().len(); - let (chunk_size, num_chunks) = get_chunk_params(poly_len); - - // Split each rotated polynomial into chunks. - let chunks: Vec> = (0..num_chunks) - .map(|i| { - rotated - .iter() - .map(|(leaf, poly)| { - ( - *leaf, - poly.chunks(chunk_size) - .nth(i) - .expect("num_chunks was calculated correctly"), - ) - }) - .collect() - }) - .collect(); - - struct AstContext<'a, E, F: Field, B: Basis> { - domain: &'a EvaluationDomain, - poly_len: usize, - chunk_size: usize, - chunk_index: usize, - leaves: &'a HashMap, &'a [F]>, - } - - fn recurse( - ast: &Ast, - ctx: &AstContext<'_, E, F, B>, - ) -> Vec { - match ast { - Ast::Poly(leaf) => ctx.leaves.get(leaf).expect("We prepared this").to_vec(), - Ast::Add(a, b) => { - let mut lhs = recurse(a, ctx); - let rhs = recurse(b, ctx); - for (lhs, rhs) in lhs.iter_mut().zip(rhs.iter()) { - *lhs += *rhs; - } - lhs - } - Ast::Mul(AstMul(a, b)) => { - let mut lhs = recurse(a, ctx); - let rhs = recurse(b, ctx); - for (lhs, rhs) in lhs.iter_mut().zip(rhs.iter()) { - *lhs *= *rhs; - } - lhs - } - Ast::Scale(a, scalar) => { - let mut lhs = recurse(a, ctx); - for lhs in lhs.iter_mut() { - *lhs *= scalar; - } - lhs - } - Ast::DistributePowers(terms, base) => terms.iter().fold( - B::constant_term(ctx.poly_len, ctx.chunk_size, ctx.chunk_index, F::ZERO), - |mut acc, term| { - let term = recurse(term, ctx); - for (acc, term) in acc.iter_mut().zip(term) { - *acc *= base; - *acc += term; - } - acc - }, - ), - Ast::LinearTerm(scalar) => B::linear_term( - ctx.domain, - ctx.poly_len, - ctx.chunk_size, - ctx.chunk_index, - *scalar, - ), - Ast::ConstantTerm(scalar) => { - B::constant_term(ctx.poly_len, ctx.chunk_size, ctx.chunk_index, *scalar) - } - } - } - - // Apply `ast` to each chunk in parallel, writing the result into an output - // polynomial. 
- let mut result = B::empty_poly(domain); - multicore::scope(|scope| { - for (chunk_index, (out, leaves)) in - result.chunks_mut(chunk_size).zip(chunks.iter()).enumerate() - { - scope.spawn(move |_| { - let ctx = AstContext { - domain, - poly_len, - chunk_size, - chunk_index, - leaves, - }; - out.copy_from_slice(&recurse(ast, &ctx)); - }); - } - }); - result - } -} - -/// Struct representing the [`Ast::Mul`] case. -/// -/// This struct exists to make the internals of this case private so that we don't -/// accidentally construct this case directly, because it can only be implemented for the -/// [`ExtendedLagrangeCoeff`] basis. -#[derive(Clone)] -pub(crate) struct AstMul(Arc>, Arc>); - -impl fmt::Debug for AstMul { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("AstMul") - .field(&self.0) - .field(&self.1) - .finish() - } -} - -/// A polynomial operation backed by an [`Evaluator`]. -#[derive(Clone)] -pub(crate) enum Ast { - Poly(AstLeaf), - Add(Arc>, Arc>), - Mul(AstMul), - Scale(Arc>, F), - /// Represents a linear combination of a vector of nodes and the powers of a - /// field element, where the nodes are ordered from highest to lowest degree - /// terms. - DistributePowers(Arc>>, F), - /// The degree-1 term of a polynomial. - /// - /// The field element is the coefficient of the term in the standard basis, not the - /// coefficient basis. - LinearTerm(F), - /// The degree-0 term of a polynomial. - /// - /// The field element is the same in both the standard and evaluation bases. - ConstantTerm(F), -} - -impl Ast { - pub fn distribute_powers>(i: I, base: F) -> Self { - Ast::DistributePowers(Arc::new(i.into_iter().collect()), base) - } -} - -impl fmt::Debug for Ast { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Poly(leaf) => f.debug_tuple("Poly").field(leaf).finish(), - Self::Add(lhs, rhs) => f.debug_tuple("Add").field(lhs).field(rhs).finish(), - Self::Mul(x) => f.debug_tuple("Mul").field(x).finish(), - Self::Scale(base, scalar) => f.debug_tuple("Scale").field(base).field(scalar).finish(), - Self::DistributePowers(terms, base) => f - .debug_tuple("DistributePowers") - .field(terms) - .field(base) - .finish(), - Self::LinearTerm(x) => f.debug_tuple("LinearTerm").field(x).finish(), - Self::ConstantTerm(x) => f.debug_tuple("ConstantTerm").field(x).finish(), - } - } -} - -impl From> for Ast { - fn from(leaf: AstLeaf) -> Self { - Ast::Poly(leaf) - } -} - -impl Ast { - pub(crate) fn one() -> Self { - Self::ConstantTerm(F::ONE) - } -} - -impl Neg for Ast { - type Output = Ast; - - fn neg(self) -> Self::Output { - Ast::Scale(Arc::new(self), -F::ONE) - } -} - -impl Neg for &Ast { - type Output = Ast; - - fn neg(self) -> Self::Output { - -(self.clone()) - } -} - -impl Add for Ast { - type Output = Ast; - - fn add(self, other: Self) -> Self::Output { - Ast::Add(Arc::new(self), Arc::new(other)) - } -} - -impl<'a, E: Clone, F: Field, B: Basis> Add<&'a Ast> for &'a Ast { - type Output = Ast; - - fn add(self, other: &'a Ast) -> Self::Output { - self.clone() + other.clone() - } -} - -impl Add> for Ast { - type Output = Ast; - - fn add(self, other: AstLeaf) -> Self::Output { - Ast::Add(Arc::new(self), Arc::new(other.into())) - } -} - -impl Sub for Ast { - type Output = Ast; - - fn sub(self, other: Self) -> Self::Output { - self + (-other) - } -} - -impl<'a, E: Clone, F: Field, B: Basis> Sub<&'a Ast> for &'a Ast { - type Output = Ast; - - fn sub(self, other: &'a Ast) -> Self::Output { - self + &(-other) - } -} - -impl Sub> for Ast { - 
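The evaluation loop above hands each output chunk to its own scoped task and copies the recursion result into it; under the `multicore` feature the crate's `multicore::scope` wraps a scoped thread pool. A rough equivalent written directly against `rayon::scope` (assuming a `rayon` dependency, since the crate-internal wrapper is not shown in this diff):

```rust
use rayon::scope; // assumed dependency: rayon = "1"

/// Fill `out` in parallel, one scoped task per chunk; each task gets a disjoint
/// `&mut` chunk, so no further synchronization is needed.
fn fill_chunks<F>(out: &mut [u64], chunk_size: usize, f: F)
where
    F: Fn(usize, usize) -> Vec<u64> + Sync,
{
    scope(|s| {
        for (chunk_index, chunk) in out.chunks_mut(chunk_size).enumerate() {
            let f = &f;
            s.spawn(move |_| {
                // Compute this chunk's values, then copy them into the output slice.
                let values = f(chunk_index, chunk.len());
                chunk.copy_from_slice(&values);
            });
        }
    });
}

fn main() {
    let mut out = vec![0u64; 10];
    // Chunk size 4 over length 10 gives chunks of 4, 4 and a short final chunk of 2.
    fill_chunks(&mut out, 4, |i, len| vec![i as u64; len]);
    assert_eq!(out, vec![0, 0, 0, 0, 1, 1, 1, 1, 2, 2]);
}
```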
type Output = Ast; - - fn sub(self, other: AstLeaf) -> Self::Output { - self + (-Ast::from(other)) - } -} - -impl Mul for Ast { - type Output = Ast; - - fn mul(self, other: Self) -> Self::Output { - Ast::Mul(AstMul(Arc::new(self), Arc::new(other))) - } -} - -impl<'a, E: Clone, F: Field> Mul<&'a Ast> for &'a Ast { - type Output = Ast; - - fn mul(self, other: &'a Ast) -> Self::Output { - self.clone() * other.clone() - } -} - -impl Mul> for Ast { - type Output = Ast; - - fn mul(self, other: AstLeaf) -> Self::Output { - Ast::Mul(AstMul(Arc::new(self), Arc::new(other.into()))) - } -} - -impl Mul for Ast { - type Output = Ast; - - fn mul(self, other: Self) -> Self::Output { - Ast::Mul(AstMul(Arc::new(self), Arc::new(other))) - } -} - -impl<'a, E: Clone, F: Field> Mul<&'a Ast> - for &'a Ast -{ - type Output = Ast; - - fn mul(self, other: &'a Ast) -> Self::Output { - self.clone() * other.clone() - } -} - -impl Mul> for Ast { - type Output = Ast; - - fn mul(self, other: AstLeaf) -> Self::Output { - Ast::Mul(AstMul(Arc::new(self), Arc::new(other.into()))) - } -} - -impl Mul for Ast { - type Output = Ast; - - fn mul(self, other: F) -> Self::Output { - Ast::Scale(Arc::new(self), other) - } -} - -impl Mul for &Ast { - type Output = Ast; - - fn mul(self, other: F) -> Self::Output { - Ast::Scale(Arc::new(self.clone()), other) - } -} - -impl MulAssign for Ast { - fn mul_assign(&mut self, rhs: Self) { - *self = self.clone().mul(rhs) - } -} - -/// Operations which can be performed over a given basis. -pub(crate) trait BasisOps: Basis { - fn empty_poly(domain: &EvaluationDomain) -> Polynomial; - fn constant_term( - poly_len: usize, - chunk_size: usize, - chunk_index: usize, - scalar: F, - ) -> Vec; - fn linear_term( - domain: &EvaluationDomain, - poly_len: usize, - chunk_size: usize, - chunk_index: usize, - scalar: F, - ) -> Vec; - fn rotate( - domain: &EvaluationDomain, - poly: &Polynomial, - rotation: Rotation, - ) -> Polynomial; -} - -impl BasisOps for Coeff { - fn empty_poly(domain: &EvaluationDomain) -> Polynomial { - domain.empty_coeff() - } - - fn constant_term( - poly_len: usize, - chunk_size: usize, - chunk_index: usize, - scalar: F, - ) -> Vec { - let mut chunk = vec![F::ZERO; cmp::min(chunk_size, poly_len - chunk_size * chunk_index)]; - if chunk_index == 0 { - chunk[0] = scalar; - } - chunk - } - - fn linear_term( - _: &EvaluationDomain, - poly_len: usize, - chunk_size: usize, - chunk_index: usize, - scalar: F, - ) -> Vec { - let mut chunk = vec![F::ZERO; cmp::min(chunk_size, poly_len - chunk_size * chunk_index)]; - // If the chunk size is 1 (e.g. if we have a small k and many threads), then the - // linear coefficient is the second chunk. Otherwise, the chunk size is greater - // than one, and the linear coefficient is the second element of the first chunk. - // Note that we check against the original chunk size, not the potentially-short - // actual size of the current chunk, because we want to know whether the size of - // the previous chunk was 1. 
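The long run of `Neg`/`Add`/`Sub`/`Mul` impls above exists so circuit code can write expressions naturally while the tree stays cheaply shareable behind `Arc`s: negation is a scaling by `-1`, subtraction is addition of a negation, and multiplication by a scalar becomes a `Scale` node. A stripped-down illustration of that operator-overloading pattern (a toy `Node` type, not the crate's `Ast`):

```rust
use std::ops::{Add, Mul, Neg, Sub};
use std::sync::Arc;

/// Toy expression node; `Arc` keeps clones of shared sub-trees cheap.
enum Node {
    Constant(i64),
    Add(Arc<Node>, Arc<Node>),
    Scale(Arc<Node>, i64),
}

impl Node {
    fn eval(&self) -> i64 {
        match self {
            Node::Constant(c) => *c,
            Node::Add(a, b) => a.eval() + b.eval(),
            Node::Scale(a, s) => a.eval() * s,
        }
    }
}

impl Neg for Node {
    type Output = Node;
    fn neg(self) -> Node {
        // Negation is just scaling by -1.
        Node::Scale(Arc::new(self), -1)
    }
}

impl Add for Node {
    type Output = Node;
    fn add(self, rhs: Node) -> Node {
        Node::Add(Arc::new(self), Arc::new(rhs))
    }
}

impl Sub for Node {
    type Output = Node;
    fn sub(self, rhs: Node) -> Node {
        // a - b == a + (-b); no dedicated subtraction variant is required.
        self + (-rhs)
    }
}

impl Mul<i64> for Node {
    type Output = Node;
    fn mul(self, scalar: i64) -> Node {
        Node::Scale(Arc::new(self), scalar)
    }
}

fn main() {
    let expr = (Node::Constant(7) - Node::Constant(2)) * 3;
    assert_eq!(expr.eval(), 15);
}
```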
- if chunk_size == 1 && chunk_index == 1 { - chunk[0] = scalar; - } else if chunk_index == 0 { - chunk[1] = scalar; - } - chunk - } - - fn rotate( - _: &EvaluationDomain, - _: &Polynomial, - _: Rotation, - ) -> Polynomial { - panic!("Can't rotate polynomials in the standard basis") - } -} - -impl BasisOps for LagrangeCoeff { - fn empty_poly(domain: &EvaluationDomain) -> Polynomial { - domain.empty_lagrange() - } - - fn constant_term( - poly_len: usize, - chunk_size: usize, - chunk_index: usize, - scalar: F, - ) -> Vec { - vec![scalar; cmp::min(chunk_size, poly_len - chunk_size * chunk_index)] - } - - fn linear_term( - domain: &EvaluationDomain, - poly_len: usize, - chunk_size: usize, - chunk_index: usize, - scalar: F, - ) -> Vec { - // Take every power of omega within the chunk, and multiply by scalar. - let omega = domain.get_omega(); - let start = chunk_size * chunk_index; - (0..cmp::min(chunk_size, poly_len - start)) - .scan(omega.pow_vartime(&[start as u64]) * scalar, |acc, _| { - let ret = *acc; - *acc *= omega; - Some(ret) - }) - .collect() - } - - fn rotate( - _: &EvaluationDomain, - poly: &Polynomial, - rotation: Rotation, - ) -> Polynomial { - poly.rotate(rotation) - } -} - -impl BasisOps for ExtendedLagrangeCoeff { - fn empty_poly(domain: &EvaluationDomain) -> Polynomial { - domain.empty_extended() - } - - fn constant_term( - poly_len: usize, - chunk_size: usize, - chunk_index: usize, - scalar: F, - ) -> Vec { - vec![scalar; cmp::min(chunk_size, poly_len - chunk_size * chunk_index)] - } - - fn linear_term( - domain: &EvaluationDomain, - poly_len: usize, - chunk_size: usize, - chunk_index: usize, - scalar: F, - ) -> Vec { - // Take every power of the extended omega within the chunk, and multiply by scalar. - let omega = domain.get_extended_omega(); - let start = chunk_size * chunk_index; - (0..cmp::min(chunk_size, poly_len - start)) - .scan( - omega.pow_vartime(&[start as u64]) * F::ZETA * scalar, - |acc, _| { - let ret = *acc; - *acc *= omega; - Some(ret) - }, - ) - .collect() - } - - fn rotate( - domain: &EvaluationDomain, - poly: &Polynomial, - rotation: Rotation, - ) -> Polynomial { - domain.rotate_extended(poly, rotation) - } -} - -#[cfg(test)] -mod tests { - use std::iter; - - use halo2curves::pasta::pallas; - - use super::{get_chunk_params, new_evaluator, Ast, BasisOps, Evaluator}; - use crate::{ - multicore, - poly::{Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff}, - }; - - #[test] - fn short_chunk_regression_test() { - // Pick the smallest polynomial length that is guaranteed to produce a short chunk - // on this machine. - let k = match (1..16) - .map(|k| (k, get_chunk_params(1 << k))) - .find(|(k, (chunk_size, num_chunks))| (1 << k) < chunk_size * num_chunks) - .map(|(k, _)| k) - { - Some(k) => k, - None => { - // We are on a machine with a power-of-two number of threads, and cannot - // trigger the bug. - eprintln!( - "can't find a polynomial length for short_chunk_regression_test; skipping" - ); - return; - } - }; - eprintln!("Testing short-chunk regression with k = {}", k); - - fn test_case( - k: u32, - mut evaluator: Evaluator, - ) { - // Instantiate the evaluator with a trivial polynomial. - let domain = EvaluationDomain::new(1, k); - evaluator.register_poly(B::empty_poly(&domain)); - - // With the bug present, these will panic. 
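`linear_term` for the Lagrange bases produces only the slice of ω-powers that falls inside the current chunk, seeding an `Iterator::scan` with ωˢᵗᵃʳᵗ · scalar and multiplying by ω once per element (the extended basis additionally folds in the coset factor ζ, omitted here). The same pattern over plain integers, with a hypothetical small prime and generator purely for illustration:

```rust
/// Powers of `omega` (times `scalar`) restricted to one chunk, mod a small prime.
/// Mirrors the `scan` shape above: seed with omega^start * scalar, then multiply
/// the accumulator by omega once per element.
fn linear_term_chunk(
    omega: u64,
    scalar: u64,
    modulus: u64,
    poly_len: usize,
    chunk_size: usize,
    chunk_index: usize,
) -> Vec<u64> {
    let start = chunk_size * chunk_index;
    let len = chunk_size.min(poly_len - start); // the final chunk may be short
    let mut acc = scalar;
    for _ in 0..start {
        acc = acc * omega % modulus; // omega^start * scalar
    }
    (0..len)
        .scan(acc, |acc, _| {
            let ret = *acc;
            *acc = *acc * omega % modulus;
            Some(ret)
        })
        .collect()
}

fn main() {
    // omega = 3 has order 16 mod 17, so it stands in for a 2^4-th root of unity.
    let chunks: Vec<u64> = (0..4usize)
        .flat_map(|i| linear_term_chunk(3, 1, 17, 16, 4, i))
        .collect();
    let direct: Vec<u64> = (0..16u32).map(|i| 3u64.pow(i) % 17).collect();
    assert_eq!(chunks, direct);
}
```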
- let _ = evaluator.evaluate(&Ast::ConstantTerm(pallas::Base::zero()), &domain); - let _ = evaluator.evaluate(&Ast::LinearTerm(pallas::Base::zero()), &domain); - } - - test_case(k, new_evaluator::<_, _, Coeff>(|| {})); - test_case(k, new_evaluator::<_, _, LagrangeCoeff>(|| {})); - test_case(k, new_evaluator::<_, _, ExtendedLagrangeCoeff>(|| {})); - } -} diff --git a/halo2_proofs/src/poly/ipa/commitment.rs b/halo2_proofs/src/poly/ipa/commitment.rs index f9b4ad059b..7be053c49c 100644 --- a/halo2_proofs/src/poly/ipa/commitment.rs +++ b/halo2_proofs/src/poly/ipa/commitment.rs @@ -3,18 +3,14 @@ //! //! [halo]: https://eprint.iacr.org/2019/1021 -use crate::arithmetic::{ - best_fft, best_multiexp, g_to_lagrange, parallelize, CurveAffine, CurveExt, -}; +use crate::arithmetic::{best_multiexp, g_to_lagrange, parallelize, CurveAffine, CurveExt}; use crate::helpers::CurveRead; -use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier, MSM}; +use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier}; use crate::poly::ipa::msm::MSMIPA; use crate::poly::{Coeff, LagrangeCoeff, Polynomial}; -use ff::{Field, PrimeField}; -use group::{prime::PrimeCurveAffine, Curve, Group}; +use group::{Curve, Group}; use std::marker::PhantomData; -use std::ops::{Add, AddAssign, Mul, MulAssign}; mod prover; mod verifier; @@ -233,21 +229,13 @@ impl<'params, C: CurveAffine> ParamsProver<'params, C> for ParamsIPA { #[cfg(test)] mod test { - - use crate::arithmetic::{best_fft, best_multiexp, parallelize, CurveAffine, CurveExt}; - use crate::helpers::CurveRead; use crate::poly::commitment::ParamsProver; - use crate::poly::commitment::{Blind, CommitmentScheme, Params, MSM}; + use crate::poly::commitment::{Blind, Params, MSM}; use crate::poly::ipa::commitment::{create_proof, verify_proof, ParamsIPA}; use crate::poly::ipa::msm::MSMIPA; - use crate::poly::{Coeff, LagrangeCoeff, Polynomial}; - - use ff::{Field, PrimeField}; - use group::{prime::PrimeCurveAffine, Curve, Group}; - use std::marker::PhantomData; - use std::ops::{Add, AddAssign, Mul, MulAssign}; - use std::io; + use ff::Field; + use group::Curve; #[test] fn test_commit_lagrange_epaffine() { diff --git a/halo2_proofs/src/poly/ipa/commitment/prover.rs b/halo2_proofs/src/poly/ipa/commitment/prover.rs index d176987c96..344dbc0e65 100644 --- a/halo2_proofs/src/poly/ipa/commitment/prover.rs +++ b/halo2_proofs/src/poly/ipa/commitment/prover.rs @@ -1,7 +1,7 @@ use ff::Field; use rand_core::RngCore; -use super::{Params, ParamsIPA}; +use super::ParamsIPA; use crate::arithmetic::{ best_multiexp, compute_inner_product, eval_polynomial, parallelize, CurveAffine, }; @@ -11,7 +11,7 @@ use crate::poly::{commitment::Blind, Coeff, Polynomial}; use crate::transcript::{EncodedChallenge, TranscriptWrite}; use group::Curve; -use std::io::{self, Write}; +use std::io::{self}; /// Create a polynomial commitment opening proof for the polynomial defined /// by the coefficients `px`, the blinding factor `blind` used for the @@ -51,7 +51,7 @@ pub fn create_proof< // Evaluate the random polynomial at x_3 let s_at_x3 = eval_polynomial(&s_poly[..], x_3); // Subtract constant coefficient to get a random polynomial with a root at x_3 - s_poly[0] = s_poly[0] - &s_at_x3; + s_poly[0] -= &s_at_x3; // And sample a random blind let s_poly_blind = Blind(C::Scalar::random(&mut rng)); @@ -72,7 +72,7 @@ pub fn create_proof< // zero. 
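The IPA prover hunk just above keeps the same algebra while switching to compound assignment: subtracting `s(x_3)` from the constant coefficient leaves a polynomial with a root at `x_3`. A small numeric check of that identity using `f64` coefficients and Horner evaluation (not the crate's field types):

```rust
/// Evaluate a polynomial given in coefficient order (constant term first).
fn eval_poly(coeffs: &[f64], x: f64) -> f64 {
    coeffs.iter().rev().fold(0.0, |acc, &c| acc * x + c)
}

fn main() {
    // s(X) = 2 + 3X + 5X^2, and an arbitrary challenge point x3.
    let mut s_poly = vec![2.0, 3.0, 5.0];
    let x3 = 1.5;

    // Subtracting s(x3) from the constant coefficient forces a root at x3:
    // (s(X) - s(x3)) evaluated at X = x3 is zero by construction.
    let s_at_x3 = eval_poly(&s_poly, x3);
    s_poly[0] -= s_at_x3;

    assert!(eval_poly(&s_poly, x3).abs() < 1e-12);
}
```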
let mut p_prime_poly = s_poly * xi + p_poly; let v = eval_polynomial(&p_prime_poly, x_3); - p_prime_poly[0] = p_prime_poly[0] - &v; + p_prime_poly[0] -= &v; let p_prime_blind = s_poly_blind * Blind(xi) + p_blind; // This accumulates the synthetic blinding factor `f` starting diff --git a/halo2_proofs/src/poly/ipa/commitment/verifier.rs b/halo2_proofs/src/poly/ipa/commitment/verifier.rs index 0b60842899..cf258625d5 100644 --- a/halo2_proofs/src/poly/ipa/commitment/verifier.rs +++ b/halo2_proofs/src/poly/ipa/commitment/verifier.rs @@ -1,18 +1,9 @@ -use std::io::Read; - -use group::{ - ff::{BatchInvert, Field}, - Curve, -}; +use group::ff::{BatchInvert, Field}; use super::ParamsIPA; -use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsVerifierIPA}; -use crate::{ - arithmetic::{best_multiexp, CurveAffine}, - poly::ipa::strategy::GuardIPA, -}; +use crate::{arithmetic::CurveAffine, poly::ipa::strategy::GuardIPA}; use crate::{ - poly::{commitment::MSM, ipa::msm::MSMIPA, strategy::Guard, Error}, + poly::{commitment::MSM, ipa::msm::MSMIPA, Error}, transcript::{EncodedChallenge, TranscriptRead}, }; @@ -75,6 +66,9 @@ pub fn verify_proof<'params, C: CurveAffine, E: EncodedChallenge, T: Transcri // P' + \sum([u_j^{-1}] L_j) + \sum([u_j] R_j) // + [-c] G'_0 + [-cbz] U + [-f] W // = 0 + // + // Note that the guard returned from this function does not include + // the [-c]G'_0 term. let c = transcript.read_scalar().map_err(|_| Error::SamplingError)?; let neg_c = -c; diff --git a/halo2_proofs/src/poly/ipa/msm.rs b/halo2_proofs/src/poly/ipa/msm.rs index 3316e25337..a615ddce49 100644 --- a/halo2_proofs/src/poly/ipa/msm.rs +++ b/halo2_proofs/src/poly/ipa/msm.rs @@ -1,9 +1,5 @@ -use super::commitment::{IPACommitmentScheme, ParamsIPA}; -use crate::arithmetic::{best_multiexp, parallelize, CurveAffine}; -use crate::poly::{ - commitment::{CommitmentScheme, Params, MSM}, - ipa::commitment::ParamsVerifierIPA, -}; +use crate::arithmetic::{best_multiexp, CurveAffine}; +use crate::poly::{commitment::MSM, ipa::commitment::ParamsVerifierIPA}; use ff::Field; use group::Group; use std::collections::BTreeMap; @@ -222,13 +218,10 @@ impl<'a, C: CurveAffine> MSMIPA<'a, C> { #[cfg(test)] mod tests { - use super::ParamsIPA; - use crate::poly::commitment::ParamsProver; use crate::poly::{ - commitment::{Params, MSM}, - ipa::msm::MSMIPA, + commitment::{ParamsProver, MSM}, + ipa::{commitment::ParamsIPA, msm::MSMIPA}, }; - use group::Curve; use halo2curves::{ pasta::{Ep, EpAffine, Fp, Fq}, CurveAffine, diff --git a/halo2_proofs/src/poly/ipa/multiopen.rs b/halo2_proofs/src/poly/ipa/multiopen.rs index fd6aa78544..b78acb5934 100644 --- a/halo2_proofs/src/poly/ipa/multiopen.rs +++ b/halo2_proofs/src/poly/ipa/multiopen.rs @@ -4,7 +4,7 @@ //! 
[halo]: https://eprint.iacr.org/2019/1021 use super::*; -use crate::{arithmetic::CurveAffine, poly::query::Query, transcript::ChallengeScalar}; +use crate::{poly::query::Query, transcript::ChallengeScalar}; use ff::Field; use std::collections::{BTreeMap, BTreeSet}; diff --git a/halo2_proofs/src/poly/ipa/multiopen/prover.rs b/halo2_proofs/src/poly/ipa/multiopen/prover.rs index f09bc4425f..2ae745d457 100644 --- a/halo2_proofs/src/poly/ipa/multiopen/prover.rs +++ b/halo2_proofs/src/poly/ipa/multiopen/prover.rs @@ -1,9 +1,7 @@ -use super::{ - construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Query, -}; +use super::{construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4}; use crate::arithmetic::{eval_polynomial, kate_division, CurveAffine}; use crate::poly::commitment::ParamsProver; -use crate::poly::commitment::{Blind, Params, Prover}; +use crate::poly::commitment::{Blind, Prover}; use crate::poly::ipa::commitment::{self, IPACommitmentScheme, ParamsIPA}; use crate::poly::query::ProverQuery; use crate::poly::{Coeff, Polynomial}; diff --git a/halo2_proofs/src/poly/ipa/multiopen/verifier.rs b/halo2_proofs/src/poly/ipa/multiopen/verifier.rs index 391f89e15b..d559e33384 100644 --- a/halo2_proofs/src/poly/ipa/multiopen/verifier.rs +++ b/halo2_proofs/src/poly/ipa/multiopen/verifier.rs @@ -1,20 +1,14 @@ use std::fmt::Debug; -use std::io::Read; -use std::marker::PhantomData; use ff::Field; -use rand_core::RngCore; -use super::{ - construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Query, -}; +use super::{construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4}; use crate::arithmetic::{eval_polynomial, lagrange_interpolate, CurveAffine}; use crate::poly::commitment::{Params, Verifier, MSM}; use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA, ParamsVerifierIPA}; use crate::poly::ipa::msm::MSMIPA; use crate::poly::ipa::strategy::GuardIPA; use crate::poly::query::{CommitmentReference, VerifierQuery}; -use crate::poly::strategy::VerificationStrategy; use crate::poly::Error; use crate::transcript::{EncodedChallenge, TranscriptRead}; @@ -57,7 +51,9 @@ impl<'params, C: CurveAffine> Verifier<'params, IPACommitmentScheme> // Compress the commitments and expected evaluations at x together. // using the challenge x_1 - let mut q_commitments: Vec<_> = vec![self.params.empty_msm(); point_sets.len()]; + let mut q_commitments: Vec<_> = vec![ + (self.params.empty_msm(), C::Scalar::ONE); // (accumulator, next x_1 power). + point_sets.len()]; // A vec of vecs of evals. The outer vec corresponds to the point set, // while the inner vec corresponds to the points in a particular set. 
@@ -65,28 +61,32 @@ impl<'params, C: CurveAffine> Verifier<'params, IPACommitmentScheme> for point_set in point_sets.iter() { q_eval_sets.push(vec![C::Scalar::ZERO; point_set.len()]); } + { let mut accumulate = |set_idx: usize, new_commitment: CommitmentReference>, evals: Vec| { - q_commitments[set_idx].scale(*x_1); + let (q_commitment, x_1_power) = &mut q_commitments[set_idx]; match new_commitment { CommitmentReference::Commitment(c) => { - q_commitments[set_idx].append_term(C::Scalar::ONE, (*c).into()); + q_commitment.append_term(*x_1_power, (*c).into()); } CommitmentReference::MSM(msm) => { - q_commitments[set_idx].add_msm(msm); + let mut msm = msm.clone(); + msm.scale(*x_1_power); + q_commitment.add_msm(&msm); } } for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) { - *set_eval *= &(*x_1); - *set_eval += eval; + *set_eval += (*eval) * (*x_1_power); } + *x_1_power *= *x_1; }; // Each commitment corresponds to evaluations at a set of points. // For each set, we collapse each commitment's evals pointwise. - for commitment_data in commitment_map.into_iter() { + // Run in order of increasing x_1 powers. + for commitment_data in commitment_map.into_iter().rev() { accumulate( commitment_data.set_index, // set_idx, commitment_data.commitment, // commitment, @@ -135,7 +135,7 @@ impl<'params, C: CurveAffine> Verifier<'params, IPACommitmentScheme> msm.append_term(C::Scalar::ONE, q_prime_commitment.into()); let (msm, v) = q_commitments.into_iter().zip(u.iter()).fold( (msm, msm_eval), - |(mut msm, msm_eval), (q_commitment, q_eval)| { + |(mut msm, msm_eval), ((q_commitment, _), q_eval)| { msm.scale(*x_4); msm.add_msm(&q_commitment); (msm, msm_eval * &(*x_4) + q_eval) diff --git a/halo2_proofs/src/poly/ipa/strategy.rs b/halo2_proofs/src/poly/ipa/strategy.rs index c8d385f90c..d2d1b3d364 100644 --- a/halo2_proofs/src/poly/ipa/strategy.rs +++ b/halo2_proofs/src/poly/ipa/strategy.rs @@ -1,10 +1,6 @@ -use std::marker::PhantomData; - -use super::commitment::{IPACommitmentScheme, ParamsIPA, ParamsVerifierIPA}; +use super::commitment::{IPACommitmentScheme, ParamsIPA}; use super::msm::MSMIPA; use super::multiopen::VerifierIPA; -use crate::poly::commitment::CommitmentScheme; -use crate::transcript::TranscriptRead; use crate::{ arithmetic::best_multiexp, plonk::Error, @@ -12,12 +8,11 @@ use crate::{ commitment::MSM, strategy::{Guard, VerificationStrategy}, }, - transcript::EncodedChallenge, }; use ff::Field; use group::Curve; use halo2curves::CurveAffine; -use rand_core::{OsRng, RngCore}; +use rand_core::OsRng; /// Wrapper for verification accumulator #[derive(Debug, Clone)] diff --git a/halo2_proofs/src/poly/kzg/commitment.rs b/halo2_proofs/src/poly/kzg/commitment.rs index 253fd1b86a..d19b5a1048 100644 --- a/halo2_proofs/src/poly/kzg/commitment.rs +++ b/halo2_proofs/src/poly/kzg/commitment.rs @@ -1,8 +1,6 @@ -use crate::arithmetic::{ - best_fft, best_multiexp, g_to_lagrange, parallelize, CurveAffine, CurveExt, -}; +use crate::arithmetic::{best_multiexp, g_to_lagrange, parallelize}; use crate::helpers::SerdeCurveAffine; -use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier, MSM}; +use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier}; use crate::poly::{Coeff, LagrangeCoeff, Polynomial}; use crate::SerdeFormat; @@ -12,7 +10,6 @@ use pairing::Engine; use rand_core::{OsRng, RngCore}; use std::fmt::Debug; use std::marker::PhantomData; -use std::ops::{Add, AddAssign, Mul, MulAssign}; use std::io; @@ -346,21 +343,10 
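The rewritten accumulation above replaces "scale the whole MSM by x_1, then append the new term with coefficient 1" (which forces highest-power-first processing) with per-set state `(accumulator, running x_1 power)`, appending each term already multiplied by x_1^i and walking `commitment_map` in reverse so the powers increase. Both orderings produce the same linear combination Σ x_1^i · c_i; a scalar-only check of that equivalence, with plain `u64` values standing in for commitments:

```rust
/// Old style: process the highest-power term first; scale the accumulator by x1
/// before appending each new term with coefficient 1.
fn scale_then_append(terms_high_to_low: &[u64], x1: u64) -> u64 {
    let mut acc = 0u64;
    for &c in terms_high_to_low {
        acc = acc * x1 + c;
    }
    acc
}

/// New style: process terms in order of increasing power and append each term
/// pre-multiplied by the running power of x1, as the patched verifier does.
fn append_with_power(terms_high_to_low: &[u64], x1: u64) -> u64 {
    let mut acc = 0u64;
    let mut x1_power = 1u64;
    for &c in terms_high_to_low.iter().rev() {
        acc += c * x1_power;
        x1_power *= x1;
    }
    acc
}

fn main() {
    let terms = [7, 11, 13, 17]; // highest x1 power first
    let x1 = 5;
    assert_eq!(scale_then_append(&terms, x1), append_with_power(&terms, x1));
}
```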
@@ where #[cfg(test)] mod test { - use crate::arithmetic::{best_fft, best_multiexp, parallelize, CurveAffine, CurveExt}; use crate::poly::commitment::ParamsProver; - use crate::poly::commitment::{Blind, CommitmentScheme, Params, MSM}; - use crate::poly::kzg::commitment::{ParamsKZG, ParamsVerifierKZG}; - use crate::poly::kzg::msm::MSMKZG; - use crate::poly::kzg::multiopen::ProverSHPLONK; - use crate::poly::{Coeff, LagrangeCoeff, Polynomial}; - - use ff::{Field, PrimeField}; - use group::{prime::PrimeCurveAffine, Curve, Group}; - use halo2curves::bn256::G1Affine; - use std::marker::PhantomData; - use std::ops::{Add, AddAssign, Mul, MulAssign}; - - use std::io; + use crate::poly::commitment::{Blind, Params}; + use crate::poly::kzg::commitment::ParamsKZG; + use ff::Field; #[test] fn test_commit_lagrange() { @@ -391,13 +377,8 @@ mod test { fn test_parameter_serialisation_roundtrip() { const K: u32 = 4; - use ff::Field; - use rand_core::OsRng; - - use super::super::commitment::{Blind, Params}; - use crate::arithmetic::eval_polynomial; - use crate::halo2curves::bn256::{Bn256, Fr}; - use crate::poly::EvaluationDomain; + use super::super::commitment::Params; + use crate::halo2curves::bn256::Bn256; let params0 = ParamsKZG::::new(K); let mut data = vec![]; diff --git a/halo2_proofs/src/poly/kzg/msm.rs b/halo2_proofs/src/poly/kzg/msm.rs index 4116c8bd53..389c6fd605 100644 --- a/halo2_proofs/src/poly/kzg/msm.rs +++ b/halo2_proofs/src/poly/kzg/msm.rs @@ -1,6 +1,6 @@ use std::fmt::Debug; -use super::commitment::{KZGCommitmentScheme, ParamsKZG}; +use super::commitment::ParamsKZG; use crate::{ arithmetic::{best_multiexp, parallelize, CurveAffine}, poly::commitment::MSM, @@ -96,8 +96,6 @@ impl PreMSM { } pub(crate) fn normalize(self) -> MSMKZG { - use group::prime::PrimeCurveAffine; - let (scalars, bases) = self .projectives_msms .into_iter() diff --git a/halo2_proofs/src/poly/kzg/multiopen/gwc.rs b/halo2_proofs/src/poly/kzg/multiopen/gwc.rs index 8e7d742fc0..3fd28dd00a 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/gwc.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/gwc.rs @@ -4,20 +4,9 @@ mod verifier; pub use prover::ProverGWC; pub use verifier::VerifierGWC; -use crate::{ - arithmetic::{eval_polynomial, CurveAffine}, - poly::{ - commitment::{Params, ParamsVerifier}, - query::Query, - Coeff, Polynomial, - }, - transcript::ChallengeScalar, -}; +use crate::{poly::query::Query, transcript::ChallengeScalar}; use ff::Field; -use std::{ - collections::{BTreeMap, BTreeSet}, - marker::PhantomData, -}; +use std::marker::PhantomData; #[derive(Clone, Copy, Debug)] struct U {} diff --git a/halo2_proofs/src/poly/kzg/multiopen/gwc/prover.rs b/halo2_proofs/src/poly/kzg/multiopen/gwc/prover.rs index 4d772bfa75..f4e703244b 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/gwc/prover.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/gwc/prover.rs @@ -1,23 +1,18 @@ use super::{construct_intermediate_sets, ChallengeV, Query}; -use crate::arithmetic::{eval_polynomial, kate_division, powers, CurveAffine}; +use crate::arithmetic::{kate_division, powers}; use crate::helpers::SerdeCurveAffine; use crate::poly::commitment::ParamsProver; use crate::poly::commitment::Prover; use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::query::ProverQuery; -use crate::poly::Rotation; -use crate::poly::{ - commitment::{Blind, Params}, - Coeff, Polynomial, -}; +use crate::poly::{commitment::Blind, Polynomial}; use crate::transcript::{EncodedChallenge, TranscriptWrite}; -use ff::{Field, PrimeField}; use group::Curve; use 
pairing::Engine; use rand_core::RngCore; use std::fmt::Debug; -use std::io::{self, Write}; +use std::io; use std::marker::PhantomData; /// Concrete KZG prover with GWC variant diff --git a/halo2_proofs/src/poly/kzg/multiopen/gwc/verifier.rs b/halo2_proofs/src/poly/kzg/multiopen/gwc/verifier.rs index 62f16417d7..0b3ec99d8e 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/gwc/verifier.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/gwc/verifier.rs @@ -1,28 +1,20 @@ use std::fmt::Debug; -use std::io::Read; -use std::marker::PhantomData; use super::{construct_intermediate_sets, ChallengeU, ChallengeV}; -use crate::arithmetic::{eval_polynomial, lagrange_interpolate, powers, CurveAffine}; +use crate::arithmetic::powers; use crate::helpers::SerdeCurveAffine; use crate::poly::commitment::Verifier; use crate::poly::commitment::MSM; use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::kzg::msm::{DualMSM, MSMKZG}; -use crate::poly::kzg::strategy::{AccumulatorStrategy, GuardKZG, SingleStrategy}; +use crate::poly::kzg::strategy::GuardKZG; use crate::poly::query::Query; use crate::poly::query::{CommitmentReference, VerifierQuery}; -use crate::poly::strategy::VerificationStrategy; -use crate::poly::{ - commitment::{Params, ParamsVerifier}, - Error, -}; +use crate::poly::Error; use crate::transcript::{EncodedChallenge, TranscriptRead}; -use ff::{Field, PrimeField}; -use group::Group; -use pairing::{Engine, MillerLoopResult, MultiMillerLoop}; -use rand_core::OsRng; +use ff::Field; +use pairing::{Engine, MultiMillerLoop}; #[derive(Debug)] /// Concrete KZG verifier with GWC variant diff --git a/halo2_proofs/src/poly/kzg/multiopen/shplonk.rs b/halo2_proofs/src/poly/kzg/multiopen/shplonk.rs index 8a746ab402..7a814cd900 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/shplonk.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/shplonk.rs @@ -1,25 +1,18 @@ mod prover; mod verifier; -use std::{ - collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, - hash::Hash, - marker::PhantomData, - sync::Arc, -}; - -use crate::{ - arithmetic::{eval_polynomial, lagrange_interpolate, CurveAffine}, - poly::{query::Query, Coeff, Polynomial}, - transcript::ChallengeScalar, -}; +use std::hash::Hash; +use crate::multicore::IntoParallelIterator; +use crate::{poly::query::Query, transcript::ChallengeScalar}; use ff::Field; pub use prover::ProverSHPLONK; -use rayon::prelude::*; use rustc_hash::FxHashSet; pub use verifier::VerifierSHPLONK; +#[cfg(feature = "multicore")] +use crate::multicore::ParallelIterator; + #[derive(Clone, Copy, Debug)] struct U {} type ChallengeU = ChallengeScalar; @@ -129,7 +122,8 @@ where .into_par_iter() .map(|commitment| { let evals: Vec = rotations_vec - .par_iter() + .as_slice() + .into_par_iter() .map(|&&rotation| get_eval(commitment, rotation)) .collect(); Commitment((commitment, evals)) @@ -151,18 +145,10 @@ where #[cfg(test)] mod proptests { - use proptest::{ - collection::vec, - prelude::*, - sample::{select, subsequence}, - strategy::Strategy, - }; - use super::{construct_intermediate_sets, Commitment, IntermediateSets}; - use crate::poly::Rotation; - use ff::{Field, FromUniformBytes}; + use ff::FromUniformBytes; use halo2curves::bn256::Fr; - use std::collections::BTreeMap; + use proptest::{collection::vec, prelude::*, sample::select}; use std::convert::TryFrom; #[derive(Debug, Clone)] diff --git a/halo2_proofs/src/poly/kzg/multiopen/shplonk/prover.rs b/halo2_proofs/src/poly/kzg/multiopen/shplonk/prover.rs index 0a98ee0af6..d8d598c098 100644 --- 
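The SHPLONK hunks above route parallelism through the crate's `multicore` facade instead of `rayon::prelude`, importing `ParallelIterator` only under the `multicore` feature and calling `as_slice().into_par_iter()` so the same call sites compile either way. A sketch of how such a facade can be shaped (module and trait layout are my own assumption here, gated on a hypothetical `multicore` Cargo feature with an optional `rayon` dependency; the crate's actual `multicore` module is not part of this diff):

```rust
// Facade sketch: one `multicore` module, two implementations selected by feature.
mod multicore {
    #[cfg(feature = "multicore")]
    pub use rayon::prelude::{IntoParallelIterator, ParallelIterator};

    // Sequential fallback: give slices an `into_par_iter` that is just `iter`,
    // so `.as_slice().into_par_iter().map(..).collect()` compiles unchanged.
    #[cfg(not(feature = "multicore"))]
    pub trait IntoParallelIterator {
        type Item;
        type Iter: Iterator<Item = Self::Item>;
        fn into_par_iter(self) -> Self::Iter;
    }

    #[cfg(not(feature = "multicore"))]
    impl<'a, T: Sync> IntoParallelIterator for &'a [T] {
        type Item = &'a T;
        type Iter = std::slice::Iter<'a, T>;
        fn into_par_iter(self) -> Self::Iter {
            self.iter()
        }
    }
}

use multicore::IntoParallelIterator;
#[cfg(feature = "multicore")]
use multicore::ParallelIterator; // brings `map`/`collect` for the parallel case

fn squares(xs: &[u64]) -> Vec<u64> {
    // Identical call site whether or not the feature is enabled.
    xs.into_par_iter().map(|x| x * x).collect()
}

fn main() {
    assert_eq!(squares(&[1, 2, 3]), vec![1, 4, 9]);
}
```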
a/halo2_proofs/src/poly/kzg/multiopen/shplonk/prover.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/shplonk/prover.rs @@ -1,11 +1,10 @@ use std::fmt::Debug; use std::hash::Hash; -use std::io::{self, Write}; use std::marker::PhantomData; use std::ops::MulAssign; use super::{ - construct_intermediate_sets, ChallengeU, ChallengeV, ChallengeY, Commitment, Query, RotationSet, + construct_intermediate_sets, ChallengeU, ChallengeV, ChallengeY, Commitment, RotationSet, }; use crate::arithmetic::{ eval_polynomial, evaluate_vanishing_polynomial, kate_division, lagrange_interpolate, @@ -15,16 +14,19 @@ use crate::helpers::SerdeCurveAffine; use crate::poly::commitment::{Blind, ParamsProver, Prover}; use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::query::{PolynomialPointer, ProverQuery}; -use crate::poly::Rotation; -use crate::poly::{commitment::Params, Coeff, Polynomial}; +use crate::poly::{Coeff, Polynomial}; use crate::transcript::{EncodedChallenge, TranscriptWrite}; -use ff::{Field, PrimeField}; +use crate::multicore::IntoParallelIterator; +use ff::Field; use group::Curve; use pairing::Engine; use rand_core::RngCore; -use rayon::prelude::*; -use rustc_hash::FxHashSet; + +use std::io; + +#[cfg(feature = "multicore")] +use crate::multicore::ParallelIterator; fn div_by_vanishing(poly: Polynomial, roots: &[F]) -> Vec { let poly = roots @@ -139,40 +141,41 @@ where // for different sets that are already combined with anoter challenge let y: ChallengeY<_> = transcript.squeeze_challenge_scalar(); - let quotient_contribution = - |rotation_set: &RotationSetExtension| -> Polynomial { - // [P_i_0(X) - R_i_0(X), P_i_1(X) - R_i_1(X), ... ] - let numerators = rotation_set - .commitments - .par_iter() - .map(|commitment| commitment.quotient_contribution()) - .collect::>(); - - // define numerator polynomial as - // N_i_j(X) = (P_i_j(X) - R_i_j(X)) - // and combine polynomials with same evaluation point set - // N_i(X) = linear_combinination(y, N_i_j(X)) - // where y is random scalar to combine numerator polynomials - let n_x = numerators - .into_iter() - .zip(powers(*y)) - .map(|(numerator, power_of_y)| numerator * power_of_y) - .reduce(|acc, numerator| acc + &numerator) - .unwrap(); - - let points = &rotation_set.points[..]; - - // quotient contribution of this evaluation set is - // Q_i(X) = N_i(X) / Z_i(X) where - // Z_i(X) = (x - r_i_0) * (x - r_i_1) * ... - let mut poly = div_by_vanishing(n_x, points); - poly.resize(self.params.n as usize, E::Fr::ZERO); - - Polynomial { - values: poly, - _marker: PhantomData, - } - }; + let quotient_contribution = |rotation_set: &RotationSetExtension| { + // [P_i_0(X) - R_i_0(X), P_i_1(X) - R_i_1(X), ... ] + #[allow(clippy::needless_collect)] + let numerators = rotation_set + .commitments + .as_slice() + .into_par_iter() + .map(|commitment| commitment.quotient_contribution()) + .collect::>(); + + // define numerator polynomial as + // N_i_j(X) = (P_i_j(X) - R_i_j(X)) + // and combine polynomials with same evaluation point set + // N_i(X) = linear_combinination(y, N_i_j(X)) + // where y is random scalar to combine numerator polynomials + let n_x = numerators + .into_iter() + .zip(powers(*y)) + .map(|(numerator, power_of_y)| numerator * power_of_y) + .reduce(|acc, numerator| acc + &numerator) + .unwrap(); + + let points = &rotation_set.points[..]; + + // quotient contribution of this evaluation set is + // Q_i(X) = N_i(X) / Z_i(X) where + // Z_i(X) = (x - r_i_0) * (x - r_i_1) * ... 
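`div_by_vanishing` in the hunk above strips one linear factor at a time via `kate_division`: dividing by Z_i(X) = ∏ (X - r) is a fold over the roots. A small `f64` sketch of that fold, using synthetic division in place of the crate's field-based `kate_division` (illustration only):

```rust
/// Synthetic division of `poly` (coefficients in increasing-degree order) by
/// the linear factor (X - root). Assumes the division is exact, as it is when
/// the numerator vanishes on the root set.
fn div_by_linear(poly: &[f64], root: f64) -> Vec<f64> {
    let n = poly.len();
    let mut quotient = vec![0.0; n - 1];
    let mut acc = 0.0;
    for k in (1..n).rev() {
        acc = poly[k] + root * acc;
        quotient[k - 1] = acc;
    }
    quotient
}

/// Dividing by Z(X) = (X - r_0)(X - r_1)... is a fold over the roots,
/// removing one linear factor per step.
fn div_by_vanishing(poly: Vec<f64>, roots: &[f64]) -> Vec<f64> {
    roots.iter().fold(poly, |p, &r| div_by_linear(&p, r))
}

fn main() {
    // p(X) = (X - 1)(X - 2)(X + 4) = X^3 + X^2 - 10X + 8
    let p = vec![8.0, -10.0, 1.0, 1.0];
    // Dividing out the roots {1, 2} must leave X + 4.
    assert_eq!(div_by_vanishing(p, &[1.0, 2.0]), vec![4.0, 1.0]);
}
```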
+ let mut poly = div_by_vanishing(n_x, points); + poly.resize(self.params.n as usize, E::Fr::ZERO); + + Polynomial { + values: poly, + _marker: PhantomData, + } + }; let intermediate_sets = construct_intermediate_sets(queries); let (rotation_sets, super_point_set) = ( @@ -185,7 +188,8 @@ where .map(|rotation_set| { let commitments: Vec> = rotation_set .commitments - .par_iter() + .as_slice() + .into_par_iter() .map(|commitment_data| commitment_data.extend(&rotation_set.points)) .collect(); rotation_set.extend(commitments) @@ -194,8 +198,10 @@ where let v: ChallengeV<_> = transcript.squeeze_challenge_scalar(); + #[allow(clippy::needless_collect)] let quotient_polynomials = rotation_sets - .par_iter() + .as_slice() + .into_par_iter() .map(quotient_contribution) .collect::>(); @@ -210,42 +216,43 @@ where transcript.write_point(h)?; let u: ChallengeU<_> = transcript.squeeze_challenge_scalar(); - let linearisation_contribution = - |rotation_set: RotationSetExtension| -> (Polynomial, E::Fr) { - let mut diffs = super_point_set.clone(); - for point in rotation_set.points.iter() { - diffs.remove(point); - } - let diffs = diffs.into_iter().collect::>(); - - // calculate difference vanishing polynomial evaluation - let z_i = evaluate_vanishing_polynomial(&diffs[..], *u); - - // inner linearisation contibutions are - // [P_i_0(X) - r_i_0, P_i_1(X) - r_i_1, ... ] where - // r_i_j = R_i_j(u) is the evaluation of low degree equivalent polynomial - // where u is random evaluation point - let inner_contributions = rotation_set - .commitments - .par_iter() - .map(|commitment| commitment.linearisation_contribution(*u)) - .collect::>(); - - // define inner contributor polynomial as - // L_i_j(X) = (P_i_j(X) - r_i_j) - // and combine polynomials with same evaluation point set - // L_i(X) = linear_combinination(y, L_i_j(X)) - // where y is random scalar to combine inner contibutors - let l_x: Polynomial = inner_contributions - .into_iter() - .zip(powers(*y)) - .map(|(poly, power_of_y)| poly * power_of_y) - .reduce(|acc, poly| acc + &poly) - .unwrap(); - - // finally scale l_x by difference vanishing polynomial evaluation z_i - (l_x * z_i, z_i) - }; + let linearisation_contribution = |rotation_set: RotationSetExtension| { + let mut diffs = super_point_set.clone(); + for point in rotation_set.points.iter() { + diffs.remove(point); + } + let diffs = diffs.into_iter().collect::>(); + + // calculate difference vanishing polynomial evaluation + let z_i = evaluate_vanishing_polynomial(&diffs[..], *u); + + // inner linearisation contibutions are + // [P_i_0(X) - r_i_0, P_i_1(X) - r_i_1, ... 
] where + // r_i_j = R_i_j(u) is the evaluation of low degree equivalent polynomial + // where u is random evaluation point + #[allow(clippy::needless_collect)] + let inner_contributions = rotation_set + .commitments + .as_slice() + .into_par_iter() + .map(|commitment| commitment.linearisation_contribution(*u)) + .collect::>(); + + // define inner contributor polynomial as + // L_i_j(X) = (P_i_j(X) - r_i_j) + // and combine polynomials with same evaluation point set + // L_i(X) = linear_combinination(y, L_i_j(X)) + // where y is random scalar to combine inner contibutors + let l_x: Polynomial = inner_contributions + .into_iter() + .zip(powers(*y)) + .map(|(poly, power_of_y)| poly * power_of_y) + .reduce(|acc, poly| acc + &poly) + .unwrap(); + + // finally scale l_x by difference vanishing polynomial evaluation z_i + (l_x * z_i, z_i) + }; #[allow(clippy::type_complexity)] let (linearisation_contibutions, z_diffs): ( diff --git a/halo2_proofs/src/poly/kzg/multiopen/shplonk/verifier.rs b/halo2_proofs/src/poly/kzg/multiopen/shplonk/verifier.rs index 0fe1bcfe2f..c10269ce82 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/shplonk/verifier.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/shplonk/verifier.rs @@ -1,11 +1,10 @@ use std::fmt::Debug; use std::hash::Hash; -use std::io::Read; use super::ChallengeY; use super::{construct_intermediate_sets, ChallengeU, ChallengeV}; use crate::arithmetic::{ - eval_polynomial, evaluate_vanishing_polynomial, lagrange_interpolate, powers, CurveAffine, + eval_polynomial, evaluate_vanishing_polynomial, lagrange_interpolate, powers, }; use crate::helpers::SerdeCurveAffine; use crate::poly::commitment::Verifier; @@ -13,19 +12,12 @@ use crate::poly::commitment::MSM; use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::kzg::msm::DualMSM; use crate::poly::kzg::msm::{PreMSM, MSMKZG}; -use crate::poly::kzg::strategy::{AccumulatorStrategy, GuardKZG, SingleStrategy}; -use crate::poly::query::Query; +use crate::poly::kzg::strategy::GuardKZG; use crate::poly::query::{CommitmentReference, VerifierQuery}; -use crate::poly::strategy::VerificationStrategy; -use crate::poly::{ - commitment::{Params, ParamsVerifier}, - Error, -}; +use crate::poly::Error; use crate::transcript::{EncodedChallenge, TranscriptRead}; -use ff::{Field, PrimeField}; -use group::Group; -use pairing::{Engine, MillerLoopResult, MultiMillerLoop}; -use rand_core::OsRng; +use ff::Field; +use pairing::{Engine, MultiMillerLoop}; use std::ops::MulAssign; /// Concrete KZG multiopen verifier with SHPLONK variant diff --git a/halo2_proofs/src/poly/kzg/strategy.rs b/halo2_proofs/src/poly/kzg/strategy.rs index 9ee93b130f..afbc596bd7 100644 --- a/halo2_proofs/src/poly/kzg/strategy.rs +++ b/halo2_proofs/src/poly/kzg/strategy.rs @@ -1,25 +1,19 @@ -use std::{fmt::Debug, marker::PhantomData}; - use super::{ commitment::{KZGCommitmentScheme, ParamsKZG}, - msm::{DualMSM, MSMKZG}, - multiopen::VerifierGWC, + msm::DualMSM, }; use crate::{ helpers::SerdeCurveAffine, plonk::Error, poly::{ - commitment::{Verifier, MSM}, - ipa::msm::MSMIPA, + commitment::Verifier, strategy::{Guard, VerificationStrategy}, }, - transcript::{EncodedChallenge, TranscriptRead}, }; -use ff::{Field, PrimeField}; -use group::Group; -use halo2curves::CurveAffine; -use pairing::{Engine, MillerLoopResult, MultiMillerLoop}; +use ff::Field; +use pairing::{Engine, MultiMillerLoop}; use rand_core::OsRng; +use std::fmt::Debug; /// Wrapper for linear verification accumulator #[derive(Debug, Clone)] diff --git 
a/halo2_proofs/src/poly/multiopen_test.rs b/halo2_proofs/src/poly/multiopen_test.rs index b7c36d908e..47c6731167 100644 --- a/halo2_proofs/src/poly/multiopen_test.rs +++ b/halo2_proofs/src/poly/multiopen_test.rs @@ -2,33 +2,28 @@ mod test { use crate::arithmetic::eval_polynomial; use crate::plonk::Error; + use crate::poly::commitment::Blind; use crate::poly::commitment::ParamsProver; - use crate::poly::commitment::{Blind, ParamsVerifier, MSM}; - use crate::poly::query::PolynomialPointer; use crate::poly::{ commitment::{CommitmentScheme, Params, Prover, Verifier}, query::{ProverQuery, VerifierQuery}, strategy::VerificationStrategy, EvaluationDomain, }; - use crate::poly::{Coeff, Polynomial}; use crate::transcript::{ - self, Blake2bRead, Blake2bWrite, Challenge255, EncodedChallenge, Keccak256Read, - Keccak256Write, TranscriptRead, TranscriptReadBuffer, TranscriptWrite, - TranscriptWriterBuffer, + Blake2bRead, Blake2bWrite, Challenge255, EncodedChallenge, Keccak256Read, Keccak256Write, + TranscriptReadBuffer, TranscriptWriterBuffer, }; - use ff::{Field, PrimeField, WithSmallOrderMulGroup}; - use group::{Curve, Group}; - use halo2curves::CurveAffine; - use rand_core::{OsRng, RngCore}; - use std::io::{Read, Write}; + use ff::WithSmallOrderMulGroup; + use group::Curve; + use rand_core::OsRng; #[test] fn test_roundtrip_ipa() { use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA}; use crate::poly::ipa::multiopen::{ProverIPA, VerifierIPA}; use crate::poly::ipa::strategy::AccumulatorStrategy; - use halo2curves::pasta::{Ep, EqAffine, Fp}; + use halo2curves::pasta::EqAffine; const K: u32 = 4; @@ -65,7 +60,7 @@ mod test { use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA}; use crate::poly::ipa::multiopen::{ProverIPA, VerifierIPA}; use crate::poly::ipa::strategy::AccumulatorStrategy; - use halo2curves::pasta::{Ep, EqAffine, Fp}; + use halo2curves::pasta::EqAffine; const K: u32 = 4; @@ -102,8 +97,7 @@ mod test { use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::kzg::multiopen::{ProverGWC, VerifierGWC}; use crate::poly::kzg::strategy::AccumulatorStrategy; - use halo2curves::bn256::{Bn256, G1Affine}; - use pairing::Engine; + use halo2curves::bn256::Bn256; const K: u32 = 4; @@ -134,8 +128,7 @@ mod test { use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::kzg::multiopen::{ProverSHPLONK, VerifierSHPLONK}; use crate::poly::kzg::strategy::AccumulatorStrategy; - use halo2curves::bn256::{Bn256, G1Affine}; - use pairing::Engine; + use halo2curves::bn256::Bn256; const K: u32 = 4; diff --git a/halo2_proofs/src/poly/query.rs b/halo2_proofs/src/poly/query.rs index c596e6a71c..b9894edd38 100644 --- a/halo2_proofs/src/poly/query.rs +++ b/halo2_proofs/src/poly/query.rs @@ -1,11 +1,10 @@ -use std::{fmt::Debug, ops::Deref}; +use std::fmt::Debug; -use super::commitment::{Blind, CommitmentScheme, Params, MSM}; +use super::commitment::{Blind, MSM}; use crate::{ arithmetic::eval_polynomial, - poly::{commitment, Coeff, Polynomial}, + poly::{Coeff, Polynomial}, }; -use ff::Field; use halo2curves::CurveAffine; pub trait Query: Sized + Clone + Send + Sync { @@ -100,6 +99,7 @@ impl<'com, C: CurveAffine, M: MSM> Clone for VerifierQuery<'com, C, M> { } } +#[allow(clippy::upper_case_acronyms)] #[derive(Clone, Debug)] pub enum CommitmentReference<'r, C: CurveAffine, M: MSM> { Commitment(&'r C), diff --git a/halo2_proofs/src/poly/strategy.rs b/halo2_proofs/src/poly/strategy.rs index 36480d372f..850f95e6c9 100644 --- 
a/halo2_proofs/src/poly/strategy.rs
+++ b/halo2_proofs/src/poly/strategy.rs
@@ -1,11 +1,5 @@
-use halo2curves::CurveAffine;
-use rand_core::RngCore;
-
-use super::commitment::{CommitmentScheme, Verifier, MSM};
-use crate::{
-    plonk::Error,
-    transcript::{EncodedChallenge, TranscriptRead},
-};
+use super::commitment::{CommitmentScheme, Verifier};
+use crate::plonk::Error;
 
 /// Guards is unfinished verification result. Implement this to construct various
 /// verification strategies such as aggregation and recursion.
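The doc comment kept above captures the idea behind `Guard`/`VerificationStrategy`: a verification outcome can either be forced immediately or folded into an accumulator and decided once, which is what makes batch and recursive verification possible. A minimal sketch of that shape with toy types (my own trait and names, not the crate's exact signatures):

```rust
/// A guard is an unfinished verification result: it can be forced on the spot,
/// or folded into an accumulator and decided once at the end (batching).
trait Guard {
    fn force(self) -> bool;
}

/// Toy guard: "verification" reduces to checking that a value is zero.
struct ZeroCheck(i64);

impl Guard for ZeroCheck {
    fn force(self) -> bool {
        self.0 == 0
    }
}

/// Accumulating strategy: random-linear-combine many guards, check once.
struct Accumulator {
    acc: i64,
    challenge: i64,
}

impl Accumulator {
    fn process(mut self, guard: ZeroCheck) -> Self {
        // If every guard would pass, the combination stays zero; a single
        // final check then stands in for many individual ones.
        self.acc = self.acc * self.challenge + guard.0;
        self
    }

    fn finalize(self) -> bool {
        self.acc == 0
    }
}

fn main() {
    // "Single" strategy: force each guard immediately.
    assert!(ZeroCheck(0).force());

    // "Accumulator" strategy: defer the decision to one finalize call.
    let strategy = Accumulator { acc: 0, challenge: 7 }
        .process(ZeroCheck(0))
        .process(ZeroCheck(0));
    assert!(strategy.finalize());
}
```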