From 0008b866c1b458bf7129fcd88e877d5b58c25e2f Mon Sep 17 00:00:00 2001
From: zhenfei
Date: Fri, 12 Jan 2024 06:36:03 -0500
Subject: [PATCH] [wip] Sync halo2 lib 0.4.0 (#80)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* feat: call synthesize in `MockProver` multiple times to behave same as real prover
* modify previous commit
* Expose mod `permutation` and re-export `permutation::keygen::Assembly` (#149)
* feat: expose module `permutation` and re-export `permutation::keygen::Assembly`
* feat: derive `Clone` for `permutation::keygen::Assembly`
* feat: bump MSRV for `inferno`
* change: Migrate workspace to pasta_curves-0.5 (#157)
* change: Migrate workspace to pasta_curves-0.5

This ports the majority of the workspace to the `pasta_curves-0.5.0` release, leaving some
tricky edge-cases that we need to handle carefully.

Resolves: #132

* fix: Complete latest trait bounds to compile halo2proofs
* change: Migrate examples & benches to pasta 0.5
* change: Migrate halo2_gadgets to pasta-0.5
* change: Update gadgets outdated code with latest upstream
* fix: Sha3 gadget circuit
* fix: doc tests
* chore: Update merged main
* fix: Apply review suggestions
* fix previous commit
* Extend Circuit trait to take parameters in config (#168)
* Extend Circuit trait to take parameters in config

The Circuit trait is extended with the following:

```
pub trait Circuit<F: Field> {
    /// [...]
    type Params: Default;

    fn params(&self) -> Self::Params {
        Self::Params::default()
    }

    fn configure_with_params(
        meta: &mut ConstraintSystem<F>,
        params: &Self::Params,
    ) -> Self::Config {
        Self::configure(meta)
    }

    fn configure(meta: &mut ConstraintSystem<F>) -> Self::Config;
}
```

This allows runtime parametrization of the circuit configuration. The extension to the
Circuit trait has been designed to minimize the breaking change: existing circuits only
need to define the associated `type Params`. Unfortunately "Associated type defaults"
are unstable in Rust, otherwise this would be a non-breaking change.
See https://github.com/rust-lang/rust/issues/29661
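As an illustration of the new interface, here is a minimal sketch against this fork's API. The `MyParams`, `MyConfig` and `MyCircuit` names are hypothetical, the `circuit-params` feature is assumed, and `MyParams` derives `Default` to satisfy `type Params: Default`:

```
use halo2_proofs::{
    circuit::{Layouter, SimpleFloorPlanner},
    plonk::{Advice, Circuit, Column, ConstraintSystem, Error},
};
use halo2curves::pasta::Fp;

/// Runtime parameters; `Default` is required by `type Params: Default`.
#[derive(Clone, Debug, Default)]
struct MyParams {
    num_advice: usize,
}

#[derive(Clone, Debug)]
struct MyConfig {
    advice: Vec<Column<Advice>>,
}

struct MyCircuit {
    num_advice: usize,
}

impl Circuit<Fp> for MyCircuit {
    type Config = MyConfig;
    type FloorPlanner = SimpleFloorPlanner;
    // The new associated type; `()` keeps existing circuits source-compatible.
    #[cfg(feature = "circuit-params")]
    type Params = MyParams;

    fn without_witnesses(&self) -> Self {
        Self { num_advice: self.num_advice }
    }

    // Parameters are read from the circuit instance...
    #[cfg(feature = "circuit-params")]
    fn params(&self) -> Self::Params {
        MyParams { num_advice: self.num_advice }
    }

    // ...and drive configuration when the `circuit-params` feature is enabled.
    #[cfg(feature = "circuit-params")]
    fn configure_with_params(meta: &mut ConstraintSystem<Fp>, params: &Self::Params) -> Self::Config {
        let advice: Vec<Column<Advice>> =
            (0..params.num_advice).map(|_| meta.advice_column()).collect();
        MyConfig { advice }
    }

    // Fallback path used when the feature is disabled.
    fn configure(meta: &mut ConstraintSystem<Fp>) -> Self::Config {
        MyConfig { advice: vec![meta.advice_column()] }
    }

    fn synthesize(&self, _config: Self::Config, _layouter: impl Layouter<Fp>) -> Result<(), Error> {
        Ok(())
    }
}
```

With the feature disabled, the same circuit still compiles unchanged through the plain `configure` path.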
* Implement circuit params under feature flag
* Don't overwrite configure method
* Fix doc test
* Allow halo2 constraint names to have non static names (#156)
* static ref to String type in Gates, Constraints, VirtualCell, Argument
* 'lookup'.to_string()
* return &str for gate name and constraint_name, also run fmt
* Update halo2_gadgets/Cargo.toml
Co-authored-by: Han
* upgrade rust-toolchain
---------
Co-authored-by: Carlos Pérez <37264926+CPerezz@users.noreply.github.com>
Co-authored-by: Han
* Improve halo2 query calls (#154)
* return expression from cell
* add example
* selector
* recurse Expression to fill in index
* minimized changes from the original
* backward compatible meta.query_X & challenge.expr()
* cargo fmt
* fixed lookup to pass all tests
* Update comments
Co-authored-by: Brecht Devos
* Update comments
Co-authored-by: Brecht Devos
* Update comments
Co-authored-by: Brecht Devos
* Update comments
Co-authored-by: Brecht Devos
* Update comments
Co-authored-by: Brecht Devos
* Update comments
Co-authored-by: Brecht Devos
* update
Co-authored-by: Brecht Devos
* add primitives.rs back
* remove example2
* backward compatible meta.query_X & Column.cur(), next(), prev(), at(usize)
* impl Debug & make side effects only when query.index.is_none()
* change impl Debug for Expression instead & revert test in plonk_api
* upgrade rust-toolchain
* Update halo2_proofs/src/plonk/circuit.rs
Co-authored-by: Han
* Update halo2_proofs/src/plonk/circuit.rs
Co-authored-by: Han
* ran clippy
* Update halo2_proofs/src/plonk/circuit.rs
Co-authored-by: Han
---------
Co-authored-by: Brecht Devos
Co-authored-by: Han
* Implement Clone trait for Hash, Absorbing, and Sponge structs (#171)
* fix: Fix serialization for VerifyingKey (#178)
Now the value returned when the number of selectors is a multiple of 8 is correct.
Resolves: #175
* Add more getters to expose internal fields
* add a constructor (#164)
* add a constructor
* add more comment
* fix as review
* remove clone
* remove
* no need to use new variable
* change comment
* fix clippy
* rename to from_parts
* remove n declaration
* feat: send sync region (#180)
* feat: send / sync region
* Update layout.rs
* update
* lol
* debug
* Update keygen.rs
* Update keygen.rs
* Update keygen.rs
* Update keygen.rs
* thread-safe-region feature flag
* cleanup
* patch dev-graph
* patch non-determinism in mapping creation
* reduce mem usage for vk and pk
* mock proving examples
* swap for hashmap for insertion speed
* reduce update overhead
* replace BTree with Vec
* add benchmarks
* make the benchmarks massive
* patch clippy
* simplify lifetimes
* patch benches
* Update halo2_proofs/src/plonk/permutation/keygen.rs
Co-authored-by: Han
* Update halo2_proofs/examples/vector-mul.rs
Co-authored-by: Han
* rm benches
* order once
* patch lints
---------
Co-authored-by: Han
* fix previous commit
* Fix `parallelize` workload imbalance (#186)
* fix parallelize workload imbalance
* remove the need of unsafe
* Updates halo2_curves dependency to released package (#190)
The package release resets the version from those inherited by the legacy halo2curves repo's fork history.
The upstream diff is: https://github.com/privacy-scaling-explorations/halo2curves/compare/9f5c50810bbefe779ee5cf1d852b2fe85dc35d5e..9a7f726fa74c8765bc7cdab11519cf285d169ecf
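For the query-call improvements in #154 above, the change is easiest to see on a small gate: columns now yield `Expression`s directly via `cur()`, `next()`, `prev()` and `at(usize)`, while the old `meta.query_*` calls keep working. A minimal sketch with hypothetical columns, not taken from this patch:

```
use halo2_proofs::{
    plonk::{Advice, Column, ConstraintSystem},
    poly::Rotation,
};
use halo2curves::pasta::Fp;

fn configure_product_gate(meta: &mut ConstraintSystem<Fp>, a: Column<Advice>, b: Column<Advice>) {
    // Old style: every cell is queried through the closure's `meta` handle.
    meta.create_gate("a_next = a * b (query style)", |meta| {
        let a_cur = meta.query_advice(a, Rotation::cur());
        let b_cur = meta.query_advice(b, Rotation::cur());
        let a_next = meta.query_advice(a, Rotation::next());
        vec![a_next - a_cur * b_cur]
    });

    // New style: the columns produce expressions themselves; the query index is
    // filled in when the gate is registered, so the closure argument can be ignored.
    meta.create_gate("a_next = a * b (expression style)", |_| {
        vec![a.next() - a.cur() * b.cur()]
    });
}
```

The shuffle example further down in this patch migrates its gates to the same style, e.g. `q_first.expr() * (one - z.cur())`.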
* fix: explicitly define mds diff type (#196)
* fix: explicitly define mds diff type
* rm paren
* feat: expose `transcript_repr` of `VerifyingKey` and reduce the trait constraint (#200)
* implement native shuffle argument and api
fix: remove nonsense comment
strictly check shuffle rows
address doc typos
move compression into product commitment
typo
add shuffle errors for `verify_at_rows_par`
dedup expression evaluation
cargo fmt
fix fields in sanity-checks feature
* feat: public cells to allow for implementations of custom `Layouter` (#192)
* feat: public cells
* Update mds.rs
* Update mds.rs
* Update single_pass.rs
Co-authored-by: Han
* bump toolchain to resolve errors
* fix clippy errors for CI run
* rustfmt post clippy
* plz let it be the last lint
* patch clippy lints in gadgets
* clippy lints for sha256 bench
* patch halo2proof benches
* Update assigned.rs
* Update halo2_gadgets/src/poseidon/primitives/mds.rs
Co-authored-by: Han
* Update halo2_gadgets/src/poseidon/primitives/mds.rs
Co-authored-by: Han
---------
Co-authored-by: Han
* Synchronize with upstream (#199)
* refactor: add default impl for `SyncDeps` for backward compatibility
* feat: pick changes from https://github.com/zcash/halo2/pull/728 and changes of flag `test-dev-graph`
* feat: pick changes from https://github.com/zcash/halo2/pull/622
* feat: pick changes about mod `circuit` and mod `dev`
* feat: pick rest changes of `halo2_proofs`
* fix: when `--no-default-features`
* ci: sync from upstream, and deduplicate jobs when push to `main`, and remove always failing job `codecov`.
* fix: make `commit_zk` runnable when `--no-default-features`
* chore: Update rust-toolchain to 1.66 for testing (#208)
* chore: Update rust-toolchain to 1.66 for testing
Note that tests will not compile due to the silent MSRV bump in `blake2b_simd`.
Hence, we need to use `1.66` as toolchain.
Resolves: #207
* change: Update MSRVs in Cargo.toml
* fix: clippy (#203)
* fix: clippy
* fmt
* fix: Final clippy complaints & adjustments
---------
Co-authored-by: CPerezz
* Implement Sum and Product for Expression (#209)
* Make it Eq to make it easier for tests
* Implement Sum and Product for Expression
* Make it readable
* chore: update poseidon dependency
* fix: compiling bug with features=parallel_syn
* feat(MockProver): replace errors by asserts (privacy-scaling-explorations#150)
* boundary offset lost when resolving conflict
* disable multiphase prover
* Sync halo2 lib 0.4.0 merging (#81)
* Use thread pool for assign_regions (#57)
* feat: use rayon threadpool
* feat: add UT for many subregions
* refact: move common struct out to module level
* refact: reuse common configure code
* fix ci errors
---------
Co-authored-by: kunxian xia
* Move `env_logger` dependency to dev-dependencies (only for test). (#69)
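The `Sum` and `Product` impls from #209 above let constraint terms be combined with the standard iterator adapters instead of a manual fold. A minimal sketch with hypothetical columns, assuming the impls operate on owned `Expression` values:

```
use halo2_proofs::{
    plonk::{Advice, Column, ConstraintSystem, Expression, Selector},
    poly::Rotation,
};
use halo2curves::pasta::Fp;

fn configure_sum_gate(
    meta: &mut ConstraintSystem<Fp>,
    s: Selector,
    terms: [Column<Advice>; 4],
    total: Column<Advice>,
) {
    meta.create_gate("advice columns sum to total", |meta| {
        let s = meta.query_selector(s);
        let total = meta.query_advice(total, Rotation::cur());
        // `Expression: Sum` collapses the queried terms; `Product` works the same way.
        let sum: Expression<Fp> = terms
            .iter()
            .map(|&col| meta.query_advice(col, Rotation::cur()))
            .sum();
        vec![s * (sum - total)]
    });
}
```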
* sync ff/group 0.13
* fix clippy
* fix clippy
* fmt
* [FEAT] Upgrading table16 for SHA256 (#73)
* upgrade sha256
* fix clippy
* Bus auto (#72)
* bus: expose global offset of regions
* bus-auto: add query_advice and query_fixed function in witness generation
* bus-auto: fix clippy
---------
Co-authored-by: Aurélien Nicolas
* fix-tob-scroll-21 (#59)
* fix-tob-scroll-21
* expose param field for re-randomization
* enable accessing for table16 (#75)
* chore: update poseidon link
* merge sha256 gadget changes
* Fix the CI errors (#78)
* cargo fmt
* fix clippy error
* Feat: switch to logup scheme for lookup argument (#71)
* Multi-input mv-lookup. (#49)
* Add mv_lookup.rs
* mv_lookup::prover, mv_lookup::verifier
* Replace lookup with mv_lookup
* replace halo2 with mv lookup
Co-authored-by: ying tong
* cleanups
Co-authored-by: ying tong
* ConstraintSystem: setup lookup_tracker
Co-authored-by: Andrija
* mv_lookup::hybrid_prover
Co-authored-by: Andrija
* WIP
* mv_multi_lookup: enable lookup caching
Co-authored-by: therealyingtong
* Rename hybrid_lookup -> lookup
* Chunk lookups using user-provided minimum degree
Co-authored-by: Andrija
* mv_lookup bench
Co-authored-by: Andrija
* Introduce counter feature for FFTs and MSMs
Co-authored-by: Andrija
* Fix off-by-one errors in chunk_lookup
Co-authored-by: Andrija
* bench wip
* time evaluate_h
* KZG
* more efficient batch inversion
* extended lookup example
* Finalize mv lookup
Author: therealyingtong
* Remove main/
* Fix according to the comments
* replace scan with parallel grand sum computation
* Revert Cargo.lock
* mv lookup Argument name
* parallel batch invert
---------
Co-authored-by: Andrija
Co-authored-by: ying tong
Co-authored-by: therealyingtong
* fmt
* fix unit test
* fix clippy errors
* add todo in mv_lookup's prover
* fmt and clippy
* fix clippy
* add detailed running time of steps in logup's prover
* fmt
* add more log hooks
* more running time logs
* use par invert
* use sorted-vector to store how many times a table element occurs in input
* par the process to get inputs_inv_sum
* use par
* fix par
* add feature to skip inv sums
* add new feature flag
* fix clippy error
---------
Co-authored-by: Sphere L
Co-authored-by: Andrija
Co-authored-by: ying tong
Co-authored-by: therealyingtong
* fix some simple building errs
* upgrade pathfinder_simd to newer version as it can't compile on mac m1 pro
* resolve merge conflict
* fmt
* clippy
* more clippy fix
* more lint fix
* fmt
* minor syntax fix
* fix ipa multiopen test failure
* fix clippy warning
* fmt
* fix par scan of log_inv diff
* remove unnecessary clone
---------
Co-authored-by: alannotnerd
Co-authored-by: kunxian xia
Co-authored-by: Steven
Co-authored-by: Carlos Pérez <37264926+CPerezz@users.noreply.github.com>
Co-authored-by: zhenfei
Co-authored-by: Ho
Co-authored-by: naure
Co-authored-by: Aurélien Nicolas
Co-authored-by: Sphere L
Co-authored-by: Andrija
Co-authored-by: ying tong
Co-authored-by: therealyingtong
---------
Co-authored-by: han0110
Co-authored-by: Velaciela
Co-authored-by: Carlos Pérez <37264926+CPerezz@users.noreply.github.com>
Co-authored-by: Eduard S
Co-authored-by: CeciliaZ030 <45245961+CeciliaZ030@users.noreply.github.com>
Co-authored-by: Brecht Devos
Co-authored-by: Enrico Bottazzi <85900164+enricobottazzi@users.noreply.github.com>
Co-authored-by: Ethan-000
Co-authored-by: dante <45801863+alexander-camuto@users.noreply.github.com>
Co-authored-by: Mamy Ratsimbazafy
Co-authored-by: François Garillot <4142+huitseeker@users.noreply.github.com>
Co-authored-by: kilic
Co-authored-by: Thor <7041313+thor314@users.noreply.github.com> Co-authored-by: CPerezz Co-authored-by: chokermaxx <135603985+chokermaxx@users.noreply.github.com> Co-authored-by: Zhang Zhuo Co-authored-by: alannotnerd Co-authored-by: kunxian xia Co-authored-by: Steven Co-authored-by: Ho Co-authored-by: naure Co-authored-by: Aurélien Nicolas Co-authored-by: Sphere L Co-authored-by: Andrija Co-authored-by: ying tong Co-authored-by: therealyingtong --- .github/workflows/ci.yml | 79 +- .github/workflows/ci_main.yml | 106 --- Cargo.lock | 138 +--- README.md | 6 +- halo2/Cargo.toml | 2 +- halo2_gadgets/Cargo.toml | 19 +- halo2_gadgets/benches/poseidon.rs | 4 +- halo2_gadgets/benches/sha256.rs | 23 +- halo2_gadgets/src/ecc.rs | 4 +- halo2_gadgets/src/ecc/chip/constants.rs | 14 +- halo2_gadgets/src/ecc/chip/mul.rs | 6 +- halo2_gadgets/src/ecc/chip/mul_fixed.rs | 8 +- halo2_gadgets/src/ecc/chip/mul_fixed/short.rs | 2 + halo2_gadgets/src/poseidon.rs | 4 +- halo2_gadgets/src/poseidon/pow5.rs | 51 +- halo2_gadgets/src/poseidon/primitives.rs | 10 +- .../src/poseidon/primitives/grain.rs | 2 +- halo2_gadgets/src/poseidon/primitives/mds.rs | 3 +- .../src/poseidon/primitives/p128pow5t3.rs | 6 +- halo2_gadgets/src/sha256/table16.rs | 4 +- .../src/sha256/table16/compression.rs | 2 + .../src/sha256/table16/message_schedule.rs | 2 + .../src/sha256/table16/spread_table.rs | 2 + halo2_gadgets/src/sinsemilla.rs | 13 +- halo2_gadgets/src/sinsemilla/merkle.rs | 4 +- halo2_gadgets/src/utilities.rs | 11 +- halo2_gadgets/src/utilities/cond_swap.rs | 2 + .../src/utilities/decompose_running_sum.rs | 2 + .../src/utilities/lookup_range_check.rs | 16 +- halo2_proofs/Cargo.toml | 31 +- halo2_proofs/README.md | 6 +- halo2_proofs/benches/commit_zk.rs | 80 ++ halo2_proofs/benches/dev_lookup.rs | 2 + halo2_proofs/benches/lookups.rs | 5 +- halo2_proofs/benches/plonk.rs | 2 + halo2_proofs/examples/circuit-layout.rs | 2 + halo2_proofs/examples/serialization.rs | 12 +- halo2_proofs/examples/shuffle.rs | 30 +- halo2_proofs/examples/shuffle_api.rs | 216 +++++ halo2_proofs/examples/simple-example.rs | 2 + halo2_proofs/examples/two-chip.rs | 2 + halo2_proofs/examples/vector-mul.rs | 350 ++++++++ halo2_proofs/src/arithmetic.rs | 95 ++- halo2_proofs/src/circuit.rs | 24 +- .../src/circuit/floor_planner/single_pass.rs | 123 +-- halo2_proofs/src/circuit/floor_planner/v1.rs | 50 +- .../src/circuit/floor_planner/v1/strategy.rs | 21 +- halo2_proofs/src/circuit/layouter.rs | 54 +- halo2_proofs/src/circuit/table_layouter.rs | 413 ++++++++++ halo2_proofs/src/circuit/value.rs | 5 + halo2_proofs/src/dev.rs | 761 +++++++++++++----- halo2_proofs/src/dev/cost.rs | 239 +++++- halo2_proofs/src/dev/failure.rs | 265 +++++- halo2_proofs/src/dev/failure/emitter.rs | 2 +- halo2_proofs/src/dev/gates.rs | 25 +- halo2_proofs/src/dev/graph.rs | 3 + halo2_proofs/src/dev/graph/layout.rs | 221 +---- halo2_proofs/src/dev/metadata.rs | 62 +- halo2_proofs/src/dev/tfp.rs | 544 +++++++++++++ halo2_proofs/src/dev/util.rs | 19 +- halo2_proofs/src/helpers.rs | 7 +- halo2_proofs/src/lib.rs | 27 +- halo2_proofs/src/multicore.rs | 73 +- halo2_proofs/src/plonk.rs | 67 +- halo2_proofs/src/plonk/assigned.rs | 3 +- halo2_proofs/src/plonk/circuit.rs | 486 +++++++++-- .../src/plonk/circuit/compress_selectors.rs | 4 +- halo2_proofs/src/plonk/error.rs | 48 +- halo2_proofs/src/plonk/evaluation.rs | 154 +++- halo2_proofs/src/plonk/keygen.rs | 28 +- halo2_proofs/src/plonk/lookup.rs | 15 +- halo2_proofs/src/plonk/lookup/prover.rs | 7 +- halo2_proofs/src/plonk/lookup/verifier.rs | 9 +- 
halo2_proofs/src/plonk/mv_lookup.rs | 7 +- halo2_proofs/src/plonk/mv_lookup/prover.rs | 48 +- halo2_proofs/src/plonk/mv_lookup/verifier.rs | 10 +- halo2_proofs/src/plonk/permutation.rs | 8 +- halo2_proofs/src/plonk/permutation/keygen.rs | 466 ++++++++--- halo2_proofs/src/plonk/permutation/prover.rs | 4 +- .../src/plonk/permutation/verifier.rs | 3 +- halo2_proofs/src/plonk/prover.rs | 148 +++- halo2_proofs/src/plonk/shuffle.rs | 67 ++ halo2_proofs/src/plonk/shuffle/prover.rs | 250 ++++++ halo2_proofs/src/plonk/shuffle/verifier.rs | 138 ++++ halo2_proofs/src/plonk/vanishing/prover.rs | 3 +- halo2_proofs/src/plonk/verifier.rs | 181 +++-- halo2_proofs/src/plonk/verifier/batch.rs | 17 +- halo2_proofs/src/poly.rs | 5 +- halo2_proofs/src/poly/commitment.rs | 5 +- halo2_proofs/src/poly/domain.rs | 26 +- halo2_proofs/src/poly/ipa/commitment.rs | 24 +- .../src/poly/ipa/commitment/prover.rs | 8 +- .../src/poly/ipa/commitment/verifier.rs | 18 +- halo2_proofs/src/poly/ipa/msm.rs | 15 +- halo2_proofs/src/poly/ipa/multiopen.rs | 2 +- halo2_proofs/src/poly/ipa/multiopen/prover.rs | 8 +- .../src/poly/ipa/multiopen/verifier.rs | 30 +- halo2_proofs/src/poly/ipa/strategy.rs | 9 +- halo2_proofs/src/poly/kzg/commitment.rs | 59 +- halo2_proofs/src/poly/kzg/msm.rs | 6 +- halo2_proofs/src/poly/kzg/multiopen/gwc.rs | 15 +- .../src/poly/kzg/multiopen/gwc/prover.rs | 12 +- .../src/poly/kzg/multiopen/gwc/verifier.rs | 18 +- .../src/poly/kzg/multiopen/shplonk.rs | 33 +- .../src/poly/kzg/multiopen/shplonk/prover.rs | 155 ++-- .../poly/kzg/multiopen/shplonk/verifier.rs | 16 +- halo2_proofs/src/poly/kzg/strategy.rs | 16 +- halo2_proofs/src/poly/multiopen.rs | 8 +- halo2_proofs/src/poly/multiopen_test.rs | 27 +- halo2_proofs/src/poly/query.rs | 8 +- halo2_proofs/src/poly/strategy.rs | 10 +- halo2_proofs/src/transcript.rs | 20 +- halo2_proofs/src/transcript/blake2b.rs | 2 +- halo2_proofs/tests/plonk_api.rs | 14 +- rust-toolchain | 2 +- 115 files changed, 5234 insertions(+), 1826 deletions(-) delete mode 100644 .github/workflows/ci_main.yml create mode 100644 halo2_proofs/benches/commit_zk.rs create mode 100644 halo2_proofs/examples/shuffle_api.rs create mode 100644 halo2_proofs/examples/vector-mul.rs create mode 100644 halo2_proofs/src/circuit/table_layouter.rs create mode 100644 halo2_proofs/src/dev/tfp.rs create mode 100644 halo2_proofs/src/plonk/shuffle.rs create mode 100644 halo2_proofs/src/plonk/shuffle/prover.rs create mode 100644 halo2_proofs/src/plonk/shuffle/verifier.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 74ddd131fb..56efd9a3d8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,14 +1,25 @@ name: CI checks -on: [pull_request, push] +on: + merge_group: + pull_request: + push: + branches: + - main jobs: test: - name: Test on ${{ matrix.os }} + name: Test on ${{ matrix.os }} with ${{ matrix.feature_set }} features runs-on: ${{ matrix.os }} strategy: matrix: + feature_set: [basic, all] os: [ubuntu-latest, windows-latest, macOS-latest] + include: + - feature_set: basic + features: batch,dev-graph,gadget-traces + - feature_set: all + features: batch,dev-graph,gadget-traces,multicore,test-dev-graph,thread-safe-region,sanity-checks,circuit-params steps: - uses: actions/checkout@v3 @@ -19,8 +30,67 @@ jobs: uses: actions-rs/cargo@v1 with: command: test - args: --verbose --release --all --all-features - if: ${{ false }} + args: --verbose --release --workspace --no-default-features --features "${{ matrix.features }}" + + build: + name: Build target ${{ matrix.target }} 
+ runs-on: ubuntu-latest + strategy: + matrix: + target: + - wasm32-unknown-unknown + - wasm32-wasi + + steps: + - uses: actions/checkout@v3 + - uses: actions-rs/toolchain@v1 + with: + override: false + - name: Add target + run: rustup target add ${{ matrix.target }} + - name: cargo build + uses: actions-rs/cargo@v1 + with: + command: build + args: --no-default-features --features batch,dev-graph,gadget-traces --target ${{ matrix.target }} + + bitrot: + name: Bitrot check + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - uses: actions-rs/toolchain@v1 + with: + override: false + # Build benchmarks to prevent bitrot + - name: Build benchmarks + uses: actions-rs/cargo@v1 + with: + command: build + args: --benches --examples --all-features + + doc-links: + name: Intra-doc links + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - uses: actions-rs/toolchain@v1 + with: + override: false + - name: cargo fetch + uses: actions-rs/cargo@v1 + with: + command: fetch + + # Ensure intra-documentation links all resolve correctly + # Requires #![deny(intra_doc_link_resolution_failure)] in crates. + - name: Check intra-doc links + uses: actions-rs/cargo@v1 + with: + command: doc + args: --all --document-private-items fmt: name: Rustfmt @@ -36,3 +106,4 @@ jobs: with: command: fmt args: --all -- --check + diff --git a/.github/workflows/ci_main.yml b/.github/workflows/ci_main.yml deleted file mode 100644 index 400bff09bd..0000000000 --- a/.github/workflows/ci_main.yml +++ /dev/null @@ -1,106 +0,0 @@ -name: CI checks main - -on: - push: - branches: - - main -jobs: - test: - name: Test on ${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macOS-latest] - - steps: - - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - override: false - - name: Run tests - uses: actions-rs/cargo@v1 - with: - command: test - args: --verbose --release --all --all-features - bitrot: - name: Bitrot check - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - override: false - # Build benchmarks to prevent bitrot - - name: Build benchmarks - uses: actions-rs/cargo@v1 - with: - command: build - args: --benches --examples --all-features - - codecov: - name: Code coverage - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - # Use stable for this to ensure that cargo-tarpaulin can be built. - - uses: actions-rs/toolchain@v1 - with: - override: false - - name: Install cargo-tarpaulin - uses: actions-rs/cargo@v1 - with: - command: install - args: cargo-tarpaulin - - name: Generate coverage report - uses: actions-rs/cargo@v1 - with: - command: tarpaulin - args: --all-features --timeout 600 --out Xml - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3.1.0 - - doc-links: - name: Intra-doc links - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - override: false - - name: cargo fetch - uses: actions-rs/cargo@v1 - with: - command: fetch - - # Ensure intra-documentation links all resolve correctly - # Requires #![deny(intra_doc_link_resolution_failure)] in crates. 
- - name: Check intra-doc links - uses: actions-rs/cargo@v1 - with: - command: doc - args: --all --document-private-items - - build: - name: Build target ${{ matrix.target }} - runs-on: ubuntu-latest - strategy: - matrix: - target: - - wasm32-unknown-unknown - - wasm32-wasi - - steps: - - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - override: false - - name: Add target - run: rustup target add ${{ matrix.target }} - - name: cargo build - uses: actions-rs/cargo@v1 - with: - command: build - args: --features dev-graph,gadget-traces,unstable --target ${{ matrix.target }} diff --git a/Cargo.lock b/Cargo.lock index 3a36d84c12..a25d8cd0f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -215,7 +215,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" dependencies = [ - "rustc_version 0.4.0", + "rustc_version", ] [[package]] @@ -236,19 +236,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = "chrono" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "time", - "winapi", -] - [[package]] name = "clap" version = "2.34.0" @@ -789,16 +776,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "gif" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3a7187e78088aead22ceedeee99779455b23fc231fe13ec443f99bb71694e5b" -dependencies = [ - "color_quant", - "weezl", -] - [[package]] name = "gimli" version = "0.26.1" @@ -859,7 +836,7 @@ dependencies = [ "ff", "group", "halo2_proofs", - "halo2curves 0.3.2", + "halo2curves", "lazy_static", "plotters", "pprof", @@ -869,6 +846,12 @@ dependencies = [ "uint", ] +[[package]] +name = "halo2_legacy_pdqsort" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47716fe1ae67969c5e0b2ef826f32db8c3be72be325e1aa3c1951d06b5575ec5" + [[package]] name = "halo2_proofs" version = "0.2.0" @@ -885,14 +868,17 @@ dependencies = [ "getrandom", "group", "gumdrop", - "halo2curves 0.1.0", + "halo2_legacy_pdqsort", + "halo2curves", "lazy_static", "log", + "maybe-rayon", "num-bigint", "num-integer", "plotters", "poseidon", "proptest", + "rand_chacha", "rand_core", "rayon", "sha3", @@ -923,24 +909,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "halo2curves" -version = "0.3.2" -source = "git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.2#9f5c50810bbefe779ee5cf1d852b2fe85dc35d5e" -dependencies = [ - "ff", - "group", - "lazy_static", - "num-bigint", - "num-traits", - "pasta_curves", - "paste", - "rand", - "rand_core", - "static_assertions", - "subtle", -] - [[package]] name = "hashbrown" version = "0.11.2" @@ -1106,6 +1074,16 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "maybe-rayon" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" +dependencies = [ + "cfg-if 1.0.0", + "rayon", +] + [[package]] name = "memchr" version = "2.5.0" @@ -1324,20 +1302,11 @@ dependencies = [ [[package]] name = "pathfinder_simd" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"39fe46acc5503595e5949c17b818714d26fdf9b4920eacf3b2947f0199f4a6ff" -dependencies = [ - "rustc_version 0.3.3", -] - -[[package]] -name = "pest" -version = "2.1.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +checksum = "0444332826c70dc47be74a7c6a5fc44e23a7905ad6858d4162b658320455ef93" dependencies = [ - "ucd-trie", + "rustc_version", ] [[package]] @@ -1358,9 +1327,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" dependencies = [ - "chrono", "font-kit", - "image", "lazy_static", "num-traits", "pathfinder_geometry", @@ -1384,7 +1351,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21362fa905695e5618aefd169358f52e0e8bc4a8e05333cf780fda8cddc00b54" dependencies = [ - "gif", "image", "plotters-backend", ] @@ -1415,7 +1381,7 @@ name = "poseidon" version = "0.2.0" source = "git+https://github.com/scroll-tech/poseidon.git?branch=main#5787dd3d2ce7a9e9601a035c396ac0c03449b54d" dependencies = [ - "halo2curves 0.1.0", + "halo2curves", "subtle", ] @@ -1642,22 +1608,13 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" -[[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", -] - [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.7", + "semver", ] [[package]] @@ -1693,30 +1650,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "semver" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser", -] - [[package]] name = "semver" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d65bd28f48be7196d222d95b9243287f48d27aca604e08497513019ff0502cc4" -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - [[package]] name = "serde" version = "1.0.136" @@ -1945,17 +1884,6 @@ dependencies = [ "syn", ] -[[package]] -name = "time" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" -dependencies = [ - "libc", - "wasi", - "winapi", -] - [[package]] name = "tinytemplate" version = "1.2.1" @@ -2010,12 +1938,6 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" -[[package]] -name = "ucd-trie" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" - 
[[package]] name = "uint" version = "0.9.4" @@ -2142,12 +2064,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "weezl" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b77fdfd5a253be4ab714e4ffa3c49caf146b4de743e97510c0656cf90f1e8e" - [[package]] name = "winapi" version = "0.3.9" diff --git a/README.md b/README.md index 69167e0716..4de513c4b7 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ ## Minimum Supported Rust Version -Requires Rust **1.56.1** or higher. +Requires Rust **1.65.0** or higher. Minimum supported Rust version can be changed in the future, but it will be done with a minor version bump. @@ -14,6 +14,10 @@ minor version bump. `halo2` currently uses [rayon](https://github.com/rayon-rs/rayon) for parallel computation. The `RAYON_NUM_THREADS` environment variable can be used to set the number of threads. +You can disable `rayon` by disabling the `"multicore"` feature. +Warning! Halo2 will lose access to parallelism if you disable the `"multicore"` feature. +This will significantly degrade performance. + ## License Licensed under either of diff --git a/halo2/Cargo.toml b/halo2/Cargo.toml index 7a6bbaa94f..ba934102b6 100644 --- a/halo2/Cargo.toml +++ b/halo2/Cargo.toml @@ -19,7 +19,7 @@ all-features = true rustdoc-args = ["--cfg", "docsrs", "--html-in-header", "katex-header.html"] [dependencies] -halo2_proofs = { version = "0.2", path = "../halo2_proofs" } +halo2_proofs = { version = "0.2", path = "../halo2_proofs", default-features = false } [lib] bench = false diff --git a/halo2_gadgets/Cargo.toml b/halo2_gadgets/Cargo.toml index 136ef63a16..7281b4293b 100644 --- a/halo2_gadgets/Cargo.toml +++ b/halo2_gadgets/Cargo.toml @@ -9,7 +9,7 @@ authors = [ "Kris Nuttycombe ", ] edition = "2021" -rust-version = "1.56.1" +rust-version = "1.66.0" description = "Reusable gadgets and chip implementations for Halo 2" license = "MIT OR Apache-2.0" repository = "https://github.com/zcash/halo2" @@ -26,16 +26,16 @@ arrayvec = "0.7.0" bitvec = "1" ff = { version = "0.13", features = ["bits"] } group = "0.13" -halo2_proofs = { version = "0.2", path = "../halo2_proofs" } +halo2_proofs = { version = "0.2", path = "../halo2_proofs", default-features = false } lazy_static = "1" -halo2curves = { git = 'https://github.com/privacy-scaling-explorations/halo2curves', tag = "0.3.2" } +halo2curves = { version = "0.1.0" } proptest = { version = "1.0.0", optional = true } rand = "0.8" subtle = "2.3" -uint = "0.9.2" # MSRV 1.56.1 +uint = "0.9.2" # Developer tooling dependencies -plotters = { version = "0.3.0", optional = true } +plotters = { version = "0.3.0", default-features = false, optional = true } [dev-dependencies] criterion = "0.3" @@ -48,7 +48,14 @@ pprof = { version = "0.8", features = ["criterion", "flamegraph"] } # MSRV 1.56 bench = false [features] -dev-graph = ["halo2_proofs/dev-graph", "plotters"] +test-dev-graph = [ + "halo2_proofs/dev-graph", + "plotters", + "plotters/bitmap_backend", + "plotters/bitmap_encoder", + "plotters/ttf", +] +circuit-params = ["halo2_proofs/circuit-params"] test-dependencies = ["proptest"] unstable = [] diff --git a/halo2_gadgets/benches/poseidon.rs b/halo2_gadgets/benches/poseidon.rs index 4494e77a0f..4175318399 100644 --- a/halo2_gadgets/benches/poseidon.rs +++ b/halo2_gadgets/benches/poseidon.rs @@ -53,6 +53,8 @@ where { type Config = MyConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self { @@ -133,7 +135,7 
@@ impl Spec for MySpec Fp { - val.pow_vartime(&[5]) + val.pow_vartime([5]) } fn secure_mds() -> usize { diff --git a/halo2_gadgets/benches/sha256.rs b/halo2_gadgets/benches/sha256.rs index cb5b5c3194..b1b8e0a2bd 100644 --- a/halo2_gadgets/benches/sha256.rs +++ b/halo2_gadgets/benches/sha256.rs @@ -8,7 +8,7 @@ use halo2curves::pasta::{pallas, EqAffine}; use rand::rngs::OsRng; use std::{ - fs::File, + fs::{create_dir_all, File}, io::{prelude::*, BufReader}, path::Path, }; @@ -40,6 +40,8 @@ fn bench(name: &str, k: u32, c: &mut Criterion) { impl Circuit for MyCircuit { type Config = Table16Config; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self::default() @@ -89,20 +91,23 @@ fn bench(name: &str, k: u32, c: &mut Criterion) { } } + // Create parent directory for assets + create_dir_all("./benches/sha256_assets").expect("Failed to create sha256_assets directory"); + // Initialize the polynomial commitment parameters let params_path = Path::new("./benches/sha256_assets/sha256_params"); - if File::open(¶ms_path).is_err() { + if File::open(params_path).is_err() { let params: ParamsIPA = ParamsIPA::new(k); let mut buf = Vec::new(); params.write(&mut buf).expect("Failed to write params"); - let mut file = File::create(¶ms_path).expect("Failed to create sha256_params"); + let mut file = File::create(params_path).expect("Failed to create sha256_params"); file.write_all(&buf[..]) .expect("Failed to write params to file"); } - let params_fs = File::open(¶ms_path).expect("couldn't load sha256_params"); + let params_fs = File::open(params_path).expect("couldn't load sha256_params"); let params: ParamsIPA = ParamsIPA::read::<_>(&mut BufReader::new(params_fs)).expect("Failed to read params"); @@ -129,23 +134,23 @@ fn bench(name: &str, k: u32, c: &mut Criterion) { // Create a proof let proof_path = Path::new("./benches/sha256_assets/sha256_proof"); - if File::open(&proof_path).is_err() { + if File::open(proof_path).is_err() { let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]); create_proof::, ProverIPA<_>, _, _, _, _>( ¶ms, &pk, &[circuit], - &[], + &[&[]], OsRng, &mut transcript, ) .expect("proof generation should not fail"); let proof: Vec = transcript.finalize(); - let mut file = File::create(&proof_path).expect("Failed to create sha256_proof"); + let mut file = File::create(proof_path).expect("Failed to create sha256_proof"); file.write_all(&proof[..]).expect("Failed to write proof"); } - let mut proof_fs = File::open(&proof_path).expect("couldn't load sha256_proof"); + let mut proof_fs = File::open(proof_path).expect("couldn't load sha256_proof"); let mut proof = Vec::::new(); proof_fs .read_to_end(&mut proof) @@ -160,7 +165,7 @@ fn bench(name: &str, k: u32, c: &mut Criterion) { ¶ms, pk.get_vk(), strategy, - &[], + &[&[]], &mut transcript, ) .unwrap(); diff --git a/halo2_gadgets/src/ecc.rs b/halo2_gadgets/src/ecc.rs index 08f34b15f2..8d71db8580 100644 --- a/halo2_gadgets/src/ecc.rs +++ b/halo2_gadgets/src/ecc.rs @@ -731,6 +731,8 @@ pub(crate) mod tests { impl Circuit for MyCircuit { type Config = EccConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { MyCircuit { test_errors: false } @@ -901,7 +903,7 @@ pub(crate) mod tests { assert_eq!(prover.verify(), Ok(())) } - #[cfg(feature = "dev-graph")] + #[cfg(feature = "test-dev-graph")] #[test] fn print_ecc_chip() { use plotters::prelude::*; diff --git 
a/halo2_gadgets/src/ecc/chip/constants.rs b/halo2_gadgets/src/ecc/chip/constants.rs index 6296165464..90a989b9af 100644 --- a/halo2_gadgets/src/ecc/chip/constants.rs +++ b/halo2_gadgets/src/ecc/chip/constants.rs @@ -48,8 +48,8 @@ fn compute_window_table(base: C, num_windows: usize) -> Vec<[C; (0..H) .map(|k| { // scalar = (k+2)*(8^w) - let scalar = C::Scalar::from(k as u64 + 2) - * C::Scalar::from(H as u64).pow(&[w as u64, 0, 0, 0]); + let scalar = + C::Scalar::from(k as u64 + 2) * C::Scalar::from(H as u64).pow([w as u64]); (base * scalar).to_affine() }) .collect::>() @@ -62,14 +62,14 @@ fn compute_window_table(base: C, num_windows: usize) -> Vec<[C; // For the last window, we compute [k * (2^3)^w - sum]B, where sum is defined // as sum = \sum_{j = 0}^{`num_windows - 2`} 2^{3j+1} let sum = (0..(num_windows - 1)).fold(C::Scalar::ZERO, |acc, j| { - acc + C::Scalar::from(2).pow(&[FIXED_BASE_WINDOW_SIZE as u64 * j as u64 + 1, 0, 0, 0]) + acc + C::Scalar::from(2).pow([FIXED_BASE_WINDOW_SIZE as u64 * j as u64 + 1]) }); window_table.push( (0..H) .map(|k| { // scalar = k * (2^3)^w - sum, where w = `num_windows - 1` let scalar = C::Scalar::from(k as u64) - * C::Scalar::from(H as u64).pow(&[(num_windows - 1) as u64, 0, 0, 0]) + * C::Scalar::from(H as u64).pow([(num_windows - 1) as u64]) - sum; (base * scalar).to_affine() }) @@ -197,7 +197,7 @@ pub fn test_lagrange_coeffs(base: C, num_windows: usize) { // Compute the actual x-coordinate of the multiple [(k+2)*(8^w)]B. let point = base * C::Scalar::from(bits as u64 + 2) - * C::Scalar::from(H as u64).pow(&[idx as u64, 0, 0, 0]); + * C::Scalar::from(H as u64).pow([idx as u64]); let x = *point.to_affine().coordinates().unwrap().x(); // Check that the interpolated x-coordinate matches the actual one. @@ -214,10 +214,10 @@ pub fn test_lagrange_coeffs(base: C, num_windows: usize) { // Compute the actual x-coordinate of the multiple [k * (8^84) - offset]B, // where offset = \sum_{j = 0}^{83} 2^{3j+1} let offset = (0..(num_windows - 1)).fold(C::Scalar::ZERO, |acc, w| { - acc + C::Scalar::from(2).pow(&[FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1, 0, 0, 0]) + acc + C::Scalar::from(2).pow([FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1]) }); let scalar = C::Scalar::from(bits as u64) - * C::Scalar::from(H as u64).pow(&[(num_windows - 1) as u64, 0, 0, 0]) + * C::Scalar::from(H as u64).pow([(num_windows - 1) as u64]) - offset; let point = base * scalar; let x = *point.to_affine().coordinates().unwrap().x(); diff --git a/halo2_gadgets/src/ecc/chip/mul.rs b/halo2_gadgets/src/ecc/chip/mul.rs index 7896bf58bd..9499191262 100644 --- a/halo2_gadgets/src/ecc/chip/mul.rs +++ b/halo2_gadgets/src/ecc/chip/mul.rs @@ -277,9 +277,9 @@ impl Config { let zs = { let mut zs = std::iter::empty() .chain(Some(z_init)) - .chain(zs_incomplete_hi.into_iter()) - .chain(zs_incomplete_lo.into_iter()) - .chain(zs_complete.into_iter()) + .chain(zs_incomplete_hi) + .chain(zs_incomplete_lo) + .chain(zs_complete) .chain(Some(z_0)) .collect::>(); assert_eq!(zs.len(), pallas::Scalar::NUM_BITS as usize + 1); diff --git a/halo2_gadgets/src/ecc/chip/mul_fixed.rs b/halo2_gadgets/src/ecc/chip/mul_fixed.rs index 0005a108bb..ce478fdb86 100644 --- a/halo2_gadgets/src/ecc/chip/mul_fixed.rs +++ b/halo2_gadgets/src/ecc/chip/mul_fixed.rs @@ -372,7 +372,7 @@ impl> Config { base: &F, ) -> Result { // `scalar = [(k_w + 2) ⋅ 8^w] - let scalar = k.map(|k| (k + *TWO_SCALAR) * (*H_SCALAR).pow(&[w as u64, 0, 0, 0])); + let scalar = k.map(|k| (k + *TWO_SCALAR) * (*H_SCALAR).pow([w as u64])); self.process_window::<_, 
NUM_WINDOWS>(region, offset, w, k_usize, scalar, base) } @@ -389,12 +389,12 @@ impl> Config { // offset_acc = \sum_{j = 0}^{NUM_WINDOWS - 2} 2^{FIXED_BASE_WINDOW_SIZE*j + 1} let offset_acc = (0..(NUM_WINDOWS - 1)).fold(pallas::Scalar::zero(), |acc, w| { - acc + (*TWO_SCALAR).pow(&[FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1, 0, 0, 0]) + acc + (*TWO_SCALAR).pow([FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1]) }); // `scalar = [k * 8^(NUM_WINDOWS - 1) - offset_acc]`. let scalar = scalar.windows_field()[scalar.windows_field().len() - 1] - .map(|k| k * (*H_SCALAR).pow(&[(NUM_WINDOWS - 1) as u64, 0, 0, 0]) - offset_acc); + .map(|k| k * (*H_SCALAR).pow([(NUM_WINDOWS - 1) as u64]) - offset_acc); self.process_window::<_, NUM_WINDOWS>( region, @@ -490,7 +490,7 @@ impl ScalarFixed { .by_vals() .take(FIXED_BASE_WINDOW_SIZE) .rev() - .fold(0, |acc, b| 2 * acc + if b { 1 } else { 0 }) + .fold(0, |acc, b| 2 * acc + usize::from(b)) }) }) .collect::>() diff --git a/halo2_gadgets/src/ecc/chip/mul_fixed/short.rs b/halo2_gadgets/src/ecc/chip/mul_fixed/short.rs index 4c92becb86..42363baa11 100644 --- a/halo2_gadgets/src/ecc/chip/mul_fixed/short.rs +++ b/halo2_gadgets/src/ecc/chip/mul_fixed/short.rs @@ -434,6 +434,8 @@ pub mod tests { impl Circuit for MyCircuit { type Config = EccConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self::default() diff --git a/halo2_gadgets/src/poseidon.rs b/halo2_gadgets/src/poseidon.rs index ffd745ad6d..bfd78f3dee 100644 --- a/halo2_gadgets/src/poseidon.rs +++ b/halo2_gadgets/src/poseidon.rs @@ -157,7 +157,7 @@ impl< .unwrap(), ), state, - _marker: PhantomData::default(), + _marker: PhantomData, }) } @@ -204,7 +204,7 @@ impl< chip: self.chip, mode, state: self.state, - _marker: PhantomData::default(), + _marker: PhantomData, }) } } diff --git a/halo2_gadgets/src/poseidon/pow5.rs b/halo2_gadgets/src/poseidon/pow5.rs index 52b0d27312..51c1f059ca 100644 --- a/halo2_gadgets/src/poseidon/pow5.rs +++ b/halo2_gadgets/src/poseidon/pow5.rs @@ -240,30 +240,25 @@ impl, const WIDTH: usize, const RATE: usize> // Load the initial state into this region. 
let state = Pow5State::load(&mut region, config, initial_state)?; - let state = (0..config.half_full_rounds).fold(Ok(state), |res, r| { - res.and_then(|state| state.full_round(&mut region, config, r, r)) + let state = (0..config.half_full_rounds) + .try_fold(state, |res, r| res.full_round(&mut region, config, r, r))?; + + let state = (0..config.half_partial_rounds).try_fold(state, |res, r| { + res.partial_round( + &mut region, + config, + config.half_full_rounds + 2 * r, + config.half_full_rounds + r, + ) })?; - let state = (0..config.half_partial_rounds).fold(Ok(state), |res, r| { - res.and_then(|state| { - state.partial_round( - &mut region, - config, - config.half_full_rounds + 2 * r, - config.half_full_rounds + r, - ) - }) - })?; - - let state = (0..config.half_full_rounds).fold(Ok(state), |res, r| { - res.and_then(|state| { - state.full_round( - &mut region, - config, - config.half_full_rounds + 2 * config.half_partial_rounds + r, - config.half_full_rounds + config.half_partial_rounds + r, - ) - }) + let state = (0..config.half_full_rounds).try_fold(state, |res, r| { + res.full_round( + &mut region, + config, + config.half_full_rounds + 2 * config.half_partial_rounds + r, + config.half_full_rounds + config.half_partial_rounds + r, + ) })?; Ok(state.0) @@ -444,7 +439,7 @@ impl Pow5State { .value() .map(|v| *v + config.round_constants[round][idx]) }); - let r: Value> = q.map(|q| q.map(|q| q.pow(&config.alpha))).collect(); + let r: Value> = q.map(|q| q.map(|q| q.pow(config.alpha))).collect(); let m = &config.m_reg; let state = m.iter().map(|m_i| { r.as_ref().map(|r| { @@ -470,7 +465,7 @@ impl Pow5State { let p: Value> = self.0.iter().map(|word| word.0.value().cloned()).collect(); let r: Value> = p.map(|p| { - let r_0 = (p[0] + config.round_constants[round][0]).pow(&config.alpha); + let r_0 = (p[0] + config.round_constants[round][0]).pow(config.alpha); let r_i = p[1..] .iter() .enumerate() @@ -510,7 +505,7 @@ impl Pow5State { } let r_mid: Value> = p_mid.map(|p| { - let r_0 = (p[0] + config.round_constants[round + 1][0]).pow(&config.alpha); + let r_0 = (p[0] + config.round_constants[round + 1][0]).pow(config.alpha); let r_i = p[1..] .iter() .enumerate() @@ -620,6 +615,8 @@ mod tests { { type Config = Pow5Config; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { PermuteCircuit::(PhantomData) @@ -735,6 +732,8 @@ mod tests { { type Config = Pow5Config; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self { @@ -865,7 +864,7 @@ mod tests { } } - #[cfg(feature = "dev-graph")] + #[cfg(feature = "test-dev-graph")] #[test] fn print_poseidon_chip() { use plotters::prelude::*; diff --git a/halo2_gadgets/src/poseidon/primitives.rs b/halo2_gadgets/src/poseidon/primitives.rs index a8662a00e8..c456c87f54 100644 --- a/halo2_gadgets/src/poseidon/primitives.rs +++ b/halo2_gadgets/src/poseidon/primitives.rs @@ -166,7 +166,7 @@ mod private { pub trait SpongeMode: private::SealedSpongeMode {} /// The absorbing state of the `Sponge`. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Absorbing(pub(crate) SpongeRate); /// The squeezing state of the `Sponge`. @@ -188,6 +188,7 @@ impl Absorbing { } } +#[derive(Clone)] /// A Poseidon sponge. 
pub(crate) struct Sponge< F: Field, @@ -219,7 +220,7 @@ impl, const T: usize, const RATE: usize> state, mds_matrix, round_constants, - _marker: PhantomData::default(), + _marker: PhantomData, } } @@ -256,7 +257,7 @@ impl, const T: usize, const RATE: usize> state: self.state, mds_matrix: self.mds_matrix, round_constants: self.round_constants, - _marker: PhantomData::default(), + _marker: PhantomData, } } } @@ -329,6 +330,7 @@ impl Domain for Const } } +#[derive(Clone)] /// A Poseidon hash function, built around a sponge. pub struct Hash< F: Field, @@ -362,7 +364,7 @@ impl, D: Domain, const T: usize, const RA pub fn init() -> Self { Hash { sponge: Sponge::new(D::initial_capacity_element()), - _domain: PhantomData::default(), + _domain: PhantomData, } } } diff --git a/halo2_gadgets/src/poseidon/primitives/grain.rs b/halo2_gadgets/src/poseidon/primitives/grain.rs index 99711f97f3..1a780d5899 100644 --- a/halo2_gadgets/src/poseidon/primitives/grain.rs +++ b/halo2_gadgets/src/poseidon/primitives/grain.rs @@ -70,7 +70,7 @@ impl Grain { let mut grain = Grain { state, next_bit: STATE, - _field: PhantomData::default(), + _field: PhantomData, }; // Discard the first 160 bits. diff --git a/halo2_gadgets/src/poseidon/primitives/mds.rs b/halo2_gadgets/src/poseidon/primitives/mds.rs index 892ee11f6b..f1642d21c3 100644 --- a/halo2_gadgets/src/poseidon/primitives/mds.rs +++ b/halo2_gadgets/src/poseidon/primitives/mds.rs @@ -82,7 +82,8 @@ pub(super) fn generate_mds + Ord, const T: usize>( acc } else { // We can invert freely; by construction, the elements of xs are distinct. - acc * (x - x_m) * (x_j - x_m).invert().unwrap() + let diff: F = x_j - *x_m; + acc * (x - x_m) * diff.invert().unwrap() } }) }; diff --git a/halo2_gadgets/src/poseidon/primitives/p128pow5t3.rs b/halo2_gadgets/src/poseidon/primitives/p128pow5t3.rs index c8d54d9afd..fdd4b22959 100644 --- a/halo2_gadgets/src/poseidon/primitives/p128pow5t3.rs +++ b/halo2_gadgets/src/poseidon/primitives/p128pow5t3.rs @@ -22,7 +22,7 @@ impl Spec for P128Pow5T3 { } fn sbox(val: Fp) -> Fp { - val.pow_vartime(&[5]) + val.pow_vartime([5]) } fn secure_mds() -> usize { @@ -48,7 +48,7 @@ impl Spec for P128Pow5T3 { } fn sbox(val: Fq) -> Fq { - val.pow_vartime(&[5]) + val.pow_vartime([5]) } fn secure_mds() -> usize { @@ -84,7 +84,7 @@ mod tests { impl P128Pow5T3Gen { pub fn new() -> Self { - P128Pow5T3Gen(PhantomData::default()) + P128Pow5T3Gen(PhantomData) } } diff --git a/halo2_gadgets/src/sha256/table16.rs b/halo2_gadgets/src/sha256/table16.rs index 41798978de..efc7fd8bd1 100644 --- a/halo2_gadgets/src/sha256/table16.rs +++ b/halo2_gadgets/src/sha256/table16.rs @@ -531,7 +531,7 @@ trait Table16Assignment { } #[cfg(test)] -#[cfg(feature = "dev-graph")] +#[cfg(feature = "test-dev-graph")] mod tests { use super::super::{Sha256, BLOCK_SIZE}; use super::{message_schedule::msg_schedule_test_input, Table16Chip, Table16Config}; @@ -549,6 +549,8 @@ mod tests { impl Circuit for MyCircuit { type Config = Table16Config; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { MyCircuit {} diff --git a/halo2_gadgets/src/sha256/table16/compression.rs b/halo2_gadgets/src/sha256/table16/compression.rs index 9c2de95034..b2903b7b82 100644 --- a/halo2_gadgets/src/sha256/table16/compression.rs +++ b/halo2_gadgets/src/sha256/table16/compression.rs @@ -1008,6 +1008,8 @@ mod tests { impl Circuit for MyCircuit { type Config = Table16Config; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = 
"circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { MyCircuit {} diff --git a/halo2_gadgets/src/sha256/table16/message_schedule.rs b/halo2_gadgets/src/sha256/table16/message_schedule.rs index 792e980ff8..044db8c182 100644 --- a/halo2_gadgets/src/sha256/table16/message_schedule.rs +++ b/halo2_gadgets/src/sha256/table16/message_schedule.rs @@ -415,6 +415,8 @@ mod tests { impl Circuit for MyCircuit { type Config = Table16Config; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { MyCircuit {} diff --git a/halo2_gadgets/src/sha256/table16/spread_table.rs b/halo2_gadgets/src/sha256/table16/spread_table.rs index 87a71efd52..d1484c8d11 100644 --- a/halo2_gadgets/src/sha256/table16/spread_table.rs +++ b/halo2_gadgets/src/sha256/table16/spread_table.rs @@ -306,6 +306,8 @@ mod tests { impl Circuit for MyCircuit { type Config = SpreadTableConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { MyCircuit {} diff --git a/halo2_gadgets/src/sinsemilla.rs b/halo2_gadgets/src/sinsemilla.rs index 2670529229..4a20ce4830 100644 --- a/halo2_gadgets/src/sinsemilla.rs +++ b/halo2_gadgets/src/sinsemilla.rs @@ -197,7 +197,7 @@ where // Each message piece must have at most `floor(C::Base::CAPACITY / K)` words. // This ensures that the all-ones bitstring is canonical in the field. let piece_max_num_words = C::Base::CAPACITY as usize / K; - assert!(num_words <= piece_max_num_words as usize); + assert!(num_words <= piece_max_num_words); // Closure to parse a bitstring (little-endian) into a base field element. let to_base_field = |bits: &[Value]| -> Value { @@ -496,6 +496,7 @@ pub(crate) mod tests { #[derive(Debug, Clone, Eq, PartialEq)] pub(crate) struct TestHashDomain; + #[allow(non_snake_case)] impl HashDomains for TestHashDomain { fn Q(&self) -> pallas::Affine { *Q @@ -525,6 +526,8 @@ pub(crate) mod tests { SinsemillaConfig, ); type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { MyCircuit {} @@ -652,11 +655,7 @@ pub(crate) mod tests { |(l, (left, right))| { let merkle_crh = sinsemilla::HashDomain::from_Q((*Q).into()); let point = merkle_crh - .hash_to_point( - l.into_iter() - .chain(left.into_iter()) - .chain(right.into_iter()), - ) + .hash_to_point(l.into_iter().chain(left).chain(right)) .unwrap(); point.to_affine() }, @@ -737,7 +736,7 @@ pub(crate) mod tests { assert_eq!(prover.verify(), Ok(())) } - #[cfg(feature = "dev-graph")] + #[cfg(feature = "test-dev-graph")] #[test] fn print_sinsemilla_chip() { use plotters::prelude::*; diff --git a/halo2_gadgets/src/sinsemilla/merkle.rs b/halo2_gadgets/src/sinsemilla/merkle.rs index a9ae781d5c..47e5c953e3 100644 --- a/halo2_gadgets/src/sinsemilla/merkle.rs +++ b/halo2_gadgets/src/sinsemilla/merkle.rs @@ -213,6 +213,8 @@ pub mod tests { MerkleConfig, ); type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self::default() @@ -380,7 +382,7 @@ pub mod tests { assert_eq!(prover.verify(), Ok(())) } - #[cfg(feature = "dev-graph")] + #[cfg(feature = "test-dev-graph")] #[test] fn print_merkle_chip() { use plotters::prelude::*; diff --git a/halo2_gadgets/src/utilities.rs b/halo2_gadgets/src/utilities.rs index 683a553b7e..f86be625f6 100644 --- a/halo2_gadgets/src/utilities.rs +++ b/halo2_gadgets/src/utilities.rs @@ -99,7 +99,7 
@@ impl RangeConstrained> { Self { inner: value.map(|value| bitrange_subset(value, bitrange)), num_bits, - _phantom: PhantomData::default(), + _phantom: PhantomData, } } } @@ -114,7 +114,7 @@ impl RangeConstrained> { Self { inner: cell, num_bits, - _phantom: PhantomData::default(), + _phantom: PhantomData, } } @@ -123,7 +123,7 @@ impl RangeConstrained> { RangeConstrained { inner: self.inner.value().copied(), num_bits: self.num_bits, - _phantom: PhantomData::default(), + _phantom: PhantomData, } } } @@ -271,6 +271,8 @@ mod tests { impl Circuit for MyCircuit { type Config = Config; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { MyCircuit(self.0) @@ -403,8 +405,7 @@ mod tests { } assert_eq!(field_elem, sum); }; - - decompose(pallas::Base::random(rng), &[0..255]); + // decompose(pallas::Base::random(rng), &[0..255]); decompose(pallas::Base::random(rng), &[0..1, 1..255]); decompose(pallas::Base::random(rng), &[0..254, 254..255]); decompose(pallas::Base::random(rng), &[0..127, 127..255]); diff --git a/halo2_gadgets/src/utilities/cond_swap.rs b/halo2_gadgets/src/utilities/cond_swap.rs index 2dc96d5904..d71b2599a7 100644 --- a/halo2_gadgets/src/utilities/cond_swap.rs +++ b/halo2_gadgets/src/utilities/cond_swap.rs @@ -217,6 +217,8 @@ mod tests { impl Circuit for MyCircuit { type Config = CondSwapConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self::default() diff --git a/halo2_gadgets/src/utilities/decompose_running_sum.rs b/halo2_gadgets/src/utilities/decompose_running_sum.rs index 96fd279942..041044ffcb 100644 --- a/halo2_gadgets/src/utilities/decompose_running_sum.rs +++ b/halo2_gadgets/src/utilities/decompose_running_sum.rs @@ -243,6 +243,8 @@ mod tests { { type Config = RunningSumConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self { diff --git a/halo2_gadgets/src/utilities/lookup_range_check.rs b/halo2_gadgets/src/utilities/lookup_range_check.rs index 36be7c7745..7312206970 100644 --- a/halo2_gadgets/src/utilities/lookup_range_check.rs +++ b/halo2_gadgets/src/utilities/lookup_range_check.rs @@ -49,7 +49,7 @@ impl RangeConstrained> { .map(|inner| Self { inner, num_bits, - _phantom: PhantomData::default(), + _phantom: PhantomData, }) } } @@ -410,6 +410,8 @@ mod tests { impl Circuit for MyCircuit { type Config = LookupRangeCheckConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { *self @@ -506,6 +508,8 @@ mod tests { impl Circuit for MyCircuit { type Config = LookupRangeCheckConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { MyCircuit { @@ -582,13 +586,13 @@ mod tests { assert_eq!( prover.verify(), Err(vec![VerifyFailure::Lookup { - name: "lookup", + name: "lookup".to_string(), lookup_index: 0, location: FailureLocation::InRegion { region: (1, "Range check 6 bits").into(), offset: 1, }, - }]) + }]), ); } @@ -603,7 +607,7 @@ mod tests { prover.verify(), Err(vec![ VerifyFailure::Lookup { - name: "lookup", + name: "lookup".to_string(), lookup_index: 0, location: FailureLocation::InRegion { region: (1, "Range check 6 bits").into(), @@ -611,7 +615,7 @@ mod tests { }, }, VerifyFailure::Lookup { - name: "lookup", + name: "lookup".to_string(), 
lookup_index: 0, location: FailureLocation::InRegion { region: (1, "Range check 6 bits").into(), @@ -641,7 +645,7 @@ mod tests { assert_eq!( prover.verify(), Err(vec![VerifyFailure::Lookup { - name: "lookup", + name: "lookup".to_string(), lookup_index: 0, location: FailureLocation::InRegion { region: (1, "Range check 6 bits").into(), diff --git a/halo2_proofs/Cargo.toml b/halo2_proofs/Cargo.toml index 94d7df91ab..0935cfcca1 100644 --- a/halo2_proofs/Cargo.toml +++ b/halo2_proofs/Cargo.toml @@ -8,7 +8,7 @@ authors = [ "Jack Grigg ", ] edition = "2021" -rust-version = "1.56.1" +rust-version = "1.66.0" description = """ Fast PLONK-based zero-knowledge proving system with no trusted setup """ @@ -48,24 +48,26 @@ name = "fft" harness = false [dependencies] -backtrace = { version = "0.3", optional = true } rayon = "1.5.1" +backtrace = { version = "0.3", optional = true } ff = "0.13" group = "0.13" halo2curves = { version = "0.1.0", features = [ "derive_serde" ] } rand_core = { version = "0.6", default-features = false } tracing = "0.1" -blake2b_simd = "1" +blake2b_simd = "1" # MSRV 1.66.0 sha3 = "0.9.1" subtle = "2.3" cfg-if = "0.1" poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" } num-integer = "0.1" num-bigint = { version = "0.4", features = ["rand"] } +rand_chacha = "0.3" +maybe-rayon = { version = "0.1.0", default-features = false } crossbeam = "0.8.0" # Developer tooling dependencies -plotters = { version = "0.3.0", optional = true } +plotters = { version = "0.3.0", default-features = false, optional = true } tabbycat = { version = "0.1", features = ["attributes"], optional = true } lazy_static = { version = "1", optional = true } log = "0.4.17" @@ -74,6 +76,9 @@ log = "0.4.17" ark-std = { version = "0.3.0" } +# Legacy circuit compatibility +halo2_legacy_pdqsort = { version = "0.1.0", optional = true } + [dev-dependencies] assert_matches = "1.5" criterion = "0.3" @@ -86,23 +91,35 @@ rand_core = { version = "0.6", default-features = false, features = ["getrandom" getrandom = { version = "0.2", features = ["js"] } [features] -default = ["batch", "gwc", "logup_skip_inv"] +default = ["batch", "gwc", "multicore", "parallel_syn", "logup_skip_inv"] +multicore = ["maybe-rayon/threads"] dev-graph = ["plotters", "tabbycat"] +test-dev-graph = [ + "dev-graph", + "plotters/bitmap_backend", + "plotters/bitmap_encoder", + "plotters/ttf", +] gadget-traces = ["backtrace"] +thread-safe-region = [] sanity-checks = [] batch = ["rand_core/getrandom"] shplonk = [] gwc = [] parallel_syn = [] -phase-check = [] profile = ["ark-std/print-trace"] counter = ["lazy_static"] mock-batch-inv = [] +circuit-params = [] logup_skip_inv = [] +# todo: we have both mutliphase prover and phase check. consider merge them +phase-check = [] +multiphase-mock-prover = [] + [lib] bench = false [[example]] name = "circuit-layout" -required-features = ["dev-graph"] +required-features = ["test-dev-graph"] diff --git a/halo2_proofs/README.md b/halo2_proofs/README.md index 7c226ff24c..7aeebeb849 100644 --- a/halo2_proofs/README.md +++ b/halo2_proofs/README.md @@ -4,7 +4,7 @@ ## Minimum Supported Rust Version -Requires Rust **1.56.1** or higher. +Requires Rust **1.65.0** or higher. Minimum supported Rust version can be changed in the future, but it will be done with a minor version bump. @@ -15,6 +15,10 @@ minor version bump. computation. The `RAYON_NUM_THREADS` environment variable can be used to set the number of threads. +You can disable `rayon` by disabling the `"multicore"` feature. +Warning! 
Halo2 will lose access to parallelism if you disable the `"multicore"` feature. +This will significantly degrade performance. + ## License Licensed under either of diff --git a/halo2_proofs/benches/commit_zk.rs b/halo2_proofs/benches/commit_zk.rs new file mode 100644 index 0000000000..a6a33a934b --- /dev/null +++ b/halo2_proofs/benches/commit_zk.rs @@ -0,0 +1,80 @@ +extern crate criterion; + +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use group::ff::Field; +use halo2_proofs::arithmetic::parallelize; +use halo2curves::pasta::pallas::Scalar; +use rand_chacha::rand_core::RngCore; +use rand_chacha::ChaCha20Rng; +use rand_core::SeedableRng; +use std::{collections::HashMap, iter}; + +#[cfg(feature = "multicore")] +use maybe_rayon::current_num_threads; + +#[cfg(not(feature = "multicore"))] +fn current_num_threads() -> usize { + 1 +} + +fn rand_poly_serial(mut rng: ChaCha20Rng, domain: usize) -> Vec { + // Sample a random polynomial of degree n - 1 + let mut random_poly = vec![Scalar::zero(); 1 << domain]; + for coeff in random_poly.iter_mut() { + *coeff = Scalar::random(&mut rng); + } + + random_poly +} + +fn rand_poly_par(mut rng: ChaCha20Rng, domain: usize) -> Vec { + // Sample a random polynomial of degree n - 1 + let n = 1usize << domain; + let mut random_poly = vec![Scalar::ZERO; n]; + + let num_threads = current_num_threads(); + let chunk_size = n / num_threads; + let thread_seeds = (0..) + .step_by(chunk_size + 1) + .take(n % num_threads) + .chain( + (chunk_size != 0) + .then(|| ((n % num_threads) * (chunk_size + 1)..).step_by(chunk_size)) + .into_iter() + .flatten(), + ) + .take(num_threads) + .zip(iter::repeat_with(|| { + let mut seed = [0u8; 32]; + rng.fill_bytes(&mut seed); + ChaCha20Rng::from_seed(seed) + })) + .collect::>(); + + parallelize(&mut random_poly, |chunk, offset| { + let mut rng = thread_seeds[&offset].clone(); + chunk.iter_mut().for_each(|v| *v = Scalar::random(&mut rng)); + }); + random_poly +} + +fn bench_commit(c: &mut Criterion) { + let mut group = c.benchmark_group("Blinder_poly"); + let rand = ChaCha20Rng::from_seed([1u8; 32]); + for i in [ + 18usize, 19usize, 20usize, 21usize, 22usize, 23usize, 24usize, 25usize, + ] + .iter() + { + group.bench_with_input(BenchmarkId::new("serial", i), i, |b, i| { + b.iter(|| rand_poly_serial(rand.clone(), *i)) + }); + group.bench_with_input(BenchmarkId::new("parallel", i), i, |b, i| { + b.iter(|| rand_poly_par(rand.clone(), *i)) + }); + } + group.finish(); +} + +criterion_group!(benches, bench_commit); +criterion_main!(benches); diff --git a/halo2_proofs/benches/dev_lookup.rs b/halo2_proofs/benches/dev_lookup.rs index 745a36b928..62ed5a7f19 100644 --- a/halo2_proofs/benches/dev_lookup.rs +++ b/halo2_proofs/benches/dev_lookup.rs @@ -28,6 +28,8 @@ fn criterion_benchmark(c: &mut Criterion) { impl Circuit for MyCircuit { type Config = MyConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self::default() diff --git a/halo2_proofs/benches/lookups.rs b/halo2_proofs/benches/lookups.rs index 601b1a4285..aca5cecc94 100644 --- a/halo2_proofs/benches/lookups.rs +++ b/halo2_proofs/benches/lookups.rs @@ -41,6 +41,7 @@ fn criterion_benchmark(c: &mut Criterion) { impl Circuit for MyCircuit { type Config = MyConfig; type FloorPlanner = SimpleFloorPlanner; + type Params = (); fn without_witnesses(&self) -> Self { Self::default() @@ -58,8 +59,8 @@ fn criterion_benchmark(c: &mut Criterion) { meta.create_gate("degree 6 gate", 
|meta| { let dummy_selector = meta.query_selector(dummy_selector); - let constraints = vec![dummy_selector.clone(); 4] - .iter() + let constraints = std::iter::repeat(dummy_selector.clone()) + .take(4) .fold(dummy_selector.clone(), |acc, val| acc * val.clone()); Constraints::with_selector(dummy_selector, Some(constraints)) }); diff --git a/halo2_proofs/benches/plonk.rs b/halo2_proofs/benches/plonk.rs index cf02cf87e8..9c9bd2618a 100644 --- a/halo2_proofs/benches/plonk.rs +++ b/halo2_proofs/benches/plonk.rs @@ -183,6 +183,8 @@ fn criterion_benchmark(c: &mut Criterion) { impl Circuit for MyCircuit { type Config = PlonkConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self { diff --git a/halo2_proofs/examples/circuit-layout.rs b/halo2_proofs/examples/circuit-layout.rs index 6a73f51ff8..18de27a783 100644 --- a/halo2_proofs/examples/circuit-layout.rs +++ b/halo2_proofs/examples/circuit-layout.rs @@ -161,6 +161,8 @@ impl StandardCs for StandardPlonk { impl Circuit for MyCircuit { type Config = PlonkConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self { diff --git a/halo2_proofs/examples/serialization.rs b/halo2_proofs/examples/serialization.rs index 91ed5464e4..39b6b1192f 100644 --- a/halo2_proofs/examples/serialization.rs +++ b/halo2_proofs/examples/serialization.rs @@ -86,6 +86,8 @@ struct StandardPlonk(Fr); impl Circuit for StandardPlonk { type Config = StandardPlonkConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self::default() @@ -140,8 +142,14 @@ fn main() { let f = File::open("serialization-test.pk").unwrap(); let mut reader = BufReader::new(f); - let pk = ProvingKey::::read::<_, StandardPlonk>(&mut reader, SerdeFormat::RawBytes) - .unwrap(); + #[allow(clippy::unit_arg)] + let pk = ProvingKey::::read::<_, StandardPlonk>( + &mut reader, + SerdeFormat::RawBytes, + #[cfg(feature = "circuit-params")] + circuit.params(), + ) + .unwrap(); std::fs::remove_file("serialization-test.pk").unwrap(); diff --git a/halo2_proofs/examples/shuffle.rs b/halo2_proofs/examples/shuffle.rs index 08d16b9f27..17bbb3330a 100644 --- a/halo2_proofs/examples/shuffle.rs +++ b/halo2_proofs/examples/shuffle.rs @@ -12,7 +12,7 @@ use halo2_proofs::{ multiopen::{ProverIPA, VerifierIPA}, strategy::AccumulatorStrategy, }, - Rotation, VerificationStrategy, + VerificationStrategy, }, transcript::{ Blake2bRead, Blake2bWrite, Challenge255, TranscriptReadBuffer, TranscriptWriterBuffer, @@ -63,29 +63,23 @@ impl MyConfig { // Second phase let z = meta.advice_column_in(SecondPhase); - meta.create_gate("z should start with 1", |meta| { - let q_first = meta.query_selector(q_first); - let z = meta.query_advice(z, Rotation::cur()); + meta.create_gate("z should start with 1", |_| { let one = Expression::Constant(F::ONE); - vec![q_first * (one - z)] + vec![q_first.expr() * (one - z.cur())] }); - meta.create_gate("z should end with 1", |meta| { - let q_last = meta.query_selector(q_last); - let z = meta.query_advice(z, Rotation::cur()); + meta.create_gate("z should end with 1", |_| { let one = Expression::Constant(F::ONE); - vec![q_last * (one - z)] + vec![q_last.expr() * (one - z.cur())] }); - meta.create_gate("z should have valid transition", |meta| { - let q_shuffle = meta.query_selector(q_shuffle); - let original = original.map(|advice| 
meta.query_advice(advice, Rotation::cur())); - let shuffled = shuffled.map(|advice| meta.query_advice(advice, Rotation::cur())); - let [theta, gamma] = [theta, gamma].map(|challenge| meta.query_challenge(challenge)); - let [z, z_w] = - [Rotation::cur(), Rotation::next()].map(|rotation| meta.query_advice(z, rotation)); + meta.create_gate("z should have valid transition", |_| { + let q_shuffle = q_shuffle.expr(); + let original = original.map(|advice| advice.cur()); + let shuffled = shuffled.map(|advice| advice.cur()); + let [theta, gamma] = [theta, gamma].map(|challenge| challenge.expr()); // Compress let original = original @@ -99,7 +93,7 @@ impl MyConfig { .reduce(|acc, a| acc * theta.clone() + a) .unwrap(); - vec![q_shuffle * (z * (original + gamma.clone()) - z_w * (shuffled + gamma))] + vec![q_shuffle * (z.cur() * (original + gamma.clone()) - z.next() * (shuffled + gamma))] }); Self { @@ -136,6 +130,8 @@ impl MyCircuit { impl Circuit for MyCircuit { type Config = MyConfig; type FloorPlanner = V1; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self::default() diff --git a/halo2_proofs/examples/shuffle_api.rs b/halo2_proofs/examples/shuffle_api.rs new file mode 100644 index 0000000000..259e038d06 --- /dev/null +++ b/halo2_proofs/examples/shuffle_api.rs @@ -0,0 +1,216 @@ +use std::{marker::PhantomData, vec}; + +use ff::FromUniformBytes; +use halo2_proofs::{ + arithmetic::Field, + circuit::{Layouter, SimpleFloorPlanner, Value}, + plonk::{ + create_proof, keygen_pk, keygen_vk, verify_proof, Advice, Circuit, Column, + ConstraintSystem, Error, Fixed, Selector, + }, + poly::Rotation, + poly::{ + commitment::ParamsProver, + ipa::{ + commitment::{IPACommitmentScheme, ParamsIPA}, + multiopen::{ProverIPA, VerifierIPA}, + strategy::AccumulatorStrategy, + }, + VerificationStrategy, + }, + transcript::{ + Blake2bRead, Blake2bWrite, Challenge255, TranscriptReadBuffer, TranscriptWriterBuffer, + }, +}; +use halo2curves::{pasta::EqAffine, CurveAffine}; +use rand_core::OsRng; + +struct ShuffleChip { + config: ShuffleConfig, + _marker: PhantomData, +} + +#[derive(Clone, Debug)] +struct ShuffleConfig { + input_0: Column, + input_1: Column, + shuffle_0: Column, + shuffle_1: Column, + s_input: Selector, + s_shuffle: Selector, +} + +impl ShuffleChip { + fn construct(config: ShuffleConfig) -> Self { + Self { + config, + _marker: PhantomData, + } + } + + fn configure( + meta: &mut ConstraintSystem, + input_0: Column, + input_1: Column, + shuffle_0: Column, + shuffle_1: Column, + ) -> ShuffleConfig { + let s_shuffle = meta.complex_selector(); + let s_input = meta.complex_selector(); + meta.shuffle("shuffle", |meta| { + let s_input = meta.query_selector(s_input); + let s_shuffle = meta.query_selector(s_shuffle); + let input_0 = meta.query_advice(input_0, Rotation::cur()); + let input_1 = meta.query_fixed(input_1, Rotation::cur()); + let shuffle_0 = meta.query_advice(shuffle_0, Rotation::cur()); + let shuffle_1 = meta.query_advice(shuffle_1, Rotation::cur()); + vec![ + (s_input.clone() * input_0, s_shuffle.clone() * shuffle_0), + (s_input * input_1, s_shuffle * shuffle_1), + ] + }); + ShuffleConfig { + input_0, + input_1, + shuffle_0, + shuffle_1, + s_input, + s_shuffle, + } + } +} + +#[derive(Default)] +struct MyCircuit { + input_0: Vec>, + input_1: Vec, + shuffle_0: Vec>, + shuffle_1: Vec>, +} + +impl Circuit for MyCircuit { + // Since we are using a single chip for everything, we can just reuse its config. 
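+    // `Params` (below) exists on the `Circuit` trait only when the "circuit-params"
+    // feature is enabled; this example takes no runtime parameters, so it is the unit type.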
+ type Config = ShuffleConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self::default() + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let input_0 = meta.advice_column(); + let input_1 = meta.fixed_column(); + let shuffle_0 = meta.advice_column(); + let shuffle_1 = meta.advice_column(); + ShuffleChip::configure(meta, input_0, input_1, shuffle_0, shuffle_1) + } + + fn synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + let ch = ShuffleChip::::construct(config); + layouter.assign_region( + || "load inputs", + |mut region| { + for (i, (input_0, input_1)) in + self.input_0.iter().zip(self.input_1.iter()).enumerate() + { + region.assign_advice(|| "input_0", ch.config.input_0, i, || *input_0)?; + region.assign_fixed( + || "input_1", + ch.config.input_1, + i, + || Value::known(*input_1), + )?; + ch.config.s_input.enable(&mut region, i)?; + } + Ok(()) + }, + )?; + layouter.assign_region( + || "load shuffles", + |mut region| { + for (i, (shuffle_0, shuffle_1)) in + self.shuffle_0.iter().zip(self.shuffle_1.iter()).enumerate() + { + region.assign_advice(|| "shuffle_0", ch.config.shuffle_0, i, || *shuffle_0)?; + region.assign_advice(|| "shuffle_1", ch.config.shuffle_1, i, || *shuffle_1)?; + ch.config.s_shuffle.enable(&mut region, i)?; + } + Ok(()) + }, + )?; + Ok(()) + } +} + +fn test_prover(k: u32, circuit: MyCircuit, expected: bool) +where + C::Scalar: FromUniformBytes<64>, +{ + let params = ParamsIPA::::new(k); + let vk = keygen_vk(¶ms, &circuit).unwrap(); + let pk = keygen_pk(¶ms, vk, &circuit).unwrap(); + + let proof = { + let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]); + + create_proof::, ProverIPA, _, _, _, _>( + ¶ms, + &pk, + &[circuit], + &[&[]], + OsRng, + &mut transcript, + ) + .expect("proof generation should not fail"); + + transcript.finalize() + }; + + let accepted = { + let strategy = AccumulatorStrategy::new(¶ms); + let mut transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]); + + verify_proof::, VerifierIPA, _, _, _>( + ¶ms, + pk.get_vk(), + strategy, + &[&[]], + &mut transcript, + ) + .map(|strategy| strategy.finalize()) + .unwrap_or_default() + }; + + assert_eq!(accepted, expected); +} + +fn main() { + use halo2_proofs::dev::MockProver; + use halo2curves::pasta::Fp; + const K: u32 = 4; + let input_0 = [1, 2, 4, 1] + .map(|e: u64| Value::known(Fp::from(e))) + .to_vec(); + let input_1 = [10, 20, 40, 10].map(Fp::from).to_vec(); + let shuffle_0 = [4, 1, 1, 2] + .map(|e: u64| Value::known(Fp::from(e))) + .to_vec(); + let shuffle_1 = [40, 10, 10, 20] + .map(|e: u64| Value::known(Fp::from(e))) + .to_vec(); + let circuit = MyCircuit { + input_0, + input_1, + shuffle_0, + shuffle_1, + }; + let prover = MockProver::run(K, &circuit, vec![]).unwrap(); + prover.assert_satisfied(); + test_prover::(K, circuit, true); +} diff --git a/halo2_proofs/examples/simple-example.rs b/halo2_proofs/examples/simple-example.rs index 2273ce9759..242257a692 100644 --- a/halo2_proofs/examples/simple-example.rs +++ b/halo2_proofs/examples/simple-example.rs @@ -248,6 +248,8 @@ impl Circuit for MyCircuit { // Since we are using a single chip for everything, we can just reuse its config. 
type Config = FieldConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self::default() diff --git a/halo2_proofs/examples/two-chip.rs b/halo2_proofs/examples/two-chip.rs index 4113d94143..336f9c4957 100644 --- a/halo2_proofs/examples/two-chip.rs +++ b/halo2_proofs/examples/two-chip.rs @@ -458,6 +458,8 @@ impl Circuit for MyCircuit { // Since we are using a single chip for everything, we can just reuse its config. type Config = FieldConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self::default() diff --git a/halo2_proofs/examples/vector-mul.rs b/halo2_proofs/examples/vector-mul.rs new file mode 100644 index 0000000000..4b971dc97f --- /dev/null +++ b/halo2_proofs/examples/vector-mul.rs @@ -0,0 +1,350 @@ +use std::marker::PhantomData; + +use halo2_proofs::{ + arithmetic::Field, + circuit::{AssignedCell, Chip, Layouter, Region, SimpleFloorPlanner, Value}, + plonk::{Advice, Circuit, Column, ConstraintSystem, Error, Instance, Selector}, + poly::Rotation, +}; + +// ANCHOR: instructions +trait NumericInstructions: Chip { + /// Variable representing a number. + type Num; + + /// Loads a number into the circuit as a private input. + fn load_private( + &self, + layouter: impl Layouter, + a: &[Value], + ) -> Result, Error>; + + /// Returns `c = a * b`. The caller is responsible for ensuring that `a.len() == b.len()`. + fn mul( + &self, + layouter: impl Layouter, + a: &[Self::Num], + b: &[Self::Num], + ) -> Result, Error>; + + /// Exposes a number as a public input to the circuit. + fn expose_public( + &self, + layouter: impl Layouter, + num: &Self::Num, + row: usize, + ) -> Result<(), Error>; +} +// ANCHOR_END: instructions + +// ANCHOR: chip +/// The chip that will implement our instructions! Chips store their own +/// config, as well as type markers if necessary. +struct FieldChip { + config: FieldConfig, + _marker: PhantomData, +} +// ANCHOR_END: chip + +// ANCHOR: chip-config +/// Chip state is stored in a config struct. This is generated by the chip +/// during configuration, and then stored inside the chip. +#[derive(Clone, Debug)] +struct FieldConfig { + /// For this chip, we will use two advice columns to implement our instructions. + /// These are also the columns through which we communicate with other parts of + /// the circuit. + advice: [Column; 3], + + /// This is the public input (instance) column. + instance: Column, + + // We need a selector to enable the multiplication gate, so that we aren't placing + // any constraints on cells where `NumericInstructions::mul` is not being used. + // This is important when building larger circuits, where columns are used by + // multiple sets of instructions. + s_mul: Selector, +} + +impl FieldChip { + fn construct(config: >::Config) -> Self { + Self { + config, + _marker: PhantomData, + } + } + + fn configure( + meta: &mut ConstraintSystem, + advice: [Column; 3], + instance: Column, + ) -> >::Config { + meta.enable_equality(instance); + for column in &advice { + meta.enable_equality(*column); + } + let s_mul = meta.selector(); + + // Define our multiplication gate! + meta.create_gate("mul", |meta| { + // To implement multiplication, we need three advice cells and a selector + // cell. 
We arrange them like so: + // + // | a0 | a1 | a2 | s_mul | + // |-----|-----|-----|-------| + // | lhs | rhs | out | s_mul | + // + // Gates may refer to any relative offsets we want, but each distinct + // offset adds a cost to the proof. The most common offsets are 0 (the + // current row), 1 (the next row), and -1 (the previous row), for which + // `Rotation` has specific constructors. + let lhs = meta.query_advice(advice[0], Rotation::cur()); + let rhs = meta.query_advice(advice[1], Rotation::cur()); + let out = meta.query_advice(advice[2], Rotation::cur()); + let s_mul = meta.query_selector(s_mul); + + // Finally, we return the polynomial expressions that constrain this gate. + // For our multiplication gate, we only need a single polynomial constraint. + // + // The polynomial expressions returned from `create_gate` will be + // constrained by the proving system to equal zero. Our expression + // has the following properties: + // - When s_mul = 0, any value is allowed in lhs, rhs, and out. + // - When s_mul != 0, this constrains lhs * rhs = out. + vec![s_mul * (lhs * rhs - out)] + }); + + FieldConfig { + advice, + instance, + s_mul, + } + } +} +// ANCHOR_END: chip-config + +// ANCHOR: chip-impl +impl Chip for FieldChip { + type Config = FieldConfig; + type Loaded = (); + + fn config(&self) -> &Self::Config { + &self.config + } + + fn loaded(&self) -> &Self::Loaded { + &() + } +} +// ANCHOR_END: chip-impl + +// ANCHOR: instructions-impl +/// A variable representing a number. +#[derive(Clone, Debug)] +struct Number(AssignedCell); + +impl NumericInstructions for FieldChip { + type Num = Number; + + fn load_private( + &self, + mut layouter: impl Layouter, + values: &[Value], + ) -> Result, Error> { + let config = self.config(); + + layouter.assign_region( + || "load private", + |mut region| { + values + .iter() + .enumerate() + .map(|(i, value)| { + region + .assign_advice(|| "private input", config.advice[0], i, || *value) + .map(Number) + }) + .collect() + }, + ) + } + + fn mul( + &self, + mut layouter: impl Layouter, + a: &[Self::Num], + b: &[Self::Num], + ) -> Result, Error> { + let config = self.config(); + assert_eq!(a.len(), b.len()); + + #[cfg(feature = "thread-safe-region")] + { + use maybe_rayon::prelude::{ + IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator, + }; + layouter.assign_region( + || "mul", + |region: Region<'_, F>| { + let thread_safe_region = std::sync::Mutex::new(region); + a.par_iter() + .zip(b.par_iter()) + .enumerate() + .map(|(i, (a, b))| { + let mut region = thread_safe_region.lock().unwrap(); + + config.s_mul.enable(&mut region, i)?; + + a.0.copy_advice(|| "lhs", &mut region, config.advice[0], i)?; + b.0.copy_advice(|| "rhs", &mut region, config.advice[1], i)?; + + let value = a.0.value().copied() * b.0.value(); + + // Finally, we do the assignment to the output, returning a + // variable to be used in another part of the circuit. 
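+                            // (Row `i` is assigned while holding the Mutex guard taken at the
+                            // top of this closure; under the "thread-safe-region" feature the
+                            // region can be shared with a parallel iterator this way.)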
+ region + .assign_advice(|| "lhs * rhs", config.advice[2], i, || value) + .map(Number) + }) + .collect() + }, + ) + } + + #[cfg(not(feature = "thread-safe-region"))] + layouter.assign_region( + || "mul", + |mut region: Region<'_, F>| { + a.iter() + .zip(b.iter()) + .enumerate() + .map(|(i, (a, b))| { + config.s_mul.enable(&mut region, i)?; + + a.0.copy_advice(|| "lhs", &mut region, config.advice[0], i)?; + b.0.copy_advice(|| "rhs", &mut region, config.advice[1], i)?; + + let value = a.0.value().copied() * b.0.value(); + + // Finally, we do the assignment to the output, returning a + // variable to be used in another part of the circuit. + region + .assign_advice(|| "lhs * rhs", config.advice[2], i, || value) + .map(Number) + }) + .collect() + }, + ) + } + + fn expose_public( + &self, + mut layouter: impl Layouter, + num: &Self::Num, + row: usize, + ) -> Result<(), Error> { + let config = self.config(); + + layouter.constrain_instance(num.0.cell(), config.instance, row) + } +} +// ANCHOR_END: instructions-impl + +// ANCHOR: circuit +/// The full circuit implementation. +/// +/// In this struct we store the private input variables. We use `Option` because +/// they won't have any value during key generation. During proving, if any of these +/// were `None` we would get an error. +#[derive(Default)] +struct MyCircuit { + a: Vec>, + b: Vec>, +} + +impl Circuit for MyCircuit { + // Since we are using a single chip for everything, we can just reuse its config. + type Config = FieldConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self::default() + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + // We create the three advice columns that FieldChip uses for I/O. + let advice = [ + meta.advice_column(), + meta.advice_column(), + meta.advice_column(), + ]; + + // We also need an instance column to store public inputs. + let instance = meta.instance_column(); + + FieldChip::configure(meta, advice, instance) + } + + fn synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + let field_chip = FieldChip::::construct(config); + + // Load our private values into the circuit. + let a = field_chip.load_private(layouter.namespace(|| "load a"), &self.a)?; + let b = field_chip.load_private(layouter.namespace(|| "load b"), &self.b)?; + + let ab = field_chip.mul(layouter.namespace(|| "a * b"), &a, &b)?; + + for (i, c) in ab.iter().enumerate() { + // Expose the result as a public input to the circuit. + field_chip.expose_public(layouter.namespace(|| "expose c"), c, i)?; + } + Ok(()) + } +} +// ANCHOR_END: circuit + +fn main() { + use halo2_proofs::dev::MockProver; + use halo2curves::pasta::Fp; + + const N: usize = 20000; + // ANCHOR: test-circuit + // The number of rows in our circuit cannot exceed 2^k. Since our example + // circuit is very small, we can pick a very small value here. + let k = 16; + + // Prepare the private and public inputs to the circuit! + let a = [Fp::from(2); N]; + let b = [Fp::from(3); N]; + let c: Vec = a.iter().zip(b).map(|(&a, b)| a * b).collect(); + + // Instantiate the circuit with the private inputs. + let circuit = MyCircuit { + a: a.iter().map(|&x| Value::known(x)).collect(), + b: b.iter().map(|&x| Value::known(x)).collect(), + }; + + // Arrange the public input. We expose the multiplication result in row 0 + // of the instance column, so we position it there in our public inputs. 
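+    // (Every product in this example is public: `expose_public` constrained c[i] to row i
+    // of the instance column, so the public-input vector is simply the products in order.)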
+ let mut public_inputs = c; + + let start = std::time::Instant::now(); + // Given the correct public input, our circuit will verify. + let prover = MockProver::run(k, &circuit, vec![public_inputs.clone()]).unwrap(); + assert_eq!(prover.verify(), Ok(())); + println!("positive test took {:?}", start.elapsed()); + + // If we try some other public input, the proof will fail! + let start = std::time::Instant::now(); + public_inputs[0] += Fp::one(); + let prover = MockProver::run(k, &circuit, vec![public_inputs]).unwrap(); + assert!(prover.verify().is_err()); + println!("negative test took {:?}", start.elapsed()); + // ANCHOR_END: test-circuit +} diff --git a/halo2_proofs/src/arithmetic.rs b/halo2_proofs/src/arithmetic.rs index 849a7022cc..dfb7f51b3f 100644 --- a/halo2_proofs/src/arithmetic.rs +++ b/halo2_proofs/src/arithmetic.rs @@ -53,7 +53,7 @@ fn multiexp_serial(coeffs: &[C::Scalar], bases: &[C], acc: &mut let mut tmp = u64::from_le_bytes(v); tmp >>= skip_bits - (skip_bytes * 8); - tmp = tmp % (1 << c); + tmp %= 1 << c; tmp as usize } @@ -112,7 +112,7 @@ fn multiexp_serial(coeffs: &[C::Scalar], bases: &[C], acc: &mut let mut running_sum = C::Curve::identity(); for exp in buckets.into_iter().rev() { running_sum = exp.add(running_sum); - *acc = *acc + &running_sum; + *acc += &running_sum; } } } @@ -188,7 +188,7 @@ pub fn best_multiexp(coeffs: &[C::Scalar], bases: &[C]) -> C::Cu pub fn best_fft>(a: &mut [G], omega: Scalar, log_n: u32) { let threads = multicore::current_num_threads(); let log_split = log2_floor(threads) as usize; - let n = a.len() as usize; + let n = a.len(); let sub_n = n >> log_split; let split_m = 1 << log_split; @@ -215,7 +215,7 @@ fn serial_fft>(a: &mut [G], omega: Scalar, lo for k in 0..n as usize { let rk = bitreverse(k, log_n as usize); if k < rk { - a.swap(rk as usize, k as usize); + a.swap(rk, k); } } @@ -258,7 +258,7 @@ fn serial_split_fft>( let high_idx = omega_idx >> SPARSE_TWIDDLE_DEGREE; let mut w_m = twiddle_lut[low_idx]; if high_idx > 0 { - w_m = w_m * twiddle_lut[(1 << SPARSE_TWIDDLE_DEGREE) + high_idx]; + w_m *= twiddle_lut[(1 << SPARSE_TWIDDLE_DEGREE) + high_idx]; } let mut k = 0; @@ -298,28 +298,28 @@ fn split_radix_fft>( // used to be: vec![G::identity; split_m]; // let mut t1 = a.clone(); // if unsafe code is allowed, a 10% performance improvement can be achieved - let mut t1: Vec = Vec::with_capacity(split_m as usize); + let mut t1: Vec = Vec::with_capacity(split_m); unsafe { - t1.set_len(split_m as usize); + t1.set_len(split_m); } for i in 0..split_m { - t1[bitreverse(i, log_split)] = a[(i * sub_n + sub_fft_offset)]; + t1[bitreverse(i, log_split)] = a[i * sub_n + sub_fft_offset]; } serial_split_fft(&mut t1, twiddle_lut, sub_n, log_split as u32); let sparse_degree = SPARSE_TWIDDLE_DEGREE; - let omega_idx = sub_fft_offset as usize; + let omega_idx = sub_fft_offset; let low_idx = omega_idx % (1 << sparse_degree); let high_idx = omega_idx >> sparse_degree; let mut omega = twiddle_lut[low_idx]; if high_idx > 0 { - omega = omega * twiddle_lut[(1 << sparse_degree) + high_idx]; + omega *= twiddle_lut[(1 << sparse_degree) + high_idx]; } let mut w_m = Scalar::ONE; for i in 0..split_m { t1[i] *= &w_m; tmp[i] = t1[i]; - w_m = w_m * omega; + w_m *= omega; } } @@ -339,7 +339,7 @@ pub fn generate_twiddle_lookup_table( let mut w_n = omega.pow_vartime([start as u64, 0, 0, 0]); for twiddle_lut in twiddle_lut.iter_mut() { *twiddle_lut = w_n; - w_n = w_n * omega; + w_n *= omega; } }); return twiddle_lut; @@ -348,14 +348,14 @@ pub fn generate_twiddle_lookup_table( // sparse 
let low_degree_lut_len = 1 << sparse_degree; let high_degree_lut_len = 1 << (log_n - sparse_degree - without_last_level as u32); - let mut twiddle_lut = vec![F::ZERO; (low_degree_lut_len + high_degree_lut_len) as usize]; + let mut twiddle_lut = vec![F::ZERO; low_degree_lut_len + high_degree_lut_len]; parallelize( &mut twiddle_lut[..low_degree_lut_len], |twiddle_lut, start| { let mut w_n = omega.pow_vartime([start as u64, 0, 0, 0]); for twiddle_lut in twiddle_lut.iter_mut() { *twiddle_lut = w_n; - w_n = w_n * omega; + w_n *= omega; } }, ); @@ -366,7 +366,7 @@ pub fn generate_twiddle_lookup_table( let mut w_n = high_degree_omega.pow_vartime([start as u64, 0, 0, 0]); for twiddle_lut in twiddle_lut.iter_mut() { *twiddle_lut = w_n; - w_n = w_n * high_degree_omega; + w_n *= high_degree_omega; } }, ); @@ -374,7 +374,7 @@ pub fn generate_twiddle_lookup_table( } pub fn parallel_fft>(a: &mut [G], omega: Scalar, log_n: u32) { - let n = a.len() as usize; + let n = a.len(); assert_eq!(n, 1 << log_n); let log_split = log2_floor(multicore::current_num_threads()) as usize; @@ -541,29 +541,60 @@ pub fn par_invert(values: &mut [F]) { }); } -/// This simple utility function will parallelize an operation that is to be +/// This utility function will parallelize an operation that is to be /// performed over a mutable slice. pub(crate) fn parallelize_internal( v: &mut [T], f: F, ) -> Vec { - let n = v.len(); + // Algorithm rationale: + // + // Using the stdlib `chunks_mut` will lead to severe load imbalance. + // From https://github.com/rust-lang/rust/blob/e94bda3/library/core/src/slice/iter.rs#L1607-L1637 + // if the division is not exact, the last chunk will be the remainder. + // + // Dividing 40 items on 12 threads will lead to a chunk size of 40/12 = 3, + // There will be a 13 chunks of size 3 and 1 of size 1 distributed on 12 threads. + // This leads to 1 thread working on 6 iterations, 1 on 4 iterations and 10 on 3 iterations, + // a load imbalance of 2x. + // + // Instead we can divide work into chunks of size + // 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3 = 4*4 + 3*8 = 40 + // + // This would lead to a 6/4 = 1.5x speedup compared to naive chunks_mut + // + // See also OpenMP spec (page 60) + // http://www.openmp.org/mp-documents/openmp-4.5.pdf + // "When no chunk_size is specified, the iteration space is divided into chunks + // that are approximately equal in size, and at most one chunk is distributed to + // each thread. The size of the chunks is unspecified in this case." + // This implies chunks are the same size ±1 + + let f = &f; + let total_iters = v.len(); let num_threads = multicore::current_num_threads(); - let mut chunk = (n as usize) / num_threads; - if chunk < num_threads { - chunk = 1; - } + let base_chunk_size = total_iters / num_threads; + let cutoff_chunk_id = total_iters % num_threads; + let split_pos = cutoff_chunk_id * (base_chunk_size + 1); + let (v_hi, v_lo) = v.split_at_mut(split_pos); multicore::scope(|scope| { + // Skip special-case: number of iterations is cleanly divided by number of threads. 
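+        // The first `cutoff_chunk_id` chunks take `base_chunk_size + 1` items and the
+        // remaining chunks take `base_chunk_size`, so per-thread work differs by at most one item.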
let mut chunk_starts = vec![]; - for (chunk_num, v) in v.chunks_mut(chunk).enumerate() { - let f = f.clone(); - scope.spawn(move |_| { - let start = chunk_num * chunk; - f(v, start); - }); - let start = chunk_num * chunk; - chunk_starts.push(start); + if cutoff_chunk_id != 0 { + for (chunk_id, chunk) in v_hi.chunks_exact_mut(base_chunk_size + 1).enumerate() { + let offset = chunk_id * (base_chunk_size + 1); + scope.spawn(move |_| f(chunk, offset)); + chunk_starts.push(offset); + } + } + // Skip special-case: less iterations than number of threads. + if base_chunk_size != 0 { + for (chunk_id, chunk) in v_lo.chunks_exact_mut(base_chunk_size).enumerate() { + let offset = split_pos + (chunk_id * base_chunk_size); + scope.spawn(move |_| f(chunk, offset)); + chunk_starts.push(offset); + } } chunk_starts @@ -621,7 +652,7 @@ pub fn lagrange_interpolate(points: &[F], evals: &[F]) -> Vec { .enumerate() .filter(|&(k, _)| k != j) .map(|a| a.1) - .zip(denoms.into_iter()) + .zip(denoms) { product.resize(tmp.len() + 1, F::ZERO); for ((a, b), product) in tmp @@ -636,7 +667,7 @@ pub fn lagrange_interpolate(points: &[F], evals: &[F]) -> Vec { } assert_eq!(tmp.len(), points.len()); assert_eq!(product.len(), points.len() - 1); - for (final_coeff, interpolation_coeff) in final_poly.iter_mut().zip(tmp.into_iter()) { + for (final_coeff, interpolation_coeff) in final_poly.iter_mut().zip(tmp) { *final_coeff += interpolation_coeff * eval; } } diff --git a/halo2_proofs/src/circuit.rs b/halo2_proofs/src/circuit.rs index 6bfdfdb780..2081d37bcd 100644 --- a/halo2_proofs/src/circuit.rs +++ b/halo2_proofs/src/circuit.rs @@ -1,6 +1,6 @@ //! Traits and structs for implementing circuit components. -use std::{convert::TryInto, fmt, marker::PhantomData}; +use std::{fmt, marker::PhantomData}; use ff::Field; @@ -16,6 +16,9 @@ pub mod floor_planner; pub use floor_planner::single_pass::SimpleFloorPlanner; pub mod layouter; +mod table_layouter; + +pub use table_layouter::{SimpleTableLayouter, TableLayouter}; /// A chip implements a set of instructions that can be used by gadgets. /// @@ -324,6 +327,19 @@ impl<'r, F: Field> Region<'r, F> { }) } + /// Returns the value of the instance column's cell at absolute location `row`. + /// + /// This method is only provided for convenience; it does not create any constraints. + /// Callers still need to use [`Self::assign_advice_from_instance`] to constrain the + /// instance values in their circuit. + pub fn instance_value( + &mut self, + instance: Column, + row: usize, + ) -> Result, Error> { + self.region.instance_value(instance, row) + } + /// Assign a fixed value. /// /// Even though `to` has `FnMut` bounds, it is guaranteed to be called at most once. @@ -384,11 +400,11 @@ impl<'r, F: Field> Region<'r, F> { /// A lookup table in the circuit. 
#[derive(Debug)] pub struct Table<'r, F: Field> { - table: &'r mut dyn layouter::TableLayouter, + table: &'r mut dyn TableLayouter, } -impl<'r, F: Field> From<&'r mut dyn layouter::TableLayouter> for Table<'r, F> { - fn from(table: &'r mut dyn layouter::TableLayouter) -> Self { +impl<'r, F: Field> From<&'r mut dyn TableLayouter> for Table<'r, F> { + fn from(table: &'r mut dyn TableLayouter) -> Self { Table { table } } } diff --git a/halo2_proofs/src/circuit/floor_planner/single_pass.rs b/halo2_proofs/src/circuit/floor_planner/single_pass.rs index 7844dee158..4a6d78e7a8 100644 --- a/halo2_proofs/src/circuit/floor_planner/single_pass.rs +++ b/halo2_proofs/src/circuit/floor_planner/single_pass.rs @@ -2,8 +2,6 @@ use std::cmp; use std::collections::HashMap; use std::fmt; use std::marker::PhantomData; -use std::ops::Range; -use std::sync::{Arc, Mutex}; use std::time::Instant; use rayon::prelude::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; @@ -14,10 +12,10 @@ use ark_std::{end_timer, start_timer}; use crate::{ circuit::{ - layouter::{RegionColumn, RegionLayouter, RegionShape, TableLayouter}, + layouter::{RegionColumn, RegionLayouter, RegionShape, SyncDeps, TableLayouter}, + table_layouter::{compute_table_lengths, SimpleTableLayouter}, Cell, Layouter, Region, RegionIndex, RegionStart, Table, Value, }, - multicore, plonk::{ Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, Error, Fixed, FloorPlanner, Instance, Selector, TableColumn, @@ -33,7 +31,7 @@ use crate::{ pub struct SimpleFloorPlanner; impl FloorPlanner for SimpleFloorPlanner { - fn synthesize, C: Circuit>( + fn synthesize + SyncDeps, C: Circuit>( cs: &mut CS, circuit: &C, config: C::Config, @@ -99,7 +97,9 @@ impl<'a, F: Field, CS: Assignment + 'a> SingleChipLayouter<'a, F, CS> { } } -impl<'a, F: Field, CS: Assignment + 'a> Layouter for SingleChipLayouter<'a, F, CS> { +impl<'a, F: Field, CS: Assignment + 'a + SyncDeps> Layouter + for SingleChipLayouter<'a, F, CS> +{ type Root = Self; fn assign_region(&mut self, name: N, mut assignment: A) -> Result @@ -359,24 +359,7 @@ impl<'a, F: Field, CS: Assignment + 'a> Layouter for SingleChipLayouter<'a // Check that all table columns have the same length `first_unused`, // and all cells up to that length are assigned. - let first_unused = { - match default_and_assigned - .values() - .map(|(_, assigned)| { - if assigned.iter().all(|b| *b) { - Some(assigned.len()) - } else { - None - } - }) - .reduce(|acc, item| match (acc, item) { - (Some(a), Some(b)) if a == b => Some(a), - _ => None, - }) { - Some(Some(len)) => len, - _ => return Err(Error::Synthesis), // TODO better error - } - }; + let first_unused = compute_table_lengths(&default_and_assigned)?; // Record these columns so that we can prevent them from being used again. 
for column in default_and_assigned.keys() { @@ -457,7 +440,7 @@ impl<'r, 'a, F: Field, CS: Assignment + 'a> SingleChipLayouterRegion<'r, 'a, } } -impl<'r, 'a, F: Field, CS: Assignment + 'a> RegionLayouter +impl<'r, 'a, F: Field, CS: Assignment + 'a + SyncDeps> RegionLayouter for SingleChipLayouterRegion<'r, 'a, F, CS> { fn enable_selector<'v>( @@ -550,6 +533,14 @@ impl<'r, 'a, F: Field, CS: Assignment + 'a> RegionLayouter Ok((cell, value)) } + fn instance_value( + &mut self, + instance: Column, + row: usize, + ) -> Result, Error> { + self.layouter.cs.query_instance(instance, row) + } + fn assign_fixed<'v>( &'v mut self, annotation: &'v (dyn Fn() -> String + 'v), @@ -592,86 +583,6 @@ impl<'r, 'a, F: Field, CS: Assignment + 'a> RegionLayouter } } -/// The default value to fill a table column with. -/// -/// - The outer `Option` tracks whether the value in row 0 of the table column has been -/// assigned yet. This will always be `Some` once a valid table has been completely -/// assigned. -/// - The inner `Value` tracks whether the underlying `Assignment` is evaluating -/// witnesses or not. -type DefaultTableValue = Option>>; - -pub(crate) struct SimpleTableLayouter<'r, 'a, F: Field, CS: Assignment + 'a> { - cs: &'a mut CS, - used_columns: &'r [TableColumn], - // maps from a fixed column to a pair (default value, vector saying which rows are assigned) - pub(crate) default_and_assigned: HashMap, Vec)>, -} - -impl<'r, 'a, F: Field, CS: Assignment + 'a> fmt::Debug for SimpleTableLayouter<'r, 'a, F, CS> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SimpleTableLayouter") - .field("used_columns", &self.used_columns) - .field("default_and_assigned", &self.default_and_assigned) - .finish() - } -} - -impl<'r, 'a, F: Field, CS: Assignment + 'a> SimpleTableLayouter<'r, 'a, F, CS> { - pub(crate) fn new(cs: &'a mut CS, used_columns: &'r [TableColumn]) -> Self { - SimpleTableLayouter { - cs, - used_columns, - default_and_assigned: HashMap::default(), - } - } -} - -impl<'r, 'a, F: Field, CS: Assignment + 'a> TableLayouter - for SimpleTableLayouter<'r, 'a, F, CS> -{ - fn assign_cell<'v>( - &'v mut self, - annotation: &'v (dyn Fn() -> String + 'v), - column: TableColumn, - offset: usize, - to: &'v mut (dyn FnMut() -> Value> + 'v), - ) -> Result<(), Error> { - if self.used_columns.contains(&column) { - return Err(Error::Synthesis); // TODO better error - } - - let entry = self.default_and_assigned.entry(column).or_default(); - - let mut value = Value::unknown(); - self.cs.assign_fixed( - annotation, - column.inner(), - offset, // tables are always assigned starting at row 0 - || { - let res = to(); - value = res; - res - }, - )?; - - match (entry.0.is_none(), offset) { - // Use the value at offset 0 as the default value for this table column. - (true, 0) => entry.0 = Some(value), - // Since there is already an existing default value for this table column, - // the caller should not be attempting to assign another value at offset 0. 
- (false, 0) => return Err(Error::Synthesis), // TODO better error - _ => (), - } - if entry.1.len() <= offset { - entry.1.resize(offset + 1, false); - } - entry.1[offset] = true; - - Ok(()) - } -} - #[cfg(test)] mod tests { use halo2curves::pasta::vesta; @@ -689,6 +600,8 @@ mod tests { impl Circuit for MyCircuit { type Config = Column; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { MyCircuit {} diff --git a/halo2_proofs/src/circuit/floor_planner/v1.rs b/halo2_proofs/src/circuit/floor_planner/v1.rs index 8a366d84cf..642be88d54 100644 --- a/halo2_proofs/src/circuit/floor_planner/v1.rs +++ b/halo2_proofs/src/circuit/floor_planner/v1.rs @@ -4,8 +4,8 @@ use ff::Field; use crate::{ circuit::{ - floor_planner::single_pass::SimpleTableLayouter, - layouter::{RegionColumn, RegionLayouter, RegionShape, TableLayouter}, + layouter::{RegionColumn, RegionLayouter, RegionShape, SyncDeps, TableLayouter}, + table_layouter::{compute_table_lengths, SimpleTableLayouter}, Cell, Layouter, Region, RegionIndex, RegionStart, Table, Value, }, plonk::{ @@ -43,7 +43,7 @@ impl<'a, F: Field, CS: Assignment + 'a> fmt::Debug for V1Plan<'a, F, CS> { } } -impl<'a, F: Field, CS: Assignment> V1Plan<'a, F, CS> { +impl<'a, F: Field, CS: Assignment + SyncDeps> V1Plan<'a, F, CS> { /// Creates a new v1 layouter. pub fn new(cs: &'a mut CS) -> Result { let ret = V1Plan { @@ -57,7 +57,7 @@ impl<'a, F: Field, CS: Assignment> V1Plan<'a, F, CS> { } impl FloorPlanner for V1 { - fn synthesize, C: Circuit>( + fn synthesize + SyncDeps, C: Circuit>( cs: &mut CS, circuit: &C, config: C::Config, @@ -90,8 +90,8 @@ impl FloorPlanner for V1 { // - Determine how many rows our planned circuit will require. let first_unassigned_row = column_allocations - .iter() - .map(|(_, a)| a.unbounded_interval_start()) + .values() + .map(|a| a.unbounded_interval_start()) .max() .unwrap_or(0); @@ -128,8 +128,7 @@ impl FloorPlanner for V1 { if constant_positions().count() < plan.constants.len() { return Err(Error::NotEnoughColumnsForConstants); } - for ((fixed_column, fixed_row), (value, advice)) in - constant_positions().zip(plan.constants.into_iter()) + for ((fixed_column, fixed_row), (value, advice)) in constant_positions().zip(plan.constants) { plan.cs.assign_fixed( || format!("Constant({:?})", value.evaluate()), @@ -169,7 +168,7 @@ impl<'p, 'a, F: Field, CS: Assignment + 'a> V1Pass<'p, 'a, F, CS> { } } -impl<'p, 'a, F: Field, CS: Assignment + 'a> Layouter for V1Pass<'p, 'a, F, CS> { +impl<'p, 'a, F: Field, CS: Assignment + SyncDeps> Layouter for V1Pass<'p, 'a, F, CS> { type Root = Self; fn assign_region(&mut self, name: N, assignment: A) -> Result @@ -293,7 +292,7 @@ pub struct AssignmentPass<'p, 'a, F: Field, CS: Assignment + 'a> { region_index: usize, } -impl<'p, 'a, F: Field, CS: Assignment + 'a> AssignmentPass<'p, 'a, F, CS> { +impl<'p, 'a, F: Field, CS: Assignment + SyncDeps> AssignmentPass<'p, 'a, F, CS> { fn new(plan: &'p mut V1Plan<'a, F, CS>) -> Self { AssignmentPass { plan, @@ -342,24 +341,7 @@ impl<'p, 'a, F: Field, CS: Assignment + 'a> AssignmentPass<'p, 'a, F, CS> { // Check that all table columns have the same length `first_unused`, // and all cells up to that length are assigned. 
- let first_unused = { - match default_and_assigned - .values() - .map(|(_, assigned)| { - if assigned.iter().all(|b| *b) { - Some(assigned.len()) - } else { - None - } - }) - .reduce(|acc, item| match (acc, item) { - (Some(a), Some(b)) if a == b => Some(a), - _ => None, - }) { - Some(Some(len)) => len, - _ => return Err(Error::Synthesis), // TODO better error - } - }; + let first_unused = compute_table_lengths(&default_and_assigned)?; // Record these columns so that we can prevent them from being used again. for column in default_and_assigned.keys() { @@ -413,7 +395,7 @@ impl<'r, 'a, F: Field, CS: Assignment + 'a> V1Region<'r, 'a, F, CS> { } } -impl<'r, 'a, F: Field, CS: Assignment + 'a> RegionLayouter for V1Region<'r, 'a, F, CS> { +impl<'r, 'a, F: Field, CS: Assignment + SyncDeps> RegionLayouter for V1Region<'r, 'a, F, CS> { fn enable_selector<'v>( &'v mut self, annotation: &'v (dyn Fn() -> String + 'v), @@ -496,6 +478,14 @@ impl<'r, 'a, F: Field, CS: Assignment + 'a> RegionLayouter for V1Region<'r Ok((cell, value)) } + fn instance_value( + &mut self, + instance: Column, + row: usize, + ) -> Result, Error> { + self.plan.cs.query_instance(instance, row) + } + fn assign_fixed<'v>( &'v mut self, annotation: &'v (dyn Fn() -> String + 'v), @@ -562,6 +552,8 @@ mod tests { impl Circuit for MyCircuit { type Config = Column; type FloorPlanner = super::V1; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { MyCircuit {} diff --git a/halo2_proofs/src/circuit/floor_planner/v1/strategy.rs b/halo2_proofs/src/circuit/floor_planner/v1/strategy.rs index f9acd0f57d..71745de245 100644 --- a/halo2_proofs/src/circuit/floor_planner/v1/strategy.rs +++ b/halo2_proofs/src/circuit/floor_planner/v1/strategy.rs @@ -199,7 +199,7 @@ pub fn slot_in_biggest_advice_first( region_shapes: Vec, ) -> (Vec, CircuitAllocations) { let mut sorted_regions: Vec<_> = region_shapes.into_iter().collect(); - sorted_regions.sort_unstable_by_key(|shape| { + let sort_key = |shape: &RegionShape| { // Count the number of advice columns let advice_cols = shape .columns() @@ -211,7 +211,24 @@ pub fn slot_in_biggest_advice_first( .count(); // Sort by advice area (since this has the most contention). advice_cols * shape.row_count() - }); + }; + + // This used to incorrectly use `sort_unstable_by_key` with non-unique keys, which gave + // output that differed between 32-bit and 64-bit platforms, and potentially between Rust + // versions. + // We now use `sort_by_cached_key` with non-unique keys, and rely on `region_shapes` + // being sorted by region index (which we also rely on below to return `RegionStart`s + // in the correct order). + #[cfg(not(feature = "floor-planner-v1-legacy-pdqsort"))] + sorted_regions.sort_by_cached_key(sort_key); + + // To preserve compatibility, when the "floor-planner-v1-legacy-pdqsort" feature is enabled, + // we use a copy of the pdqsort implementation from the Rust 1.56.1 standard library, fixed + // to its behaviour on 64-bit platforms. + // https://github.com/rust-lang/rust/blob/1.56.1/library/core/src/slice/mod.rs#L2365-L2402 + #[cfg(feature = "floor-planner-v1-legacy-pdqsort")] + halo2_legacy_pdqsort::sort::quicksort(&mut sorted_regions, |a, b| sort_key(a).lt(&sort_key(b))); + sorted_regions.reverse(); // Lay out the sorted regions. 
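A minimal standalone sketch of the determinism concern described in the comment above; the region names and advice areas here are made up for illustration and are not part of the patch. With non-unique sort keys, an unstable sort may order equal-keyed regions differently across platforms or Rust versions, while `sort_by_cached_key` keeps equal-keyed elements in their original (region-index) order.

    fn main() {
        // "region 0" and "region 1" have equal advice area; their relative order must not
        // depend on the platform or Rust version.
        let mut shapes = vec![("region 0", 12usize), ("region 1", 12), ("region 2", 8)];
        shapes.sort_by_cached_key(|&(_, advice_area)| advice_area);
        // Ties keep their original (region-index) order:
        println!("{:?}", shapes); // [("region 2", 8), ("region 0", 12), ("region 1", 12)]
    }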
diff --git a/halo2_proofs/src/circuit/layouter.rs b/halo2_proofs/src/circuit/layouter.rs index 4c9572e16b..1b7470776f 100644 --- a/halo2_proofs/src/circuit/layouter.rs +++ b/halo2_proofs/src/circuit/layouter.rs @@ -6,8 +6,23 @@ use std::fmt; use ff::Field; +pub use super::table_layouter::TableLayouter; use super::{Cell, RegionIndex, Value}; -use crate::plonk::{Advice, Any, Assigned, Column, Error, Fixed, Instance, Selector, TableColumn}; +use crate::plonk::{Advice, Any, Assigned, Column, Error, Fixed, Instance, Selector}; + +/// Intermediate trait requirements for [`RegionLayouter`] when thread-safe regions are enabled. +#[cfg(feature = "thread-safe-region")] +pub trait SyncDeps: Send + Sync {} + +#[cfg(feature = "thread-safe-region")] +impl SyncDeps for T {} + +/// Intermediate trait requirements for [`RegionLayouter`]. +#[cfg(not(feature = "thread-safe-region"))] +pub trait SyncDeps {} + +#[cfg(not(feature = "thread-safe-region"))] +impl SyncDeps for T {} /// Helper trait for implementing a custom [`Layouter`]. /// @@ -39,7 +54,7 @@ use crate::plonk::{Advice, Any, Assigned, Column, Error, Fixed, Instance, Select /// `Chip::Config`). /// /// [`Layouter`]: super::Layouter -pub trait RegionLayouter: fmt::Debug { +pub trait RegionLayouter: fmt::Debug + SyncDeps { /// Enables a selector at the given offset. fn enable_selector<'v>( &'v mut self, @@ -90,7 +105,8 @@ pub trait RegionLayouter: fmt::Debug { /// Assign the value of the instance column's cell at absolute location /// `row` to the column `advice` at `offset` within this region. /// - /// Returns the advice cell, and its value if known. + /// Returns the advice cell that has been equality-constrained to the + /// instance cell, and its value if known. fn assign_advice_from_instance<'v>( &mut self, annotation: &'v (dyn Fn() -> String + 'v), @@ -100,7 +116,11 @@ pub trait RegionLayouter: fmt::Debug { offset: usize, ) -> Result<(Cell, Value), Error>; - /// Assign a fixed value + /// Returns the value of the instance column's cell at absolute location `row`. + fn instance_value(&mut self, instance: Column, row: usize) + -> Result, Error>; + + /// Assigns a fixed value fn assign_fixed<'v>( &'v mut self, annotation: &'v (dyn Fn() -> String + 'v), @@ -123,24 +143,6 @@ pub trait RegionLayouter: fmt::Debug { fn global_offset(&self, row_offset: usize) -> usize; } -/// Helper trait for implementing a custom [`Layouter`]. -/// -/// This trait is used for implementing table assignments. -/// -/// [`Layouter`]: super::Layouter -pub trait TableLayouter: fmt::Debug { - /// Assigns a fixed value to a table cell. - /// - /// Returns an error if the table cell has already been assigned to. - fn assign_cell<'v>( - &'v mut self, - annotation: &'v (dyn Fn() -> String + 'v), - column: TableColumn, - offset: usize, - to: &'v mut (dyn FnMut() -> Value> + 'v), - ) -> Result<(), Error>; -} - /// The shape of a region. For a region at a certain index, we track /// the set of columns it uses as well as the number of rows it uses. #[derive(Clone, Debug)] @@ -285,6 +287,14 @@ impl RegionLayouter for RegionShape { )) } + fn instance_value( + &mut self, + _instance: Column, + _row: usize, + ) -> Result, Error> { + Ok(Value::unknown()) + } + fn assign_fixed<'v>( &'v mut self, _: &'v (dyn Fn() -> String + 'v), diff --git a/halo2_proofs/src/circuit/table_layouter.rs b/halo2_proofs/src/circuit/table_layouter.rs new file mode 100644 index 0000000000..5efe11735c --- /dev/null +++ b/halo2_proofs/src/circuit/table_layouter.rs @@ -0,0 +1,413 @@ +//! 
Implementations of common table layouters. + +use std::{ + collections::HashMap, + fmt::{self, Debug}, +}; + +use ff::Field; + +use crate::plonk::{Assigned, Assignment, Error, TableColumn, TableError}; + +use super::Value; + +/// Helper trait for implementing a custom [`Layouter`]. +/// +/// This trait is used for implementing table assignments. +/// +/// [`Layouter`]: super::Layouter +pub trait TableLayouter: std::fmt::Debug { + /// Assigns a fixed value to a table cell. + /// + /// Returns an error if the table cell has already been assigned to. + fn assign_cell<'v>( + &'v mut self, + annotation: &'v (dyn Fn() -> String + 'v), + column: TableColumn, + offset: usize, + to: &'v mut (dyn FnMut() -> Value> + 'v), + ) -> Result<(), Error>; +} + +/// The default value to fill a table column with. +/// +/// - The outer `Option` tracks whether the value in row 0 of the table column has been +/// assigned yet. This will always be `Some` once a valid table has been completely +/// assigned. +/// - The inner `Value` tracks whether the underlying `Assignment` is evaluating +/// witnesses or not. +type DefaultTableValue = Option>>; + +/// A table layouter that can be used to assign values to a table. +pub struct SimpleTableLayouter<'r, 'a, F: Field, CS: Assignment + 'a> { + cs: &'a mut CS, + used_columns: &'r [TableColumn], + /// maps from a fixed column to a pair (default value, vector saying which rows are assigned) + pub default_and_assigned: HashMap, Vec)>, +} + +impl<'r, 'a, F: Field, CS: Assignment + 'a> fmt::Debug for SimpleTableLayouter<'r, 'a, F, CS> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SimpleTableLayouter") + .field("used_columns", &self.used_columns) + .field("default_and_assigned", &self.default_and_assigned) + .finish() + } +} + +impl<'r, 'a, F: Field, CS: Assignment + 'a> SimpleTableLayouter<'r, 'a, F, CS> { + /// Returns a new SimpleTableLayouter + pub fn new(cs: &'a mut CS, used_columns: &'r [TableColumn]) -> Self { + SimpleTableLayouter { + cs, + used_columns, + default_and_assigned: HashMap::default(), + } + } +} + +impl<'r, 'a, F: Field, CS: Assignment + 'a> TableLayouter + for SimpleTableLayouter<'r, 'a, F, CS> +{ + fn assign_cell<'v>( + &'v mut self, + annotation: &'v (dyn Fn() -> String + 'v), + column: TableColumn, + offset: usize, + to: &'v mut (dyn FnMut() -> Value> + 'v), + ) -> Result<(), Error> { + if self.used_columns.contains(&column) { + return Err(Error::TableError(TableError::UsedColumn(column))); + } + + let entry = self.default_and_assigned.entry(column).or_default(); + + let mut value = Value::unknown(); + self.cs.assign_fixed( + annotation, + column.inner(), + offset, // tables are always assigned starting at row 0 + || { + let res = to(); + value = res; + res + }, + )?; + + match (entry.0.is_none(), offset) { + // Use the value at offset 0 as the default value for this table column. + (true, 0) => entry.0 = Some(value), + // Since there is already an existing default value for this table column, + // the caller should not be attempting to assign another value at offset 0. 
+ (false, 0) => { + return Err(Error::TableError(TableError::OverwriteDefault( + column, + format!("{:?}", entry.0.unwrap()), + format!("{:?}", value), + ))) + } + _ => (), + } + if entry.1.len() <= offset { + entry.1.resize(offset + 1, false); + } + entry.1[offset] = true; + + Ok(()) + } +} + +pub(crate) fn compute_table_lengths( + default_and_assigned: &HashMap, Vec)>, +) -> Result { + let column_lengths: Result, Error> = default_and_assigned + .iter() + .map(|(col, (default_value, assigned))| { + if default_value.is_none() || assigned.is_empty() { + return Err(Error::TableError(TableError::ColumnNotAssigned(*col))); + } + if assigned.iter().all(|b| *b) { + // All values in the column have been assigned + Ok((col, assigned.len())) + } else { + Err(Error::TableError(TableError::ColumnNotAssigned(*col))) + } + }) + .collect(); + let column_lengths = column_lengths?; + column_lengths + .into_iter() + .try_fold((None, 0), |acc, (col, col_len)| { + if acc.1 == 0 || acc.1 == col_len { + Ok((Some(*col), col_len)) + } else { + let mut cols = [(*col, col_len), (acc.0.unwrap(), acc.1)]; + cols.sort(); + Err(Error::TableError(TableError::UnevenColumnLengths( + cols[0], cols[1], + ))) + } + }) + .map(|col_len| col_len.1) +} + +#[cfg(test)] +mod tests { + use halo2curves::pasta::Fp; + + use crate::{ + circuit::{Layouter, SimpleFloorPlanner}, + dev::MockProver, + plonk::{Circuit, ConstraintSystem}, + poly::Rotation, + }; + + use super::*; + + #[test] + fn table_no_default() { + const K: u32 = 4; + + #[derive(Clone)] + struct FaultyCircuitConfig { + table: TableColumn, + } + + struct FaultyCircuit; + + impl Circuit for FaultyCircuit { + type Config = FaultyCircuitConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let a = meta.advice_column(); + let table = meta.lookup_table_column(); + + meta.lookup("", |cells| { + let a = cells.query_advice(a, Rotation::cur()); + vec![(a, table)] + }); + + Self::Config { table } + } + + fn synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + layouter.assign_table( + || "duplicate assignment", + |mut table| { + table.assign_cell( + || "default", + config.table, + 1, + || Value::known(Fp::zero()), + ) + }, + ) + } + } + + let prover = MockProver::run(K, &FaultyCircuit, vec![]); + assert_eq!( + format!("{}", prover.unwrap_err()), + "TableColumn { inner: Column { index: 0, column_type: Fixed } } not fully assigned. Help: assign a value at offset 0." 
+ ); + } + + #[test] + fn table_overwrite_default() { + const K: u32 = 4; + + #[derive(Clone)] + struct FaultyCircuitConfig { + table: TableColumn, + } + + struct FaultyCircuit; + + impl Circuit for FaultyCircuit { + type Config = FaultyCircuitConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let a = meta.advice_column(); + let table = meta.lookup_table_column(); + + meta.lookup("", |cells| { + let a = cells.query_advice(a, Rotation::cur()); + vec![(a, table)] + }); + + Self::Config { table } + } + + fn synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + layouter.assign_table( + || "duplicate assignment", + |mut table| { + table.assign_cell( + || "default", + config.table, + 0, + || Value::known(Fp::zero()), + )?; + table.assign_cell( + || "duplicate", + config.table, + 0, + || Value::known(Fp::zero()), + ) + }, + ) + } + } + + let prover = MockProver::run(K, &FaultyCircuit, vec![]); + assert_eq!( + format!("{}", prover.unwrap_err()), + "Attempted to overwrite default value Value { inner: Some(Trivial(0x0000000000000000000000000000000000000000000000000000000000000000)) } with Value { inner: Some(Trivial(0x0000000000000000000000000000000000000000000000000000000000000000)) } in TableColumn { inner: Column { index: 0, column_type: Fixed } }" + ); + } + + #[test] + fn table_reuse_column() { + const K: u32 = 4; + + #[derive(Clone)] + struct FaultyCircuitConfig { + table: TableColumn, + } + + struct FaultyCircuit; + + impl Circuit for FaultyCircuit { + type Config = FaultyCircuitConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let a = meta.advice_column(); + let table = meta.lookup_table_column(); + + meta.lookup("", |cells| { + let a = cells.query_advice(a, Rotation::cur()); + vec![(a, table)] + }); + + Self::Config { table } + } + + fn synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + layouter.assign_table( + || "first assignment", + |mut table| { + table.assign_cell( + || "default", + config.table, + 0, + || Value::known(Fp::zero()), + ) + }, + )?; + + layouter.assign_table( + || "reuse", + |mut table| { + table.assign_cell(|| "reuse", config.table, 1, || Value::known(Fp::zero())) + }, + ) + } + } + + let prover = MockProver::run(K, &FaultyCircuit, vec![]); + assert_eq!( + format!("{}", prover.unwrap_err()), + "TableColumn { inner: Column { index: 0, column_type: Fixed } } has already been used" + ); + } + + #[test] + fn table_uneven_columns() { + const K: u32 = 4; + + #[derive(Clone)] + struct FaultyCircuitConfig { + table: (TableColumn, TableColumn), + } + + struct FaultyCircuit; + + impl Circuit for FaultyCircuit { + type Config = FaultyCircuitConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let a = meta.advice_column(); + let table = (meta.lookup_table_column(), meta.lookup_table_column()); + meta.lookup("", |cells| { + let a = cells.query_advice(a, Rotation::cur()); + + vec![(a.clone(), table.0), (a, table.1)] + }); + + Self::Config { table } + } + + fn 
synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + layouter.assign_table( + || "table with uneven columns", + |mut table| { + table.assign_cell(|| "", config.table.0, 0, || Value::known(Fp::zero()))?; + table.assign_cell(|| "", config.table.0, 1, || Value::known(Fp::zero()))?; + + table.assign_cell(|| "", config.table.1, 0, || Value::known(Fp::zero())) + }, + ) + } + } + + let prover = MockProver::run(K, &FaultyCircuit, vec![]); + assert_eq!( + format!("{}", prover.unwrap_err()), + "TableColumn { inner: Column { index: 0, column_type: Fixed } } has length 2 while TableColumn { inner: Column { index: 1, column_type: Fixed } } has length 1" + ); + } +} diff --git a/halo2_proofs/src/circuit/value.rs b/halo2_proofs/src/circuit/value.rs index 1a1bf4f9a2..7052d8bc06 100644 --- a/halo2_proofs/src/circuit/value.rs +++ b/halo2_proofs/src/circuit/value.rs @@ -68,6 +68,11 @@ impl Value { } } + /// ONLY FOR INTERNAL CRATE USAGE; DO NOT EXPOSE! + pub(crate) fn into_option(self) -> Option { + self.inner + } + /// Enforces an assertion on the contained value, if known. /// /// The assertion is ignored if `self` is [`Value::unknown()`]. Do not try to enforce diff --git a/halo2_proofs/src/dev.rs b/halo2_proofs/src/dev.rs index a83478366c..2eb4e063e1 100644 --- a/halo2_proofs/src/dev.rs +++ b/halo2_proofs/src/dev.rs @@ -2,35 +2,32 @@ use std::collections::HashMap; use std::collections::HashSet; -use std::fmt; use std::iter; -use std::ops::{Add, DerefMut, Mul, Neg, Range}; +use std::ops::{Add, Mul, Neg, Range}; use std::sync::Arc; -use std::time::{Duration, Instant}; use blake2b_simd::blake2b; +use ff::Field; use ff::FromUniformBytes; -use ff::{BatchInvert, Field}; -use group::Group; use crate::plonk::permutation::keygen::Assembly; -use crate::plonk::sealed::SealedPhase; -use crate::plonk::FirstPhase; -use crate::plonk::ThirdPhase; use crate::{ circuit, plonk::{ - permutation, Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ColumnType, - ConstraintSystem, Error, Expression, Fixed, FloorPlanner, Instance, Phase, Selector, - VirtualCell, + permutation, sealed, Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, + ConstraintSystem, Error, Expression, Fixed, FloorPlanner, Instance, Selector, }, - poly::Rotation, }; -use rayon::{ - iter::{ - IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator, - }, - slice::ParallelSliceMut, + +#[cfg(feature = "multiphase-mock-prover")] +use crate::{plonk::sealed::SealedPhase, plonk::FirstPhase, plonk::Phase}; +#[cfg(feature = "multiphase-mock-prover")] +use ff::BatchInvert; + +#[cfg(feature = "multicore")] +use crate::multicore::{ + IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator, + ParallelSliceMut, }; pub mod metadata; @@ -47,11 +44,12 @@ mod gates; pub use gates::CircuitGates; use crate::two_dim_vec_to_vec_of_slice; +mod tfp; +pub use tfp::TracingFloorPlanner; #[cfg(feature = "dev-graph")] mod graph; -use crate::circuit::Cell; use crate::helpers::CopyCell; #[cfg(feature = "dev-graph")] #[cfg_attr(docsrs, doc(cfg(feature = "dev-graph")))] @@ -98,7 +96,7 @@ impl Region { } /// The value of a particular cell within the circuit. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Eq)] pub enum CellValue { /// An unassigned cell. 
Unassigned, @@ -344,6 +342,8 @@ impl Mul for Value { /// impl Circuit for MyCircuit { /// type Config = MyConfig; /// type FloorPlanner = SimpleFloorPlanner; +/// #[cfg(feature = "circuit-params")] +/// type Params = (); /// /// fn without_witnesses(&self) -> Self { /// Self::default() @@ -411,13 +411,15 @@ impl Mul for Value { /// }]) /// ); /// -/// // If we provide a too-small K, we get an error. -/// assert!(matches!( -/// MockProver::::run(2, &circuit, vec![]).unwrap_err(), -/// Error::NotEnoughRowsAvailable { -/// current_k, -/// } if current_k == 2, -/// )); +/// // If we provide a too-small K, we get a panic. +/// use std::panic; +/// let result = panic::catch_unwind(|| { +/// MockProver::::run(2, &circuit, vec![]).unwrap_err() +/// }); +/// assert_eq!( +/// result.unwrap_err().downcast_ref::().unwrap(), +/// "n=4, minimum_rows=8, k=2" +/// ); /// ``` #[derive(Debug)] pub struct MockProver<'a, F: Field> { @@ -440,7 +442,7 @@ pub struct MockProver<'a, F: Field> { // This field is used only if the "phase_check" feature is turned on. advice_prev: Vec>>, // The instance cells in the circuit, arranged as [column][row]. - instance: Vec>, + instance: Vec>>, selectors_vec: Arc>>, selectors: Vec<&'a mut [bool]>, @@ -455,7 +457,29 @@ pub struct MockProver<'a, F: Field> { // A range of available rows for assignment and copies. usable_rows: Range, - current_phase: crate::plonk::sealed::Phase, + current_phase: sealed::Phase, // crate::plonk::sealed::Phase, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum InstanceValue { + Assigned(F), + Padding, +} + +impl InstanceValue { + fn value(&self) -> F { + match self { + InstanceValue::Assigned(v) => *v, + InstanceValue::Padding => F::ZERO, + } + } +} + +#[cfg(feature = "multiphase-mock-prover")] +impl<'a, F: Field> MockProver<'a, F> { + fn in_phase(&self, phase: P) -> bool { + self.current_phase == phase.to_sealed() + } } impl<'a, F: Field> Assignment for MockProver<'a, F> { @@ -464,6 +488,11 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { NR: Into, N: FnOnce() -> NR, { + #[cfg(feature = "multiphase-mock-prover")] + if !self.in_phase(FirstPhase) { + return; + } + assert!(self.current_region.is_none()); self.current_region = Some(Region { name: name().into(), @@ -477,6 +506,11 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { } fn exit_region(&mut self) { + #[cfg(feature = "multiphase-mock-prover")] + if !self.in_phase(FirstPhase) { + return; + } + self.regions.push(self.current_region.take().unwrap()); } @@ -485,6 +519,11 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { A: FnOnce() -> AR, AR: Into, { + #[cfg(feature = "multiphase-mock-prover")] + if !self.in_phase(FirstPhase) { + return; + } + if let Some(region) = self.current_region.as_mut() { region .annotations @@ -497,6 +536,21 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { A: FnOnce() -> AR, AR: Into, { + #[cfg(feature = "multiphase-mock-prover")] + { + if !self.in_phase(FirstPhase) { + return Ok(()); + } + assert!( + self.usable_rows.contains(&row), + "row={} not in usable_rows={:?}, k={}", + row, + self.usable_rows, + self.k, + ); + } + + #[cfg(not(feature = "multiphase-mock-prover"))] if !self.usable_rows.contains(&row) { return Err(Error::not_enough_rows_available(self.k)); } @@ -704,15 +758,26 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { column: Column, row: usize, ) -> Result, Error> { + #[cfg(feature = "multiphase-mock-prover")] + assert!( + self.usable_rows.contains(&row), + "row={}, usable_rows={:?}, k={}", + row, + self.usable_rows, + 
self.k, + ); + + #[cfg(not(feature = "multiphase-mock-prover"))] if !self.usable_rows.contains(&row) { return Err(Error::not_enough_rows_available(self.k)); } - self.instance + Ok(self + .instance .get(column.index()) .and_then(|column| column.get(row)) - .map(|v| circuit::Value::known(*v)) - .ok_or(Error::BoundsFailure) + .map(|v| circuit::Value::known(v.value())) + .expect("bound failure")) } fn assign_advice( @@ -733,6 +798,26 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { return Ok(()); } + #[cfg(feature = "multiphase-mock-prover")] + if self.in_phase(FirstPhase) { + assert!( + self.usable_rows.contains(&row), + "row={}, usable_rows={:?}, k={}", + row, + self.usable_rows, + self.k, + ); + if let Some(region) = self.current_region.as_mut() { + region.update_extent(column.into(), row); + region + .cells + .entry((column.into(), row)) + .and_modify(|count| *count += 1) + .or_default(); + } + } + + #[cfg(not(feature = "multiphase-mock-prover"))] if !self.usable_rows.contains(&row) { return Err(Error::not_enough_rows_available(self.k)); } @@ -747,6 +832,7 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { )); } + #[cfg(not(feature = "multiphase-mock-prover"))] if let Some(region) = self.current_region.as_mut() { region.update_extent(column.into(), row); region @@ -775,11 +861,23 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { #[cfg(feature = "mock-batch-inv")] let assigned = CellValue::from(val_res?); - *self - .advice - .get_mut(column.index()) - .and_then(|v| v.get_mut(row - self.rw_rows.start)) - .ok_or(Error::BoundsFailure)? = assigned; + #[cfg(feature = "multiphase-mock-prover")] + if self.in_phase(column.column_type().phase) { + *self + .advice + .get_mut(column.index()) + .and_then(|v| v.get_mut(row - self.rw_rows.start)) + .expect("bounds failure") = assigned; + } + + #[cfg(not(feature = "multiphase-mock-prover"))] + { + *self + .advice + .get_mut(column.index()) + .and_then(|v| v.get_mut(row - self.rw_rows.start)) + .ok_or(Error::BoundsFailure)? 
= assigned; + } #[cfg(feature = "phase-check")] // if false && self.current_phase.0 > column.column_type().phase.0 { @@ -814,10 +912,25 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { A: FnOnce() -> AR, AR: Into, { + #[cfg(not(feature = "multiphase-mock-prover"))] if !self.usable_rows.contains(&row) { return Err(Error::not_enough_rows_available(self.k)); } + #[cfg(feature = "multiphase-mock-prover")] + { + if !self.in_phase(FirstPhase) { + return Ok(()); + } + + assert!( + self.usable_rows.contains(&row), + "row={}, usable_rows={:?}, k={}", + row, + self.usable_rows, + self.k, + ); + } if !self.rw_rows.contains(&row) { return Err(Error::InvalidRange( row, @@ -865,10 +978,27 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { right_column: Column, right_row: usize, ) -> Result<(), crate::plonk::Error> { + #[cfg(not(feature = "multiphase-mock-prover"))] if !self.usable_rows.contains(&left_row) || !self.usable_rows.contains(&right_row) { return Err(Error::not_enough_rows_available(self.k)); } + #[cfg(feature = "multiphase-mock-prover")] + { + if !self.in_phase(FirstPhase) { + return Ok(()); + } + + assert!( + self.usable_rows.contains(&left_row) && self.usable_rows.contains(&right_row), + "left_row={}, right_row={}, usable_rows={:?}, k={}", + left_row, + right_row, + self.usable_rows, + self.k, + ); + } + match self.permutation.as_mut() { Some(permutation) => permutation.copy(left_column, left_row, right_column, right_row), None => { @@ -896,9 +1026,24 @@ impl<'a, F: Field> Assignment for MockProver<'a, F> { from_row: usize, to: circuit::Value>, ) -> Result<(), Error> { + #[cfg(not(feature = "multiphase-mock-prover"))] if !self.usable_rows.contains(&from_row) { return Err(Error::not_enough_rows_available(self.k)); } + #[cfg(feature = "multiphase-mock-prover")] + { + if !self.in_phase(FirstPhase) { + return Ok(()); + } + + assert!( + self.usable_rows.contains(&from_row), + "row={}, usable_rows={:?}, k={}", + from_row, + self.usable_rows, + self.k, + ); + } for row in self.usable_rows.clone().skip(from_row) { self.assign_fixed(|| "", col, row, || to)?; @@ -939,29 +1084,41 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { let n = 1 << k; let mut cs = ConstraintSystem::default(); + #[cfg(feature = "circuit-params")] + let config = ConcreteCircuit::configure_with_params(&mut cs, circuit.params()); + #[cfg(not(feature = "circuit-params"))] let config = ConcreteCircuit::configure(&mut cs); let cs = cs.chunk_lookups(); - let cs = cs; - if n < cs.minimum_rows() { - return Err(Error::not_enough_rows_available(k)); - } + assert!( + n >= cs.minimum_rows(), + "n={}, minimum_rows={}, k={}", + n, + cs.minimum_rows(), + k, + ); - if instance.len() != cs.num_instance_columns { - return Err(Error::InvalidInstances); - } + assert_eq!(instance.len(), cs.num_instance_columns); let instance = instance .into_iter() - .map(|mut instance| { - if instance.len() > n - (cs.blinding_factors() + 1) { - return Err(Error::InstanceTooLarge); + .map(|instance| { + assert!( + instance.len() <= n - (cs.blinding_factors() + 1), + "instance.len={}, n={}, cs.blinding_factors={}", + instance.len(), + n, + cs.blinding_factors() + ); + + let mut instance_values = vec![InstanceValue::Padding; n]; + for (idx, value) in instance.into_iter().enumerate() { + instance_values[idx] = InstanceValue::Assigned(value); } - instance.resize(n, F::ZERO); - Ok(instance) + instance_values }) - .collect::, _>>()?; + .collect::>(); // Fixed columns contain no blinding factors. 
let fixed_vec = Arc::new(vec![vec![CellValue::Unassigned; n]; cs.num_fixed_columns]); @@ -1084,7 +1241,20 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { #[cfg(not(feature = "phase-check"))] { - let syn_time = Instant::now(); + let syn_time = std::time::Instant::now(); + + #[cfg(feature = "multiphase-mock-prover")] + for current_phase in prover.cs.phases() { + prover.current_phase = current_phase; + ConcreteCircuit::FloorPlanner::synthesize( + &mut prover, + circuit, + config.clone(), + constants.clone(), + )?; + } + + #[cfg(not(feature = "multiphase-mock-prover"))] ConcreteCircuit::FloorPlanner::synthesize(&mut prover, circuit, config, constants)?; log::info!("MockProver synthesize took {:?}", syn_time.elapsed()); } @@ -1126,6 +1296,9 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { }; debug_assert_eq!(Arc::strong_count(&prover.fixed_vec), 1); + #[cfg(feature = "thread-safe-region")] + prover.permutation.as_mut().unwrap().build_ordered_mapping(); + Ok(prover) } @@ -1192,17 +1365,42 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { // Determine where this cell should have been assigned. let cell_row = ((gate_row + n + cell.rotation.0) % n) as usize; - // Check that it was assigned! - if r.cells.get(&(cell.column, cell_row)).is_some() { - None - } else { - Some(VerifyFailure::CellNotAssigned { - gate: (gate_index, gate.name()).into(), - region: (r_i, r.name.clone(), r.annotations.clone()).into(), - gate_offset: *selector_row, - column: cell.column, - offset: cell_row as isize - r.rows.unwrap().0 as isize, - }) + match cell.column.column_type() { + Any::Instance => { + // Handle instance cells, which are not in the region. + let instance_value = + &self.instance[cell.column.index()][cell_row]; + match instance_value { + InstanceValue::Assigned(_) => None, + _ => Some(VerifyFailure::InstanceCellNotAssigned { + gate: (gate_index, gate.name()).into(), + region: (r_i, r.name.clone()).into(), + gate_offset: *selector_row, + column: cell.column.try_into().unwrap(), + row: cell_row, + }), + } + } + _ => { + // Check that it was assigned! 
+ if r.cells.contains_key(&(cell.column, cell_row)) { + None + } else { + Some(VerifyFailure::CellNotAssigned { + gate: (gate_index, gate.name()).into(), + region: ( + r_i, + r.name.clone(), + r.annotations.clone(), + ) + .into(), + gate_offset: *selector_row, + column: cell.column, + offset: cell_row as isize + - r.rows.unwrap().0 as isize, + }) + } + } } }) }) @@ -1219,11 +1417,7 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .flat_map(|(gate_index, gate)| { let blinding_rows = (self.n as usize - (self.cs.blinding_factors() + 1))..(self.n as usize); - (gate_row_ids - .clone() - .into_iter() - .chain(blinding_rows.into_iter())) - .flat_map(move |row| { + (gate_row_ids.clone().chain(blinding_rows)).flat_map(move |row| { let row = row as i32 + n; gate.polynomials().iter().enumerate().filter_map( move |(poly_index, poly)| match poly.evaluate_lazy( @@ -1294,6 +1488,43 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { }) }); + let load = |expression: &Expression, row| { + expression.evaluate_lazy( + &|scalar| Value::Real(scalar), + &|_| panic!("virtual selectors are removed during optimization"), + &|query| { + let query = self.cs.fixed_queries[query.index.unwrap()]; + let column_index = query.0.index(); + let rotation = query.1 .0; + self.fixed[column_index][(row as i32 + n + rotation) as usize % n as usize] + .into() + }, + &|query| { + let query = self.cs.advice_queries[query.index.unwrap()]; + let column_index = query.0.index(); + let rotation = query.1 .0; + self.advice[column_index][(row as i32 + n + rotation) as usize % n as usize] + .into() + }, + &|query| { + let query = self.cs.instance_queries[query.index.unwrap()]; + let column_index = query.0.index(); + let rotation = query.1 .0; + Value::Real( + self.instance[column_index] + [(row as i32 + n + rotation) as usize % n as usize] + .value(), + ) + }, + &|challenge| Value::Real(self.challenges[challenge.index()]), + &|a| -a, + &|a, b| a + b, + &|a, b| a * b, + &|a, scalar| a * scalar, + &Value::Real(F::ZERO), + ) + }; + let mut cached_table = Vec::new(); let mut cached_table_identifier = Vec::new(); // Check that all lookups exist in their respective tables. 
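The `load` closure hoisted above resolves each query by looking up its column and rotation, then indexing the assigned values with wrap-around arithmetic so that rotations at the edges of the table land on the opposite end. A standalone sketch of that index calculation (illustrative only, not part of the patch; `n` is the row count):

```rust
/// Wrap-around row indexing, mirroring the `(row as i32 + n + rotation) % n`
/// expression used above when resolving rotated queries.
fn rotated_row(row: usize, rotation: i32, n: i32) -> usize {
    // Adding `n` before taking the remainder keeps the intermediate value
    // non-negative for negative rotations such as Rotation::prev().
    ((row as i32 + n + rotation) as usize) % (n as usize)
}

fn main() {
    let n = 16;
    assert_eq!(rotated_row(0, -1, n), 15); // prev() from the first row wraps to the last
    assert_eq!(rotated_row(15, 1, n), 0); // next() from the last row wraps to the first
    assert_eq!(rotated_row(3, 0, n), 3); // cur() is the row itself
}
```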
@@ -1303,47 +1534,6 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .iter() .enumerate() .flat_map(|(lookup_index, lookup)| { - let load = |expression: &Expression, row| { - expression.evaluate_lazy( - &|scalar| Value::Real(scalar), - &|_| panic!("virtual selectors are removed during optimization"), - &|query| { - let query = self.cs.fixed_queries[query.index]; - let column_index = query.0.index(); - let rotation = query.1 .0; - self.fixed[column_index] - [(row as i32 + n + rotation) as usize % n as usize] - .into() - }, - &|query| { - let query = self.cs.advice_queries[query.index]; - let column_index = query.0.index(); - let rotation = query.1 .0; - self.advice[column_index] - [(row as i32 + n + rotation) as usize % n as usize] - .into() - }, - &|query| { - let query = self.cs.instance_queries[query.index]; - let column_index = query.0.index(); - let rotation = query.1 .0; - Value::Real( - self.instance[column_index] - [(row as i32 + n + rotation) as usize % n as usize], - ) - }, - &|challenge| Value::Real(self.challenges[challenge.index()]), - &|a| -a, - &|a, b| a + b, - &|a, b| a * b, - &|a, scalar| a * scalar, - &Value::Real(F::ZERO), - ) - }; - - for input_expressions in lookup.inputs_expressions.iter() { - assert!(lookup.table_expressions.len() == input_expressions.len()); - } assert!(self.usable_rows.end > 0); // We optimize on the basis that the table might have been filled so that the last @@ -1422,7 +1612,7 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { assert!(table.binary_search(input).is_err()); Some(VerifyFailure::Lookup { - name: lookup.name, + name: lookup.name.to_string(), lookup_index, location: FailureLocation::find_expressions( &self.cs, @@ -1440,6 +1630,71 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .collect::>() }); + let shuffle_errors = + self.cs + .shuffles + .iter() + .enumerate() + .flat_map(|(shuffle_index, shuffle)| { + assert!(shuffle.shuffle_expressions.len() == shuffle.input_expressions.len()); + assert!(self.usable_rows.end > 0); + + let mut shuffle_rows: Vec>> = self + .usable_rows + .clone() + .map(|row| { + let t = shuffle + .shuffle_expressions + .iter() + .map(move |c| load(c, row)) + .collect(); + t + }) + .collect(); + shuffle_rows.sort(); + + let mut input_rows: Vec<(Vec>, usize)> = self + .usable_rows + .clone() + .map(|input_row| { + let t = shuffle + .input_expressions + .iter() + .map(move |c| load(c, input_row)) + .collect(); + + (t, input_row) + }) + .collect(); + input_rows.sort(); + + input_rows + .iter() + .zip(shuffle_rows.iter()) + .filter_map(|((input_value, row), shuffle_value)| { + if shuffle_value != input_value { + Some(VerifyFailure::Shuffle { + name: shuffle.name.clone(), + shuffle_index, + location: FailureLocation::find_expressions( + &self.cs, + &self.regions, + *row, + shuffle.input_expressions.iter(), + ), + }) + } else { + None + } + }) + .collect::>() + }); + + let mapping = self + .permutation + .as_ref() + .expect("root cs permutation must be Some") + .mapping(); // Check that permutations preserve the original values of the cells. let perm_errors = { // Original values of columns involved in the permutation. 
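The shuffle check introduced above is a multiset-equality test: the evaluated input rows and the evaluated shuffle rows are each sorted, compared position by position, and any mismatch is reported at the originating input row. A minimal standalone sketch of the same idea, using plain integers in place of the prover's evaluated values:

```rust
/// Sort-and-compare multiset check, mirroring the mock prover's shuffle
/// verification. Each input carries the row it came from so that a mismatch
/// can be reported there.
fn first_unmatched_row(inputs: &[(u64, usize)], shuffles: &[u64]) -> Option<usize> {
    let mut inputs = inputs.to_vec();
    let mut shuffles = shuffles.to_vec();
    // Sorting both sides turns "is a permutation of" into a positional comparison.
    inputs.sort();
    shuffles.sort();
    for ((value, row), expected) in inputs.iter().zip(shuffles.iter()) {
        if value != expected {
            // Report the originating row of the offending input, as the prover does.
            return Some(*row);
        }
    }
    None
}

fn main() {
    // {1, 2, 3} is a permutation of {3, 1, 2}: no mismatch.
    assert_eq!(first_unmatched_row(&[(1, 0), (2, 1), (3, 2)], &[3, 1, 2]), None);
    // 4 never appears on the shuffle side, so some input row is reported.
    assert!(first_unmatched_row(&[(1, 0), (4, 1), (3, 2)], &[3, 1, 2]).is_some());
}
```

Because both sides are sorted before comparison, the row blamed for a mismatch is whichever input ends up next to the unmatched shuffle value, which may not be the row a reader would intuitively call the offender.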
@@ -1451,22 +1706,21 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .map(|c: &Column| match c.column_type() { Any::Advice(_) => self.advice[c.index()][row], Any::Fixed => self.fixed[c.index()][row], - Any::Instance => CellValue::Assigned(self.instance[c.index()][row]), + Any::Instance => { + let cell: &InstanceValue = &self.instance[c.index()][row]; + CellValue::Assigned(cell.value()) + } }) .unwrap() }; // Iterate over each column of the permutation - self.permutation - .as_ref() - .expect("root cs permutation must be Some") - .mapping - .iter() - .enumerate() - .flat_map(move |(column, values)| { - // Iterate over each row of the column to check that the cell's - // value is preserved by the mapping. - values.iter().enumerate().filter_map(move |(row, cell)| { + mapping.enumerate().flat_map(move |(column, values)| { + // Iterate over each row of the column to check that the cell's + // value is preserved by the mapping. + values + .enumerate() + .filter_map(move |(row, cell)| { let original_cell = original(column, row); let permuted_cell = original(cell.0, cell.1); if original_cell == permuted_cell { @@ -1484,7 +1738,8 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { }) } }) - }) + .collect::>() + }) }; let mut errors: Vec<_> = iter::empty() @@ -1492,6 +1747,7 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .chain(gate_errors) .chain(lookup_errors.flatten()) .chain(perm_errors) + .chain(shuffle_errors) .collect(); if errors.is_empty() { Ok(()) @@ -1513,6 +1769,7 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { /// Returns `Ok(())` if this `MockProver` is satisfied, or a list of errors indicating /// the reasons that the circuit is not satisfied. /// Constraints and lookup are checked at `usable_rows`, parallelly. + #[cfg(feature = "multicore")] pub fn verify_par(&self) -> Result<(), Vec> { self.verify_at_rows_par(self.usable_rows.clone(), self.usable_rows.clone()) } @@ -1520,6 +1777,7 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { /// Returns `Ok(())` if this `MockProver` is satisfied, or a list of errors indicating /// the reasons that the circuit is not satisfied. /// Constraints are only checked at `gate_row_ids`, and lookup inputs are only checked at `lookup_input_row_ids`, parallelly. + #[cfg(feature = "multicore")] pub fn verify_at_rows_par>( &self, gate_row_ids: I, @@ -1573,23 +1831,44 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { let cell_row = ((gate_row + n + cell.rotation.0) % n) as usize; - // Check that it was assigned! - if r.cells.contains_key(&(cell.column, cell_row)) { - None - } else { - Some(VerifyFailure::CellNotAssigned { - gate: (gate_index, gate.name()).into(), - region: ( - r_i, - r.name.clone(), - r.annotations.clone(), - ) - .into(), - gate_offset: *selector_row, - column: cell.column, - offset: cell_row as isize - - r.rows.unwrap().0 as isize, - }) + match cell.column.column_type() { + Any::Instance => { + // Handle instance cells, which are not in the region. + let instance_value = + &self.instance[cell.column.index()][cell_row]; + match instance_value { + InstanceValue::Assigned(_) => None, + _ => Some( + VerifyFailure::InstanceCellNotAssigned { + gate: (gate_index, gate.name()).into(), + region: (r_i, r.name.clone()).into(), + gate_offset: *selector_row, + column: cell.column.try_into().unwrap(), + row: cell_row, + }, + ), + } + } + _ => { + // Check that it was assigned! 
+ if r.cells.contains_key(&(cell.column, cell_row)) { + None + } else { + Some(VerifyFailure::CellNotAssigned { + gate: (gate_index, gate.name()).into(), + region: ( + r_i, + r.name.clone(), + r.annotations.clone(), + ) + .into(), + gate_offset: *selector_row, + column: cell.column, + offset: cell_row as isize + - r.rows.unwrap().0 as isize, + }) + } + } } }) .collect::>() @@ -1688,6 +1967,36 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .collect::>() }); + let load = |expression: &Expression, row| { + expression.evaluate_lazy( + &|scalar| Value::Real(scalar), + &|_| panic!("virtual selectors are removed during optimization"), + &|query| { + self.fixed[query.column_index] + [(row as i32 + n + query.rotation.0) as usize % n as usize] + .into() + }, + &|query| { + self.advice[query.column_index] + [(row as i32 + n + query.rotation.0) as usize % n as usize] + .into() + }, + &|query| { + Value::Real( + self.instance[query.column_index] + [(row as i32 + n + query.rotation.0) as usize % n as usize] + .value(), + ) + }, + &|challenge| Value::Real(self.challenges[challenge.index()]), + &|a| -a, + &|a, b| a + b, + &|a, b| a * b, + &|a, scalar| a * scalar, + &Value::Real(F::ZERO), + ) + }; + let mut cached_table = Vec::new(); let mut cached_table_identifier = Vec::new(); // Check that all lookups exist in their respective tables. @@ -1697,38 +2006,6 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .iter() .enumerate() .flat_map(|(lookup_index, lookup)| { - let load = |expression: &Expression, row| { - expression.evaluate_lazy( - &|scalar| Value::Real(scalar), - &|_| panic!("virtual selectors are removed during optimization"), - &|query| { - self.fixed[query.column_index] - [(row as i32 + n + query.rotation.0) as usize % n as usize] - .into() - }, - &|query| { - self.advice[query.column_index] - [(row as i32 + n + query.rotation.0) as usize % n as usize] - .into() - }, - &|query| { - Value::Real( - self.instance[query.column_index] - [(row as i32 + n + query.rotation.0) as usize % n as usize], - ) - }, - &|challenge| Value::Real(self.challenges[challenge.index()]), - &|a| -a, - &|a, b| a + b, - &|a, b| a * b, - &|a, scalar| a * scalar, - &Value::Real(F::ZERO), - ) - }; - - for input_expressions in lookup.inputs_expressions.iter() { - assert!(lookup.table_expressions.len() == input_expressions.len()); - } assert!(self.usable_rows.end > 0); // We optimize on the basis that the table might have been filled so that the last @@ -1803,7 +2080,7 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .filter_map(move |(input, input_row)| { if table.binary_search(input).is_err() { Some(VerifyFailure::Lookup { - name: lookup.name, + name: lookup.name.to_string(), lookup_index, location: FailureLocation::find_expressions( &self.cs, @@ -1821,6 +2098,71 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .collect::>() }); + let shuffle_errors = + self.cs + .shuffles + .iter() + .enumerate() + .flat_map(|(shuffle_index, shuffle)| { + assert!(shuffle.shuffle_expressions.len() == shuffle.input_expressions.len()); + assert!(self.usable_rows.end > 0); + + let mut shuffle_rows: Vec>> = self + .usable_rows + .clone() + .map(|row| { + let t = shuffle + .shuffle_expressions + .iter() + .map(move |c| load(c, row)) + .collect(); + t + }) + .collect(); + shuffle_rows.sort(); + + let mut input_rows: Vec<(Vec>, usize)> = self + .usable_rows + .clone() + .map(|input_row| { + let t = shuffle + .input_expressions + .iter() + .map(move |c| load(c, input_row)) + .collect(); + + (t, 
input_row) + }) + .collect(); + input_rows.sort(); + + input_rows + .iter() + .zip(shuffle_rows.iter()) + .filter_map(|((input_value, row), shuffle_value)| { + if shuffle_value != input_value { + Some(VerifyFailure::Shuffle { + name: shuffle.name.clone(), + shuffle_index, + location: FailureLocation::find_expressions( + &self.cs, + &self.regions, + *row, + shuffle.input_expressions.iter(), + ), + }) + } else { + None + } + }) + .collect::>() + }); + + let mapping = self + .permutation + .as_ref() + .expect("root cs permutation must be Some") + .mapping(); // Check that permutations preserve the original values of the cells. let perm_errors = { // Original values of columns involved in the permutation. @@ -1832,44 +2174,40 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .map(|c: &Column| match c.column_type() { Any::Advice(_) => self.advice[c.index()][row], Any::Fixed => self.fixed[c.index()][row], - Any::Instance => CellValue::Assigned(self.instance[c.index()][row]), + Any::Instance => { + let cell: &InstanceValue = &self.instance[c.index()][row]; + CellValue::Assigned(cell.value()) + } }) .unwrap() }; // Iterate over each column of the permutation - self.permutation - .as_ref() - .expect("root cs permutation must be Some") - .mapping - .iter() - .enumerate() - .flat_map(move |(column, values)| { - // Iterate over each row of the column to check that the cell's - // value is preserved by the mapping. - values - .par_iter() - .enumerate() - .filter_map(move |(row, cell)| { - let original_cell = original(column, row); - let permuted_cell = original(cell.0, cell.1); - if original_cell == permuted_cell { - None - } else { - let columns = self.cs.permutation.get_columns(); - let column = columns.get(column).unwrap(); - Some(VerifyFailure::Permutation { - column: (*column).into(), - location: FailureLocation::find( - &self.regions, - row, - Some(column).into_iter().cloned().collect(), - ), - }) - } - }) - .collect::>() - }) + mapping.enumerate().flat_map(move |(column, values)| { + // Iterate over each row of the column to check that the cell's + // value is preserved by the mapping. 
+ values + .enumerate() + .filter_map(move |(row, cell)| { + let original_cell = original(column, row); + let permuted_cell = original(cell.0, cell.1); + if original_cell == permuted_cell { + None + } else { + let columns = self.cs.permutation.get_columns(); + let column = columns.get(column).unwrap(); + Some(VerifyFailure::Permutation { + column: (*column).into(), + location: FailureLocation::find( + &self.regions, + row, + Some(column).into_iter().cloned().collect(), + ), + }) + } + }) + .collect::>() + }) }; let mut errors: Vec<_> = iter::empty() @@ -1877,6 +2215,7 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { .chain(gate_errors) .chain(lookup_errors.flatten()) .chain(perm_errors) + .chain(shuffle_errors) .collect(); if errors.is_empty() { Ok(()) @@ -1925,6 +2264,7 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { /// ```ignore /// assert_eq!(prover.verify_par(), Ok(())); /// ``` + #[cfg(feature = "multicore")] pub fn assert_satisfied_par(&self) { if let Err(errs) = self.verify_par() { for err in errs { @@ -1946,6 +2286,7 @@ impl<'a, F: FromUniformBytes<64> + Ord> MockProver<'a, F> { /// ```ignore /// assert_eq!(prover.verify_at_rows_par(), Ok(())); /// ``` + #[cfg(feature = "multicore")] pub fn assert_satisfied_at_rows_par>( &self, gate_row_ids: I, @@ -2006,6 +2347,8 @@ mod tests { impl Circuit for FaultyCircuit { type Config = FaultyCircuitConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn configure(meta: &mut ConstraintSystem) -> Self::Config { let a = meta.advice_column(); @@ -2092,6 +2435,8 @@ mod tests { impl Circuit for FaultyCircuit { type Config = FaultyCircuitConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn configure(meta: &mut ConstraintSystem) -> Self::Config { let a = meta.advice_column(); @@ -2235,7 +2580,7 @@ mod tests { assert_eq!( prover.verify(), Err(vec![VerifyFailure::Lookup { - name: "lookup", + name: "lookup".to_string(), lookup_index: 0, location: FailureLocation::InRegion { region: (1, "Faulty synthesis").into(), @@ -2261,6 +2606,8 @@ mod tests { impl Circuit for FaultyCircuit { type Config = FaultyCircuitConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn configure(meta: &mut ConstraintSystem) -> Self::Config { let a = meta.advice_column(); @@ -2303,7 +2650,7 @@ mod tests { || Value::known(Fp::from(2 * i as u64)), ) }) - .fold(Ok(()), |acc, res| acc.and(res)) + .try_fold((), |_, res| res) }, )?; @@ -2367,7 +2714,7 @@ mod tests { assert_eq!( prover.verify(), Err(vec![VerifyFailure::Lookup { - name: "lookup", + name: "lookup".to_string(), lookup_index: 0, location: FailureLocation::InRegion { region: (2, "Faulty synthesis").into(), @@ -2395,6 +2742,8 @@ mod tests { impl Circuit for FaultyCircuit { type Config = FaultyCircuitConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn configure(meta: &mut ConstraintSystem) -> Self::Config { let a = meta.advice_column(); diff --git a/halo2_proofs/src/dev/cost.rs b/halo2_proofs/src/dev/cost.rs index effbd97976..8288006938 100644 --- a/halo2_proofs/src/dev/cost.rs +++ b/halo2_proofs/src/dev/cost.rs @@ -2,6 +2,7 @@ use std::ops::Range; use std::{ + cmp, collections::{HashMap, HashSet}, iter, marker::PhantomData, @@ -12,7 +13,7 @@ use ff::{Field, PrimeField}; use group::prime::PrimeGroup; use crate::{ - circuit::Value, + circuit::{layouter::RegionColumn, Value}, plonk::{ 
Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error, Fixed, FloorPlanner, Instance, Selector, @@ -21,10 +22,11 @@ use crate::{ }; /// Measures a circuit to determine its costs, and explain what contributes to them. +#[allow(dead_code)] #[derive(Debug)] pub struct CircuitCost> { /// Power-of-2 bound on the number of rows in the circuit. - k: usize, + k: u32, /// Maximum degree of the circuit. max_deg: usize, /// Number of advice columns. @@ -39,25 +41,141 @@ pub struct CircuitCost> { permutation_cols: usize, /// Number of distinct sets of points in the multiopening argument. point_sets: usize, + /// Maximum rows used over all columns + max_rows: usize, + /// Maximum rows used over all advice columns + max_advice_rows: usize, + /// Maximum rows used over all fixed columns + max_fixed_rows: usize, + num_fixed_columns: usize, + num_advice_columns: usize, + num_instance_columns: usize, + num_total_columns: usize, _marker: PhantomData<(G, ConcreteCircuit)>, } -struct Assembly { - selectors: Vec>, +/// Region implementation used by Layout +#[allow(dead_code)] +#[derive(Debug)] +pub(crate) struct LayoutRegion { + /// The name of the region. Not required to be unique. + pub(crate) name: String, + /// The columns used by this region. + pub(crate) columns: HashSet, + /// The row that this region starts on, if known. + pub(crate) offset: Option, + /// The number of rows that this region takes up. + pub(crate) rows: usize, + /// The cells assigned in this region. + pub(crate) cells: Vec<(RegionColumn, usize)>, +} + +/// Cost and graphing layouter +#[derive(Default, Debug)] +pub(crate) struct Layout { + /// k = 1 << n + pub(crate) k: u32, + /// Regions of the layout + pub(crate) regions: Vec, + current_region: Option, + /// Total row count + pub(crate) total_rows: usize, + /// Total advice rows + pub(crate) total_advice_rows: usize, + /// Total fixed rows + pub(crate) total_fixed_rows: usize, + /// Any cells assigned outside of a region. + pub(crate) loose_cells: Vec<(RegionColumn, usize)>, + /// Pairs of cells between which we have equality constraints. + pub(crate) equality: Vec<(Column, usize, Column, usize)>, + /// Selector assignments used for optimization pass + pub(crate) selectors: Vec>, } -impl Assignment for Assembly { - fn enter_region(&mut self, _: N) +impl Layout { + /// Creates a empty layout + pub fn new(k: u32, n: usize, num_selectors: usize) -> Self { + Layout { + k, + regions: vec![], + current_region: None, + total_rows: 0, + total_advice_rows: 0, + total_fixed_rows: 0, + /// Any cells assigned outside of a region. + loose_cells: vec![], + /// Pairs of cells between which we have equality constraints. + equality: vec![], + /// Selector assignments used for optimization pass + selectors: vec![vec![false; n]; num_selectors], + } + } + + /// Update layout metadata + pub fn update(&mut self, column: RegionColumn, row: usize) { + self.total_rows = cmp::max(self.total_rows, row + 1); + + if let RegionColumn::Column(col) = column { + match col.column_type() { + Any::Advice(_) => { + self.total_advice_rows = cmp::max(self.total_advice_rows, row + 1) + } + Any::Fixed => self.total_fixed_rows = cmp::max(self.total_fixed_rows, row + 1), + _ => {} + } + } + + if let Some(region) = self.current_region { + let region = &mut self.regions[region]; + region.columns.insert(column); + + // The region offset is the earliest row assigned to. 
+ let mut offset = region.offset.unwrap_or(row); + if row < offset { + // The first row assigned was not at offset 0 within the region. + region.rows += offset - row; + offset = row; + } + // The number of rows in this region is the gap between the earliest and + // latest rows assigned. + region.rows = cmp::max(region.rows, row - offset + 1); + region.offset = Some(offset); + + region.cells.push((column, row)); + } else { + self.loose_cells.push((column, row)); + } + } +} + +impl Assignment for Layout { + fn enter_region(&mut self, name_fn: N) where NR: Into, N: FnOnce() -> NR, { - // Do nothing; we don't care about regions in this context. + assert!(self.current_region.is_none()); + self.current_region = Some(self.regions.len()); + self.regions.push(LayoutRegion { + name: name_fn().into(), + columns: HashSet::default(), + offset: None, + rows: 0, + cells: vec![], + }) + } + + fn annotate_column(&mut self, _: A, _: Column) + where + A: FnOnce() -> AR, + AR: Into, + { } fn exit_region(&mut self) { - // Do nothing; we don't care about regions in this context. + assert!(self.current_region.is_some()); + self.current_region = None; } fn enable_selector(&mut self, _: A, selector: &Selector, row: usize) -> Result<(), Error> @@ -65,8 +183,13 @@ impl Assignment for Assembly { A: FnOnce() -> AR, AR: Into, { - self.selectors[selector.0][row] = true; + if let Some(cell) = self.selectors[selector.0].get_mut(row) { + *cell = true; + } else { + return Err(Error::not_enough_rows_available(self.k)); + } + self.update((*selector).into(), row); Ok(()) } @@ -93,8 +216,8 @@ impl Assignment for Assembly { fn assign_advice( &mut self, _: A, - _: Column, - _: usize, + column: Column, + row: usize, _: V, ) -> Result<(), Error> where @@ -103,14 +226,15 @@ impl Assignment for Assembly { A: FnOnce() -> AR, AR: Into, { + self.update(Column::::from(column).into(), row); Ok(()) } fn assign_fixed( &mut self, _: A, - _: Column, - _: usize, + column: Column, + row: usize, _: V, ) -> Result<(), Error> where @@ -119,10 +243,18 @@ impl Assignment for Assembly { A: FnOnce() -> AR, AR: Into, { + self.update(Column::::from(column).into(), row); Ok(()) } - fn copy(&mut self, _: Column, _: usize, _: Column, _: usize) -> Result<(), Error> { + fn copy( + &mut self, + l_col: Column, + l_row: usize, + r_col: Column, + r_row: usize, + ) -> Result<(), crate::plonk::Error> { + self.equality.push((l_col, l_row, r_col, r_row)); Ok(()) } @@ -139,14 +271,6 @@ impl Assignment for Assembly { Value::unknown() } - fn annotate_column(&mut self, _annotation: A, _column: Column) - where - A: FnOnce() -> AR, - AR: Into, - { - // Do nothing - } - fn push_namespace(&mut self, _: N) where NR: Into, @@ -164,21 +288,19 @@ impl> CircuitCost Self { + pub fn measure(k: u32, circuit: &ConcreteCircuit) -> Self { // Collect the layout details. 
let mut cs = ConstraintSystem::default(); let config = ConcreteCircuit::configure(&mut cs); - let mut assembly = Assembly { - selectors: vec![vec![false; 1 << k]; cs.num_selectors], - }; + let mut layout = Layout::new(k, 1 << k, cs.num_selectors); ConcreteCircuit::FloorPlanner::synthesize( - &mut assembly, + &mut layout, circuit, config, cs.constants.clone(), ) .unwrap(); - let (cs, _) = cs.compress_selectors(assembly.selectors); + let (cs, _) = cs.compress_selectors(layout.selectors); assert!((1 << k) >= cs.minimum_rows()); @@ -233,7 +355,16 @@ impl> CircuitCost> CircuitCost> CircuitCost From> for usize { + proof.polycomm.len(point, scalar) } } + +#[cfg(test)] +mod tests { + use halo2curves::pasta::{Eq, Fp}; + + use crate::circuit::SimpleFloorPlanner; + + use super::*; + + #[test] + fn circuit_cost_without_permutation() { + const K: u32 = 4; + + struct MyCircuit; + impl Circuit for MyCircuit { + type Config = (); + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self + } + + fn configure(_meta: &mut ConstraintSystem) -> Self::Config {} + + fn synthesize( + &self, + _config: Self::Config, + _layouter: impl crate::circuit::Layouter, + ) -> Result<(), Error> { + Ok(()) + } + } + CircuitCost::::measure(K, &MyCircuit).proof_size(1); + } +} diff --git a/halo2_proofs/src/dev/failure.rs b/halo2_proofs/src/dev/failure.rs index 06aaafe201..37f7d2d8d6 100644 --- a/halo2_proofs/src/dev/failure.rs +++ b/halo2_proofs/src/dev/failure.rs @@ -14,7 +14,6 @@ use crate::dev::metadata::Constraint; use crate::{ dev::{Instance, Value}, plonk::{Any, Column, ConstraintSystem, Expression, Gate}, - poly::Rotation, }; mod emitter; @@ -70,9 +69,9 @@ impl FailureLocation { expression.evaluate( &|_| vec![], &|_| panic!("virtual selectors are removed during optimization"), - &|query| vec![cs.fixed_queries[query.index].0.into()], - &|query| vec![cs.advice_queries[query.index].0.into()], - &|query| vec![cs.instance_queries[query.index].0.into()], + &|query| vec![cs.fixed_queries[query.index.unwrap()].0.into()], + &|query| vec![cs.advice_queries[query.index.unwrap()].0.into()], + &|query| vec![cs.instance_queries[query.index.unwrap()].0.into()], &|_| vec![], &|a| a, &|mut a, mut b| { @@ -101,16 +100,17 @@ impl FailureLocation { .iter() .enumerate() .find(|(_, r)| { - if r.rows.is_none() { - return false; + if let Some((start, end)) = r.rows { + // We match the region if any input columns overlap, rather than all of + // them, because matching complex selector columns is hard. As long as + // regions are rectangles, and failures occur due to assignments entirely + // within single regions, "any" will be equivalent to "all". If these + // assumptions change, we'll start getting bug reports from users :) + (start..=end).contains(&failure_row) && !failure_columns.is_disjoint(&r.columns) + } else { + // Zero-area region + false } - let (start, end) = r.rows.unwrap(); - // We match the region if any input columns overlap, rather than all of - // them, because matching complex selector columns is hard. As long as - // regions are rectangles, and failures occur due to assignments entirely - // within single regions, "any" will be equivalent to "all". 
If these - // assumptions change, we'll start getting bug reports from users :) - (start..=end).contains(&failure_row) && !failure_columns.is_disjoint(&r.columns) }) .map(|(r_i, r)| FailureLocation::InRegion { region: (r_i, r.name.clone(), r.annotations.clone()).into(), @@ -139,6 +139,20 @@ pub enum VerifyFailure { /// offset 0, but the gate uses `Rotation::prev()`). offset: isize, }, + /// An instance cell used in an active gate was not assigned to. + InstanceCellNotAssigned { + /// The index of the active gate. + gate: metadata::Gate, + /// The region in which this gate was activated. + region: metadata::Region, + /// The offset (relative to the start of the region) at which the active gate + /// queries this cell. + gate_offset: usize, + /// The column in which this cell should be assigned. + column: Column, + /// The absolute row at which this cell should be assigned. + row: usize, + }, /// A constraint was not satisfied for a particular row. ConstraintNotSatisfied { /// The polynomial constraint that is not satisfied. @@ -158,7 +172,7 @@ pub enum VerifyFailure { }, /// A lookup input did not exist in its corresponding table. Lookup { - name: &'static str, + name: String, /// The index of the lookup that is not satisfied. These indices are assigned in /// the order in which `ConstraintSystem::lookup` is called during /// `Circuit::configure`. @@ -177,6 +191,28 @@ pub enum VerifyFailure { /// lookup is active on a row adjacent to an unrelated region. location: FailureLocation, }, + /// A shuffle input did not exist in its corresponding map. + Shuffle { + /// The name of the lookup that is not satisfied. + name: String, + /// The index of the lookup that is not satisfied. These indices are assigned in + /// the order in which `ConstraintSystem::lookup` is called during + /// `Circuit::configure`. + shuffle_index: usize, + /// The location at which the lookup is not satisfied. + /// + /// `FailureLocation::InRegion` is most common, and may be due to the intentional + /// use of a lookup (if its inputs are conditional on a complex selector), or an + /// unintentional lookup constraint that overlaps the region (indicating that the + /// lookup's inputs should be made conditional). + /// + /// `FailureLocation::OutsideRegion` is uncommon, and could mean that: + /// - The input expressions do not correctly constrain a default value that exists + /// in the table when the lookup is not being used. + /// - The input expressions use a column queried at a non-zero `Rotation`, and the + /// lookup is active on a row adjacent to an unrelated region. + location: FailureLocation, + }, /// A permutation did not preserve the original value of a cell. Permutation { /// The column in which this permutation is not satisfied. 
@@ -202,6 +238,19 @@ impl fmt::Display for VerifyFailure { region, gate, gate_offset, column, offset, region.get_column_annotation((*column).into()) ) } + Self::InstanceCellNotAssigned { + gate, + region, + gate_offset, + column, + row, + } => { + write!( + f, + "{} uses {} at offset {}, which requires cell in instance column {:?} at row {} to be assigned.", + region, gate, gate_offset, column, row + ) + } Self::ConstraintNotSatisfied { constraint, location, @@ -240,6 +289,17 @@ impl fmt::Display for VerifyFailure { name, lookup_index, location ) } + Self::Shuffle { + name, + shuffle_index, + location, + } => { + write!( + f, + "Shuffle {}(index: {}) is not satisfied {}", + name, shuffle_index, location + ) + } Self::Permutation { column, location } => { write!( f, @@ -276,7 +336,7 @@ impl Debug for VerifyFailure { }; let debug = ConstraintCaseDebug { - constraint: *constraint, + constraint: constraint.clone(), location: location.clone(), cell_values: cell_values .iter() @@ -624,6 +684,176 @@ fn render_lookup( } } +fn render_shuffle( + prover: &MockProver, + name: &str, + shuffle_index: usize, + location: &FailureLocation, +) { + let n = prover.n as i32; + let cs = &prover.cs; + let shuffle = &cs.shuffles[shuffle_index]; + + // Get the absolute row on which the shuffle's inputs are being queried, so we can + // fetch the input values. + let row = match location { + FailureLocation::InRegion { region, offset } => { + prover.regions[region.index].rows.unwrap().0 + offset + } + FailureLocation::OutsideRegion { row } => *row, + } as i32; + + let shuffle_columns = shuffle.shuffle_expressions.iter().map(|expr| { + expr.evaluate( + &|f| format! {"Const: {:#?}", f}, + &|s| format! {"S{}", s.0}, + &|query| { + format!( + "{:?}", + prover + .cs + .general_column_annotations + .get(&metadata::Column::from((Any::Fixed, query.column_index))) + .cloned() + .unwrap_or_else(|| format!("F{}", query.column_index())) + ) + }, + &|query| { + format!( + "{:?}", + prover + .cs + .general_column_annotations + .get(&metadata::Column::from((Any::advice(), query.column_index))) + .cloned() + .unwrap_or_else(|| format!("A{}", query.column_index())) + ) + }, + &|query| { + format!( + "{:?}", + prover + .cs + .general_column_annotations + .get(&metadata::Column::from((Any::Instance, query.column_index))) + .cloned() + .unwrap_or_else(|| format!("I{}", query.column_index())) + ) + }, + &|challenge| format! {"C{}", challenge.index()}, + &|query| format! {"-{}", query}, + &|a, b| format! {"{} + {}", a,b}, + &|a, b| format! {"{} * {}", a,b}, + &|a, b| format! {"{} * {:?}", a, b}, + ) + }); + + fn cell_value<'a, F: Field, Q: Into + Copy>( + load: impl Fn(Q) -> Value + 'a, + ) -> impl Fn(Q) -> BTreeMap + 'a { + move |query| { + let AnyQuery { + column_type, + column_index, + rotation, + .. 
+ } = query.into(); + Some(( + ((column_type, column_index).into(), rotation.0).into(), + match load(query) { + Value::Real(v) => util::format_value(v), + Value::Poison => unreachable!(), + }, + )) + .into_iter() + .collect() + } + } + + eprintln!("error: input does not exist in shuffle"); + eprint!(" ("); + for i in 0..shuffle.input_expressions.len() { + eprint!("{}L{}", if i == 0 { "" } else { ", " }, i); + } + eprint!(") <-> ("); + for (i, column) in shuffle_columns.enumerate() { + eprint!("{}{}", if i == 0 { "" } else { ", " }, column); + } + eprintln!(")"); + + eprintln!(); + eprintln!(" Shuffle '{}' inputs:", name); + for (i, input) in shuffle.input_expressions.iter().enumerate() { + // Fetch the cell values (since we don't store them in VerifyFailure::Shuffle). + let cell_values = input.evaluate( + &|_| BTreeMap::default(), + &|_| panic!("virtual selectors are removed during optimization"), + &cell_value(&util::load_slice(n, row, &cs.fixed_queries, &prover.fixed)), + &cell_value(&util::load_slice( + n, + row, + &cs.advice_queries, + &prover.advice, + )), + &cell_value(&util::load_instance( + n, + row, + &cs.instance_queries, + &prover.instance, + )), + &|_| BTreeMap::default(), + &|a| a, + &|mut a, mut b| { + a.append(&mut b); + a + }, + &|mut a, mut b| { + a.append(&mut b); + a + }, + &|a, _| a, + ); + + // Collect the necessary rendering information: + // - The columns involved in this constraint. + // - How many cells are in each column. + // - The grid of cell values, indexed by rotation. + let mut columns = BTreeMap::::default(); + let mut layout = BTreeMap::>::default(); + for (i, (cell, _)) in cell_values.iter().enumerate() { + *columns.entry(cell.column).or_default() += 1; + layout + .entry(cell.rotation) + .or_default() + .entry(cell.column) + .or_insert(format!("x{}", i)); + } + + if i != 0 { + eprintln!(); + } + eprintln!( + " Sh{} = {}", + i, + emitter::expression_to_string(input, &layout) + ); + eprintln!(" ^"); + + emitter::render_cell_layout(" | ", location, &columns, &layout, |_, rotation| { + if rotation == 0 { + eprint!(" <--{{ Shuffle '{}' inputs queried here", name); + } + }); + + // Print the map from local variables to assigned values. + eprintln!(" |"); + eprintln!(" | Assigned cell values:"); + for (i, (_, value)) in cell_values.iter().enumerate() { + eprintln!(" | x{} = {}", i, value); + } + } +} + impl VerifyFailure { /// Emits this failure in pretty-printed format to stderr. pub(super) fn emit(&self, prover: &MockProver) { @@ -654,6 +884,11 @@ impl VerifyFailure { lookup_index, location, } => render_lookup(prover, name, *lookup_index, location), + Self::Shuffle { + name, + shuffle_index, + location, + } => render_shuffle(prover, name, *shuffle_index, location), _ => eprintln!("{}", self), } } diff --git a/halo2_proofs/src/dev/failure/emitter.rs b/halo2_proofs/src/dev/failure/emitter.rs index e84ba8013e..edd61f3060 100644 --- a/halo2_proofs/src/dev/failure/emitter.rs +++ b/halo2_proofs/src/dev/failure/emitter.rs @@ -153,7 +153,7 @@ pub(super) fn expression_to_string( label.clone() } else if query.rotation.0 == 0 { // This is most likely a merged selector - format!("S{}", query.index) + format!("S{}", query.index.unwrap()) } else { // No idea how we'd get here... 
format!("F{}@{}", query.column_index, query.rotation.0) diff --git a/halo2_proofs/src/dev/gates.rs b/halo2_proofs/src/dev/gates.rs index cfc71c021e..352415bcd9 100644 --- a/halo2_proofs/src/dev/gates.rs +++ b/halo2_proofs/src/dev/gates.rs @@ -7,22 +7,19 @@ use ff::PrimeField; use crate::{ dev::util, - plonk::{ - sealed::{self, SealedPhase}, - Circuit, ConstraintSystem, FirstPhase, - }, + plonk::{sealed::SealedPhase, Circuit, ConstraintSystem, FirstPhase}, }; #[derive(Debug)] struct Constraint { - name: &'static str, + name: String, expression: String, queries: BTreeSet, } #[derive(Debug)] struct Gate { - name: &'static str, + name: String, constraints: Vec, } @@ -49,6 +46,8 @@ struct Gate { /// impl Circuit for MyCircuit { /// type Config = MyConfig; /// type FloorPlanner = SimpleFloorPlanner; +/// #[cfg(feature = "circuit-params")] +/// type Params = (); /// /// fn without_witnesses(&self) -> Self { /// Self::default() @@ -79,6 +78,9 @@ struct Gate { /// } /// } /// +/// #[cfg(feature = "circuit-params")] +/// let gates = CircuitGates::collect::(()); +/// #[cfg(not(feature = "circuit-params"))] /// let gates = CircuitGates::collect::(); /// assert_eq!( /// format!("{}", gates), @@ -103,22 +105,27 @@ pub struct CircuitGates { impl CircuitGates { /// Collects the gates from within the circuit. - pub fn collect>() -> Self { + pub fn collect>( + #[cfg(feature = "circuit-params")] params: C::Params, + ) -> Self { // Collect the graph details. let mut cs = ConstraintSystem::default(); + #[cfg(feature = "circuit-params")] + let _ = C::configure_with_params(&mut cs, params); + #[cfg(not(feature = "circuit-params"))] let _ = C::configure(&mut cs); let gates = cs .gates .iter() .map(|gate| Gate { - name: gate.name(), + name: gate.name().to_string(), constraints: gate .polynomials() .iter() .enumerate() .map(|(i, constraint)| Constraint { - name: gate.constraint_name(i), + name: gate.constraint_name(i).to_string(), expression: constraint.evaluate( &util::format_value, &|selector| format!("S{}", selector.0), diff --git a/halo2_proofs/src/dev/graph.rs b/halo2_proofs/src/dev/graph.rs index ec9d3d3bfa..744f41d209 100644 --- a/halo2_proofs/src/dev/graph.rs +++ b/halo2_proofs/src/dev/graph.rs @@ -23,6 +23,9 @@ pub fn circuit_dot_graph>( ) -> String { // Collect the graph details. let mut cs = ConstraintSystem::default(); + #[cfg(feature = "circuit-params")] + let config = ConcreteCircuit::configure_with_params(&mut cs, circuit.params()); + #[cfg(not(feature = "circuit-params"))] let config = ConcreteCircuit::configure(&mut cs); let mut graph = Graph::default(); ConcreteCircuit::FloorPlanner::synthesize(&mut graph, circuit, config, cs.constants).unwrap(); diff --git a/halo2_proofs/src/dev/graph/layout.rs b/halo2_proofs/src/dev/graph/layout.rs index fb7e83e74b..7d00434aa2 100644 --- a/halo2_proofs/src/dev/graph/layout.rs +++ b/halo2_proofs/src/dev/graph/layout.rs @@ -3,16 +3,13 @@ use plotters::{ coord::Shift, prelude::{DrawingArea, DrawingAreaErrorKind, DrawingBackend}, }; -use std::cmp; use std::collections::HashSet; use std::ops::Range; use crate::{ - circuit::{layouter::RegionColumn, Value}, - plonk::{ - Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error, - Fixed, FloorPlanner, Instance, Selector, - }, + circuit::layouter::RegionColumn, + dev::cost::Layout, + plonk::{Any, Circuit, Column, ConstraintSystem, FloorPlanner}, }; /// Graphical renderer for circuit layouts. @@ -97,6 +94,9 @@ impl CircuitLayout { let n = 1 << k; // Collect the layout details. 
let mut cs = ConstraintSystem::default(); + #[cfg(feature = "circuit-params")] + let config = ConcreteCircuit::configure_with_params(&mut cs, circuit.params()); + #[cfg(not(feature = "circuit-params"))] let config = ConcreteCircuit::configure(&mut cs); let mut layout = Layout::new(k, n, cs.num_selectors); ConcreteCircuit::FloorPlanner::synthesize( @@ -321,212 +321,3 @@ impl CircuitLayout { Ok(()) } } - -#[derive(Debug)] -struct Region { - /// The name of the region. Not required to be unique. - name: String, - /// The columns used by this region. - columns: HashSet, - /// The row that this region starts on, if known. - offset: Option, - /// The number of rows that this region takes up. - rows: usize, - /// The cells assigned in this region. We store this as a `Vec` so that if any cells - /// are double-assigned, they will be visibly darker. - cells: Vec<(RegionColumn, usize)>, -} - -#[derive(Default)] -struct Layout { - k: u32, - regions: Vec, - current_region: Option, - total_rows: usize, - /// Any cells assigned outside of a region. We store this as a `Vec` so that if any - /// cells are double-assigned, they will be visibly darker. - loose_cells: Vec<(RegionColumn, usize)>, - /// Pairs of cells between which we have equality constraints. - equality: Vec<(Column, usize, Column, usize)>, - /// Selector assignments used for optimization pass - selectors: Vec>, -} - -impl Layout { - fn new(k: u32, n: usize, num_selectors: usize) -> Self { - Layout { - k, - regions: vec![], - current_region: None, - total_rows: 0, - /// Any cells assigned outside of a region. We store this as a `Vec` so that if any - /// cells are double-assigned, they will be visibly darker. - loose_cells: vec![], - /// Pairs of cells between which we have equality constraints. - equality: vec![], - /// Selector assignments used for optimization pass - selectors: vec![vec![false; n]; num_selectors], - } - } - - fn update(&mut self, column: RegionColumn, row: usize) { - self.total_rows = cmp::max(self.total_rows, row + 1); - - if let Some(region) = self.current_region { - let region = &mut self.regions[region]; - region.columns.insert(column); - - // The region offset is the earliest row assigned to. - let mut offset = region.offset.unwrap_or(row); - if row < offset { - // The first row assigned was not at offset 0 within the region. - region.rows += offset - row; - offset = row; - } - // The number of rows in this region is the gap between the earliest and - // latest rows assigned. 
- region.rows = cmp::max(region.rows, row - offset + 1); - region.offset = Some(offset); - - region.cells.push((column, row)); - } else { - self.loose_cells.push((column, row)); - } - } -} - -impl Assignment for Layout { - fn enter_region(&mut self, name_fn: N) - where - NR: Into, - N: FnOnce() -> NR, - { - assert!(self.current_region.is_none()); - self.current_region = Some(self.regions.len()); - self.regions.push(Region { - name: name_fn().into(), - columns: HashSet::default(), - offset: None, - rows: 0, - cells: vec![], - }) - } - - fn exit_region(&mut self) { - assert!(self.current_region.is_some()); - self.current_region = None; - } - - fn enable_selector(&mut self, _: A, selector: &Selector, row: usize) -> Result<(), Error> - where - A: FnOnce() -> AR, - AR: Into, - { - if let Some(cell) = self.selectors[selector.0].get_mut(row) { - *cell = true; - } else { - return Err(Error::not_enough_rows_available(self.k)); - } - - self.update((*selector).into(), row); - Ok(()) - } - - fn fork(&mut self, _ranges: &[Range]) -> Result, Error> { - todo!() - } - - fn merge(&mut self, _sub_cs: Vec) -> Result<(), Error> { - todo!() - } - - fn query_advice(&self, _column: Column, _row: usize) -> Result { - Ok(F::ZERO) - } - - fn query_fixed(&self, _column: Column, _row: usize) -> Result { - Ok(F::ZERO) - } - - fn query_instance(&self, _: Column, _: usize) -> Result, Error> { - Ok(Value::unknown()) - } - - fn assign_advice( - &mut self, - _: A, - column: Column, - row: usize, - _: V, - ) -> Result<(), Error> - where - V: FnOnce() -> Value, - VR: Into>, - A: FnOnce() -> AR, - AR: Into, - { - self.update(Column::::from(column).into(), row); - Ok(()) - } - - fn assign_fixed( - &mut self, - _: A, - column: Column, - row: usize, - _: V, - ) -> Result<(), Error> - where - V: FnOnce() -> Value, - VR: Into>, - A: FnOnce() -> AR, - AR: Into, - { - self.update(Column::::from(column).into(), row); - Ok(()) - } - - fn copy( - &mut self, - l_col: Column, - l_row: usize, - r_col: Column, - r_row: usize, - ) -> Result<(), crate::plonk::Error> { - self.equality.push((l_col, l_row, r_col, r_row)); - Ok(()) - } - - fn fill_from_row( - &mut self, - _: Column, - _: usize, - _: Value>, - ) -> Result<(), Error> { - Ok(()) - } - - fn get_challenge(&self, _: Challenge) -> Value { - Value::unknown() - } - - fn annotate_column(&mut self, _annotation: A, _column: Column) - where - A: FnOnce() -> AR, - AR: Into, - { - // Do nothing - } - - fn push_namespace(&mut self, _: N) - where - NR: Into, - N: FnOnce() -> NR, - { - // Do nothing; we don't care about namespaces in this context. - } - - fn pop_namespace(&mut self, _: Option) { - // Do nothing; we don't care about namespaces in this context. - } -} diff --git a/halo2_proofs/src/dev/metadata.rs b/halo2_proofs/src/dev/metadata.rs index 5fd0835bad..f81bfa67a7 100644 --- a/halo2_proofs/src/dev/metadata.rs +++ b/halo2_proofs/src/dev/metadata.rs @@ -15,6 +15,17 @@ pub struct Column { pub(super) index: usize, } +impl Column { + /// Return the column type. + pub fn column_type(&self) -> Any { + self.column_type + } + /// Return the column index. + pub fn index(&self) -> usize { + self.index + } +} + impl fmt::Display for Column { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Column('{:?}', {})", self.column_type, self.index) @@ -75,7 +86,7 @@ impl fmt::Display for DebugColumn { /// within a custom gate. 
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct VirtualCell { - name: &'static str, + name: String, pub(super) column: Column, pub(super) rotation: i32, } @@ -83,17 +94,17 @@ pub struct VirtualCell { impl From<(Column, i32)> for VirtualCell { fn from((column, rotation): (Column, i32)) -> Self { VirtualCell { - name: "", + name: "".to_string(), column, rotation, } } } -impl From<(&'static str, Column, i32)> for VirtualCell { - fn from((name, column, rotation): (&'static str, Column, i32)) -> Self { +impl> From<(S, Column, i32)> for VirtualCell { + fn from((name, column, rotation): (S, Column, i32)) -> Self { VirtualCell { - name, + name: name.as_ref().to_string(), column, rotation, } @@ -103,7 +114,7 @@ impl From<(&'static str, Column, i32)> for VirtualCell { impl From for VirtualCell { fn from(c: plonk::VirtualCell) -> Self { VirtualCell { - name: "", + name: "".to_string(), column: c.column.into(), rotation: c.rotation.0, } @@ -114,7 +125,7 @@ impl fmt::Display for VirtualCell { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}@{}", self.column, self.rotation)?; if !self.name.is_empty() { - write!(f, "({})", self.name)?; + write!(f, "({})", self.name.as_str())?; } Ok(()) } @@ -123,7 +134,7 @@ impl fmt::Display for VirtualCell { /// Helper structure used to be able to inject Column annotations inside a `Display` or `Debug` call. #[derive(Clone, Debug)] pub(super) struct DebugVirtualCell { - name: &'static str, + name: String, column: DebugColumn, rotation: i32, } @@ -131,7 +142,7 @@ pub(super) struct DebugVirtualCell { impl From<(&VirtualCell, Option<&HashMap>)> for DebugVirtualCell { fn from(info: (&VirtualCell, Option<&HashMap>)) -> Self { DebugVirtualCell { - name: info.0.name, + name: info.0.name.clone(), column: DebugColumn::from((info.0.column, info.1)), rotation: info.0.rotation, } @@ -149,30 +160,33 @@ impl fmt::Display for DebugVirtualCell { } /// Metadata about a configured gate within a circuit. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct Gate { /// The index of the active gate. These indices are assigned in the order in which /// `ConstraintSystem::create_gate` is called during `Circuit::configure`. pub(super) index: usize, /// The name of the active gate. These are specified by the gate creator (such as /// a chip implementation), and is not enforced to be unique. - pub(super) name: &'static str, + pub(super) name: String, } impl fmt::Display for Gate { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Gate {} ('{}')", self.index, self.name) + write!(f, "Gate {} ('{}')", self.index, self.name.as_str()) } } -impl From<(usize, &'static str)> for Gate { - fn from((index, name): (usize, &'static str)) -> Self { - Gate { index, name } +impl> From<(usize, S)> for Gate { + fn from((index, name): (usize, S)) -> Self { + Gate { + index, + name: name.as_ref().to_string(), + } } } /// Metadata about a configured constraint within a circuit. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct Constraint { /// The gate containing the constraint. pub(super) gate: Gate, @@ -182,7 +196,7 @@ pub struct Constraint { pub(super) index: usize, /// The name of the constraint. This is specified by the gate creator (such as a chip /// implementation), and is not enforced to be unique. 
- pub(super) name: &'static str, + pub(super) name: String, } impl fmt::Display for Constraint { @@ -194,7 +208,7 @@ impl fmt::Display for Constraint { if self.name.is_empty() { String::new() } else { - format!(" ('{}')", self.name) + format!(" ('{}')", self.name.as_str()) }, self.gate.index, self.gate.name, @@ -202,9 +216,13 @@ impl fmt::Display for Constraint { } } -impl From<(Gate, usize, &'static str)> for Constraint { - fn from((gate, index, name): (Gate, usize, &'static str)) -> Self { - Constraint { gate, index, name } +impl> From<(Gate, usize, S)> for Constraint { + fn from((gate, index, name): (Gate, usize, S)) -> Self { + Constraint { + gate, + index, + name: name.as_ref().to_string(), + } } } @@ -250,7 +268,7 @@ impl Debug for Region { impl fmt::Display for Region { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Region {} ('{}')", self.index, self.name) + write!(f, "Region {} ('{}')", self.index, self.name.as_str()) } } diff --git a/halo2_proofs/src/dev/tfp.rs b/halo2_proofs/src/dev/tfp.rs new file mode 100644 index 0000000000..65340b35a8 --- /dev/null +++ b/halo2_proofs/src/dev/tfp.rs @@ -0,0 +1,544 @@ +use std::{fmt, marker::PhantomData}; + +use ff::Field; +use tracing::{debug, debug_span, span::EnteredSpan}; + +use crate::{ + circuit::{ + layouter::{RegionLayouter, SyncDeps}, + AssignedCell, Cell, Layouter, Region, Table, Value, + }, + plonk::{ + Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error, + Fixed, FloorPlanner, Instance, Selector, + }, +}; + +/// A helper type that augments a [`FloorPlanner`] with [`tracing`] spans and events. +/// +/// `TracingFloorPlanner` can be used to instrument your circuit and determine exactly +/// what is happening during a particular run of keygen or proving. This can be useful for +/// identifying unexpected non-determinism or changes to a circuit. +/// +/// # No stability guarantees +/// +/// The `tracing` output is intended for use during circuit development. It should not be +/// considered production-stable, and the precise format or data exposed may change at any +/// time. +/// +/// # Examples +/// +/// ``` +/// use ff::Field; +/// use halo2_proofs::{ +/// circuit::{floor_planner, Layouter, Value}, +/// dev::TracingFloorPlanner, +/// plonk::{Circuit, ConstraintSystem, Error}, +/// }; +/// +/// # struct MyCircuit { +/// # some_witness: Value, +/// # }; +/// # #[derive(Clone)] +/// # struct MyConfig; +/// impl Circuit for MyCircuit { +/// // Wrap `TracingFloorPlanner` around your existing floor planner of choice. +/// //type FloorPlanner = floor_planner::V1; +/// type FloorPlanner = TracingFloorPlanner; +/// +/// // The rest of your `Circuit` implementation is unchanged. +/// type Config = MyConfig; +/// +/// #[cfg(feature = "circuit-params")] +/// type Params = (); +/// +/// fn without_witnesses(&self) -> Self { +/// Self { some_witness: Value::unknown() } +/// } +/// +/// fn configure(meta: &mut ConstraintSystem) -> Self::Config { +/// // .. +/// # todo!() +/// } +/// +/// fn synthesize(&self, config: Self::Config, layouter: impl Layouter) -> Result<(), Error> { +/// // .. +/// # todo!() +/// } +/// } +/// +/// #[test] +/// fn some_circuit_test() { +/// // At the start of your test, enable tracing. 
+/// tracing_subscriber::fmt() +/// .with_max_level(tracing::Level::DEBUG) +/// .with_ansi(false) +/// .without_time() +/// .init(); +/// +/// // Now when the rest of the test runs, you will get `tracing` output for every +/// // operation that the circuit performs under the hood! +/// } +/// ``` +#[derive(Debug)] +pub struct TracingFloorPlanner { + _phantom: PhantomData
<P>
, +} + +impl FloorPlanner for TracingFloorPlanner
<P>
{ + fn synthesize + SyncDeps, C: Circuit>( + cs: &mut CS, + circuit: &C, + config: C::Config, + constants: Vec>, + ) -> Result<(), Error> { + P::synthesize( + &mut TracingAssignment::new(cs), + &TracingCircuit::borrowed(circuit), + config, + constants, + ) + } +} + +/// A helper type that augments a [`Circuit`] with [`tracing`] spans and events. +enum TracingCircuit<'c, F: Field, C: Circuit> { + Borrowed(&'c C, PhantomData), + Owned(C, PhantomData), +} + +impl<'c, F: Field, C: Circuit> TracingCircuit<'c, F, C> { + fn borrowed(circuit: &'c C) -> Self { + Self::Borrowed(circuit, PhantomData) + } + + fn owned(circuit: C) -> Self { + Self::Owned(circuit, PhantomData) + } + + fn inner_ref(&self) -> &C { + match self { + TracingCircuit::Borrowed(circuit, ..) => circuit, + TracingCircuit::Owned(circuit, ..) => circuit, + } + } +} + +impl<'c, F: Field, C: Circuit> Circuit for TracingCircuit<'c, F, C> { + type Config = C::Config; + type FloorPlanner = C::FloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + Self::owned(self.inner_ref().without_witnesses()) + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let _span = debug_span!("configure").entered(); + C::configure(meta) + } + + fn synthesize(&self, config: Self::Config, layouter: impl Layouter) -> Result<(), Error> { + let _span = debug_span!("synthesize").entered(); + self.inner_ref() + .synthesize(config, TracingLayouter::new(layouter)) + } +} + +/// A helper type that augments a [`Layouter`] with [`tracing`] spans and events. +struct TracingLayouter> { + layouter: L, + namespace_spans: Vec, + _phantom: PhantomData, +} + +impl> TracingLayouter { + fn new(layouter: L) -> Self { + Self { + layouter, + namespace_spans: vec![], + _phantom: PhantomData, + } + } +} + +impl> Layouter for TracingLayouter { + type Root = Self; + + fn assign_region(&mut self, name: N, mut assignment: A) -> Result + where + A: FnMut(Region<'_, F>) -> Result, + N: Fn() -> NR, + NR: Into, + { + let _span = debug_span!("region", name = name().into()).entered(); + self.layouter.assign_region(name, |region| { + let mut region = TracingRegion(region); + let region: &mut dyn RegionLayouter = &mut region; + assignment(region.into()) + }) + } + + #[cfg(feature = "parallel_syn")] + fn assign_regions( + &mut self, + _name: N, + _assignments: Vec, + ) -> Result, Error> + where + A: FnMut(Region<'_, F>) -> Result + Send, + AR: Send, + N: Fn() -> NR, + NR: Into, + { + // todo + unimplemented!() + } + + fn assign_table(&mut self, name: N, assignment: A) -> Result<(), Error> + where + A: FnMut(Table<'_, F>) -> Result<(), Error>, + N: Fn() -> NR, + NR: Into, + { + let _span = debug_span!("table", name = name().into()).entered(); + self.layouter.assign_table(name, assignment) + } + + fn constrain_instance( + &mut self, + cell: Cell, + column: Column, + row: usize, + ) -> Result<(), Error> { + self.layouter.constrain_instance(cell, column, row) + } + + fn get_challenge(&self, _: Challenge) -> Value { + Value::unknown() + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } + + fn push_namespace(&mut self, name_fn: N) + where + NR: Into, + N: FnOnce() -> NR, + { + let name = name_fn().into(); + self.namespace_spans.push(debug_span!("ns", name).entered()); + self.layouter.push_namespace(|| name); + } + + fn pop_namespace(&mut self, gadget_name: Option) { + self.layouter.pop_namespace(gadget_name); + self.namespace_spans.pop(); + } +} + +fn debug_value_and_return_cell(value: AssignedCell) -> Cell { + if 
let Some(v) = value.value().into_option() { + debug!(target: "assigned", value = ?v); + } + value.cell() +} + +/// A helper type that augments a [`Region`] with [`tracing`] spans and events. +#[derive(Debug)] +struct TracingRegion<'r, F: Field>(Region<'r, F>); + +impl<'r, F: Field> RegionLayouter for TracingRegion<'r, F> { + fn enable_selector<'v>( + &'v mut self, + annotation: &'v (dyn Fn() -> String + 'v), + selector: &Selector, + offset: usize, + ) -> Result<(), Error> { + let _guard = debug_span!("enable_selector", name = annotation(), offset = offset).entered(); + debug!(target: "layouter", "Entered"); + self.0.enable_selector(annotation, selector, offset) + } + + fn name_column<'v>( + &'v mut self, + _: &'v (dyn std::ops::Fn() -> std::string::String + 'v), + _: Column, + ) { + } + + fn query_advice(&self, column: Column, offset: usize) -> Result { + self.0.query_advice(column, offset) + } + + fn query_fixed(&self, column: Column, offset: usize) -> Result { + self.0.query_fixed(column, offset) + } + + fn assign_advice<'v>( + &'v mut self, + annotation: &'v (dyn Fn() -> String + 'v), + column: Column, + offset: usize, + to: &'v mut (dyn FnMut() -> Value> + 'v), + ) -> Result { + let _guard = + debug_span!("assign_advice", name = annotation(), column = ?column, offset = offset) + .entered(); + debug!(target: "layouter", "Entered"); + self.0 + .assign_advice(annotation, column, offset, to) + .map(debug_value_and_return_cell) + } + + fn assign_advice_from_constant<'v>( + &'v mut self, + annotation: &'v (dyn Fn() -> String + 'v), + column: Column, + offset: usize, + constant: Assigned, + ) -> Result { + let _guard = debug_span!("assign_advice_from_constant", + name = annotation(), + column = ?column, + offset = offset, + constant = ?constant, + ) + .entered(); + debug!(target: "layouter", "Entered"); + self.0 + .assign_advice_from_constant(annotation, column, offset, constant) + .map(debug_value_and_return_cell) + } + + fn assign_advice_from_instance<'v>( + &mut self, + annotation: &'v (dyn Fn() -> String + 'v), + instance: Column, + row: usize, + advice: Column, + offset: usize, + ) -> Result<(Cell, Value), Error> { + let _guard = debug_span!("assign_advice_from_instance", + name = annotation(), + instance = ?instance, + row = row, + advice = ?advice, + offset = offset, + ) + .entered(); + debug!(target: "layouter", "Entered"); + self.0 + .assign_advice_from_instance(annotation, instance, row, advice, offset) + .map(|value| { + if let Some(v) = value.value().into_option() { + debug!(target: "assigned", value = ?v); + } + (value.cell(), value.value().cloned()) + }) + } + + fn instance_value( + &mut self, + instance: Column, + row: usize, + ) -> Result, Error> { + self.0.instance_value(instance, row) + } + + fn assign_fixed<'v>( + &'v mut self, + annotation: &'v (dyn Fn() -> String + 'v), + column: Column, + offset: usize, + to: &'v mut (dyn FnMut() -> Value> + 'v), + ) -> Result { + let _guard = + debug_span!("assign_fixed", name = annotation(), column = ?column, offset = offset) + .entered(); + debug!(target: "layouter", "Entered"); + self.0 + .assign_fixed(annotation, column, offset, to) + .map(debug_value_and_return_cell) + } + + fn constrain_constant(&mut self, cell: Cell, constant: Assigned) -> Result<(), Error> { + debug!(target: "constrain_constant", cell = ?cell, constant = ?constant); + self.0.constrain_constant(cell, constant) + } + + fn constrain_equal(&mut self, left: Cell, right: Cell) -> Result<(), Error> { + debug!(target: "constrain_equal", left = ?left, right = ?right); + 
self.0.constrain_equal(left, right) + } + + fn global_offset(&self, row_offset: usize) -> usize { + self.0.global_offset(row_offset) + } +} + +/// A helper type that augments an [`Assignment`] with [`tracing`] spans and events. +struct TracingAssignment<'cs, F: Field, CS: Assignment> { + cs: &'cs mut CS, + in_region: bool, + _phantom: PhantomData, +} + +impl<'cs, F: Field, CS: Assignment> TracingAssignment<'cs, F, CS> { + fn new(cs: &'cs mut CS) -> Self { + Self { + cs, + in_region: false, + _phantom: PhantomData, + } + } +} + +impl<'cs, F: Field, CS: Assignment> Assignment for TracingAssignment<'cs, F, CS> { + fn enter_region(&mut self, name_fn: N) + where + NR: Into, + N: FnOnce() -> NR, + { + self.in_region = true; + self.cs.enter_region(name_fn); + } + + fn annotate_column(&mut self, _: A, _: Column) + where + A: FnOnce() -> AR, + AR: Into, + { + } + + fn exit_region(&mut self) { + self.cs.exit_region(); + self.in_region = false; + } + + fn enable_selector( + &mut self, + annotation: A, + selector: &Selector, + row: usize, + ) -> Result<(), Error> + where + A: FnOnce() -> AR, + AR: Into, + { + let annotation = annotation().into(); + if self.in_region { + debug!(target: "position", row = row); + } else { + debug!(target: "enable_selector", name = annotation, row = row); + } + self.cs.enable_selector(|| annotation, selector, row) + } + + fn query_advice(&self, column: Column, row: usize) -> Result { + self.cs.query_advice(column, row) + } + + fn query_fixed(&self, column: Column, row: usize) -> Result { + self.cs.query_fixed(column, row) + } + + fn query_instance(&self, column: Column, row: usize) -> Result, Error> { + let _guard = debug_span!("positioned").entered(); + debug!(target: "query_instance", column = ?column, row = row); + self.cs.query_instance(column, row) + } + + fn assign_advice( + &mut self, + annotation: A, + column: Column, + row: usize, + to: V, + ) -> Result<(), Error> + where + V: FnOnce() -> Value, + VR: Into>, + A: FnOnce() -> AR, + AR: Into, + { + let annotation = annotation().into(); + if self.in_region { + debug!(target: "position", row = row); + } else { + debug!(target: "assign_advice", name = annotation, column = ?column, row = row); + } + self.cs.assign_advice(|| annotation, column, row, to) + } + + fn assign_fixed( + &mut self, + annotation: A, + column: Column, + row: usize, + to: V, + ) -> Result<(), Error> + where + V: FnOnce() -> Value, + VR: Into>, + A: FnOnce() -> AR, + AR: Into, + { + let annotation = annotation().into(); + if self.in_region { + debug!(target: "position", row = row); + } else { + debug!(target: "assign_fixed", name = annotation, column = ?column, row = row); + } + self.cs.assign_fixed(|| annotation, column, row, to) + } + + fn copy( + &mut self, + left_column: Column, + left_row: usize, + right_column: Column, + right_row: usize, + ) -> Result<(), Error> { + let _guard = debug_span!("positioned").entered(); + debug!( + target: "copy", + left_column = ?left_column, + left_row = left_row, + right_column = ?right_column, + right_row = right_row, + ); + self.cs.copy(left_column, left_row, right_column, right_row) + } + + fn fill_from_row( + &mut self, + column: Column, + row: usize, + to: Value>, + ) -> Result<(), Error> { + let _guard = debug_span!("positioned").entered(); + debug!(target: "fill_from_row", column = ?column, row = row); + self.cs.fill_from_row(column, row, to) + } + + fn get_challenge(&self, _: Challenge) -> Value { + Value::unknown() + } + + fn push_namespace(&mut self, name_fn: N) + where + NR: Into, + N: FnOnce() -> 
NR, + { + // We enter namespace spans in TracingLayouter. + self.cs.push_namespace(name_fn) + } + + fn pop_namespace(&mut self, gadget_name: Option) { + self.cs.pop_namespace(gadget_name); + // We exit namespace spans in TracingLayouter. + } +} diff --git a/halo2_proofs/src/dev/util.rs b/halo2_proofs/src/dev/util.rs index f4cd83c34a..29092e1aea 100644 --- a/halo2_proofs/src/dev/util.rs +++ b/halo2_proofs/src/dev/util.rs @@ -1,7 +1,7 @@ use group::ff::Field; use std::collections::BTreeMap; -use super::{metadata, CellValue, Value}; +use super::{metadata, CellValue, InstanceValue, Value}; use crate::{ plonk::{ Advice, AdviceQuery, Any, Column, ColumnType, Expression, FixedQuery, Gate, InstanceQuery, @@ -12,7 +12,7 @@ use crate::{ pub(crate) struct AnyQuery { /// Query index - pub index: usize, + pub index: Option, /// Column type pub column_type: Any, /// Column index @@ -79,7 +79,7 @@ pub(super) fn load<'a, F: Field, T: ColumnType, Q: Into + Copy>( cells: &'a [Vec>], ) -> impl Fn(Q) -> Value + 'a { move |query| { - let (column, at) = &queries[query.into().index]; + let (column, at) = &queries[query.into().index.unwrap()]; let resolved_row = (row + at.0) % n; cells[column.index()][resolved_row as usize].into() } @@ -93,8 +93,8 @@ pub(super) fn load_slice<'a, F: Field, T: ColumnType, Q: Into + Copy>( cells: &'a [&mut [CellValue]], ) -> impl Fn(Q) -> Value + 'a { move |query| { - let (column, at) = &queries[query.into().index]; - let resolved_row = (row + at.0 + n) % n; + let (column, at) = &queries[query.into().index.unwrap()]; + let resolved_row = (row + at.0) % n; cells[column.index()][resolved_row as usize].into() } } @@ -103,12 +103,13 @@ pub(super) fn load_instance<'a, F: Field, T: ColumnType, Q: Into + Cop n: i32, row: i32, queries: &'a [(Column, Rotation)], - cells: &'a [Vec], + cells: &'a [Vec>], ) -> impl Fn(Q) -> Value + 'a { move |query| { - let (column, at) = &queries[query.into().index]; - let resolved_row = (row + at.0 + n) % n; - Value::Real(cells[column.index()][resolved_row as usize]) + let (column, at) = &queries[query.into().index.unwrap()]; + let resolved_row = (row + at.0) % n; + let cell = &cells[column.index()][resolved_row as usize]; + Value::Real(cell.value()) } } diff --git a/halo2_proofs/src/helpers.rs b/halo2_proofs/src/helpers.rs index 09ac94accc..179bdbed26 100644 --- a/halo2_proofs/src/helpers.rs +++ b/halo2_proofs/src/helpers.rs @@ -1,8 +1,7 @@ use crate::plonk::{Any, Column}; use crate::poly::Polynomial; -use ff::PrimeField; -use ff::{Field, FromUniformBytes}; -use halo2curves::{pairing::Engine, serde::SerdeObject, CurveAffine}; +use ff::{FromUniformBytes, PrimeField}; +use halo2curves::{serde::SerdeObject, CurveAffine}; use num_bigint::BigUint; use std::io; @@ -93,6 +92,7 @@ macro_rules! two_dim_vec_to_vec_of_slice { #[cfg(test)] mod test { use super::*; + use ff::Field; use halo2curves::bn256::{Fq, G1Affine}; use rand_core::OsRng; #[test] @@ -112,6 +112,7 @@ mod test { } } } + pub trait SerdeCurveAffine: CurveAffine + SerdeObject { /// Reads an element from the buffer and parses it according to the `format`: /// - `Processed`: Reads a compressed curve element and decompress it diff --git a/halo2_proofs/src/lib.rs b/halo2_proofs/src/lib.rs index 39fd43d4e5..d670143b21 100644 --- a/halo2_proofs/src/lib.rs +++ b/halo2_proofs/src/lib.rs @@ -1,32 +1,17 @@ //! # halo2_proofs #![cfg_attr(docsrs, feature(doc_cfg))] -// Build without warnings on stable 1.51 and later. -#![allow(unknown_lints)] -// Disable old lint warnings until our MSRV is at least 1.51. 
-#![allow(renamed_and_removed_lints)] -// Use the old lint name to build without warnings until our MSRV is at least 1.51. -#![allow(clippy::unknown_clippy_lints)] // The actual lints we want to disable. -#![allow( - clippy::op_ref, - clippy::assign_op_pattern, - clippy::too_many_arguments, - clippy::suspicious_arithmetic_impl, - clippy::many_single_char_names, - clippy::same_item_push, - clippy::upper_case_acronyms, - clippy::uninit_vec -)] -#![deny(broken_intra_doc_links)] +#![allow(clippy::op_ref, clippy::many_single_char_names)] +#![deny(rustdoc::broken_intra_doc_links)] #![deny(missing_debug_implementations)] +#![feature(stmt_expr_attributes)] +// #![deny(missing_docs)] // #![deny(unsafe_code)] -// Remove this once we update pasta_curves -#![allow(unused_imports)] -#![allow(clippy::derive_partial_eq_without_eq)] +#![allow(clippy::uninit_vec)] +#![allow(clippy::too_many_arguments)] #[cfg(feature = "counter")] -#[macro_use] extern crate lazy_static; #[cfg(feature = "counter")] diff --git a/halo2_proofs/src/multicore.rs b/halo2_proofs/src/multicore.rs index a22eac9e79..60502f07d2 100644 --- a/halo2_proofs/src/multicore.rs +++ b/halo2_proofs/src/multicore.rs @@ -1,5 +1,70 @@ -//! An interface for dealing with the kinds of parallel computations involved in -//! `halo2`. It's currently just a (very!) thin wrapper around [`rayon`] but may -//! be extended in the future to allow for various parallelism strategies. +#[cfg(all( + feature = "multicore", + target_arch = "wasm32", + not(target_feature = "atomics") +))] +compile_error!( + "The multicore feature flag is not supported on wasm32 architectures without atomics" +); -pub use rayon::{current_num_threads, scope, Scope}; +pub use maybe_rayon::{ + iter::{IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator}, + join, scope, Scope, +}; + +#[cfg(feature = "multicore")] +pub use maybe_rayon::{ + current_num_threads, + iter::{IndexedParallelIterator, IntoParallelRefIterator}, + slice::ParallelSliceMut, +}; + +#[cfg(not(feature = "multicore"))] +pub fn current_num_threads() -> usize { + 1 +} + +pub trait TryFoldAndReduce { + /// Implements `iter.try_fold().try_reduce()` for `rayon::iter::ParallelIterator`, + /// falling back on `Iterator::try_fold` when the `multicore` feature flag is + /// disabled. + /// The `try_fold_and_reduce` function can only be called by a iter with + /// `Result` item type because the `fold_op` must meet the trait + /// bounds of both `try_fold` and `try_reduce` from rayon. 
+ fn try_fold_and_reduce( + self, + identity: impl Fn() -> T + Send + Sync, + fold_op: impl Fn(T, Result) -> Result + Send + Sync, + ) -> Result; +} + +#[cfg(feature = "multicore")] +impl TryFoldAndReduce for I +where + T: Send + Sync, + E: Send + Sync, + I: maybe_rayon::iter::ParallelIterator>, +{ + fn try_fold_and_reduce( + self, + identity: impl Fn() -> T + Send + Sync, + fold_op: impl Fn(T, Result) -> Result + Send + Sync, + ) -> Result { + self.try_fold(&identity, &fold_op) + .try_reduce(&identity, |a, b| fold_op(a, Ok(b))) + } +} + +#[cfg(not(feature = "multicore"))] +impl TryFoldAndReduce for I +where + I: std::iter::Iterator>, +{ + fn try_fold_and_reduce( + mut self, + identity: impl Fn() -> T + Send + Sync, + fold_op: impl Fn(T, Result) -> Result + Send + Sync, + ) -> Result { + self.try_fold(identity(), fold_op) + } +} diff --git a/halo2_proofs/src/plonk.rs b/halo2_proofs/src/plonk.rs index 96d34992e1..4da095e197 100644 --- a/halo2_proofs/src/plonk.rs +++ b/halo2_proofs/src/plonk.rs @@ -7,17 +7,13 @@ use blake2b_simd::Params as Blake2bParams; use group::ff::{Field, FromUniformBytes, PrimeField}; -use halo2curves::pairing::Engine; use crate::arithmetic::CurveAffine; use crate::helpers::{ polynomial_slice_byte_length, read_polynomial_vec, write_polynomial_slice, SerdeCurveAffine, SerdePrimeField, }; -use crate::poly::{ - commitment::Params, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, - PinnedEvaluationDomain, Polynomial, -}; +use crate::poly::{Coeff, EvaluationDomain, LagrangeCoeff, PinnedEvaluationDomain, Polynomial}; use crate::transcript::{ChallengeScalar, EncodedChallenge, Transcript}; use crate::SerdeFormat; @@ -29,7 +25,8 @@ mod keygen; #[allow(dead_code)] mod lookup; mod mv_lookup; -pub(crate) mod permutation; +pub mod permutation; +mod shuffle; mod vanishing; mod prover; @@ -106,11 +103,16 @@ where pub fn read>( reader: &mut R, format: SerdeFormat, + #[cfg(feature = "circuit-params")] params: ConcreteCircuit::Params, ) -> io::Result { let mut k = [0u8; 4]; reader.read_exact(&mut k)?; let k = u32::from_be_bytes(k); - let (domain, cs, _) = keygen::create_domain::(k); + let (domain, cs, _) = keygen::create_domain::( + k, + #[cfg(feature = "circuit-params")] + params, + ); let mut num_fixed_columns = [0u8; 4]; reader.read_exact(&mut num_fixed_columns)?; let num_fixed_columns = u32::from_be_bytes(num_fixed_columns); @@ -147,8 +149,14 @@ where pub fn from_bytes>( mut bytes: &[u8], format: SerdeFormat, + #[cfg(feature = "circuit-params")] params: ConcreteCircuit::Params, ) -> io::Result { - Self::read::<_, ConcreteCircuit>(&mut bytes, format) + Self::read::<_, ConcreteCircuit>( + &mut bytes, + format, + #[cfg(feature = "circuit-params")] + params, + ) } } @@ -159,14 +167,13 @@ where fn bytes_length(&self) -> usize { 8 + (self.fixed_commitments.len() * C::default().to_bytes().as_ref().len()) + self.permutation.bytes_length() - /* - + self.selectors.len() - * (self - .selectors - .get(0) - .map(|selector| selector.len() / 8 + 1) - .unwrap_or(0)) - */ + // scroll/halo2: we don’t need to store + // + self.selectors.len() + // * (self + // .selectors + // .get(0) + // .map(|selector| (selector.len() + 7) / 8) + // .unwrap_or(0)) } fn from_parts( @@ -174,8 +181,11 @@ where fixed_commitments: Vec, permutation: permutation::VerifyingKey, cs: ConstraintSystem, - //selectors: Vec>, - ) -> Self { + // selectors: Vec>, + ) -> Self + where + C::ScalarExt: FromUniformBytes<64>, + { // Compute cached values. 
let cs_degree = cs.degree(); @@ -243,6 +253,11 @@ where pub fn cs(&self) -> &ConstraintSystem { &self.cs } + + /// Returns representative of this `VerifyingKey` in transcripts + pub fn transcript_repr(&self) -> C::Scalar { + self.transcript_repr + } } /// Minimal representation of a verification key that can be used to identify @@ -333,8 +348,14 @@ where pub fn read>( reader: &mut R, format: SerdeFormat, + #[cfg(feature = "circuit-params")] params: ConcreteCircuit::Params, ) -> io::Result { - let vk = VerifyingKey::::read::(reader, format)?; + let vk = VerifyingKey::::read::( + reader, + format, + #[cfg(feature = "circuit-params")] + params, + )?; let l0 = Polynomial::read(reader, format)?; let l_last = Polynomial::read(reader, format)?; let l_active_row = Polynomial::read(reader, format)?; @@ -367,8 +388,14 @@ where pub fn from_bytes>( mut bytes: &[u8], format: SerdeFormat, + #[cfg(feature = "circuit-params")] params: ConcreteCircuit::Params, ) -> io::Result { - Self::read::<_, ConcreteCircuit>(&mut bytes, format) + Self::read::<_, ConcreteCircuit>( + &mut bytes, + format, + #[cfg(feature = "circuit-params")] + params, + ) } } diff --git a/halo2_proofs/src/plonk/assigned.rs b/halo2_proofs/src/plonk/assigned.rs index 46d527b915..07de325678 100644 --- a/halo2_proofs/src/plonk/assigned.rs +++ b/halo2_proofs/src/plonk/assigned.rs @@ -446,7 +446,6 @@ mod tests { mod proptests { use std::{ cmp, - convert::TryFrom, ops::{Add, Mul, Neg, Sub}, }; @@ -613,7 +612,7 @@ mod proptests { // Ensure that: // - we have at least one value to apply unary operators to. // - we can apply every binary operator pairwise sequentially. - cmp::max(if num_unary > 0 { 1 } else { 0 }, num_binary + 1)), + cmp::max(usize::from(num_unary > 0), num_binary + 1)), operations in arb_operators(num_unary, num_binary).prop_shuffle(), ) -> (Vec>, Vec) { (values, operations) diff --git a/halo2_proofs/src/plonk/circuit.rs b/halo2_proofs/src/plonk/circuit.rs index 251c146aeb..2e330b5680 100644 --- a/halo2_proofs/src/plonk/circuit.rs +++ b/halo2_proofs/src/plonk/circuit.rs @@ -1,10 +1,17 @@ +use crate::circuit::layouter::SyncDeps; +use crate::dev::metadata; +use crate::plonk::shuffle; +use crate::{ + circuit::{Layouter, Region, Value}, + poly::Rotation, +}; use core::cmp::max; use core::ops::{Add, Mul}; use ff::Field; -use std::collections::{BTreeMap, HashMap}; +use sealed::SealedPhase; +use std::collections::BTreeMap; use std::fmt::Debug; -use std::hash::Hasher; -use std::marker::PhantomData; +use std::iter::{Product, Sum}; use std::ops::Range; use std::{ convert::TryFrom, @@ -12,19 +19,14 @@ use std::{ }; use super::{mv_lookup, permutation, Assigned, Error}; -use crate::dev::metadata; -use crate::{ - circuit::{Layouter, Region, Value}, - poly::Rotation, -}; -use sealed::SealedPhase; - mod compress_selectors; /// A column type pub trait ColumnType: 'static + Sized + Copy + std::fmt::Debug + PartialEq + Eq + Into { + /// Return expression from cell + fn query_cell(&self, index: usize, at: Rotation) -> Expression; } /// A column with an index and type @@ -48,6 +50,31 @@ impl Column { pub fn column_type(&self) -> &C { &self.column_type } + + /// Return expression from column at a relative position + pub fn query_cell(&self, at: Rotation) -> Expression { + self.column_type.query_cell(self.index, at) + } + + /// Return expression from column at the current row + pub fn cur(&self) -> Expression { + self.query_cell(Rotation::cur()) + } + + /// Return expression from column at the next row + pub fn next(&self) -> Expression { + 
self.query_cell(Rotation::next()) + } + + /// Return expression from column at the previous row + pub fn prev(&self) -> Expression { + self.query_cell(Rotation::prev()) + } + + /// Return expression from column at the specified rotation + pub fn rot(&self, rotation: i32) -> Expression { + self.query_cell(Rotation(rotation)) + } } impl Ord for Column { @@ -79,6 +106,12 @@ pub(crate) mod sealed { } } + impl SealedPhase for Phase { + fn to_sealed(self) -> Phase { + self + } + } + /// Sealed trait to help keep `Phase` private. pub trait SealedPhase { fn to_sealed(self) -> Phase; @@ -231,10 +264,56 @@ impl PartialOrd for Any { } } -impl ColumnType for Advice {} -impl ColumnType for Fixed {} -impl ColumnType for Instance {} -impl ColumnType for Any {} +impl ColumnType for Advice { + fn query_cell(&self, index: usize, at: Rotation) -> Expression { + Expression::Advice(AdviceQuery { + index: None, + column_index: index, + rotation: at, + phase: self.phase, + }) + } +} +impl ColumnType for Fixed { + fn query_cell(&self, index: usize, at: Rotation) -> Expression { + Expression::Fixed(FixedQuery { + index: None, + column_index: index, + rotation: at, + }) + } +} +impl ColumnType for Instance { + fn query_cell(&self, index: usize, at: Rotation) -> Expression { + Expression::Instance(InstanceQuery { + index: None, + column_index: index, + rotation: at, + }) + } +} +impl ColumnType for Any { + fn query_cell(&self, index: usize, at: Rotation) -> Expression { + match self { + Any::Advice(Advice { phase }) => Expression::Advice(AdviceQuery { + index: None, + column_index: index, + rotation: at, + phase: *phase, + }), + Any::Fixed => Expression::Fixed(FixedQuery { + index: None, + column_index: index, + rotation: at, + }), + Any::Instance => Expression::Instance(InstanceQuery { + index: None, + column_index: index, + rotation: at, + }), + } + } +} impl From for Any { fn from(advice: Advice) -> Any { @@ -388,13 +467,23 @@ impl Selector { pub fn is_simple(&self) -> bool { self.1 } + + /// Returns index of this selector + pub fn index(&self) -> usize { + self.0 + } + + /// Return expression from selector + pub fn expr(&self) -> Expression { + Expression::Selector(*self) + } } /// Query of fixed column at a certain relative location #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct FixedQuery { /// Query index - pub(crate) index: usize, + pub(crate) index: Option, /// Column index pub(crate) column_index: usize, /// Rotation of this query @@ -404,7 +493,7 @@ pub struct FixedQuery { impl FixedQuery { /// Index pub fn index(&self) -> usize { - self.index + self.index.unwrap() } /// Column index pub fn column_index(&self) -> usize { @@ -425,7 +514,7 @@ impl FixedQuery { #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct AdviceQuery { /// Query index - pub(crate) index: usize, + pub(crate) index: Option, /// Column index pub(crate) column_index: usize, /// Rotation of this query @@ -437,7 +526,7 @@ pub struct AdviceQuery { impl AdviceQuery { /// Index pub fn index(&self) -> usize { - self.index + self.index.unwrap() } /// Column index pub fn column_index(&self) -> usize { @@ -463,7 +552,7 @@ impl AdviceQuery { #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct InstanceQuery { /// Query index - pub(crate) index: usize, + pub(crate) index: Option, /// Column index pub(crate) column_index: usize, /// Rotation of this query @@ -473,7 +562,7 @@ pub struct InstanceQuery { impl InstanceQuery { /// Index pub fn index(&self) -> usize { - self.index + self.index.unwrap() } /// Column index 
pub fn column_index(&self) -> usize { @@ -496,7 +585,7 @@ impl InstanceQuery { /// they cannot simultaneously be used as general fixed columns. /// /// [`Layouter::assign_table`]: crate::circuit::Layouter::assign_table -#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)] pub struct TableColumn { /// The fixed column that this table column is stored in. /// @@ -509,7 +598,8 @@ pub struct TableColumn { } impl TableColumn { - pub(crate) fn inner(&self) -> Column { + /// Returns inner column + pub fn inner(&self) -> Column { self.inner } } @@ -531,6 +621,11 @@ impl Challenge { pub fn phase(&self) -> u8 { self.phase.0 } + + /// Return Expression + pub fn expr(&self) -> Expression { + Expression::Challenge(*self) + } } /// This trait allows a [`Circuit`] to direct some backend to assign a witness @@ -680,7 +775,7 @@ pub trait FloorPlanner { /// - Perform any necessary setup or measurement tasks, which may involve one or more /// calls to `Circuit::default().synthesize(config, &mut layouter)`. /// - Call `circuit.synthesize(config, &mut layouter)` exactly once. - fn synthesize, C: Circuit>( + fn synthesize + SyncDeps, C: Circuit>( cs: &mut CS, circuit: &C, config: C::Config, @@ -697,11 +792,33 @@ pub trait Circuit { /// The floor planner used for this circuit. This is an associated type of the /// `Circuit` trait because its behaviour is circuit-critical. type FloorPlanner: FloorPlanner; + /// Optional circuit configuration parameters. Requires the `circuit-params` feature. + #[cfg(feature = "circuit-params")] + type Params: Default; /// Returns a copy of this circuit with no witness values (i.e. all witnesses set to /// `None`). For most circuits, this will be equal to `Self::default()`. fn without_witnesses(&self) -> Self; + /// Returns a reference to the parameters that should be used to configure the circuit. + /// Requires the `circuit-params` feature. + #[cfg(feature = "circuit-params")] + fn params(&self) -> Self::Params { + Self::Params::default() + } + + /// The circuit is given an opportunity to describe the exact gate + /// arrangement, column arrangement, etc. Takes a runtime parameter. The default + /// implementation calls `configure` ignoring the `_params` argument in order to easily support + /// circuits that don't use configuration parameters. + #[cfg(feature = "circuit-params")] + fn configure_with_params( + meta: &mut ConstraintSystem, + _params: Self::Params, + ) -> Self::Config { + Self::configure(meta) + } + /// The circuit is given an opportunity to describe the exact gate /// arrangement, column arrangement, etc. 
fn configure(meta: &mut ConstraintSystem) -> Self::Config; @@ -738,8 +855,62 @@ pub enum Expression { } impl Expression { + /// Make side effects + pub fn query_cells(&mut self, cells: &mut VirtualCells<'_, F>) { + match self { + Expression::Constant(_) => (), + Expression::Selector(selector) => { + if !cells.queried_selectors.contains(selector) { + cells.queried_selectors.push(*selector); + } + } + Expression::Fixed(query) => { + if query.index.is_none() { + let col = Column { + index: query.column_index, + column_type: Fixed, + }; + cells.queried_cells.push((col, query.rotation).into()); + query.index = Some(cells.meta.query_fixed_index(col, query.rotation)); + } + } + Expression::Advice(query) => { + if query.index.is_none() { + let col = Column { + index: query.column_index, + column_type: Advice { phase: query.phase }, + }; + cells.queried_cells.push((col, query.rotation).into()); + query.index = Some(cells.meta.query_advice_index(col, query.rotation)); + } + } + Expression::Instance(query) => { + if query.index.is_none() { + let col = Column { + index: query.column_index, + column_type: Instance, + }; + cells.queried_cells.push((col, query.rotation).into()); + query.index = Some(cells.meta.query_instance_index(col, query.rotation)); + } + } + Expression::Challenge(_) => (), + Expression::Negated(a) => a.query_cells(cells), + Expression::Sum(a, b) => { + a.query_cells(cells); + b.query_cells(cells); + } + Expression::Product(a, b) => { + a.query_cells(cells); + b.query_cells(cells); + } + Expression::Scaled(a, _) => a.query_cells(cells), + }; + } + /// Evaluate the polynomial using the provided closures to perform the /// operations. + #[allow(clippy::too_many_arguments)] pub fn evaluate( &self, constant: &impl Fn(F) -> T, @@ -849,6 +1020,7 @@ impl Expression { /// Evaluate the polynomial lazily using the provided closures to perform the /// operations. + #[allow(clippy::too_many_arguments)] pub fn evaluate_lazy( &self, constant: &impl Fn(F) -> T, @@ -1124,43 +1296,43 @@ impl std::fmt::Debug for Expression { Expression::Constant(scalar) => f.debug_tuple("Constant").field(scalar).finish(), Expression::Selector(selector) => f.debug_tuple("Selector").field(selector).finish(), // Skip enum variant and print query struct directly to maintain backwards compatibility. - Expression::Fixed(FixedQuery { - index, - column_index, - rotation, - }) => f - .debug_struct("Fixed") - .field("query_index", index) - .field("column_index", column_index) - .field("rotation", rotation) - .finish(), - Expression::Advice(AdviceQuery { - index, - column_index, - rotation, - phase, - }) => { + Expression::Fixed(query) => { + let mut debug_struct = f.debug_struct("Fixed"); + match query.index { + None => debug_struct.field("query_index", &query.index), + Some(idx) => debug_struct.field("query_index", &idx), + }; + debug_struct + .field("column_index", &query.column_index) + .field("rotation", &query.rotation) + .finish() + } + Expression::Advice(query) => { let mut debug_struct = f.debug_struct("Advice"); + match query.index { + None => debug_struct.field("query_index", &query.index), + Some(idx) => debug_struct.field("query_index", &idx), + }; debug_struct - .field("query_index", index) - .field("column_index", column_index) - .field("rotation", rotation); + .field("column_index", &query.column_index) + .field("rotation", &query.rotation); // Only show advice's phase if it's not in first phase. 
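[Editor's note, not part of the patch] A minimal usage sketch of the expression-returning query helpers introduced above (`Column::cur`/`next`, `Selector::expr`); query indices start as `None` and are filled in by `Expression::query_cells` when the gate is registered. The column names and the bn256 scalar field are assumptions for illustration only.

```
use halo2_proofs::{plonk::ConstraintSystem, poly::Rotation};
use halo2curves::bn256::Fr;

fn configure_example(meta: &mut ConstraintSystem<Fr>) {
    let a = meta.advice_column();
    let b = meta.advice_column();
    let s = meta.selector();

    meta.create_gate("sum check", |cells| {
        // Old style: explicit queries through `VirtualCells`.
        let a_cur = cells.query_advice(a, Rotation::cur());
        // New style: build expressions directly from the column / selector;
        // the query index is resolved later via `Expression::query_cells`.
        let b_next = b.next();
        let s = s.expr();
        vec![s * (a_cur + b_next)]
    });
}
```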
- if *phase != FirstPhase.to_sealed() { - debug_struct.field("phase", phase); + if query.phase != FirstPhase.to_sealed() { + debug_struct.field("phase", &query.phase); } debug_struct.finish() } - Expression::Instance(InstanceQuery { - index, - column_index, - rotation, - }) => f - .debug_struct("Instance") - .field("query_index", index) - .field("column_index", column_index) - .field("rotation", rotation) - .finish(), + Expression::Instance(query) => { + let mut debug_struct = f.debug_struct("Instance"); + match query.index { + None => debug_struct.field("query_index", &query.index), + Some(idx) => debug_struct.field("query_index", &idx), + }; + debug_struct + .field("column_index", &query.column_index) + .field("rotation", &query.rotation) + .finish() + } Expression::Challenge(challenge) => { f.debug_tuple("Challenge").field(challenge).finish() } @@ -1218,6 +1390,20 @@ impl Mul for Expression { } } +impl Sum for Expression { + fn sum>(iter: I) -> Self { + iter.reduce(|acc, x| acc + x) + .unwrap_or(Expression::Constant(F::ZERO)) + } +} + +impl Product for Expression { + fn product>(iter: I) -> Self { + iter.reduce(|acc, x| acc * x) + .unwrap_or(Expression::Constant(F::ONE)) + } +} + /// Represents an index into a vector where each entry corresponds to a distinct /// point that polynomials are queried at. #[derive(Copy, Clone, Debug)] @@ -1245,25 +1431,34 @@ impl>> From<(Col, Rotation)> for VirtualCell { /// These are returned by the closures passed to `ConstraintSystem::create_gate`. #[derive(Debug)] pub struct Constraint { - name: &'static str, + name: String, poly: Expression, } impl From> for Constraint { fn from(poly: Expression) -> Self { - Constraint { name: "", poly } + Constraint { + name: "".to_string(), + poly, + } } } -impl From<(&'static str, Expression)> for Constraint { - fn from((name, poly): (&'static str, Expression)) -> Self { - Constraint { name, poly } +impl> From<(S, Expression)> for Constraint { + fn from((name, poly): (S, Expression)) -> Self { + Constraint { + name: name.as_ref().to_string(), + poly, + } } } impl From> for Vec> { fn from(poly: Expression) -> Self { - vec![Constraint { name: "", poly }] + vec![Constraint { + name: "".to_string(), + poly, + }] } } @@ -1345,7 +1540,7 @@ impl>, Iter: IntoIterator> IntoIterato fn into_iter(self) -> Self::IntoIter { std::iter::repeat(self.selector) - .zip(self.constraints.into_iter()) + .zip(self.constraints) .map(apply_selector_to_constraint) } } @@ -1353,8 +1548,8 @@ impl>, Iter: IntoIterator> IntoIterato /// Gate #[derive(Clone, Debug, PartialEq, Eq)] pub struct Gate { - name: &'static str, - constraint_names: Vec<&'static str>, + name: String, + constraint_names: Vec, pub polys: Vec>, /// We track queried selectors separately from other cells, so that we can use them to /// trigger debug checks on gates. @@ -1363,12 +1558,14 @@ pub struct Gate { } impl Gate { - pub(crate) fn name(&self) -> &'static str { - self.name + /// Returns the gate name. + pub fn name(&self) -> &str { + self.name.as_str() } - pub(crate) fn constraint_name(&self, constraint_index: usize) -> &'static str { - self.constraint_names[constraint_index] + /// Returns the name of the constraint at index `constraint_index`. + pub fn constraint_name(&self, constraint_index: usize) -> &str { + self.constraint_names[constraint_index].as_str() } /// Returns constraints of this gate @@ -1431,6 +1628,10 @@ pub struct ConstraintSystem { // input expressions and a sequence of table expressions involved in the lookup. 
pub lookups: Vec>, + // Vector of shuffle arguments, where each corresponds to a sequence of + // input expressions and a sequence of shuffle expressions involved in the shuffle. + pub(crate) shuffles: Vec>, + // List of indexes of Fixed columns which are associated to a circuit-general Column tied to their annotation. pub(crate) general_column_annotations: BTreeMap, @@ -1457,6 +1658,7 @@ pub struct PinnedConstraintSystem<'a, F: Field> { fixed_queries: &'a Vec<(Column, Rotation)>, permutation: &'a permutation::Argument, lookups_map: &'a BTreeMap>, + shuffles: &'a Vec>, constants: &'a Vec>, minimum_degree: &'a Option, } @@ -1519,6 +1721,7 @@ impl Default for ConstraintSystem { permutation: permutation::Argument::new(), lookups_map: BTreeMap::default(), lookups: Vec::new(), + shuffles: Vec::new(), general_column_annotations: BTreeMap::new(), constants: vec![], minimum_degree: None, @@ -1545,6 +1748,7 @@ impl ConstraintSystem { instance_queries: &self.instance_queries, permutation: &self.permutation, lookups_map: &self.lookups_map, + shuffles: &self.shuffles, constants: &self.constants, minimum_degree: &self.minimum_degree, } @@ -1573,26 +1777,25 @@ impl ConstraintSystem { /// /// `table_map` returns a map between input expressions and the table columns /// they need to match. - pub fn lookup( + pub fn lookup>( &mut self, // FIXME use name in debug messages - _name: &'static str, + _name: S, table_map: impl FnOnce(&mut VirtualCells<'_, F>) -> Vec<(Expression, TableColumn)>, ) { let mut cells = VirtualCells::new(self); let (input_expressions, table_expressions): (Vec<_>, Vec<_>) = table_map(&mut cells) .into_iter() - .map(|(input, table)| { + .map(|(mut input, table)| { if input.contains_simple_selector() { panic!("expression containing simple selector supplied to lookup argument"); } - - let table = cells.query_fixed(table.inner(), Rotation::cur()); - + let mut table = cells.query_fixed(table.inner(), Rotation::cur()); + input.query_cells(&mut cells); + table.query_cells(&mut cells); (input, table) }) .unzip(); - let table_expressions_identifier = table_expressions .iter() .fold(String::new(), |string, expr| string + &expr.identifier()); @@ -1681,10 +1884,10 @@ impl ConstraintSystem { /// /// `table_map` returns a map between input expressions and the table expressions /// they need to match. - pub fn lookup_any( + pub fn lookup_any>( &mut self, // FIXME use name in debug messages - _name: &'static str, + _name: S, table_map: impl FnOnce(&mut VirtualCells<'_, F>) -> Vec<(Expression, Expression)>, ) { let mut cells = VirtualCells::new(self); @@ -1705,6 +1908,29 @@ impl ConstraintSystem { }); } + /// Add a shuffle argument for some input expressions and table expressions. + pub fn shuffle>( + &mut self, + name: S, + shuffle_map: impl FnOnce(&mut VirtualCells<'_, F>) -> Vec<(Expression, Expression)>, + ) -> usize { + let mut cells = VirtualCells::new(self); + let shuffle_map = shuffle_map(&mut cells) + .into_iter() + .map(|(mut input, mut table)| { + input.query_cells(&mut cells); + table.query_cells(&mut cells); + (input, table) + }) + .collect(); + let index = self.shuffles.len(); + + self.shuffles + .push(shuffle::Argument::new(name.as_ref(), shuffle_map)); + + index + } + fn query_fixed_index(&mut self, column: Column, at: Rotation) -> usize { // Return existing query, if it exists for (index, fixed_query) in self.fixed_queries.iter().enumerate() { @@ -1822,29 +2048,32 @@ impl ConstraintSystem { /// /// A gate is required to contain polynomial constraints. 
This method will panic if /// `constraints` returns an empty iterator. - pub fn create_gate>, Iter: IntoIterator>( + pub fn create_gate>, Iter: IntoIterator, S: AsRef>( &mut self, - name: &'static str, + name: S, constraints: impl FnOnce(&mut VirtualCells<'_, F>) -> Iter, ) { let mut cells = VirtualCells::new(self); let constraints = constraints(&mut cells); - let queried_selectors = cells.queried_selectors; - let queried_cells = cells.queried_cells; - let (constraint_names, polys): (_, Vec<_>) = constraints .into_iter() .map(|c| c.into()) - .map(|c| (c.name, c.poly)) + .map(|mut c: Constraint| { + c.poly.query_cells(&mut cells); + (c.name, c.poly) + }) .unzip(); + let queried_selectors = cells.queried_selectors; + let queried_cells = cells.queried_cells; + assert!( !polys.is_empty(), "Gates must contain at least one constraint." ); self.gates.push(Gate { - name, + name: name.as_ref().to_string(), constraint_names, polys, queried_selectors, @@ -1859,7 +2088,7 @@ impl ConstraintSystem { /// find which fixed column corresponds with a given `Selector`. /// /// Do not call this twice. Yes, this should be a builder pattern instead. - pub(crate) fn compress_selectors(mut self, selectors: Vec>) -> (Self, Vec>) { + pub fn compress_selectors(mut self, selectors: Vec>) -> (Self, Vec>) { // The number of provided selector assignments must be the number we // counted for this constraint system. assert_eq!(selectors.len(), self.num_selectors); @@ -1883,7 +2112,7 @@ impl ConstraintSystem { let (polys, selector_assignment) = compress_selectors::process( selectors .into_iter() - .zip(degrees.into_iter()) + .zip(degrees) .enumerate() .map( |(i, (activations, max_degree))| compress_selectors::SelectorDescription { @@ -1898,7 +2127,7 @@ impl ConstraintSystem { let column = self.fixed_column(); new_columns.push(column); Expression::Fixed(FixedQuery { - index: self.query_fixed_index(column, Rotation::cur()), + index: Some(self.query_fixed_index(column, Rotation::cur())), column_index: column.index, rotation: Rotation::cur(), }) @@ -1966,6 +2195,15 @@ impl ConstraintSystem { replace_selectors(expr, &selector_replacements, true); } + for expr in self.shuffles.iter_mut().flat_map(|shuffle| { + shuffle + .input_expressions + .iter_mut() + .chain(shuffle.shuffle_expressions.iter_mut()) + }) { + replace_selectors(expr, &selector_replacements, true); + } + (self, polys) } @@ -2146,6 +2384,17 @@ impl ConstraintSystem { .unwrap_or(1), ); + // The lookup argument also serves alongside the gates and must be accounted + // for. + degree = std::cmp::max( + degree, + self.shuffles + .iter() + .map(|l| l.required_degree()) + .max() + .unwrap_or(1), + ); + // Account for each gate to ensure our quotient polynomial is the // correct degree and that our extended domain is the right size. 
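[Editor's note, not part of the patch] An illustrative sketch of the relaxed naming and the new shuffle argument: gate, lookup and shuffle names now accept any `AsRef<str>`, so they can be built at runtime, and `ConstraintSystem::shuffle` returns the index of the argument it adds. The columns and the loop are hypothetical.

```
use halo2_proofs::{plonk::ConstraintSystem, poly::Rotation};
use halo2curves::bn256::Fr;

fn configure_shuffles(meta: &mut ConstraintSystem<Fr>, copies: usize) {
    let input = meta.advice_column();
    let table = meta.advice_column();

    for i in 0..copies {
        // Names no longer have to be `&'static str`; a runtime-built String works.
        let _index = meta.shuffle(format!("shuffle {}", i), |cells| {
            vec![(
                cells.query_advice(input, Rotation::cur()),
                cells.query_advice(table, Rotation::cur()),
            )]
        });
    }
}
```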
degree = std::cmp::max(degree, self.max_gate_degree()); @@ -2219,6 +2468,11 @@ impl ConstraintSystem { self.num_instance_columns } + /// Returns number of selectors + pub fn num_selectors(&self) -> usize { + self.num_selectors + } + /// Returns number of challenges pub fn num_challenges(&self) -> usize { self.num_challenges @@ -2242,6 +2496,11 @@ impl ConstraintSystem { &self.gates } + /// Returns general column annotations + pub fn general_column_annotations(&self) -> &BTreeMap { + &self.general_column_annotations + } + /// Returns advice queries pub fn advice_queries(&self) -> &Vec<(Column, Rotation)> { &self.advice_queries @@ -2267,6 +2526,11 @@ impl ConstraintSystem { &self.lookups } + /// Returns shuffle arguments + pub fn shuffles(&self) -> &Vec> { + &self.shuffles + } + /// Returns constants pub fn constants(&self) -> &Vec> { &self.constants @@ -2301,7 +2565,7 @@ impl<'a, F: Field> VirtualCells<'a, F> { pub fn query_fixed(&mut self, column: Column, at: Rotation) -> Expression { self.queried_cells.push((column, at).into()); Expression::Fixed(FixedQuery { - index: self.meta.query_fixed_index(column, at), + index: Some(self.meta.query_fixed_index(column, at)), column_index: column.index, rotation: at, }) @@ -2311,7 +2575,7 @@ impl<'a, F: Field> VirtualCells<'a, F> { pub fn query_advice(&mut self, column: Column, at: Rotation) -> Expression { self.queried_cells.push((column, at).into()); Expression::Advice(AdviceQuery { - index: self.meta.query_advice_index(column, at), + index: Some(self.meta.query_advice_index(column, at)), column_index: column.index, rotation: at, phase: column.column_type().phase, @@ -2322,7 +2586,7 @@ impl<'a, F: Field> VirtualCells<'a, F> { pub fn query_instance(&mut self, column: Column, at: Rotation) -> Expression { self.queried_cells.push((column, at).into()); Expression::Instance(InstanceQuery { - index: self.meta.query_instance_index(column, at), + index: Some(self.meta.query_instance_index(column, at)), column_index: column.index, rotation: at, }) @@ -2343,3 +2607,47 @@ impl<'a, F: Field> VirtualCells<'a, F> { Expression::Challenge(challenge) } } + +#[cfg(test)] +mod tests { + use super::Expression; + use halo2curves::bn256::Fr; + + #[test] + fn iter_sum() { + let exprs: Vec> = vec![ + Expression::Constant(1.into()), + Expression::Constant(2.into()), + Expression::Constant(3.into()), + ]; + let happened: Expression = exprs.into_iter().sum(); + let expected: Expression = Expression::Sum( + Box::new(Expression::Sum( + Box::new(Expression::Constant(1.into())), + Box::new(Expression::Constant(2.into())), + )), + Box::new(Expression::Constant(3.into())), + ); + + assert_eq!(happened, expected); + } + + #[test] + fn iter_product() { + let exprs: Vec> = vec![ + Expression::Constant(1.into()), + Expression::Constant(2.into()), + Expression::Constant(3.into()), + ]; + let happened: Expression = exprs.into_iter().product(); + let expected: Expression = Expression::Product( + Box::new(Expression::Product( + Box::new(Expression::Constant(1.into())), + Box::new(Expression::Constant(2.into())), + )), + Box::new(Expression::Constant(3.into())), + ); + + assert_eq!(happened, expected); + } +} diff --git a/halo2_proofs/src/plonk/circuit/compress_selectors.rs b/halo2_proofs/src/plonk/circuit/compress_selectors.rs index f52a0e7c9a..95459258a1 100644 --- a/halo2_proofs/src/plonk/circuit/compress_selectors.rs +++ b/halo2_proofs/src/plonk/circuit/compress_selectors.rs @@ -283,7 +283,7 @@ mod tests { let (combination_assignments, selector_assignments) = 
process::(selectors.clone(), max_degree, || { let tmp = Expression::Fixed(FixedQuery { - index: query, + index: Some(query), column_index: query, rotation: Rotation::cur(), }); @@ -322,7 +322,7 @@ mod tests { &|_| panic!("should not occur in returned expressions"), &|query| { // Should be the correct combination in the expression - assert_eq!(selector.combination_index, query.index); + assert_eq!(selector.combination_index, query.index.unwrap()); assignment }, &|_| panic!("should not occur in returned expressions"), diff --git a/halo2_proofs/src/plonk/error.rs b/halo2_proofs/src/plonk/error.rs index 84b14cbd2d..756fa30efb 100644 --- a/halo2_proofs/src/plonk/error.rs +++ b/halo2_proofs/src/plonk/error.rs @@ -1,9 +1,8 @@ -use crate::circuit::RegionIndex; -use std::cmp; use std::error; use std::fmt; use std::io; +use super::TableColumn; use super::{Any, Column}; /// This is an error that could occur during proving or circuit synthesis. @@ -40,6 +39,8 @@ pub enum Error { /// The instance sets up a copy constraint involving a column that has not been /// included in the permutation. ColumnNotInPermutation(Column), + /// An error relating to a lookup table. + TableError(TableError), } impl From for Error { @@ -88,6 +89,7 @@ impl fmt::Display for Error { "Column {:?} must be included in the permutation. Help: try applying `meta.enable_equalty` on the column", column ), + Error::TableError(error) => write!(f, "{}", error) } } } @@ -100,3 +102,45 @@ impl error::Error for Error { } } } + +/// This is an error that could occur during table synthesis. +#[derive(Debug)] +pub enum TableError { + /// A `TableColumn` has not been assigned. + ColumnNotAssigned(TableColumn), + /// A Table has columns of uneven lengths. + UnevenColumnLengths((TableColumn, usize), (TableColumn, usize)), + /// Attempt to assign a used `TableColumn` + UsedColumn(TableColumn), + /// Attempt to overwrite a default value + OverwriteDefault(TableColumn, String, String), +} + +impl fmt::Display for TableError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TableError::ColumnNotAssigned(col) => { + write!( + f, + "{:?} not fully assigned. 
Help: assign a value at offset 0.", + col + ) + } + TableError::UnevenColumnLengths((col, col_len), (table, table_len)) => write!( + f, + "{:?} has length {} while {:?} has length {}", + col, col_len, table, table_len + ), + TableError::UsedColumn(col) => { + write!(f, "{:?} has already been used", col) + } + TableError::OverwriteDefault(col, default, val) => { + write!( + f, + "Attempted to overwrite default value {} with {} in {:?}", + default, val, col + ) + } + } + } +} diff --git a/halo2_proofs/src/plonk/evaluation.rs b/halo2_proofs/src/plonk/evaluation.rs index f626bcbb54..b168b02ad0 100644 --- a/halo2_proofs/src/plonk/evaluation.rs +++ b/halo2_proofs/src/plonk/evaluation.rs @@ -1,38 +1,17 @@ use crate::multicore; -use crate::plonk::lookup::prover::Committed; -use crate::plonk::permutation::Argument; -use crate::plonk::{ - mv_lookup, permutation, AdviceQuery, Any, FixedQuery, InstanceQuery, ProvingKey, -}; +use crate::plonk::{mv_lookup, permutation, Any, ProvingKey}; use crate::poly::Basis; use crate::{ - arithmetic::{eval_polynomial, parallelize, CurveAffine}, - poly::{ - commitment::Params, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, - Polynomial, ProverQuery, Rotation, - }, - transcript::{EncodedChallenge, TranscriptWrite}, -}; -use group::prime::PrimeCurve; -use group::{ - ff::{BatchInvert, Field, PrimeField, WithSmallOrderMulGroup}, - Curve, -}; -use rayon::prelude::IntoParallelIterator; -use rayon::prelude::ParallelIterator; -use std::any::TypeId; -use std::convert::TryInto; -use std::num::ParseIntError; -use std::process::exit; -use std::slice; -use std::sync::atomic::fence; -use std::{ - collections::BTreeMap, - iter, - ops::{Index, Mul, MulAssign}, + arithmetic::{parallelize, CurveAffine}, + poly::{Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation}, }; +#[cfg(not(feature = "logup_skip_inv"))] +use ff::BatchInvert; +use group::ff::{Field, PrimeField, WithSmallOrderMulGroup}; +#[cfg(not(feature = "logup_skip_inv"))] +use rayon::iter::{IntoParallelIterator, ParallelIterator}; -use super::{ConstraintSystem, Expression}; +use super::{shuffle, ConstraintSystem, Expression}; /// Return the index in the polynomial of size `isize` after rotation `rot`. 
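As a standalone sketch of the wrap-around arithmetic this helper performs (function name here is illustrative; the real helper operates on the extended evaluation domain):

```
// Rotate `idx` by `rot * rot_scale` steps inside a domain of `size` rows,
// wrapping negative results back around the end of the domain.
fn rotation_index(idx: usize, rot: i32, rot_scale: i32, size: i32) -> usize {
    (idx as i32 + rot * rot_scale).rem_euclid(size) as usize
}

fn main() {
    // Rotation::next() on the last row wraps back to row 0.
    assert_eq!(rotation_index(7, 1, 1, 8), 0);
    // Rotation::prev() on row 0 wraps to the last row.
    assert_eq!(rotation_index(0, -1, 1, 8), 7);
}
```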
fn get_rotation_idx(idx: usize, rot: i32, rot_scale: i32, isize: i32) -> usize { @@ -75,6 +54,7 @@ impl Default for ValueSource { impl ValueSource { /// Get the value for this source + #[allow(clippy::too_many_arguments)] pub fn get( &self, rotations: &[usize], @@ -135,6 +115,7 @@ pub enum Calculation { impl Calculation { /// Get the resulting value of this calculation + #[allow(clippy::too_many_arguments)] pub fn evaluate( &self, rotations: &[usize], @@ -193,6 +174,8 @@ pub struct Evaluator { pub custom_gates: GraphEvaluator, /// Lookups evalution pub lookups: Vec<(Vec>, GraphEvaluator)>, + /// Shuffle evalution + pub shuffles: Vec>, } /// GraphEvaluator @@ -294,10 +277,44 @@ impl Evaluator { ev.lookups.push((graph_inputs.to_vec(), graph_table)); } + // Shuffles + for shuffle in cs.shuffles.iter() { + let evaluate_lc = |expressions: &Vec>, graph: &mut GraphEvaluator| { + let parts = expressions + .iter() + .map(|expr| graph.add_expression(expr)) + .collect(); + graph.add_calculation(Calculation::Horner( + ValueSource::Constant(0), + parts, + ValueSource::Theta(), + )) + }; + + let mut graph_input = GraphEvaluator::default(); + let compressed_input_coset = evaluate_lc(&shuffle.input_expressions, &mut graph_input); + let _ = graph_input.add_calculation(Calculation::Add( + compressed_input_coset, + ValueSource::Beta(), + )); + + let mut graph_shuffle = GraphEvaluator::default(); + let compressed_shuffle_coset = + evaluate_lc(&shuffle.shuffle_expressions, &mut graph_shuffle); + let _ = graph_shuffle.add_calculation(Calculation::Add( + compressed_shuffle_coset, + ValueSource::Beta(), + )); + + ev.shuffles.push(graph_input); + ev.shuffles.push(graph_shuffle); + } + ev } /// Evaluate h poly + #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn evaluate_h( &self, pk: &ProvingKey, @@ -309,6 +326,7 @@ impl Evaluator { gamma: C::ScalarExt, theta: C::ScalarExt, lookups: &[Vec>], + shuffles: &[Vec>], permutations: &[permutation::prover::Committed], ) -> Polynomial { let domain = &pk.vk.domain; @@ -365,10 +383,11 @@ impl Evaluator { // Core expression evaluations let num_threads = multicore::current_num_threads(); - for (((advice, instance), lookups), permutation) in advice + for ((((advice, instance), lookups), shuffles), permutation) in advice .iter() .zip(instance.iter()) .zip(lookups.iter()) + .zip(shuffles.iter()) .zip(permutations.iter()) { // Custom gates @@ -435,8 +454,8 @@ impl Evaluator { // Permutation constraints parallelize(&mut values, |values, start| { - let mut beta_term = current_extended_omega - * omega.pow_vartime(&[start as u64, 0, 0, 0]); + let mut beta_term = + current_extended_omega * omega.pow_vartime([start as u64, 0, 0, 0]); for (i, value) in values.iter_mut().enumerate() { let idx = start + i; let r_next = get_rotation_idx(idx, 1, rot_scale, isize); @@ -546,7 +565,7 @@ impl Evaluator { &gamma, &theta, &y, - &C::ScalarExt::zero(), + &C::Scalar::ZERO, idx, rot_scale, isize, @@ -573,7 +592,7 @@ impl Evaluator { inputs_values_for_extended_domain [i * inputs_len..(i + 1) * inputs_len] .iter() - .fold(C::Scalar::zero(), |acc, x| acc + x) + .fold(C::Scalar::ZERO, |acc, x| acc + x) }) .collect::>() }) @@ -704,6 +723,70 @@ impl Evaluator { } }); } + + // Shuffle constraints + for (n, shuffle) in shuffles.iter().enumerate() { + let product_coset = + pk.vk.domain.coeff_to_extended(shuffle.product_poly.clone()); + + // Shuffle constraints + parallelize(&mut values, |values, start| { + let input_evaluator = &self.shuffles[2 * n]; + let shuffle_evaluator = &self.shuffles[2 * n + 
1]; + let mut eval_data_input = shuffle_evaluator.instance(); + let mut eval_data_shuffle = shuffle_evaluator.instance(); + for (i, value) in values.iter_mut().enumerate() { + let idx = start + i; + + let input_value = input_evaluator.evaluate( + &mut eval_data_input, + fixed, + advice, + instance, + challenges, + &beta, + &gamma, + &theta, + &y, + &C::ScalarExt::ZERO, + idx, + rot_scale, + isize, + ); + + let shuffle_value = shuffle_evaluator.evaluate( + &mut eval_data_shuffle, + fixed, + advice, + instance, + challenges, + &beta, + &gamma, + &theta, + &y, + &C::ScalarExt::ZERO, + idx, + rot_scale, + isize, + ); + + let r_next = get_rotation_idx(idx, 1, rot_scale, isize); + + // l_0(X) * (1 - z(X)) = 0 + *value = *value * y + ((one - product_coset[idx]) * l0[idx]); + // l_last(X) * (z(X)^2 - z(X)) = 0 + *value = *value * y + + ((product_coset[idx] * product_coset[idx] + - product_coset[idx]) + * l_last[idx]); + // (1 - (l_last(X) + l_blind(X))) * (z(\omega X) (s(X) + \gamma) - z(X) (a(X) + \gamma)) = 0 + *value = *value * y + + l_active_row[idx] + * (product_coset[r_next] * shuffle_value + - product_coset[idx] * input_value) + } + }); + } } current_extended_omega *= extended_omega; values @@ -889,6 +972,7 @@ impl GraphEvaluator { } } + #[allow(clippy::too_many_arguments)] pub fn evaluate( &self, data: &mut EvaluationData, diff --git a/halo2_proofs/src/plonk/keygen.rs b/halo2_proofs/src/plonk/keygen.rs index 92ef096f32..89e53b296a 100644 --- a/halo2_proofs/src/plonk/keygen.rs +++ b/halo2_proofs/src/plonk/keygen.rs @@ -12,8 +12,7 @@ use super::{ Selector, }, evaluation::Evaluator, - permutation, Assigned, Challenge, Error, Expression, LagrangeCoeff, Polynomial, ProvingKey, - VerifyingKey, + permutation, Assigned, Challenge, Error, LagrangeCoeff, Polynomial, ProvingKey, VerifyingKey, }; use crate::helpers::CopyCell; use crate::{ @@ -21,7 +20,7 @@ use crate::{ circuit::Value, poly::{ batch_invert_assigned, - commitment::{Blind, Params, MSM}, + commitment::{Blind, Params}, EvaluationDomain, }, two_dim_vec_to_vec_of_slice, @@ -29,6 +28,7 @@ use crate::{ pub(crate) fn create_domain( k: u32, + #[cfg(feature = "circuit-params")] params: ConcreteCircuit::Params, ) -> ( EvaluationDomain, ConstraintSystem, @@ -39,6 +39,9 @@ where ConcreteCircuit: Circuit, { let mut cs = ConstraintSystem::default(); + #[cfg(feature = "circuit-params")] + let config = ConcreteCircuit::configure_with_params(&mut cs, params); + #[cfg(not(feature = "circuit-params"))] let config = ConcreteCircuit::configure(&mut cs); let cs = cs.chunk_lookups(); @@ -260,7 +263,7 @@ impl<'a, F: Field> Assignment for Assembly<'a, F> { .fixed .get_mut(column.index()) .and_then(|v| v.get_mut(row - self.rw_rows.start)) - .ok_or(Error::BoundsFailure)? 
= to().into_field().assign()?; + .expect("bounds failure") = to().into_field().assign()?; Ok(()) } @@ -304,10 +307,7 @@ impl<'a, F: Field> Assignment for Assembly<'a, F> { return Err(Error::not_enough_rows_available(self.k)); } - let col = self - .fixed - .get_mut(column.index()) - .ok_or(Error::BoundsFailure)?; + let col = self.fixed.get_mut(column.index()).expect("bounds failure"); let filler = to.assign()?; for row in self.usable_rows.clone().skip(from_row) { @@ -353,7 +353,11 @@ where ConcreteCircuit: Circuit, C::Scalar: FromUniformBytes<64>, { - let (domain, cs, config) = create_domain::(params.k()); + let (domain, cs, config) = create_domain::( + params.k(), + #[cfg(feature = "circuit-params")] + circuit.params(), + ); if (params.n() as usize) < cs.minimum_rows() { return Err(Error::not_enough_rows_available(params.k())); @@ -478,7 +482,11 @@ where ConcreteCircuit: Circuit, C::Scalar: FromUniformBytes<64>, { - let (domain, cs, config) = create_domain::(params.k()); + let (domain, cs, config) = create_domain::( + params.k(), + #[cfg(feature = "circuit-params")] + circuit.params(), + ); if (params.n() as usize) < cs.minimum_rows() { return Err(Error::not_enough_rows_available(params.k())); diff --git a/halo2_proofs/src/plonk/lookup.rs b/halo2_proofs/src/plonk/lookup.rs index c1c4d0ea31..7d1accd8ed 100644 --- a/halo2_proofs/src/plonk/lookup.rs +++ b/halo2_proofs/src/plonk/lookup.rs @@ -7,9 +7,9 @@ pub(crate) mod verifier; #[derive(Clone, PartialEq, Eq)] pub struct Argument { - pub name: &'static str, - pub input_expressions: Vec>, - pub table_expressions: Vec>, + pub(crate) name: String, + pub(crate) input_expressions: Vec>, + pub(crate) table_expressions: Vec>, } impl Debug for Argument { @@ -25,10 +25,10 @@ impl Argument { /// Constructs a new lookup argument. /// /// `table_map` is a sequence of `(input, table)` tuples. - pub fn new(name: &'static str, table_map: Vec<(Expression, Expression)>) -> Self { + pub fn new>(name: S, table_map: Vec<(Expression, Expression)>) -> Self { let (input_expressions, table_expressions) = table_map.into_iter().unzip(); Argument { - name, + name: name.as_ref().to_string(), input_expressions, table_expressions, } @@ -91,4 +91,9 @@ impl Argument { pub fn table_expressions(&self) -> &Vec> { &self.table_expressions } + + /// Returns name of this argument + pub fn name(&self) -> &str { + &self.name + } } diff --git a/halo2_proofs/src/plonk/lookup/prover.rs b/halo2_proofs/src/plonk/lookup/prover.rs index 0f0c85d7e3..028b298853 100644 --- a/halo2_proofs/src/plonk/lookup/prover.rs +++ b/halo2_proofs/src/plonk/lookup/prover.rs @@ -8,8 +8,7 @@ use crate::{ arithmetic::{eval_polynomial, parallelize, CurveAffine}, poly::{ commitment::{Blind, Params}, - Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery, - Rotation, + Coeff, EvaluationDomain, LagrangeCoeff, Polynomial, ProverQuery, Rotation, }, transcript::{EncodedChallenge, TranscriptWrite}, }; @@ -19,7 +18,6 @@ use group::{ Curve, }; use rand_core::RngCore; -use std::{any::TypeId, convert::TryInto, num::ParseIntError, ops::Index}; use std::{ collections::BTreeMap, iter, @@ -62,6 +60,7 @@ impl> Argument { /// - constructs Permuted struct using permuted_input_value = A', and /// permuted_table_expression = S'. /// The Permuted struct is used to update the Lookup, and is then returned. 
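The lookup argument's name, like the gate names earlier in this patch, is now stored as a `String` but constructed through `S: AsRef<str>`, so existing `&'static str` call sites keep compiling while runtime-built names become possible. A minimal sketch of the pattern outside halo2's own types (struct and method names illustrative):

```
// Store an owned String internally, accept anything string-like at the API.
struct NamedArgument {
    name: String,
}

impl NamedArgument {
    fn new<S: AsRef<str>>(name: S) -> Self {
        Self {
            name: name.as_ref().to_string(),
        }
    }

    fn name(&self) -> &str {
        &self.name
    }
}

fn main() {
    let a = NamedArgument::new("range check");            // old-style static str
    let b = NamedArgument::new(format!("lookup #{}", 3)); // runtime-built name
    assert_eq!(a.name(), "range check");
    assert_eq!(b.name(), "lookup #3");
}
```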
+ #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit_permuted< 'a, 'params: 'a, @@ -443,7 +442,7 @@ fn permute_expression_pair<'params, C: CurveAffine, P: Params<'params, C>, R: Rn // Populate permuted table at unfilled rows with leftover table elements for (coeff, count) in leftover_table_map.iter() { for _ in 0..*count { - permuted_table_coeffs[repeated_input_rows.pop().unwrap() as usize] = *coeff; + permuted_table_coeffs[repeated_input_rows.pop().unwrap()] = *coeff; } } assert!(repeated_input_rows.is_empty()); diff --git a/halo2_proofs/src/plonk/lookup/verifier.rs b/halo2_proofs/src/plonk/lookup/verifier.rs index 990866b786..548c4c274d 100644 --- a/halo2_proofs/src/plonk/lookup/verifier.rs +++ b/halo2_proofs/src/plonk/lookup/verifier.rs @@ -90,6 +90,7 @@ impl Committed { } impl Evaluated { + #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn expressions<'a>( &'a self, l_0: C::Scalar, @@ -120,9 +121,9 @@ impl Evaluated { expression.evaluate( &|scalar| scalar, &|_| panic!("virtual selectors are removed during optimization"), - &|query| fixed_evals[query.index], - &|query| advice_evals[query.index], - &|query| instance_evals[query.index], + &|query| fixed_evals[query.index.unwrap()], + &|query| advice_evals[query.index.unwrap()], + &|query| instance_evals[query.index.unwrap()], &|challenge| challenges[challenge.index()], &|a| -a, &|a, b| a + &b, @@ -141,7 +142,7 @@ impl Evaluated { std::iter::empty() .chain( - // l_0(X) * (1 - z'(X)) = 0 + // l_0(X) * (1 - z(X)) = 0 Some(l_0 * &(C::Scalar::ONE - &self.product_eval)), ) .chain( diff --git a/halo2_proofs/src/plonk/mv_lookup.rs b/halo2_proofs/src/plonk/mv_lookup.rs index b4fd2d825f..5c26c20b78 100644 --- a/halo2_proofs/src/plonk/mv_lookup.rs +++ b/halo2_proofs/src/plonk/mv_lookup.rs @@ -57,11 +57,8 @@ impl Argument { input_degree }; - let inputs_expressions_degree: usize = self - .inputs_expressions - .iter() - .map(|input_expressions| expr_degree(input_expressions)) - .sum(); + let inputs_expressions_degree: usize = + self.inputs_expressions.iter().map(expr_degree).sum(); let table_degree = expr_degree(&self.table_expressions); diff --git a/halo2_proofs/src/plonk/mv_lookup/prover.rs b/halo2_proofs/src/plonk/mv_lookup/prover.rs index 52d434289b..49ee6b2fd6 100644 --- a/halo2_proofs/src/plonk/mv_lookup/prover.rs +++ b/halo2_proofs/src/plonk/mv_lookup/prover.rs @@ -1,6 +1,5 @@ use super::super::{ - circuit::Expression, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, Error, - ProvingKey, + circuit::Expression, ChallengeBeta, ChallengeTheta, ChallengeX, Error, ProvingKey, }; use super::Argument; use crate::plonk::evaluation::evaluate; @@ -8,25 +7,15 @@ use crate::{ arithmetic::{eval_polynomial, parallelize, CurveAffine}, poly::{ commitment::{Blind, Params}, - Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery, - Rotation, + Coeff, EvaluationDomain, LagrangeCoeff, Polynomial, ProverQuery, Rotation, }, transcript::{EncodedChallenge, TranscriptWrite}, }; use ark_std::{end_timer, start_timer}; -use blake2b_simd::Hash; -use ff::{BitViewSized, PrimeField, PrimeFieldBits, WithSmallOrderMulGroup}; -use group::{ - ff::{BatchInvert, Field}, - Curve, -}; +use ff::{PrimeField, WithSmallOrderMulGroup}; +use group::{ff::Field, Curve}; use rand_core::RngCore; -use rayon::current_num_threads; -use std::collections::{BTreeSet, HashSet}; -use std::time::Instant; -use std::{any::TypeId, convert::TryInto, num::ParseIntError, ops::Index}; use std::{ - collections::BTreeMap, iter, ops::{Mul, 
MulAssign}, }; @@ -72,7 +61,8 @@ impl + Ord> Argument { fixed_values: &'a [Polynomial], instance_values: &'a [Polynomial], challenges: &'a [C::Scalar], - mut rng: R, // in case we want to blind (do we actually need zk?) + #[cfg(feature = "sanity-checks")] mut rng: R, // in case we want to blind (do we actually need zk?) + #[cfg(not(feature = "sanity-checks"))] rng: R, transcript: &mut T, ) -> Result, Error> where @@ -134,7 +124,6 @@ impl + Ord> Argument { let m_time = start_timer!(|| "m(X) values"); let m_values: Vec = { use std::sync::atomic::{AtomicU64, Ordering}; - use std::sync::RwLock; let m_values: Vec = (0..params.n()).map(|_| AtomicU64::new(0)).collect(); for compressed_input_expression in compressed_inputs_expressions.iter() { @@ -154,7 +143,7 @@ impl + Ord> Argument { m_values .par_iter() - .map(|mi| F::from(mi.load(Ordering::Relaxed) as u64)) + .map(|mi| F::from(mi.load(Ordering::Relaxed))) .collect() }; end_timer!(m_time); @@ -313,32 +302,24 @@ impl Prepared { let phi = { // parallelized version of log_derivatives_diff.scan() let active_size = params.n() as usize - blinding_factors; - let chunk = { - let num_threads = crate::multicore::current_num_threads(); - let mut chunk = (active_size as usize) / num_threads; - if chunk < num_threads { - chunk = 1; - } - chunk - }; - let num_chunks = (active_size as usize + chunk - 1) / chunk; - let mut segment_sum = vec![C::Scalar::ZERO; num_chunks]; let mut grand_sum = iter::once(C::Scalar::ZERO) .chain(log_derivatives_diff) .take(active_size) .collect::>(); // TODO: remove the implicit assumption that parallelize() split the grand_sum // into segments that each has `chunk` elements except the last. - parallelize(&mut grand_sum, |segment_grand_sum, _| { + let segment_starts = parallelize_internal(&mut grand_sum, |segment_grand_sum, _| { for i in 1..segment_grand_sum.len() { segment_grand_sum[i] += segment_grand_sum[i - 1]; } }); - for i in 1..segment_sum.len() { - segment_sum[i] = segment_sum[i - 1] + grand_sum[i * chunk - 1]; + let mut segment_sum = vec![C::Scalar::ZERO; grand_sum.len()]; + for i in 1..segment_starts.len() { + segment_sum[segment_starts[i]] = + segment_sum[segment_starts[i - 1]] + grand_sum[segment_starts[i] - 1]; } parallelize(&mut grand_sum, |grand_sum, start| { - let prefix_sum = segment_sum[start / chunk]; + let prefix_sum = segment_sum[start]; for v in grand_sum.iter_mut() { *v += prefix_sum; } @@ -479,6 +460,7 @@ impl Evaluated { } } +#[cfg(test)] mod benches { use ark_std::rand::thread_rng; use ff::Field; @@ -498,7 +480,6 @@ mod benches { let n = 1 << log_n; let dur = Instant::now(); let _table: BTreeMap = (0..n) - .into_iter() .map(|_| Fr::random(&mut rng)) .enumerate() .map(|(i, x)| (x, i)) @@ -514,7 +495,6 @@ mod benches { let n = 1 << log_n; let dur = Instant::now(); let _table: BTreeMap = (0..n) - .into_iter() .map(Fr::from) .enumerate() .map(|(i, x)| (x, i)) diff --git a/halo2_proofs/src/plonk/mv_lookup/verifier.rs b/halo2_proofs/src/plonk/mv_lookup/verifier.rs index 361bb5f972..0593f3bcca 100644 --- a/halo2_proofs/src/plonk/mv_lookup/verifier.rs +++ b/halo2_proofs/src/plonk/mv_lookup/verifier.rs @@ -1,8 +1,6 @@ use std::iter; -use super::super::{ - circuit::Expression, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, -}; +use super::super::{circuit::Expression, ChallengeBeta, ChallengeTheta, ChallengeX}; use super::Argument; use crate::{ arithmetic::CurveAffine, @@ -109,9 +107,9 @@ impl Evaluated { expression.evaluate( &|scalar| scalar, &|_| panic!("virtual selectors are removed during 
optimization"), - &|query| fixed_evals[query.index], - &|query| advice_evals[query.index], - &|query| instance_evals[query.index], + &|query| fixed_evals[query.index()], + &|query| advice_evals[query.index()], + &|query| instance_evals[query.index()], &|challenge| challenges[challenge.index()], &|a| -a, &|a, b| a + &b, diff --git a/halo2_proofs/src/plonk/permutation.rs b/halo2_proofs/src/plonk/permutation.rs index b353493c57..2ce6abb8d3 100644 --- a/halo2_proofs/src/plonk/permutation.rs +++ b/halo2_proofs/src/plonk/permutation.rs @@ -1,3 +1,5 @@ +//! Implementation of permutation argument. + use super::circuit::{Any, Column}; use crate::{ arithmetic::CurveAffine, @@ -5,15 +7,16 @@ use crate::{ polynomial_slice_byte_length, read_polynomial_vec, write_polynomial_slice, SerdeCurveAffine, SerdePrimeField, }, - poly::{Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial}, + poly::{Coeff, LagrangeCoeff, Polynomial}, SerdeFormat, }; -use ff::PrimeField; pub(crate) mod keygen; pub(crate) mod prover; pub(crate) mod verifier; +pub use keygen::Assembly; + use std::io; /// A permutation argument. @@ -72,6 +75,7 @@ impl Argument { } } + /// Returns columns that participate on the permutation argument. pub fn get_columns(&self) -> Vec> { self.columns.clone() } diff --git a/halo2_proofs/src/plonk/permutation/keygen.rs b/halo2_proofs/src/plonk/permutation/keygen.rs index 1d73c82b42..6d1e184a1e 100644 --- a/halo2_proofs/src/plonk/permutation/keygen.rs +++ b/halo2_proofs/src/plonk/permutation/keygen.rs @@ -6,24 +6,32 @@ use crate::{ arithmetic::{parallelize, CurveAffine}, plonk::{Any, Column, Error}, poly::{ - commitment::{Blind, CommitmentScheme, Params}, + commitment::{Blind, Params}, EvaluationDomain, }, }; +#[cfg(feature = "multicore")] +use crate::multicore::{IndexedParallelIterator, ParallelIterator}; + +#[cfg(feature = "thread-safe-region")] +use std::collections::{BTreeSet, HashMap}; + +#[cfg(not(feature = "thread-safe-region"))] /// Struct that accumulates all the necessary data in order to construct the permutation argument. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct Assembly { /// Columns that participate on the copy permutation argument. - pub columns: Vec>, + columns: Vec>, /// Mapping of the actual copies done. - pub mapping: Vec>, + mapping: Vec>, /// Some aux data used to swap positions directly when sorting. 
- pub aux: Vec>, + aux: Vec>, /// More aux data - pub sizes: Vec>, + sizes: Vec>, } +#[cfg(not(feature = "thread-safe-region"))] impl Assembly { pub(crate) fn new(n: usize, p: &Argument) -> Self { // Initialize the copy vector to keep track of copy constraints in all @@ -67,6 +75,16 @@ impl Assembly { if left_row >= self.mapping[left_column].len() || right_row >= self.mapping[right_column].len() { + log::error!( + "BoundsFailure: left_row: {}, right_row: {}", + left_row, + right_row + ); + log::error!( + "BoundsFailure: left_max: {}, right_max: {}", + self.mapping[left_column].len(), + self.mapping[right_column].len() + ); return Err(Error::BoundsFailure); } @@ -108,134 +126,360 @@ impl Assembly { domain: &EvaluationDomain, p: &Argument, ) -> VerifyingKey { - // Compute [omega^0, omega^1, ..., omega^{params.n - 1}] - let mut omega_powers = vec![C::Scalar::ZERO; params.n() as usize]; - { - let omega = domain.get_omega(); - parallelize(&mut omega_powers, |o, start| { - let mut cur = omega.pow_vartime([start as u64]); - for v in o.iter_mut() { - *v = cur; - cur *= ω - } - }) + build_vk(params, domain, p, |i, j| self.mapping[i][j]) + } + + pub(crate) fn build_pk<'params, C: CurveAffine, P: Params<'params, C>>( + self, + params: &P, + domain: &EvaluationDomain, + p: &Argument, + ) -> ProvingKey { + build_pk(params, domain, p, |i, j| self.mapping[i][j]) + } + + /// Returns columns that participate in the permutation argument. + pub fn columns(&self) -> &[Column] { + &self.columns + } + + #[cfg(feature = "multicore")] + /// Returns mappings of the copies. + pub fn mapping( + &self, + ) -> impl Iterator + '_> { + use crate::multicore::IntoParallelRefIterator; + + self.mapping.iter().map(|c| c.par_iter().copied()) + } + + #[cfg(not(feature = "multicore"))] + /// Returns mappings of the copies. + pub fn mapping(&self) -> impl Iterator + '_> { + self.mapping.iter().map(|c| c.iter().copied()) + } +} + +#[cfg(feature = "thread-safe-region")] +/// Struct that accumulates all the necessary data in order to construct the permutation argument. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Assembly { + /// Columns that participate on the copy permutation argument. + columns: Vec>, + /// Mapping of the actual copies done. + cycles: Vec>, + /// Mapping of the actual copies done. + ordered_cycles: Vec>, + /// Mapping of the actual copies done. 
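Key generation is refactored so that `build_vk`/`build_pk` receive the permutation as a `(column, row) -> (column, row)` closure; both assembly flavours can then share one implementation. A toy sketch of that shape (illustrative function name, no halo2 types):

```
// Materialize a permutation table from a closure, the way the shared
// key-generation helpers consume either assembly's mapping.
fn materialize_mapping(
    num_cols: usize,
    col_len: usize,
    mapping: impl Fn(usize, usize) -> (usize, usize) + Sync,
) -> Vec<Vec<(usize, usize)>> {
    (0..num_cols)
        .map(|i| (0..col_len).map(|j| mapping(i, j)).collect())
        .collect()
}

fn main() {
    // A mapping that swaps rows 0 and 1 of column 0 and fixes everything else.
    let table = materialize_mapping(2, 3, |i, j| match (i, j) {
        (0, 0) => (0, 1),
        (0, 1) => (0, 0),
        other => other,
    });
    assert_eq!(table[0][0], (0, 1));
    assert_eq!(table[1][2], (1, 2));
}
```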
+ aux: HashMap<(usize, usize), usize>, + /// total length of a column + col_len: usize, + /// number of columns + num_cols: usize, +} + +#[cfg(feature = "thread-safe-region")] +impl Assembly { + pub(crate) fn new(n: usize, p: &Argument) -> Self { + Assembly { + columns: p.columns.clone(), + cycles: Vec::with_capacity(n), + ordered_cycles: Vec::with_capacity(n), + aux: HashMap::new(), + col_len: n, + num_cols: p.columns.len(), } + } - // Compute [omega_powers * \delta^0, omega_powers * \delta^1, ..., omega_powers * \delta^m] - let mut deltaomega = vec![omega_powers; p.columns.len()]; - { - parallelize(&mut deltaomega, |o, start| { - let mut cur = C::Scalar::DELTA.pow_vartime(&[start as u64]); - for omega_powers in o.iter_mut() { - for v in omega_powers { - *v *= &cur; - } - cur *= &::DELTA; - } - }); + pub(crate) fn copy( + &mut self, + left_column: Column, + left_row: usize, + right_column: Column, + right_row: usize, + ) -> Result<(), Error> { + let left_column = self + .columns + .iter() + .position(|c| c == &left_column) + .ok_or(Error::ColumnNotInPermutation(left_column))?; + let right_column = self + .columns + .iter() + .position(|c| c == &right_column) + .ok_or(Error::ColumnNotInPermutation(right_column))?; + + // Check bounds + if left_row >= self.col_len || right_row >= self.col_len { + return Err(Error::BoundsFailure); } - // Computes the permutation polynomial based on the permutation - // description in the assembly. - let mut permutations = vec![domain.empty_lagrange(); p.columns.len()]; - { - parallelize(&mut permutations, |o, start| { - for (x, permutation_poly) in o.iter_mut().enumerate() { - let i = start + x; - for (j, p) in permutation_poly.iter_mut().enumerate() { - let (permuted_i, permuted_j) = self.mapping[i][j]; - *p = deltaomega[permuted_i][permuted_j]; - } - } - }); + let left_cycle = self.aux.get(&(left_column, left_row)); + let right_cycle = self.aux.get(&(right_column, right_row)); + + // extract cycle elements + let right_cycle_elems = match right_cycle { + Some(i) => { + let entry = self.cycles[*i].clone(); + self.cycles[*i] = vec![]; + entry + } + None => [(right_column, right_row)].into(), + }; + + assert!(right_cycle_elems.contains(&(right_column, right_row))); + + // merge cycles + let cycle_idx = match left_cycle { + Some(i) => { + let entry = &mut self.cycles[*i]; + entry.extend(right_cycle_elems.clone()); + *i + } + // if they were singletons -- create a new cycle entry + None => { + let mut set: Vec<(usize, usize)> = right_cycle_elems.clone(); + set.push((left_column, left_row)); + self.cycles.push(set); + let cycle_idx = self.cycles.len() - 1; + self.aux.insert((left_column, left_row), cycle_idx); + cycle_idx + } + }; + + let index_updates = vec![cycle_idx; right_cycle_elems.len()].into_iter(); + let updates = right_cycle_elems.into_iter().zip(index_updates); + + self.aux.extend(updates); + + Ok(()) + } + + /// Builds the ordered mapping of the cycles. + /// This will only get executed once. + pub fn build_ordered_mapping(&mut self) { + use crate::multicore::IntoParallelRefMutIterator; + + // will only get called once + if self.ordered_cycles.is_empty() && !self.cycles.is_empty() { + self.ordered_cycles = self + .cycles + .par_iter_mut() + .map(|col| { + let mut set = BTreeSet::new(); + set.extend(col.clone()); + // free up memory + *col = vec![]; + set + }) + .collect(); } + } - // Pre-compute commitments for the URS. 
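A trimmed-down sketch of the cycle bookkeeping the thread-safe assembly uses for copy constraints (illustrative type and field names, plain `(column, row)` tuples instead of halo2 columns): each `copy` merges the right cell's cycle into the left cell's, and `aux` always points a cell at the cycle that currently contains it.

```
use std::collections::HashMap;

#[derive(Default)]
struct Cycles {
    cycles: Vec<Vec<(usize, usize)>>,
    aux: HashMap<(usize, usize), usize>,
}

impl Cycles {
    fn copy(&mut self, left: (usize, usize), right: (usize, usize)) {
        // Take the right cell's whole cycle (or a singleton if it has none).
        let right_elems = match self.aux.get(&right) {
            Some(&i) => std::mem::take(&mut self.cycles[i]),
            None => vec![right],
        };
        // Append it to the left cell's cycle, creating one if needed.
        let idx = match self.aux.get(&left) {
            Some(&i) => {
                self.cycles[i].extend(right_elems.iter().copied());
                i
            }
            None => {
                let mut cycle = right_elems.clone();
                cycle.push(left);
                self.cycles.push(cycle);
                self.cycles.len() - 1
            }
        };
        // Re-point every moved cell (and `left`) at the merged cycle.
        self.aux.insert(left, idx);
        for cell in right_elems {
            self.aux.insert(cell, idx);
        }
    }
}

fn main() {
    let mut c = Cycles::default();
    c.copy((0, 1), (1, 5)); // cell (0,1) == cell (1,5)
    c.copy((0, 1), (2, 3)); // and also == cell (2,3)
    assert_eq!(c.aux[&(1, 5)], c.aux[&(2, 3)]); // all three share one cycle
}
```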
- let mut commitments = Vec::with_capacity(p.columns.len()); - for permutation in &permutations { - // Compute commitment to permutation polynomial - commitments.push( - params - .commit_lagrange(permutation, Blind::default()) - .to_affine(), - ); + fn mapping_at_idx(&self, col: usize, row: usize) -> (usize, usize) { + assert!( + !self.ordered_cycles.is_empty() || self.cycles.is_empty(), + "cycles have not been ordered" + ); + + if let Some(cycle_idx) = self.aux.get(&(col, row)) { + let cycle = &self.ordered_cycles[*cycle_idx]; + let mut cycle_iter = cycle.range(( + std::ops::Bound::Excluded((col, row)), + std::ops::Bound::Unbounded, + )); + // point to the next node in the cycle + match cycle_iter.next() { + Some((i, j)) => (*i, *j), + // wrap back around to the first element which SHOULD exist + None => *(cycle.iter().next().unwrap()), + } + // is a singleton + } else { + (col, row) } + } - VerifyingKey { commitments } + pub(crate) fn build_vk<'params, C: CurveAffine, P: Params<'params, C>>( + &mut self, + params: &P, + domain: &EvaluationDomain, + p: &Argument, + ) -> VerifyingKey { + self.build_ordered_mapping(); + build_vk(params, domain, p, |i, j| self.mapping_at_idx(i, j)) } pub(crate) fn build_pk<'params, C: CurveAffine, P: Params<'params, C>>( - self, + &mut self, params: &P, domain: &EvaluationDomain, p: &Argument, ) -> ProvingKey { - // Compute [omega^0, omega^1, ..., omega^{params.n - 1}] - let mut omega_powers = vec![C::Scalar::ZERO; params.n() as usize]; - { - let omega = domain.get_omega(); - parallelize(&mut omega_powers, |o, start| { - let mut cur = omega.pow_vartime([start as u64]); - for v in o.iter_mut() { - *v = cur; - cur *= ω - } - }) - } + self.build_ordered_mapping(); + build_pk(params, domain, p, |i, j| self.mapping_at_idx(i, j)) + } - // Compute [omega_powers * \delta^0, omega_powers * \delta^1, ..., omega_powers * \delta^m] - let mut deltaomega = vec![omega_powers; p.columns.len()]; - { - parallelize(&mut deltaomega, |o, start| { - let mut cur = C::Scalar::DELTA.pow_vartime(&[start as u64]); - for omega_powers in o.iter_mut() { - for v in omega_powers { - *v *= &cur; - } - cur *= &C::Scalar::DELTA; + /// Returns columns that participate in the permutation argument. + pub fn columns(&self) -> &[Column] { + &self.columns + } + + #[cfg(feature = "multicore")] + /// Returns mappings of the copies. + pub fn mapping( + &self, + ) -> impl Iterator + '_> { + use crate::multicore::IntoParallelIterator; + + (0..self.num_cols).map(move |i| { + (0..self.col_len) + .into_par_iter() + .map(move |j| self.mapping_at_idx(i, j)) + }) + } + + #[cfg(not(feature = "multicore"))] + /// Returns mappings of the copies. 
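The per-cell lookup then maps a cell to the next element of its ordered cycle, wrapping around to the smallest element at the end, which is what the `BTreeSet::range` call in `mapping_at_idx` achieves. A standalone sketch with a plain `BTreeSet` (illustrative function name):

```
use std::collections::BTreeSet;

fn next_in_cycle(cycle: &BTreeSet<(usize, usize)>, cell: (usize, usize)) -> (usize, usize) {
    use std::ops::Bound::{Excluded, Unbounded};
    match cycle.range((Excluded(cell), Unbounded)).next() {
        Some(&next) => next,
        // Nothing larger in the cycle: wrap around to its first element.
        None => *cycle.iter().next().expect("cycle is non-empty"),
    }
}

fn main() {
    let cycle: BTreeSet<(usize, usize)> = [(0, 1), (1, 5), (2, 3)].into_iter().collect();
    assert_eq!(next_in_cycle(&cycle, (0, 1)), (1, 5));
    assert_eq!(next_in_cycle(&cycle, (2, 3)), (0, 1)); // wraps around
}
```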
+ pub fn mapping(&self) -> impl Iterator + '_> { + (0..self.num_cols).map(move |i| (0..self.col_len).map(move |j| self.mapping_at_idx(i, j))) + } +} + +pub(crate) fn build_pk<'params, C: CurveAffine, P: Params<'params, C>>( + params: &P, + domain: &EvaluationDomain, + p: &Argument, + mapping: impl Fn(usize, usize) -> (usize, usize) + Sync, +) -> ProvingKey { + // Compute [omega^0, omega^1, ..., omega^{params.n - 1}] + let mut omega_powers = vec![C::Scalar::ZERO; params.n() as usize]; + { + let omega = domain.get_omega(); + parallelize(&mut omega_powers, |o, start| { + let mut cur = omega.pow_vartime([start as u64]); + for v in o.iter_mut() { + *v = cur; + cur *= ω + } + }) + } + + // Compute [omega_powers * \delta^0, omega_powers * \delta^1, ..., omega_powers * \delta^m] + let mut deltaomega = vec![omega_powers; p.columns.len()]; + { + parallelize(&mut deltaomega, |o, start| { + let mut cur = C::Scalar::DELTA.pow_vartime([start as u64]); + for omega_powers in o.iter_mut() { + for v in omega_powers { + *v *= &cur; } - }); - } + cur *= &::DELTA; + } + }); + } - // Compute permutation polynomials. - let mut permutations = vec![domain.empty_lagrange(); p.columns.len()]; - { - parallelize(&mut permutations, |o, start| { - for (x, permutation_poly) in o.iter_mut().enumerate() { - let i = start + x; - for (j, p) in permutation_poly.iter_mut().enumerate() { - let (permuted_i, permuted_j) = self.mapping[i][j]; - *p = deltaomega[permuted_i][permuted_j]; - } + // Compute permutation polynomials, convert to coset form. + let mut permutations = vec![domain.empty_lagrange(); p.columns.len()]; + { + parallelize(&mut permutations, |o, start| { + for (x, permutation_poly) in o.iter_mut().enumerate() { + let i = start + x; + for (j, p) in permutation_poly.iter_mut().enumerate() { + let (permuted_i, permuted_j) = mapping(i, j); + *p = deltaomega[permuted_i][permuted_j]; } - }); - } + } + }); + } - let mut polys = vec![domain.empty_coeff(); p.columns.len()]; - { - parallelize(&mut polys, |o, start| { - for (x, poly) in o.iter_mut().enumerate() { - let i = start + x; - let permutation_poly = permutations[i].clone(); - *poly = domain.lagrange_to_coeff(permutation_poly); + let mut polys = vec![domain.empty_coeff(); p.columns.len()]; + { + parallelize(&mut polys, |o, start| { + for (x, poly) in o.iter_mut().enumerate() { + let i = start + x; + let permutation_poly = permutations[i].clone(); + *poly = domain.lagrange_to_coeff(permutation_poly); + } + }); + } + + // let mut cosets = vec![domain.empty_extended(); p.columns.len()]; + // { + // parallelize(&mut cosets, |o, start| { + // for (x, coset) in o.iter_mut().enumerate() { + // let i = start + x; + // let poly = polys[i].clone(); + // *coset = domain.coeff_to_extended(poly); + // } + // }); + // } + + ProvingKey { + permutations, + polys, + // cosets, + } +} + +pub(crate) fn build_vk<'params, C: CurveAffine, P: Params<'params, C>>( + params: &P, + domain: &EvaluationDomain, + p: &Argument, + mapping: impl Fn(usize, usize) -> (usize, usize) + Sync, +) -> VerifyingKey { + // Compute [omega^0, omega^1, ..., omega^{params.n - 1}] + let mut omega_powers = vec![C::Scalar::ZERO; params.n() as usize]; + { + let omega = domain.get_omega(); + parallelize(&mut omega_powers, |o, start| { + let mut cur = omega.pow_vartime([start as u64]); + for v in o.iter_mut() { + *v = cur; + cur *= ω + } + }) + } + + // Compute [omega_powers * \delta^0, omega_powers * \delta^1, ..., omega_powers * \delta^m] + let mut deltaomega = vec![omega_powers; p.columns.len()]; + { + 
parallelize(&mut deltaomega, |o, start| { + let mut cur = C::Scalar::DELTA.pow_vartime([start as u64]); + for omega_powers in o.iter_mut() { + for v in omega_powers { + *v *= &cur; } - }); - } + cur *= &::DELTA; + } + }); + } - let mut cosets = vec![domain.empty_extended(); p.columns.len()]; - { - parallelize(&mut cosets, |o, start| { - for (x, coset) in o.iter_mut().enumerate() { - let i = start + x; - let poly = polys[i].clone(); - *coset = domain.coeff_to_extended(poly); + // Computes the permutation polynomial based on the permutation + // description in the assembly. + let mut permutations = vec![domain.empty_lagrange(); p.columns.len()]; + { + parallelize(&mut permutations, |o, start| { + for (x, permutation_poly) in o.iter_mut().enumerate() { + let i = start + x; + for (j, p) in permutation_poly.iter_mut().enumerate() { + let (permuted_i, permuted_j) = mapping(i, j); + *p = deltaomega[permuted_i][permuted_j]; } - }); - } + } + }); + } - ProvingKey { - permutations, - polys, - } + // Pre-compute commitments for the URS. + let mut commitments = Vec::with_capacity(p.columns.len()); + for permutation in &permutations { + // Compute commitment to permutation polynomial + commitments.push( + params + .commit_lagrange(permutation, Blind::default()) + .to_affine(), + ); } + + VerifyingKey { commitments } } diff --git a/halo2_proofs/src/plonk/permutation/prover.rs b/halo2_proofs/src/plonk/permutation/prover.rs index 0618952f5a..28dfa7f4eb 100644 --- a/halo2_proofs/src/plonk/permutation/prover.rs +++ b/halo2_proofs/src/plonk/permutation/prover.rs @@ -12,9 +12,8 @@ use crate::{ arithmetic::{eval_polynomial, parallelize, CurveAffine}, plonk::{self, Error}, poly::{ - self, commitment::{Blind, Params}, - Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery, Rotation, + Coeff, LagrangeCoeff, Polynomial, ProverQuery, Rotation, }, transcript::{EncodedChallenge, TranscriptWrite}, }; @@ -42,6 +41,7 @@ pub(crate) struct Evaluated { } impl Argument { + #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit< 'params, C: CurveAffine, diff --git a/halo2_proofs/src/plonk/permutation/verifier.rs b/halo2_proofs/src/plonk/permutation/verifier.rs index ecdef3f59a..080acf24e7 100644 --- a/halo2_proofs/src/plonk/permutation/verifier.rs +++ b/halo2_proofs/src/plonk/permutation/verifier.rs @@ -103,6 +103,7 @@ impl Committed { } impl Evaluated { + #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn expressions<'a>( &'a self, vk: &'a plonk::VerifyingKey, @@ -183,7 +184,7 @@ impl Evaluated { let mut right = set.permutation_product_eval; let mut current_delta = (*beta * &*x) * &(::DELTA - .pow_vartime(&[(chunk_index * chunk_len) as u64])); + .pow_vartime([(chunk_index * chunk_len) as u64])); for eval in columns.iter().map(|&column| match column.column_type() { Any::Advice(_) => { advice_evals[vk.cs.get_any_query_index(column, Rotation::cur())] diff --git a/halo2_proofs/src/plonk/prover.rs b/halo2_proofs/src/plonk/prover.rs index 768216e4c5..4ed6195ebc 100644 --- a/halo2_proofs/src/plonk/prover.rs +++ b/halo2_proofs/src/plonk/prover.rs @@ -1,32 +1,29 @@ -use ff::{Field, FromUniformBytes, PrimeField, WithSmallOrderMulGroup}; +use crate::plonk::shuffle; +use ff::{Field, FromUniformBytes, WithSmallOrderMulGroup}; use group::Curve; -use halo2curves::CurveExt; use rand_core::RngCore; use std::collections::BTreeSet; -use std::env::var; use std::ops::{Range, RangeTo}; -use std::sync::atomic::AtomicUsize; use std::sync::Arc; -use std::time::Instant; -use std::{collections::HashMap, iter, 
mem, sync::atomic::Ordering}; +use std::{collections::HashMap, iter}; use super::{ circuit::{ - sealed::{self, SealedPhase}, - Advice, Any, Assignment, Challenge, Circuit, Column, ConstraintSystem, FirstPhase, Fixed, - FloorPlanner, Instance, Selector, + sealed::{self}, + Advice, Any, Assignment, Challenge, Circuit, Column, ConstraintSystem, Fixed, FloorPlanner, + Instance, Selector, }, mv_lookup, permutation, vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, - ChallengeY, Error, Expression, ProvingKey, + ChallengeY, Error, ProvingKey, }; + use crate::{ arithmetic::{eval_polynomial, CurveAffine}, circuit::Value, plonk::Assigned, poly::{ - self, commitment::{Blind, CommitmentScheme, Params, Prover}, - Basis, Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery, + Basis, Coeff, LagrangeCoeff, Polynomial, ProverQuery, }, two_dim_vec_to_vec_of_slice, }; @@ -60,6 +57,10 @@ pub fn create_proof< where Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64> + Ord, { + if circuits.len() != instances.len() { + return Err(Error::InvalidInstances); + } + for instance in instances.iter() { if instance.len() != pk.vk.cs.num_instance_columns { return Err(Error::InvalidInstances); @@ -71,6 +72,9 @@ where let domain = &pk.vk.domain; let mut meta = ConstraintSystem::default(); + #[cfg(feature = "circuit-params")] + let config = ConcreteCircuit::configure_with_params(&mut meta, circuits[0].params()); + #[cfg(not(feature = "circuit-params"))] let config = ConcreteCircuit::configure(&mut meta); // Selector optimizations cannot be applied here; use the ConstraintSystem @@ -285,11 +289,13 @@ where return Err(Error::not_enough_rows_available(self.k)); } - self.instances + Ok(self + .instances .get(column.index()) .and_then(|column| column.get(row)) .map(|v| Value::known(*v)) .ok_or(Error::BoundsFailure) + .expect("bound failure")) } fn assign_advice( @@ -323,7 +329,7 @@ where .advice .get_mut(column.index()) .and_then(|v| v.get_mut(row - self.rw_rows.start)) - .ok_or(Error::BoundsFailure)? = to().into_field().assign()?; + .expect("bounds failure") = to().into_field().assign()?; Ok(()) } @@ -619,6 +625,34 @@ where .collect::, _>>()?; end_timer!(lookup_commit_time); + let shuffles: Vec>> = instance + .iter() + .zip(advice.iter()) + .map(|(instance, advice)| -> Result, _> { + // Compress expressions for each shuffle + pk.vk + .cs + .shuffles + .iter() + .map(|shuffle| { + shuffle.commit_product( + pk, + params, + domain, + theta, + gamma, + &advice.advice_polys, + &pk.fixed_values, + &instance.instance_values, + &challenges, + &mut rng, + transcript, + ) + }) + .collect::, _>>() + }) + .collect::, _>>()?; + // Commit to the vanishing argument's random polynomial for blinding h(x_3) let vanishing = vanishing::Argument::commit(params, domain, &mut rng, transcript)?; @@ -661,6 +695,7 @@ where *gamma, *theta, &lookups, + &shuffles, &permutations, ); @@ -668,7 +703,7 @@ where let vanishing = vanishing.construct(params, domain, h_poly, &mut rng, transcript)?; let x: ChallengeX<_> = transcript.squeeze_challenge_scalar(); - let xn = x.pow(&[params.n() as u64, 0, 0, 0]); + let xn = x.pow([params.n()]); if P::QUERY_INSTANCE { // Compute and hash instance evals for each circuit instance @@ -747,12 +782,24 @@ where }) .collect::, _>>()?; + // Evaluate the shuffles, if any, at omega^i x. 
+ let shuffles: Vec>> = shuffles + .into_iter() + .map(|shuffles| -> Result, _> { + shuffles + .into_iter() + .map(|p| p.evaluate(pk, x, transcript)) + .collect::, _>>() + }) + .collect::, _>>()?; + let instances = instance .iter() .zip(advice.iter()) .zip(permutations.iter()) .zip(lookups.iter()) - .flat_map(|(((instance, advice), permutation), lookups)| { + .zip(shuffles.iter()) + .flat_map(|((((instance, advice), permutation), lookups), shuffles)| { iter::empty() .chain( P::QUERY_INSTANCE @@ -778,7 +825,8 @@ where }), ) .chain(permutation.open(pk, x)) - .chain(lookups.iter().flat_map(move |p| p.open(pk, x)).into_iter()) + .chain(lookups.iter().flat_map(move |p| p.open(pk, x))) + .chain(shuffles.iter().flat_map(move |p| p.open(pk, x))) }) .chain( pk.vk @@ -800,3 +848,69 @@ where .create_proof(rng, transcript, instances) .map_err(|_| Error::ConstraintSystemFailure) } + +#[test] +fn test_create_proof() { + use crate::{ + circuit::SimpleFloorPlanner, + plonk::{keygen_pk, keygen_vk}, + poly::kzg::{ + commitment::{KZGCommitmentScheme, ParamsKZG}, + multiopen::ProverSHPLONK, + }, + transcript::{Blake2bWrite, Challenge255, TranscriptWriterBuffer}, + }; + use halo2curves::bn256::Bn256; + use rand_core::OsRng; + + #[derive(Clone, Copy)] + struct MyCircuit; + + impl Circuit for MyCircuit { + type Config = (); + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + *self + } + + fn configure(_meta: &mut ConstraintSystem) -> Self::Config {} + + fn synthesize( + &self, + _config: Self::Config, + _layouter: impl crate::circuit::Layouter, + ) -> Result<(), Error> { + Ok(()) + } + } + + let params: ParamsKZG = ParamsKZG::setup(3, OsRng); + let vk = keygen_vk(¶ms, &MyCircuit).expect("keygen_vk should not fail"); + let pk = keygen_pk(¶ms, vk, &MyCircuit).expect("keygen_pk should not fail"); + let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]); + + // Create proof with wrong number of instances + let proof = create_proof::, ProverSHPLONK<_>, _, _, _, _>( + ¶ms, + &pk, + &[MyCircuit, MyCircuit], + &[], + OsRng, + &mut transcript, + ); + assert!(matches!(proof.unwrap_err(), Error::InvalidInstances)); + + // Create proof with correct number of instances + create_proof::, ProverSHPLONK<_>, _, _, _, _>( + ¶ms, + &pk, + &[MyCircuit, MyCircuit], + &[&[], &[]], + OsRng, + &mut transcript, + ) + .expect("proof generation should not fail"); +} diff --git a/halo2_proofs/src/plonk/shuffle.rs b/halo2_proofs/src/plonk/shuffle.rs new file mode 100644 index 0000000000..2cb925a3c7 --- /dev/null +++ b/halo2_proofs/src/plonk/shuffle.rs @@ -0,0 +1,67 @@ +use super::circuit::Expression; +use ff::Field; +use std::fmt::{self, Debug}; + +pub(crate) mod prover; +pub(crate) mod verifier; + +#[derive(Clone, PartialEq, Eq)] +pub struct Argument { + pub(crate) name: String, + pub(crate) input_expressions: Vec>, + pub(crate) shuffle_expressions: Vec>, +} + +impl Debug for Argument { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Argument") + .field("input_expressions", &self.input_expressions) + .field("shuffle_expressions", &self.shuffle_expressions) + .finish() + } +} + +impl Argument { + /// Constructs a new shuffle argument. + /// + /// `shuffle` is a sequence of `(input, shuffle)` tuples. 
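The constructor below splits the `(input, shuffle)` pairs into the two expression vectors the argument stores, via `unzip`. A toy sketch of that shape, with strings standing in for `Expression<F>` (illustrative only):

```
fn main() {
    let shuffle_map = vec![("a0", "s0"), ("a1", "s1")];
    // Split the pairs into the two columns of the argument.
    let (input_expressions, shuffle_expressions): (Vec<_>, Vec<_>) =
        shuffle_map.into_iter().unzip();
    assert_eq!(input_expressions, vec!["a0", "a1"]);
    assert_eq!(shuffle_expressions, vec!["s0", "s1"]);
}
```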
+ pub fn new>(name: S, shuffle: Vec<(Expression, Expression)>) -> Self { + let (input_expressions, shuffle_expressions) = shuffle.into_iter().unzip(); + Argument { + name: name.as_ref().to_string(), + input_expressions, + shuffle_expressions, + } + } + + pub(crate) fn required_degree(&self) -> usize { + assert_eq!(self.input_expressions.len(), self.shuffle_expressions.len()); + + let mut input_degree = 1; + for expr in self.input_expressions.iter() { + input_degree = std::cmp::max(input_degree, expr.degree()); + } + let mut shuffle_degree = 1; + for expr in self.shuffle_expressions.iter() { + shuffle_degree = std::cmp::max(shuffle_degree, expr.degree()); + } + + // (1 - (l_last + l_blind)) (z(\omega X) (s(X) + \gamma) - z(X) (a(X) + \gamma)) + std::cmp::max(2 + shuffle_degree, 2 + input_degree) + } + + /// Returns input of this argument + pub fn input_expressions(&self) -> &Vec> { + &self.input_expressions + } + + /// Returns table of this argument + pub fn shuffle_expressions(&self) -> &Vec> { + &self.shuffle_expressions + } + + /// Returns name of this argument + pub fn name(&self) -> &str { + &self.name + } +} diff --git a/halo2_proofs/src/plonk/shuffle/prover.rs b/halo2_proofs/src/plonk/shuffle/prover.rs new file mode 100644 index 0000000000..fd30436a47 --- /dev/null +++ b/halo2_proofs/src/plonk/shuffle/prover.rs @@ -0,0 +1,250 @@ +use super::super::{ + circuit::Expression, ChallengeGamma, ChallengeTheta, ChallengeX, Error, ProvingKey, +}; +use super::Argument; +use crate::plonk::evaluation::evaluate; +use crate::{ + arithmetic::{eval_polynomial, parallelize, CurveAffine}, + poly::{ + commitment::{Blind, Params}, + Coeff, EvaluationDomain, LagrangeCoeff, Polynomial, ProverQuery, Rotation, + }, + transcript::{EncodedChallenge, TranscriptWrite}, +}; +use ff::WithSmallOrderMulGroup; +use group::{ff::BatchInvert, Curve}; +use rand_core::RngCore; +use std::{ + iter, + ops::{Mul, MulAssign}, +}; + +#[derive(Debug)] +struct Compressed { + input_expression: Polynomial, + shuffle_expression: Polynomial, +} + +#[derive(Debug)] +pub(in crate::plonk) struct Committed { + pub(in crate::plonk) product_poly: Polynomial, + product_blind: Blind, +} + +pub(in crate::plonk) struct Evaluated { + constructed: Committed, +} + +impl> Argument { + /// Given a Shuffle with input expressions [A_0, A_1, ..., A_{m-1}] and table expressions + /// [S_0, S_1, ..., S_{m-1}], this method + /// - constructs A_compressed = \theta^{m-1} A_0 + theta^{m-2} A_1 + ... + \theta A_{m-2} + A_{m-1} + /// and S_compressed = \theta^{m-1} S_0 + theta^{m-2} S_1 + ... 
+ \theta S_{m-2} + S_{m-1}, + #[allow(clippy::too_many_arguments)] + fn compress<'a, 'params: 'a, C, P: Params<'params, C>>( + &self, + pk: &ProvingKey, + params: &P, + domain: &EvaluationDomain, + theta: ChallengeTheta, + advice_values: &'a [Polynomial], + fixed_values: &'a [Polynomial], + instance_values: &'a [Polynomial], + challenges: &'a [C::Scalar], + ) -> Compressed + where + C: CurveAffine, + C::Curve: Mul + MulAssign, + { + // Closure to get values of expressions and compress them + let compress_expressions = |expressions: &[Expression]| { + let compressed_expression = expressions + .iter() + .map(|expression| { + pk.vk.domain.lagrange_from_vec(evaluate( + expression, + params.n() as usize, + 1, + fixed_values, + advice_values, + instance_values, + challenges, + )) + }) + .fold(domain.empty_lagrange(), |acc, expression| { + acc * *theta + &expression + }); + compressed_expression + }; + + // Get values of input expressions involved in the shuffle and compress them + let input_expression = compress_expressions(&self.input_expressions); + + // Get values of table expressions involved in the shuffle and compress them + let shuffle_expression = compress_expressions(&self.shuffle_expressions); + + Compressed { + input_expression, + shuffle_expression, + } + } + + /// Given a Shuffle with input expressions and table expressions this method + /// constructs the grand product polynomial over the shuffle. + /// The grand product polynomial is used to populate the Product struct. + /// The Product struct is added to the Shuffle and finally returned by the method. + #[allow(clippy::too_many_arguments)] + pub(in crate::plonk) fn commit_product< + 'a, + 'params: 'a, + C, + P: Params<'params, C>, + E: EncodedChallenge, + R: RngCore, + T: TranscriptWrite, + >( + &self, + pk: &ProvingKey, + params: &P, + domain: &EvaluationDomain, + theta: ChallengeTheta, + gamma: ChallengeGamma, + advice_values: &'a [Polynomial], + fixed_values: &'a [Polynomial], + instance_values: &'a [Polynomial], + challenges: &'a [C::Scalar], + mut rng: R, + transcript: &mut T, + ) -> Result, Error> + where + C: CurveAffine, + C::Curve: Mul + MulAssign, + { + let compressed = self.compress( + pk, + params, + domain, + theta, + advice_values, + fixed_values, + instance_values, + challenges, + ); + + let blinding_factors = pk.vk.cs.blinding_factors(); + + let mut shuffle_product = vec![C::Scalar::ZERO; params.n() as usize]; + parallelize(&mut shuffle_product, |shuffle_product, start| { + for (shuffle_product, shuffle_value) in shuffle_product + .iter_mut() + .zip(compressed.shuffle_expression[start..].iter()) + { + *shuffle_product = *gamma + shuffle_value; + } + }); + + shuffle_product.iter_mut().batch_invert(); + + parallelize(&mut shuffle_product, |product, start| { + for (i, product) in product.iter_mut().enumerate() { + let i = i + start; + *product *= &(*gamma + compressed.input_expression[i]); + } + }); + + // Compute the evaluations of the shuffle product polynomial + // over our domain, starting with z[0] = 1 + let z = iter::once(C::Scalar::ONE) + .chain(shuffle_product) + .scan(C::Scalar::ONE, |state, cur| { + *state *= &cur; + Some(*state) + }) + // Take all rows including the "last" row which should + // be a boolean (and ideally 1, else soundness is broken) + .take(params.n() as usize - blinding_factors) + // Chain random blinding factors. 
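The grand product built below telescopes back to one precisely because, when the shuffle really is a permutation, the two compressed columns have identical products of `(value + gamma)`. A toy integer check of that fact (plain `u128` arithmetic, no finite field, purely illustrative):

```
fn main() {
    let a: Vec<u128> = vec![7, 3, 11, 3];
    let s: Vec<u128> = vec![3, 11, 7, 3]; // a permutation of `a`
    let gamma: u128 = 5;

    let lhs: u128 = a.iter().map(|x| x + gamma).product();
    let rhs: u128 = s.iter().map(|x| x + gamma).product();

    // The running product z_{i+1} = z_i * (a_i + gamma) / (s_i + gamma)
    // returns to 1 exactly when these two products agree.
    assert_eq!(lhs, rhs);
}
```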
+ .chain((0..blinding_factors).map(|_| C::Scalar::random(&mut rng))) + .collect::>(); + assert_eq!(z.len(), params.n() as usize); + let z = pk.vk.domain.lagrange_from_vec(z); + + #[cfg(feature = "sanity-checks")] + { + // While in Lagrange basis, check that product is correctly constructed + let u = (params.n() as usize) - (blinding_factors + 1); + assert_eq!(z[0], C::Scalar::ONE); + for i in 0..u { + let mut left = z[i + 1]; + let input_value = &compressed.input_expression[i]; + let shuffle_value = &compressed.shuffle_expression[i]; + left *= &(*gamma + shuffle_value); + let mut right = z[i]; + right *= &(*gamma + input_value); + assert_eq!(left, right); + } + assert_eq!(z[u], C::Scalar::ONE); + } + + let product_blind = Blind(C::Scalar::random(rng)); + let product_commitment = params.commit_lagrange(&z, product_blind).to_affine(); + let z = pk.vk.domain.lagrange_to_coeff(z); + + // Hash product commitment + transcript.write_point(product_commitment)?; + + Ok(Committed:: { + product_poly: z, + product_blind, + }) + } +} + +impl Committed { + pub(in crate::plonk) fn evaluate, T: TranscriptWrite>( + self, + pk: &ProvingKey, + x: ChallengeX, + transcript: &mut T, + ) -> Result, Error> { + let domain = &pk.vk.domain; + let x_next = domain.rotate_omega(*x, Rotation::next()); + + let product_eval = eval_polynomial(&self.product_poly, *x); + let product_next_eval = eval_polynomial(&self.product_poly, x_next); + + // Hash each advice evaluation + for eval in iter::empty() + .chain(Some(product_eval)) + .chain(Some(product_next_eval)) + { + transcript.write_scalar(eval)?; + } + + Ok(Evaluated { constructed: self }) + } +} + +impl Evaluated { + pub(in crate::plonk) fn open<'a>( + &'a self, + pk: &'a ProvingKey, + x: ChallengeX, + ) -> impl Iterator> + Clone { + let x_next = pk.vk.domain.rotate_omega(*x, Rotation::next()); + + iter::empty() + // Open shuffle product commitments at x + .chain(Some(ProverQuery { + point: *x, + poly: &self.constructed.product_poly, + blind: self.constructed.product_blind, + })) + // Open shuffle product commitments at x_next + .chain(Some(ProverQuery { + point: x_next, + poly: &self.constructed.product_poly, + blind: self.constructed.product_blind, + })) + } +} diff --git a/halo2_proofs/src/plonk/shuffle/verifier.rs b/halo2_proofs/src/plonk/shuffle/verifier.rs new file mode 100644 index 0000000000..379cc5c8a1 --- /dev/null +++ b/halo2_proofs/src/plonk/shuffle/verifier.rs @@ -0,0 +1,138 @@ +use std::iter; + +use super::super::{circuit::Expression, ChallengeGamma, ChallengeTheta, ChallengeX}; +use super::Argument; +use crate::{ + arithmetic::CurveAffine, + plonk::{Error, VerifyingKey}, + poly::{commitment::MSM, Rotation, VerifierQuery}, + transcript::{EncodedChallenge, TranscriptRead}, +}; +use ff::Field; + +pub struct Committed { + product_commitment: C, +} + +pub struct Evaluated { + committed: Committed, + product_eval: C::Scalar, + product_next_eval: C::Scalar, +} + +impl Argument { + pub(in crate::plonk) fn read_product_commitment< + C: CurveAffine, + E: EncodedChallenge, + T: TranscriptRead, + >( + &self, + transcript: &mut T, + ) -> Result, Error> { + let product_commitment = transcript.read_point()?; + + Ok(Committed { product_commitment }) + } +} + +impl Committed { + pub(crate) fn evaluate, T: TranscriptRead>( + self, + transcript: &mut T, + ) -> Result, Error> { + let product_eval = transcript.read_scalar()?; + let product_next_eval = transcript.read_scalar()?; + + Ok(Evaluated { + committed: self, + product_eval, + product_next_eval, + }) + } +} + +impl 
Evaluated { + #[allow(clippy::too_many_arguments)] + pub(in crate::plonk) fn expressions<'a>( + &'a self, + l_0: C::Scalar, + l_last: C::Scalar, + l_blind: C::Scalar, + argument: &'a Argument, + theta: ChallengeTheta, + gamma: ChallengeGamma, + advice_evals: &[C::Scalar], + fixed_evals: &[C::Scalar], + instance_evals: &[C::Scalar], + challenges: &[C::Scalar], + ) -> impl Iterator + 'a { + let active_rows = C::Scalar::ONE - (l_last + l_blind); + + let product_expression = || { + // z(\omega X) (s(X) + \gamma) - z(X) (a(X) + \gamma) + let compress_expressions = |expressions: &[Expression]| { + expressions + .iter() + .map(|expression| { + expression.evaluate( + &|scalar| scalar, + &|_| panic!("virtual selectors are removed during optimization"), + &|query| fixed_evals[query.index.unwrap()], + &|query| advice_evals[query.index.unwrap()], + &|query| instance_evals[query.index.unwrap()], + &|challenge| challenges[challenge.index()], + &|a| -a, + &|a, b| a + &b, + &|a, b| a * &b, + &|a, scalar| a * &scalar, + ) + }) + .fold(C::Scalar::ZERO, |acc, eval| acc * &*theta + &eval) + }; + // z(\omega X) (s(X) + \gamma) + let left = self.product_next_eval + * &(compress_expressions(&argument.shuffle_expressions) + &*gamma); + // z(X) (a(X) + \gamma) + let right = + self.product_eval * &(compress_expressions(&argument.input_expressions) + &*gamma); + + (left - &right) * &active_rows + }; + + std::iter::empty() + .chain( + // l_0(X) * (1 - z'(X)) = 0 + Some(l_0 * &(C::Scalar::ONE - &self.product_eval)), + ) + .chain( + // l_last(X) * (z(X)^2 - z(X)) = 0 + Some(l_last * &(self.product_eval.square() - &self.product_eval)), + ) + .chain( + // (1 - (l_last(X) + l_blind(X))) * ( z(\omega X) (s(X) + \gamma) - z(X) (a(X) + \gamma)) + Some(product_expression()), + ) + } + + pub(in crate::plonk) fn queries<'r, M: MSM + 'r>( + &'r self, + vk: &'r VerifyingKey, + x: ChallengeX, + ) -> impl Iterator> + Clone { + let x_next = vk.domain.rotate_omega(*x, Rotation::next()); + + iter::empty() + // Open shuffle product commitment at x + .chain(Some(VerifierQuery::new_commitment( + &self.committed.product_commitment, + *x, + self.product_eval, + ))) + // Open shuffle product commitment at \omega x + .chain(Some(VerifierQuery::new_commitment( + &self.committed.product_commitment, + x_next, + self.product_next_eval, + ))) + } +} diff --git a/halo2_proofs/src/plonk/vanishing/prover.rs b/halo2_proofs/src/plonk/vanishing/prover.rs index d394b8c0de..1b055682b0 100644 --- a/halo2_proofs/src/plonk/vanishing/prover.rs +++ b/halo2_proofs/src/plonk/vanishing/prover.rs @@ -7,9 +7,8 @@ use rand_core::RngCore; use super::Argument; use crate::{ arithmetic::{eval_polynomial, CurveAffine}, - plonk::{ChallengeX, ChallengeY, Error}, + plonk::{ChallengeX, Error}, poly::{ - self, commitment::{Blind, ParamsProver}, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, Polynomial, ProverQuery, }, diff --git a/halo2_proofs/src/plonk/verifier.rs b/halo2_proofs/src/plonk/verifier.rs index 315a3b6ca1..a50e49c53b 100644 --- a/halo2_proofs/src/plonk/verifier.rs +++ b/halo2_proofs/src/plonk/verifier.rs @@ -1,28 +1,25 @@ use ff::{Field, FromUniformBytes, WithSmallOrderMulGroup}; use group::Curve; -use rand_core::RngCore; use std::iter; use super::{ vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error, VerifyingKey, }; -use crate::arithmetic::{compute_inner_product, CurveAffine}; +use crate::arithmetic::compute_inner_product; use crate::poly::commitment::{CommitmentScheme, Verifier}; use crate::poly::VerificationStrategy; use 
crate::poly::{ - commitment::{Blind, Params, MSM}, - Guard, VerifierQuery, + commitment::{Blind, Params}, + VerifierQuery, }; -use crate::transcript::{read_n_points, read_n_scalars, EncodedChallenge, TranscriptRead}; +use crate::transcript::{read_n_scalars, EncodedChallenge, TranscriptRead}; #[cfg(feature = "batch")] mod batch; #[cfg(feature = "batch")] pub use batch::BatchVerifier; -use crate::poly::commitment::ParamsVerifier; - /// Returns a boolean indicating whether or not the proof is valid pub fn verify_proof< 'params, @@ -125,9 +122,9 @@ where // Sample theta challenge for keeping lookup columns linearly independent let theta: ChallengeTheta<_> = transcript.squeeze_challenge_scalar(); - let lookups_permuted = (0..num_proofs) + let lookups_prepared = (0..num_proofs) .map(|_| -> Result, _> { - // Hash each lookup permuted commitment + // Hash each lookup prepared commitment vk.cs .lookups .iter() @@ -149,7 +146,7 @@ where }) .collect::, _>>()?; - let lookups_committed = lookups_permuted + let lookups_committed = lookups_prepared .into_iter() .map(|lookups| { // Hash each lookup sum commitment @@ -160,6 +157,17 @@ where }) .collect::, _>>()?; + let shuffles_committed = (0..num_proofs) + .map(|_| -> Result, _> { + // Hash each shuffle product commitment + vk.cs + .shuffles + .iter() + .map(|argument| argument.read_product_commitment(transcript)) + .collect::, _>>() + }) + .collect::, _>>()?; + let vanishing = vanishing::Argument::read_commitments_before_y(transcript)?; // Sample y challenge, which keeps the gates linearly independent. @@ -177,7 +185,7 @@ where }) .collect::, _>>()? } else { - let xn = x.pow(&[params.n() as u64, 0, 0, 0]); + let xn = x.pow([params.n()]); let (min_rotation, max_rotation) = vk.cs .instance_queries @@ -242,11 +250,21 @@ where }) .collect::, _>>()?; + let shuffles_evaluated = shuffles_committed + .into_iter() + .map(|shuffles| -> Result, _> { + shuffles + .into_iter() + .map(|shuffle| shuffle.evaluate(transcript)) + .collect::, _>>() + }) + .collect::, _>>()?; + // This check ensures the circuit is satisfied so long as the polynomial // commitments open to the correct values. 
let vanishing = { // x^n - let xn = x.pow(&[params.n() as u64, 0, 0, 0]); + let xn = x.pow([params.n()]); let blinding_factors = vk.cs.blinding_factors(); let l_evals = vk @@ -265,58 +283,79 @@ where .zip(instance_evals.iter()) .zip(permutations_evaluated.iter()) .zip(lookups_evaluated.iter()) - .flat_map(|(((advice_evals, instance_evals), permutation), lookups)| { - let challenges = &challenges; - let fixed_evals = &fixed_evals; - std::iter::empty() - // Evaluate the circuit using the custom gates provided - .chain(vk.cs.gates.iter().flat_map(move |gate| { - gate.polynomials().iter().map(move |poly| { - poly.evaluate( - &|scalar| scalar, - &|_| panic!("virtual selectors are removed during optimization"), - &|query| fixed_evals[query.index], - &|query| advice_evals[query.index], - &|query| instance_evals[query.index], - &|challenge| challenges[challenge.index()], - &|a| -a, - &|a, b| a + &b, - &|a, b| a * &b, - &|a, scalar| a * &scalar, - ) - }) - })) - .chain(permutation.expressions( - vk, - &vk.cs.permutation, - &permutations_common, - advice_evals, - fixed_evals, - instance_evals, - l_0, - l_last, - l_blind, - beta, - gamma, - x, - )) - .chain(lookups.iter().zip(vk.cs.lookups.iter()).flat_map( - move |(p, argument)| { - p.expressions( - l_0, - l_last, - l_blind, - argument, - theta, - beta, - advice_evals, - fixed_evals, - instance_evals, - challenges, - ) - }, - )) - }); + .zip(shuffles_evaluated.iter()) + .flat_map( + |((((advice_evals, instance_evals), permutation), lookups), shuffles)| { + let challenges = &challenges; + let fixed_evals = &fixed_evals; + std::iter::empty() + // Evaluate the circuit using the custom gates provided + .chain(vk.cs.gates.iter().flat_map(move |gate| { + gate.polynomials().iter().map(move |poly| { + poly.evaluate( + &|scalar| scalar, + &|_| { + panic!("virtual selectors are removed during optimization") + }, + &|query| fixed_evals[query.index.unwrap()], + &|query| advice_evals[query.index.unwrap()], + &|query| instance_evals[query.index.unwrap()], + &|challenge| challenges[challenge.index()], + &|a| -a, + &|a, b| a + &b, + &|a, b| a * &b, + &|a, scalar| a * &scalar, + ) + }) + })) + .chain(permutation.expressions( + vk, + &vk.cs.permutation, + &permutations_common, + advice_evals, + fixed_evals, + instance_evals, + l_0, + l_last, + l_blind, + beta, + gamma, + x, + )) + .chain(lookups.iter().zip(vk.cs.lookups.iter()).flat_map( + move |(p, argument)| { + p.expressions( + l_0, + l_last, + l_blind, + argument, + theta, + beta, + advice_evals, + fixed_evals, + instance_evals, + challenges, + ) + }, + )) + .chain(shuffles.iter().zip(vk.cs.shuffles.iter()).flat_map( + move |(p, argument)| { + p.expressions( + l_0, + l_last, + l_blind, + argument, + theta, + gamma, + advice_evals, + fixed_evals, + instance_evals, + challenges, + ) + }, + )) + }, + ); vanishing.verify(params, expressions, y, xn) }; @@ -328,13 +367,20 @@ where .zip(advice_evals.iter()) .zip(permutations_evaluated.iter()) .zip(lookups_evaluated.iter()) + .zip(shuffles_evaluated.iter()) .flat_map( |( ( - (((instance_commitments, instance_evals), advice_commitments), advice_evals), - permutation, + ( + ( + ((instance_commitments, instance_evals), advice_commitments), + advice_evals, + ), + permutation, + ), + lookups, ), - lookups, + shuffles, )| { iter::empty() .chain( @@ -362,6 +408,7 @@ where )) .chain(permutation.queries(vk, x)) .chain(lookups.iter().flat_map(move |p| p.queries(vk, x))) + .chain(shuffles.iter().flat_map(move |p| p.queries(vk, x))) }, ) .chain( diff --git 
a/halo2_proofs/src/plonk/verifier/batch.rs b/halo2_proofs/src/plonk/verifier/batch.rs index 04e08be4af..173d552e5f 100644 --- a/halo2_proofs/src/plonk/verifier/batch.rs +++ b/halo2_proofs/src/plonk/verifier/batch.rs @@ -1,14 +1,11 @@ -use std::{io, marker::PhantomData}; - use ff::FromUniformBytes; use group::ff::Field; use halo2curves::CurveAffine; -use rand_core::{OsRng, RngCore}; -use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; +use rand_core::OsRng; use super::{verify_proof, VerificationStrategy}; use crate::{ - multicore, + multicore::{IntoParallelIterator, TryFoldAndReduce}, plonk::{Error, VerifyingKey}, poly::{ commitment::{Params, MSM}, @@ -22,6 +19,9 @@ use crate::{ transcript::{Blake2bRead, TranscriptReadBuffer}, }; +#[cfg(feature = "multicore")] +use crate::multicore::{IndexedParallelIterator, ParallelIterator}; + /// A proof verification strategy that returns the proof's MSM. /// /// `BatchVerifier` handles the accumulation of the MSMs for the batched proofs. @@ -123,11 +123,10 @@ where e }) }) - .try_fold( + .try_fold_and_reduce( || params.empty_msm(), - |msm, res| res.map(|proof_msm| accumulate_msm(msm, proof_msm)), - ) - .try_reduce(|| params.empty_msm(), |a, b| Ok(accumulate_msm(a, b))); + |acc, res| res.map(|proof_msm| accumulate_msm(acc, proof_msm)), + ); match final_msm { Ok(msm) => msm.check(), diff --git a/halo2_proofs/src/poly.rs b/halo2_proofs/src/poly.rs index 31f6d1f71d..cbe6fd8f1c 100644 --- a/halo2_proofs/src/poly.rs +++ b/halo2_proofs/src/poly.rs @@ -7,7 +7,6 @@ use crate::helpers::SerdePrimeField; use crate::plonk::Assigned; use crate::SerdeFormat; -use ff::PrimeField; use group::ff::{BatchInvert, Field}; use std::fmt::Debug; use std::io; @@ -200,7 +199,7 @@ pub(crate) fn batch_invert_assigned( assigned .iter() - .zip(assigned_denominators.into_iter()) + .zip(assigned_denominators) .map(|(poly, inv_denoms)| poly.invert(inv_denoms.into_iter().map(|d| d.unwrap_or(F::ONE)))) .collect() } @@ -215,7 +214,7 @@ impl Polynomial, LagrangeCoeff> { values: self .values .iter() - .zip(inv_denoms.into_iter()) + .zip(inv_denoms) .map(|(a, inv_den)| a.numerator() * inv_den) .collect(), _marker: self._marker, diff --git a/halo2_proofs/src/poly/commitment.rs b/halo2_proofs/src/poly/commitment.rs index 3a0b68f62a..590767e68e 100644 --- a/halo2_proofs/src/poly/commitment.rs +++ b/halo2_proofs/src/poly/commitment.rs @@ -6,12 +6,11 @@ use super::{ use crate::poly::Error; use crate::transcript::{EncodedChallenge, TranscriptRead, TranscriptWrite}; use ff::Field; -use group::{Curve, Group}; -use halo2curves::{CurveAffine, CurveExt}; +use halo2curves::CurveAffine; use rand_core::RngCore; use std::{ fmt::Debug, - io::{self, Read, Write}, + io::{self}, ops::{Add, AddAssign, Mul, MulAssign}, }; diff --git a/halo2_proofs/src/poly/domain.rs b/halo2_proofs/src/poly/domain.rs index e99c7f6c59..0f3f01c014 100644 --- a/halo2_proofs/src/poly/domain.rs +++ b/halo2_proofs/src/poly/domain.rs @@ -8,10 +8,7 @@ use crate::{ use super::{Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation}; use ff::WithSmallOrderMulGroup; -use group::{ - ff::{BatchInvert, Field, PrimeField}, - Group, -}; +use group::ff::{BatchInvert, Field}; use std::marker::PhantomData; @@ -54,6 +51,9 @@ impl> EvaluationDomain { extended_k += 1; } + // ensure extended_k <= S + assert!(extended_k <= F::S); + let mut extended_omega = F::ROOT_OF_UNITY; // Get extended_omega, the 2^{extended_k}'th root of unity @@ -88,8 +88,8 @@ impl> EvaluationDomain { { // Compute the evaluations of t(X) = 
X^n - 1 in the coset evaluation domain. // We don't have to compute all of them, because it will repeat. - let orig = F::ZETA.pow_vartime(&[n as u64, 0, 0, 0]); - let step = extended_omega.pow_vartime(&[n as u64, 0, 0, 0]); + let orig = F::ZETA.pow_vartime([n, 0, 0, 0]); + let step = extended_omega.pow_vartime([n, 0, 0, 0]); let mut cur = orig; loop { t_evaluations.push(cur); @@ -175,7 +175,7 @@ impl> EvaluationDomain { &self, values: Vec>, ) -> Polynomial { - assert_eq!(values.len(), (self.extended_len() >> self.k) as usize); + assert_eq!(values.len(), (self.extended_len() >> self.k)); assert_eq!(values[0].len(), self.n as usize); // transpose the values in parallel @@ -456,10 +456,10 @@ impl> EvaluationDomain { /// fn distribute_powers(&self, a: &mut [F], c: F) { parallelize(a, |a, index| { - let mut c_power = c.pow_vartime(&[index as u64, 0, 0, 0]); + let mut c_power = c.pow_vartime([index as u64, 0, 0, 0]); for a in a { - *a = *a * c_power; - c_power = c_power * c; + *a *= &c_power; + c_power *= c; } }); } @@ -651,17 +651,17 @@ fn test_l_i() { let mut l = vec![]; let mut points = vec![]; for i in 0..8 { - points.push(domain.omega.pow(&[i, 0, 0, 0])); + points.push(domain.omega.pow([i])); } for i in 0..8 { - let mut l_i = vec![Scalar::zero(); 8]; + let mut l_i = vec![Scalar::ZERO; 8]; l_i[i] = Scalar::ONE; let l_i = lagrange_interpolate(&points[..], &l_i[..]); l.push(l_i); } let x = Scalar::random(OsRng); - let xn = x.pow(&[8, 0, 0, 0]); + let xn = x.pow([8]); let evaluations = domain.l_i_range(x, xn, -7..=7); for i in 0..8 { diff --git a/halo2_proofs/src/poly/ipa/commitment.rs b/halo2_proofs/src/poly/ipa/commitment.rs index f9b4ad059b..9e30355121 100644 --- a/halo2_proofs/src/poly/ipa/commitment.rs +++ b/halo2_proofs/src/poly/ipa/commitment.rs @@ -3,18 +3,14 @@ //! //! 
[halo]: https://eprint.iacr.org/2019/1021 -use crate::arithmetic::{ - best_fft, best_multiexp, g_to_lagrange, parallelize, CurveAffine, CurveExt, -}; +use crate::arithmetic::{best_multiexp, g_to_lagrange, parallelize, CurveAffine, CurveExt}; use crate::helpers::CurveRead; -use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier, MSM}; +use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier}; use crate::poly::ipa::msm::MSMIPA; use crate::poly::{Coeff, LagrangeCoeff, Polynomial}; -use ff::{Field, PrimeField}; -use group::{prime::PrimeCurveAffine, Curve, Group}; +use group::{Curve, Group}; use std::marker::PhantomData; -use std::ops::{Add, AddAssign, Mul, MulAssign}; mod prover; mod verifier; @@ -233,21 +229,13 @@ impl<'params, C: CurveAffine> ParamsProver<'params, C> for ParamsIPA { #[cfg(test)] mod test { - - use crate::arithmetic::{best_fft, best_multiexp, parallelize, CurveAffine, CurveExt}; - use crate::helpers::CurveRead; use crate::poly::commitment::ParamsProver; - use crate::poly::commitment::{Blind, CommitmentScheme, Params, MSM}; + use crate::poly::commitment::{Blind, Params, MSM}; use crate::poly::ipa::commitment::{create_proof, verify_proof, ParamsIPA}; use crate::poly::ipa::msm::MSMIPA; - use crate::poly::{Coeff, LagrangeCoeff, Polynomial}; - - use ff::{Field, PrimeField}; - use group::{prime::PrimeCurveAffine, Curve, Group}; - use std::marker::PhantomData; - use std::ops::{Add, AddAssign, Mul, MulAssign}; + use group::Curve; - use std::io; + use ff::Field; #[test] fn test_commit_lagrange_epaffine() { diff --git a/halo2_proofs/src/poly/ipa/commitment/prover.rs b/halo2_proofs/src/poly/ipa/commitment/prover.rs index d176987c96..344dbc0e65 100644 --- a/halo2_proofs/src/poly/ipa/commitment/prover.rs +++ b/halo2_proofs/src/poly/ipa/commitment/prover.rs @@ -1,7 +1,7 @@ use ff::Field; use rand_core::RngCore; -use super::{Params, ParamsIPA}; +use super::ParamsIPA; use crate::arithmetic::{ best_multiexp, compute_inner_product, eval_polynomial, parallelize, CurveAffine, }; @@ -11,7 +11,7 @@ use crate::poly::{commitment::Blind, Coeff, Polynomial}; use crate::transcript::{EncodedChallenge, TranscriptWrite}; use group::Curve; -use std::io::{self, Write}; +use std::io::{self}; /// Create a polynomial commitment opening proof for the polynomial defined /// by the coefficients `px`, the blinding factor `blind` used for the @@ -51,7 +51,7 @@ pub fn create_proof< // Evaluate the random polynomial at x_3 let s_at_x3 = eval_polynomial(&s_poly[..], x_3); // Subtract constant coefficient to get a random polynomial with a root at x_3 - s_poly[0] = s_poly[0] - &s_at_x3; + s_poly[0] -= &s_at_x3; // And sample a random blind let s_poly_blind = Blind(C::Scalar::random(&mut rng)); @@ -72,7 +72,7 @@ pub fn create_proof< // zero. 
let mut p_prime_poly = s_poly * xi + p_poly; let v = eval_polynomial(&p_prime_poly, x_3); - p_prime_poly[0] = p_prime_poly[0] - &v; + p_prime_poly[0] -= &v; let p_prime_blind = s_poly_blind * Blind(xi) + p_blind; // This accumulates the synthetic blinding factor `f` starting diff --git a/halo2_proofs/src/poly/ipa/commitment/verifier.rs b/halo2_proofs/src/poly/ipa/commitment/verifier.rs index 0b60842899..cf258625d5 100644 --- a/halo2_proofs/src/poly/ipa/commitment/verifier.rs +++ b/halo2_proofs/src/poly/ipa/commitment/verifier.rs @@ -1,18 +1,9 @@ -use std::io::Read; - -use group::{ - ff::{BatchInvert, Field}, - Curve, -}; +use group::ff::{BatchInvert, Field}; use super::ParamsIPA; -use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsVerifierIPA}; -use crate::{ - arithmetic::{best_multiexp, CurveAffine}, - poly::ipa::strategy::GuardIPA, -}; +use crate::{arithmetic::CurveAffine, poly::ipa::strategy::GuardIPA}; use crate::{ - poly::{commitment::MSM, ipa::msm::MSMIPA, strategy::Guard, Error}, + poly::{commitment::MSM, ipa::msm::MSMIPA, Error}, transcript::{EncodedChallenge, TranscriptRead}, }; @@ -75,6 +66,9 @@ pub fn verify_proof<'params, C: CurveAffine, E: EncodedChallenge, T: Transcri // P' + \sum([u_j^{-1}] L_j) + \sum([u_j] R_j) // + [-c] G'_0 + [-cbz] U + [-f] W // = 0 + // + // Note that the guard returned from this function does not include + // the [-c]G'_0 term. let c = transcript.read_scalar().map_err(|_| Error::SamplingError)?; let neg_c = -c; diff --git a/halo2_proofs/src/poly/ipa/msm.rs b/halo2_proofs/src/poly/ipa/msm.rs index 3316e25337..a615ddce49 100644 --- a/halo2_proofs/src/poly/ipa/msm.rs +++ b/halo2_proofs/src/poly/ipa/msm.rs @@ -1,9 +1,5 @@ -use super::commitment::{IPACommitmentScheme, ParamsIPA}; -use crate::arithmetic::{best_multiexp, parallelize, CurveAffine}; -use crate::poly::{ - commitment::{CommitmentScheme, Params, MSM}, - ipa::commitment::ParamsVerifierIPA, -}; +use crate::arithmetic::{best_multiexp, CurveAffine}; +use crate::poly::{commitment::MSM, ipa::commitment::ParamsVerifierIPA}; use ff::Field; use group::Group; use std::collections::BTreeMap; @@ -222,13 +218,10 @@ impl<'a, C: CurveAffine> MSMIPA<'a, C> { #[cfg(test)] mod tests { - use super::ParamsIPA; - use crate::poly::commitment::ParamsProver; use crate::poly::{ - commitment::{Params, MSM}, - ipa::msm::MSMIPA, + commitment::{ParamsProver, MSM}, + ipa::{commitment::ParamsIPA, msm::MSMIPA}, }; - use group::Curve; use halo2curves::{ pasta::{Ep, EpAffine, Fp, Fq}, CurveAffine, diff --git a/halo2_proofs/src/poly/ipa/multiopen.rs b/halo2_proofs/src/poly/ipa/multiopen.rs index fd6aa78544..b78acb5934 100644 --- a/halo2_proofs/src/poly/ipa/multiopen.rs +++ b/halo2_proofs/src/poly/ipa/multiopen.rs @@ -4,7 +4,7 @@ //! 
[halo]: https://eprint.iacr.org/2019/1021 use super::*; -use crate::{arithmetic::CurveAffine, poly::query::Query, transcript::ChallengeScalar}; +use crate::{poly::query::Query, transcript::ChallengeScalar}; use ff::Field; use std::collections::{BTreeMap, BTreeSet}; diff --git a/halo2_proofs/src/poly/ipa/multiopen/prover.rs b/halo2_proofs/src/poly/ipa/multiopen/prover.rs index 6d65e7ae64..2ae745d457 100644 --- a/halo2_proofs/src/poly/ipa/multiopen/prover.rs +++ b/halo2_proofs/src/poly/ipa/multiopen/prover.rs @@ -1,9 +1,7 @@ -use super::{ - construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Query, -}; +use super::{construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4}; use crate::arithmetic::{eval_polynomial, kate_division, CurveAffine}; use crate::poly::commitment::ParamsProver; -use crate::poly::commitment::{Blind, Params, Prover}; +use crate::poly::commitment::{Blind, Prover}; use crate::poly::ipa::commitment::{self, IPACommitmentScheme, ParamsIPA}; use crate::poly::query::ProverQuery; use crate::poly::{Coeff, Polynomial}; @@ -109,7 +107,7 @@ impl<'params, C: CurveAffine> Prover<'params, IPACommitmentScheme> for Prover let x_4: ChallengeX4<_> = transcript.squeeze_challenge_scalar(); - let (p_poly, p_poly_blind) = q_polys.into_iter().zip(q_blinds.into_iter()).fold( + let (p_poly, p_poly_blind) = q_polys.into_iter().zip(q_blinds).fold( (q_prime_poly, q_prime_blind), |(q_prime_poly, q_prime_blind), (poly, blind)| { ( diff --git a/halo2_proofs/src/poly/ipa/multiopen/verifier.rs b/halo2_proofs/src/poly/ipa/multiopen/verifier.rs index 391f89e15b..d559e33384 100644 --- a/halo2_proofs/src/poly/ipa/multiopen/verifier.rs +++ b/halo2_proofs/src/poly/ipa/multiopen/verifier.rs @@ -1,20 +1,14 @@ use std::fmt::Debug; -use std::io::Read; -use std::marker::PhantomData; use ff::Field; -use rand_core::RngCore; -use super::{ - construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Query, -}; +use super::{construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4}; use crate::arithmetic::{eval_polynomial, lagrange_interpolate, CurveAffine}; use crate::poly::commitment::{Params, Verifier, MSM}; use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA, ParamsVerifierIPA}; use crate::poly::ipa::msm::MSMIPA; use crate::poly::ipa::strategy::GuardIPA; use crate::poly::query::{CommitmentReference, VerifierQuery}; -use crate::poly::strategy::VerificationStrategy; use crate::poly::Error; use crate::transcript::{EncodedChallenge, TranscriptRead}; @@ -57,7 +51,9 @@ impl<'params, C: CurveAffine> Verifier<'params, IPACommitmentScheme> // Compress the commitments and expected evaluations at x together. // using the challenge x_1 - let mut q_commitments: Vec<_> = vec![self.params.empty_msm(); point_sets.len()]; + let mut q_commitments: Vec<_> = vec![ + (self.params.empty_msm(), C::Scalar::ONE); // (accumulator, next x_1 power). + point_sets.len()]; // A vec of vecs of evals. The outer vec corresponds to the point set, // while the inner vec corresponds to the points in a particular set. 
@@ -65,28 +61,32 @@ impl<'params, C: CurveAffine> Verifier<'params, IPACommitmentScheme> for point_set in point_sets.iter() { q_eval_sets.push(vec![C::Scalar::ZERO; point_set.len()]); } + { let mut accumulate = |set_idx: usize, new_commitment: CommitmentReference>, evals: Vec| { - q_commitments[set_idx].scale(*x_1); + let (q_commitment, x_1_power) = &mut q_commitments[set_idx]; match new_commitment { CommitmentReference::Commitment(c) => { - q_commitments[set_idx].append_term(C::Scalar::ONE, (*c).into()); + q_commitment.append_term(*x_1_power, (*c).into()); } CommitmentReference::MSM(msm) => { - q_commitments[set_idx].add_msm(msm); + let mut msm = msm.clone(); + msm.scale(*x_1_power); + q_commitment.add_msm(&msm); } } for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) { - *set_eval *= &(*x_1); - *set_eval += eval; + *set_eval += (*eval) * (*x_1_power); } + *x_1_power *= *x_1; }; // Each commitment corresponds to evaluations at a set of points. // For each set, we collapse each commitment's evals pointwise. - for commitment_data in commitment_map.into_iter() { + // Run in order of increasing x_1 powers. + for commitment_data in commitment_map.into_iter().rev() { accumulate( commitment_data.set_index, // set_idx, commitment_data.commitment, // commitment, @@ -135,7 +135,7 @@ impl<'params, C: CurveAffine> Verifier<'params, IPACommitmentScheme> msm.append_term(C::Scalar::ONE, q_prime_commitment.into()); let (msm, v) = q_commitments.into_iter().zip(u.iter()).fold( (msm, msm_eval), - |(mut msm, msm_eval), (q_commitment, q_eval)| { + |(mut msm, msm_eval), ((q_commitment, _), q_eval)| { msm.scale(*x_4); msm.add_msm(&q_commitment); (msm, msm_eval * &(*x_4) + q_eval) diff --git a/halo2_proofs/src/poly/ipa/strategy.rs b/halo2_proofs/src/poly/ipa/strategy.rs index c8d385f90c..d2d1b3d364 100644 --- a/halo2_proofs/src/poly/ipa/strategy.rs +++ b/halo2_proofs/src/poly/ipa/strategy.rs @@ -1,10 +1,6 @@ -use std::marker::PhantomData; - -use super::commitment::{IPACommitmentScheme, ParamsIPA, ParamsVerifierIPA}; +use super::commitment::{IPACommitmentScheme, ParamsIPA}; use super::msm::MSMIPA; use super::multiopen::VerifierIPA; -use crate::poly::commitment::CommitmentScheme; -use crate::transcript::TranscriptRead; use crate::{ arithmetic::best_multiexp, plonk::Error, @@ -12,12 +8,11 @@ use crate::{ commitment::MSM, strategy::{Guard, VerificationStrategy}, }, - transcript::EncodedChallenge, }; use ff::Field; use group::Curve; use halo2curves::CurveAffine; -use rand_core::{OsRng, RngCore}; +use rand_core::OsRng; /// Wrapper for verification accumulator #[derive(Debug, Clone)] diff --git a/halo2_proofs/src/poly/kzg/commitment.rs b/halo2_proofs/src/poly/kzg/commitment.rs index f9fc508c6a..08a1a88869 100644 --- a/halo2_proofs/src/poly/kzg/commitment.rs +++ b/halo2_proofs/src/poly/kzg/commitment.rs @@ -1,8 +1,6 @@ -use crate::arithmetic::{ - best_fft, best_multiexp, g_to_lagrange, parallelize, CurveAffine, CurveExt, -}; +use crate::arithmetic::{best_multiexp, g_to_lagrange, parallelize}; use crate::helpers::SerdeCurveAffine; -use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier, MSM}; +use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier}; use crate::poly::{Coeff, LagrangeCoeff, Polynomial}; use crate::SerdeFormat; @@ -12,7 +10,6 @@ use halo2curves::pairing::Engine; use rand_core::{OsRng, RngCore}; use std::fmt::Debug; use std::marker::PhantomData; -use std::ops::{Add, AddAssign, Mul, MulAssign}; use std::io; @@ 
-109,7 +106,7 @@ where } let n_inv = Option::::from(E::Scalar::from(n).invert()) .expect("inversion should be ok for n = 1<, + g_lagrange: Option>, + g2: E::G2Affine, + s_g2: E::G2Affine, + ) -> Self { + Self { + k, + n: 1 << k, + g_lagrange: if let Some(g_l) = g_lagrange { + g_l + } else { + g_to_lagrange(g.iter().map(PrimeCurveAffine::to_curve).collect(), k) + }, + g, + g2, + s_g2, + } + } + /// Returns gernerator on G2 pub fn g2(&self) -> E::G2Affine { self.g2 @@ -359,21 +380,10 @@ where #[cfg(test)] mod test { - use crate::arithmetic::{best_fft, best_multiexp, parallelize, CurveAffine, CurveExt}; use crate::poly::commitment::ParamsProver; - use crate::poly::commitment::{Blind, CommitmentScheme, Params, MSM}; - use crate::poly::kzg::commitment::{ParamsKZG, ParamsVerifierKZG}; - use crate::poly::kzg::msm::MSMKZG; - use crate::poly::kzg::multiopen::ProverSHPLONK; - use crate::poly::{Coeff, LagrangeCoeff, Polynomial}; - - use ff::{Field, PrimeField}; - use group::{prime::PrimeCurveAffine, Curve, Group}; - use halo2curves::bn256::G1Affine; - use std::marker::PhantomData; - use std::ops::{Add, AddAssign, Mul, MulAssign}; - - use std::io; + use crate::poly::commitment::{Blind, Params}; + use crate::poly::kzg::commitment::ParamsKZG; + use ff::Field; #[test] fn test_commit_lagrange() { @@ -404,13 +414,8 @@ mod test { fn test_parameter_serialisation_roundtrip() { const K: u32 = 4; - use ff::Field; - use rand_core::OsRng; - - use super::super::commitment::{Blind, Params}; - use crate::arithmetic::eval_polynomial; - use crate::halo2curves::bn256::{Bn256, Fr}; - use crate::poly::EvaluationDomain; + use super::super::commitment::Params; + use crate::halo2curves::bn256::Bn256; let params0 = ParamsKZG::::new(K); let mut data = vec![]; diff --git a/halo2_proofs/src/poly/kzg/msm.rs b/halo2_proofs/src/poly/kzg/msm.rs index abd8309f56..2cc6d8a1e4 100644 --- a/halo2_proofs/src/poly/kzg/msm.rs +++ b/halo2_proofs/src/poly/kzg/msm.rs @@ -1,8 +1,8 @@ use std::fmt::Debug; -use super::commitment::{KZGCommitmentScheme, ParamsKZG}; +use super::commitment::ParamsKZG; use crate::{ - arithmetic::{best_multiexp, parallelize, CurveAffine}, + arithmetic::{best_multiexp, parallelize}, poly::commitment::MSM, }; use group::{Curve, Group}; @@ -92,8 +92,6 @@ impl PreMSM { } pub(crate) fn normalize(self) -> MSMKZG { - use group::prime::PrimeCurveAffine; - let (scalars, bases) = self .projectives_msms .into_iter() diff --git a/halo2_proofs/src/poly/kzg/multiopen/gwc.rs b/halo2_proofs/src/poly/kzg/multiopen/gwc.rs index 8e7d742fc0..3fd28dd00a 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/gwc.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/gwc.rs @@ -4,20 +4,9 @@ mod verifier; pub use prover::ProverGWC; pub use verifier::VerifierGWC; -use crate::{ - arithmetic::{eval_polynomial, CurveAffine}, - poly::{ - commitment::{Params, ParamsVerifier}, - query::Query, - Coeff, Polynomial, - }, - transcript::ChallengeScalar, -}; +use crate::{poly::query::Query, transcript::ChallengeScalar}; use ff::Field; -use std::{ - collections::{BTreeMap, BTreeSet}, - marker::PhantomData, -}; +use std::marker::PhantomData; #[derive(Clone, Copy, Debug)] struct U {} diff --git a/halo2_proofs/src/poly/kzg/multiopen/gwc/prover.rs b/halo2_proofs/src/poly/kzg/multiopen/gwc/prover.rs index d5d977fb2d..89b629bfa4 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/gwc/prover.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/gwc/prover.rs @@ -1,23 +1,19 @@ use super::{construct_intermediate_sets, ChallengeV, Query}; -use crate::arithmetic::{eval_polynomial, 
kate_division, powers, CurveAffine}; +use crate::arithmetic::{kate_division, powers}; use crate::helpers::SerdeCurveAffine; use crate::poly::commitment::ParamsProver; use crate::poly::commitment::Prover; use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::query::ProverQuery; -use crate::poly::Rotation; -use crate::poly::{ - commitment::{Blind, Params}, - Coeff, Polynomial, -}; +use crate::poly::{commitment::Blind, Polynomial}; use crate::transcript::{EncodedChallenge, TranscriptWrite}; -use ff::{Field, PrimeField}; +use ff::PrimeField; use group::Curve; use halo2curves::pairing::Engine; use rand_core::RngCore; use std::fmt::Debug; -use std::io::{self, Write}; +use std::io; use std::marker::PhantomData; /// Concrete KZG prover with GWC variant diff --git a/halo2_proofs/src/poly/kzg/multiopen/gwc/verifier.rs b/halo2_proofs/src/poly/kzg/multiopen/gwc/verifier.rs index 1d0c9468b4..ee01bd05bc 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/gwc/verifier.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/gwc/verifier.rs @@ -1,28 +1,20 @@ use std::fmt::Debug; -use std::io::Read; -use std::marker::PhantomData; use super::{construct_intermediate_sets, ChallengeU, ChallengeV}; -use crate::arithmetic::{eval_polynomial, lagrange_interpolate, powers, CurveAffine}; +use crate::arithmetic::powers; use crate::helpers::SerdeCurveAffine; use crate::poly::commitment::Verifier; use crate::poly::commitment::MSM; use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::kzg::msm::{DualMSM, MSMKZG}; -use crate::poly::kzg::strategy::{AccumulatorStrategy, GuardKZG, SingleStrategy}; +use crate::poly::kzg::strategy::GuardKZG; use crate::poly::query::Query; use crate::poly::query::{CommitmentReference, VerifierQuery}; -use crate::poly::strategy::VerificationStrategy; -use crate::poly::{ - commitment::{Params, ParamsVerifier}, - Error, -}; +use crate::poly::Error; use crate::transcript::{EncodedChallenge, TranscriptRead}; use ff::{Field, PrimeField}; -use group::Group; -use halo2curves::pairing::{Engine, MillerLoopResult, MultiMillerLoop}; -use rand_core::OsRng; +use halo2curves::pairing::{Engine, MultiMillerLoop}; #[derive(Debug)] /// Concrete KZG verifier with GWC variant @@ -77,7 +69,7 @@ where let mut witness_with_aux = MSMKZG::::new(); for ((commitment_at_a_point, wi), power_of_u) in - commitment_data.iter().zip(w.into_iter()).zip(powers(*u)) + commitment_data.iter().zip(w).zip(powers(*u)) { assert!(!commitment_at_a_point.queries.is_empty()); let z = commitment_at_a_point.point; diff --git a/halo2_proofs/src/poly/kzg/multiopen/shplonk.rs b/halo2_proofs/src/poly/kzg/multiopen/shplonk.rs index d14132fb43..2af5ab5e43 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/shplonk.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/shplonk.rs @@ -1,19 +1,13 @@ mod prover; mod verifier; -use crate::{ - arithmetic::{eval_polynomial, lagrange_interpolate, CurveAffine}, - poly::{query::Query, Coeff, Polynomial}, - transcript::ChallengeScalar, -}; +use crate::multicore::IntoParallelIterator; +#[cfg(feature = "multicore")] +use crate::multicore::ParallelIterator; +use crate::{poly::query::Query, transcript::ChallengeScalar}; use ff::Field; pub use prover::ProverSHPLONK; -use rayon::prelude::*; -use std::{ - collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, - marker::PhantomData, - sync::Arc, -}; +use std::collections::BTreeSet; pub use verifier::VerifierSHPLONK; #[derive(Clone, Copy, Debug)] @@ -126,7 +120,8 @@ where .into_par_iter() .map(|commitment| { let 
evals: Vec = rotations_vec - .par_iter() + .as_slice() + .into_par_iter() .map(|&&rotation| get_eval(commitment, rotation)) .collect(); Commitment((commitment, evals)) @@ -148,18 +143,10 @@ where #[cfg(test)] mod proptests { - use proptest::{ - collection::vec, - prelude::*, - sample::{select, subsequence}, - strategy::Strategy, - }; - use super::{construct_intermediate_sets, Commitment, IntermediateSets}; - use crate::poly::Rotation; - use ff::{Field, FromUniformBytes}; + use ff::FromUniformBytes; use halo2curves::pasta::Fp; - use std::collections::BTreeMap; + use proptest::{collection::vec, prelude::*, sample::select}; use std::convert::TryFrom; #[derive(Debug, Clone)] @@ -212,7 +199,7 @@ mod proptests { col_indices in vec(select((0..num_cols).collect::>()), num_queries), point_indices in vec(select((0..num_points).collect::>()), num_queries) ) -> Vec<(usize, usize)> { - col_indices.into_iter().zip(point_indices.into_iter()).collect() + col_indices.into_iter().zip(point_indices).collect() } } diff --git a/halo2_proofs/src/poly/kzg/multiopen/shplonk/prover.rs b/halo2_proofs/src/poly/kzg/multiopen/shplonk/prover.rs index bb50eafe58..ba1e2822ce 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/shplonk/prover.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/shplonk/prover.rs @@ -1,5 +1,5 @@ use super::{ - construct_intermediate_sets, ChallengeU, ChallengeV, ChallengeY, Commitment, Query, RotationSet, + construct_intermediate_sets, ChallengeU, ChallengeV, ChallengeY, Commitment, RotationSet, }; use crate::arithmetic::{ eval_polynomial, evaluate_vanishing_polynomial, kate_division, lagrange_interpolate, @@ -9,20 +9,22 @@ use crate::helpers::SerdeCurveAffine; use crate::poly::commitment::{Blind, ParamsProver, Prover}; use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::query::{PolynomialPointer, ProverQuery}; -use crate::poly::Rotation; -use crate::poly::{commitment::Params, Coeff, Polynomial}; +use crate::poly::{Coeff, Polynomial}; use crate::transcript::{EncodedChallenge, TranscriptWrite}; +use crate::multicore::IntoParallelIterator; use ff::{Field, PrimeField}; use group::Curve; use halo2curves::pairing::Engine; use rand_core::RngCore; -use rayon::prelude::*; use std::fmt::Debug; -use std::io::{self, Write}; +use std::io; use std::marker::PhantomData; use std::ops::MulAssign; +#[cfg(feature = "multicore")] +use crate::multicore::ParallelIterator; + fn div_by_vanishing(poly: Polynomial, roots: &[F]) -> Vec { let poly = roots .iter() @@ -136,40 +138,41 @@ where // for different sets that are already combined with anoter challenge let y: ChallengeY<_> = transcript.squeeze_challenge_scalar(); - let quotient_contribution = - |rotation_set: &RotationSetExtension| -> Polynomial { - // [P_i_0(X) - R_i_0(X), P_i_1(X) - R_i_1(X), ... ] - let numerators = rotation_set - .commitments - .par_iter() - .map(|commitment| commitment.quotient_contribution()) - .collect::>(); - - // define numerator polynomial as - // N_i_j(X) = (P_i_j(X) - R_i_j(X)) - // and combine polynomials with same evaluation point set - // N_i(X) = linear_combinination(y, N_i_j(X)) - // where y is random scalar to combine numerator polynomials - let n_x = numerators - .into_iter() - .zip(powers(*y)) - .map(|(numerator, power_of_y)| numerator * power_of_y) - .reduce(|acc, numerator| acc + &numerator) - .unwrap(); - - let points = &rotation_set.points[..]; - - // quotient contribution of this evaluation set is - // Q_i(X) = N_i(X) / Z_i(X) where - // Z_i(X) = (x - r_i_0) * (x - r_i_1) * ... 
- let mut poly = div_by_vanishing(n_x, points); - poly.resize(self.params.n as usize, E::Scalar::ZERO); - - Polynomial { - values: poly, - _marker: PhantomData, - } - }; + let quotient_contribution = |rotation_set: &RotationSetExtension| { + // [P_i_0(X) - R_i_0(X), P_i_1(X) - R_i_1(X), ... ] + #[allow(clippy::needless_collect)] + let numerators = rotation_set + .commitments + .as_slice() + .into_par_iter() + .map(|commitment| commitment.quotient_contribution()) + .collect::>(); + + // define numerator polynomial as + // N_i_j(X) = (P_i_j(X) - R_i_j(X)) + // and combine polynomials with same evaluation point set + // N_i(X) = linear_combinination(y, N_i_j(X)) + // where y is random scalar to combine numerator polynomials + let n_x = numerators + .into_iter() + .zip(powers(*y)) + .map(|(numerator, power_of_y)| numerator * power_of_y) + .reduce(|acc, numerator| acc + &numerator) + .unwrap(); + + let points = &rotation_set.points[..]; + + // quotient contribution of this evaluation set is + // Q_i(X) = N_i(X) / Z_i(X) where + // Z_i(X) = (x - r_i_0) * (x - r_i_1) * ... + let mut poly = div_by_vanishing(n_x, points); + poly.resize(self.params.n as usize, E::Scalar::ZERO); + + Polynomial { + values: poly, + _marker: PhantomData, + } + }; let intermediate_sets = construct_intermediate_sets(queries); let (rotation_sets, super_point_set) = ( @@ -182,7 +185,8 @@ where .map(|rotation_set| { let commitments: Vec> = rotation_set .commitments - .par_iter() + .as_slice() + .into_par_iter() .map(|commitment_data| commitment_data.extend(&rotation_set.points)) .collect(); rotation_set.extend(commitments) @@ -191,8 +195,10 @@ where let v: ChallengeV<_> = transcript.squeeze_challenge_scalar(); + #[allow(clippy::needless_collect)] let quotient_polynomials = rotation_sets - .par_iter() + .as_slice() + .into_par_iter() .map(quotient_contribution) .collect::>(); @@ -207,36 +213,43 @@ where transcript.write_point(h)?; let u: ChallengeU<_> = transcript.squeeze_challenge_scalar(); - let linearisation_contribution = - |rotation_set: RotationSetExtension| -> (Polynomial, E::Scalar) { - let mut diffs = super_point_set.clone(); - for point in rotation_set.points.iter() { - diffs.remove(point); - } - let diffs = diffs.into_iter().collect::>(); - - // calculate difference vanishing polynomial evaluation - let z_i = evaluate_vanishing_polynomial(&diffs[..], *u); - - // inner linearisation contibutions are - // [P_i_0(X) - r_i_0, P_i_1(X) - r_i_1, ... 
] where - // r_i_j = R_i_j(u) is the evaluation of low degree equivalent polynomial - // where u is random evaluation point - let inner_contributions = rotation_set - .commitments - .par_iter() - .map(|commitment| commitment.linearisation_contribution(*u)).collect::>(); - - // define inner contributor polynomial as - // L_i_j(X) = (P_i_j(X) - r_i_j) - // and combine polynomials with same evaluation point set - // L_i(X) = linear_combinination(y, L_i_j(X)) - // where y is random scalar to combine inner contibutors - let l_x: Polynomial = inner_contributions.into_iter().zip(powers(*y)).map(|(poly, power_of_y)| poly * power_of_y).reduce(|acc, poly| acc + &poly).unwrap(); - - // finally scale l_x by difference vanishing polynomial evaluation z_i - (l_x * z_i, z_i) - }; + let linearisation_contribution = |rotation_set: RotationSetExtension| { + let mut diffs = super_point_set.clone(); + for point in rotation_set.points.iter() { + diffs.remove(point); + } + let diffs = diffs.into_iter().collect::>(); + + // calculate difference vanishing polynomial evaluation + let z_i = evaluate_vanishing_polynomial(&diffs[..], *u); + + // inner linearisation contibutions are + // [P_i_0(X) - r_i_0, P_i_1(X) - r_i_1, ... ] where + // r_i_j = R_i_j(u) is the evaluation of low degree equivalent polynomial + // where u is random evaluation point + #[allow(clippy::needless_collect)] + let inner_contributions = rotation_set + .commitments + .as_slice() + .into_par_iter() + .map(|commitment| commitment.linearisation_contribution(*u)) + .collect::>(); + + // define inner contributor polynomial as + // L_i_j(X) = (P_i_j(X) - r_i_j) + // and combine polynomials with same evaluation point set + // L_i(X) = linear_combinination(y, L_i_j(X)) + // where y is random scalar to combine inner contibutors + let l_x: Polynomial = inner_contributions + .into_iter() + .zip(powers(*y)) + .map(|(poly, power_of_y)| poly * power_of_y) + .reduce(|acc, poly| acc + &poly) + .unwrap(); + + // finally scale l_x by difference vanishing polynomial evaluation z_i + (l_x * z_i, z_i) + }; #[allow(clippy::type_complexity)] let (linearisation_contibutions, z_diffs): ( diff --git a/halo2_proofs/src/poly/kzg/multiopen/shplonk/verifier.rs b/halo2_proofs/src/poly/kzg/multiopen/shplonk/verifier.rs index d1d1660e9c..53930f88dd 100644 --- a/halo2_proofs/src/poly/kzg/multiopen/shplonk/verifier.rs +++ b/halo2_proofs/src/poly/kzg/multiopen/shplonk/verifier.rs @@ -1,10 +1,9 @@ use std::fmt::Debug; -use std::io::Read; use super::ChallengeY; use super::{construct_intermediate_sets, ChallengeU, ChallengeV}; use crate::arithmetic::{ - eval_polynomial, evaluate_vanishing_polynomial, lagrange_interpolate, powers, CurveAffine, + eval_polynomial, evaluate_vanishing_polynomial, lagrange_interpolate, powers, }; use crate::helpers::SerdeCurveAffine; use crate::poly::commitment::Verifier; @@ -12,19 +11,12 @@ use crate::poly::commitment::MSM; use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::kzg::msm::DualMSM; use crate::poly::kzg::msm::{PreMSM, MSMKZG}; -use crate::poly::kzg::strategy::{AccumulatorStrategy, GuardKZG, SingleStrategy}; -use crate::poly::query::Query; +use crate::poly::kzg::strategy::GuardKZG; use crate::poly::query::{CommitmentReference, VerifierQuery}; -use crate::poly::strategy::VerificationStrategy; -use crate::poly::{ - commitment::{Params, ParamsVerifier}, - Error, -}; +use crate::poly::Error; use crate::transcript::{EncodedChallenge, TranscriptRead}; use ff::{Field, PrimeField}; -use group::Group; -use 
halo2curves::pairing::{Engine, MillerLoopResult, MultiMillerLoop}; -use rand_core::OsRng; +use halo2curves::pairing::{Engine, MultiMillerLoop}; use std::ops::MulAssign; /// Concrete KZG multiopen verifier with SHPLONK variant diff --git a/halo2_proofs/src/poly/kzg/strategy.rs b/halo2_proofs/src/poly/kzg/strategy.rs index 15cdba2069..14b6565b80 100644 --- a/halo2_proofs/src/poly/kzg/strategy.rs +++ b/halo2_proofs/src/poly/kzg/strategy.rs @@ -1,27 +1,19 @@ -use std::{fmt::Debug, marker::PhantomData}; - use super::{ commitment::{KZGCommitmentScheme, ParamsKZG}, - msm::{DualMSM, MSMKZG}, - multiopen::VerifierGWC, + msm::DualMSM, }; use crate::{ helpers::SerdeCurveAffine, plonk::Error, poly::{ - commitment::{Verifier, MSM}, - ipa::msm::MSMIPA, + commitment::Verifier, strategy::{Guard, VerificationStrategy}, }, - transcript::{EncodedChallenge, TranscriptRead}, }; use ff::{Field, PrimeField}; -use group::Group; -use halo2curves::{ - pairing::{Engine, MillerLoopResult, MultiMillerLoop}, - CurveAffine, -}; +use halo2curves::pairing::{Engine, MultiMillerLoop}; use rand_core::OsRng; +use std::fmt::Debug; /// Wrapper for linear verification accumulator #[derive(Debug, Clone)] diff --git a/halo2_proofs/src/poly/multiopen.rs b/halo2_proofs/src/poly/multiopen.rs index 854018e90a..9cff28bde8 100644 --- a/halo2_proofs/src/poly/multiopen.rs +++ b/halo2_proofs/src/poly/multiopen.rs @@ -5,7 +5,7 @@ use super::{commitment::ParamsVerifier, PairMSM}; use crate::{ - arithmetic::{eval_polynomial, CurveAffine, FieldExt}, + arithmetic::{eval_polynomial, CurveAffine}, pairing::arithmetic::{MillerLoopResult, MultiMillerLoop}, poly::{msm::MSM, Coeff, Error, Polynomial}, }; @@ -129,7 +129,7 @@ impl<'r, 'params: 'r, C: CurveAffine> PartialEq for CommitmentReference<'r, C> { } } -trait Query: Sized + Clone { +trait Query: Sized + Clone { type Commitment: PartialEq + Clone; fn get_rotation(&self) -> Rotation; @@ -141,7 +141,7 @@ trait Query: Sized + Clone { #[cfg(test)] mod tests { - use crate::arithmetic::{eval_polynomial, FieldExt}; + use crate::arithmetic::{eval_polynomial}; use crate::pairing::bn256::{Bn256, Fr, G1Affine}; use crate::poly::{ commitment::{Params, ParamsVerifier}, @@ -173,7 +173,7 @@ mod tests { use rand_core::OsRng; use super::*; - use crate::arithmetic::{eval_polynomial, FieldExt}; + use crate::arithmetic::{eval_polynomial}; use crate::poly::{commitment::Params, EvaluationDomain}; use crate::transcript::Challenge255; diff --git a/halo2_proofs/src/poly/multiopen_test.rs b/halo2_proofs/src/poly/multiopen_test.rs index d57243f712..47c6731167 100644 --- a/halo2_proofs/src/poly/multiopen_test.rs +++ b/halo2_proofs/src/poly/multiopen_test.rs @@ -2,33 +2,28 @@ mod test { use crate::arithmetic::eval_polynomial; use crate::plonk::Error; + use crate::poly::commitment::Blind; use crate::poly::commitment::ParamsProver; - use crate::poly::commitment::{Blind, ParamsVerifier, MSM}; - use crate::poly::query::PolynomialPointer; use crate::poly::{ commitment::{CommitmentScheme, Params, Prover, Verifier}, query::{ProverQuery, VerifierQuery}, strategy::VerificationStrategy, EvaluationDomain, }; - use crate::poly::{Coeff, Polynomial}; use crate::transcript::{ - self, Blake2bRead, Blake2bWrite, Challenge255, EncodedChallenge, Keccak256Read, - Keccak256Write, TranscriptRead, TranscriptReadBuffer, TranscriptWrite, - TranscriptWriterBuffer, + Blake2bRead, Blake2bWrite, Challenge255, EncodedChallenge, Keccak256Read, Keccak256Write, + TranscriptReadBuffer, TranscriptWriterBuffer, }; - use ff::{Field, PrimeField, 
WithSmallOrderMulGroup}; - use group::{Curve, Group}; - use halo2curves::CurveAffine; - use rand_core::{OsRng, RngCore}; - use std::io::{Read, Write}; + use ff::WithSmallOrderMulGroup; + use group::Curve; + use rand_core::OsRng; #[test] fn test_roundtrip_ipa() { use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA}; use crate::poly::ipa::multiopen::{ProverIPA, VerifierIPA}; use crate::poly::ipa::strategy::AccumulatorStrategy; - use halo2curves::pasta::{Ep, EqAffine, Fp}; + use halo2curves::pasta::EqAffine; const K: u32 = 4; @@ -65,7 +60,7 @@ mod test { use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA}; use crate::poly::ipa::multiopen::{ProverIPA, VerifierIPA}; use crate::poly::ipa::strategy::AccumulatorStrategy; - use halo2curves::pasta::{Ep, EqAffine, Fp}; + use halo2curves::pasta::EqAffine; const K: u32 = 4; @@ -102,8 +97,7 @@ mod test { use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::kzg::multiopen::{ProverGWC, VerifierGWC}; use crate::poly::kzg::strategy::AccumulatorStrategy; - use halo2curves::bn256::{Bn256, G1Affine}; - use halo2curves::pairing::Engine; + use halo2curves::bn256::Bn256; const K: u32 = 4; @@ -134,8 +128,7 @@ mod test { use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; use crate::poly::kzg::multiopen::{ProverSHPLONK, VerifierSHPLONK}; use crate::poly::kzg::strategy::AccumulatorStrategy; - use halo2curves::bn256::{Bn256, G1Affine}; - use halo2curves::pairing::Engine; + use halo2curves::bn256::Bn256; const K: u32 = 4; diff --git a/halo2_proofs/src/poly/query.rs b/halo2_proofs/src/poly/query.rs index c596e6a71c..b9894edd38 100644 --- a/halo2_proofs/src/poly/query.rs +++ b/halo2_proofs/src/poly/query.rs @@ -1,11 +1,10 @@ -use std::{fmt::Debug, ops::Deref}; +use std::fmt::Debug; -use super::commitment::{Blind, CommitmentScheme, Params, MSM}; +use super::commitment::{Blind, MSM}; use crate::{ arithmetic::eval_polynomial, - poly::{commitment, Coeff, Polynomial}, + poly::{Coeff, Polynomial}, }; -use ff::Field; use halo2curves::CurveAffine; pub trait Query: Sized + Clone + Send + Sync { @@ -100,6 +99,7 @@ impl<'com, C: CurveAffine, M: MSM> Clone for VerifierQuery<'com, C, M> { } } +#[allow(clippy::upper_case_acronyms)] #[derive(Clone, Debug)] pub enum CommitmentReference<'r, C: CurveAffine, M: MSM> { Commitment(&'r C), diff --git a/halo2_proofs/src/poly/strategy.rs b/halo2_proofs/src/poly/strategy.rs index 36480d372f..850f95e6c9 100644 --- a/halo2_proofs/src/poly/strategy.rs +++ b/halo2_proofs/src/poly/strategy.rs @@ -1,11 +1,5 @@ -use halo2curves::CurveAffine; -use rand_core::RngCore; - -use super::commitment::{CommitmentScheme, Verifier, MSM}; -use crate::{ - plonk::Error, - transcript::{EncodedChallenge, TranscriptRead}, -}; +use super::commitment::{CommitmentScheme, Verifier}; +use crate::plonk::Error; /// Guards is unfinished verification result. Implement this to construct various /// verification strategies such as aggregation and recursion. 
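Note on the `poly/ipa/multiopen/verifier.rs` hunks earlier in this patch: they replace the Horner-style accumulation (scale the whole per-set MSM by `x_1`, then append the next commitment with coefficient one) with a running `x_1` power kept alongside each accumulator, consumed in reverse order of the commitment map. The standalone snippet below, using plain integers rather than the verifier's MSM and field types, is only an illustration of why the two orderings produce the same linear combination; it is not code from this patch.

```
// Standalone illustration (plain integers, not the verifier's MSM or field
// types) of the accumulation-order change in poly/ipa/multiopen/verifier.rs.
fn main() {
    let x1: i128 = 7;
    let commitments: [i128; 4] = [3, 5, 11, 13]; // stand-ins for per-set commitments

    // Old order: scale the accumulator by x_1, then append the next term with
    // coefficient one (Horner-style, forward order).
    let mut horner = 0i128;
    for c in commitments.iter() {
        horner = horner * x1 + c;
    }

    // New order: keep a running power of x_1, append each term scaled by the
    // current power, and walk the commitments in reverse.
    let mut explicit = 0i128;
    let mut x1_power = 1i128;
    for c in commitments.iter().rev() {
        explicit += c * x1_power;
        x1_power *= x1;
    }

    // Both give c_0*x1^3 + c_1*x1^2 + c_2*x1 + c_3.
    assert_eq!(horner, explicit);
}
```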
diff --git a/halo2_proofs/src/transcript.rs b/halo2_proofs/src/transcript.rs index 166b0e6f90..ce823850da 100644 --- a/halo2_proofs/src/transcript.rs +++ b/halo2_proofs/src/transcript.rs @@ -253,12 +253,12 @@ where C::Scalar: FromUniformBytes<64>, { fn squeeze_challenge(&mut self) -> Challenge255 { - self.state.update(&[KECCAK256_PREFIX_CHALLENGE]); + self.state.update([KECCAK256_PREFIX_CHALLENGE]); let mut state_lo = self.state.clone(); let mut state_hi = self.state.clone(); - state_lo.update(&[KECCAK256_PREFIX_CHALLENGE_LO]); - state_hi.update(&[KECCAK256_PREFIX_CHALLENGE_HI]); + state_lo.update([KECCAK256_PREFIX_CHALLENGE_LO]); + state_hi.update([KECCAK256_PREFIX_CHALLENGE_HI]); let result_lo: [u8; 32] = state_lo.finalize().as_slice().try_into().unwrap(); let result_hi: [u8; 32] = state_hi.finalize().as_slice().try_into().unwrap(); @@ -270,7 +270,7 @@ where } fn common_point(&mut self, point: C) -> io::Result<()> { - self.state.update(&[KECCAK256_PREFIX_POINT]); + self.state.update([KECCAK256_PREFIX_POINT]); let coords: Coordinates = Option::from(point.coordinates()).ok_or_else(|| { io::Error::new( io::ErrorKind::Other, @@ -284,7 +284,7 @@ where } fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> { - self.state.update(&[KECCAK256_PREFIX_SCALAR]); + self.state.update([KECCAK256_PREFIX_SCALAR]); self.state.update(scalar.to_repr().as_ref()); Ok(()) @@ -427,12 +427,12 @@ where C::Scalar: FromUniformBytes<64>, { fn squeeze_challenge(&mut self) -> Challenge255 { - self.state.update(&[KECCAK256_PREFIX_CHALLENGE]); + self.state.update([KECCAK256_PREFIX_CHALLENGE]); let mut state_lo = self.state.clone(); let mut state_hi = self.state.clone(); - state_lo.update(&[KECCAK256_PREFIX_CHALLENGE_LO]); - state_hi.update(&[KECCAK256_PREFIX_CHALLENGE_HI]); + state_lo.update([KECCAK256_PREFIX_CHALLENGE_LO]); + state_hi.update([KECCAK256_PREFIX_CHALLENGE_HI]); let result_lo: [u8; 32] = state_lo.finalize().as_slice().try_into().unwrap(); let result_hi: [u8; 32] = state_hi.finalize().as_slice().try_into().unwrap(); @@ -444,7 +444,7 @@ where } fn common_point(&mut self, point: C) -> io::Result<()> { - self.state.update(&[KECCAK256_PREFIX_POINT]); + self.state.update([KECCAK256_PREFIX_POINT]); let coords: Coordinates = Option::from(point.coordinates()).ok_or_else(|| { io::Error::new( io::ErrorKind::Other, @@ -458,7 +458,7 @@ where } fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> { - self.state.update(&[KECCAK256_PREFIX_SCALAR]); + self.state.update([KECCAK256_PREFIX_SCALAR]); self.state.update(scalar.to_repr().as_ref()); Ok(()) diff --git a/halo2_proofs/src/transcript/blake2b.rs b/halo2_proofs/src/transcript/blake2b.rs index 99954e6efd..78f59811aa 100644 --- a/halo2_proofs/src/transcript/blake2b.rs +++ b/halo2_proofs/src/transcript/blake2b.rs @@ -5,7 +5,7 @@ use super::{ use blake2b_simd::{Params as Blake2bParams, State as Blake2bState}; use ff::Field; use group::ff::PrimeField; -use halo2curves::{Coordinates, CurveAffine, FieldExt}; +use halo2curves::{Coordinates, CurveAffine}; use num_bigint::BigUint; use std::convert::TryInto; use std::io::{self, Read, Write}; diff --git a/halo2_proofs/tests/plonk_api.rs b/halo2_proofs/tests/plonk_api.rs index 1f1f0a2aa7..d27c3b8ccd 100644 --- a/halo2_proofs/tests/plonk_api.rs +++ b/halo2_proofs/tests/plonk_api.rs @@ -514,7 +514,7 @@ fn plonk_api() { let cs = StandardPlonk::new(config); #[cfg(feature = "parallel_syn")] - let mut is_first_pass_vec = vec![true; 8]; + let mut is_first_pass_vec = [true; 8]; let _ = cs.public_input(&mut layouter, 
|| Value::known(F::ONE + F::ONE))?; @@ -525,8 +525,7 @@ fn plonk_api() { layouter.assign_regions( || "regions", (0..8) - .into_iter() - .zip(is_first_pass_vec.chunks_mut(1).into_iter()) + .zip(is_first_pass_vec.chunks_mut(1)) .map(|(_, is_first_pass)| { |mut region: Region<'_, F>| -> Result<(), Error> { let n = 1 << 13; @@ -717,7 +716,10 @@ fn plonk_api() { Ok(prover) => prover, Err(e) => panic!("{:?}", e), }; + #[cfg(feature = "multicore")] assert_eq!(prover.verify_par(), Ok(())); + #[cfg(not(feature = "multicore"))] + assert_eq!(prover.verify(), Ok(())); log::info!("mock proving succeed!"); let params = ParamsKZG::::new(K); @@ -1222,6 +1224,8 @@ fn plonk_api_with_many_subregions() { impl Circuit for MyCircuit { type Config = PlonkConfig; type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "circuit-params")] + type Params = (); fn without_witnesses(&self) -> Self { Self { @@ -1249,7 +1253,6 @@ fn plonk_api_with_many_subregions() { layouter.assign_regions( || "regions", (0..(1 << 14)) - .into_iter() .map(|_| { let mut is_first_pass = true; move |mut region: Region<'_, F>| -> Result<(), Error> { @@ -1405,7 +1408,10 @@ fn plonk_api_with_many_subregions() { Ok(prover) => prover, Err(e) => panic!("{:?}", e), }; + #[cfg(feature = "multicore")] assert_eq!(prover.verify_par(), Ok(())); + #[cfg(not(feature = "multicore"))] + assert_eq!(prover.verify(), Ok(())); log::info!("mock proving succeed!"); let params = ParamsKZG::::new(K); diff --git a/rust-toolchain b/rust-toolchain index af92bdd9f5..ee2d639b86 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.63.0 +nightly-2023-08-12 \ No newline at end of file
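Note on the multicore changes spread across this patch (e.g. `verifier/batch.rs`, `shplonk.rs`, `shplonk/prover.rs`, and the `#[cfg(feature = "multicore")]` branches in `plonk_api.rs`): parallel iteration is now routed through `crate::multicore` re-exports instead of importing `rayon::prelude` directly, and `.par_iter()` call sites become `.as_slice().into_par_iter()`. The `multicore` module itself is not part of this diff, so the following is only a hedged sketch, assuming the shim mirrors rayon's trait names, of how the same `into_par_iter()` surface could be exposed with and without the `multicore` feature; every name below is illustrative, not the crate's actual implementation.

```
// Hypothetical sketch of a `multicore` shim; the real module is not shown in
// this patch, so the names and details here are illustrative assumptions.
#[cfg(feature = "multicore")]
pub use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};

// Without the `multicore` feature, fall back to ordinary sequential iterators
// behind the same trait name, so call sites can keep writing
// `.as_slice().into_par_iter()`.
#[cfg(not(feature = "multicore"))]
pub trait IntoParallelIterator {
    type Iter: Iterator<Item = Self::Item>;
    type Item;
    fn into_par_iter(self) -> Self::Iter;
}

#[cfg(not(feature = "multicore"))]
impl<T: IntoIterator> IntoParallelIterator for T {
    type Iter = T::IntoIter;
    type Item = T::Item;
    fn into_par_iter(self) -> Self::Iter {
        self.into_iter()
    }
}
```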